apehex committed on
Commit
04c34c6
1 Parent(s): 64cd647

Convert binary fields to HEX

Browse files
Files changed (1) hide show
  1. hexify.py +97 -0
hexify.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """EVMC is a collection of smart contracts from the ETH blockchain."""
2
+
3
+ import itertools
4
+ import os
5
+
6
+ import pyarrow as pa
7
+ import pyarrow.lib as pl
8
+ import pyarrow.parquet as pq
9
+
10
+ # META ########################################################################
11
+
12
# source dataset with raw binary fields (note the leading dot: a hidden working dir)
INPUT_DATASET_PATH = '.data/ethereum/train/'
# destination for the HEX-encoded copy; fragment basenames are preserved
OUTPUT_DATASET_PATH = 'data/ethereum/train/'
14
+
15
+ # PARQUET SCHEMA ##############################################################
16
+
17
# schema of the raw records: hashes, addresses, bytecode and source are binary blobs
INPUT_SCHEMA = pa.schema(fields=(
    [pl.field('chain_id', pa.uint64()), pl.field('block_number', pa.uint32())]
    + [pl.field(__name, pa.large_binary()) for __name in (
        'block_hash',
        'transaction_hash',
        'deployer_address',
        'factory_address',
        'contract_address',
        'creation_bytecode',
        'runtime_bytecode',
        'creation_sourcecode',)]))
28
+
29
# schema of the converted records: every binary field becomes a string
# (NOTE: block_number is widened from uint32 to uint64 on output)
OUTPUT_SCHEMA = pa.schema(fields=(
    [pl.field('chain_id', pa.uint64()), pl.field('block_number', pa.uint64())]
    + [pl.field(__name, pa.string()) for __name in (
        'block_hash',
        'transaction_hash',
        'deployer_address',
        'factory_address',
        'contract_address',
        'creation_bytecode',
        'runtime_bytecode',
        'creation_sourcecode',)]))
40
+
41
+ # GENERIC #####################################################################
42
+
43
def chunk(seq: list, size: int, repeats: bool=True) -> list:
    """Split *seq* into successive pieces of *size* elements (last may be shorter).

    With repeats=False, duplicate pieces are dropped through a set, so the
    resulting order is arbitrary and the pieces must be hashable (e.g. when
    *seq* is a str or bytes — list slices would raise TypeError).
    """
    __pieces = [seq[__start:__start + size] for __start in range(0, len(seq), size)]
    if repeats:
        return __pieces
    return list(set(__pieces))
46
+
47
+ # ENCODE ######################################################################
48
+
49
+ def _get_field(record: dict, key: str, default: bytes=b'') -> bytes:
50
+ __value = record.get(key, default)
51
+ return default if __value is None else __value
52
+
53
+ def encode_binary_to_hex(record: dict) -> dict:
54
+ return {
55
+ 'chain_id': _get_field(record=record, key='chain_id', default=1),
56
+ 'block_number': _get_field(record=record, key='block_number', default=0),
57
+ 'block_hash': _get_field(record=record, key='block_hash', default=b'').hex(),
58
+ 'transaction_hash': _get_field(record=record, key='transaction_hash', default=b'').hex(),
59
+ 'deployer_address': _get_field(record=record, key='deployer_address', default=b'').hex(),
60
+ 'factory_address': _get_field(record=record, key='factory_address', default=b'').hex(),
61
+ 'contract_address': _get_field(record=record, key='contract_address', default=b'').hex(),
62
+ 'creation_bytecode': _get_field(record=record, key='creation_bytecode', default=b'').hex(),
63
+ 'runtime_bytecode': _get_field(record=record, key='runtime_bytecode', default=b'').hex(),
64
+ 'creation_sourcecode': _get_field(record=record, key='creation_sourcecode', default=b'').decode('utf-8'),}
65
+
66
+ # TABLE #######################################################################
67
+
68
def process_batch(batch: pl.RecordBatch) -> list:
    """Convert one record batch into a list of dicts with HEX-encoded fields."""
    __records = batch.to_pylist()
    return [encode_binary_to_hex(record=__record) for __record in __records]
70
+
71
def process_table(table: pl.Table) -> list:
    """Flatten *table* into a list of python dicts with binary fields hexified.

    The table is streamed batch by batch (at most 128 rows each) to bound
    peak memory; each record is converted through `encode_binary_to_hex`.
    Returns the full list of converted rows.
    """
    # chain the per-batch lists lazily instead of materializing the batch
    # list and extending an accumulator (avoids a throwaway intermediate)
    return list(itertools.chain.from_iterable(
        process_batch(batch=__batch)
        for __batch in table.to_batches(max_chunksize=128)))
80
+
81
+ # DATASET #####################################################################
82
+
83
def process_dataset(dataset: pq.ParquetDataset, output: str=OUTPUT_DATASET_PATH, schema: pl.Schema=OUTPUT_SCHEMA) -> None:
    """Hexify every fragment of *dataset* and write the result under *output*.

    Each parquet fragment is converted record by record and saved under the
    same basename in the *output* directory.

    Args:
        dataset: the parquet dataset to convert.
        output: destination directory for the converted fragments.
        schema: target schema enforced on the converted tables.
    """
    # make sure the destination directory exists before writing
    os.makedirs(output, exist_ok=True)
    for __fragment in dataset.fragments:
        __path = os.path.join(output, os.path.basename(__fragment.path))
        # encode each record and enforce the target schema
        # (bug fix: `schema` was previously accepted but never applied,
        # so output column types were inferred instead of OUTPUT_SCHEMA)
        __table = pl.Table.from_pylist(process_table(table=__fragment.to_table()), schema=schema)
        # save
        pq.write_table(table=__table, where=__path)
        # log
        print('replaced {}'.format(__path))
92
+
93
+ # MAIN ########################################################################
94
+
95
if __name__ == '__main__':
    # load the raw dataset and write a HEX-encoded copy of every fragment
    __dataset = pq.ParquetDataset(INPUT_DATASET_PATH, schema=INPUT_SCHEMA)
    process_dataset(dataset=__dataset, output=OUTPUT_DATASET_PATH, schema=OUTPUT_SCHEMA)