File size: 4,275 Bytes
04c34c6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
"""EVMC is a collection of smart contracts from the ETH blockchain."""

import itertools
import os

import pyarrow as pa
import pyarrow.lib as pl
import pyarrow.parquet as pq

# META ########################################################################

# source: parquet fragments with binary-encoded columns (read-only)
INPUT_DATASET_PATH = '.data/ethereum/train/'
# destination: mirrored fragments re-encoded as hex / UTF-8 strings
OUTPUT_DATASET_PATH = 'data/ethereum/train/'

# PARQUET SCHEMA ##############################################################

# schema of the raw dataset: numeric identifiers + raw-byte payload columns
INPUT_SCHEMA = pa.schema(fields=(
    [
        pa.field('chain_id', pa.uint64()),
        pa.field('block_number', pa.uint32()),]
    + [
        # every remaining column is stored as raw bytes in the source files
        pa.field(__name, pa.large_binary())
        for __name in (
            'block_hash', 'transaction_hash', 'deployer_address',
            'factory_address', 'contract_address', 'creation_bytecode',
            'runtime_bytecode', 'creation_sourcecode')]))

# schema of the converted dataset: same columns, bytes re-encoded as strings
# (block_number is widened from uint32 to uint64 on output)
OUTPUT_SCHEMA = pa.schema(fields=(
    [
        pa.field('chain_id', pa.uint64()),
        pa.field('block_number', pa.uint64()),]
    + [
        pa.field(__name, pa.string())
        for __name in (
            'block_hash', 'transaction_hash', 'deployer_address',
            'factory_address', 'contract_address', 'creation_bytecode',
            'runtime_bytecode', 'creation_sourcecode')]))

# GENERIC #####################################################################

def chunk(seq: list, size: int, repeats: bool=True) -> list:
    """Split a sequence into consecutive chunks of at most `size` elements.

    The last chunk may be shorter when len(seq) is not a multiple of `size`.
    When `repeats` is False, duplicate chunks are removed while preserving
    the order of first appearance.

    Args:
        seq: the sequence to split (list, str, or any sliceable sequence).
        size: the maximum length of each chunk (must be > 0).
        repeats: keep duplicate chunks when True, drop them when False.

    Returns:
        The list of chunks (each chunk has the same type as `seq` slices).
    """
    __chunks = [seq[__i:__i + size] for __i in range(0, len(seq), size)]
    if repeats:
        return __chunks
    # BUGFIX: the previous `set(__chunks)` raised TypeError on list inputs
    # (lists are unhashable) and would have scrambled order; dedupe on a
    # hashable tuple key instead, keeping first occurrences in order
    __unique = {}
    for __c in __chunks:
        __unique.setdefault(tuple(__c), __c)
    return list(__unique.values())

# ENCODE ######################################################################

def _get_field(record: dict, key: str, default: bytes=b'') -> bytes:
    __value = record.get(key, default)
    return default if __value is None else __value

def encode_binary_to_hex(record: dict) -> dict:
    """Re-encode one raw record into JSON-friendly scalar types.

    Numeric identifiers pass through unchanged, hash / address / bytecode
    bytes become lowercase hex strings, and the source code bytes are
    decoded as UTF-8 text.
    """
    # columns whose bytes are rendered as hex strings
    __hex_columns = (
        'block_hash', 'transaction_hash', 'deployer_address',
        'factory_address', 'contract_address', 'creation_bytecode',
        'runtime_bytecode')
    __encoded = {
        'chain_id': _get_field(record=record, key='chain_id', default=1),
        'block_number': _get_field(record=record, key='block_number', default=0),}
    for __column in __hex_columns:
        __encoded[__column] = _get_field(record=record, key=__column, default=b'').hex()
    # the source code is human-readable text, not hex
    __encoded['creation_sourcecode'] = _get_field(record=record, key='creation_sourcecode', default=b'').decode('utf-8')
    return __encoded

# TABLE #######################################################################

def process_batch(batch: pl.RecordBatch) -> list:
    """Decode a record batch into a list of hex/string-encoded dicts."""
    __records = batch.to_pylist()
    return list(map(encode_binary_to_hex, __records))

def process_table(table: pl.Table) -> list:
    """Encode every record of a table, batch by batch.

    The table is walked in small batches (128 rows) to bound peak memory,
    and the per-batch results are flattened into a single row list.
    """
    __per_batch = (
        process_batch(batch=__batch)
        for __batch in table.to_batches(max_chunksize=128))
    return list(itertools.chain.from_iterable(__per_batch))

# DATASET #####################################################################

def process_dataset(dataset: pq.ParquetDataset, output: str=OUTPUT_DATASET_PATH, schema: pl.Schema=OUTPUT_SCHEMA) -> None:
    """Re-encode every fragment of a parquet dataset and write it to `output`.

    Each fragment keeps its file name; the converted file is written under
    the `output` directory with its columns cast to `schema`.

    Args:
        dataset: the source parquet dataset (binary-encoded columns).
        output: directory receiving the converted fragments.
        schema: target schema enforced on the converted tables.
    """
    for __fragment in dataset.fragments:
        __path = os.path.join(output, os.path.basename(__fragment.path))
        # encode each record
        # BUGFIX: the `schema` argument was previously accepted but never
        # applied, so from_pylist inferred column types instead of using
        # the declared output schema
        __table = pl.Table.from_pylist(process_table(table=__fragment.to_table()), schema=schema)
        # save
        pq.write_table(table=__table, where=__path)
        # log
        print('replaced {}'.format(__path))

# MAIN ########################################################################

if __name__ == '__main__':
    # open the raw dataset and rewrite every fragment with decoded fields
    __dataset = pq.ParquetDataset(INPUT_DATASET_PATH, schema=INPUT_SCHEMA)
    process_dataset(dataset=__dataset, output=OUTPUT_DATASET_PATH, schema=OUTPUT_SCHEMA)