keplersj committed
Commit 40edb18
1 Parent(s): ac614a9

refactor: initial version of 1000 row w/ schema

Files changed (2)
  1. convert.py +130 -113
  2. wikidata-20240304-all-1000.parquet +2 -2
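
Note: the hunks below begin at line 9 of convert.py, so the script's imports and the dump-path assignment sit above the diff. A minimal sketch of the presumed preamble, inferred only from the names used in the hunks (pa, pd, WikidataJsonDump, wjd_dump_path); the exact import lines are an assumption, not part of this commit:

# Presumed preamble of convert.py (assumption; only the names below are visible in the hunks).
import pandas as pd                                  # pd.DataFrame(data) in the table-building step
import pyarrow as pa                                 # pa.schema / pa.struct / pa.map_ / pa.list_
import pyarrow.parquet as pq                         # assumed: used below the hunks to write parquet_path
from qwikidata.json_dump import WikidataJsonDump     # assumed to come from the qwikidata package

wjd_dump_path = "wikidata-20240304-all.json.bz2"     # shown in the first hunk header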
convert.py CHANGED
@@ -9,17 +9,46 @@ wjd_dump_path = "wikidata-20240304-all.json.bz2"
wjd = WikidataJsonDump(wjd_dump_path)

# Create an empty list to store the dictionaries
- # data = []
+ data = []

# # Iterate over the entities in wjd and add them to the list
- # for ii, entity_dict in enumerate(wjd):
- #     if ii > 1:
- #         break
+ for ii, entity_dict in enumerate(wjd):
+     if ii > 1000:
+         break

- #     if entity_dict["type"] == "item":
- #         data.append(entity_dict)
+     if entity_dict["type"] == "item":
+         data.append(entity_dict)
+
+ # Create a Parquet schema for the [Wikidata Snak Format](https://doc.wikimedia.org/Wikibase/master/php/docs_topics_json.html#json_snaks)
+ # {
+ #   "snaktype": "value",
+ #   "property": "P17",
+ #   "datatype": "wikibase-item",
+ #   "datavalue": {
+ #     "value": {
+ #       "entity-type": "item",
+ #       "id": "Q30",
+ #       "numeric-id": 30
+ #     },
+ #     "type": "wikibase-entityid"
+ #   }
+ # }
+ snak = pa.struct([
+     ("snaktype", pa.string()),
+     ("property", pa.string()),
+     ("datatype", pa.string()),
+     ("datavalue", pa.struct([
+         ("value", pa.struct([
+             ("entity-type", pa.string()),
+             ("id", pa.string()),
+             ("numeric-id", pa.int64())
+         ])),
+         ("type", pa.string())
+     ]))
+ ])
+

# TODO: Schema for Data Set
+ # Based on the [Wikidata JSON Format Docs](https://doc.wikimedia.org/Wikibase/master/php/docs_topics_json.html)
# Create a schema for the table
# {
#   "id": "Q60",
@@ -32,115 +61,103 @@ wjd = WikidataJsonDump(wjd_dump_path)
#   "lastrevid": 195301613,
#   "modified": "2020-02-10T12:42:02Z"
#}
- # schema = pa.schema([
- #     ("id", pa.string()),
- #     ("type", pa.string()),
- #     # {
- #     #   "labels": {
- #     #     "en": {
- #     #       "language": "en",
- #     #       "value": "New York City"
- #     #     },
- #     #     "ar": {
- #     #       "language": "ar",
- #     #       "value": "\u0645\u062f\u064a\u0646\u0629 \u0646\u064a\u0648 \u064a\u0648\u0631\u0643"
- #     #     }
- #     #   }
- #     ("labels", pa.map_(pa.string(), pa.struct([
- #         ("language", pa.string()),
- #         ("value", pa.string())
- #     ]))),
- #     #   "descriptions": {
- #     #     "en": {
- #     #       "language": "en",
- #     #       "value": "largest city in New York and the United States of America"
- #     #     },
- #     #     "it": {
- #     #       "language": "it",
- #     #       "value": "citt\u00e0 degli Stati Uniti d'America"
- #     #     }
- #     #   }
- #     ("descriptions", pa.map_(pa.string(), pa.struct([
- #         ("language", pa.string()),
- #         ("value", pa.string())
- #     ]))),
- #     #   "aliases": {
- #     #     "en": [
- #     #       {
- #     #         "language": "en",pa.string
- #     #         "value": "New York"
- #     #       }
- #     #     ],
- #     #     "fr": [
- #     #       {
- #     #         "language": "fr",
- #     #         "value": "New York City"
- #     #       },
- #     #       {
- #     #         "language": "fr",
- #     #         "value": "NYC"
- #     #       },
- #     #       {
- #     #         "language": "fr",
- #     #         "value": "The City"
- #     #       },
- #     #       {
- #     #         "language": "fr",
- #     #         "value": "La grosse pomme"
- #     #       }
- #     #     ]
- #     #   }
- #     # }
- #     ("aliases", pa.map_(pa.string(), pa.struct([
- #         ("language", pa.string()),
- #         ("value", pa.string())
- #     ]))),
- #     # {
- #     #   "claims": {
- #     #     "P17": [
- #     #       {
- #     #         "id": "q60$5083E43C-228B-4E3E-B82A-4CB20A22A3FB",
- #     #         "mainsnak": {},
- #     #         "type": "statement",
- #     #         "rank": "normal",
- #     #         "qualifiers": {
- #     #           "P580": [],
- #     #           "P5436": []
- #     #         },
- #     #         "references": [
- #     #           {
- #     #             "hash": "d103e3541cc531fa54adcaffebde6bef28d87d32",
- #     #             "snaks": []
- #     #           }
- #     #         ]
- #     #       }
- #     #     ]
- #     #   }
- #     # }
- #     ("claims", pa.map_(pa.string(), pa.array(pa.struct([
- #         ("id", pa.string()),
- #         ("mainsnak", pa.struct([])),
- #         ("type", pa.string()),
- #         ("rank", pa.string()),
- #         ("qualifiers", pa.map_(pa.string(), pa.array(pa.struct([
-
- #         ])))),
- #         ("references", pa.array(pa.struct([
- #             ("hash", pa.string()),
- #             ("snaks", pa.array(pa.struct([])))
- #         ])))
- #     ])))),
- #     ("sitelinks", pa.struct([
- #         ("site", pa.string()),
- #         ("title", pa.string())
- #     ])),
- #     ("lastrevid", pa.int64()),
- #     ("modified", pa.string())
- # ])
+ schema = pa.schema([
+     ("id", pa.string()),
+     ("type", pa.string()),
+     # {
+     #   "labels": {
+     #     "en": {
+     #       "language": "en",
+     #       "value": "New York City"
+     #     },
+     #     "ar": {
+     #       "language": "ar",
+     #       "value": "\u0645\u062f\u064a\u0646\u0629 \u0646\u064a\u0648 \u064a\u0648\u0631\u0643"
+     #     }
+     #   }
+     ("labels", pa.map_(pa.string(), pa.struct([
+         ("language", pa.string()),
+         ("value", pa.string())
+     ]))),
+     #   "descriptions": {
+     #     "en": {
+     #       "language": "en",
+     #       "value": "largest city in New York and the United States of America"
+     #     },
+     #     "it": {
+     #       "language": "it",
+     #       "value": "citt\u00e0 degli Stati Uniti d'America"
+     #     }
+     #   }
+     ("descriptions", pa.map_(pa.string(), pa.struct([
+         ("language", pa.string()),
+         ("value", pa.string())
+     ]))),
+     #   "aliases": {
+     #     "en": [
+     #       {
+     #         "language": "en",pa.string
+     #         "value": "New York"
+     #       }
+     #     ],
+     #     "fr": [
+     #       {
+     #         "language": "fr",
+     #         "value": "New York City"
+     #       },
+     #       {
+     #         "language": "fr",
+     #         "value": "NYC"
+     #       },
+     #       {
+     #         "language": "fr",
+     #         "value": "The City"
+     #       },
+     #       {
+     #         "language": "fr",
+     #         "value": "La grosse pomme"
+     #       }
+     #     ]
+     #   }
+     # }
+     ("aliases", pa.map_(pa.string(), pa.list_(pa.struct([
+         ("language", pa.string()),
+         ("value", pa.string())
+     ])))),
+     # {
+     #   "claims": {
+     #     "P17": [
+     #       {
+     #         "id": "q60$5083E43C-228B-4E3E-B82A-4CB20A22A3FB",
+     #         "mainsnak": {},
+     #         "type": "statement",
+     #         "rank": "normal",
+     #         "qualifiers": {
+     #           "P580": [],
+     #           "P5436": []
+     #         },
+     #         "references": [
+     #           {
+     #             "hash": "d103e3541cc531fa54adcaffebde6bef28d87d32",
+     #             "snaks": []
+     #           }
+     #         ]
+     #       }
+     #     ]
+     #   }
+     # }
+     ("claims", pa.map_(pa.string(), pa.list_(snak))),
+     ("sitelinks", pa.struct([
+         ("site", pa.string()),
+         ("title", pa.string())
+     ])),
+     ("lastrevid", pa.int64()),
+     ("modified", pa.string())
+ ])

# Create a table from the list of dictionaries and the schema
- # table = pa.Table.from_pandas(pd.DataFrame(data), schema=schema)
- table = pa.Table.from_pandas(pd.DataFrame(wjd))
+ table = pa.Table.from_pandas(pd.DataFrame(data), schema=schema)
+ # table = pa.Table.from_pandas(pd.DataFrame(wjd))

# Write the table to disk as parquet
parquet_path = "wikidata-20240304-all.parquet"
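
As a quick sanity check (not part of the commit), the 1000-row sample file added below can be read back with pyarrow and compared against the schema defined above; the file name is taken from the second file in this commit:

# Sketch: verify the committed sample file round-trips through pyarrow (assumes pyarrow is installed).
import pyarrow.parquet as pq

table = pq.read_table("wikidata-20240304-all-1000.parquet")
print(table.num_rows)   # roughly 1000 item entities, per the loop's cutoff
print(table.schema)     # id, type, labels, descriptions, aliases, claims, sitelinks, lastrevid, modified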
wikidata-20240304-all-1000.parquet CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
- oid sha256:40316d762768104615584aa6df5b2809846ebd9e26ec9d220a3110fd3c7c3948
- size 69861490
+ oid sha256:3f7e27f98a954aec271bd3742091275da5be259741e40fa0157ef115089c3563
+ size 2496325