andstor committed on
Commit
8e4e6a1
1 Parent(s): 70c602f

Fix encodings

Files changed (1)
  1. the_pile_github.py +8 -8
the_pile_github.py CHANGED
@@ -18,6 +18,7 @@ import os
import re
import pandas as pd
import datasets
+import json


_CITATION = """\
@@ -135,7 +136,7 @@ class SmartContracts(datasets.GeneratorBasedBuilder):
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
-                "meta": datasets.Sequence(feature={'language': datasets.Value('string')}),
+                "meta": {'language': datasets.Value('string')},
            }
        )

@@ -164,13 +165,13 @@ class SmartContracts(datasets.GeneratorBasedBuilder):
        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
        train_urls = _URLS["train"][self.config.name]
        train_files = dl_manager.download_and_extract(train_urls)
-        dev_files = dl_manager.download_and_extract(_URLS["dev"])
-        test_files = dl_manager.download_and_extract(_URLS["test"])
+        dev_file = dl_manager.download_and_extract(_URLS["dev"])
+        test_file = dl_manager.download_and_extract(_URLS["test"])

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"split_key": "train", "files": train_files}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"split_key": "validation", "files": dev_files}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"split_key": "test", "files": test_files}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"split_key": "validation", "files": [dev_file]}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"split_key": "test", "files": [test_file]}),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
@@ -179,7 +180,6 @@ class SmartContracts(datasets.GeneratorBasedBuilder):
        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
        #data = pd.read_parquet(filepath)
-
        key = 0
        for path in files:
            data = pd.read_parquet(path)
@@ -189,7 +189,7 @@ class SmartContracts(datasets.GeneratorBasedBuilder):
                if split_key == "train":
                    yield key, {
                        "text": row.text,
-                        "meta": row.meta,
+                        "meta": json.loads(row.meta.decode('utf-8')),
                    }

                elif split_key in ["validation", "test"]:
@@ -199,7 +199,7 @@ class SmartContracts(datasets.GeneratorBasedBuilder):
                            "meta": row.meta,
                        }
                    else:
-                        language = row.meta[0]["language"].lower().replace(" ", "_") # e.g. "Jupyter Notebook" -> "jupyter_notebook"
+                        language = row.meta["language"].lower().replace(" ", "_") # e.g. "Jupyter Notebook" -> "jupyter_notebook"
                        if language == self.config.name:
                            yield key, {
                                "text": row.text,