Datasets:

Modalities:
Text
Formats:
parquet
ArXiv:
Libraries:
Datasets
pandas
License:
system HF staff committed on
Commit
00ba0a2
1 Parent(s): 1f9f8f8

Update files from the datasets library (from 1.16.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (2) hide show
  1. README.md +1 -0
  2. opus100.py +27 -20
README.md CHANGED
@@ -1,4 +1,5 @@
1
  ---
 
2
  task_categories:
3
  - sequence-modeling
4
  multilinguality:
 
1
  ---
2
+ pretty_name: Opus100
3
  task_categories:
4
  - sequence-modeling
5
  multilinguality:
opus100.py CHANGED
@@ -15,8 +15,6 @@
15
  """OPUS-100"""
16
 
17
 
18
- import os
19
-
20
  import datasets
21
 
22
 
@@ -208,23 +206,25 @@ class Opus100(datasets.GeneratorBasedBuilder):
208
  domain = "zero-shot"
209
 
210
  if domain == "supervised":
211
- dl_dir = dl_manager.download_and_extract(_URL["supervised"].format(lang_pair))
212
  elif domain == "zero-shot":
213
- dl_dir = dl_manager.download_and_extract(_URL["zero-shot"])
214
 
215
- data_dir = os.path.join(dl_dir, os.path.join("opus-100-corpus", "v1.0", domain, lang_pair))
216
  output = []
217
 
218
  test = datasets.SplitGenerator(
219
  name=datasets.Split.TEST,
220
  # These kwargs will be passed to _generate_examples
221
  gen_kwargs={
222
- "filepath": os.path.join(data_dir, f"opus.{lang_pair}-test.{src_tag}"),
223
- "labelpath": os.path.join(data_dir, f"opus.{lang_pair}-test.{tgt_tag}"),
 
224
  },
225
  )
226
 
227
- if f"opus.{lang_pair}-test.{src_tag}" in os.listdir(data_dir):
 
228
  output.append(test)
229
 
230
  if domain == "supervised":
@@ -232,33 +232,40 @@ class Opus100(datasets.GeneratorBasedBuilder):
232
  train = datasets.SplitGenerator(
233
  name=datasets.Split.TRAIN,
234
  gen_kwargs={
235
- "filepath": os.path.join(data_dir, f"opus.{lang_pair}-train.{src_tag}"),
236
- "labelpath": os.path.join(data_dir, f"opus.{lang_pair}-train.{tgt_tag}"),
 
237
  },
238
  )
239
 
240
- if f"opus.{lang_pair}-train.{src_tag}" in os.listdir(data_dir):
241
  output.append(train)
242
 
243
  valid = datasets.SplitGenerator(
244
  name=datasets.Split.VALIDATION,
245
  # These kwargs will be passed to _generate_examples
246
  gen_kwargs={
247
- "filepath": os.path.join(data_dir, f"opus.{lang_pair}-dev.{src_tag}"),
248
- "labelpath": os.path.join(data_dir, f"opus.{lang_pair}-dev.{tgt_tag}"),
 
249
  },
250
  )
251
 
252
- if f"opus.{lang_pair}-dev.{src_tag}" in os.listdir(data_dir):
253
  output.append(valid)
254
 
255
  return output
256
 
257
- def _generate_examples(self, filepath, labelpath):
258
  """Yields examples."""
259
  src_tag, tgt_tag = self.config.language_pair.split("-")
260
- with open(filepath, encoding="utf-8") as f1, open(labelpath, encoding="utf-8") as f2:
261
- src = f1.read().split("\n")[:-1]
262
- tgt = f2.read().split("\n")[:-1]
263
- for idx, (s, t) in enumerate(zip(src, tgt)):
264
- yield idx, {"translation": {src_tag: s, tgt_tag: t}}
 
 
 
 
 
 
15
  """OPUS-100"""
16
 
17
 
 
 
18
  import datasets
19
 
20
 
 
206
  domain = "zero-shot"
207
 
208
  if domain == "supervised":
209
+ archive = dl_manager.download(_URL["supervised"].format(lang_pair))
210
  elif domain == "zero-shot":
211
+ archive = dl_manager.download(_URL["zero-shot"])
212
 
213
+ data_dir = "/".join(["opus-100-corpus", "v1.0", domain, lang_pair])
214
  output = []
215
 
216
  test = datasets.SplitGenerator(
217
  name=datasets.Split.TEST,
218
  # These kwargs will be passed to _generate_examples
219
  gen_kwargs={
220
+ "filepath": f"{data_dir}/opus.{lang_pair}-test.{src_tag}",
221
+ "labelpath": f"{data_dir}/opus.{lang_pair}-test.{tgt_tag}",
222
+ "files": dl_manager.iter_archive(archive),
223
  },
224
  )
225
 
226
+ available_files = [path for path, _ in dl_manager.iter_archive(archive)]
227
+ if f"{data_dir}/opus.{lang_pair}-test.{src_tag}" in available_files:
228
  output.append(test)
229
 
230
  if domain == "supervised":
 
232
  train = datasets.SplitGenerator(
233
  name=datasets.Split.TRAIN,
234
  gen_kwargs={
235
+ "filepath": f"{data_dir}/opus.{lang_pair}-train.{src_tag}",
236
+ "labelpath": f"{data_dir}/opus.{lang_pair}-train.{tgt_tag}",
237
+ "files": dl_manager.iter_archive(archive),
238
  },
239
  )
240
 
241
+ if f"{data_dir}/opus.{lang_pair}-train.{src_tag}" in available_files:
242
  output.append(train)
243
 
244
  valid = datasets.SplitGenerator(
245
  name=datasets.Split.VALIDATION,
246
  # These kwargs will be passed to _generate_examples
247
  gen_kwargs={
248
+ "filepath": f"{data_dir}/opus.{lang_pair}-dev.{src_tag}",
249
+ "labelpath": f"{data_dir}/opus.{lang_pair}-dev.{tgt_tag}",
250
+ "files": dl_manager.iter_archive(archive),
251
  },
252
  )
253
 
254
+ if f"{data_dir}/opus.{lang_pair}-dev.{src_tag}" in available_files:
255
  output.append(valid)
256
 
257
  return output
258
 
259
+ def _generate_examples(self, filepath, labelpath, files):
260
  """Yields examples."""
261
  src_tag, tgt_tag = self.config.language_pair.split("-")
262
+ src, tgt = None, None
263
+ for path, f in files:
264
+ if path == filepath:
265
+ src = f.read().decode("utf-8").split("\n")[:-1]
266
+ elif path == labelpath:
267
+ tgt = f.read().decode("utf-8").split("\n")[:-1]
268
+ if src is not None and tgt is not None:
269
+ for idx, (s, t) in enumerate(zip(src, tgt)):
270
+ yield idx, {"translation": {src_tag: s, tgt_tag: t}}
271
+ break