Enrique Noriega committed
Commit 1ae609c · 1 parent: 0a8efaf

Updated the script, erased the old data file

Files changed (2):
  1. odinsynth_sequence_dataset.py +31 -14
  2. out.jsonl.gz +0 -3
odinsynth_sequence_dataset.py CHANGED
@@ -13,14 +13,12 @@
 # limitations under the License.
 """Dataset builder script for the Odinsynth sequence generation dataset"""
 
-
 import csv
 import json
 import os
 
 import datasets
 
-
 # TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
 # _CITATION = """\
@@ -43,6 +41,10 @@ _HOMEPAGE = ""
 
 _LICENSE = ""
 
+_URLS = {
+    "synthetic_surface": "synthetic_surface.tar.gz"
+}
+
 
 class OdinsynthSequenceDataset(datasets.GeneratorBasedBuilder):
     """This contains a dataset for odinsynth rule synthesis in a supervised manner"""
@@ -64,13 +66,12 @@ class OdinsynthSequenceDataset(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(name="synthetic_surface",
                                version=VERSION,
-                               description="Synthetic data with surface rules only"),
+                               description="Synthetic data with synthetic_surface rules only"),
     ]
 
     DEFAULT_CONFIG_NAME = "synthetic_surface"
 
     def _info(self):
-
         features = datasets.Features(
             {
                 "rule": datasets.Value("string"),
@@ -118,17 +119,33 @@ class OdinsynthSequenceDataset(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
 
-        data_path = dl_manager.download_and_extract("out.jsonl.gz")
+        data_path = dl_manager.download_and_extract(_URLS[self.config.name])
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": data_path,
+                    "filepath": os.path.join(data_path, "train.jsonl"),
                     "split": "train",
                 },
             ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "filepath": os.path.join(data_path, "dev.jsonl"),
+                    "split": "dev",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "filepath": os.path.join(data_path, "test.jsonl"),
+                    "split": "test",
+                },
+            ),
         ]
 
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
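With the validation and test generators added, the builder now exposes three splits instead of one. A usage sketch, assuming the script is loaded by path or Hub repo id (the path below is a placeholder):

    import datasets

    # Placeholder path: point load_dataset at this script or its Hub repo id.
    ds = datasets.load_dataset("odinsynth_sequence_dataset.py", "synthetic_surface")
    print(ds["train"][0]["rule"])   # backed by train.jsonl inside the archive
    print(ds["validation"].num_rows, ds["test"].num_rows)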
@@ -141,20 +158,20 @@ class OdinsynthSequenceDataset(datasets.GeneratorBasedBuilder):
                 "rule": data["rule"],
                 "spec": [
                     {
-                        "sentence": s[0],
+                        "sentence": s[0],
                         "matched_text": s[1],
-                        "words": s[3],
-                        "match_start": s[4],
-                        "match_end": s[5]
+                        "words": s[3],
+                        "match_start": s[4],
+                        "match_end": s[5]
                     }
                     for s in data['spec']
                 ],
                 "generation_info": [
                     {
-                        "transitions": i["transitions"],
-                        "generation_rules": i["generation_rules"],
-                        "delexicalized_generation_rules": i["delexicalized_generation_rules"],
-                        "innermost_substitutions": i["innermost_substitutions"],
+                        "transitions": i["transitions"],
+                        "generation_rules": i["generation_rules"],
+                        "delexicalized_generation_rules": i["delexicalized_generation_rules"],
+                        "innermost_substitutions": i["innermost_substitutions"],
                     }
                     for i in data['generation_info']
                 ]
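The comprehension in _generate_examples reads each spec entry positionally (index 0: sentence, 1: matched text, 3: words, 4 and 5: match offsets; index 2 is skipped) and each generation_info entry by key. A hypothetical JSONL record consistent with that access pattern, with invented values:

    import json

    # Invented example; only the shape is inferred from _generate_examples.
    record = {
        "rule": "[word=the] [tag=/N.*/]",   # invented Odinson-style rule
        "spec": [
            # s[0] sentence, s[1] matched_text, s[2] unused here,
            # s[3] words, s[4] match_start, s[5] match_end
            ["the cat sleeps", "the cat", None,
             ["the", "cat", "sleeps"], 0, 2],
        ],
        "generation_info": [{
            "transitions": [],
            "generation_rules": [],
            "delexicalized_generation_rules": [],
            "innermost_substitutions": [],
        }],
    }
    line = json.dumps(record)  # one object per line in {train,dev,test}.jsonl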
 
out.jsonl.gz DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8ad9505cd0526bea591491b88f7964a6f2f53e4f68d06333c3a1db4a4da97b5d
-size 108196388
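The deleted file was only a Git LFS pointer; the roughly 108 MB payload it referenced is superseded by the per-config archive. Based on the paths joined in _split_generators, the new archive is assumed to hold the three split files at its root; a sketch of packing it (the filenames come from the script, the rest is assumed):

    import tarfile

    # Assumed layout: _split_generators joins the extraction directory
    # directly with these filenames, so they must sit at the archive root.
    with tarfile.open("synthetic_surface.tar.gz", "w:gz") as tar:
        for split_file in ("train.jsonl", "dev.jsonl", "test.jsonl"):
            tar.add(split_file)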