JP-SystemsX committed
Commit 7a9ec4c
Parent: 758b62b

Prototype Init (MVP)

Files changed (3)
  1. meta_data/0.jsonl +0 -0
  2. super_eurlex.py +109 -76
  3. text_data/DE/0.zip +3 -0
meta_data/0.jsonl ADDED
The diff for this file is too large to render. See raw diff
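Since the diff for meta_data/0.jsonl is too large to render, one way to inspect it locally is the same call the new loader script uses (pd.read_json with lines=True). This is a minimal sketch; the relative path assumes a local checkout of the repo:

import pandas as pd

# The commit adds this file at meta_data/0.jsonl (path relative to the repo root).
meta = pd.read_json("meta_data/0.jsonl", lines=True)
print(meta.columns.tolist())  # expected to include 'celex_id' plus the sector-1 metadata fields
print(meta.head())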
 
super_eurlex.py CHANGED
@@ -18,26 +18,18 @@
 import csv
 import json
 import os
+import pandas as pd

 import datasets


 # TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = """\
-@InProceedings{huggingface:dataset,
-title = {A great new dataset},
-author={huggingface, Inc.
-},
-year={2020}
-}
-"""
+_CITATION = """ """

 # TODO: Add description of the dataset here
 # You can copy an official description
-_DESCRIPTION = """\
-This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
-"""
+_DESCRIPTION = """ """

 # TODO: Add a link to an official homepage for the dataset here
 _HOMEPAGE = ""
@@ -52,18 +44,64 @@ _URLS = {
     "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
 }
+AVAILABLE_LANGUAGES=['DE']#, 'EN'
+SECTORS=['1']#, '1', '2', '3', '4', '5', '6', '7', '8', '9', 'C', 'E']
+AVAILABLE_FEATURES={
+    '1': datasets.Features({
+        'celex_id': datasets.Value("string"),
+        'text_html_raw': datasets.Value("string"),
+        'text_html_cleaned': datasets.Value("string"),
+        'text_cleaned': datasets.Value("string"),
+        'form': datasets.Sequence(datasets.Value("string")),
+        'subject_matter': datasets.Sequence(datasets.Value("string")),
+        'current_consolidated_version': datasets.Sequence(datasets.Value("string")),
+        'harmonisation_of_customs_law_community_transit': datasets.Sequence(datasets.Value("string")),
+        'harmonisation_of_customs_law_customs_territory': datasets.Sequence(datasets.Value("string")),
+        'harmonisation_of_customs_law_value_for_customs_purposes': datasets.Sequence(datasets.Value("string")),
+        'directory_code': datasets.Sequence(datasets.Value("string")),
+        'eurovoc': datasets.Sequence(datasets.Value("string")),
+        'customs_duties_community_tariff_quotas': datasets.Sequence(datasets.Value("string")),
+        'customs_duties_authorisation_to_defer_application_of_cct': datasets.Sequence(datasets.Value("string")),
+        'harmonisation_of_customs_law_various': datasets.Sequence(datasets.Value("string")),
+        'customs_duties_suspensions': datasets.Sequence(datasets.Value("string"))})
+}
+SECTOR_DESCRIPTIONS={
+    '1':""
+}


+class SuperEurlexConfig(datasets.BuilderConfig):
+    """BuilderConfig for SuperGLUE."""
+
+    def __init__(self, sector, language, features, citation, url, **kwargs):
+        """BuilderConfig for SuperGLUE.
+
+        Args:
+          sector: sector of the wanted data
+          language: the language code for the language in which the text shall
+            be written in
+          features: *list[string]*, list of the features that will appear in the
+            feature dict.
+          citation: *string*, citation for the data set.
+          url: *string*, url for information about the data set.
+          **kwargs: keyword arguments forwarded to super.
+        """
+        name=sector+'.'+language
+        super().__init__(name=name, version=datasets.Version("0.1.0"), **kwargs)
+        self.features = features
+        self.language = language
+        self.sector = sector
+        self.text_data_url = f"text_data/{language}/{sector}.jsonl"
+        self.meta_data_url = f"meta_data/{sector}.jsonl"
+        self.citation = citation
+        self.url = url
+
 # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
-class NewDataset(datasets.GeneratorBasedBuilder):
+class SuperEurlex(datasets.GeneratorBasedBuilder):
     """TODO: Short description of my dataset."""

     VERSION = datasets.Version("1.1.0")

-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
     # If you need to make complex sub-parts in the datasets with configurable options
     # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
     # BUILDER_CONFIG_CLASS = MyBuilderConfig
@@ -71,34 +109,24 @@ class NewDataset(datasets.GeneratorBasedBuilder):
     # You will be able to load one or the other configurations in the following list with
     # data = datasets.load_dataset('my_dataset', 'first_domain')
     # data = datasets.load_dataset('my_dataset', 'second_domain')
+
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
-        datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
-    ]
+        SuperEurlexConfig(#version=VERSION,
+                    sector=sect,
+                    language=lang,
+                    description=SECTOR_DESCRIPTIONS[sect],
+                    features=AVAILABLE_FEATURES[sect],
+                    citation=_CITATION,
+                    url=_HOMEPAGE)
+        for lang in AVAILABLE_LANGUAGES for sect in SECTORS
+    ]

-    DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it make sense.
+    DEFAULT_CONFIG_NAME = "3.DE"  # It's not mandatory to have a default configuration. Just use one if it make sense.

     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-        if self.config.name == "first_domain":  # This is the name of the configuration selected in BUILDER_CONFIGS above
-            features = datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "option1": datasets.Value("string"),
-                    "answer": datasets.Value("string")
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
-        else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
-            features = datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "option2": datasets.Value("string"),
-                    "second_domain_answer": datasets.Value("string")
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
-        return datasets.DatasetInfo(
+        features = AVAILABLE_FEATURES[self.config.sector]
+        info = datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
             # This defines the different columns of the dataset and their types
@@ -113,6 +141,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
             # Citation for the dataset
             citation=_CITATION,
         )
+        return info

     def _split_generators(self, dl_manager):
         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
@@ -121,52 +150,56 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        urls = _URLS[self.config.name]
+        urls = {'text': self.config.text_data_url,
+                'meta': self.config.meta_data_url} #_URLS[self.config.name]
         data_dir = dl_manager.download_and_extract(urls)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, "train.jsonl"),
-                    "split": "train",
+                    "text": data_dir['text'],
+                    "meta": data_dir['meta'],
+                    "language": self.config.language,
+                    "sector": self.config.sector,
+                    'split': 'train'
                 },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "dev.jsonl"),
-                    "split": "dev",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "test.jsonl"),
-                    "split": "test"
-                },
-            ),
+            )
         ]

     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath, split):
+    def _generate_examples(self, text, meta, sector, language, split):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        with open(filepath, encoding="utf-8") as f:
-            for key, row in enumerate(f):
-                data = json.loads(row)
-                if self.config.name == "first_domain":
-                    # Yields examples as (key, example) tuples
-                    yield key, {
-                        "sentence": data["sentence"],
-                        "option1": data["option1"],
-                        "answer": "" if split == "test" else data["answer"],
-                    }
-                else:
-                    yield key, {
-                        "sentence": data["sentence"],
-                        "option2": data["option2"],
-                        "second_domain_answer": "" if split == "test" else data["second_domain_answer"],
-                    }
+        print(text)
+        print(meta)
+        print(sector)
+        print(split)
+        print(sector)
+
+        print("Reading Text Data...")
+        text_data = pd.read_json(text, lines=True)
+        text_data['celex_id'] = text_data['celex_id'].apply(lambda x: x[0] if isinstance(x,list) else x)
+        print("Reading Meta Data...")
+        meta_data = pd.read_json(meta, lines=True)
+        meta_data['celex_id'] = meta_data['celex_id'].apply(lambda x: x[0] if isinstance(x, list) else x)
+        print("Combining Text & Meta Data...")
+        combined_data = pd.merge(text_data, meta_data, on='celex_id')
+        print("Converting To final dataset...")
+        dataset = datasets.Dataset.from_pandas(combined_data)
+        dataset = dataset.remove_columns('__index_level_0__')#.cache_files()
+        for i, sample in enumerate(dataset):
+            yield i, sample
+
+
+print("Hello World")
+if __name__ == '__main__':
+    import datasets as ds
+    import sys
+    print(sys.argv[0])
+    dataset = ds.load_dataset(sys.argv[0],'1.DE')
+    print(dataset)
+    for sample in dataset['train']:
+        continue
+        #print(sample)
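With this commit the builder exposes one config per (sector, language) pair, named "{sector}.{language}". A minimal load sketch under that scheme; the Hub repo id "JP-SystemsX/super_eurlex" is an assumption, and a local path to super_eurlex.py would work the same way:

import datasets

# "1.DE" selects sector 1 in German, the only combination enabled in this commit.
dataset = datasets.load_dataset("JP-SystemsX/super_eurlex", "1.DE")
print(dataset["train"][0]["celex_id"])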
text_data/DE/0.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a886d234545b4ff700cd456a675e22aee89fb990e95bf822fd943ff8aef357a8
+size 3370620904
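The archive itself is not stored in git history; this is a Git LFS pointer, and the roughly 3.4 GB zip is fetched from LFS storage on checkout. A downloaded copy can be checked against the pointer's oid, as in this sketch (the local path is an assumption):

import hashlib

EXPECTED_SHA256 = "a886d234545b4ff700cd456a675e22aee89fb990e95bf822fd943ff8aef357a8"

sha = hashlib.sha256()
with open("text_data/DE/0.zip", "rb") as f:  # hypothetical local path to the fetched archive
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)
assert sha.hexdigest() == EXPECTED_SHA256, "archive does not match the LFS pointer"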