tomrb committed
Commit 56feadb
1 Parent(s): 0acf86a

modified the load script

Files changed (1)
  1. minipileoflaw.py +81 -127
minipileoflaw.py CHANGED
@@ -1,158 +1,112 @@
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# TODO: Address all TODOs and remove all explanatory comments
-"""TODO: Add a description here."""
-
-
-import csv
 import json
-import os
 
 import datasets
 
 
-# TODO: Add BibTeX citation
-# Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = """\
-@InProceedings{huggingface:dataset,
-title = {A great new dataset},
-author={huggingface, Inc.
-},
-year={2020}
-}
 """
 
-# TODO: Add description of the dataset here
-# You can copy an official description
-_DESCRIPTION = """\
-This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
 """
 
-# TODO: Add a link to an official homepage for the dataset here
-_HOMEPAGE = ""
 
-# TODO: Add the licence for the dataset here if you can find it
-_LICENSE = ""
 
-# TODO: Add link to the official dataset URLs here
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLS = {
-    "first_domain": "https://huggingface.co/datasets/tomrb/minipileoflaw/blob/main/data/minipileoflaw_acus_reports",
-    "second_domain": "https://huggingface.co/datasets/tomrb/minipileoflaw/blob/main/data/minipileoflaw_acus_reports",
-}
 
 
-# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
-class MiniPileOfLaw(datasets.GeneratorBasedBuilder):
-    """TODO: Short description of my dataset."""
-
-    VERSION = datasets.Version("1.1.0")
-
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-    # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
-        datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
-    ]
-
-    DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it make sense.
 
     def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-
-        features = datasets.Features(
-            {
-                "text": datasets.Value("string"),
-                "created_timestamp": datasets.Value("string"),
-                "downloaded_timestamp": datasets.Value("string"),
-                "url": datasets.Value("string")
-
-                # These are the features of your dataset like images, labels ...
-            }
-        )
-
         return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=features,  # Here we define them above because they are different between the two configurations
-            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-            # supervised_keys=("sentence", "label"),
-            # Homepage of the dataset for documentation
-            homepage=_HOMEPAGE,
-            # License for the dataset if available
-            license=_LICENSE,
-            # Citation for the dataset
             citation=_CITATION,
         )
 
     def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        urls = _URLS[self.config.name]
-        data_dir = dl_manager.download_and_extract(urls)
         return [
             datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "train.csv"),
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "valid.csv"),
-                    "split": "valid",
-                },
             ),
         ]
 
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath, split):
-        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        with open(filepath, encoding="utf-8") as f:
-            for key, row in enumerate(f):
-                data = json.loads(row)
-                if self.config.name == "first_domain":
-                    # Yields examples as (key, example) tuples
-                    yield key, {
-                        "sentence": data["sentence"],
-                        "option1": data["option1"],
-                        "answer": "" if split == "test" else data["answer"],
-                    }
-                else:
-                    yield key, {
-                        "sentence": data["sentence"],
-                        "option2": data["option2"],
-                        "second_domain_answer": "" if split == "test" else data["second_domain_answer"],
-                    }
+"""minipileoflaw"""
+
+
 import json
 
 import datasets
+try:
+    import lzma as xz
+except ImportError:
+    import pylzma as xz
 
 
+datasets.logging.set_verbosity_info()
+logger = datasets.logging.get_logger(__name__)
+
+
+_DESCRIPTION = """
+This is minipileoflaw, a small version of the Pile of Law dataset.
 """
 
+_CITATION = """
+@misc{hendersonkrass2022pileoflaw,
+    url = {https://arxiv.org/abs/2207.00220},
+    author = {Henderson, Peter and Krass, Mark S. and Zheng, Lucia and Guha, Neel and Manning, Christopher D. and Jurafsky, Dan and Ho, Daniel E.},
+    title = {Pile of Law: Learning Responsible Data Filtering from the Law and a 256GB Open-Source Legal Dataset},
+    publisher = {arXiv},
+    year = {2022}
+}
 """
 
+_URL = "https://huggingface.co/datasets/tomrb/minipileoflaw"
 
+# Use resolve/ (raw file contents) rather than blob/ (the HTML page for the
+# file) so that the download manager fetches the actual data files.
+BASE_URL = "https://huggingface.co/datasets/tomrb/minipileoflaw/resolve/main/data/minipileoflaw_"
 
+subsets_names = ['r_legaladvice', 'courtlistener_docket_entry_documents', 'atticus_contracts', 'courtlistener_opinions', 'federal_register', 'bva_opinions', 'us_bills', 'cc_casebooks', 'tos', 'euro_parl', 'nlrb_decisions', 'scotus_oral_arguments', 'cfr', 'state_codes', 'scotus_filings', 'exam_outlines', 'edgar', 'cfpb_creditcard_contracts', 'constitutions', 'congressional_hearings', 'oig', 'olc_memos', 'uscode', 'founding_docs', 'ftc_advisory_opinions', 'echr', 'eurlex', 'tax_rulings', 'un_debates', 'fre', 'frcp', 'canadian_decisions', 'eoir', 'dol_ecab', 'icj-pcij', 'uspto_office_actions', 'ed_policy_guidance', 'acus_reports', 'hhs_alj_opinions', 'sec_administrative_proceedings', 'fmshrc_bluebooks', 'resource_contracts', 'medicaid_policy_guidance', 'irs_legal_advice_memos', 'doj_guidance_documents']
 
+_DATA_URL = {
+    key: {
+        "train": [f"{BASE_URL}{key}_train.csv"],
+        "validation": [f"{BASE_URL}{key}_valid.csv"],
+    }
+    for key in subsets_names
+}
 
+_VARIANTS = ["all"] + list(_DATA_URL.keys())
 
 
+class MiniPileOfLaw(datasets.GeneratorBasedBuilder):
+    """Mini Pile of Law: one config per legal-data subset, plus an "all" config."""
 
+    BUILDER_CONFIGS = [datasets.BuilderConfig(name=name) for name in _VARIANTS]
 
     def _info(self):
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "text": datasets.Value("string"),
+                    "created_timestamp": datasets.Value("string"),
+                    "downloaded_timestamp": datasets.Value("string"),
+                    "url": datasets.Value("string"),
+                }
+            ),
+            supervised_keys=None,
+            homepage=_URL,
             citation=_CITATION,
         )
 
     def _split_generators(self, dl_manager):
+        # Resolve which subsets to download: every subset for "all",
+        # otherwise just the selected config.
+        data_urls = {}
+        if self.config.name == "all":
+            data_sources = list(_DATA_URL.keys())
+        else:
+            data_sources = [self.config.name]
+        for split in ["train", "validation"]:
+            data_urls[split] = []
+            for source in data_sources:
+                for chunk in _DATA_URL[source][split]:
+                    data_urls[split].append(chunk)
+
+        train_downloaded_files = dl_manager.download(data_urls["train"])
+        validation_downloaded_files = dl_manager.download(data_urls["validation"])
         return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
             datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
             ),
         ]
 
+    def _generate_examples(self, filepaths):
+        """Yields examples in raw (text) form by iterating over all the files."""
+        id_ = 0
+        for filepath in filepaths:
+            logger.info("Generating examples from = %s", filepath)
+            try:
+                # Each file is read as xz-compressed JSON lines, one record per line.
+                with xz.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
+                    for line in f:
+                        if line:
+                            example = json.loads(line)
+                            if example is not None and isinstance(example, dict):
+                                yield id_, {
+                                    "text": example.get("text", ""),
+                                    "created_timestamp": example.get("created_timestamp", ""),
+                                    "downloaded_timestamp": example.get("downloaded_timestamp", ""),
+                                    "url": example.get("url", ""),
+                                }
+                                id_ += 1
+            except Exception:
+                logger.error("Error reading file: %s", filepath)
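
For reference, a minimal usage sketch of the updated loader (assuming the script is hosted on the Hub as tomrb/minipileoflaw; the valid config names are the entries of subsets_names plus "all"):

from datasets import load_dataset

# Load a single subset by config name ("r_legaladvice" is one entry of subsets_names) ...
ds = load_dataset("tomrb/minipileoflaw", "r_legaladvice", split="train")

# ... or every subset at once via the "all" config.
ds_all = load_dataset("tomrb/minipileoflaw", "all")

# Each example carries the four string features declared in _info().
print(ds[0]["text"][:200])

Each config exposes "train" and "validation" splits, matching the _train/_valid files listed per subset in _DATA_URL.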