"""minipileoflaw"""


import json

import datasets


datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)


_DESCRIPTION = """
This is minipileoflaw
"""

_CITATION = """
@misc{hendersonkrass2022pileoflaw,
  url = {https://arxiv.org/abs/2207.00220},
  author = {Henderson, Peter and Krass, Mark S. and Zheng, Lucia and Guha, Neel and Manning, Christopher D. and Jurafsky, Dan and Ho, Daniel E.},
  title = {Pile of Law: Learning Responsible Data Filtering from the Law and a 256GB Open-Source Legal Dataset},
  publisher = {arXiv},
  year = {2022}
}
"""

_URL = "https://huggingface.co/datasets/tomrb/minipileoflaw"


BASE_URL = "https://huggingface.co/datasets/tomrb/minipileoflaw/blob/main/data/minipileoflaw_"


subsets_names = [
    'r_legaladvice', 'courtlistener_docket_entry_documents', 'atticus_contracts',
    'courtlistener_opinions', 'federal_register', 'bva_opinions', 'us_bills',
    'cc_casebooks', 'tos', 'euro_parl', 'nlrb_decisions', 'scotus_oral_arguments',
    'cfr', 'state_codes', 'scotus_filings', 'exam_outlines', 'edgar',
    'cfpb_creditcard_contracts', 'constitutions', 'congressional_hearings', 'oig',
    'olc_memos', 'uscode', 'founding_docs', 'ftc_advisory_opinions', 'echr',
    'eurlex', 'tax_rulings', 'un_debates', 'fre', 'frcp', 'canadian_decisions',
    'eoir', 'dol_ecab', 'icj-pcij', 'uspto_office_actions', 'ed_policy_guidance',
    'acus_reports', 'hhs_alj_opinions', 'sec_administrative_proceedings',
    'fmshrc_bluebooks', 'resource_contracts', 'medicaid_policy_guidance',
    'irs_legal_advice_memos', 'doj_guidance_documents',
]

_DATA_URL = {
    key: {
        "train": [f"{BASE_URL}{key}_train.pkl"],
        "validation": [f"{BASE_URL}{key}_valid.pkl"]
    }
    for key in subsets_names
}
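# For illustration: each subset maps to exactly one train shard and one
# validation shard, e.g.
#   _DATA_URL["tos"]["validation"]
#   -> ["https://huggingface.co/datasets/tomrb/minipileoflaw/resolve/main/data/minipileoflaw_tos_valid.pkl"]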

_VARIANTS = ["all"] + list(_DATA_URL.keys())


class MiniPileOfLaw(datasets.GeneratorBasedBuilder):
    """TODO"""

    BUILDER_CONFIGS = [datasets.BuilderConfig(name) for name in _VARIANTS]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "created_timestamp": datasets.Value("string"),
                    "downloaded_timestamp": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_urls = {}
        if self.config.name == "all":
            data_sources = list(_DATA_URL.keys())
        else:
            data_sources = [self.config.name]
        for split in ["train", "validation"]:
            data_urls[split] = []
            for source in data_sources:
                for chunk in _DATA_URL[source][split]:
                    data_urls[split].append(chunk)

        train_downloaded_files = dl_manager.download(data_urls["train"])
        validation_downloaded_files = dl_manager.download(data_urls["validation"])
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
            ),
        ]
    

    def _generate_examples(self, filepaths):
        """Yield examples in raw (text) form by iterating over all the files.

        Each shard is read as JSON lines (one JSON object per line), which is
        the format this script assumes despite the .pkl file extension.
        """
        id_ = 0
        for filepath in filepaths:
            logger.info("Generating examples from = %s", filepath)
            try:
                with open(filepath, "r", encoding="utf-8") as f:
                    for line in f:
                        if not line.strip():
                            continue
                        example = json.loads(line)
                        # isinstance() also rules out None, so no extra check is needed.
                        if isinstance(example, dict):
                            yield id_, {
                                "text": example.get("text", ""),
                                "created_timestamp": example.get("created_timestamp", ""),
                                "downloaded_timestamp": example.get("downloaded_timestamp", ""),
                                "url": example.get("url", ""),
                            }
                            id_ += 1
            except (OSError, json.JSONDecodeError) as e:
                # Avoid a bare except: report the failure and move on to the next file.
                logger.error("Error reading file %s: %s", filepath, e)