"""minipileoflaw""" import json import logging import gzip import csv import pandas as pd import ast import datasets try: import lzma as xz except ImportError: import pylzma as xz datasets.logging.set_verbosity_info() logger = datasets.logging.get_logger(__name__) _DESCRIPTION = """ This is minipileoflaw """ _CITATION = """ @misc{hendersonkrass2022pileoflaw, url = {https://arxiv.org/abs/2207.00220}, author = {Henderson, Peter and Krass, Mark S. and Zheng, Lucia and Guha, Neel and Manning, Christopher D. and Jurafsky, Dan and Ho, Daniel E.}, title = {Pile of Law: Learning Responsible Data Filtering from the Law and a 256GB Open-Source Legal Dataset}, publisher = {arXiv}, year = {2022} } """ _URL = "https://huggingface.co/datasets/tomrb/minipileoflaw" BASE_URL = "https://huggingface.co/datasets/tomrb/minipileoflaw/resolve/main/data/minipileoflaw_" subsets_names = ['r_legaladvice', 'courtlistener_docket_entry_documents', 'atticus_contracts', 'courtlistener_opinions', 'federal_register', 'bva_opinions', 'us_bills', 'cc_casebooks', 'tos', 'euro_parl', 'nlrb_decisions', 'scotus_oral_arguments', 'cfr', 'state_codes', 'scotus_filings', 'exam_outlines', 'edgar', 'cfpb_creditcard_contracts', 'constitutions', 'congressional_hearings', 'oig', 'olc_memos', 'uscode', 'founding_docs', 'ftc_advisory_opinions', 'echr', 'eurlex', 'tax_rulings', 'un_debates', 'fre', 'frcp', 'canadian_decisions', 'eoir', 'dol_ecab', 'icj-pcij', 'uspto_office_actions', 'ed_policy_guidance', 'acus_reports', 'hhs_alj_opinions', 'sec_administrative_proceedings', 'fmshrc_bluebooks', 'resource_contracts', 'medicaid_policy_guidance', 'irs_legal_advice_memos', 'doj_guidance_documents'] _DATA_URL = { key: { "train": [f"{BASE_URL}{key}_train.jsonl"], "validation": [f"{BASE_URL}{key}_validation.jsonl"] } for key in subsets_names } _VARIANTS = ["all"] + list(_DATA_URL.keys()) class MiniPileOfLaw(datasets.GeneratorBasedBuilder): """TODO""" BUILDER_CONFIGS = [datasets.BuilderConfig(name) for name in _VARIANTS] def _info(self): return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features( { "text": datasets.Value("string"), "created_timestamp": datasets.Value("string"), "downloaded_timestamp": datasets.Value("string"), "url": datasets.Value("string"), } ), supervised_keys=None, homepage=_URL, citation=_CITATION, ) def _split_generators(self, dl_manager): data_urls = {} if self.config.name == "all": data_sources = list(_DATA_URL.keys()) else: data_sources = [self.config.name] for split in ["train", "validation"]: data_urls[split] = [] for source in data_sources: for chunk in _DATA_URL[source][split]: data_urls[split].append(chunk) train_downloaded_files = dl_manager.download(data_urls["train"]) validation_downloaded_files = dl_manager.download(data_urls["validation"]) return [ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}), datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files} ), ] def _generate_examples(self, filepaths): """This function returns the examples in the raw (text) form by iterating on all the files.""" id_ = 0 for filepath in filepaths: logger.info("Generating examples from = %s", filepath) try: with open(filepath, "r", encoding="utf-8") as f: for line in f: if line: example = json.loads(line) if example is not None and isinstance(example, dict): yield id_, { "text": example.get("text", ""), "created_timestamp": example.get("created_timestamp", ""), "downloaded_timestamp": 
example.get("downloaded_timestamp", ""), "url": example.get("url", "") } id_ += 1 except Exception as e: print("Error reading file:", filepath) print("Exception:", str(e))
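    

# A minimal usage sketch. It assumes this file is the loading script of the
# `tomrb/minipileoflaw` Hub repo, so `load_dataset` resolves it by that name;
# the guard below keeps it from running when `datasets` imports this module.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load a single subset by config name, e.g. Reddit r/legaladvice posts.
    legal_advice = load_dataset("tomrb/minipileoflaw", "r_legaladvice")
    print(legal_advice)

    # The "all" config concatenates the train/validation files of every subset.
    everything = load_dataset("tomrb/minipileoflaw", "all")
    print(everything)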