|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""AsyLex: A Dataset for Legal Language Processing of Refugee Claims""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
import datasets |
|
|
|
logger = datasets.logging.get_logger(__name__) |
|
|
|
|
|
_VERSION = "1.0.0" |
|
|
|
_DESCRIPTION = """AsyLex: A Dataset for Legal Language Processing of Refugee Claims""" |
|
|
|
_HOMEPAGE = "https://huggingface.co/datasets/clairebarale/AsyLex" |
|
|
|
_LICENSE = "cc-by-nc-sa-4.0" |
|
|
|
_URLS = { |
|
"raw_documents": "https://huggingface.co/datasets/clairebarale/AsyLex/raw/main/cases_anonymized_txt_raw.tar.gz", |
|
"raw_sentences": "https://huggingface.co/datasets/clairebarale/AsyLex/raw/main/all_sentences_anonymized.tar.xz", |
|
"all_legal_entities": "https://huggingface.co/datasets/clairebarale/AsyLex/raw/main/main_and_case_cover_all_entities_inferred.csv", |
|
"casecover_legal_entities": "https://huggingface.co/datasets/clairebarale/AsyLex/blob/main/case_cover/case_cover_anonymised_extracted_entities.csv", |
|
"casecover_entities_outcome": "https://huggingface.co/datasets/clairebarale/AsyLex/blob/main/case_cover/case_cover_entities_and_decision_outcome.csv", |
|
"determination_sentences": "https://huggingface.co/datasets/clairebarale/AsyLex/blob/main/determination_label_extracted_sentences.csv", |
|
"outcome_classification": "https://huggingface.co/datasets/clairebarale/AsyLex/tree/main/outcome_train_test/" |
|
} |
|
|
|
class AsyLexConfig(datasets.BuilderConfig):
    """BuilderConfig for AsyLex.

    Args:
        url: direct-download URL of the resource backing this configuration.
        **kwargs: forwarded to ``datasets.BuilderConfig`` (name, version,
            description, ...).
    """

    def __init__(self, url, **kwargs):
        # Zero-argument super() -- same behavior, idiomatic Python 3.
        super().__init__(**kwargs)
        # Remembered so the selected config carries its own download location.
        self.url = url
|
|
|
|
|
class Asylex(datasets.GeneratorBasedBuilder):
    """AsyLex: A Dataset for Legal Language Processing of Refugee Claims.

    Each BuilderConfig selects one published resource: the raw per-case
    documents, sentence-split text, extracted-entity tables (full or
    case-cover only, with or without the decision outcome), determination
    sentences, or the outcome-classification train/test split.
    """

    VERSION = datasets.Version(_VERSION)
    BUILDER_CONFIG_CLASS = AsyLexConfig

    BUILDER_CONFIGS = [
        AsyLexConfig(
            name="raw_documents",
            description="contains the raw text from all documents, by case, with the corresponding case identifier",
            version=datasets.Version(_VERSION, ""),
            url=_URLS["raw_documents"],
        ),
        AsyLexConfig(
            name="raw_sentences",
            description="contains the raw text from all retrieved documents, split by sentences, with the corresponding case identifier",
            version=datasets.Version(_VERSION, ""),
            url=_URLS["raw_sentences"],
        ),
        AsyLexConfig(
            name="all_legal_entities",
            description="contains the structured dataset, all extracted entities (one column per entity type), with the corresponding case identifier",
            version=datasets.Version(_VERSION, ""),
            url=_URLS["all_legal_entities"],
        ),
        AsyLexConfig(
            name="casecover_legal_entities",
            description="contains the structured dataset derived from the case covers only (one column per entity type), with the corresponding case identifier",
            version=datasets.Version(_VERSION, ""),
            url=_URLS["casecover_legal_entities"],
        ),
        AsyLexConfig(
            name="casecover_entities_outcome",
            description="contains the structured dataset derived from the case covers only (one column per entity type), with the corresponding case identifier, with the addition of the decision outcome of the case",
            version=datasets.Version(_VERSION, ""),
            url=_URLS["casecover_entities_outcome"],
        ),
        AsyLexConfig(
            name="determination_sentences",
            description="contains all sentences that have been extracted with the Entity Type determination. All sentences included here should therefore directly state the outcome of the decision, with the correspinding case identifier",
            version=datasets.Version(_VERSION, ""),
            url=_URLS["determination_sentences"],
        ),
        AsyLexConfig(
            name="outcome_classification",
            description="folder containing a train and test set for the task of outcome classificiation. Each set includes the case identifier and the decision outcome (0,1,2). The test set only contains gold-standard manually labeled data.",
            version=datasets.Version(_VERSION, ""),
            url=_URLS["outcome_classification"],
        ),
    ]

    DEFAULT_CONFIG_NAME = "raw_sentences"

    def _info(self):
        """Return the DatasetInfo (feature schema) for the selected config.

        The column order of each schema mirrors the column order of the
        source CSV; _generate_examples relies on this to yield rows.
        """
        text = datasets.Value("string")
        int_id = datasets.Value("int64")
        outcome_label = datasets.ClassLabel(names=["Rejected", "Granted", "Uncertain"])

        if self.config.name == "raw_documents":
            features = datasets.Features({"text": text})
        elif self.config.name == "raw_sentences":
            features = datasets.Features({"decisionID": int_id, "Text": text})
        elif self.config.name == "all_legal_entities":
            # Entity columns extracted from the main document body ...
            entity_columns = [
                "Text", "GPE", "DATE", "NORP", "ORG", "LAW",
                "CLAIMANT_EVENTS", "CREDIBILITY", "DETERMINATION",
                "CLAIMANT_INFO", "PROCEDURE", "DOC_EVIDENCE", "EXPLANATION",
                "LEGAL_GROUND", "LAW_CASE", "LAW_REPORT",
            ]
            # ... followed (after the outcome label) by the case-cover columns.
            case_cover_columns = [
                "extracted_dates", "LOC_HEARING", "TRIBUNAL",
                "PUBLIC_PRIVATE_HEARING", "INCHAMBER_VIRTUAL_HEARING",
                "JUDGE", "text_case_cover", "DATE_DECISION",
            ]
            schema = {"decisionID": int_id}
            schema.update({name: text for name in entity_columns})
            schema["decision_outcome"] = outcome_label
            schema.update({name: text for name in case_cover_columns})
            features = datasets.Features(schema)
        elif self.config.name == "casecover_legal_entities":
            columns = [
                "extracted_dates", "extracted_gpe", "extracted_org",
                "public_private_hearing", "in_chamber_virtual",
                "judge_name", "date_decision", "text_case_cover",
            ]
            schema = {"decision_ID": int_id}
            schema.update({name: text for name in columns})
            features = datasets.Features(schema)
        elif self.config.name == "casecover_entities_outcome":
            columns = [
                "extracted_dates", "LOC_HEARING", "TRIBUNAL",
                "PUBLIC_PRIVATE_HEARING", "INCHAMBER_VIRTUAL_HEARING",
                "JUDGE", "text_case_cover", "DATE_DECISION",
            ]
            schema = {"decision_ID": int_id}
            schema.update({name: text for name in columns})
            schema["decision_outcome"] = outcome_label
            features = datasets.Features(schema)
        elif self.config.name == "determination_sentences":
            features = datasets.Features(
                {"decisionID": int_id, "extracted_sentences_determination": text}
            )
        elif self.config.name == "outcome_classification":
            # NOTE(review): float64 for an identifier looks odd -- presumably
            # the source CSV stores the IDs as floats; confirm before changing.
            features = datasets.Features(
                {"decisionID": datasets.Value("float64"), "decision_outcome": outcome_label}
            )
        else:
            raise ValueError(f"Unknown configuration name: {self.config.name}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            license=_LICENSE,
            supervised_keys=None,
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        """Download the configured resource and declare the dataset splits.

        outcome_classification ships a pre-made train/test split; every
        other configuration is exposed as a single TRAIN split.
        """
        if self.config.name == "outcome_classification":
            # Single download (the original fetched this resource twice).
            data_dir = dl_manager.download_and_extract(_URLS["outcome_classification"])
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, "train_dataset_silver.csv"),
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, "test_dataset_gold.csv"),
                        "split": "test",
                    },
                ),
            ]

        downloaded = dl_manager.download_and_extract(_URLS[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded, "split": "train"},
            )
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs for the selected configuration.

        Args:
            filepath: path to the downloaded CSV, or to the extracted
                directory of .txt files for the raw_documents config.
            split: split name ("train"/"test"); informational only.
        """
        logger.info("⏳ Generating examples from = %s", filepath)

        if self.config.name == "raw_documents":
            # The archive extracts to a directory of per-case .txt files;
            # sort for deterministic example keys across filesystems.
            case_files = sorted(
                name for name in os.listdir(filepath) if name.endswith(".txt")
            )
            for idx, name in enumerate(case_files):
                with open(os.path.join(filepath, name), "r", encoding="utf-8") as f:
                    # Key is "text" to match the schema declared in _info()
                    # (the original yielded "case_files", which no config
                    # declares and would fail feature encoding).
                    yield idx, {"text": f.read()}
            return

        # Every other configuration is a single CSV whose columns match the
        # feature schema declared in _info(), so the yielded dict is derived
        # from it directly -- schema and examples can no longer drift apart.
        # The case-cover entities file is comma-separated; all others use ';'.
        delimiter = "," if self.config.name == "casecover_legal_entities" else ";"
        columns = list(self.info.features)
        with open(filepath, "r", encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter=delimiter)
            for idx, row in enumerate(reader):
                yield idx, {name: row[name] for name in columns}
|
|
|
|