|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Super-EURLEX dataset containing legal documents from multiple languages""" |
|
|
|
import numpy as np |
|
import pandas as pd |
|
|
|
import datasets |
|
|
|
|
|
|
|
|
|
# Citation for the dataset; left as a placeholder until a formal reference exists.
_CITATION = """ """

# Dataset card text surfaced through `datasets.DatasetInfo.description`.
_DESCRIPTION = """
Super-EURLEX dataset containing legal documents from multiple languages.
The datasets are build/scrapped from the EURLEX Website [https://eur-lex.europa.eu/homepage.html]
With one split per language and sector, because the available features (metadata) differs for each
sector. Therefore, each sample contains the content of a full legal document in up to 3 different
formats. Those are raw HTML and cleaned HTML (if the HTML format was available on the EURLEX website
during the scrapping process) and cleaned text.
The cleaned text should be available for each sample and was extracted from HTML or PDF.
'Cleaned' HTML stands here for minor cleaning that was done to preserve to a large extent the necessary
HTML information like table structures while removing unnecessary complexity which was introduced to the
original documents due to actions like writing each sentence into a new object.
Additionally, each sample contains metadata which was scrapped on the fly, this implies the following
2 things. First, not every sector contains the same metadata. Second, most metadata might be
irrelevant for most use cases.
In our minds the most interesting metadata is the celex-id which is used to identify the legal
document at hand, but also contains a lot of information about the document
see [https://eur-lex.europa.eu/content/tools/eur-lex-celex-infographic-A3.pdf] as well as eurovoc-
concepts, which are labels that define the content of the documents.
Eurovoc-Concepts are, for example, only available for the sectors 1, 2, 3, 4, 5, 6, 9, C, and E.
The Naming of most metadata is kept like it was on the eurlex website, except for converting
it to lower case and replacing whitespaces with '_'.
"""

# Homepage URL for the dataset; empty (not published here).
_HOMEPAGE = ""

# License identifier; empty (not specified in this file).
_LICENSE = ""
|
|
|
# Upper-case language codes of the EU languages covered by the dataset.
AVAILABLE_LANGUAGES = [
    "BG", "CS", "DA", "DE", "EL", "EN", "ES", "ET",
    "FI", "FR", "GA", "HR", "HU", "IT", "LT", "LV",
    "MT", "NL", "PL", "PT", "RO", "SK", "SL", "SV",
]

# Single-character EURLEX sector identifiers (part of every celex id).
SECTORS = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'C', 'E']

# Text variants offered per config: None -> cleaned text, 'html' -> raw HTML.
VERSIONS = [None, 'html']
|
|
|
|
|
|
|
# Feature types for the columns shared across configurations.  Metadata
# columns not listed here fall back to Sequence(string) when the per-config
# Features objects are assembled further down in this file.
FEATURES = {
    'celex_id': datasets.Value("string"),           # document identifier
    'text_cleaned': datasets.Value("string"),       # plain text extracted from HTML/PDF
    'text_html_cleaned': datasets.Value("string"),  # lightly cleaned HTML
    'text_html_raw': datasets.Value("string"),      # original scraped HTML
}
|
# Metadata columns that were scraped for each EURLEX sector.  The order of
# each list is preserved when the per-config Features objects are built.
FEATURES_IN_SECTOR = {
    '0': ['form'],
    '1': ['form', 'subject_matter', 'current_consolidated_version', 'directory_code', 'eurovoc'],
    '2': ['form', 'directory_code', 'subject_matter', 'eurovoc', 'current_consolidated_version', 'latest_consolidated_version'],
    '3': ['form', 'directory_code', 'subject_matter', 'eurovoc', 'latest_consolidated_version', 'current_consolidated_version'],
    '4': ['form', 'eurovoc', 'subject_matter', 'current_consolidated_version', 'directory_code', 'latest_consolidated_version'],
    '5': ['form', 'directory_code', 'subject_matter', 'eurovoc', 'current_consolidated_version'],
    '6': ['form', 'case-law_directory_code_before_lisbon', 'subject_matter', 'eurovoc', 'directory_code'],
    '7': ['form', 'transposed_legal_acts'],
    '8': ['form', 'case-law_directory_code_before_lisbon', 'subject_matter'],
    '9': ['form', 'eurovoc', 'subject_matter', 'directory_code'],
    'C': ['form', 'eurovoc'],
    'E': ['form', 'directory_code', 'subject_matter', 'eurovoc'],
}
|
|
|
# Text columns present in each parquet variant.  None and 'clean' both refer
# to the cleaned-text files; 'html' refers to the raw-HTML files.
VERSION_FEATURES = {
    None:    ['celex_id', 'text_cleaned'],   # default: cleaned plain text
    'clean': ['celex_id', 'text_cleaned'],   # explicit alias of the default
    'html':  ['celex_id', 'text_html_raw'],  # raw scraped HTML
}
|
|
|
def _feature_type(feature):
    """Return the declared type for *feature*; metadata columns not listed in
    FEATURES default to a sequence of strings."""
    return FEATURES.get(feature, datasets.Sequence(datasets.Value("string")))


def _build_available_features():
    """Build the mapping from config key to its `datasets.Features` schema.

    Keys are the sector id, suffixed with '_<version>' for non-default
    versions (e.g. '3' and '3_html').  Each value combines the version's
    text columns with the sector's metadata columns, in that order.
    """
    available = {}
    for version in VERSIONS:
        suffix = '' if version is None else f"_{version}"
        for sector in SECTORS:
            columns = VERSION_FEATURES[version] + FEATURES_IN_SECTOR[sector]
            available[sector + suffix] = datasets.Features(
                {feature: _feature_type(feature) for feature in columns}
            )
    return available


# NOTE: previously built via a module-level loop that leaked the temporaries
# `available_features_tmp`, `version` and `v` into the module namespace.
AVAILABLE_FEATURES = _build_available_features()
|
|
|
SECTOR_DESCRIPTIONS={ |
|
'0': "Consolidated acts ", |
|
'1': "Treaties", |
|
'2': "International agreements", |
|
'3': "Legislation", |
|
'4': "Complementary legislation", |
|
'5': "Preparatory acts and working documents", |
|
'6': "Case-law", |
|
'7': "National transposition measures", |
|
'8': "References to national case-law concerning EU law", |
|
'9': "Parliamentary questions", |
|
'C': "Other documents published in the Official Journal C series", |
|
'E': "EFTA documents", |
|
} |
|
|
|
|
|
class SuperEurlexConfig(datasets.BuilderConfig):
    """BuilderConfig for one (sector, language, version) slice of Super-EURLEX."""

    def __init__(self, sector, language, feature_version, features, citation, url, **kwargs):
        """BuilderConfig for Super-EURLEX.

        Args:
            sector: sector of the wanted data (one of SECTORS).
            language: the language code for the language in which the text shall
                be written in.
            feature_version: text variant to load; None for cleaned text or
                'html' for raw HTML.
            features: *datasets.Features*, the features that will appear in the
                feature dict.
            citation: *string*, citation for the data set.
            url: *string*, url for information about the data set.
            **kwargs: keyword arguments forwarded to super.
        """
        # Config name, e.g. "3.EN" or "3.EN.html".
        name = sector + '.' + language + ('' if feature_version is None else f".{feature_version}")
        super().__init__(name=name, version=datasets.Version("0.1.0"), **kwargs)
        self.features = features
        self.language = language
        self.sector = sector
        # Stored as a string, so the default (cleaned-text) variant becomes "None".
        self.feature_version = str(feature_version)
        # Remote parquet locations; cleaned-text files carry the 'clean' suffix.
        self.text_data_url = f"text_data/{language}/{sector}_{'clean' if feature_version is None else feature_version}.parquet"
        self.meta_data_url = f"meta_data/{sector}.parquet"
        self.citation = citation
        self.url = url
|
|
|
class FileNotFoundException(Exception):
    # NOTE(review): defined but never raised or caught in the visible code;
    # the download logic below uses the builtin FileNotFoundError instead.
    # Kept for backward compatibility in case external code imports it.
    pass
|
|
|
|
|
class SuperEurlex(datasets.GeneratorBasedBuilder):
    """Builder for Super-EURLEX with one config per (sector, language, version).

    Each config downloads one text parquet file and one metadata parquet file,
    joins them on `celex_id` and yields the merged rows as a single 'train'
    split.
    """

    VERSION = datasets.Version("1.1.0")

    # One config per language x sector x text variant, named "<sector>.<lang>"
    # or "<sector>.<lang>.<version>".
    BUILDER_CONFIGS = [
        SuperEurlexConfig(
            sector=sect,
            language=lang,
            feature_version=version,
            description=SECTOR_DESCRIPTIONS[sect],
            features=AVAILABLE_FEATURES[sect + ("" if version is None else f"_{version}")],
            citation=_CITATION,
            url=_HOMEPAGE)
        for lang in AVAILABLE_LANGUAGES for sect in SECTORS for version in VERSIONS
    ]

    def _info(self):
        """Return the DatasetInfo (schema + card metadata) for this config."""
        # feature_version was stringified in the config, hence the "None" check.
        v = "" if self.config.feature_version == "None" else "_" + self.config.feature_version
        features = AVAILABLE_FEATURES[self.config.sector + v]
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the text and metadata parquet files; emit one 'train' split.

        Raises:
            FileNotFoundError: if the files for this sector/language
                combination are not available on the remote host.
        """
        urls = {'text': self.config.text_data_url,
                'meta': self.config.meta_data_url}
        try:
            data_dir = dl_manager.download_and_extract(urls)
        except FileNotFoundError:
            raise FileNotFoundError("""The demanded Files weren't found.
            It could be that the demanded sector isn't yet available in your language of choice""")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "text": data_dir['text'],
                    "meta": data_dir['meta'],
                    "language": self.config.language,
                    "sector": self.config.sector,
                    'split': 'train'
                },
            )
        ]

    def _generate_examples(self, text, meta, sector, language, split):
        """Yield (index, sample) pairs from the merged text and metadata tables.

        Args:
            text: path to the parquet file holding the document text columns.
            meta: path to the parquet file holding the sector metadata columns.
            sector, language, split: identify the config; not used for
                filtering because each parquet file already contains exactly
                one (sector, language) slice.
        """
        def _normalize_celex(x):
            # Parquet can hand celex_id back wrapped in a length-1 list or
            # ndarray; unwrap to a plain string so the merge key is uniform.
            # (Bug fix: the original called x.tolist() on plain lists, which
            # raises AttributeError, and treated text and meta inconsistently.)
            if isinstance(x, (list, np.ndarray)):
                return str(x[0])
            return x

        text_data = pd.read_parquet(text)
        text_data['celex_id'] = text_data['celex_id'].apply(_normalize_celex)
        meta_data = pd.read_parquet(meta)
        meta_data['celex_id'] = meta_data['celex_id'].apply(_normalize_celex)
        # Inner join: keep only documents that have both text and metadata.
        combined_data = pd.merge(text_data, meta_data, on='celex_id')
        dataset = datasets.Dataset.from_pandas(combined_data)
        # Drop the pandas index column that from_pandas may carry over.
        if '__index_level_0__' in dataset.column_names:
            dataset = dataset.remove_columns('__index_level_0__')
        for i, sample in enumerate(dataset):
            yield i, sample
|
|
|
|
|
if __name__ == '__main__':
    # Smoke test: attempt to load every (sector, language, version) config of
    # this very script and report which combinations fail to load.
    import datasets as ds
    import sys

    script_path = sys.argv[0]
    print(script_path)
    for sector in SECTORS:
        for lang in AVAILABLE_LANGUAGES:
            for version in VERSIONS:
                suffix = f".{version}" if version is not None else ""
                config_name = f'{sector}.{lang}{suffix}'
                print(config_name)
                try:
                    dataset = ds.load_dataset(script_path, config_name)
                    print(dataset)
                    print('\n')
                except Exception as e:
                    print("An error occurred: " + str(e.with_traceback(None)))
                    print(f"\n{sector}.{lang} Couldn't be loaded\n")
|