|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:46:57.042545Z" |
|
}, |
|
"title": "Named entity recognition in the Romanian legal domain", |
|
"authors": [ |
|
{ |
|
"first": "Vasile", |
|
"middle": [], |
|
"last": "P\u0203is", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Research Institute for Artificial Intelligence \"Mihai Dr\u0203g\u0203nescu\"", |
|
"location": { |
|
"country": "Romanian Academy" |
|
} |
|
}, |
|
"email": "vasile@racai.ro" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Mitrofan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Research Institute for Artificial Intelligence \"Mihai Dr\u0203g\u0203nescu\"", |
|
"location": { |
|
"country": "Romanian Academy" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Carol", |
|
"middle": [ |
|
"Luca" |
|
], |
|
"last": "Gasan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Research Institute for Artificial Intelligence \"Mihai Dr\u0203g\u0203nescu\"", |
|
"location": { |
|
"country": "Romanian Academy" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Vlad", |
|
"middle": [], |
|
"last": "Coneschi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Research Institute for Artificial Intelligence \"Mihai Dr\u0203g\u0203nescu\"", |
|
"location": { |
|
"country": "Romanian Academy" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Alexandru", |
|
"middle": [], |
|
"last": "Ianov", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Research Institute for Artificial Intelligence \"Mihai Dr\u0203g\u0203nescu\"", |
|
"location": { |
|
"country": "Romanian Academy" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Recognition of named entities present in text is an important step towards information extraction and natural language understanding. This work presents a named entity recognition system for the Romanian legal domain. The system makes use of the gold annotated Legal-NERo corpus. Furthermore, the system combines multiple distributional representations of words, including word embeddings trained on a large legal domain corpus. All the resources, including the corpus, model and word embeddings are open sourced. Finally, the best system is available for direct usage in the RELATE platform.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Recognition of named entities present in text is an important step towards information extraction and natural language understanding. This work presents a named entity recognition system for the Romanian legal domain. The system makes use of the gold annotated Legal-NERo corpus. Furthermore, the system combines multiple distributional representations of words, including word embeddings trained on a large legal domain corpus. All the resources, including the corpus, model and word embeddings are open sourced. Finally, the best system is available for direct usage in the RELATE platform.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Natural language processing for the legal domain has its own unique challenges. This is due to the way legal documents are structured as well as to the domain-specific language being used. Technology dealing with legal documents has received increased attention in recent years. This can be seen from the number of recent scientific papers being published, the existence of the Natural Legal Language Processing (NLLP) workshop (Aletras et al., 2019 (Aletras et al., , 2020 and different international projects dealing with natural language processing for the legal domain.", |
|
"cite_spans": [ |
|
{ |
|
"start": 428, |
|
"end": 449, |
|
"text": "(Aletras et al., 2019", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 450, |
|
"end": 473, |
|
"text": "(Aletras et al., , 2020", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Named entity recognition (NER) is the process of identifying text spans that refer to real-world objects, such as organizations or persons, etc. One of the annotation schemes being used in a large number of works was introduced by the CoNLL-2003 shared task on language independent NER (Tjong Kim Sang and De Meulder, 2003) and refers to names of persons, organizations and locations. This annotation scheme can be applied in the legal domain as well, thus allowing existing systems to try to annotate legal documents (with or without being adapted to legal text). However, domainspecific entities are usually added to enhance the in-formation extraction capabilities of text processing algorithms specifically designed for the legal domain. Dozier et al. (2010) , while dealing with depositions, pleadings and trial-specific documents, propose including entities for attorneys, judges, courts and jurisdictions. Glaser et al. (2018) proposed adding the entities date, money value, reference and \"other\" for analyzing legal contracts. Leitner et al. (2019 Leitner et al. ( , 2020 proposed using 7 coarse-grained entity classes which can be further expanded into 19 fine-grained classes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 286, |
|
"end": 323, |
|
"text": "(Tjong Kim Sang and De Meulder, 2003)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 742, |
|
"end": 762, |
|
"text": "Dozier et al. (2010)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 913, |
|
"end": 933, |
|
"text": "Glaser et al. (2018)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 1035, |
|
"end": 1055, |
|
"text": "Leitner et al. (2019", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1056, |
|
"end": 1079, |
|
"text": "Leitner et al. ( , 2020", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In the context of the \"Multilingual Resources for CEF.AT in the legal domain\" (MARCELL) project 1 a large, clean, validated domain-specific corpus was created. It contains monolingual corpora extracted from national legislation (laws, decrees, regulations, etc.) of the seven involved countries, including Romania (Tufis , et al., 2020) . All seven corpora are aligned at topic level domains. The Romanian corpus was preprocessed (split at sentence level, tokenized, lemmatized and annotated with POS tags) using tools developed in the Research Institute for Artificial Intelligence \"Mihai Dr\u0203g\u0203nescu\", Romanian Academy (RACAI). Named entities were identified using a general-purpose tool (P\u0203is , , 2019) . This tool was designed for general Romanian language and allowed only four entity types: organizations, locations, persons and time expressions. The tool was not trained on any legal texts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 314, |
|
"end": 336, |
|
"text": "(Tufis , et al., 2020)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 689, |
|
"end": 704, |
|
"text": "(P\u0203is , , 2019)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "For the purposes of this work, we created a manually annotated corpus, comprising legal documents extracted from the larger MARCELL-RO corpus. We choose an annotation scheme covering 5 entity classes: person (PER), location (LOC), organization (ORG), time expressions (TIME) and legal document references (LEGAL). References are introduced similar to the work of Landthaler et al. (2016) and the coarse-grained class proposed by Leitner et al. (2019) , without additional sub-classes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 363, |
|
"end": 387, |
|
"text": "Landthaler et al. (2016)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 429, |
|
"end": 450, |
|
"text": "Leitner et al. (2019)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Thus, they are references to legal documents such as laws, ordinances, government decisions, etc. For the purposes of this work, in the Romanian legal domain, we decided to explore only these coarsegrained classes, without any fine-grained entities. This has the advantage of allowing the corpus to be used together with other general-purpose NER corpora. Furthermore, it allows us to judge the quality of the resulting NER system against existing systems. In order to train domain-specific NER systems, we constructed distributional representations of words (also known as word embeddings) based on the entire MARCELL corpus. Finally, we explored several neural architectures and adapted them as needed to the legal domain.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper is organized as follows: in Section 2 we present related work, in Section 3 is described the LegalNERo corpus, Section 4 presents the legal domain word embeddings, Section 5 describes the NER system architecture, while Section 6 gives the results and finally conclusions are presented in Section 7.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Legal NER is an important task in extracting key information from legal documents, such as dates, references to different types of legal documents, locations, organizations and persons. As Zhong and Tang (2020) stated, once the NEs are identified and classified they can be used in workflows to perform different functionalities such as document anonymization or case summarization.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "One of the pioneering work in this research area was made by Dozier et al. (2010) . The authors examined legal NEs in US depositions, pleadings, case law and other legal documents using statistical models, context rules, and a lookup list of NEs. They also developed different taggers for NEs such as document type of jurisdiction, obtaining an F1 score of 0.92 for the NEs belonging to jurisdiction class. This work formed the basis for Cardellino et al. (2017) to develop a tool for identifying, classifying and linking legal NEs. They trained and evaluated different systems (Stanford NER 2 , a Support Vector Machine, and a neural network (NN)) on Wikipedia and on decisions coming from the European Court of Human Rights, obtaining an F1 score of 0.86 using NN and an F1 score of 0.56 using Stanford NER. Glaser et al. (2018) studied and evaluated on German legal data three NER systems. GermaNER, a generic German NER tagger, has been adapted to identify and classify NEs such as persons, organizations, locations, and dates and money values. The second NER system used was DBpedia Spotlight pipeline, an interlinking hub, a tool that can be used to perform annotation tasks on a text provided by a user (Mendes et al., 2011) . The third NER system employed in this task was based on contract templates to identify NEs (Minakov et al., 2007) . For GermaNER pipeline and DBpedia Spotlight pipeline the evaluation was performed on a corpus of 500 judgements, and achieved an F1 score of 0.8 and 0.87 respectively. The template NER system was evaluated on a corpus of contract templates and obtained an F1 score of 0.92. Leitner et al. (2019) also evaluated two systems, based on Conditional Random Fields (CRFs) and bidirectional Long-Short Term Memory Networks (BiLSTMs), on NER for German language documents from the legal domain. The evaluation was performed on a German court decisions corpus annotated with 19 fine-grained classes, and also with 7 generalised coarse-grained classes. The best performance achieved by the CRFs was 93.23 on fined-grained classes and 93.22 on coarse-grained classes, and the BiLSTMs models reach a maximum of 95.46 for the fined-grained classes and 95.95 for coarse-grained ones. In the Lynx project 3 (Moreno-Schneider et al., 2020) a set of services, including NER, were developed in order to help with the creation of a legal domain knowledge graph (Legal Knowledge Graph -LKG) and its use for the semantic analysis of documents in the legal domain.", |
|
"cite_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 81, |
|
"text": "Dozier et al. (2010)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 438, |
|
"end": 462, |
|
"text": "Cardellino et al. (2017)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 810, |
|
"end": 830, |
|
"text": "Glaser et al. (2018)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 1210, |
|
"end": 1231, |
|
"text": "(Mendes et al., 2011)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1325, |
|
"end": 1347, |
|
"text": "(Minakov et al., 2007)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1624, |
|
"end": 1645, |
|
"text": "Leitner et al. (2019)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Barriere and Fouret (2019) described a method to generate contextual dictionaries for NER. The system was evaluated on a French legal corpus of 94 court decisions (276,705 tokens), which was annotated with 4 classes of entities. The best performance of this system was 96.52.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Even though there are several NER systems trained for Romanian language both for general language (P\u0203is , , 2019) and specialised domains (Mitrofan, 2019), regarding the legal domain, the experiments are very few and the performances are low (for example Tufis , et al. (2020) note an average precision of 64.1% on a random sample extracted from the MARCELL-RO corpus, using the system devel-oped in (P\u0203is , , 2019) ). Apart from the new Legal-NERo corpus (see Section 3), existing Romanian NER corpora do not focus on the legal domain. Romanian TimeBank (For\u0203scu and Tufi\u015f, 2012) is an annotated parallel corpus for temporal information. The RONEC (Dumitrescu and Avram, 2020) news corpus contains 26,377 named entities, belonging to 16 different classes. SiMoNERo (Barbu Mititelu and Mitrofan, 2020) is a gold standard corpus for biomedical domain, manually annotated with four types of domain-specific named entities.", |
|
"cite_spans": [ |
|
{ |
|
"start": 400, |
|
"end": 415, |
|
"text": "(P\u0203is , , 2019)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 555, |
|
"end": 580, |
|
"text": "(For\u0203scu and Tufi\u015f, 2012)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Annotation of the LegalNERo corpus was performed by 5 human annotators, supervised by two senior researchers at the Institute for Artificial Intelligence \"Mihai Dr\u0203g\u0203nescu\" of the Romanian Academy (RACAI). For annotation purposes we used the BRAT tool 4 (Stenetorp et al., 2012) , integrated in the RELATE platform . Inside the legal reference class, we considered subentities of type organization and time. This allows for using the LegalNERo corpus in two scenarios: using all the 5 entity classes or using only the remaining general-purpose classes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 254, |
|
"end": 278, |
|
"text": "(Stenetorp et al., 2012)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The LegalNERo corpus", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The LegalNERo corpus contains a total of 370 documents from the larger MARCELL-RO corpus. These documents were split amongst the 5 annotators, with certain documents being annotated by multiple annotators. Each annotator manually annotated 100 documents. The annotators were unaware of the overlap, which allowed us to compute an inter-annotator agreement. We used the Cohen's Kappa measure and obtained a value of 0.89, which we consider to be a good result.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The LegalNERo corpus", |
|
"sec_num": "3" |
|
}, |
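For illustration, pairwise inter-annotator agreement of the kind reported above could be computed over token-level labels as in the short sketch below; the label sequences are hypothetical and scikit-learn's cohen_kappa_score is used as a stand-in for the authors' exact procedure, which is not described in detail.

```python
from sklearn.metrics import cohen_kappa_score

# Token-level entity labels produced by two annotators over the same tokens (hypothetical example).
annotator_a = ["O", "B-ORG", "I-ORG", "O", "B-TIME", "O"]
annotator_b = ["O", "B-ORG", "I-ORG", "O", "O", "O"]

print(cohen_kappa_score(annotator_a, annotator_b))  # agreement corrected for chance
```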
|
{ |
|
"text": "The raw annotations were obtained in the BRAT specific format, consisting of text spans, characterized by start and end positions with the associated entities. However, since many NER systems make use of token-based annotations, we further employed the Romanian pipelines integrated in the RELATE platform (P\u0203is , , 2020) and annotated the corpus at token level. For tokenization, lemmatization, part-of-speech tagging and dependency parsing we used UDPipe. Finally, the named entity annotations were mapped to tokens and exported in CoNLL-U Plus format 5 , similar to the original format being used in the MARCELL-RO corpus. Documents 370 Sentences 8,284 Tokens 265,335 Unique lemmas 12,887 RDF Triples 5,761,781 Additionally, location entities were mapped to the GeoNames 6 ontology, but this information was not used for the purposes of this work. Nevertheless, the information is available in the LegalNERo corpus for future use. Finally, since we have multiple annotation levels available, we converted all the data into RDF format, specific to Linguistic Linked Open Data (LLOD) and made this available in the Linked Open Data Cloud 7 . Key statistics computed on the LegalNERo corpus are presented in Table 1 . The number of entities, considering all the entity types, are given in Table 2 , while considering only persons, locations, organization and time expressions are given in Table 3 . These numbers are obtained at entity level (not at token level). We further computed in Table 4 the average number of tokens associated with each entity type.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 626, |
|
"end": 690, |
|
"text": "Documents 370 Sentences 8,284 Tokens 265,335 Unique lemmas", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 1214, |
|
"end": 1221, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
}, |
|
{ |
|
"start": 1295, |
|
"end": 1302, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 1395, |
|
"end": 1402, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 1493, |
|
"end": 1500, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The LegalNERo corpus", |
|
"sec_num": "3" |
|
}, |
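A minimal sketch of the span-to-token mapping step described above, assuming tokens carry character offsets (as produced by UDPipe-style tokenization) and BRAT annotations are (start, end, label) character spans; the output is a BIO label per token rather than the full CoNLL-U Plus export.

```python
def spans_to_bio(tokens, spans):
    """tokens: list of (start_char, end_char, form); spans: list of (start_char, end_char, label)."""
    labels = ["O"] * len(tokens)
    for span_start, span_end, label in spans:
        covered = [i for i, (t_start, t_end, _) in enumerate(tokens)
                   if t_start >= span_start and t_end <= span_end]
        for position, i in enumerate(covered):
            labels[i] = ("B-" if position == 0 else "I-") + label
    return labels

# Hypothetical fragment: "ORDIN nr. 625" annotated as a single LEGAL reference.
tokens = [(0, 5, "ORDIN"), (6, 9, "nr."), (10, 13, "625")]
print(spans_to_bio(tokens, [(0, 13, "LEGAL")]))  # ['B-LEGAL', 'I-LEGAL', 'I-LEGAL']
```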
|
{ |
|
"text": "Results presented in Table 4 clearly indicate that the legal reference entity type has the largest number of tokens (7.29 in average). A typical example is \"ORDIN nr. 625 din 25 aprilie 2019\" (\"Order no. 625 from 25 April 2019\"). Nevertheless, longer entities are also present, such as \"Ordinul pres , edintelui Casei Nat , ionale de Asigur\u0203ri de S\u0203n\u0203tate nr. 141 / 2017\" (\"Order of the president of the National Health Insurance House no. 141 / 2017\"). This example has 12 tokens and contains an orga- Person 914 Location 2,276 Organization 6,209 Time 4,643 Total 14,042 Table 4 : Average number of tokens for each entity type nization sub-entity (\"Casei Nat , ionale de Asigur\u0203ri de S\u0203n\u0203tate\" / \"National Health Insurance House\") as well as a time expression (\"2017\").", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 28, |
|
"text": "Table 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 503, |
|
"end": 572, |
|
"text": "Person 914 Location 2,276 Organization 6,209 Time 4,643 Total", |
|
"ref_id": "TABREF0" |
|
}, |
|
{ |
|
"start": 580, |
|
"end": 587, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Statistic Value", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Previously computed Romanian word embeddings (P\u0203is , and Tufis , , 2018) made use of the Representative Corpus of Contemporary Romanian Language (CoRoLa) (Tufis , et al., 2019) . This large corpus contains texts from multiple domains, including the legal domain. However, in addition to the legal domain, the CoRoLa corpus contains texts from completely different areas, such as news, literature and mathematics. This makes the CoRoLa-based embeddings suitable for general tasks, but also allows for creation of legal-domain specific representations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 154, |
|
"end": 176, |
|
"text": "(Tufis , et al., 2019)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Romanian legal-domain word embeddings", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For the purposes of this work, in addition to the already available CoRoLa embeddings, we constructed word representations based on the entire MARCELL-RO corpus. We used the same approach which obtained the best performing CoRoLa embeddings. Thus, we used the FastText 8 toolkit (Joulin et al., 2017) and produced vector representations of dimension 300, while considering only words appearing a minimum of 20 times. Furthermore, the model made use of n-gram windows of dimension 5. The resulting embeddings are available for download within the RELATE 8 https://fasttext.cc/ For Romanian language there are currently no legal domain contextual embeddings, like the Legal-BERT (Chalkidis et al., 2020) model for English. Furthermore, existing Romanian BERT models, like (Masala et al., 2020 ; S , tefan Daniel Dumitrescu et al., 2020), were not trained on corpora containing legal documents.", |
|
"cite_spans": [ |
|
{ |
|
"start": 279, |
|
"end": 300, |
|
"text": "(Joulin et al., 2017)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 770, |
|
"end": 790, |
|
"text": "(Masala et al., 2020", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Romanian legal-domain word embeddings", |
|
"sec_num": "4" |
|
}, |
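A minimal sketch of how such embeddings could be produced with the fastText Python bindings, assuming the MARCELL-RO corpus has been dumped to a plain-text file; the file name, the skip-gram choice and the reading of "n-gram windows of dimension 5" as the context window size are assumptions, not details confirmed by the paper.

```python
import fasttext

# 300-dimensional vectors, minimum word frequency of 20, context window of 5,
# roughly matching the configuration reported for the MARCELL-based embeddings.
model = fasttext.train_unsupervised(
    "marcell_ro_plain.txt",   # hypothetical one-sentence-per-line dump of the corpus
    model="skipgram",
    dim=300,
    minCount=20,
    ws=5,
)
model.save_model("marcell_ro_300.bin")
print(model.get_word_vector("lege")[:5])  # embedding for the Romanian word "lege" (law)
```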
|
{ |
|
"text": "Our proposed NER model makes use of recurrent neural networks, based on BiLSTM cells, with a final CRF layer. For features we considered word representations, character embeddings, gazetteer resources and known affixes. The word embeddings are initialized from a pre-trained model and are fine-tuned during training. The character embeddings are computed during training and the embedding layer is followed by a BiLSTM layer helping with the representation generation. For implementation we used a modified version (Armengol-Estap\u00e9 et al., 2019) of the NeuroNER (Dernoncourt et al., 2017) software package. This implementation was further adapted to our needs in order to allow online model serving. The overall system diagram is presented in Figure 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 562, |
|
"end": 588, |
|
"text": "(Dernoncourt et al., 2017)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 743, |
|
"end": 751, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "System architecture", |
|
"sec_num": "5" |
|
}, |
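The stack described above (fine-tuned word embeddings, a character BiLSTM, a word-level BiLSTM and a CRF output layer) could be sketched in PyTorch as below. This is not the authors' NeuroNER code: the third-party pytorch-crf package is assumed for the CRF layer, the character representation is simplified, and the gazetteer and affix features are omitted.

```python
import torch
import torch.nn as nn
from torchcrf import CRF  # pytorch-crf package (an assumption; NeuroNER itself is TensorFlow-based)

class BiLstmCrfTagger(nn.Module):
    """Word BiLSTM + character BiLSTM + CRF, mirroring the architecture described in the paper."""

    def __init__(self, pretrained_vectors, char_vocab_size, num_tags, char_dim=25, dropout=0.5):
        super().__init__()
        emb_dim = pretrained_vectors.shape[1]
        # Word embeddings initialised from a pre-trained matrix and fine-tuned during training.
        self.word_emb = nn.Embedding.from_pretrained(
            torch.as_tensor(pretrained_vectors, dtype=torch.float), freeze=False)
        self.char_emb = nn.Embedding(char_vocab_size, char_dim)
        self.char_lstm = nn.LSTM(char_dim, char_dim, bidirectional=True, batch_first=True)
        self.dropout = nn.Dropout(dropout)
        # Main BiLSTM sized to the word-embedding dimension (e.g. 300 for CoRoLa, 600 for CoRoLa+MARCELL).
        self.word_lstm = nn.LSTM(emb_dim + 2 * char_dim, emb_dim, bidirectional=True, batch_first=True)
        self.proj = nn.Linear(2 * emb_dim, num_tags)
        self.crf = CRF(num_tags, batch_first=True)

    def forward(self, words, chars, tags=None):
        # words: (batch, seq_len); chars: (batch, seq_len, max_word_len)
        batch, seq_len, word_len = chars.shape
        char_out, _ = self.char_lstm(self.char_emb(chars.view(batch * seq_len, word_len)))
        char_repr = char_out[:, -1, :].view(batch, seq_len, -1)      # simplified character summary
        features = self.dropout(torch.cat([self.word_emb(words), char_repr], dim=-1))
        emissions = self.proj(self.word_lstm(features)[0])
        if tags is not None:
            return -self.crf(emissions, tags)     # negative log-likelihood used as the training loss
        return self.crf.decode(emissions)         # best tag sequence per sentence at inference time
```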
|
{ |
|
"text": "To construct the gazetteer resources we employed the GeoNames database for the country Romania and the JRC-Names 10 (Steinberger et al., 2011) multilingual named entity collection. These two collections cover a large number of entity names like locations, organizations and persons.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System architecture", |
|
"sec_num": "5" |
|
}, |
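A small sketch of how a location gazetteer could be assembled from a GeoNames country dump (tab-separated, with the primary name in the second column and comma-separated alternate names in the fourth, following the GeoNames readme); the file name is hypothetical and the JRC-Names side is omitted.

```python
def load_geonames_gazetteer(path):
    """Collect place names (primary and alternate) from a GeoNames country dump such as RO.txt."""
    names = set()
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            columns = line.rstrip("\n").split("\t")
            names.add(columns[1])                                   # primary name
            names.update(n for n in columns[3].split(",") if n)     # alternate names, if any
    return names

# Example (hypothetical file): gazetteer = load_geonames_gazetteer("RO.txt")
```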
|
{ |
|
"text": "We trained multiple models using different features. This includes different word embeddings (CoRoLa and MARCELL embeddings) and also combinations of the two embeddings. Previous work (P\u0203is , and Mitrofan, 2021) , (Casillas et al., 2019) has shown that using different word embeddings and combinations can improve NER performance. For each word representation we adapted the main BiLSTM layer size to match the embedding size. For example, in the case of CoRoLa embeddings we used a layer with size 300 and in the case of CoRoLa + MARCELL embeddings we used a layer with size 600. This was done to accommodate the increased vector size associated with the word representation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 184, |
|
"end": 211, |
|
"text": "(P\u0203is , and Mitrofan, 2021)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 214, |
|
"end": 237, |
|
"text": "(Casillas et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System architecture", |
|
"sec_num": "5" |
|
}, |
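How a combined CoRoLa + MARCELL representation could be built by simple concatenation is sketched below; it assumes both embedding sets are loaded as word-to-vector dictionaries and backs off to zero vectors for words missing from one source, which may differ from the authors' exact procedure.

```python
import numpy as np

def concatenate_embeddings(vocabulary, corola, marcell, dim_corola=300, dim_marcell=300):
    """corola, marcell: dict[str, np.ndarray]; returns a (len(vocabulary), 600) matrix for the 300+300 case."""
    matrix = np.zeros((len(vocabulary), dim_corola + dim_marcell), dtype=np.float32)
    for row, word in enumerate(vocabulary):
        matrix[row, :dim_corola] = corola.get(word, np.zeros(dim_corola, dtype=np.float32))
        matrix[row, dim_corola:] = marcell.get(word, np.zeros(dim_marcell, dtype=np.float32))
    return matrix   # feeds a main BiLSTM of size 600 when both embedding sources are used
```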
|
{ |
|
"text": "In addition to the main BiLSTM layer size, we used a character BiLSTM of size 25. Furthermore, to prevent overfitting, a dropout of probability 50% was introduced. A gradient clipping (Pascanu et al., 2013 ) with a value of 5 is used to deal with exploding gradients. Finally, we use a stochastic gradient descent (SGD) algorithm with a learning rate of 0.005.", |
|
"cite_spans": [ |
|
{ |
|
"start": 184, |
|
"end": 205, |
|
"text": "(Pascanu et al., 2013", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System architecture", |
|
"sec_num": "5" |
|
}, |
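A generic PyTorch training loop wiring up the stated settings (SGD with learning rate 0.005, gradient clipping at 5, and the early stopping with patience 10 mentioned in Section 6; the 50% dropout sits inside the model) could look like the sketch below. The validation_f1 callable is a placeholder, and this is a sketch of the setup rather than the NeuroNER implementation.

```python
import torch

def train(model, train_loader, validation_f1, max_epochs=100, patience=10):
    optimizer = torch.optim.SGD(model.parameters(), lr=0.005)
    best_f1, epochs_without_improvement = 0.0, 0
    for epoch in range(max_epochs):
        model.train()
        for words, chars, tags in train_loader:
            optimizer.zero_grad()
            loss = model(words, chars, tags)                            # CRF negative log-likelihood
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)     # gradient clipping at 5
            optimizer.step()
        model.eval()
        f1 = validation_f1(model)                                       # macro F1 on the validation split
        if f1 > best_f1:
            best_f1, epochs_without_improvement = f1, 0
        else:
            epochs_without_improvement += 1
            if epochs_without_improvement >= patience:                  # early stopping
                break
    return best_f1
```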
|
{ |
|
"text": "Besides experimenting with the aforementioned NER models, we also made tests with an ensemble method that combines the results from the different models. The idea was to see what combination provides better results and in what conditions. We've used four types of operations for combining the results. The first one is the union of two or more models. The second one is the full intersection of two or more models. In this case, if for the same set of tokens the annotations are different then none of them are represented in the final set. The third type is the implementation of a majority voting system, meaning that if an entity span has the same annotation in (n/2) + 1 cases, where n is the total number of models used, it is the winner. For example, given at least three models it takes at least two of the candidates with the same annotation to allow it to be represented in the final set. The fourth one is a merge between two or more models where for a given set of tokens the longest annotation between all of them is represented in the final set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System architecture", |
|
"sec_num": "5" |
|
}, |
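The four combination operations can be sketched over sets of (start, end, label) spans as below; the span representation and the overlap handling in the longest-annotation merge are assumptions made for illustration.

```python
from collections import Counter

# Each model's output for a document: a set of (start_char, end_char, label) spans.
def union_spans(outputs):
    return set().union(*outputs)

def intersection_spans(outputs):
    # Spans annotated identically by every model; differing labels over the same tokens drop out.
    return set.intersection(*(set(output) for output in outputs))

def majority_vote(outputs):
    counts = Counter(span for output in outputs for span in set(output))
    needed = len(outputs) // 2 + 1           # at least (n/2) + 1 identical annotations
    return {span for span, count in counts.items() if count >= needed}

def longest_merge(outputs):
    # Keep the longest annotation for any overlapping region, longest spans first.
    kept = []
    for start, end, label in sorted(union_spans(outputs), key=lambda s: s[1] - s[0], reverse=True):
        if all(end <= k_start or start >= k_end for k_start, k_end, _ in kept):
            kept.append((start, end, label))
    return set(kept)
```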
|
{ |
|
"text": "Afterwards, for each of the resulting final sets, the precision, recall and F1 scores are computed against the gold corpus for each entity types in the corpus. The macro average score is finally calculated. Each of these pairs of combinations and scores are then recorded and a best that has the highest F1 score is calculated. The final step of measuring the precision, recall and F1 scores was done using NeuroNER evaluation script, in order to have a consistency with the initial models scores.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System architecture", |
|
"sec_num": "5" |
|
}, |
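Scoring an ensemble output in the way described above could look like the sketch below, using the seqeval package over token-level BIO labels as a stand-in for the NeuroNER evaluation script; the label sequences are hypothetical.

```python
from seqeval.metrics import classification_report, f1_score

# Gold and predicted BIO label sequences, one list per sentence (hypothetical example).
gold = [["B-PER", "I-PER", "O", "B-LEGAL", "I-LEGAL"]]
predicted = [["B-PER", "I-PER", "O", "B-ORG", "I-ORG"]]

print(classification_report(gold, predicted))        # per-class precision, recall, F1
print(f1_score(gold, predicted, average="macro"))    # macro-averaged F1 across entity types
```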
|
{ |
|
"text": "Experiments were performed on the two perspectives associated with the corpus: the complete set of 5 classes (person, organization, location, time expressions and legal reference) and the reduced set (person, organization, location, time expression). In the second case, we took into consideration the additional annotations associated with the 4 remaining classes present inside the legal reference entities. Furthermore, in order to explore the impact of different features, we conducted multiple experiments on each perspective. Each experiment was allowed to train for at most 100 epochs. However, we used early stopping in case no improvement was perceived on the validation set for 10 epochs. As a result, neither of the experiments actually trained for the maximum of 100 epochs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The LegalNERo corpus was split into three subcorpora for training, validation during training and testing on data unseen during training. We used a 80% split for training and 10% for each of the validation and testing sub-corpora. The split was realized at file level, thus the training split contains 290 files, while the validation and test splits contain 40 files each. For reproducibility of the reported results as well as for comparison with future models, we offer the splits for download from the RELATE platform 11, 12 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 521, |
|
"end": 524, |
|
"text": "11,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 525, |
|
"end": 527, |
|
"text": "12", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
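A file-level split along the lines described could be produced as below; this is a sketch, the shuffling seed is arbitrary, and the exact rounding differs slightly from the released 290/40/40 partition, which should be downloaded from RELATE for reproducibility.

```python
import random

def split_corpus_files(files, train_ratio=0.8, seed=42):
    """Shuffle document files and split them at file level into train/validation/test sub-corpora."""
    files = sorted(files)
    random.Random(seed).shuffle(files)
    n_train = int(train_ratio * len(files))
    n_validation = (len(files) - n_train) // 2
    train = files[:n_train]
    validation = files[n_train:n_train + n_validation]
    test = files[n_train + n_validation:]
    return train, validation, test
```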
|
{ |
|
"text": "Using the same splits, we also trained baseline models using well-known libraries such as spaCy 13 . The spaCy library provides a variety of tools for fast text processing and is developed as a modular pipeline. For implementation of our models we focused onto two critical components of the spaCy pipeline, namely the components which are responsible for converting string tokens to vectors and the named entity recognition component. During training, besides word embeddings, we used spaCy's class Lexeme 14 , as an entry in the vocabulary. A Lexeme has no string context, it is a word type, as opposed to a word token. It therefore has no part-ofspeech tag, dependency parse, or lemma (if lemmatization depends on the part-of-speech tag). After exhaustive tests, the representative model for each word embedding source also had its own optimal selection of the parameters that can be found at 15 . All of them made use of at least one of the affixes and some of them also used the shape. The spaCy's training process, which is stopped automatically when the F1 score doesn't vary very much for a couple of epochs, guarantees saving the best model from all checkpoints. Although it doesn't save any information during training, we used wandb 16 in order to retrieve the aforementioned data. Using spaCy, the best F1 score was 83.77, for all the five entity classes and using ro_core_news_lg 17 embeddings with a layer of size 300. When the LEGAL class was excluded and with the same embeddings configuration, the F1 score obtained by spaCy is 86.21.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
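A hedged sketch of preparing the LegalNERo splits for a spaCy v3 baseline; the record format, file names and the command-line invocation are illustrative assumptions rather than the authors' exact configuration.

```python
import spacy
from spacy.tokens import DocBin

def to_docbin(records, output_path):
    """records: iterable of (text, [(start_char, end_char, label), ...]) pairs."""
    nlp = spacy.blank("ro")
    doc_bin = DocBin()
    for text, spans in records:
        doc = nlp.make_doc(text)
        entities = [doc.char_span(start, end, label=label, alignment_mode="contract")
                    for start, end, label in spans]
        doc.ents = [entity for entity in entities if entity is not None]  # drop spans not aligned to tokens
        doc_bin.add(doc)
    doc_bin.to_disk(output_path)

# Training would then be driven from the command line, for example:
#   python -m spacy train config.cfg --output ./legalnero_spacy \
#       --paths.train train.spacy --paths.dev dev.spacy
```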
|
{ |
|
"text": "In Table 5 are presented different experiments using the system architecture described in Section 5, for all the entities. Similarly, in Table 6 are given results from experiments without the legal reference entity class. The usage of gazetteer resources helps to increase the F1 score associated with persons, locations and organizations. Therefore the best models associated with the two scenarios make use of gazetteers. Table 7 presents the results of the four types of ensemble operations. It can be seen that the best F1 score (90.36) was achieved by reunion of three models, two containing all types of entities and one obtained without legal entity type (CoRoLa+MARCELL Y N). The model, CoRoLa+MARCELL Y N presented in Table 3 has significantly contributed to increasing the F1 score. Another important observation presented in Table 7 is that an ensemble of models can, in principle, perform better than any individual model, because the various errors of the models were averaged out. It can also be seen that the F1 score for each operation is greater than the ones obtained by individual models.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 5", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 137, |
|
"end": 144, |
|
"text": "Table 6", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 424, |
|
"end": 431, |
|
"text": "Table 7", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 727, |
|
"end": 734, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 836, |
|
"end": 844, |
|
"text": "Table 7", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The resulting best performing models are available for direct online usage through the RELATE platform 18 . This integration allows the user to enter a Romanian legal document inside the platform's web interface, select the desired model, by using a 15 https://spacy.io/api/lexeme# attributes 16 https://wandb.ai/site 17 https://spacy.io/models/ro#ro_core_ news_lg", |
|
"cite_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 105, |
|
"text": "18", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "18 https://relate.racai.ro/index.php? path=ner/demo dropdown menu, and then execute the model. The selected model together with the raw text are sent to the server process, which produces a list of recognized entities. These are returned to the user and displayed in the web interface. The interface is presented in Figure 2 and example results are presented in Figure 3 . Furthermore, pre-trained models can be downloaded from the same interface.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 316, |
|
"end": 324, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 362, |
|
"end": 370, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "This paper introduced a neural named entity recognition system designed specifically for the Romanian legal domain. It employed the LegalNERo corpus for training and evaluation. The system is available for querying inside the RELATE platform and pre-trained models are available for download. As indicated in Section 6, the best performing models made use of word embeddings trained on the legal-domain MARCELL corpus. When considering all the entity types available, CoRoLa and MARCELL embeddings seem to provide similar performance ( Table 5 , F1 difference on the test set is less than 1%). However, when considering only persons, organizations, locations and time expressions (Table 6 ), MARCELL embeddings provide over 1% F1 improvement compared to CoRoLa, while a combination of CoRoLa and MARCELL embeddings provide the best results with an improvement of over 2% over simple CoRoLa based embeddings. Even more, an ensemble model combining models with all the entity types with a model without the legal reference entity type achieves the best performance on the test set with almost 5% improvement, considering overall macro F1. As future work we foresee expanding the LegalNERo corpus with additional annotations including finegrained classes of entities and to make more experiments with different NER architectures.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 536, |
|
"end": 543, |
|
"text": "Table 5", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 680, |
|
"end": 688, |
|
"text": "(Table 6", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "https://marcell-project.eu/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://nlp.stanford.edu/software/ CRF-NER.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.lynx-project.eu/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://brat.nlplab.org/ 5 https://universaldependencies.org/ ext-format.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.geonames.org/ 7 https://lod-cloud.net/dataset/ racai-legalnero", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://relate.racai.ro/index.php? path=ner/demo 10 https://ec.europa.eu/jrc/en/ language-technologies/jrc-names", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://relate.racai.ro/resources/ legalnero/legalnero_split_5classes.zip12 https://relate.racai.ro/resources/ legalnero/legalnero_split_4classes.zip 13 https://spacy.io/ 14 https://spacy.io/api/lexeme", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Proceedings of the Natural Legal Language Processing Workshop 2019. Association for Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Nikolaos", |
|
"middle": [], |
|
"last": "Aletras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elliott", |
|
"middle": [], |
|
"last": "Ash", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leslie", |
|
"middle": [], |
|
"last": "Barrett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Meyers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Preotiuc-Pietro", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikolaos Aletras, Elliott Ash, Leslie Barrett, Daniel Chen, Adam Meyers, Daniel Preotiuc-Pietro, David Rosenberg, and Amanda Stent, editors. 2019. Pro- ceedings of the Natural Legal Language Processing Workshop 2019. Association for Computational Lin- guistics, Minneapolis, Minnesota.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "2020. Proceedings of the Natural Legal Language Processing Workshop", |
|
"authors": [], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikolaos Aletras, Androutsopoulos Ion, Leslie Barrett, Adam Meyers, and Daniel Preotiuc-Pietro, editors. 2020. Proceedings of the Natural Legal Language Processing Workshop 2020.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Pharmaconer tagger: a deep learning-based tool for automatically finding chemicals and drugs in spanish medical texts", |
|
"authors": [ |
|
{ |
|
"first": "Jordi", |
|
"middle": [], |
|
"last": "Armengol-Estap\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felipe", |
|
"middle": [], |
|
"last": "Soares", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Montserrat", |
|
"middle": [], |
|
"last": "Marimon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Krallinger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Genomics Inform", |
|
"volume": "17", |
|
"issue": "2", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.5808/GI.2019.17.2.e15" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jordi Armengol-Estap\u00e9, Felipe Soares, Montserrat Ma- rimon, and Martin Krallinger. 2019. Pharmaconer tagger: a deep learning-based tool for automatically finding chemicals and drugs in spanish medical texts. Genomics Inform, 17(2):e15-.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "The Romanian medical treebank-SiMoNERo", |
|
"authors": [ |
|
{ |
|
"first": "Barbu", |
|
"middle": [], |
|
"last": "Verginica", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Mititelu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mitrofan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the The 15th Edition of the International Conference on Linguistic Resources and Tools for Natural Language Processing -ConsILR-2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Verginica Barbu Mititelu and Maria Mitrofan. 2020. The Romanian medical treebank-SiMoNERo. In Proceedings of the The 15th Edition of the Interna- tional Conference on Linguistic Resources and Tools for Natural Language Processing -ConsILR-2020, pages 7-16.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "May i check again?-a simple but efficient way to generate and use contextual dictionaries for named entity recognition. application to french legal texts", |
|
"authors": [ |
|
{ |
|
"first": "Valentin", |
|
"middle": [], |
|
"last": "Barriere", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amaury", |
|
"middle": [], |
|
"last": "Fouret", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1909.03453" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Valentin Barriere and Amaury Fouret. 2019. May i check again?-a simple but efficient way to gener- ate and use contextual dictionaries for named entity recognition. application to french legal texts. arXiv preprint arXiv:1909.03453.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A low-cost, highcoverage legal named entity recognizer, classifier and linker", |
|
"authors": [ |
|
{ |
|
"first": "Cristian", |
|
"middle": [], |
|
"last": "Cardellino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Milagro", |
|
"middle": [], |
|
"last": "Teruel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laura", |
|
"middle": [ |
|
"Alonso" |
|
], |
|
"last": "Alemany", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Serena", |
|
"middle": [], |
|
"last": "Villata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 16th edition of the International Conference on Articial Intelligence and Law", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9--18", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cristian Cardellino, Milagro Teruel, Laura Alonso Ale- many, and Serena Villata. 2017. A low-cost, high- coverage legal named entity recognizer, classifier and linker. In Proceedings of the 16th edition of the International Conference on Articial Intelligence and Law, pages 9-18.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Measuring the effect of different types of unsupervised word representations on medical named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Arantza", |
|
"middle": [], |
|
"last": "Casillas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nerea", |
|
"middle": [], |
|
"last": "Ezeiza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iakes", |
|
"middle": [], |
|
"last": "Goenaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alicia", |
|
"middle": [], |
|
"last": "P\u00e9rez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xabier", |
|
"middle": [], |
|
"last": "Soto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Journal of Medical Informatics", |
|
"volume": "129", |
|
"issue": "", |
|
"pages": "100--106", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.ijmedinf.2019.05.022" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arantza Casillas, Nerea Ezeiza, Iakes Goenaga, Alicia P\u00e9rez, and Xabier Soto. 2019. Measuring the effect of different types of unsupervised word representa- tions on medical named entity recognition. Inter- national Journal of Medical Informatics, 129:100- 106.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "The birth of romanian BERT", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Dumitrescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrei-Marius", |
|
"middle": [], |
|
"last": "Avram", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sampo", |
|
"middle": [], |
|
"last": "Pyysalo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings, EMNLP 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4324--4328", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.findings-emnlp.387" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S , tefan Daniel Dumitrescu, Andrei-Marius Avram, and Sampo Pyysalo. 2020. The birth of romanian BERT. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings, EMNLP 2020, Online Event, 16-20 November 2020, volume EMNLP 2020 of Findings of ACL, pages 4324-4328. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Prodromos Malakasiotis, Nikolaos Aletras, and Ion Androutsopoulos. 2020. LEGAL-BERT: the muppets straight out of law school", |
|
"authors": [ |
|
{ |
|
"first": "Ilias", |
|
"middle": [], |
|
"last": "Chalkidis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manos", |
|
"middle": [], |
|
"last": "Fergadiotis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilias Chalkidis, Manos Fergadiotis, Prodromos Malaka- siotis, Nikolaos Aletras, and Ion Androutsopoulos. 2020. LEGAL-BERT: the muppets straight out of law school. CoRR, abs/2010.02559.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "NeuroNER: an easy-to-use program for named-entity recognition based on neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Franck", |
|
"middle": [], |
|
"last": "Dernoncourt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ji", |
|
"middle": [ |
|
"Young" |
|
], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Szolovits", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Conference on Empirical Methods on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Franck Dernoncourt, Ji Young Lee, and Peter Szolovits. 2017. NeuroNER: an easy-to-use program for named-entity recognition based on neural networks. Conference on Empirical Methods on Natural Lan- guage Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Named Entity Recognition and Resolution in Legal Text", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Dozier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ravikumar", |
|
"middle": [], |
|
"last": "Kondadadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Light", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arun", |
|
"middle": [], |
|
"last": "Vachher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sriharsha", |
|
"middle": [], |
|
"last": "Veeramachaneni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramdev", |
|
"middle": [], |
|
"last": "Wudali", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "27--43", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher Dozier, Ravikumar Kondadadi, Marc Light, Arun Vachher, Sriharsha Veeramachaneni, and Ramdev Wudali. 2010. Named Entity Recog- nition and Resolution in Legal Text, pages 27-43.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Introducing RONEC -the Romanian named entity corpus", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Dumitrescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrei-Marius", |
|
"middle": [], |
|
"last": "Avram", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4436--4443", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S , tefan Daniel Dumitrescu and Andrei-Marius Avram. 2020. Introducing RONEC -the Romanian named entity corpus. In Proceedings of the 12th Language Resources and Evaluation Conference, pages 4436- 4443, Marseille, France. European Language Re- sources Association.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Romanian timebank: An annotated parallel corpus for temporal information", |
|
"authors": [ |
|
{ |
|
"first": "Corina", |
|
"middle": [], |
|
"last": "For\u0203scu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Tufi\u015f", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC'12)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3762--3766", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Corina For\u0203scu and Dan Tufi\u015f. 2012. Romanian time- bank: An annotated parallel corpus for temporal in- formation. In Proceedings of the Eighth Interna- tional Conference on Language Resources and Eval- uation (LREC'12), pages 3762-3766.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Named entity recognition, extraction, and linking in german legal contracts", |
|
"authors": [ |
|
{ |
|
"first": "Ingo", |
|
"middle": [], |
|
"last": "Glaser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernhard", |
|
"middle": [], |
|
"last": "Waltl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Matthes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "IRIS: Internationales Rechtsinformatik Symposium", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "325--334", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ingo Glaser, Bernhard Waltl, and Florian Matthes. 2018. Named entity recognition, extraction, and linking in german legal contracts. In IRIS: Inter- nationales Rechtsinformatik Symposium, pages 325- 334.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Bag of tricks for efficient text classification", |
|
"authors": [ |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 15th Conference of the European Chapter", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "427--431", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Armand Joulin, Edouard Grave, Piotr Bojanowski, and Tomas Mikolov. 2017. Bag of tricks for efficient text classification. In Proceedings of the 15th Con- ference of the European Chapter of the Association for Computational Linguistics: Volume 2, Short Pa- pers, pages 427-431, Valencia, Spain. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Unveiling references in legal texts-implicit versus explicit network structures", |
|
"authors": [ |
|
{ |
|
"first": "J\u00f6rg", |
|
"middle": [], |
|
"last": "Landthaler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernhard", |
|
"middle": [], |
|
"last": "Waltl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Matthes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "IRIS: Internationales Rechtsinformatik Symposium", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "71--79", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J\u00f6rg Landthaler, Bernhard Waltl, and Florian Matthes. 2016. Unveiling references in legal texts-implicit versus explicit network structures. In IRIS: Inter- nationales Rechtsinformatik Symposium, volume 8, pages 71-8.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Fine-grained named entity recognition in legal documents", |
|
"authors": [ |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Leitner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georg", |
|
"middle": [], |
|
"last": "Rehm", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Moreno-Schneider", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Semantic Systems. The Power of AI and Knowledge Graphs", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "272--287", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elena Leitner, Georg Rehm, and Julian Moreno- Schneider. 2019. Fine-grained named entity recog- nition in legal documents. In Semantic Systems. The Power of AI and Knowledge Graphs, pages 272-287, Cham. Springer International Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "A dataset of German legal documents for named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Leitner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georg", |
|
"middle": [], |
|
"last": "Rehm", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Moreno-Schneider", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4478--4485", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elena Leitner, Georg Rehm, and Julian Moreno- Schneider. 2020. A dataset of German legal doc- uments for named entity recognition. In Proceed- ings of the 12th Language Resources and Evaluation Conference, pages 4478-4485, Marseille, France. European Language Resources Association.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "RoBERT -a Romanian BERT model", |
|
"authors": [ |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Masala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Ruseti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Dascalu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6626--6637", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.coling-main.581" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mihai Masala, S , tefan Ruseti, and Mihai Dascalu. 2020. RoBERT -a Romanian BERT model. In Proceed- ings of the 28th International Conference on Com- putational Linguistics, pages 6626-6637, Barcelona, Spain (Online). International Committee on Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Dbpedia spotlight: shedding light on the web of documents", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Pablo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Mendes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e9s", |
|
"middle": [], |
|
"last": "Jakob", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Garc\u00eda-Silva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bizer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 7th international conference on semantic systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pablo N Mendes, Max Jakob, Andr\u00e9s Garc\u00eda-Silva, and Christian Bizer. 2011. Dbpedia spotlight: shedding light on the web of documents. In Proceedings of the 7th international conference on semantic sys- tems, pages 1-8.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Creating contract templates for car insurance using multi-agent based text understanding and clustering", |
|
"authors": [ |
|
{ |
|
"first": "Igor", |
|
"middle": [], |
|
"last": "Minakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Rzevski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Petr", |
|
"middle": [], |
|
"last": "Skobelev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Volman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "International Conference on Industrial Applications of Holonic and Multi-Agent Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "361--370", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Igor Minakov, George Rzevski, Petr Skobelev, and Si- mon Volman. 2007. Creating contract templates for car insurance using multi-agent based text un- derstanding and clustering. In International Con- ference on Industrial Applications of Holonic and Multi-Agent Systems, pages 361-370. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Extragere de cunostinte din texte \u00een limba rom\u00e2na si date structurate cu aplicatii \u00een domeniul medical", |
|
"authors": [ |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Mitrofan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maria Mitrofan. 2019. Extragere de cunostinte din texte \u00een limba rom\u00e2na si date structurate cu apli- catii \u00een domeniul medical. Ph.D. thesis, Romanian Academy.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Orchestrating NLP services for the legal domain", |
|
"authors": [ |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Moreno-Schneider", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georg", |
|
"middle": [], |
|
"last": "Rehm", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Montiel-Ponsoda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V\u00edctor", |
|
"middle": [], |
|
"last": "Rodriguez-Doncel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Artem", |
|
"middle": [], |
|
"last": "Revenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sotirios", |
|
"middle": [], |
|
"last": "Karampatakis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Khvalchik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Sageder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jorge", |
|
"middle": [], |
|
"last": "Gracia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Filippo", |
|
"middle": [], |
|
"last": "Maganza", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2332--2340", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julian Moreno-Schneider, Georg Rehm, Elena Montiel- Ponsoda, V\u00edctor Rodriguez-Doncel, Artem Revenko, Sotirios Karampatakis, Maria Khvalchik, Christian Sageder, Jorge Gracia, and Filippo Maganza. 2020. Orchestrating NLP services for the legal domain. In Proceedings of the 12th Language Resources and Evaluation Conference, pages 2332-2340, Mar- seille, France. European Language Resources Asso- ciation.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "On the difficulty of training recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Razvan", |
|
"middle": [], |
|
"last": "Pascanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 30th International Conference on International Conference on Machine Learning", |
|
"volume": "28", |
|
"issue": "", |
|
"pages": "1310--1318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Razvan Pascanu, Tomas Mikolov, and Yoshua Bengio. 2013. On the difficulty of training recurrent neu- ral networks. In Proceedings of the 30th Interna- tional Conference on International Conference on Machine Learning -Volume 28, ICML'13, pages 1310-1318. JMLR.org.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Contributions to semantic processing of texts; Identification of entities and relations between textual units; Case study on Romanian language", |
|
"authors": [ |
|
{ |
|
"first": "Vasile", |
|
"middle": [], |
|
"last": "P\u0203is", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vasile P\u0203is , . 2019. Contributions to semantic process- ing of texts; Identification of entities and relations between textual units; Case study on Romanian lan- guage. Ph.D. thesis, Romanian Academy.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Multiple annotation pipelines inside the relate platform", |
|
"authors": [ |
|
{ |
|
"first": "Vasile", |
|
"middle": [], |
|
"last": "P\u0203is", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "The 15th International Conference on Linguistic Resources and Tools for Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "65--75", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vasile P\u0203is , . 2020. Multiple annotation pipelines inside the relate platform. In The 15th International Con- ference on Linguistic Resources and Tools for Natu- ral Language Processing, pages 65-75.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Assessing multiple word embeddings for named entity recognition of professions and occupations in health-related social media", |
|
"authors": [ |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Vasile P\u0203is", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mitrofan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the Sixth Social Media Mining for Health (#SMM4H) Workshop and Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "128--130", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.smm4h-1.27" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vasile P\u0203is , and Maria Mitrofan. 2021. Assessing mul- tiple word embeddings for named entity recognition of professions and occupations in health-related so- cial media. In Proceedings of the Sixth Social Media Mining for Health (#SMM4H) Workshop and Shared Task, pages 128-130, Mexico City, Mexico. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Computing distributed representations of words using the CoRoLa corpus", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Vasile P\u0203is", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Tufis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Romanian Academy Series A -Mathematics Physics Technical Sciences Information Science", |
|
"volume": "19", |
|
"issue": "", |
|
"pages": "185--191", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vasile P\u0203is , and Dan Tufis , . 2018. Computing dis- tributed representations of words using the CoRoLa corpus. Proceedings of the Romanian Academy Se- ries A -Mathematics Physics Technical Sciences In- formation Science, 19(2):185-191.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "A processing platform relating data and tools for romanian language", |
|
"authors": [ |
|
{ |
|
"first": ",", |
|
"middle": [], |
|
"last": "Vasile P\u0203is", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Tufis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Ion", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "81--88", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vasile P\u0203is , , Dan Tufis , , and Radu Ion. 2020. A pro- cessing platform relating data and tools for roma- nian language. In Proceedings of The 12th Lan- guage Resources and Evaluation Conference, pages 81-88, Marseille, France. European Language Re- sources Association.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "JRC-NAMES: A freely available, highly multilingual named entity resource", |
|
"authors": [ |
|
{ |
|
"first": "Ralf", |
|
"middle": [], |
|
"last": "Steinberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bruno", |
|
"middle": [], |
|
"last": "Pouliquen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mijail", |
|
"middle": [], |
|
"last": "Kabadjov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jenya", |
|
"middle": [], |
|
"last": "Belyaeva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Van Der Goot", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the International Conference Recent Advances in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "104--110", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ralf Steinberger, Bruno Pouliquen, Mijail Kabadjov, Jenya Belyaeva, and Erik van der Goot. 2011. JRC- NAMES: A freely available, highly multilingual named entity resource. In Proceedings of the In- ternational Conference Recent Advances in Natural Language Processing 2011, pages 104-110, Hissar, Bulgaria. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "brat: a web-based tool for NLP-assisted text annotation", |
|
"authors": [ |
|
{ |
|
"first": "Pontus", |
|
"middle": [], |
|
"last": "Stenetorp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sampo", |
|
"middle": [], |
|
"last": "Pyysalo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Goran", |
|
"middle": [], |
|
"last": "Topi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomoko", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sophia", |
|
"middle": [], |
|
"last": "Ananiadou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun'ichi", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Demonstrations at the 13th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "102--107", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pontus Stenetorp, Sampo Pyysalo, Goran Topi\u0107, Tomoko Ohta, Sophia Ananiadou, and Jun'ichi Tsu- jii. 2012. brat: a web-based tool for NLP-assisted text annotation. In Proceedings of the Demonstra- tions at the 13th Conference of the European Chap- ter of the Association for Computational Linguistics, pages 102-107, Avignon, France. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Erik", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Tjong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kim", |
|
"middle": [], |
|
"last": "Sang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fien", |
|
"middle": [], |
|
"last": "De Meulder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the Seventh Conference on Natural Language Learning at HLT-NAACL 2003", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "142--147", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik F. Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition. In Proceedings of the Seventh Conference on Natu- ral Language Learning at HLT-NAACL 2003, pages 142-147.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Radu Ion, and Andrei Coman. 2020. Collection and annotation of the Romanian legal corpus", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Tufis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Mitrofan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vasile", |
|
"middle": [], |
|
"last": "P\u0203is", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Ion", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrei", |
|
"middle": [], |
|
"last": "Coman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2773--2777", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Tufis , , Maria Mitrofan, Vasile P\u0203is , , Radu Ion, and Andrei Coman. 2020. Collection and annotation of the Romanian legal corpus. In Proceedings of the 12th Language Resources and Evaluation Con- ference, pages 2773-2777, Marseille, France. Euro- pean Language Resources Association.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Little strokes fell great oaks. creating CoRoLa, the reference corpus of contemporary romanian", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Tufis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Verginica", |
|
"middle": [], |
|
"last": "Barbu Mititelu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Irimia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vasile", |
|
"middle": [], |
|
"last": "P\u0203is", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Ion", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nils", |
|
"middle": [], |
|
"last": "Diewald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Mitrofan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihaela", |
|
"middle": [], |
|
"last": "Onofrei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Revue Roumaine de Linguistique", |
|
"volume": "64", |
|
"issue": "3", |
|
"pages": "227--240", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Tufis , , Verginica Barbu Mititelu, Elena Irimia, Vasile P\u0203is , , Radu Ion, Nils Diewald, Maria Mitro- fan, and Onofrei Mihaela. 2019. Little strokes fell great oaks. creating CoRoLa, the reference corpus of contemporary romanian. Revue Roumaine de Lin- guistique, 64(3):227-240.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "An attention-based bilstm-crf for chinese named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Qing", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "2020 IEEE 5th International Conference on Cloud Computing and Big Data Analytics (ICCCBDA)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "550--555", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qing Zhong and Yan Tang. 2020. An attention-based bilstm-crf for chinese named entity recognition. In 2020 IEEE 5th International Conference on Cloud Computing and Big Data Analytics (ICCCBDA), pages 550-555. IEEE.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Figure 1: System architecture", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"text": "Web interface for interacting with the Romanian Legal NER modelsFigure 3: Romanian Legal NER results presented in RELATE", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>Entity</td><td>Number</td></tr><tr><td>Person</td><td>914</td></tr><tr><td>Location</td><td>2,276</td></tr><tr><td>Organization</td><td>4,824</td></tr><tr><td>Time</td><td>2,213</td></tr><tr><td>Legal Ref</td><td>3,387</td></tr><tr><td>Total</td><td>13,614</td></tr></table>", |
|
"text": "LegalNERo corpus statistics" |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "Number of entities, considering all the entity types" |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>Entity</td><td># Tokens</td></tr><tr><td>Person</td><td>2.30</td></tr><tr><td>Location</td><td>1.38</td></tr><tr><td>Organization</td><td>4.04</td></tr><tr><td>Time</td><td>2.31</td></tr><tr><td>Legal Ref</td><td>7.29</td></tr></table>", |
|
"text": "Number of entities, without the legal reference entity type" |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td/><td/><td/><td colspan=\"2\">Validation set</td><td/></tr><tr><td>CoRoLa</td><td>N</td><td>N</td><td>84.58</td><td>81.75 76.11 80.07 84.12</td><td>81.37</td></tr><tr><td>CoRoLa</td><td>Y</td><td>N</td><td>84.88</td><td>80.67 76.73 80.11 83.72</td><td>81.26</td></tr><tr><td>CoRoLa</td><td>Y</td><td>Y</td><td>83.72</td><td>83.00 74.10 80.15 84.21</td><td>81.09</td></tr><tr><td>MARCELL</td><td>N</td><td>N</td><td>85.79</td><td>82.87 75.15 82.56 79.91</td><td>81.29</td></tr><tr><td>MARCELL</td><td>Y</td><td>N</td><td>82.75</td><td>78.88 77.44 82.95 85.65</td><td>81.56</td></tr><tr><td>MARCELL</td><td>Y</td><td>Y</td><td>86.12</td><td>81.30 73.58 81.10 82.48</td><td>80.97</td></tr><tr><td>CoRoLa+MARCELL</td><td>N</td><td>N</td><td>84.51</td><td>77.42 74.78 80.54 84.30</td><td>80.32</td></tr><tr><td>CoRoLa+MARCELL</td><td>Y</td><td>N</td><td>85.61</td><td>79.52 71.78 80.86 83.76</td><td>80.33</td></tr><tr><td>CoRoLa+MARCELL</td><td>Y</td><td>Y</td><td>83.84</td><td>77.11 77.58 80.78 81.78</td><td>80.24</td></tr><tr><td/><td/><td/><td colspan=\"2\">Test set</td><td/></tr><tr><td>CoRoLa</td><td>N</td><td>N</td><td>90.50</td><td>95.56 70.59 76.26 85.93</td><td>83.90</td></tr><tr><td>CoRoLa</td><td>Y</td><td>N</td><td>90.06</td><td>98.08 75.37 78.38 82.42</td><td>85.03</td></tr><tr><td>CoRoLa</td><td>Y</td><td>Y</td><td>89.80</td><td>95.56 73.33 75.80 84.53</td><td>83.94</td></tr><tr><td>MARCELL</td><td>N</td><td>N</td><td>90.41</td><td>97.38 70.30 76.70 81.64</td><td>83.49</td></tr><tr><td>MARCELL</td><td>Y</td><td>N</td><td>86.98</td><td>98.48 75.94 80.60 84.09</td><td>85.34</td></tr><tr><td>MARCELL</td><td>Y</td><td>Y</td><td>90.12</td><td>96.65 69.77 74.23 85.55</td><td>83.39</td></tr><tr><td>CoRoLa+MARCELL</td><td>N</td><td>N</td><td>88.18</td><td>98.50 75.62 76.65 84.39</td><td>84.74</td></tr><tr><td>CoRoLa+MARCELL</td><td>Y</td><td>N</td><td>89.68</td><td>97.04 75.21 78.69 83.08</td><td>84.83</td></tr><tr><td>CoRoLa+MARCELL</td><td>Y</td><td>Y</td><td>89.42</td><td>96.99 70.03 79.10 80.54</td><td>83.40</td></tr></table>", |
|
"text": "EmbeddingsGaz. Affixes LEGAL PER LOC ORG TIME Macro AVG" |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>Embeddings</td><td colspan=\"4\">Gaz. Affixes PER LOC ORG TIME Macro AVG</td></tr><tr><td/><td/><td/><td>Validation set</td><td/></tr><tr><td>CoRoLa</td><td>N</td><td>N</td><td>80.50 73.65 87.17 82.68</td><td>81.17</td></tr><tr><td>CoRoLa</td><td>Y</td><td>N</td><td>80.63 78.92 85.96 83.07</td><td>82.21</td></tr><tr><td>CoRoLa</td><td>Y</td><td>Y</td><td>79.01 75.00 85.81 84.15</td><td>81.18</td></tr><tr><td>MARCELL</td><td>N</td><td>N</td><td>81.75 73.58 86.17 83.51</td><td>81.57</td></tr><tr><td>MARCELL</td><td>Y</td><td>N</td><td>79.35 75.53 85.47 85.45</td><td>81.78</td></tr><tr><td>MARCELL</td><td>Y</td><td>Y</td><td>80.65 71.97 86.40 83.94</td><td>81.10</td></tr><tr><td>CoRoLa+MARCELL</td><td>N</td><td>N</td><td>77.05 75.76 85.89 84.59</td><td>81.04</td></tr><tr><td>CoRoLa+MARCELL</td><td>Y</td><td>N</td><td>81.12 73.23 85.48 83.69</td><td>81.13</td></tr><tr><td>CoRoLa+MARCELL</td><td>Y</td><td>Y</td><td>76.54 75.29 85.99 85.12</td><td>81.05</td></tr><tr><td/><td/><td/><td>Test set</td><td/></tr><tr><td>CoRoLa</td><td>N</td><td>N</td><td>96.27 66.86 80.34 89.81</td><td>83.35</td></tr><tr><td>CoRoLa</td><td>Y</td><td>N</td><td>96.65 72.13 81.36 88.31</td><td>84.64</td></tr><tr><td>CoRoLa</td><td>Y</td><td>Y</td><td>97.69 69.54 80.09 89.06</td><td>84.10</td></tr><tr><td>MARCELL</td><td>N</td><td>N</td><td>96.68 74.01 81.24 91.65</td><td>85.94</td></tr><tr><td>MARCELL</td><td>Y</td><td>N</td><td>98.86 69.83 79.85 91.93</td><td>85.14</td></tr><tr><td>MARCELL</td><td>Y</td><td>Y</td><td>96.68 74.49 78.87 92.18</td><td>85.66</td></tr><tr><td>CoRoLa+MARCELL</td><td>N</td><td>N</td><td>98.86 69.59 82.13 90.51</td><td>85.29</td></tr><tr><td>CoRoLa+MARCELL</td><td>Y</td><td>N</td><td>97.40 72.88 80.89 90.28</td><td>85.39</td></tr><tr><td>CoRoLa+MARCELL</td><td>Y</td><td>Y</td><td>98.86 76.01 80.89 91.39</td><td>86.84</td></tr></table>", |
|
"text": "F1 scores for different models, considering all entities" |
|
}, |
|
"TABREF5": { |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "F1 scores for different models, considering only person, location, organization and time expression Operation LEGAL PER LOC ORG TIME Macro AVG Reunion 90.42 98.17 78.96 91.61 92.44 90.36 Intersection 90.12 98.66 65.45 87.67 86.64 86.14 Voting algorithm 90.41 99.33 75.05 91.18 90.57 89.37 Longest span 89.35 98.66 70.59 89.95 86.22 87.29" |
|
}, |
|
"TABREF6": { |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "F1 scores for different ensembles for test set, considering all entities" |
|
} |
|
} |
|
} |
|
} |