Datasets:

Modalities: Text
Formats: parquet
Languages: English
ArXiv: 2110.00976
Libraries: Datasets, pandas
License: cc-by-4.0
parquet-converter committed on
Commit 7c8422d
Parent: 4ddaec5

Update parquet files

README.md DELETED
@@ -1,792 +0,0 @@
1
- ---
2
- pretty_name: LexGLUE
3
- annotations_creators:
4
- - found
5
- language_creators:
6
- - found
7
- language:
8
- - en
9
- license:
10
- - cc-by-4.0
11
- multilinguality:
12
- - monolingual
13
- size_categories:
14
- - 10K<n<100K
15
- source_datasets:
16
- - extended
17
- task_categories:
18
- - question-answering
19
- - text-classification
20
- task_ids:
21
- - multi-class-classification
22
- - multi-label-classification
23
- - multiple-choice-qa
24
- - topic-classification
25
- configs:
26
- - case_hold
27
- - ecthr_a
28
- - ecthr_b
29
- - eurlex
30
- - ledgar
31
- - scotus
32
- - unfair_tos
33
- dataset_info:
34
- - config_name: ecthr_a
35
- features:
36
- - name: text
37
- sequence: string
38
- - name: labels
39
- sequence:
40
- class_label:
41
- names:
42
- 0: '2'
43
- 1: '3'
44
- 2: '5'
45
- 3: '6'
46
- 4: '8'
47
- 5: '9'
48
- 6: '10'
49
- 7: '11'
50
- 8: '14'
51
- 9: P1-1
52
- splits:
53
- - name: train
54
- num_bytes: 89637461
55
- num_examples: 9000
56
- - name: test
57
- num_bytes: 11884180
58
- num_examples: 1000
59
- - name: validation
60
- num_bytes: 10985180
61
- num_examples: 1000
62
- download_size: 32852475
63
- dataset_size: 112506821
64
- - config_name: ecthr_b
65
- features:
66
- - name: text
67
- sequence: string
68
- - name: labels
69
- sequence:
70
- class_label:
71
- names:
72
- 0: '2'
73
- 1: '3'
74
- 2: '5'
75
- 3: '6'
76
- 4: '8'
77
- 5: '9'
78
- 6: '10'
79
- 7: '11'
80
- 8: '14'
81
- 9: P1-1
82
- splits:
83
- - name: train
84
- num_bytes: 89657661
85
- num_examples: 9000
86
- - name: test
87
- num_bytes: 11886940
88
- num_examples: 1000
89
- - name: validation
90
- num_bytes: 10987828
91
- num_examples: 1000
92
- download_size: 32852475
93
- dataset_size: 112532429
94
- - config_name: eurlex
95
- features:
96
- - name: text
97
- dtype: string
98
- - name: labels
99
- sequence:
100
- class_label:
101
- names:
102
- 0: '100163'
103
- 1: '100168'
104
- 2: '100169'
105
- 3: '100170'
106
- 4: '100171'
107
- 5: '100172'
108
- 6: '100173'
109
- 7: '100174'
110
- 8: '100175'
111
- 9: '100176'
112
- 10: '100177'
113
- 11: '100179'
114
- 12: '100180'
115
- 13: '100183'
116
- 14: '100184'
117
- 15: '100185'
118
- 16: '100186'
119
- 17: '100187'
120
- 18: '100189'
121
- 19: '100190'
122
- 20: '100191'
123
- 21: '100192'
124
- 22: '100193'
125
- 23: '100194'
126
- 24: '100195'
127
- 25: '100196'
128
- 26: '100197'
129
- 27: '100198'
130
- 28: '100199'
131
- 29: '100200'
132
- 30: '100201'
133
- 31: '100202'
134
- 32: '100204'
135
- 33: '100205'
136
- 34: '100206'
137
- 35: '100207'
138
- 36: '100212'
139
- 37: '100214'
140
- 38: '100215'
141
- 39: '100220'
142
- 40: '100221'
143
- 41: '100222'
144
- 42: '100223'
145
- 43: '100224'
146
- 44: '100226'
147
- 45: '100227'
148
- 46: '100229'
149
- 47: '100230'
150
- 48: '100231'
151
- 49: '100232'
152
- 50: '100233'
153
- 51: '100234'
154
- 52: '100235'
155
- 53: '100237'
156
- 54: '100238'
157
- 55: '100239'
158
- 56: '100240'
159
- 57: '100241'
160
- 58: '100242'
161
- 59: '100243'
162
- 60: '100244'
163
- 61: '100245'
164
- 62: '100246'
165
- 63: '100247'
166
- 64: '100248'
167
- 65: '100249'
168
- 66: '100250'
169
- 67: '100252'
170
- 68: '100253'
171
- 69: '100254'
172
- 70: '100255'
173
- 71: '100256'
174
- 72: '100257'
175
- 73: '100258'
176
- 74: '100259'
177
- 75: '100260'
178
- 76: '100261'
179
- 77: '100262'
180
- 78: '100263'
181
- 79: '100264'
182
- 80: '100265'
183
- 81: '100266'
184
- 82: '100268'
185
- 83: '100269'
186
- 84: '100270'
187
- 85: '100271'
188
- 86: '100272'
189
- 87: '100273'
190
- 88: '100274'
191
- 89: '100275'
192
- 90: '100276'
193
- 91: '100277'
194
- 92: '100278'
195
- 93: '100279'
196
- 94: '100280'
197
- 95: '100281'
198
- 96: '100282'
199
- 97: '100283'
200
- 98: '100284'
201
- 99: '100285'
202
- splits:
203
- - name: train
204
- num_bytes: 390770289
205
- num_examples: 55000
206
- - name: test
207
- num_bytes: 59739102
208
- num_examples: 5000
209
- - name: validation
210
- num_bytes: 41544484
211
- num_examples: 5000
212
- download_size: 125413277
213
- dataset_size: 492053875
214
- - config_name: scotus
215
- features:
216
- - name: text
217
- dtype: string
218
- - name: label
219
- dtype:
220
- class_label:
221
- names:
222
- 0: '1'
223
- 1: '2'
224
- 2: '3'
225
- 3: '4'
226
- 4: '5'
227
- 5: '6'
228
- 6: '7'
229
- 7: '8'
230
- 8: '9'
231
- 9: '10'
232
- 10: '11'
233
- 11: '12'
234
- 12: '13'
235
- splits:
236
- - name: train
237
- num_bytes: 178959320
238
- num_examples: 5000
239
- - name: test
240
- num_bytes: 76213283
241
- num_examples: 1400
242
- - name: validation
243
- num_bytes: 75600247
244
- num_examples: 1400
245
- download_size: 104763335
246
- dataset_size: 330772850
247
- - config_name: ledgar
248
- features:
249
- - name: text
250
- dtype: string
251
- - name: label
252
- dtype:
253
- class_label:
254
- names:
255
- 0: Adjustments
256
- 1: Agreements
257
- 2: Amendments
258
- 3: Anti-Corruption Laws
259
- 4: Applicable Laws
260
- 5: Approvals
261
- 6: Arbitration
262
- 7: Assignments
263
- 8: Assigns
264
- 9: Authority
265
- 10: Authorizations
266
- 11: Base Salary
267
- 12: Benefits
268
- 13: Binding Effects
269
- 14: Books
270
- 15: Brokers
271
- 16: Capitalization
272
- 17: Change In Control
273
- 18: Closings
274
- 19: Compliance With Laws
275
- 20: Confidentiality
276
- 21: Consent To Jurisdiction
277
- 22: Consents
278
- 23: Construction
279
- 24: Cooperation
280
- 25: Costs
281
- 26: Counterparts
282
- 27: Death
283
- 28: Defined Terms
284
- 29: Definitions
285
- 30: Disability
286
- 31: Disclosures
287
- 32: Duties
288
- 33: Effective Dates
289
- 34: Effectiveness
290
- 35: Employment
291
- 36: Enforceability
292
- 37: Enforcements
293
- 38: Entire Agreements
294
- 39: Erisa
295
- 40: Existence
296
- 41: Expenses
297
- 42: Fees
298
- 43: Financial Statements
299
- 44: Forfeitures
300
- 45: Further Assurances
301
- 46: General
302
- 47: Governing Laws
303
- 48: Headings
304
- 49: Indemnifications
305
- 50: Indemnity
306
- 51: Insurances
307
- 52: Integration
308
- 53: Intellectual Property
309
- 54: Interests
310
- 55: Interpretations
311
- 56: Jurisdictions
312
- 57: Liens
313
- 58: Litigations
314
- 59: Miscellaneous
315
- 60: Modifications
316
- 61: No Conflicts
317
- 62: No Defaults
318
- 63: No Waivers
319
- 64: Non-Disparagement
320
- 65: Notices
321
- 66: Organizations
322
- 67: Participations
323
- 68: Payments
324
- 69: Positions
325
- 70: Powers
326
- 71: Publicity
327
- 72: Qualifications
328
- 73: Records
329
- 74: Releases
330
- 75: Remedies
331
- 76: Representations
332
- 77: Sales
333
- 78: Sanctions
334
- 79: Severability
335
- 80: Solvency
336
- 81: Specific Performance
337
- 82: Submission To Jurisdiction
338
- 83: Subsidiaries
339
- 84: Successors
340
- 85: Survival
341
- 86: Tax Withholdings
342
- 87: Taxes
343
- 88: Terminations
344
- 89: Terms
345
- 90: Titles
346
- 91: Transactions With Affiliates
347
- 92: Use Of Proceeds
348
- 93: Vacations
349
- 94: Venues
350
- 95: Vesting
351
- 96: Waiver Of Jury Trials
352
- 97: Waivers
353
- 98: Warranties
354
- 99: Withholdings
355
- splits:
356
- - name: train
357
- num_bytes: 43358315
358
- num_examples: 60000
359
- - name: test
360
- num_bytes: 6845585
361
- num_examples: 10000
362
- - name: validation
363
- num_bytes: 7143592
364
- num_examples: 10000
365
- download_size: 16255623
366
- dataset_size: 57347492
367
- - config_name: unfair_tos
368
- features:
369
- - name: text
370
- dtype: string
371
- - name: labels
372
- sequence:
373
- class_label:
374
- names:
375
- 0: Limitation of liability
376
- 1: Unilateral termination
377
- 2: Unilateral change
378
- 3: Content removal
379
- 4: Contract by using
380
- 5: Choice of law
381
- 6: Jurisdiction
382
- 7: Arbitration
383
- splits:
384
- - name: train
385
- num_bytes: 1041790
386
- num_examples: 5532
387
- - name: test
388
- num_bytes: 303107
389
- num_examples: 1607
390
- - name: validation
391
- num_bytes: 452119
392
- num_examples: 2275
393
- download_size: 511342
394
- dataset_size: 1797016
395
- - config_name: case_hold
396
- features:
397
- - name: context
398
- dtype: string
399
- - name: endings
400
- sequence: string
401
- - name: label
402
- dtype:
403
- class_label:
404
- names:
405
- 0: '0'
406
- 1: '1'
407
- 2: '2'
408
- 3: '3'
409
- 4: '4'
410
- splits:
411
- - name: train
412
- num_bytes: 74781766
413
- num_examples: 45000
414
- - name: test
415
- num_bytes: 5989964
416
- num_examples: 3600
417
- - name: validation
418
- num_bytes: 6474615
419
- num_examples: 3900
420
- download_size: 30422703
421
- dataset_size: 87246345
422
- ---
423
-
424
- # Dataset Card for "LexGLUE"
425
-
426
- ## Table of Contents
427
- - [Dataset Description](#dataset-description)
428
- - [Dataset Summary](#dataset-summary)
429
- - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
430
- - [Languages](#languages)
431
- - [Dataset Structure](#dataset-structure)
432
- - [Data Instances](#data-instances)
433
- - [Data Fields](#data-fields)
434
- - [Data Splits](#data-splits)
435
- - [Dataset Creation](#dataset-creation)
436
- - [Curation Rationale](#curation-rationale)
437
- - [Source Data](#source-data)
438
- - [Annotations](#annotations)
439
- - [Personal and Sensitive Information](#personal-and-sensitive-information)
440
- - [Considerations for Using the Data](#considerations-for-using-the-data)
441
- - [Social Impact of Dataset](#social-impact-of-dataset)
442
- - [Discussion of Biases](#discussion-of-biases)
443
- - [Other Known Limitations](#other-known-limitations)
444
- - [Additional Information](#additional-information)
445
- - [Dataset Curators](#dataset-curators)
446
- - [Licensing Information](#licensing-information)
447
- - [Citation Information](#citation-information)
448
- - [Contributions](#contributions)
449
-
450
- ## Dataset Description
451
-
452
- - **Homepage:** https://github.com/coastalcph/lex-glue
453
- - **Repository:** https://github.com/coastalcph/lex-glue
454
- - **Paper:** https://arxiv.org/abs/2110.00976
455
- - **Leaderboard:** https://github.com/coastalcph/lex-glue
456
- - **Point of Contact:** [Ilias Chalkidis](mailto:ilias.chalkidis@di.ku.dk)
457
-
458
- ### Dataset Summary
459
-
460
- Inspired by the recent widespread use of the GLUE multi-task benchmark NLP dataset (Wang et al., 2018), the subsequent more difficult SuperGLUE (Wang et al., 2019), other previous multi-task NLP benchmarks (Conneau and Kiela, 2018; McCann et al., 2018), and similar initiatives in other domains (Peng et al., 2019), we introduce the *Legal General Language Understanding Evaluation (LexGLUE) benchmark*, a benchmark dataset to evaluate the performance of NLP methods in legal tasks. LexGLUE is based on seven existing legal NLP datasets, selected using criteria largely from SuperGLUE.
461
-
462
- As in GLUE and SuperGLUE (Wang et al., 2019b,a), one of our goals is to push towards generic (or ‘foundation’) models that can cope with multiple NLP tasks, in our case legal NLP tasks, possibly with limited task-specific fine-tuning. Another goal is to provide a convenient and informative entry point for NLP researchers and practitioners wishing to explore or develop methods for legal NLP. With these goals in mind, the datasets we include in LexGLUE and the tasks they address have been simplified in several ways to make it easier for newcomers and generic models to address all tasks.
463
-
464
- The LexGLUE benchmark is accompanied by experimental infrastructure that relies on the Hugging Face Transformers library and resides at: https://github.com/coastalcph/lex-glue.
465
-
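As a quick, editor-added illustration (a sketch, not part of the original card), each task can be loaded as one configuration of the `lex_glue` dataset with the Datasets library named above; the Hub id `lex_glue` is an assumption of this example.

```python
# Minimal sketch: load one LexGLUE task and peek at its first training example.
# Assumes the dataset is reachable on the Hugging Face Hub under the id "lex_glue".
from datasets import load_dataset

ecthr_a = load_dataset("lex_glue", "ecthr_a")   # DatasetDict with train/validation/test splits
first = ecthr_a["train"][0]
print(list(first.keys()))                       # ['text', 'labels']
print(len(first["text"]), "fact paragraphs, labels:", first["labels"])
```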
466
- ### Supported Tasks and Leaderboards
467
-
468
- The supported tasks are the following:
469
-
470
- <table>
471
- <tr><td>Dataset</td><td>Source</td><td>Sub-domain</td><td>Task Type</td><td>Classes</td></tr>
472
- <tr><td>ECtHR (Task A)</td><td> <a href="https://aclanthology.org/P19-1424/">Chalkidis et al. (2019)</a> </td><td>ECHR</td><td>Multi-label classification</td><td>10+1</td></tr>
473
- <tr><td>ECtHR (Task B)</td><td> <a href="https://aclanthology.org/2021.naacl-main.22/">Chalkidis et al. (2021a)</a> </td><td>ECHR</td><td>Multi-label classification </td><td>10+1</td></tr>
474
- <tr><td>SCOTUS</td><td> <a href="http://scdb.wustl.edu">Spaeth et al. (2020)</a></td><td>US Law</td><td>Multi-class classification</td><td>14</td></tr>
475
- <tr><td>EUR-LEX</td><td> <a href="https://arxiv.org/abs/2109.00904">Chalkidis et al. (2021b)</a></td><td>EU Law</td><td>Multi-label classification</td><td>100</td></tr>
476
- <tr><td>LEDGAR</td><td> <a href="https://aclanthology.org/2020.lrec-1.155/">Tuggener et al. (2020)</a></td><td>Contracts</td><td>Multi-class classification</td><td>100</td></tr>
477
- <tr><td>UNFAIR-ToS</td><td><a href="https://arxiv.org/abs/1805.01217"> Lippi et al. (2019)</a></td><td>Contracts</td><td>Multi-label classification</td><td>8+1</td></tr>
478
- <tr><td>CaseHOLD</td><td><a href="https://arxiv.org/abs/2104.08671">Zheng et al. (2021)</a></td><td>US Law</td><td>Multiple choice QA</td><td>n/a</td></tr>
479
- </table>
480
-
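The class counts in the table can be cross-checked against the feature definitions; a small sketch (again assuming the Hub id `lex_glue`):

```python
# Sketch: print the number of classes behind each configuration listed above.
# Multi-label tasks store Sequence(ClassLabel); multi-class tasks store a plain ClassLabel.
from datasets import load_dataset

for name in ["ecthr_a", "ecthr_b", "scotus", "eurlex", "ledgar", "unfair_tos", "case_hold"]:
    features = load_dataset("lex_glue", name, split="test").features
    feature = features.get("labels") or features.get("label")
    class_label = feature.feature if hasattr(feature, "feature") else feature
    # For case_hold this prints 5, i.e. the number of answer slots rather than topic classes.
    print(f"{name}: {class_label.num_classes} classes")
```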
481
- #### ecthr_a
482
-
483
- The European Court of Human Rights (ECtHR) hears allegations that a state has breached human rights provisions of the European Convention on Human Rights (ECHR). For each case, the dataset provides a list of factual paragraphs (facts) from the case description. Each case is mapped to articles of the ECHR that were violated (if any).
484
-
485
- #### ecthr_b
486
-
487
- The European Court of Human Rights (ECtHR) hears allegations that a state has breached human rights provisions of the European Convention on Human Rights (ECHR). For each case, the dataset provides a list of factual paragraphs (facts) from the case description. Each case is mapped to articles of the ECHR that were allegedly violated (considered by the court).
488
-
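Both ECtHR tasks are multi-label over the same ten-article label set, so the integer label lists are usually expanded into multi-hot vectors before training. A minimal, editor-added sketch of that step (the `label_vector` column name is ours):

```python
# Sketch: convert the multi-label `labels` field into a multi-hot vector.
import numpy as np
from datasets import load_dataset

ds = load_dataset("lex_glue", "ecthr_b", split="train")
num_classes = ds.features["labels"].feature.num_classes   # 10 ECHR articles

def to_multi_hot(example):
    vec = np.zeros(num_classes, dtype=np.float32)
    for idx in example["labels"]:          # an empty list yields an all-zero vector
        vec[idx] = 1.0
    return {"label_vector": vec}

ds = ds.map(to_multi_hot)
```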
489
- #### scotus
490
-
491
- The US Supreme Court (SCOTUS) is the highest federal court in the United States of America and generally hears only the most controversial or otherwise complex cases which have not been sufficiently resolved by lower courts. This is a single-label multi-class classification task, where given a document (court opinion), the task is to predict the relevant issue area. The 14 issue areas cluster 278 issues whose focus is on the subject matter of the controversy (dispute).
492
-
493
- #### eurlex
494
-
495
- European Union (EU) legislation is published in the EUR-Lex portal. All EU laws are annotated by the EU's Publications Office with multiple concepts from the EuroVoc thesaurus, a multilingual thesaurus maintained by the Publications Office. The current version of EuroVoc contains more than 7k concepts referring to various activities of the EU and its Member States (e.g., economics, health-care, trade). Given a document, the task is to predict its EuroVoc labels (concepts).
496
-
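For reference (an added sketch), the integer ids in `labels` can be decoded back to EuroVoc concept identifiers via the `ClassLabel` names; the human-readable descriptors live in the JSON file linked in the Data Fields section below.

```python
# Sketch: decode eurlex label ids into EuroVoc concept identifiers (strings).
from datasets import load_dataset

ds = load_dataset("lex_glue", "eurlex", split="train")
class_label = ds.features["labels"].feature        # ClassLabel with 100 concept ids as names
example = ds[0]
concept_ids = [class_label.int2str(i) for i in example["labels"]]
print(concept_ids)                                  # illustrative output, e.g. ['100163', '100171', ...]
```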
497
- #### ledgar
498
-
499
- The LEDGAR dataset targets contract provision (paragraph) classification. The contract provisions come from contracts obtained from US Securities and Exchange Commission (SEC) filings, which are publicly available from EDGAR. Each label represents the single main topic (theme) of the corresponding contract provision.
500
-
501
- #### unfair_tos
502
-
503
- The UNFAIR-ToS dataset contains 50 Terms of Service (ToS) from online platforms (e.g., YouTube, eBay, Facebook). The dataset has been annotated at the sentence level with 8 types of unfair contractual terms, i.e., terms that potentially violate user rights according to European consumer law.
504
-
505
- #### case_hold
506
-
507
- The CaseHOLD (Case Holdings on Legal Decisions) dataset includes multiple choice questions about holdings of US court cases from the Harvard Law Library case law corpus. Holdings are short summaries of legal rulings that accompany referenced decisions and are relevant to the present case. The input consists of an excerpt (or prompt) from a court decision, containing a reference to a particular case, where the holding statement is masked out. The model must identify the correct (masked) holding statement from a selection of five choices.
508
-
509
-
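A small, editor-added sketch of how a CaseHOLD instance is typically presented to a multiple-choice model, i.e., as five (context, candidate holding) pairs scored jointly:

```python
# Sketch: expand one CaseHOLD instance into (context, candidate holding) pairs.
from datasets import load_dataset

ds = load_dataset("lex_glue", "case_hold", split="validation")
example = ds[0]

pairs = [(example["context"], ending) for ending in example["endings"]]
gold = example["label"]            # index (0-4) of the correct holding
print(len(pairs), "candidates; gold choice index:", gold)
```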
510
- The current leaderboard includes several Transformer-based (Vaswani et al., 2017) pre-trained language models, which achieve state-of-the-art performance in most NLP tasks (Bommasani et al., 2021) and NLU benchmarks (Wang et al., 2019a). Results reported by [Chalkidis et al. (2021)](https://arxiv.org/abs/2110.00976):
511
-
512
- *Task-wise Test Results*
513
-
514
- <table>
515
- <tr><td><b>Dataset</b></td><td><b>ECtHR A</b></td><td><b>ECtHR B</b></td><td><b>SCOTUS</b></td><td><b>EUR-LEX</b></td><td><b>LEDGAR</b></td><td><b>UNFAIR-ToS</b></td><td><b>CaseHOLD</b></td></tr>
516
- <tr><td><b>Model</b></td><td>μ-F1 / m-F1 </td><td>μ-F1 / m-F1 </td><td>μ-F1 / m-F1 </td><td>μ-F1 / m-F1 </td><td>μ-F1 / m-F1 </td><td>μ-F1 / m-F1</td><td>μ-F1 / m-F1 </td></tr>
517
- <tr><td>TFIDF+SVM</td><td> 64.7 / 51.7 </td><td>74.6 / 65.1 </td><td> <b>78.2</b> / <b>69.5</b> </td><td>71.3 / 51.4 </td><td>87.2 / 82.4 </td><td>95.4 / 78.8</td><td>n/a </td></tr>
518
- <tr><td colspan="8" style='text-align:center'><b>Medium-sized Models (L=12, H=768, A=12)</b></td></tr>
519
- <tr><td>BERT</td> <td> 71.2 / 63.6 </td> <td> 79.7 / 73.4 </td> <td> 68.3 / 58.3 </td> <td> 71.4 / 57.2 </td> <td> 87.6 / 81.8 </td> <td> 95.6 / 81.3 </td> <td> 70.8 </td> </tr>
520
- <tr><td>RoBERTa</td> <td> 69.2 / 59.0 </td> <td> 77.3 / 68.9 </td> <td> 71.6 / 62.0 </td> <td> 71.9 / <b>57.9</b> </td> <td> 87.9 / 82.3 </td> <td> 95.2 / 79.2 </td> <td> 71.4 </td> </tr>
521
- <tr><td>DeBERTa</td> <td> 70.0 / 60.8 </td> <td> 78.8 / 71.0 </td> <td> 71.1 / 62.7 </td> <td> <b>72.1</b> / 57.4 </td> <td> 88.2 / 83.1 </td> <td> 95.5 / 80.3 </td> <td> 72.6 </td> </tr>
522
- <tr><td>Longformer</td> <td> 69.9 / 64.7 </td> <td> 79.4 / 71.7 </td> <td> 72.9 / 64.0 </td> <td> 71.6 / 57.7 </td> <td> 88.2 / 83.0 </td> <td> 95.5 / 80.9 </td> <td> 71.9 </td> </tr>
523
- <tr><td>BigBird</td> <td> 70.0 / 62.9 </td> <td> 78.8 / 70.9 </td> <td> 72.8 / 62.0 </td> <td> 71.5 / 56.8 </td> <td> 87.8 / 82.6 </td> <td> 95.7 / 81.3 </td> <td> 70.8 </td> </tr>
524
- <tr><td>Legal-BERT</td> <td> 70.0 / 64.0 </td> <td> <b>80.4</b> / <b>74.7</b> </td> <td> 76.4 / 66.5 </td> <td> <b>72.1</b> / 57.4 </td> <td> 88.2 / 83.0 </td> <td> <b>96.0</b> / <b>83.0</b> </td> <td> 75.3 </td> </tr>
525
- <tr><td>CaseLaw-BERT</td> <td> 69.8 / 62.9 </td> <td> 78.8 / 70.3 </td> <td> 76.6 / 65.9 </td> <td> 70.7 / 56.6 </td> <td> 88.3 / 83.0 </td> <td> <b>96.0</b> / 82.3 </td> <td> <b>75.4</b> </td> </tr>
526
- <tr><td colspan="8" style='text-align:center'><b>Large-sized Models (L=24, H=1024, A=18)</b></td></tr>
527
- <tr><td>RoBERTa</td> <td> <b>73.8</b> / <b>67.6</b> </td> <td> 79.8 / 71.6 </td> <td> 75.5 / 66.3 </td> <td> 67.9 / 50.3 </td> <td> <b>88.6</b> / <b>83.6</b> </td> <td> 95.8 / 81.6 </td> <td> 74.4 </td> </tr>
528
- </table>
529
-
530
- *Averaged (Mean over Tasks) Test Results*
531
-
532
- <table>
533
- <tr><td><b>Averaging</b></td><td><b>Arithmetic</b></td><td><b>Harmonic</b></td><td><b>Geometric</b></td></tr>
534
- <tr><td><b>Model</b></td><td>μ-F1 / m-F1 </td><td>μ-F1 / m-F1 </td><td>μ-F1 / m-F1 </td></tr>
535
- <tr><td colspan="4" style='text-align:center'><b>Medium-sized Models (L=12, H=768, A=12)</b></td></tr>
536
- <tr><td>BERT</td><td> 77.8 / 69.5 </td><td> 76.7 / 68.2 </td><td> 77.2 / 68.8 </td></tr>
537
- <tr><td>RoBERTa</td><td> 77.8 / 68.7 </td><td> 76.8 / 67.5 </td><td> 77.3 / 68.1 </td></tr>
538
- <tr><td>DeBERTa</td><td> 78.3 / 69.7 </td><td> 77.4 / 68.5 </td><td> 77.8 / 69.1 </td></tr>
539
- <tr><td>Longformer</td><td> 78.5 / 70.5 </td><td> 77.5 / 69.5 </td><td> 78.0 / 70.0 </td></tr>
540
- <tr><td>BigBird</td><td> 78.2 / 69.6 </td><td> 77.2 / 68.5 </td><td> 77.7 / 69.0 </td></tr>
541
- <tr><td>Legal-BERT</td><td> <b>79.8</b> / <b>72.0</b> </td><td> <b>78.9</b> / <b>70.8</b> </td><td> <b>79.3</b> / <b>71.4</b> </td></tr>
542
- <tr><td>CaseLaw-BERT</td><td> 79.4 / 70.9 </td><td> 78.5 / 69.7 </td><td> 78.9 / 70.3 </td></tr>
543
- <tr><td colspan="4" style='text-align:center'><b>Large-sized Models (L=24, H=1024, A=18)</b></td></tr>
544
- <tr><td>RoBERTa</td><td> 79.4 / 70.8 </td><td> 78.4 / 69.1 </td><td> 78.9 / 70.0 </td></tr>
545
- </table>
546
-
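In the tables above, μ-F1 and m-F1 denote micro- and macro-averaged F1. A minimal sketch of computing both (scikit-learn is our choice here, not something the card prescribes), using dummy multi-hot matrices in place of real predictions:

```python
# Sketch: micro (μ-F1) and macro (m-F1) F1 over multi-hot label matrices.
import numpy as np
from sklearn.metrics import f1_score

y_true = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 0]])   # dummy gold labels
y_pred = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0]])   # dummy predictions

micro_f1 = f1_score(y_true, y_pred, average="micro", zero_division=0)
macro_f1 = f1_score(y_true, y_pred, average="macro", zero_division=0)
print(round(micro_f1, 3), round(macro_f1, 3))
```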
547
- ### Languages
548
-
549
- We only consider English datasets, to make experimentation easier for researchers across the globe.
550
-
551
- ## Dataset Structure
552
-
553
- ### Data Instances
554
-
555
- #### ecthr_a
556
-
557
- An example of 'train' looks as follows.
558
- ```json
559
- {
560
- "text": ["8. The applicant was arrested in the early morning of 21 October 1990 ...", ...],
561
- "labels": [6]
562
- }
563
- ```
564
-
565
- #### ecthr_b
566
-
567
- An example of 'train' looks as follows.
568
- ```json
569
- {
570
- "text": ["8. The applicant was arrested in the early morning of 21 October 1990 ...", ...],
571
- "label": [5, 6]
572
- }
573
- ```
574
-
575
- #### scotus
576
-
577
- An example of 'train' looks as follows.
578
- ```json
579
- {
580
- "text": "Per Curiam\nSUPREME COURT OF THE UNITED STATES\nRANDY WHITE, WARDEN v. ROGER L. WHEELER\n Decided December 14, 2015\nPER CURIAM.\nA death sentence imposed by a Kentucky trial court and\naffirmed by the ...",
581
- "label": 8
582
- }
583
- ```
584
-
585
- #### eurlex
586
-
587
- An example of 'train' looks as follows.
588
- ```json
589
- {
590
- "text": "COMMISSION REGULATION (EC) No 1629/96 of 13 August 1996 on an invitation to tender for the refund on export of wholly milled round grain rice to certain third countries ...",
591
- "labels": [4, 20, 21, 35, 68]
592
- }
593
- ```
594
-
595
- #### ledgar
596
-
597
- An example of 'train' looks as follows.
598
- ```json
599
- {
600
- "text": "All Taxes shall be the financial responsibility of the party obligated to pay such Taxes as determined by applicable law and neither party is or shall be liable at any time for any of the other party ...",
601
- "label": 32
602
- }
603
- ```
604
-
605
- #### unfair_tos
606
-
607
- An example of 'train' looks as follows.
608
- ```json
609
- {
610
- "text": "tinder may terminate your account at any time without notice if it believes that you have violated this agreement.",
611
- "label": 2
612
- }
613
- ```
614
-
615
- #### case_hold
616
-
617
- An example of 'test' looks as follows.
618
- ```json
619
- {
620
- "context": "In Granato v. City and County of Denver, No. CIV 11-0304 MSK/BNB, 2011 WL 3820730 (D.Colo. Aug. 20, 2011), the Honorable Marcia S. Krieger, now-Chief United States District Judge for the District of Colorado, ruled similarly: At a minimum, a party asserting a Mo-nell claim must plead sufficient facts to identify ... to act pursuant to City or State policy, custom, decision, ordinance, re d 503, 506-07 (3d Cir.l985)(<HOLDING>).",
621
- "endings": ["holding that courts are to accept allegations in the complaint as being true including monell policies and writing that a federal court reviewing the sufficiency of a complaint has a limited task",
622
- "holding that for purposes of a class certification motion the court must accept as true all factual allegations in the complaint and may draw reasonable inferences therefrom",
623
- "recognizing that the allegations of the complaint must be accepted as true on a threshold motion to dismiss",
624
- "holding that a court need not accept as true conclusory allegations which are contradicted by documents referred to in the complaint",
625
- "holding that where the defendant was in default the district court correctly accepted the fact allegations of the complaint as true"
626
- ],
627
- "label": 0
628
- }
629
- ```
630
-
631
- ### Data Fields
632
-
633
- #### ecthr_a
634
- - `text`: a list of `string` features (list of factual paragraphs (facts) from the case description).
635
- - `labels`: a list of classification labels (a list of violated ECHR articles, if any).
636
- <details>
637
- <summary>List of ECHR articles</summary>
638
- "Article 2", "Article 3", "Article 5", "Article 6", "Article 8", "Article 9", "Article 10", "Article 11", "Article 14", "Article 1 of Protocol 1"
639
- </details>
640
-
641
- #### ecthr_b
642
- - `text`: a list of `string` features (list of factual paragraphs (facts) from the case description)
643
- - `labels`: a list of classification labels (a list of articles considered).
644
- <details>
645
- <summary>List of ECHR articles</summary>
646
- "Article 2", "Article 3", "Article 5", "Article 6", "Article 8", "Article 9", "Article 10", "Article 11", "Article 14", "Article 1 of Protocol 1"
647
- </details>
648
-
649
- #### scotus
650
- - `text`: a `string` feature (the court opinion).
651
- - `label`: a classification label (the relevant issue area).
652
- <details>
653
- <summary>List of issue areas</summary>
654
- (1, Criminal Procedure), (2, Civil Rights), (3, First Amendment), (4, Due Process), (5, Privacy), (6, Attorneys), (7, Unions), (8, Economic Activity), (9, Judicial Power), (10, Federalism), (11, Interstate Relations), (12, Federal Taxation), (13, Miscellaneous), (14, Private Action)
655
- </details>
656
-
657
- #### eurlex
658
- - `text`: a `string` feature (an EU law).
659
- - `labels`: a list of classification labels (a list of relevant EUROVOC concepts).
660
- <details>
661
- <summary>List of EUROVOC concepts</summary>
662
- The list is very long, including 100 EUROVOC concepts. You can find the EUROVOC concept descriptors <a href="https://raw.githubusercontent.com/nlpaueb/multi-eurlex/master/data/eurovoc_descriptors.json">here</a>.
663
- </details>
664
-
665
- #### ledgar
666
- - `text`: a `string` feature (a contract provision/paragraph).
667
- - `label`: a classification label (the type of contract provision).
668
- <details>
669
- <summary>List of contract provision types</summary>
670
- "Adjustments", "Agreements", "Amendments", "Anti-Corruption Laws", "Applicable Laws", "Approvals", "Arbitration", "Assignments", "Assigns", "Authority", "Authorizations", "Base Salary", "Benefits", "Binding Effects", "Books", "Brokers", "Capitalization", "Change In Control", "Closings", "Compliance With Laws", "Confidentiality", "Consent To Jurisdiction", "Consents", "Construction", "Cooperation", "Costs", "Counterparts", "Death", "Defined Terms", "Definitions", "Disability", "Disclosures", "Duties", "Effective Dates", "Effectiveness", "Employment", "Enforceability", "Enforcements", "Entire Agreements", "Erisa", "Existence", "Expenses", "Fees", "Financial Statements", "Forfeitures", "Further Assurances", "General", "Governing Laws", "Headings", "Indemnifications", "Indemnity", "Insurances", "Integration", "Intellectual Property", "Interests", "Interpretations", "Jurisdictions", "Liens", "Litigations", "Miscellaneous", "Modifications", "No Conflicts", "No Defaults", "No Waivers", "Non-Disparagement", "Notices", "Organizations", "Participations", "Payments", "Positions", "Powers", "Publicity", "Qualifications", "Records", "Releases", "Remedies", "Representations", "Sales", "Sanctions", "Severability", "Solvency", "Specific Performance", "Submission To Jurisdiction", "Subsidiaries", "Successors", "Survival", "Tax Withholdings", "Taxes", "Terminations", "Terms", "Titles", "Transactions With Affiliates", "Use Of Proceeds", "Vacations", "Venues", "Vesting", "Waiver Of Jury Trials", "Waivers", "Warranties", "Withholdings",
671
- </details>
672
-
673
- #### unfair_tos
674
- - `text`: a `string` feature (a ToS sentence)
675
- - `labels`: a list of classification labels (a list of unfair types, if any).
676
- <details>
677
- <summary>List of unfair types</summary>
678
- "Limitation of liability", "Unilateral termination", "Unilateral change", "Content removal", "Contract by using", "Choice of law", "Jurisdiction", "Arbitration"
679
- </details>
680
-
681
- #### case_hold
682
- - `context`: a `string` feature (a context sentence incl. a masked holding statement).
683
- - `endings`: a list of `string` features (a list of candidate holding statements).
684
- - `label`: a classification label (the id of the original/correct holding).
685
-
686
-
687
- ### Data Splits
688
-
689
- <table>
690
- <tr><td>Dataset </td><td>Training</td><td>Development</td><td>Test</td><td>Total</td></tr>
691
- <tr><td>ECtHR (Task A)</td><td>9,000</td><td>1,000</td><td>1,000</td><td>11,000</td></tr>
692
- <tr><td>ECtHR (Task B)</td><td>9,000</td><td>1,000</td><td>1,000</td><td>11,000</td></tr>
693
- <tr><td>SCOTUS</td><td>5,000</td><td>1,400</td><td>1,400</td><td>7,800</td></tr>
694
- <tr><td>EUR-LEX</td><td>55,000</td><td>5,000</td><td>5,000</td><td>65,000</td></tr>
695
- <tr><td>LEDGAR</td><td>60,000</td><td>10,000</td><td>10,000</td><td>80,000</td></tr>
696
- <tr><td>UNFAIR-ToS</td><td>5,532</td><td>2,275</td><td>1,607</td><td>9,414</td></tr>
697
- <tr><td>CaseHOLD</td><td>45,000</td><td>3,900</td><td>3,900</td><td>52,800</td></tr>
698
- </table>
699
-
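The split sizes in this table can be reproduced directly from the loaded `DatasetDict`; a short, editor-added sketch for one task (assuming the Hub id `lex_glue`):

```python
# Sketch: check the split sizes reported above for LEDGAR.
from datasets import load_dataset

ledgar = load_dataset("lex_glue", "ledgar")
print({split: ledgar[split].num_rows for split in ("train", "validation", "test")})
# Expected, per the table: 60,000 / 10,000 / 10,000
```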
700
- ## Dataset Creation
701
-
702
- ### Curation Rationale
703
-
704
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
705
-
706
- ### Source Data
707
- <table>
708
- <tr><td>Dataset</td><td>Source</td><td>Sub-domain</td><td>Task Type</td></tr>
709
- <tr><td>ECtHR (Task A)</td><td> <a href="https://aclanthology.org/P19-1424/">Chalkidis et al. (2019)</a> </td><td>ECHR</td><td>Multi-label classification</td></tr>
710
- <tr><td>ECtHR (Task B)</td><td> <a href="https://aclanthology.org/2021.naacl-main.22/">Chalkidis et al. (2021a)</a> </td><td>ECHR</td><td>Multi-label classification </td></tr>
711
- <tr><td>SCOTUS</td><td> <a href="http://scdb.wustl.edu">Spaeth et al. (2020)</a></td><td>US Law</td><td>Multi-class classification</td></tr>
712
- <tr><td>EUR-LEX</td><td> <a href="https://arxiv.org/abs/2109.00904">Chalkidis et al. (2021b)</a></td><td>EU Law</td><td>Multi-label classification</td></tr>
713
- <tr><td>LEDGAR</td><td> <a href="https://aclanthology.org/2020.lrec-1.155/">Tuggener et al. (2020)</a></td><td>Contracts</td><td>Multi-class classification</td></tr>
714
- <tr><td>UNFAIR-ToS</td><td><a href="https://arxiv.org/abs/1805.01217"> Lippi et al. (2019)</a></td><td>Contracts</td><td>Multi-label classification</td></tr>
715
- <tr><td>CaseHOLD</td><td><a href="https://arxiv.org/abs/2104.08671">Zheng et al. (2021)</a></td><td>US Law</td><td>Multiple choice QA</td></tr>
716
- </table>
717
-
718
- #### Initial Data Collection and Normalization
719
-
720
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
721
-
722
- #### Who are the source language producers?
723
-
724
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
725
-
726
- ### Annotations
727
-
728
- #### Annotation process
729
-
730
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
731
-
732
- #### Who are the annotators?
733
-
734
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
735
-
736
- ### Personal and Sensitive Information
737
-
738
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
739
-
740
- ## Considerations for Using the Data
741
-
742
- ### Social Impact of Dataset
743
-
744
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
745
-
746
-
747
- ### Discussion of Biases
748
-
749
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
750
-
751
-
752
- ### Other Known Limitations
753
-
754
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
755
-
756
-
757
- ## Additional Information
758
-
759
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
760
-
761
-
762
- ### Dataset Curators
763
-
764
- *Ilias Chalkidis, Abhik Jana, Dirk Hartung, Michael Bommarito, Ion Androutsopoulos, Daniel Martin Katz, and Nikolaos Aletras.*
765
- *LexGLUE: A Benchmark Dataset for Legal Language Understanding in English.*
766
- *2022. In the Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics. Dublin, Ireland.*
767
-
768
-
769
- ### Licensing Information
770
-
771
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
772
-
773
- ### Citation Information
774
-
775
- [*Ilias Chalkidis, Abhik Jana, Dirk Hartung, Michael Bommarito, Ion Androutsopoulos, Daniel Martin Katz, and Nikolaos Aletras.*
776
- *LexGLUE: A Benchmark Dataset for Legal Language Understanding in English.*
777
- *2022. In the Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics. Dublin, Ireland.*](https://arxiv.org/abs/2110.00976)
778
- ```
779
- @inproceedings{chalkidis-etal-2021-lexglue,
780
- title={LexGLUE: A Benchmark Dataset for Legal Language Understanding in English},
781
- author={Chalkidis, Ilias and Jana, Abhik and Hartung, Dirk and
782
- Bommarito, Michael and Androutsopoulos, Ion and Katz, Daniel Martin and
783
- Aletras, Nikolaos},
784
- year={2022},
785
- booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics},
786
- address={Dublin, Ireland},
787
- }
788
- ```
789
-
790
- ### Contributions
791
-
792
- Thanks to [@iliaschalkidis](https://github.com/iliaschalkidis) for adding this dataset.
case_hold/lex_glue-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b973749d6d9672b7a3c7d457407c54a73f6fafa869495e5f7075ffba7ed23c59
3
+ size 3256650
case_hold/lex_glue-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f527bdeffaa7d0e7ef55c82df7fee8494de883c3436920119a3ba29d7fd9e9ae
3
+ size 40533451
case_hold/lex_glue-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:97a8e224ab59c790d2ed94de2e6a20595d7fa88c8f5571e5ac2221289c1ba9f9
3
+ size 3513433
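The three `case_hold/*.parquet` entries above (and the per-config files below) are Git LFS pointer files: each records only the LFS spec version, the SHA-256 of the real object, and its size. A hedged sketch of inspecting one converted shard once the actual Parquet data has been fetched (for example after `git lfs pull` in a local clone):

```python
# Sketch: inspect one converted shard with pandas (requires pyarrow or fastparquet).
# The path matches this commit's layout; the file must be the real Parquet object,
# not the small LFS pointer shown in the diff.
import pandas as pd

df = pd.read_parquet("case_hold/lex_glue-validation.parquet")
print(df.shape)               # expected 3,900 rows and 3 columns per the dataset card
print(df.columns.tolist())    # ['context', 'endings', 'label']
```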
dataset_infos.json DELETED
@@ -1 +0,0 @@
1
- {"ecthr_a": {"description": "The European Court of Human Rights (ECtHR) hears allegations that a state has\nbreached human rights provisions of the European Convention of Human Rights (ECHR).\nFor each case, the dataset provides a list of factual paragraphs (facts) from the case description.\nEach case is mapped to articles of the ECHR that were violated (if any).", "citation": "@inproceedings{chalkidis-etal-2021-paragraph,\n title = \"Paragraph-level Rationale Extraction through Regularization: A case study on {E}uropean Court of Human Rights Cases\",\n author = \"Chalkidis, Ilias and\n Fergadiotis, Manos and\n Tsarapatsanis, Dimitrios and\n Aletras, Nikolaos and\n Androutsopoulos, Ion and\n Malakasiotis, Prodromos\",\n booktitle = \"Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies\",\n month = jun,\n year = \"2021\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2021.naacl-main.22\",\n doi = \"10.18653/v1/2021.naacl-main.22\",\n pages = \"226--241\",\n}\n}\n@article{chalkidis-etal-2021-lexglue,\n title={{LexGLUE}: A Benchmark Dataset for Legal Language Understanding in English},\n author={Chalkidis, Ilias and\n Jana, Abhik and\n Hartung, Dirk and\n Bommarito, Michael and\n Androutsopoulos, Ion and\n Katz, Daniel Martin and\n Aletras, Nikolaos},\n year={2021},\n eprint={2110.00976},\n archivePrefix={arXiv},\n primaryClass={cs.CL},\n note = {arXiv: 2110.00976},\n}", "homepage": "https://archive.org/details/ECtHR-NAACL2021", "license": "", "features": {"text": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "labels": {"feature": {"num_classes": 10, "names": ["2", "3", "5", "6", "8", "9", "10", "11", "14", "P1-1"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "lex_glue", "config_name": "ecthr_a", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 89637461, "num_examples": 9000, "dataset_name": "lex_glue"}, "test": {"name": "test", "num_bytes": 11884180, "num_examples": 1000, "dataset_name": "lex_glue"}, "validation": {"name": "validation", "num_bytes": 10985180, "num_examples": 1000, "dataset_name": "lex_glue"}}, "download_checksums": {"https://zenodo.org/record/5532997/files/ecthr.tar.gz": {"num_bytes": 32852475, "checksum": "461c1f6016af3a7ac0bd115c1f9ff65031258bfec39e570fec74a16d8946398e"}}, "download_size": 32852475, "post_processing_size": null, "dataset_size": 112506821, "size_in_bytes": 145359296}, "ecthr_b": {"description": "The European Court of Human Rights (ECtHR) hears allegations that a state has\nbreached human rights provisions of the European Convention of Human Rights (ECHR).\nFor each case, the dataset provides a list of factual paragraphs (facts) from the case description.\nEach case is mapped to articles of ECHR that were allegedly violated (considered by the court).", "citation": "@inproceedings{chalkidis-etal-2021-paragraph,\n title = \"Paragraph-level Rationale Extraction through Regularization: A case study on {E}uropean Court of Human Rights Cases\",\n author = \"Chalkidis, Ilias\n and Fergadiotis, Manos\n and Tsarapatsanis, Dimitrios\n and Aletras, Nikolaos\n and Androutsopoulos, Ion\n and Malakasiotis, Prodromos\",\n 
booktitle = \"Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies\",\n year = \"2021\",\n address = \"Online\",\n url = \"https://aclanthology.org/2021.naacl-main.22\",\n}\n}\n@article{chalkidis-etal-2021-lexglue,\n title={{LexGLUE}: A Benchmark Dataset for Legal Language Understanding in English},\n author={Chalkidis, Ilias and\n Jana, Abhik and\n Hartung, Dirk and\n Bommarito, Michael and\n Androutsopoulos, Ion and\n Katz, Daniel Martin and\n Aletras, Nikolaos},\n year={2021},\n eprint={2110.00976},\n archivePrefix={arXiv},\n primaryClass={cs.CL},\n note = {arXiv: 2110.00976},\n}", "homepage": "https://archive.org/details/ECtHR-NAACL2021", "license": "", "features": {"text": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "labels": {"feature": {"num_classes": 10, "names": ["2", "3", "5", "6", "8", "9", "10", "11", "14", "P1-1"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "lex_glue", "config_name": "ecthr_b", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 89657661, "num_examples": 9000, "dataset_name": "lex_glue"}, "test": {"name": "test", "num_bytes": 11886940, "num_examples": 1000, "dataset_name": "lex_glue"}, "validation": {"name": "validation", "num_bytes": 10987828, "num_examples": 1000, "dataset_name": "lex_glue"}}, "download_checksums": {"https://zenodo.org/record/5532997/files/ecthr.tar.gz": {"num_bytes": 32852475, "checksum": "461c1f6016af3a7ac0bd115c1f9ff65031258bfec39e570fec74a16d8946398e"}}, "download_size": 32852475, "post_processing_size": null, "dataset_size": 112532429, "size_in_bytes": 145384904}, "eurlex": {"description": "European Union (EU) legislation is published in EUR-Lex portal.\nAll EU laws are annotated by EU's Publications Office with multiple concepts from the EuroVoc thesaurus,\na multilingual thesaurus maintained by the Publications Office.\nThe current version of EuroVoc contains more than 7k concepts referring to various activities\nof the EU and its Member States (e.g., economics, health-care, trade).\nGiven a document, the task is to predict its EuroVoc labels (concepts).", "citation": "@inproceedings{chalkidis-etal-2021-multieurlex,\n author = {Chalkidis, Ilias and\n Fergadiotis, Manos and\n Androutsopoulos, Ion},\n title = {MultiEURLEX -- A multi-lingual and multi-label legal document\n classification dataset for zero-shot cross-lingual transfer},\n booktitle = {Proceedings of the 2021 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2021},\n location = {Punta Cana, Dominican Republic},\n}\n}\n@article{chalkidis-etal-2021-lexglue,\n title={{LexGLUE}: A Benchmark Dataset for Legal Language Understanding in English},\n author={Chalkidis, Ilias and\n Jana, Abhik and\n Hartung, Dirk and\n Bommarito, Michael and\n Androutsopoulos, Ion and\n Katz, Daniel Martin and\n Aletras, Nikolaos},\n year={2021},\n eprint={2110.00976},\n archivePrefix={arXiv},\n primaryClass={cs.CL},\n note = {arXiv: 2110.00976},\n}", "homepage": "https://zenodo.org/record/5363165#.YVJOAi8RqaA", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "labels": {"feature": {"num_classes": 100, "names": ["100163", "100168", "100169", "100170", "100171", 
"100172", "100173", "100174", "100175", "100176", "100177", "100179", "100180", "100183", "100184", "100185", "100186", "100187", "100189", "100190", "100191", "100192", "100193", "100194", "100195", "100196", "100197", "100198", "100199", "100200", "100201", "100202", "100204", "100205", "100206", "100207", "100212", "100214", "100215", "100220", "100221", "100222", "100223", "100224", "100226", "100227", "100229", "100230", "100231", "100232", "100233", "100234", "100235", "100237", "100238", "100239", "100240", "100241", "100242", "100243", "100244", "100245", "100246", "100247", "100248", "100249", "100250", "100252", "100253", "100254", "100255", "100256", "100257", "100258", "100259", "100260", "100261", "100262", "100263", "100264", "100265", "100266", "100268", "100269", "100270", "100271", "100272", "100273", "100274", "100275", "100276", "100277", "100278", "100279", "100280", "100281", "100282", "100283", "100284", "100285"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "lex_glue", "config_name": "eurlex", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 390770289, "num_examples": 55000, "dataset_name": "lex_glue"}, "test": {"name": "test", "num_bytes": 59739102, "num_examples": 5000, "dataset_name": "lex_glue"}, "validation": {"name": "validation", "num_bytes": 41544484, "num_examples": 5000, "dataset_name": "lex_glue"}}, "download_checksums": {"https://zenodo.org/record/5532997/files/eurlex.tar.gz": {"num_bytes": 125413277, "checksum": "82376ff55c3812632d8a21ad0d7e515e2e7ec6431ca7673a454cdd41a3a7bf46"}}, "download_size": 125413277, "post_processing_size": null, "dataset_size": 492053875, "size_in_bytes": 617467152}, "scotus": {"description": "The US Supreme Court (SCOTUS) is the highest federal court in the United States of America\nand generally hears only the most controversial or otherwise complex cases which have not\nbeen sufficiently well solved by lower courts. This is a single-label multi-class classification\ntask, where given a document (court opinion), the task is to predict the relevant issue areas.\nThe 14 issue areas cluster 278 issues whose focus is on the subject matter of the controversy (dispute).", "citation": "@misc{spaeth2020,\n author = {Harold J. Spaeth and Lee Epstein and Andrew D. Martin, Jeffrey A. Segal\n and Theodore J. Ruger and Sara C. 
Benesh},\n year = {2020},\n title ={{Supreme Court Database, Version 2020 Release 01}},\n url= {http://Supremecourtdatabase.org},\n howpublished={Washington University Law}\n}\n@article{chalkidis-etal-2021-lexglue,\n title={{LexGLUE}: A Benchmark Dataset for Legal Language Understanding in English},\n author={Chalkidis, Ilias and\n Jana, Abhik and\n Hartung, Dirk and\n Bommarito, Michael and\n Androutsopoulos, Ion and\n Katz, Daniel Martin and\n Aletras, Nikolaos},\n year={2021},\n eprint={2110.00976},\n archivePrefix={arXiv},\n primaryClass={cs.CL},\n note = {arXiv: 2110.00976},\n}", "homepage": "http://scdb.wustl.edu/data.php", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 13, "names": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13"], "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "lex_glue", "config_name": "scotus", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 178959320, "num_examples": 5000, "dataset_name": "lex_glue"}, "test": {"name": "test", "num_bytes": 76213283, "num_examples": 1400, "dataset_name": "lex_glue"}, "validation": {"name": "validation", "num_bytes": 75600247, "num_examples": 1400, "dataset_name": "lex_glue"}}, "download_checksums": {"https://zenodo.org/record/5532997/files/scotus.tar.gz": {"num_bytes": 104763335, "checksum": "d53cc99aaf60b24ca7e4cf634f08a2572b5b3532f83aecdfc2c4257050dc9d0a"}}, "download_size": 104763335, "post_processing_size": null, "dataset_size": 330772850, "size_in_bytes": 435536185}, "ledgar": {"description": "LEDGAR dataset aims contract provision (paragraph) classification.\nThe contract provisions come from contracts obtained from the US Securities and Exchange Commission (SEC)\nfilings, which are publicly available from EDGAR. 
Each label represents the single main topic\n(theme) of the corresponding contract provision.", "citation": "@inproceedings{tuggener-etal-2020-ledgar,\n title = \"{LEDGAR}: A Large-Scale Multi-label Corpus for Text Classification of Legal Provisions in Contracts\",\n author = {Tuggener, Don and\n von D{\"a}niken, Pius and\n Peetz, Thomas and\n Cieliebak, Mark},\n booktitle = \"Proceedings of the 12th Language Resources and Evaluation Conference\",\n year = \"2020\",\n address = \"Marseille, France\",\n url = \"https://aclanthology.org/2020.lrec-1.155\",\n}\n}\n@article{chalkidis-etal-2021-lexglue,\n title={{LexGLUE}: A Benchmark Dataset for Legal Language Understanding in English},\n author={Chalkidis, Ilias and\n Jana, Abhik and\n Hartung, Dirk and\n Bommarito, Michael and\n Androutsopoulos, Ion and\n Katz, Daniel Martin and\n Aletras, Nikolaos},\n year={2021},\n eprint={2110.00976},\n archivePrefix={arXiv},\n primaryClass={cs.CL},\n note = {arXiv: 2110.00976},\n}", "homepage": "https://metatext.io/datasets/ledgar", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 100, "names": ["Adjustments", "Agreements", "Amendments", "Anti-Corruption Laws", "Applicable Laws", "Approvals", "Arbitration", "Assignments", "Assigns", "Authority", "Authorizations", "Base Salary", "Benefits", "Binding Effects", "Books", "Brokers", "Capitalization", "Change In Control", "Closings", "Compliance With Laws", "Confidentiality", "Consent To Jurisdiction", "Consents", "Construction", "Cooperation", "Costs", "Counterparts", "Death", "Defined Terms", "Definitions", "Disability", "Disclosures", "Duties", "Effective Dates", "Effectiveness", "Employment", "Enforceability", "Enforcements", "Entire Agreements", "Erisa", "Existence", "Expenses", "Fees", "Financial Statements", "Forfeitures", "Further Assurances", "General", "Governing Laws", "Headings", "Indemnifications", "Indemnity", "Insurances", "Integration", "Intellectual Property", "Interests", "Interpretations", "Jurisdictions", "Liens", "Litigations", "Miscellaneous", "Modifications", "No Conflicts", "No Defaults", "No Waivers", "Non-Disparagement", "Notices", "Organizations", "Participations", "Payments", "Positions", "Powers", "Publicity", "Qualifications", "Records", "Releases", "Remedies", "Representations", "Sales", "Sanctions", "Severability", "Solvency", "Specific Performance", "Submission To Jurisdiction", "Subsidiaries", "Successors", "Survival", "Tax Withholdings", "Taxes", "Terminations", "Terms", "Titles", "Transactions With Affiliates", "Use Of Proceeds", "Vacations", "Venues", "Vesting", "Waiver Of Jury Trials", "Waivers", "Warranties", "Withholdings"], "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "lex_glue", "config_name": "ledgar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 43358315, "num_examples": 60000, "dataset_name": "lex_glue"}, "test": {"name": "test", "num_bytes": 6845585, "num_examples": 10000, "dataset_name": "lex_glue"}, "validation": {"name": "validation", "num_bytes": 7143592, "num_examples": 10000, "dataset_name": "lex_glue"}}, "download_checksums": {"https://zenodo.org/record/5532997/files/ledgar.tar.gz": {"num_bytes": 16255623, "checksum": "f7507bcce46ce03e3e91b8aaa1b84ddf6e8f1d628c0d7fa351f97ce45366d5d8"}}, "download_size": 16255623, "post_processing_size": null, "dataset_size": 57347492, 
"size_in_bytes": 73603115}, "unfair_tos": {"description": "The UNFAIR-ToS dataset contains 50 Terms of Service (ToS) from on-line platforms (e.g., YouTube,\nEbay, Facebook, etc.). The dataset has been annotated on the sentence-level with 8 types of\nunfair contractual terms (sentences), meaning terms that potentially violate user rights\naccording to the European consumer law.", "citation": "@article{lippi-etal-2019-claudette,\n title = \"{CLAUDETTE}: an automated detector of potentially unfair clauses in online terms of service\",\n author = {Lippi, Marco\n and Pa\u0142ka, Przemys\u0142aw\n and Contissa, Giuseppe\n and Lagioia, Francesca\n and Micklitz, Hans-Wolfgang\n and Sartor, Giovanni\n and Torroni, Paolo},\n journal = \"Artificial Intelligence and Law\",\n year = \"2019\",\n publisher = \"Springer\",\n url = \"https://doi.org/10.1007/s10506-019-09243-2\",\n pages = \"117--139\",\n}\n@article{chalkidis-etal-2021-lexglue,\n title={{LexGLUE}: A Benchmark Dataset for Legal Language Understanding in English},\n author={Chalkidis, Ilias and\n Jana, Abhik and\n Hartung, Dirk and\n Bommarito, Michael and\n Androutsopoulos, Ion and\n Katz, Daniel Martin and\n Aletras, Nikolaos},\n year={2021},\n eprint={2110.00976},\n archivePrefix={arXiv},\n primaryClass={cs.CL},\n note = {arXiv: 2110.00976},\n}", "homepage": "http://claudette.eui.eu", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "labels": {"feature": {"num_classes": 8, "names": ["Limitation of liability", "Unilateral termination", "Unilateral change", "Content removal", "Contract by using", "Choice of law", "Jurisdiction", "Arbitration"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "lex_glue", "config_name": "unfair_tos", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1041790, "num_examples": 5532, "dataset_name": "lex_glue"}, "test": {"name": "test", "num_bytes": 303107, "num_examples": 1607, "dataset_name": "lex_glue"}, "validation": {"name": "validation", "num_bytes": 452119, "num_examples": 2275, "dataset_name": "lex_glue"}}, "download_checksums": {"https://zenodo.org/record/5532997/files/unfair_tos.tar.gz": {"num_bytes": 511342, "checksum": "934470d74b62139dfbfad4a13b75a32e4a4d26a680ab12eedfb7659cdf669d53"}}, "download_size": 511342, "post_processing_size": null, "dataset_size": 1797016, "size_in_bytes": 2308358}, "case_hold": {"description": "The CaseHOLD (Case Holdings on Legal Decisions) dataset contains approx. 53k multiple choice\nquestions about holdings of US court cases from the Harvard Law Library case law corpus.\nHoldings are short summaries of legal rulings accompany referenced decisions relevant for the present case.\nThe input consists of an excerpt (or prompt) from a court decision, containing a reference\nto a particular case, while the holding statement is masked out. The model must identify\nthe correct (masked) holding statement from a selection of five choices.", "citation": "@inproceedings{Zheng2021,\n author = {Lucia Zheng and\n Neel Guha and\n Brandon R. Anderson and\n Peter Henderson and\n Daniel E. Ho},\n title = {When Does Pretraining Help? 
Assessing Self-Supervised Learning for\n Law and the CaseHOLD Dataset},\n year = {2021},\n booktitle = {International Conference on Artificial Intelligence and Law},\n}\n@article{chalkidis-etal-2021-lexglue,\n title={{LexGLUE}: A Benchmark Dataset for Legal Language Understanding in English},\n author={Chalkidis, Ilias and\n Jana, Abhik and\n Hartung, Dirk and\n Bommarito, Michael and\n Androutsopoulos, Ion and\n Katz, Daniel Martin and\n Aletras, Nikolaos},\n year={2021},\n eprint={2110.00976},\n archivePrefix={arXiv},\n primaryClass={cs.CL},\n note = {arXiv: 2110.00976},\n}", "homepage": "https://github.com/reglab/casehold", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "endings": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "label": {"num_classes": 5, "names": ["0", "1", "2", "3", "4"], "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "lex_glue", "config_name": "case_hold", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 74781766, "num_examples": 45000, "dataset_name": "lex_glue"}, "test": {"name": "test", "num_bytes": 5989964, "num_examples": 3600, "dataset_name": "lex_glue"}, "validation": {"name": "validation", "num_bytes": 6474615, "num_examples": 3900, "dataset_name": "lex_glue"}}, "download_checksums": {"https://zenodo.org/record/5532997/files/casehold.tar.gz": {"num_bytes": 30422703, "checksum": "728827dae0019880fe6be609e23f8c47fa2b49a2f0814a36687ace8db1c32d5e"}}, "download_size": 30422703, "post_processing_size": null, "dataset_size": 87246345, "size_in_bytes": 117669048}}
 
 
ecthr_a/lex_glue-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a1a12d4b6fd508147f442ac68ba4ffd466af1bceaab5315894f73bea9767fb3a
3
+ size 5677113
ecthr_a/lex_glue-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4951103522bb31d117fdb7212552056b4577dfaed9aa47b39c8ede0b6e006aa4
3
+ size 42415323
ecthr_a/lex_glue-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4bfae860b6429fdb10875756e4fa5f430cdf0a5bfce4d1cdfbe7700b81c8159a
3
+ size 5260147
ecthr_b/lex_glue-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e0f13660a23f329b62854fb34110ea49df9c69e31b72780394f5cdcfe2a4cd8
3
+ size 5677090
ecthr_b/lex_glue-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d71538667f7f482416157b609b615f51be85c81f9fd0224f8cedb02ffa356d15
3
+ size 42415289
ecthr_b/lex_glue-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:20a2c5dcf46900919d38b6542d85529bf8ec4fe40752fcacafb7372213ff2490
3
+ size 5260112
eurlex/lex_glue-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:727f8f3577db7833b8ef6aa61ff69e683a35ae4c79014b89dc9c6623aa45690f
3
+ size 24273775
eurlex/lex_glue-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b5da15e426b108683f5d9d441dbb0f439ac7b5248e63bf458ed16b2e1a5ca65f
3
+ size 166694289
eurlex/lex_glue-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4051525287a01684a42086691dbb27f79c68b0d1c0c93707b07081a8616dea78
3
+ size 17059982
ledgar/lex_glue-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea9af6d1cc1cac05622f64dd39489ad5730f06a470e0749356c1f9b60500ecdd
3
+ size 3313507
ledgar/lex_glue-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c16694c2a08dd52450dc48b4a7e472089a0d58ccbccb2c63bd433b443306cd39
3
+ size 20897972
ledgar/lex_glue-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c65bfb5959d5ea0f4098b98abdb709c584f28ab8fc120d428558bd3984e8d581
3
+ size 3439103
lex_glue.py DELETED
@@ -1,659 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """LexGLUE: A Benchmark Dataset for Legal Language Understanding in English."""
16
-
17
- import csv
18
- import json
19
- import textwrap
20
-
21
- import datasets
22
-
23
-
24
- MAIN_CITATION = """\
25
- @article{chalkidis-etal-2021-lexglue,
26
- title={{LexGLUE}: A Benchmark Dataset for Legal Language Understanding in English},
27
- author={Chalkidis, Ilias and
28
- Jana, Abhik and
29
- Hartung, Dirk and
30
- Bommarito, Michael and
31
- Androutsopoulos, Ion and
32
- Katz, Daniel Martin and
33
- Aletras, Nikolaos},
34
- year={2021},
35
- eprint={2110.00976},
36
- archivePrefix={arXiv},
37
- primaryClass={cs.CL},
38
- note = {arXiv: 2110.00976},
39
- }"""
40
-
41
- _DESCRIPTION = """\
42
- Legal General Language Understanding Evaluation (LexGLUE) benchmark is
43
- a collection of datasets for evaluating model performance across a diverse set of legal NLU tasks
44
- """
45
-
46
- ECTHR_ARTICLES = ["2", "3", "5", "6", "8", "9", "10", "11", "14", "P1-1"]
47
-
48
- EUROVOC_CONCEPTS = [
49
- "100163",
50
- "100168",
51
- "100169",
52
- "100170",
53
- "100171",
54
- "100172",
55
- "100173",
56
- "100174",
57
- "100175",
58
- "100176",
59
- "100177",
60
- "100179",
61
- "100180",
62
- "100183",
63
- "100184",
64
- "100185",
65
- "100186",
66
- "100187",
67
- "100189",
68
- "100190",
69
- "100191",
70
- "100192",
71
- "100193",
72
- "100194",
73
- "100195",
74
- "100196",
75
- "100197",
76
- "100198",
77
- "100199",
78
- "100200",
79
- "100201",
80
- "100202",
81
- "100204",
82
- "100205",
83
- "100206",
84
- "100207",
85
- "100212",
86
- "100214",
87
- "100215",
88
- "100220",
89
- "100221",
90
- "100222",
91
- "100223",
92
- "100224",
93
- "100226",
94
- "100227",
95
- "100229",
96
- "100230",
97
- "100231",
98
- "100232",
99
- "100233",
100
- "100234",
101
- "100235",
102
- "100237",
103
- "100238",
104
- "100239",
105
- "100240",
106
- "100241",
107
- "100242",
108
- "100243",
109
- "100244",
110
- "100245",
111
- "100246",
112
- "100247",
113
- "100248",
114
- "100249",
115
- "100250",
116
- "100252",
117
- "100253",
118
- "100254",
119
- "100255",
120
- "100256",
121
- "100257",
122
- "100258",
123
- "100259",
124
- "100260",
125
- "100261",
126
- "100262",
127
- "100263",
128
- "100264",
129
- "100265",
130
- "100266",
131
- "100268",
132
- "100269",
133
- "100270",
134
- "100271",
135
- "100272",
136
- "100273",
137
- "100274",
138
- "100275",
139
- "100276",
140
- "100277",
141
- "100278",
142
- "100279",
143
- "100280",
144
- "100281",
145
- "100282",
146
- "100283",
147
- "100284",
148
- "100285",
149
- ]
150
-
151
- LEDGAR_CATEGORIES = [
152
- "Adjustments",
153
- "Agreements",
154
- "Amendments",
155
- "Anti-Corruption Laws",
156
- "Applicable Laws",
157
- "Approvals",
158
- "Arbitration",
159
- "Assignments",
160
- "Assigns",
161
- "Authority",
162
- "Authorizations",
163
- "Base Salary",
164
- "Benefits",
165
- "Binding Effects",
166
- "Books",
167
- "Brokers",
168
- "Capitalization",
169
- "Change In Control",
170
- "Closings",
171
- "Compliance With Laws",
172
- "Confidentiality",
173
- "Consent To Jurisdiction",
174
- "Consents",
175
- "Construction",
176
- "Cooperation",
177
- "Costs",
178
- "Counterparts",
179
- "Death",
180
- "Defined Terms",
181
- "Definitions",
182
- "Disability",
183
- "Disclosures",
184
- "Duties",
185
- "Effective Dates",
186
- "Effectiveness",
187
- "Employment",
188
- "Enforceability",
189
- "Enforcements",
190
- "Entire Agreements",
191
- "Erisa",
192
- "Existence",
193
- "Expenses",
194
- "Fees",
195
- "Financial Statements",
196
- "Forfeitures",
197
- "Further Assurances",
198
- "General",
199
- "Governing Laws",
200
- "Headings",
201
- "Indemnifications",
202
- "Indemnity",
203
- "Insurances",
204
- "Integration",
205
- "Intellectual Property",
206
- "Interests",
207
- "Interpretations",
208
- "Jurisdictions",
209
- "Liens",
210
- "Litigations",
211
- "Miscellaneous",
212
- "Modifications",
213
- "No Conflicts",
214
- "No Defaults",
215
- "No Waivers",
216
- "Non-Disparagement",
217
- "Notices",
218
- "Organizations",
219
- "Participations",
220
- "Payments",
221
- "Positions",
222
- "Powers",
223
- "Publicity",
224
- "Qualifications",
225
- "Records",
226
- "Releases",
227
- "Remedies",
228
- "Representations",
229
- "Sales",
230
- "Sanctions",
231
- "Severability",
232
- "Solvency",
233
- "Specific Performance",
234
- "Submission To Jurisdiction",
235
- "Subsidiaries",
236
- "Successors",
237
- "Survival",
238
- "Tax Withholdings",
239
- "Taxes",
240
- "Terminations",
241
- "Terms",
242
- "Titles",
243
- "Transactions With Affiliates",
244
- "Use Of Proceeds",
245
- "Vacations",
246
- "Venues",
247
- "Vesting",
248
- "Waiver Of Jury Trials",
249
- "Waivers",
250
- "Warranties",
251
- "Withholdings",
252
- ]
253
-
254
- SCDB_ISSUE_AREAS = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13"]
255
-
256
- UNFAIR_CATEGORIES = [
257
- "Limitation of liability",
258
- "Unilateral termination",
259
- "Unilateral change",
260
- "Content removal",
261
- "Contract by using",
262
- "Choice of law",
263
- "Jurisdiction",
264
- "Arbitration",
265
- ]
266
-
267
- CASEHOLD_LABELS = ["0", "1", "2", "3", "4"]
268
-
269
-
270
- class LexGlueConfig(datasets.BuilderConfig):
271
- """BuilderConfig for LexGLUE."""
272
-
273
- def __init__(
274
- self,
275
- text_column,
276
- label_column,
277
- url,
278
- data_url,
279
- data_file,
280
- citation,
281
- label_classes=None,
282
- multi_label=None,
283
- dev_column="dev",
284
- **kwargs,
285
- ):
286
- """BuilderConfig for LexGLUE.
287
-
288
- Args:
289
- text_column: ``string`, name of the column in the jsonl file corresponding
290
- to the text
291
- label_column: `string`, name of the column in the jsonl file corresponding
292
- to the label
293
- url: `string`, url for the original project
294
- data_url: `string`, url to download the zip file from
295
- data_file: `string`, filename for data set
296
- citation: `string`, citation for the data set
297
- url: `string`, url for information about the data set
298
- label_classes: `list[string]`, the list of classes if the label is
299
- categorical. If not provided, then the label will be of type
300
- `datasets.Value('float32')`.
301
- multi_label: `boolean`, True if the task is multi-label
302
- dev_column: `string`, name for the development subset
303
- **kwargs: keyword arguments forwarded to super.
304
- """
305
- super(LexGlueConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
306
- self.text_column = text_column
307
- self.label_column = label_column
308
- self.label_classes = label_classes
309
- self.multi_label = multi_label
310
- self.dev_column = dev_column
311
- self.url = url
312
- self.data_url = data_url
313
- self.data_file = data_file
314
- self.citation = citation
315
-
316
-
317
- class LexGLUE(datasets.GeneratorBasedBuilder):
318
- """LexGLUE: A Benchmark Dataset for Legal Language Understanding in English. Version 1.0"""
319
-
320
- BUILDER_CONFIGS = [
321
- LexGlueConfig(
322
- name="ecthr_a",
323
- description=textwrap.dedent(
324
- """\
325
- The European Court of Human Rights (ECtHR) hears allegations that a state has
326
- breached human rights provisions of the European Convention of Human Rights (ECHR).
327
- For each case, the dataset provides a list of factual paragraphs (facts) from the case description.
328
- Each case is mapped to articles of the ECHR that were violated (if any)."""
329
- ),
330
- text_column="facts",
331
- label_column="violated_articles",
332
- label_classes=ECTHR_ARTICLES,
333
- multi_label=True,
334
- dev_column="dev",
335
- data_url="https://zenodo.org/record/5532997/files/ecthr.tar.gz",
336
- data_file="ecthr.jsonl",
337
- url="https://archive.org/details/ECtHR-NAACL2021",
338
- citation=textwrap.dedent(
339
- """\
340
- @inproceedings{chalkidis-etal-2021-paragraph,
341
- title = "Paragraph-level Rationale Extraction through Regularization: A case study on {E}uropean Court of Human Rights Cases",
342
- author = "Chalkidis, Ilias and
343
- Fergadiotis, Manos and
344
- Tsarapatsanis, Dimitrios and
345
- Aletras, Nikolaos and
346
- Androutsopoulos, Ion and
347
- Malakasiotis, Prodromos",
348
- booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
349
- month = jun,
350
- year = "2021",
351
- address = "Online",
352
- publisher = "Association for Computational Linguistics",
353
- url = "https://aclanthology.org/2021.naacl-main.22",
354
- doi = "10.18653/v1/2021.naacl-main.22",
355
- pages = "226--241",
356
- }
357
- }"""
358
- ),
359
- ),
360
- LexGlueConfig(
361
- name="ecthr_b",
362
- description=textwrap.dedent(
363
- """\
364
- The European Court of Human Rights (ECtHR) hears allegations that a state has
365
- breached human rights provisions of the European Convention of Human Rights (ECHR).
366
- For each case, the dataset provides a list of factual paragraphs (facts) from the case description.
367
- Each case is mapped to articles of ECHR that were allegedly violated (considered by the court)."""
368
- ),
369
- text_column="facts",
370
- label_column="allegedly_violated_articles",
371
- label_classes=ECTHR_ARTICLES,
372
- multi_label=True,
373
- dev_column="dev",
374
- url="https://archive.org/details/ECtHR-NAACL2021",
375
- data_url="https://zenodo.org/record/5532997/files/ecthr.tar.gz",
376
- data_file="ecthr.jsonl",
377
- citation=textwrap.dedent(
378
- """\
379
- @inproceedings{chalkidis-etal-2021-paragraph,
380
- title = "Paragraph-level Rationale Extraction through Regularization: A case study on {E}uropean Court of Human Rights Cases",
381
- author = "Chalkidis, Ilias
382
- and Fergadiotis, Manos
383
- and Tsarapatsanis, Dimitrios
384
- and Aletras, Nikolaos
385
- and Androutsopoulos, Ion
386
- and Malakasiotis, Prodromos",
387
- booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
388
- year = "2021",
389
- address = "Online",
390
- url = "https://aclanthology.org/2021.naacl-main.22",
391
- }
392
- }"""
393
- ),
394
- ),
395
- LexGlueConfig(
396
- name="eurlex",
397
- description=textwrap.dedent(
398
- """\
399
- European Union (EU) legislation is published in EUR-Lex portal.
400
- All EU laws are annotated by EU's Publications Office with multiple concepts from the EuroVoc thesaurus,
401
- a multilingual thesaurus maintained by the Publications Office.
402
- The current version of EuroVoc contains more than 7k concepts referring to various activities
403
- of the EU and its Member States (e.g., economics, health-care, trade).
404
- Given a document, the task is to predict its EuroVoc labels (concepts)."""
405
- ),
406
- text_column="text",
407
- label_column="labels",
408
- label_classes=EUROVOC_CONCEPTS,
409
- multi_label=True,
410
- dev_column="dev",
411
- url="https://zenodo.org/record/5363165#.YVJOAi8RqaA",
412
- data_url="https://zenodo.org/record/5532997/files/eurlex.tar.gz",
413
- data_file="eurlex.jsonl",
414
- citation=textwrap.dedent(
415
- """\
416
- @inproceedings{chalkidis-etal-2021-multieurlex,
417
- author = {Chalkidis, Ilias and
418
- Fergadiotis, Manos and
419
- Androutsopoulos, Ion},
420
- title = {MultiEURLEX -- A multi-lingual and multi-label legal document
421
- classification dataset for zero-shot cross-lingual transfer},
422
- booktitle = {Proceedings of the 2021 Conference on Empirical Methods
423
- in Natural Language Processing},
424
- year = {2021},
425
- location = {Punta Cana, Dominican Republic},
426
- }
427
- }"""
428
- ),
429
- ),
430
- LexGlueConfig(
431
- name="scotus",
432
- description=textwrap.dedent(
433
- """\
434
- The US Supreme Court (SCOTUS) is the highest federal court in the United States of America
435
- and generally hears only the most controversial or otherwise complex cases which have not
436
- been sufficiently well solved by lower courts. This is a single-label multi-class classification
437
- task, where given a document (court opinion), the task is to predict the relevant issue areas.
438
- The 14 issue areas cluster 278 issues whose focus is on the subject matter of the controversy (dispute)."""
439
- ),
440
- text_column="text",
441
- label_column="issueArea",
442
- label_classes=SCDB_ISSUE_AREAS,
443
- multi_label=False,
444
- dev_column="dev",
445
- url="http://scdb.wustl.edu/data.php",
446
- data_url="https://zenodo.org/record/5532997/files/scotus.tar.gz",
447
- data_file="scotus.jsonl",
448
- citation=textwrap.dedent(
449
- """\
450
- @misc{spaeth2020,
451
- author = {Harold J. Spaeth and Lee Epstein and Andrew D. Martin, Jeffrey A. Segal
452
- and Theodore J. Ruger and Sara C. Benesh},
453
- year = {2020},
454
- title ={{Supreme Court Database, Version 2020 Release 01}},
455
- url= {http://Supremecourtdatabase.org},
456
- howpublished={Washington University Law}
457
- }"""
458
- ),
459
- ),
460
- LexGlueConfig(
461
- name="ledgar",
462
- description=textwrap.dedent(
463
- """\
464
- LEDGAR dataset aims contract provision (paragraph) classification.
465
- The contract provisions come from contracts obtained from the US Securities and Exchange Commission (SEC)
466
- filings, which are publicly available from EDGAR. Each label represents the single main topic
467
- (theme) of the corresponding contract provision."""
468
- ),
469
- text_column="text",
470
- label_column="clause_type",
471
- label_classes=LEDGAR_CATEGORIES,
472
- multi_label=False,
473
- dev_column="dev",
474
- url="https://metatext.io/datasets/ledgar",
475
- data_url="https://zenodo.org/record/5532997/files/ledgar.tar.gz",
476
- data_file="ledgar.jsonl",
477
- citation=textwrap.dedent(
478
- """\
479
- @inproceedings{tuggener-etal-2020-ledgar,
480
- title = "{LEDGAR}: A Large-Scale Multi-label Corpus for Text Classification of Legal Provisions in Contracts",
481
- author = {Tuggener, Don and
482
- von D{\"a}niken, Pius and
483
- Peetz, Thomas and
484
- Cieliebak, Mark},
485
- booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference",
486
- year = "2020",
487
- address = "Marseille, France",
488
- url = "https://aclanthology.org/2020.lrec-1.155",
489
- }
490
- }"""
491
- ),
492
- ),
493
- LexGlueConfig(
494
- name="unfair_tos",
495
- description=textwrap.dedent(
496
- """\
497
- The UNFAIR-ToS dataset contains 50 Terms of Service (ToS) from on-line platforms (e.g., YouTube,
498
- Ebay, Facebook, etc.). The dataset has been annotated on the sentence-level with 8 types of
499
- unfair contractual terms (sentences), meaning terms that potentially violate user rights
500
- according to the European consumer law."""
501
- ),
502
- text_column="text",
503
- label_column="labels",
504
- label_classes=UNFAIR_CATEGORIES,
505
- multi_label=True,
506
- dev_column="val",
507
- url="http://claudette.eui.eu",
508
- data_url="https://zenodo.org/record/5532997/files/unfair_tos.tar.gz",
509
- data_file="unfair_tos.jsonl",
510
- citation=textwrap.dedent(
511
- """\
512
- @article{lippi-etal-2019-claudette,
513
- title = "{CLAUDETTE}: an automated detector of potentially unfair clauses in online terms of service",
514
- author = {Lippi, Marco
515
- and Pałka, Przemysław
516
- and Contissa, Giuseppe
517
- and Lagioia, Francesca
518
- and Micklitz, Hans-Wolfgang
519
- and Sartor, Giovanni
520
- and Torroni, Paolo},
521
- journal = "Artificial Intelligence and Law",
522
- year = "2019",
523
- publisher = "Springer",
524
- url = "https://doi.org/10.1007/s10506-019-09243-2",
525
- pages = "117--139",
526
- }"""
527
- ),
528
- ),
529
- LexGlueConfig(
530
- name="case_hold",
531
- description=textwrap.dedent(
532
- """\
533
- The CaseHOLD (Case Holdings on Legal Decisions) dataset contains approx. 53k multiple choice
534
- questions about holdings of US court cases from the Harvard Law Library case law corpus.
535
- Holdings are short summaries of legal rulings accompany referenced decisions relevant for the present case.
536
- The input consists of an excerpt (or prompt) from a court decision, containing a reference
537
- to a particular case, while the holding statement is masked out. The model must identify
538
- the correct (masked) holding statement from a selection of five choices."""
539
- ),
540
- text_column="text",
541
- label_column="labels",
542
- dev_column="dev",
543
- multi_label=False,
544
- label_classes=CASEHOLD_LABELS,
545
- url="https://github.com/reglab/casehold",
546
- data_url="https://zenodo.org/record/5532997/files/casehold.tar.gz",
547
- data_file="casehold.csv",
548
- citation=textwrap.dedent(
549
- """\
550
- @inproceedings{Zheng2021,
551
- author = {Lucia Zheng and
552
- Neel Guha and
553
- Brandon R. Anderson and
554
- Peter Henderson and
555
- Daniel E. Ho},
556
- title = {When Does Pretraining Help? Assessing Self-Supervised Learning for
557
- Law and the CaseHOLD Dataset},
558
- year = {2021},
559
- booktitle = {International Conference on Artificial Intelligence and Law},
560
- }"""
561
- ),
562
- ),
563
- ]
564
-
565
- def _info(self):
566
- if self.config.name == "case_hold":
567
- features = {
568
- "context": datasets.Value("string"),
569
- "endings": datasets.features.Sequence(datasets.Value("string")),
570
- }
571
- elif "ecthr" in self.config.name:
572
- features = {"text": datasets.features.Sequence(datasets.Value("string"))}
573
- else:
574
- features = {"text": datasets.Value("string")}
575
- if self.config.multi_label:
576
- features["labels"] = datasets.features.Sequence(datasets.ClassLabel(names=self.config.label_classes))
577
- else:
578
- features["label"] = datasets.ClassLabel(names=self.config.label_classes)
579
- return datasets.DatasetInfo(
580
- description=self.config.description,
581
- features=datasets.Features(features),
582
- homepage=self.config.url,
583
- citation=self.config.citation + "\n" + MAIN_CITATION,
584
- )
585
-
586
- def _split_generators(self, dl_manager):
587
- archive = dl_manager.download(self.config.data_url)
588
- return [
589
- datasets.SplitGenerator(
590
- name=datasets.Split.TRAIN,
591
- # These kwargs will be passed to _generate_examples
592
- gen_kwargs={
593
- "filepath": self.config.data_file,
594
- "split": "train",
595
- "files": dl_manager.iter_archive(archive),
596
- },
597
- ),
598
- datasets.SplitGenerator(
599
- name=datasets.Split.TEST,
600
- # These kwargs will be passed to _generate_examples
601
- gen_kwargs={
602
- "filepath": self.config.data_file,
603
- "split": "test",
604
- "files": dl_manager.iter_archive(archive),
605
- },
606
- ),
607
- datasets.SplitGenerator(
608
- name=datasets.Split.VALIDATION,
609
- # These kwargs will be passed to _generate_examples
610
- gen_kwargs={
611
- "filepath": self.config.data_file,
612
- "split": self.config.dev_column,
613
- "files": dl_manager.iter_archive(archive),
614
- },
615
- ),
616
- ]
617
-
618
- def _generate_examples(self, filepath, split, files):
619
- """This function returns the examples in the raw (text) form."""
620
- if self.config.name == "case_hold":
621
- if "dummy" in filepath:
622
- SPLIT_RANGES = {"train": (1, 3), "dev": (3, 5), "test": (5, 7)}
623
- else:
624
- SPLIT_RANGES = {"train": (1, 45001), "dev": (45001, 48901), "test": (48901, 52501)}
625
- for path, f in files:
626
- if path == filepath:
627
- f = (line.decode("utf-8") for line in f)
628
- for id_, row in enumerate(list(csv.reader(f))[SPLIT_RANGES[split][0] : SPLIT_RANGES[split][1]]):
629
- yield id_, {
630
- "context": row[1],
631
- "endings": [row[2], row[3], row[4], row[5], row[6]],
632
- "label": str(row[12]),
633
- }
634
- break
635
- elif self.config.multi_label:
636
- for path, f in files:
637
- if path == filepath:
638
- for id_, row in enumerate(f):
639
- data = json.loads(row.decode("utf-8"))
640
- labels = sorted(
641
- list(set(data[self.config.label_column]).intersection(set(self.config.label_classes)))
642
- )
643
- if data["data_type"] == split:
644
- yield id_, {
645
- "text": data[self.config.text_column],
646
- "labels": labels,
647
- }
648
- break
649
- else:
650
- for path, f in files:
651
- if path == filepath:
652
- for id_, row in enumerate(f):
653
- data = json.loads(row.decode("utf-8"))
654
- if data["data_type"] == split:
655
- yield id_, {
656
- "text": data[self.config.text_column],
657
- "label": data[self.config.label_column],
658
- }
659
- break
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
scotus/lex_glue-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f93bd6519931b0da6e31a934ea76713c4f04201914504c1171ccfa3984b3d2a
3
+ size 39976264
scotus/lex_glue-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:622bb9ac4cdd8532a3fe8e1c2b1992ebe422416ac2d50ec74665e1af7b0d2e5c
3
+ size 94360447
scotus/lex_glue-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03d1d4f7ecabf883cefdaad40e35ef81d666f0ce30ddadf687b84528d79ba89b
3
+ size 39074685
unfair_tos/lex_glue-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e631d09f9ac24dc7375f1fb95aa5aa8ed3a3735cd7eaf47adda3d0f071ca7d48
3
+ size 146863
unfair_tos/lex_glue-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e6a4cf79da66cef9c8d9a93d9806f50d492b3bf5e8d26876a71de1768e8fe721
3
+ size 500892
unfair_tos/lex_glue-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f3d84c4b8d9a0d1b79b1d2c431c133f9330fc31449ac8cd965689235c346d7e3
3
+ size 217846