paoloitaliani committed on
Commit
a2ecd8a
1 Parent(s): 19fd74e

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +201 -122
README.md CHANGED
@@ -1,4 +1,11 @@
1
  ---
 
 
 
 
 
 
 
2
  dataset_info:
3
  - config_name: en
4
  features:
@@ -8,8 +15,6 @@ dataset_info:
8
  dtype: int64
9
  - name: epigraph
10
  dtype: string
11
- - name: body
12
- dtype: string
13
  - name: decision
14
  dtype: string
15
  - name: maxims_text
@@ -27,105 +32,30 @@ dataset_info:
27
  - name: judgment_type
28
  dtype: int64
29
  - name: constitutional_parameters
30
- dtype: string
 
31
  - name: maxims
32
- dtype: string
33
- splits:
34
- - name: train
35
- num_bytes: 555145830
36
- num_examples: 12600
37
- - name: test
38
- num_bytes: 30737608
39
- num_examples: 700
40
- - name: validation
41
- num_bytes: 31671019
42
- num_examples: 700
43
- download_size: 278441383
44
- dataset_size: 617554457
45
- - config_name: es
46
- features:
47
- - name: id
48
- dtype: string
49
- - name: ruling_type
50
- dtype: int64
51
- - name: epigraph
52
- dtype: string
53
  - name: body
54
  dtype: string
55
- - name: decision
56
- dtype: string
57
- - name: maxims_text
58
- dtype: string
59
- - name: maxims_title
60
- dtype: string
61
- - name: full_text
62
- dtype: string
63
- - name: num_maxims
64
- dtype: int64
65
- - name: maxims_len
66
- dtype: int64
67
- - name: full_text_len
68
- dtype: int64
69
- - name: judgment_type
70
- dtype: int64
71
- - name: constitutional_parameters
72
- dtype: string
73
- - name: maxims
74
- dtype: string
75
  splits:
76
  - name: train
77
- num_bytes: 575679719
78
- num_examples: 12600
79
- - name: test
80
- num_bytes: 31896832
81
- num_examples: 700
82
  - name: validation
83
- num_bytes: 32827830
84
- num_examples: 700
85
- download_size: 300803577
86
- dataset_size: 640404381
87
- - config_name: fr
88
- features:
89
- - name: id
90
- dtype: string
91
- - name: ruling_type
92
- dtype: int64
93
- - name: epigraph
94
- dtype: string
95
- - name: body
96
- dtype: string
97
- - name: decision
98
- dtype: string
99
- - name: maxims_text
100
- dtype: string
101
- - name: maxims_title
102
- dtype: string
103
- - name: full_text
104
- dtype: string
105
- - name: num_maxims
106
- dtype: int64
107
- - name: maxims_len
108
- dtype: int64
109
- - name: full_text_len
110
- dtype: int64
111
- - name: judgment_type
112
- dtype: int64
113
- - name: constitutional_parameters
114
- dtype: string
115
- - name: maxims
116
- dtype: string
117
- splits:
118
- - name: train
119
- num_bytes: 580985816
120
- num_examples: 12600
121
  - name: test
122
- num_bytes: 32177379
123
- num_examples: 700
124
- - name: validation
125
- num_bytes: 33152939
126
- num_examples: 700
127
- download_size: 306338176
128
- dataset_size: 646316134
129
  - config_name: it
130
  features:
131
  - name: id
@@ -153,52 +83,201 @@ dataset_info:
153
  - name: judgment_type
154
  dtype: int64
155
  - name: constitutional_parameters
156
- dtype: string
 
157
  - name: maxims
158
- dtype: string
 
 
 
 
 
 
159
  splits:
160
  - name: train
161
- num_bytes: 557553146
162
  num_examples: 12600
163
- - name: test
164
- num_bytes: 30850184
165
- num_examples: 700
166
  - name: validation
167
- num_bytes: 31775341
 
 
 
168
  num_examples: 700
169
- download_size: 293523614
170
- dataset_size: 620178671
171
  configs:
172
  - config_name: en
173
  data_files:
174
  - split: train
175
  path: en/train-*
176
- - split: test
177
- path: en/test-*
178
  - split: validation
179
  path: en/validation-*
180
- - config_name: es
181
- data_files:
182
- - split: train
183
- path: es/train-*
184
- - split: test
185
- path: es/test-*
186
- - split: validation
187
- path: es/validation-*
188
- - config_name: fr
189
- data_files:
190
- - split: train
191
- path: fr/train-*
192
  - split: test
193
- path: fr/test-*
194
- - split: validation
195
- path: fr/validation-*
196
  - config_name: it
197
  data_files:
198
  - split: train
199
  path: it/train-*
200
- - split: test
201
- path: it/test-*
202
  - split: validation
203
  path: it/validation-*
 
 
204
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ language:
3
+ - it
4
+ - en
5
+ - es
6
+ - fr
7
+ size_categories:
8
+ - 10K<n<100K
9
  dataset_info:
10
  - config_name: en
11
  features:
 
15
  dtype: int64
16
  - name: epigraph
17
  dtype: string
 
 
18
  - name: decision
19
  dtype: string
20
  - name: maxims_text
 
32
  - name: judgment_type
33
  dtype: int64
34
  - name: constitutional_parameters
35
+ sequence:
36
+ sequence: string
37
  - name: maxims
38
+ struct:
39
+ - name: numbers
40
+ sequence: float64
41
+ - name: texts
42
+ sequence: string
43
+ - name: titles
44
+ sequence: string
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  - name: body
46
  dtype: string
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
  splits:
48
  - name: train
49
+ num_bytes: 2261057820
50
+ num_examples: 50400
 
 
 
51
  - name: validation
52
+ num_bytes: 128951061
53
+ num_examples: 2800
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54
  - name: test
55
+ num_bytes: 125202513
56
+ num_examples: 2800
57
+ download_size: 1180085450
58
+ dataset_size: 2515211394
 
 
 
59
  - config_name: it
60
  features:
61
  - name: id
 
83
  - name: judgment_type
84
  dtype: int64
85
  - name: constitutional_parameters
86
+ sequence:
87
+ sequence: string
88
  - name: maxims
89
+ struct:
90
+ - name: texts
91
+ sequence: string
92
+ - name: titles
93
+ sequence: string
94
+ - name: numbers
95
+ sequence: float64
96
  splits:
97
  - name: train
98
+ num_bytes: 555460844
99
  num_examples: 12600
 
 
 
100
  - name: validation
101
+ num_bytes: 31655527
102
+ num_examples: 700
103
+ - name: test
104
+ num_bytes: 30734422
105
  num_examples: 700
106
+ download_size: 293892604
107
+ dataset_size: 617850793
108
  configs:
109
  - config_name: en
110
  data_files:
111
  - split: train
112
  path: en/train-*
 
 
113
  - split: validation
114
  path: en/validation-*
 
 
 
 
 
 
 
 
 
 
 
 
115
  - split: test
116
+ path: en/test-*
 
 
117
  - config_name: it
118
  data_files:
119
  - split: train
120
  path: it/train-*
 
 
121
  - split: validation
122
  path: it/validation-*
123
+ - split: test
124
+ path: it/test-*
125
  ---
126
+ # Dataset Card for COMMA
127
+ ## Table of Contents
128
+ - [Dataset Card for COMMA](#dataset-card-for-comma)
129
+ - [Table of Contents](#table-of-contents)
130
+ - [Dataset Description](#dataset-description)
131
+ - [Dataset Summary](#dataset-summary)
132
+ - [Languages](#languages)
133
+ - [Dataset](#dataset)
134
+ - [Data Fields](#data-fields)
135
+ - [Data Splits](#data-splits)
136
+ - [Dataset Sheet (Datasheet)](#dataset-sheet-datasheet)
137
+ - [Additional Information](#additional-information)
138
+ - [Dataset Curators](#dataset-curators)
139
+ - [Licensing Information](#licensing-information)
140
+ - [Citation Information](#citation-information)
141
+ - [Release History](#release-history)
142
+
143
+
144
+ ## Dataset Description
145
+
146
+ - **Homepage:**
147
+ - **Repository:**
148
+ - **Paper:**
149
+ - **Leaderboard:**
150
+ - **Point of Contact:**
151
+
152
+ ### Dataset Summary
153
+ COMMA is a constitutional multi-task and multi-lingual archive consisting of 14K CCIR rulings with expert-authored annotations. It embodies distinctive features that render it a valuable object of study for broader NLP research.
154
+
155
+ ### Supported Tasks and Leaderboards
156
+
157
+ [More Information Needed]
158
+
159
+ ### Languages
160
+ Italian, English, Spanish, French
161
+
162
+
163
+ ## Dataset
164
+
165
+ ### Data Fields
166
+
167
+ The dataset contains a list of instances (rulings); each instance contains the following data:
168
+
169
+ | Field | Description |
170
+ |-------------------------: | ------------------------------------------------: |
171
+ | id | `(str)` The ruling ID |
172
+ | ruling_type | `(int)` The ruling type |
173
+ | epigraph | `(str)` The ruling epigraph |
174
+ | body | `(str)` The ruling text |
175
+ | decision | `(str)` The ruling decision |
176
+ | maxims_text | `(str)` The text of ruling maxims |
177
+ | maxims_title | `(str)` The title of ruling maxims |
178
+ | full_text | `(str)` The ruling full_text |
179
+ | num_maxims | `(int)` The number of maxims |
180
+ | maxims_len | `(int)` The length of maxims |
181
+ | full_text_len | `(int)` The length of the full text |
182
+ | judgment_type | `(int)` The judgment type |
183
+ | constitutional_parameters | `(List[List[str]])` The constitutional parameters |
184
+ | maxims | `(dict)` The maxims' numbers, texts, and titles |
185
+
186
+ Please check the exemplar usage below for loading the data:
187
+
188
+ ```python
189
+ from datasets import load_dataset
190
+
191
+ comma_en = load_dataset("disi-unibo-nlp/COMMA", "en")
192
+ # Download comma_en locally and load it as a Dataset object.
193
+
194
+ example = comma_en["validation"][0] # The first instance of the dev set
195
+ example["full_text"] # The full text (i.e., epigraph + text + decision) for the ruling
196
+
197
+ print(example['maxims_title']) # The corresponding maxims title for the ruling
198
+ ```
199
+
200
+ ### Data Splits
201
+
202
+ | IT | Instances |
203
+ | ----------: | --------: |
204
+ | Train (90%) | 12,600 |
205
+ | Test (5%) | 700 |
206
+ | Dev (5%) | 700 |
207
+
208
+ | EN | Instances |
209
+ | ----------: | --------: |
210
+ | Train (90%) | 12,600 |
211
+ | Test (5%) | 700 |
212
+ | Dev (5%) | 700 |
213
+
214
+ | ES | Instances |
215
+ | ----------: | --------: |
216
+ | Train (90%) | 12,600 |
217
+ | Test (5%) | 700 |
218
+ | Dev (5%) | 700 |
219
+
220
+ | FR | Instances |
221
+ | ----------: | --------: |
222
+ | Train (90%) | 12,600 |
223
+ | Test (5%) | 700 |
224
+ | Dev (5%) | 700 |
225
+
226
+
227
+ ## Dataset Creation
228
+
229
+ ### Curation Rationale
230
+
231
+ [More Information Needed]
232
+
233
+ ### Source Data
234
+
235
+ #### Initial Data Collection and Normalization
236
+
237
+ [More Information Needed]
238
+
239
+ #### Who are the source language producers?
240
+
241
+ [More Information Needed]
242
+
243
+ ### Annotations
244
+
245
+ #### Annotation process
246
+
247
+ [More Information Needed]
248
+
249
+ #### Who are the annotators?
250
+
251
+ [More Information Needed]
252
+
253
+ ### Personal and Sensitive Information
254
+
255
+ [More Information Needed]
256
+
257
+ ## Considerations for Using the Data
258
+
259
+ ### Social Impact of Dataset
260
+
261
+ [More Information Needed]
262
+
263
+ ### Discussion of Biases
264
+
265
+ [More Information Needed]
266
+
267
+ ### Other Known Limitations
268
+
269
+ [More Information Needed]
270
+
271
+ ## Additional Information
272
+
273
+ ### Dataset Curators
274
+
275
+ [More Information Needed]
276
+
277
+ ### Licensing Information
278
+
279
+ [More Information Needed]
280
+
281
+ ### Citation Information
282
+
283
+ [More Information Needed]