Commit c9eb937: Update files from the datasets library (from 1.4.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.4.0

Files changed:
- .gitattributes +27 -0
- README.md +293 -0
- dataset_infos.json +1 -0
- dummy/1.0.1/dummy_data.zip +3 -0
- norec.py +146 -0
.gitattributes
ADDED
@@ -0,0 +1,27 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,293 @@
---
annotations_creators:
- expert-generated
language_creators:
- found
languages:
- nb
- nn
- 'no'
licenses:
- cc-by-nc-4-0
multilinguality:
- monolingual
size_categories:
- 100K<n<1M
source_datasets:
- original
task_categories:
- structure-prediction
task_ids:
- named-entity-recognition
---

# Dataset Card for NoReC

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Repository:** https://github.com/ltgoslo/norec
- **Paper:** http://www.lrec-conf.org/proceedings/lrec2018/pdf/851.pdf
- **Leaderboard:** [More Information Needed]
- **Point of Contact:** [More Information Needed]

### Dataset Summary

This dataset contains the Norwegian Review Corpus (NoReC), created for training and evaluating models for document-level sentiment analysis. More than 43,000 full-text reviews were collected from major Norwegian news sources, covering domains such as literature, movies, video games, restaurants, music and theater, as well as product reviews across a range of categories. Each review is labeled with a manually assigned score of 1–6, corresponding to the rating given by the original author.

### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

The sentences in the dataset are in Norwegian (`nb`, `nn`, `no`).

## Dataset Structure

### Data Instances

A sample from the training set is provided below:

```
{'deprel': ['det', 'amod', 'cc', 'conj', 'nsubj', 'case', 'nmod', 'cop', 'case', 'case', 'root', 'flat:name', 'flat:name', 'punct'],
 'deps': ['None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None'],
 'feats': ["{'Gender': 'Masc', 'Number': 'Sing', 'PronType': 'Dem'}",
           "{'Definite': 'Def', 'Degree': 'Pos', 'Number': 'Sing'}",
           'None',
           "{'Definite': 'Def', 'Degree': 'Pos', 'Number': 'Sing'}",
           "{'Definite': 'Def', 'Gender': 'Masc', 'Number': 'Sing'}",
           'None',
           'None',
           "{'Mood': 'Ind', 'Tense': 'Pres', 'VerbForm': 'Fin'}",
           'None', 'None', 'None', 'None', 'None', 'None'],
 'head': ['5', '5', '4', '2', '11', '7', '5', '11', '11', '11', '0', '11', '11', '11'],
 'idx': '000000-02-01',
 'lemmas': ['den', 'andre', 'og', 'sist', 'sesong', 'av', 'Rome', 'være', 'ute', 'på', 'DVD', 'i', 'Norge', '$.'],
 'misc': ['None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', "{'SpaceAfter': 'No'}", 'None'],
 'pos_tags': [5, 0, 4, 0, 7, 1, 11, 3, 1, 1, 11, 1, 11, 12],
 'text': 'Den andre og siste sesongen av Rome er ute på DVD i Norge.',
 'tokens': ['Den', 'andre', 'og', 'siste', 'sesongen', 'av', 'Rome', 'er', 'ute', 'på', 'DVD', 'i', 'Norge', '.'],
 'xpos_tags': ['None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None']}
```

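Instances like the one above can be inspected by loading the dataset with the `datasets` library. The following is a minimal sketch, assuming `datasets` and the `conllu` package (imported by the loading script) are installed:

```python
# Minimal sketch: load NoReC and look at one training instance.
# The first call downloads the source archive (roughly 200 MB) and
# parses the CoNLL-U files, so it takes a while.
from datasets import load_dataset

dataset = load_dataset("norec")
example = dataset["train"][0]
print(example["text"])      # the raw sentence
print(example["tokens"])    # its tokenization
print(example["pos_tags"])  # integer ids of the part-of-speech tags (see Data Fields)
```
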
### Data Fields

The data instances have the following fields:

- deprel: [More Information Needed]
- deps: [More Information Needed]
- feats: [More Information Needed]
- head: [More Information Needed]
- idx: index
- lemmas: lemmas of all tokens
- misc: [More Information Needed]
- pos_tags: part-of-speech tags
- text: text string
- tokens: tokens
- xpos_tags: [More Information Needed]

The part-of-speech tags correspond to the following labels: "ADJ" (0), "ADP" (1), "ADV" (2), "AUX" (3), "CCONJ" (4), "DET" (5), "INTJ" (6), "NOUN" (7), "NUM" (8), "PART" (9), "PRON" (10), "PROPN" (11), "PUNCT" (12), "SCONJ" (13), "SYM" (14), "VERB" (15), "X" (16).

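Because `pos_tags` is declared as a sequence of class labels in the loading script, the integer ids can be mapped back to these tag names. A minimal sketch, assuming the dataset loads under the name `norec`:

```python
# Convert the integer pos_tags of one example back to tag names.
from datasets import load_dataset

dataset = load_dataset("norec", split="train")
pos_labels = dataset.features["pos_tags"].feature  # the ClassLabel feature
example = dataset[0]
print([pos_labels.int2str(i) for i in example["pos_tags"]])
# -> tag names such as 'DET', 'ADJ', 'CCONJ', 'NOUN', 'PROPN', ...
```
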
### Data Splits

The training, validation, and test sets contain `680792`, `101106`, and `101594` sentences, respectively.

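A quick check of these counts, again assuming the dataset loads as `norec` (sketch only):

```python
# Print the number of sentences per split.
from datasets import load_dataset

dataset = load_dataset("norec")
print({split: len(dataset[split]) for split in dataset})
# expected: {'train': 680792, 'validation': 101106, 'test': 101594}
```
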
## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

[More Information Needed]

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

[More Information Needed]

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

[More Information Needed]

### Citation Information

```
@InProceedings{VelOvrBer18,
  author    = {Erik Velldal and Lilja {\O}vrelid and
               Eivind Alexander Bergem and Cathrine Stadsnes and
               Samia Touileb and Fredrik J{\o}rgensen},
  title     = {{NoReC}: The {N}orwegian {R}eview {C}orpus},
  booktitle = {Proceedings of the 11th edition of the
               Language Resources and Evaluation Conference},
  year      = {2018},
  address   = {Miyazaki, Japan},
  pages     = {4186--4191}
}
```

### Contributions

Thanks to [@abhishekkrthakur](https://github.com/abhishekkrthakur) for adding this dataset.
dataset_infos.json
ADDED
@@ -0,0 +1 @@
{"default": {"description": "NoReC was created as part of the SANT project (Sentiment Analysis for Norwegian Text), a collaboration between the Language Technology Group (LTG) at the Department of Informatics at the University of Oslo, the Norwegian Broadcasting Corporation (NRK), Schibsted Media Group and Aller Media. This first release of the corpus comprises 35,194 reviews extracted from eight different news sources: Dagbladet, VG, Aftenposten, Bergens Tidende, F\u00e6drelandsvennen, Stavanger Aftenblad, DinSide.no and P3.no. In terms of publishing date the reviews mainly cover the time span 2003\u20132017, although it also includes a handful of reviews dating back as far as 1998.\n", "citation": "NoReC: The Norwegian Review Corpus\nErik Velldal, Lilja \u00d8vrelid, Eivind Alexander Bergem, Cathrine Stadsnes, Samia Touileb, Fredrik J\u00f8rgensen\n2018\nhttp://www.lrec-conf.org/proceedings/lrec2018/pdf/851.pdf\n", "homepage": "https://github.com/ljos/navnkjenner", "license": "", "features": {"idx": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "lemmas": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "xpos_tags": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "feats": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "head": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "deprel": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "deps": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "misc": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "norec", "config_name": "default", "version": {"version_str": "1.0.1", "description": null, "major": 1, "minor": 0, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 1254757266, "num_examples": 680792, "dataset_name": "norec"}, "validation": {"name": "validation", "num_bytes": 189534106, "num_examples": 101106, "dataset_name": "norec"}, "test": {"name": "test", "num_bytes": 193801708, "num_examples": 101594, "dataset_name": "norec"}}, "download_checksums": {"https://www.mn.uio.no/ifi/english/research/projects/sant/data/norec/norec-1.0.1.tar.gz": {"num_bytes": 212492611, "checksum": "76f7ef4d4dc6717c82af9c5d8d54e0f0fb20eb5ff39f2c6b5770f26a7bfaf82a"}}, "download_size": 212492611, "post_processing_size": null, "dataset_size": 1638093080, "size_in_bytes": 1850585691}}
dummy/1.0.1/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:29102914e9e34179e41feca385b9af8f7feebceaa9722482c80a43c4eaa37c5b
size 19217
norec.py
ADDED
@@ -0,0 +1,146 @@
# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
import glob
import os

import conllu

import datasets


_CITATION = """\
@InProceedings{VelOvrBer18,
  author    = {Erik Velldal and Lilja Ovrelid and
               Eivind Alexander Bergem and Cathrine Stadsnes and
               Samia Touileb and Fredrik Jorgensen},
  title     = {{NoReC}: The {N}orwegian {R}eview {C}orpus},
  booktitle = {Proceedings of the 11th edition of the
               Language Resources and Evaluation Conference},
  year      = {2018},
  address   = {Miyazaki, Japan},
  pages     = {4186--4191}
}
"""

_DESCRIPTION = """\
NoReC was created as part of the SANT project (Sentiment Analysis for Norwegian Text), a collaboration between the Language Technology Group (LTG) at the Department of Informatics at the University of Oslo, the Norwegian Broadcasting Corporation (NRK), Schibsted Media Group and Aller Media. This first release of the corpus comprises 35,194 reviews extracted from eight different news sources: Dagbladet, VG, Aftenposten, Bergens Tidende, Fædrelandsvennen, Stavanger Aftenblad, DinSide.no and P3.no. In terms of publishing date the reviews mainly cover the time span 2003–2017, although it also includes a handful of reviews dating back as far as 1998.
"""

_URL = "https://www.mn.uio.no/ifi/english/research/projects/sant/data/norec/norec-1.0.1.tar.gz"
_TRAIN = "conllu/train"
_DEV = "conllu/dev"
_TEST = "conllu/test"


class Norec(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.1")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "idx": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "lemmas": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "ADJ",
                                "ADP",
                                "ADV",
                                "AUX",
                                "CCONJ",
                                "DET",
                                "INTJ",
                                "NOUN",
                                "NUM",
                                "PART",
                                "PRON",
                                "PROPN",
                                "PUNCT",
                                "SCONJ",
                                "SYM",
                                "VERB",
                                "X",
                            ]
                        )
                    ),
                    "xpos_tags": datasets.Sequence(datasets.Value("string")),
                    "feats": datasets.Sequence(datasets.Value("string")),
                    "head": datasets.Sequence(datasets.Value("string")),
                    "deprel": datasets.Sequence(datasets.Value("string")),
                    "deps": datasets.Sequence(datasets.Value("string")),
                    "misc": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/ljos/navnkjenner",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the NoReC release and extract the nested conllu.tar.gz archive.
        path = dl_manager.download_and_extract(_URL)
        sub_path = os.path.join(path, "norec", "conllu.tar.gz")
        conllu_path = dl_manager.extract(sub_path)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "datapath": os.path.join(conllu_path, "conllu", "train"),
                    "path": path,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "datapath": os.path.join(conllu_path, "conllu", "dev"),
                    "path": path,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "datapath": os.path.join(conllu_path, "conllu", "test"),
                    "path": path,
                },
            ),
        ]

    def _generate_examples(self, datapath, path):
        # Each split is a directory of CoNLL-U files; yield one example per sentence.
        conllu_files = sorted(glob.glob(os.path.join(datapath, "*.conllu")))
        counter = 0
        for cf in conllu_files:
            with open(cf, "r", encoding="utf-8") as data_file:
                tokenlist = list(conllu.parse_incr(data_file))
                for sent in tokenlist:
                    res = {
                        "idx": sent.metadata["sent_id"],
                        "text": sent.metadata["text"],
                        "tokens": [str(token["form"]) for token in sent],
                        "lemmas": [str(token["lemma"]) for token in sent],
                        "pos_tags": [str(token["upostag"]) for token in sent],
                        "xpos_tags": [str(token["xpostag"]) for token in sent],
                        "feats": [str(token["feats"]) for token in sent],
                        "head": [str(token["head"]) for token in sent],
                        "deprel": [str(token["deprel"]) for token in sent],
                        "deps": [str(token["deps"]) for token in sent],
                        "misc": [str(token["misc"]) for token in sent],
                    }
                    yield counter, res
                    counter += 1
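As a side note (not part of the commit), the snippet below sketches what `_generate_examples` does for a single sentence, using the `conllu` package on a made-up CoNLL-U fragment. Only keys whose names are stable across `conllu` versions are accessed; depending on the installed version, UPOS/XPOS may be exposed as `upostag`/`xpostag` (as the script above assumes) or as `upos`/`xpos`.

```python
# Toy illustration of the per-sentence parsing done in _generate_examples.
# The CoNLL-U fragment below is invented for illustration only.
import conllu

SAMPLE = (
    "# sent_id = 000000-00-01\n"
    "# text = Rome er ute.\n"
    "1\tRome\tRome\tPROPN\t_\t_\t3\tnsubj\t_\t_\n"
    "2\ter\tvære\tAUX\t_\tMood=Ind|Tense=Pres|VerbForm=Fin\t3\tcop\t_\t_\n"
    "3\tute\tute\tADJ\t_\t_\t0\troot\t_\tSpaceAfter=No\n"
    "4\t.\t$.\tPUNCT\t_\t_\t3\tpunct\t_\t_\n"
    "\n"
)

for sent in conllu.parse(SAMPLE):
    print(sent.metadata["sent_id"], "-", sent.metadata["text"])
    print("tokens:", [token["form"] for token in sent])
    print("lemmas:", [token["lemma"] for token in sent])
    print("heads: ", [str(token["head"]) for token in sent])
```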