Update files from the datasets library (from 1.16.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
- README.md +1 -0
- dummy/1.0.1/dummy_data.zip +2 -2
- norec.py +39 -36
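The rewritten loading script streams the NoReC archive instead of extracting it locally, so the dataset can be read either eagerly or in streaming mode. A minimal usage sketch, assuming the script is published on the Hub under the id `norec` (as in this repository) and a datasets version of at least 1.16.0:

```python
from datasets import load_dataset

# Regular load: downloads and prepares the splits defined in norec.py.
train = load_dataset("norec", split="train")

# Streaming load: records are produced directly from the tar archive,
# which is what the iter_archive-based _split_generators below enables.
stream = load_dataset("norec", split="train", streaming=True)
print(next(iter(stream))["text"])
```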
README.md
CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: NoReC
 annotations_creators:
 - expert-generated
 language_creators:
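`pretty_name` is one of the YAML metadata fields in the dataset card header; the Hub uses it as the human-readable display name. A hedged sketch for reading it back, assuming a huggingface_hub release that ships `DatasetCard` and that the card is hosted under the id `norec`:

```python
from huggingface_hub import DatasetCard

# Fetch the dataset card and inspect its YAML metadata (repo id assumed to be "norec").
card = DatasetCard.load("norec")
print(card.data.pretty_name)  # expected: NoReC
```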
dummy/1.0.1/dummy_data.zip
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:...
-size ...
+oid sha256:566085645c954b68377e7a79ce9ea694cbca4aefee955600632f160a82f658f6
+size 19489
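The dummy archive is tracked with Git LFS, so the versioned file is only a pointer: `oid` is the SHA-256 digest of the real content and `size` is its length in bytes. A small sketch for checking a locally fetched copy against the pointer (the path assumes the file was pulled with `git lfs pull`):

```python
import hashlib
from pathlib import Path

# Local copy of the dummy archive after `git lfs pull`.
payload = Path("dummy/1.0.1/dummy_data.zip").read_bytes()

# Both values should match the "oid sha256:" and "size" lines of the LFS pointer above.
print(hashlib.sha256(payload).hexdigest())
print(len(payload))
```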
norec.py
CHANGED
@@ -13,9 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-import glob
-import os
+import tarfile
 
 import conllu
 
@@ -41,9 +39,6 @@ NoReC was created as part of the SANT project (Sentiment Analysis for Norwegian
 """
 
 _URL = "https://www.mn.uio.no/ifi/english/research/projects/sant/data/norec/norec-1.0.1.tar.gz"
-_TRAIN = "conllu/train"
-_DEV = "conllu/dev"
-_TEST = "conllu/test"
 
 
 class Norec(datasets.GeneratorBasedBuilder):
@@ -95,52 +90,60 @@ class Norec(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        ...
-        conllu_path = dl_manager.extract(sub_path)
+        archive = dl_manager.download(_URL)
+        subarchive_path = "norec/conllu.tar.gz"
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    ...
+                    "data_dir": "conllu/train",
+                    "subarchive_path": subarchive_path,
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    ...
+                    "data_dir": "conllu/dev",
+                    "subarchive_path": subarchive_path,
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    ...
+                    "data_dir": "conllu/test",
+                    "subarchive_path": subarchive_path,
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
         ]
 
-    def _generate_examples(self, ...):
-        conllu_files = sorted(glob.glob(os.path.join(datapath, "*.conllu")))
+    def _generate_examples(self, data_dir, subarchive_path, files):
         counter = 0
-        for ...
-            ...
+        for path, f in files:
+            if path == subarchive_path:
+                stream = tarfile.open(fileobj=f, mode="r|*")
+                for tarinfo in stream:
+                    file_path = tarinfo.name
+                    if file_path.startswith(data_dir) and file_path.endswith(".conllu"):
+                        data = stream.extractfile(tarinfo).read().decode("utf-8")
+                        for sent in conllu.parse(data):
+                            res = {
+                                "idx": sent.metadata["sent_id"],
+                                "text": sent.metadata["text"],
+                                "tokens": [str(token["form"]) for token in sent],
+                                "lemmas": [str(token["lemma"]) for token in sent],
+                                "pos_tags": [str(token["upostag"]) for token in sent],
+                                "xpos_tags": [str(token["xpostag"]) for token in sent],
+                                "feats": [str(token["feats"]) for token in sent],
+                                "head": [str(token["head"]) for token in sent],
+                                "deprel": [str(token["deprel"]) for token in sent],
+                                "deps": [str(token["deps"]) for token in sent],
+                                "misc": [str(token["misc"]) for token in sent],
+                            }
+                            yield counter, res
+                            counter += 1
+                stream.members = []
+                del stream
+                break
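The new `_split_generators` only calls `dl_manager.download(_URL)` and defers all reading to `_generate_examples`: `dl_manager.iter_archive` yields `(path, file_object)` pairs from the outer tarball, and the nested `norec/conllu.tar.gz` member is opened on the fly with `tarfile.open(fileobj=f, mode="r|*")`, a stream mode that never seeks (the trailing `stream.members = []` / `del stream` lines just release the member cache). A self-contained sketch of the same nested-streaming pattern outside the datasets API, using the `_URL` from the script (requires network access; only the first training file is inspected):

```python
import tarfile
import urllib.request

URL = "https://www.mn.uio.no/ifi/english/research/projects/sant/data/norec/norec-1.0.1.tar.gz"

# Stream the outer archive without writing it to disk; "r|gz" reads sequentially,
# the same constraint dl_manager.iter_archive places on the loading script.
with urllib.request.urlopen(URL) as response:
    with tarfile.open(fileobj=response, mode="r|gz") as outer:
        for member in outer:
            if member.name == "norec/conllu.tar.gz":
                # Open the nested archive directly from the outer stream,
                # mirroring tarfile.open(fileobj=f, mode="r|*") in norec.py.
                with tarfile.open(fileobj=outer.extractfile(member), mode="r|gz") as inner:
                    for info in inner:
                        if info.name.startswith("conllu/train") and info.name.endswith(".conllu"):
                            data = inner.extractfile(info).read().decode("utf-8")
                            print(info.name, "->", len(data.splitlines()), "lines")
                            break
                break
```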