Datasets: mac_morpho
Tasks: Token Classification
Sub-tasks: part-of-speech
Languages: Portuguese
Size: 10K<n<100K
License:
Update files from the datasets library (from 1.16.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
- README.md +1 -0
- mac_morpho.py +33 -33
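
The substance of the update to mac_morpho.py, shown in the diff below, is that the loading script stops reading pre-extracted text files and instead iterates over the downloaded archive, the pattern the datasets 1.16.0 release rolled out so that datasets can be streamed. A standalone sketch of that pattern, assuming a recent datasets version; the URL is a placeholder standing in for the script's _URL, not the real archive location:

import datasets

# Placeholder for the script's _URL (assumption); substitute the real archive location.
ARCHIVE_URL = "https://example.com/macmorpho.zip"

dl_manager = datasets.DownloadManager()
archive = dl_manager.download(ARCHIVE_URL)         # download only, no extraction to disk
for path, f in dl_manager.iter_archive(archive):   # yields (member name, file object) pairs
    if path == "macmorpho-train.txt":
        # archive members are opened in binary mode, hence the explicit decode
        first_line = f.readline().decode("utf-8")
        print(first_line)
        break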
README.md
CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: Mac-Morpho
 annotations_creators:
 - expert-generated
 language_creators:
mac_morpho.py
CHANGED
@@ -14,8 +14,6 @@
 # limitations under the License.
 """Mac-Morpho dataset"""
 
-
-import os
 import re
 
 import datasets
@@ -121,51 +119,53 @@ class MacMorpho(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-
+        archive = dl_manager.download(_URL)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "filepath":
-                    "
+                    "filepath": "macmorpho-train.txt",
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                gen_kwargs={
+                gen_kwargs={
+                    "filepath": "macmorpho-test.txt",
+                    "files": dl_manager.iter_archive(archive),
+                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "filepath":
-                    "
+                    "filepath": "macmorpho-dev.txt",
+                    "files": dl_manager.iter_archive(archive),
                },
             ),
         ]
 
-    def _generate_examples(self, filepath,
+    def _generate_examples(self, filepath, files):
         """Yields examples."""
-        ...
-        id_ += 1
+        for path, f in files:
+            if path == filepath:
+                id_ = 0
+
+                for line in f:
+
+                    line = line.decode("utf-8").rstrip()
+                    chunks = re.split(r"\s+", line)
+
+                    tokens = []
+                    pos_tags = []
+                    for chunk in chunks:
+                        token, tag = chunk.rsplit("_", 1)
+                        tokens.append(token)
+                        pos_tags.append(tag)
+
+                    yield id_, {
+                        "id": str(id_),
+                        "tokens": tokens,
+                        "pos_tags": pos_tags,
+                    }
+                    id_ += 1
+                break
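
Since the script now reads examples straight out of the downloaded archive, the dataset can be consumed as before, and streaming should work as well. A minimal usage sketch, assuming the Hub dataset id mac_morpho and the feature names visible in the diff above (id, tokens, pos_tags):

from datasets import load_dataset

# Regular loading: the archive is downloaded once and the .txt splits are read
# from inside it rather than being extracted to disk first.
ds = load_dataset("mac_morpho")
example = ds["train"][0]
print(example["tokens"])    # word tokens of the first sentence
print(example["pos_tags"])  # aligned part-of-speech tags

# Streaming mode, which iter_archive-based scripts are meant to support.
streamed = load_dataset("mac_morpho", split="train", streaming=True)
print(next(iter(streamed))["id"])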