Tasks: Token Classification
Sub-tasks: named-entity-recognition
Languages: English
Size: 10K - 100K
Tom Aarsen committed • Commit 00dc020 • 1 Parent(s): f260d50

Add 'document_id' and 'sentence_id' columns

Files changed:
- README.md (+7 -3)
- conllpp.py (+12 -0)
README.md CHANGED

@@ -215,11 +215,13 @@ An example of 'train' looks as follows.
 This example was too long and was cropped:
 ```
 {
-    "chunk_tags": [11, 12, 12, 21, 13, 11, 11, 21, 13, 11, 12, 13, 11, 21, 22, 11, 12, 17, 11, 21, 17, 11, 12, 12, 21, 22, 22, 13, 11, 0],
     "id": "0",
-    "ner_tags": [0, 3, 4, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
-    "pos_tags": [12, 22, 22, 38, 15, 22, 28, 38, 15, 16, 21, 35, 24, 35, 37, 16, 21, 15, 24, 41, 15, 16, 21, 21, 20, 37, 40, 35, 21, 7],
+    "document_id": 1,
+    "sentence_id": 3,
     "tokens": ["The", "European", "Commission", "said", "on", "Thursday", "it", "disagreed", "with", "German", "advice", "to", "consumers", "to", "shun", "British", "lamb", "until", "scientists", "determine", "whether", "mad", "cow", "disease", "can", "be", "transmitted", "to", "sheep", "."]
+    "pos_tags": [12, 22, 22, 38, 15, 22, 28, 38, 15, 16, 21, 35, 24, 35, 37, 16, 21, 15, 24, 41, 15, 16, 21, 21, 20, 37, 40, 35, 21, 7],
+    "ner_tags": [0, 3, 4, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+    "chunk_tags": [11, 12, 12, 21, 13, 11, 11, 21, 13, 11, 12, 13, 11, 21, 22, 11, 12, 17, 11, 21, 17, 11, 12, 12, 21, 22, 22, 13, 11, 0],
 }
 ```
 
@@ -229,6 +231,8 @@ The data fields are the same among all splits.
 
 #### conllpp
 - `id`: a `string` feature.
+- `document_id`: an `int32` feature tracking which document the sample is from.
+- `sentence_id`: an `int32` feature tracking which sentence in this document the sample is from.
 - `tokens`: a `list` of `string` features.
 - `pos_tags`: a `list` of classification labels, with possible values including `"` (0), `''` (1), `#` (2), `$` (3), `(` (4).
 - `chunk_tags`: a `list` of classification labels, with possible values including `O` (0), `B-ADJP` (1), `I-ADJP` (2), `B-ADVP` (3), `I-ADVP` (4).
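To illustrate what the two new columns documented above buy downstream users, here is a minimal sketch of regrouping sentences into their source documents with the `datasets` library. It assumes the hub id `"conllpp"` resolves to this repository once the change lands; the regrouping itself is not part of this commit.

```python
from collections import defaultdict

from datasets import load_dataset

# Minimal sketch: rebuild per-document sentence lists from the two new
# columns. Assumes the hub id "conllpp" points at this repository.
dataset = load_dataset("conllpp", split="train")

documents = defaultdict(list)
for example in dataset:
    # (document_id, sentence_id) uniquely identifies a sentence, so the
    # pair can key any per-document regrouping.
    documents[example["document_id"]].append(
        (example["sentence_id"], example["tokens"])
    )

# Examples arrive in file order, but sorting on sentence_id makes the
# within-document order explicit.
for doc_id, sentences in documents.items():
    sentences.sort(key=lambda pair: pair[0])
```
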
conllpp.py CHANGED

@@ -67,6 +67,8 @@ class Conllpp(datasets.GeneratorBasedBuilder):
             features=datasets.Features(
                 {
                     "id": datasets.Value("string"),
+                    "document_id": datasets.Value("int32"),
+                    "sentence_id": datasets.Value("int32"),
                     "tokens": datasets.Sequence(datasets.Value("string")),
                     "pos_tags": datasets.Sequence(
                         datasets.features.ClassLabel(
@@ -191,20 +193,28 @@ class Conllpp(datasets.GeneratorBasedBuilder):
         logging.info("⏳ Generating examples from = %s", filepath)
         with open(filepath, encoding="utf-8") as f:
             guid = 0
+            document_id = 0
+            sentence_id = 0
             tokens = []
             pos_tags = []
             chunk_tags = []
             ner_tags = []
             for line in f:
                 if line.startswith("-DOCSTART-") or line == "" or line == "\n":
+                    if line.startswith("-DOCSTART-"):
+                        document_id += 1
+                        sentence_id = 0
                     if tokens:
                         yield guid, {
                             "id": str(guid),
+                            "document_id": document_id,
+                            "sentence_id": sentence_id,
                             "tokens": tokens,
                             "pos_tags": pos_tags,
                             "chunk_tags": chunk_tags,
                             "ner_tags": ner_tags,
                         }
+                        sentence_id += 1
                         guid += 1
                         tokens = []
                         pos_tags = []
@@ -221,6 +231,8 @@ class Conllpp(datasets.GeneratorBasedBuilder):
             if tokens:
                 yield guid, {
                     "id": str(guid),
+                    "document_id": document_id,
+                    "sentence_id": sentence_id,
                     "tokens": tokens,
                     "pos_tags": pos_tags,
                     "chunk_tags": chunk_tags,