Tasks: Token Classification · Modalities: Text · Formats: json · Sub-tasks: named-entity-recognition · Size: 10K - 100K · License: cc-by-4.0

joelniklaus committed · Commit ec755d7 · Parent(s): 3984c49

changed notation to IOB

Changed files:
- README.md (+57 -1)
- convert_to_hf_dataset.py (+32 -2)
- test.jsonl (+2 -2)
- train.jsonl (+2 -2)
- validation.jsonl (+2 -2)
README.md
CHANGED

@@ -3,7 +3,7 @@ annotations_creators:
 - other
 language_creators:
 - found
-languages:
+language:
 - bg, cs, da, de, el, en, es, et, fi, fr, ga, hu, it, lt, lv, mt, nl, pt, ro, sk, sv
 license:
 - cc-by-4.0
@@ -148,6 +148,62 @@ The tagset used for the global and the fine-grained named entities is the following:
 - Model
 - Type
 
+The final coarse-grained tagset (in IOB notation) is the following:
+
+`['O', 'B-ORGANISATION', 'I-ORGANISATION', 'B-ADDRESS', 'I-ADDRESS', 'B-DATE', 'I-DATE', 'B-PERSON', 'I-PERSON', 'B-AMOUNT', 'I-AMOUNT', 'B-TIME', 'I-TIME']`
+
+
+The final fine-grained tagset (in IOB notation) is the following:
+
+`[
+'o',
+'b-day',
+'i-day',
+'b-month',
+'i-month',
+'b-year',
+'i-year',
+'b-title',
+'i-title',
+'b-family name',
+'i-family name',
+'b-initial name',
+'i-initial name',
+'b-age',
+'i-age',
+'b-value',
+'i-value',
+'b-unit',
+'i-unit',
+'b-country',
+'i-country',
+'b-city',
+'i-city',
+'b-place',
+'i-place',
+'b-territory',
+'i-territory',
+'b-role',
+'i-role',
+'b-profession',
+'i-profession',
+'b-marital status',
+'i-marital status',
+'b-url',
+'i-url',
+'b-ethnic category',
+'i-ethnic category',
+'b-standard abbreviation',
+'i-standard abbreviation',
+'b-type',
+'i-type',
+'b-building',
+'i-building',
+'b-nationality',
+'i-nationality',
+]`
+
+
 ### Data Splits
 
 Splits created by Joel Niklaus.
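For context, IOB (inside-outside-beginning) notation marks the first token of an entity span with a `B-` prefix, every following token of the same span with `I-`, and all non-entity tokens with `O`. A minimal sketch with invented tokens and labels, drawn from the coarse-grained tagset above:

```python
# Invented example: IOB labels for a seven-token sentence.
# "500 euros" is a two-token AMOUNT span, "Joan Miro" a two-token PERSON span.
tokens = ["Payment", "of", "500", "euros", "to", "Joan", "Miro"]
labels = ["O", "O", "B-AMOUNT", "I-AMOUNT", "O", "B-PERSON", "I-PERSON"]

# In the jsonl files, each sentence is one record of parallel lists
# (the field names tokens/coarse_grained come from the conversion script below).
record = {"tokens": tokens, "coarse_grained": labels}
assert len(record["tokens"]) == len(record["coarse_grained"])
```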
convert_to_hf_dataset.py
CHANGED

@@ -26,6 +26,9 @@ annotation_labels = {'ADDRESS': ['building', 'city', 'country', 'place', 'postco
 # make all coarse_grained upper case and all fine_grained lower case
 annotation_labels = {key.upper(): [label.lower() for label in labels] for key, labels in annotation_labels.items()}
 print(annotation_labels)
+print("coarse_grained:", list(annotation_labels.keys()))
+print("fine_grained:",
+      [label for labels in annotation_labels.values() for label in labels])
 
 base_path = Path("extracted")
 
@@ -92,15 +95,31 @@ def get_token_annotations(token, annotations):
         if token.start >= annotation.start and token.stop <= annotation.stop:  # coarse_grained annotation
             # we don't support multilabel annotations per token, for simplicity.
             # So when a token already has an annotation for either coarse or fine grained, we don't assign new ones.
-            if coarse_grained
+            if coarse_grained == "O" and is_coarse_grained(label):
                 coarse_grained = label
-            elif fine_grained
+            elif fine_grained == "o" and is_fine_grained(label):
                 # some DATE are mislabeled as day, but it is hard to correct this, so we ignore it
                 fine_grained = label
 
     return coarse_grained.upper(), fine_grained.lower()
 
 
+def generate_IOB_labelset(series, casing_function):
+    last_ent = ""
+    new_series = []
+    for ent in series:
+        if ent in ["o", "O"]:
+            ent_to_add = ent
+        else:
+            if ent != last_ent:  # first token of a new entity span
+                ent_to_add = "B-" + ent
+            else:  # continuation of the current span
+                ent_to_add = "I-" + ent
+        new_series.append(casing_function(ent_to_add))
+        last_ent = ent
+    return new_series
+
+
 def get_annotated_sentence(result_sentence, sentence):
     result_sentence["tokens"] = []
     result_sentence["coarse_grained"] = []
@@ -112,6 +131,8 @@ def get_annotated_sentence(result_sentence, sentence):
         result_sentence["tokens"].append(token)
         result_sentence["coarse_grained"].append(coarse_grained)
         result_sentence["fine_grained"].append(fine_grained)
+    result_sentence["coarse_grained"] = generate_IOB_labelset(result_sentence["coarse_grained"], str.upper)
+    result_sentence["fine_grained"] = generate_IOB_labelset(result_sentence["fine_grained"], str.lower)
     return result_sentence
 
 
@@ -147,6 +168,8 @@ for language in languages:
     df, not_parsable_files = parse_files(language)
     file_names = df.file_name.unique()
 
+    # df.coarse_grained.apply(lambda x: print(set(x)))
+
     # split by file_name
     num_fn = len(file_names)
     train_fn, validation_fn, test_fn = np.split(np.array(file_names), [int(.8 * num_fn), int(.9 * num_fn)])
@@ -178,6 +201,13 @@ train = pd.concat(train_dfs)
 validation = pd.concat(validation_dfs)
 test = pd.concat(test_dfs)
 
+df = pd.concat([train, validation, test])
+print(f"The final coarse-grained tagset (in IOB notation) is the following: "
+      f"`{list(df.coarse_grained.explode().unique())}`")
+print(f"The final fine-grained tagset (in IOB notation) is the following: "
+      f"`{list(df.fine_grained.explode().unique())}`")
+
+
 # save splits
 def save_splits_to_jsonl(config_name):
     # save to jsonl files for huggingface
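To see what the new `generate_IOB_labelset` helper and the tagset-printing code do together, here is a small self-contained sketch; the function body mirrors the diff above, while the sample label sequences are invented:

```python
# Sketch: convert per-token entity labels to IOB notation, then recover the
# final tagset the way the script does, via pandas explode().unique().
import pandas as pd


def generate_IOB_labelset(series, casing_function):
    last_ent = ""
    new_series = []
    for ent in series:
        if ent in ["o", "O"]:
            ent_to_add = ent
        else:
            if ent != last_ent:  # first token of a new entity span
                ent_to_add = "B-" + ent
            else:  # continuation of the current span
                ent_to_add = "I-" + ent
        new_series.append(casing_function(ent_to_add))
        last_ent = ent
    return new_series


# Two invented sentences with plain (non-IOB) per-token labels.
sentences = [
    ["O", "PERSON", "PERSON", "O"],
    ["DATE", "DATE", "O", "AMOUNT"],
]
df = pd.DataFrame(
    {"coarse_grained": [generate_IOB_labelset(s, str.upper) for s in sentences]}
)
print(list(df.coarse_grained.explode().unique()))
# ['O', 'B-PERSON', 'I-PERSON', 'B-DATE', 'I-DATE', 'B-AMOUNT']
```

One property worth noting: because the helper only compares each label to its predecessor, two same-type entities that are adjacent with no `O` token between them are merged into a single span.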
test.jsonl
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:9c04197907b6539cf2020f34f91fb643c91dcc8590fc3b2301d765e130eb4e06
+size 7717849
train.jsonl
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:c2f305e569d60293e6a808e176c8ad82e97f2019967bb424bac3d3696d61adee
+size 22116076
validation.jsonl
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b3dea9d63c656d0db6cf306296252e1adceed09991abfb650d040b928c555684
+size 2874796
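The three .jsonl files are stored with Git LFS, so the diffs above change pointer files rather than the data itself: each pointer records the LFS spec version, the sha256 oid of the tracked blob, and its size in bytes. The new oids and sizes correspond to the splits regenerated in IOB notation.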