import os
import json
from typing import List

from datasets import load_dataset

# IOB2 label scheme for the seven TweetNER7 entity types
label2id = {
    "B-corporation": 0,
    "B-creative_work": 1,
    "B-event": 2,
    "B-group": 3,
    "B-location": 4,
    "B-person": 5,
    "B-product": 6,
    "I-corporation": 7,
    "I-creative_work": 8,
    "I-event": 9,
    "I-group": 10,
    "I-location": 11,
    "I-person": 12,
    "I-product": 13,
    "O": 14
}
id2label = {v: k for k, v in label2id.items()}


def decode_ner_tags(tag_sequence: List[int], input_sequence: List[str]):
    """Decode a sequence of IOB2 tag ids into a list of entity spans."""

    def update_collection(_tmp_entity, _tmp_entity_type, _tmp_pos, _out):
        # flush the current entity buffer into the output, then reset it
        if len(_tmp_entity) != 0 and _tmp_entity_type is not None:
            _out.append({'type': _tmp_entity_type, 'entity': _tmp_entity, 'position': _tmp_pos})
            _tmp_entity = []
            _tmp_entity_type = None
            _tmp_pos = []
        return _tmp_entity, _tmp_entity_type, _tmp_pos, _out

    assert len(tag_sequence) == len(input_sequence), str([len(tag_sequence), len(input_sequence)])
    out = []
    tmp_entity = []
    tmp_pos = []
    tmp_entity_type = None
    for n, (_l, _i) in enumerate(zip(tag_sequence, input_sequence)):
        _l = id2label[_l]
        if _l.startswith('B-'):
            # a new entity starts: flush any open entity, then open a fresh buffer
            _, _, _, out = update_collection(tmp_entity, tmp_entity_type, tmp_pos, out)
            tmp_entity_type = '-'.join(_l.split('-')[1:])
            tmp_entity = [_i]
            tmp_pos = [n]
        elif _l.startswith('I-'):
            tmp_tmp_entity_type = '-'.join(_l.split('-')[1:])
            if len(tmp_entity) == 0:
                # an 'I' tag that does not follow a 'B' tag: flush and skip it
                tmp_entity, tmp_entity_type, tmp_pos, out = update_collection(tmp_entity, tmp_entity_type, tmp_pos, out)
            elif tmp_tmp_entity_type != tmp_entity_type:
                # the 'I' type does not match the open entity's type: flush and skip it
                tmp_entity, tmp_entity_type, tmp_pos, out = update_collection(tmp_entity, tmp_entity_type, tmp_pos, out)
            else:
                tmp_entity.append(_i)
                tmp_pos.append(n)
        elif _l == 'O':
            tmp_entity, tmp_entity_type, tmp_pos, out = update_collection(tmp_entity, tmp_entity_type, tmp_pos, out)
        else:
            raise ValueError('unknown tag: {}'.format(_l))
    # flush an entity that is still open at the end of the sequence
    _, _, _, out = update_collection(tmp_entity, tmp_entity_type, tmp_pos, out)
    return out
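

# Quick sanity check of the decoder on a hand-made example; the tokens and
# tag ids below are illustrative, not taken from the dataset.
assert decode_ner_tags([5, 12, 14], ["Paul", "McCartney", "sings"]) == \
    [{'type': 'person', 'entity': ['Paul', 'McCartney'], 'position': [0, 1]}]
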
os.makedirs("data/tweet_ner7", exist_ok=True)
data = load_dataset("tner/tweetner7")  # download TweetNER7 from the Hugging Face Hub


def process(tmp):
    """Convert one dataset split (as a pandas dataframe) into JSON-serializable records."""
    tmp = [i.to_dict() for _, i in tmp.iterrows()]
    for i in tmp:
        i.pop("id")
        entities = decode_ner_tags(i['tags'].tolist(), i['tokens'].tolist())
        for e in entities:
            # keep only the entity type and its surface string
            e.pop("position")
            e["entity"] = " ".join(e["entity"])
        i['gold_label_sequence'] = i.pop('tags').tolist()
        i['text_tokenized'] = i.pop('tokens').tolist()
        i['text'] = ' '.join(i['text_tokenized'])
        i['entities'] = entities
    return tmp
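

# Each record produced by `process` has the shape (values illustrative):
#   {"gold_label_sequence": [5, 12, 14, ...],
#    "text_tokenized": ["Paul", "McCartney", ...],
#    "text": "Paul McCartney ...",
#    "entities": [{"type": "person", "entity": "Paul McCartney"}]}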

# TweetNER7 splits: train/validation come from 2020, the test set from 2021
train = process(data["train_2020"].to_pandas())
val = process(data["validation_2020"].to_pandas())
test = process(data["test_2021"].to_pandas())
with open("data/tweet_ner7/train.jsonl", "w") as f:
f.write("\n".join([json.dumps(i) for i in train]))
with open("data/tweet_ner7/validation.jsonl", "w") as f:
f.write("\n".join([json.dumps(i) for i in val]))
with open("data/tweet_ner7/test.jsonl", "w") as f:
f.write("\n".join([json.dumps(i) for i in test]))