blunt700 committed on
Commit
689cc8a
·
verified ·
1 Parent(s): 1e63b27

Upload weaver-ner.py

Browse files
Files changed (1) hide show
  1. weaver-ner.py +153 -0
weaver-ner.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition"""
18
+
19
import os

import datasets

# Module-level logger, obtained via the `datasets` library's logging helper
# so log output integrates with the library's verbosity settings.
logger = datasets.logging.get_logger(__name__)
24
+
25
_CITATION = """\
@inproceedings{tjong-kim-sang-de-meulder-2003-introduction,
    title = "Introduction to the {C}o{NLL}-2003 Shared Task: Language-Independent Named Entity Recognition",
    author = "Tjong Kim Sang, Erik F.  and
      De Meulder, Fien",
    booktitle = "Proceedings of the Seventh Conference on Natural Language Learning at {HLT}-{NAACL} 2003",
    year = "2003",
    url = "https://www.aclweb.org/anthology/W03-0419",
    pages = "142--147",
}
"""

_DESCRIPTION = """\
The shared task of CoNLL-2003 concerns language-independent named entity recognition. We will concentrate on
four types of named entities: persons, locations, organizations and names of miscellaneous entities that do
not belong to the previous three groups.
The CoNLL-2003 shared task data files contain four columns separated by a single space. Each word has been put on
a separate line and there is an empty line after each sentence. The first item on each line is a word, the second
a part-of-speech (POS) tag, the third a syntactic chunk tag and the fourth the named entity tag. The chunk tags
and the named entity tags have the format I-TYPE which means that the word is inside a phrase of type TYPE. Only
if two phrases of the same type immediately follow each other, the first word of the second phrase will have tag
B-TYPE to show that it starts a new phrase. A word with tag O is not part of a phrase. Note the dataset uses IOB2
tagging scheme, whereas the original dataset uses IOB1.
For more details see https://www.clips.uantwerpen.be/conll2003/ner/ and https://www.aclweb.org/anthology/W03-0419
"""

# Directory containing the train/dev CSV files.
# The original script hard-coded a machine-specific Windows path; allow an
# environment-variable override so the script runs on other machines, while
# keeping the original path as the default for backward compatibility.
_URL = os.environ.get(
    "WEAVER_NER_DATA_DIR",
    "D:\\workspace\\traveller\\huashan\\ner\\weaver-ner",
)
# CoNLL-style CSV file names expected inside _URL.
_TRAINING_FILE = "train_data_conll.csv"
_DEV_FILE = "val_data_conll.csv"
55
+
56
+
57
class CustomNerDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the custom NER dataset.

    A thin subclass that simply forwards every keyword argument to
    ``datasets.BuilderConfig`` — it exists so the builder below can declare
    its own config type.
    """

    def __init__(self, **kwargs):
        """Initialize the config.

        Args:
            **kwargs: keyword arguments forwarded to the parent BuilderConfig.
        """
        super().__init__(**kwargs)
66
+
67
+
68
class CustomNerDataset(datasets.GeneratorBasedBuilder):
    """Custom NER dataset loader for CoNLL-style files (one ``token tag`` pair per line,
    blank line between sentences)."""

    BUILDER_CONFIGS = [
        CustomNerDatasetConfig(name="custom_ner", version=datasets.Version("1.0.0"), description="custom ner dataset"),
    ]

    def _info(self):
        """Return dataset metadata: the feature schema, homepage, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # BUGFIX: _generate_examples yields an "id" key for every
                    # example; it must be declared in the feature schema or
                    # dataset generation fails with an unexpected-key error.
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-bod",
                                # NOTE(review): "I-bob" breaks the B-xxx/I-xxx
                                # pairing (expected "I-bod"). Left unchanged
                                # because ClassLabel names must match the tags
                                # in the CSV files — verify against the data.
                                "I-bob",
                                "B-dep",
                                "I-dep",
                                "B-dis",
                                "I-dis",
                                "B-dru",
                                "I-dru",
                                "B-equ",
                                "I-equ",
                                "B-ite",
                                "I-ite",
                                "B-mic",
                                "I-mic",
                                "B-pro",
                                "I-pro",
                                "B-sym",
                                "I-sym",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://www.aclweb.org/anthology/W03-0419/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Map the local train/dev CSV files to the TRAIN and VALIDATION splits.

        `dl_manager` is unused because the files live on local disk (_URL)
        rather than behind download URLs.
        """
        data_files = {
            "train": os.path.join(_URL, _TRAINING_FILE),
            "dev": os.path.join(_URL, _DEV_FILE),
        }

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
            # No test split is provided for this dataset.
        ]

    def _generate_examples(self, filepath):
        """Yield ``(guid, example)`` pairs parsed from a CoNLL-style file.

        Each non-blank line is expected to be ``<token> <ner_tag>`` separated
        by a single space; a blank line (or a ``tokens`` header line) marks a
        sentence boundary.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                # A header line ("tokens ...") or a blank line ends the
                # current sentence; emit it if any tokens were collected.
                if line.startswith("tokens") or line == "" or line == "\n":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    # Token and tag are space separated; rstrip drops the
                    # trailing newline from the tag column.
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    ner_tags.append(splits[1].rstrip())
            # Flush the last sentence — the file may not end with a blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }
+ }