meczifho committed
Commit
1c3ae31
1 Parent(s): 6b3156a

test separate folders

Files changed (3)
  1. WikiNER.py +0 -124
  2. data/test.parquet +0 -3
  3. data/train.parquet +0 -3
WikiNER.py DELETED
@@ -1,124 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import os
- import pandas as pd
-
- import datasets
-
-
- _CITATION = """
- @inproceedings{ghaddar-langlais-2017-winer,
-     title = "{W}i{NER}: A {W}ikipedia Annotated Corpus for Named Entity Recognition",
-     author = "Ghaddar, Abbas and
-       Langlais, Phillippe",
-     booktitle = "Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
-     month = nov,
-     year = "2017",
-     address = "Taipei, Taiwan",
-     publisher = "Asian Federation of Natural Language Processing",
-     url = "https://aclanthology.org/I17-1042",
-     pages = "413--422",
-     abstract = "We revisit the idea of mining Wikipedia in order to generate named-entity annotations. We propose a new methodology that we applied to English Wikipedia to build WiNER, a large, high quality, annotated corpus. We evaluate its usefulness on 6 NER tasks, comparing 4 popular state-of-the art approaches. We show that LSTM-CRF is the approach that benefits the most from our corpus. We report impressive gains with this model when using a small portion of WiNER on top of the CONLL training material. Last, we propose a simple but efficient method for exploiting the full range of WiNER, leading to further improvements.",
- }
- """
-
- _DESCRIPTION = """
38
- Created by Nothman et al. at 2013, the WikiNER Dataset
39
- contains 7,200 manually-labelled Wikipedia articles
40
- across nine languages: English, German, French, Polish,
41
- Italian, Spanish,Dutch, Portuguese and Russian., in
42
- Multi-Lingual language. Containing 7,2 in Text file format.
43
- """
-
- _HOMEPAGE = ""
- LANGUAGES = ["en", "fr", "de", "es", "it", "nl", "pt", "pl", "ru"]
- _URLS = {lang: "https://huggingface.co/datasets/mnaguib/WikiNER/tree/main/data" for lang in LANGUAGES}
-
-
- class WikiNER(datasets.GeneratorBasedBuilder):
-     """
-     This is the WikiNER dataset. It is a dataset of sentences from Wikipedia with named entities tagged.
-     """
-
-     VERSION = datasets.Version("2.0.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="en", version=VERSION, description="English dataset"),
-         datasets.BuilderConfig(name="fr", version=VERSION, description="French dataset"),
-         datasets.BuilderConfig(name="de", version=VERSION, description="German dataset"),
-         datasets.BuilderConfig(name="es", version=VERSION, description="Spanish dataset"),
-         datasets.BuilderConfig(name="it", version=VERSION, description="Italian dataset"),
-         datasets.BuilderConfig(name="nl", version=VERSION, description="Dutch dataset"),
-         datasets.BuilderConfig(name="pt", version=VERSION, description="Portuguese dataset"),
-         datasets.BuilderConfig(name="pl", version=VERSION, description="Polish dataset"),
-         datasets.BuilderConfig(name="ru", version=VERSION, description="Russian dataset"),
-     ]
-
-     DEFAULT_CONFIG_NAME = "en"
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "id": datasets.Value("string"),
-                 "words": datasets.Sequence(datasets.Value("string")),
-                 "ner_tags": datasets.Sequence(datasets.Value("int32")),
-             }
-         )
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             supervised_keys=("words", "ner_tags"),
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         train_pq = dl_manager.download_and_extract(
-             'https://huggingface.co/datasets/mnaguib/WikiNER/resolve/main/data/train.parquet')
-         test_pq = dl_manager.download_and_extract(
-             'https://huggingface.co/datasets/mnaguib/WikiNER/resolve/main/data/test.parquet')
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": train_pq,
-                     "split": "train",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": test_pq,
-                     "split": "test"
-                 },
-             ),
-         ]
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, filepath, split):
-         # This method yields (key, example) tuples from the dataset.
-         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-         df = pd.read_parquet(filepath)
-         print("Loading only {} examples".format(self.config.name))
-         df = df[df['id'].str.startswith(self.config.name)]
-         for key, row in df.iterrows():
-             yield key, {
-                 "id": row["id"],
-                 "words": row["words"],
-                 "ner_tags": row["ner_tags"],
-             }
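
With the loading script gone, the splits are served straight from the parquet files under data/. A minimal sketch of an equivalent manual load, mirroring the per-language filter in the deleted _generate_examples above; it assumes pandas (with an fsspec HTTP backend) can fetch the resolve/main URLs used by the old _split_generators, and that row ids are still prefixed with the language code:

import pandas as pd

# Same file the deleted _split_generators downloaded via dl_manager
# (URL taken from the old script; assumed to still resolve).
url = "https://huggingface.co/datasets/mnaguib/WikiNER/resolve/main/data/train.parquet"
train = pd.read_parquet(url)

# Row ids carry a language-code prefix (e.g. "fr"), so one language is
# selected exactly as the deleted _generate_examples did.
lang = "fr"
train_lang = train[train["id"].str.startswith(lang)]
print(train_lang[["id", "words", "ner_tags"]].head())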
data/test.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:6a2730b8676b1157632c8a51037f1b7305f136e93ee19b8a6d259872429e5932
- size 20779194
data/train.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:3e91b2ddaff1ee61c53780d3b57e0d305ac9015c5d8ee843087d420a6faa0917
- size 189892537
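
Note that the two deleted data entries above are Git LFS pointer files (spec version, sha256 oid, and byte size), not the parquet payloads themselves; the train file is roughly 190 MB. A hedged sketch of fetching the real payload through huggingface_hub, assuming the files remain at these paths in the dataset repo:

from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and downloads the actual parquet payload
# into the local Hugging Face cache, returning its path.
path = hf_hub_download(
    repo_id="mnaguib/WikiNER",
    filename="data/train.parquet",
    repo_type="dataset",  # this is a dataset repo, not a model repo
)
print(path)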