meczifho committed on
Commit 6b3156a · 1 Parent(s): fa13d66

print the loaded configuration

Files changed (3):
  1. WikiNER.py +48 -39
  2. data/test.parquet +2 -2
  3. data/train.parquet +2 -2
WikiNER.py CHANGED
@@ -18,20 +18,33 @@ import pandas as pd
 import datasets
 
 
-
-# Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """
+@inproceedings{ghaddar-langlais-2017-winer,
+    title = "{W}i{NER}: A {W}ikipedia Annotated Corpus for Named Entity Recognition",
+    author = "Ghaddar, Abbas  and
+      Langlais, Phillippe",
+    booktitle = "Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
+    month = nov,
+    year = "2017",
+    address = "Taipei, Taiwan",
+    publisher = "Asian Federation of Natural Language Processing",
+    url = "https://aclanthology.org/I17-1042",
+    pages = "413--422",
+    abstract = "We revisit the idea of mining Wikipedia in order to generate named-entity annotations. We propose a new methodology that we applied to English Wikipedia to build WiNER, a large, high quality, annotated corpus. We evaluate its usefulness on 6 NER tasks, comparing 4 popular state-of-the art approaches. We show that LSTM-CRF is the approach that benefits the most from our corpus. We report impressive gains with this model when using a small portion of WiNER on top of the CONLL training material. Last, we propose a simple but efficient method for exploiting the full range of WiNER, leading to further improvements.",
+}
 """
 
-# You can copy an official description
-_DESCRIPTION = """\
+_DESCRIPTION = """
+Created by Nothman et al. in 2013, the WikiNER dataset
+contains 7,200 manually-labelled Wikipedia articles
+across nine languages: English, German, French,
+Polish, Italian, Spanish, Dutch, Portuguese
+and Russian.
 """
-_URLS = {
-    "en": "https://huggingface.co/datasets/meczifho/WikiNER/tree/main/data",
-    "fr": "https://huggingface.co/datasets/meczifho/WikiNER/tree/main/data",
-}
 
 _HOMEPAGE = ""
+LANGUAGES = ["en", "fr", "de", "es", "it", "nl", "pt", "pl", "ru"]
+_URLS = {lang: "https://huggingface.co/datasets/mnaguib/WikiNER/tree/main/data" for lang in LANGUAGES}
 
 
 class WikiNER(datasets.GeneratorBasedBuilder):
@@ -39,34 +52,31 @@ class WikiNER(datasets.GeneratorBasedBuilder):
     This is the WikiNER dataset. It is a dataset of sentences from Wikipedia with named entities tagged.
     """
 
-    VERSION = datasets.Version("1.1.0")
+    VERSION = datasets.Version("2.0.0")
 
-    # You will be able to load one or the other configurations in the following list with
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="en", version=VERSION, description=""),
-        datasets.BuilderConfig(name="fr", version=VERSION, description=""),
+        datasets.BuilderConfig(name="en", version=VERSION, description="English dataset"),
+        datasets.BuilderConfig(name="fr", version=VERSION, description="French dataset"),
+        datasets.BuilderConfig(name="de", version=VERSION, description="German dataset"),
+        datasets.BuilderConfig(name="es", version=VERSION, description="Spanish dataset"),
+        datasets.BuilderConfig(name="it", version=VERSION, description="Italian dataset"),
+        datasets.BuilderConfig(name="nl", version=VERSION, description="Dutch dataset"),
+        datasets.BuilderConfig(name="pt", version=VERSION, description="Portuguese dataset"),
+        datasets.BuilderConfig(name="pl", version=VERSION, description="Polish dataset"),
+        datasets.BuilderConfig(name="ru", version=VERSION, description="Russian dataset"),
     ]
-
-    DEFAULT_CONFIG_NAME = "en"
+
+    DEFAULT_CONFIG_NAME = "en"
 
     def _info(self):
-        if self.config.name == "fr":  # This is the name of the configuration selected in BUILDER_CONFIGS above
-            features = datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "words": datasets.Sequence(datasets.Value("string")),
-                    "ner_tags": datasets.Sequence(datasets.Value("int32")),
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
-        else:
-            features = datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "words": datasets.Sequence(datasets.Value("string")),
-                    "ner_tags": datasets.Sequence(datasets.Value("int32")),
-                }
-            )
+        features = datasets.Features(
+            {
+                "id": datasets.Value("string"),
+                "words": datasets.Sequence(datasets.Value("string")),
+                "ner_tags": datasets.Sequence(datasets.Value("int32")),
+            }
+        )
+
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
             features=features,
@@ -76,8 +86,10 @@ class WikiNER(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        train_pq = dl_manager.download_and_extract('https://huggingface.co/datasets/meczifho/WikiNER/resolve/main/data/train.parquet')
-        test_pq = dl_manager.download_and_extract('https://huggingface.co/datasets/meczifho/WikiNER/resolve/main/data/test.parquet')
+        train_pq = dl_manager.download_and_extract(
+            'https://huggingface.co/datasets/mnaguib/WikiNER/resolve/main/data/train.parquet')
+        test_pq = dl_manager.download_and_extract(
+            'https://huggingface.co/datasets/mnaguib/WikiNER/resolve/main/data/test.parquet')
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -101,15 +113,12 @@ class WikiNER(datasets.GeneratorBasedBuilder):
     def _generate_examples(self, filepath, split):
        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        #read the parquet file
        df = pd.read_parquet(filepath)
-        if self.config.name == "en":
-            df = df[df['id'].str.startswith("en")]
-        else:
-            df = df[df['id'].str.startswith("fr")]
+        print("Loading only {} examples".format(self.config.name))
+        df = df[df['id'].str.startswith(self.config.name)]
        for key, row in df.iterrows():
            yield key, {
                "id": row["id"],
                "words": row["words"],
-                "ner_tags": row["tags"],
+                "ner_tags": row["ner_tags"],
            }
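
For reference, a minimal usage sketch of the per-language configurations this commit introduces. The repo id meczifho/WikiNER and the config names come from the diff above; trust_remote_code is an assumption for recent `datasets` releases, which require it when loading script-backed datasets like this one.

from datasets import load_dataset

# Select the French configuration. _generate_examples keeps only rows whose
# id starts with "fr", printing "Loading only fr examples" along the way.
# trust_remote_code=True is an assumption for newer `datasets` releases;
# older ones accept load_dataset("meczifho/WikiNER", "fr") alone.
ds = load_dataset("meczifho/WikiNER", "fr", trust_remote_code=True)

example = ds["train"][0]
print(example["id"], example["words"][:5], example["ner_tags"][:5])

Note the design choice visible in the diff: every configuration shares the same train and test parquet files and filters rows by id prefix only at generation time, so loading a single language still downloads the full ~190 MB training file.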
data/test.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fb47bc2c89270bd33ac9353931f0955bf044603513311bfa146b23480a3ecb40
-size 20778038
+oid sha256:6a2730b8676b1157632c8a51037f1b7305f136e93ee19b8a6d259872429e5932
+size 20779194
data/train.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:84d55f1327515b7655c72bd992d560301e50bd69416a4b9e16849748d8e746ec
-size 189882205
+oid sha256:3e91b2ddaff1ee61c53780d3b57e0d305ac9015c5d8ee843087d420a6faa0917
+size 189892537