Tasks: Token Classification
Sub-tasks: named-entity-recognition
Languages: Hindi
Size: 100K<n<1M
Commit a902ef3 · "changes"
dipteshkanojia committed
1 Parent(s): a2e5678
HiNER-collapsed.py CHANGED (+5 -5)

@@ -60,7 +60,7 @@ class HiNERCollapsedConfig(datasets.GeneratorBasedBuilder):
             citation=_CITATION,
         )
 
-    _URL = "https://huggingface.co/datasets/cfilt/HiNER-collapsed/
+    _URL = "https://huggingface.co/datasets/cfilt/HiNER-collapsed/resolve/main/data/"
     _URLS = {
         "train": _URL + "train_clean.conll",
         "validation": _URL + "validation_clean.conll",
@@ -72,8 +72,8 @@ class HiNERCollapsedConfig(datasets.GeneratorBasedBuilder):
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
 
        return [
-
-
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation"]}),
             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]})
        ]
 
@@ -117,10 +117,10 @@ class HiNERCollapsedConfig(datasets.GeneratorBasedBuilder):
                    # conll2003 tokens are space separated
                    print(guid)
                    splits = line.split("\t")
-                    tokens.append(splits[0])
+                    tokens.append(splits[0].strip())
                    # pos_tags.append(splits[1])
                    # chunk_tags.append(splits[2])
-                    ner_tags.append(splits[
+                    ner_tags.append(splits[1].rstrip())
                    # last example
                    yield guid, {
                        "id": str(guid),