sadrasabouri committed
Commit 437229a
Parent(s): f889754

Update naab-raw.py

naab-raw.py CHANGED (+19 -11)
@@ -21,9 +21,17 @@ import os
 import datasets
 
 
-# TODO: Add BibTeX citation
-# Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
+@misc{https://doi.org/10.48550/arxiv.2208.13486,
+  doi = {10.48550/ARXIV.2208.13486},
+  url = {https://arxiv.org/abs/2208.13486},
+  author = {Sabouri, Sadra and Rahmati, Elnaz and Gooran, Soroush and Sameti, Hossein},
+  keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
+  title = {naab: A ready-to-use plug-and-play corpus for Farsi},
+  publisher = {arXiv},
+  year = {2022},
+  copyright = {Creative Commons Attribution Non Commercial Share Alike 4.0 International}
+}
 """
 
 # You can copy an official description
@@ -33,7 +41,6 @@ Huge corpora of textual data are always known to be a crucial need for training
 
 _HOMEPAGE = "https://huggingface.co/datasets/SLPL/naab"
 
-# TODO: ?
 _LICENSE = "mit"
 
 _BASE_URL = "https://huggingface.co/datasets/SLPL/naab/resolve/main/data/"
@@ -93,17 +100,18 @@ class NaabRawConfig(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "
+                    "filepaths": downloaded_files,
                     "split": "train"
                 }
             )
         ]
 
 
-    def _generate_examples(self,
-
-
-
-
-
-
+    def _generate_examples(self, filepaths, split):
+        for filepath in filepaths:
+            with open(filepath, encoding="utf-8") as f:
+                for key, row in enumerate(f):
+                    if row.strip():
+                        yield key, {"text": row}
+                    else:
+                        yield key, {"text": ""}
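The new _generate_examples streams the downloaded text shards line by line, yielding one example per line and an empty string for blank lines, while _split_generators now passes the downloaded file paths through gen_kwargs. A minimal usage sketch of how the updated loader would typically be exercised (not part of the commit; the repository id "SLPL/naab-raw" is an assumption inferred from the script's file name):

# Minimal usage sketch (assumption: this loading script is hosted as the
# dataset repo "SLPL/naab-raw"; the id is inferred from the file name and
# may differ in practice).
from datasets import load_dataset

# load_dataset drives _split_generators (which fills gen_kwargs with the
# downloaded file paths) and then _generate_examples for the "train" split.
raw = load_dataset("SLPL/naab-raw", split="train")

print(raw[0])  # e.g. {'text': '...'} -- one raw line of text per example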