Upload dataset script
saw_corpus.py +64 -0
saw_corpus.py
ADDED
@@ -0,0 +1,64 @@
import json

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
The Selective Armenian Web (SAW) Corpus is an assembled collection of Armenian-language texts sourced from various online platforms. The dataset is designed to support and enhance natural language processing tasks specifically for the Armenian language, offering a diverse range of texts that include news articles, legal documents, and other web content.
"""

_CITATION = """\
@dataset{saw_corpus_2024,
  title  = {Selective Armenian Web (SAW) Corpus},
  author = {Mkrtich Minasyan},
  year   = {2024}
}
"""

_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"


class SAWCorpus(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Selective Armenian Web (SAW) Corpus."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        # Each record carries the raw text plus its provenance metadata.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "text": datasets.Value("string"),
                "url": datasets.Value("string"),
                "date": datasets.Value("string"),
                "tags": datasets.Sequence(datasets.Value("string")),
                "source": datasets.Value("string"),
            }),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/MMinasyan/SAW-corpus",
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        # JSONL files holding the train/validation/test splits.
        urls = {
            "train": "https://huggingface.co/datasets/SAW-corpus/train.jsonl",
            "validation": "https://huggingface.co/datasets/SAW-corpus/val.jsonl",
            "test": "https://huggingface.co/datasets/SAW-corpus/test.jsonl",
        }
        downloaded_files = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        logger.info("Generating examples from = %s", filepath)
        # One JSON object per line; the enumeration index serves as the example id.
        with open(filepath, encoding="utf-8") as f:
            for id_, line in enumerate(f):
                data = json.loads(line)
                yield id_, {
                    "text": data["text"],
                    "url": data["url"],
                    "date": data["date"],
                    "tags": data["tags"],
                    "source": data["source"],
                }
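Once this script and the JSONL split files are present in the dataset repository, the corpus can be loaded through the standard `datasets` API. A minimal sketch, assuming the repository id `MMinasyan/SAW-corpus` (taken from the homepage field above) and that the split files resolve as referenced in `_split_generators`:

from datasets import load_dataset

# Repository id assumed from the homepage declared in _info().
saw = load_dataset("MMinasyan/SAW-corpus")

example = saw["train"][0]
print(example["text"][:200])  # first 200 characters of the document text
print(example["source"], example["tags"])  # provenance metadata

Keeping the url, date, tags, and source fields alongside the raw text lets downstream users filter the corpus (for example, by source site or tag) without re-crawling the original pages.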