Modalities: Text
Formats: parquet
Libraries: Datasets, pandas
License: CC-BY-SA-4.0
albertvillanova committed
Commit 8b47668
1 Parent(s): 09dc404

Delete loading script
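With the script removed, the Hub presumably serves this dataset from its auto-converted Parquet files (note the "Formats: parquet" tag above), so `load_dataset` no longer executes any custom code. A minimal sketch of loading after this commit, assuming the repo id `bianet` and using one of the config names ("en_to_ku", "en_to_tr", "ku_to_tr") defined by the deleted script below:

    from datasets import load_dataset

    # "en_to_tr" follows the "%s_to_%s" config naming in the deleted BianetConfig.
    ds = load_dataset("bianet", "en_to_tr", split="train")
    print(ds[0])  # {"id": "0", "translation": {"en": "...", "tr": "..."}}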

Files changed (1):
  1. bianet.py +0 -148
bianet.py DELETED
@@ -1,148 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Bianet: A parallel news corpus in Turkish, Kurdish and English"""
-
-
-import os
-
-import datasets
-
-
-_CITATION = """\
-@InProceedings{ATAMAN18.6,
-  author = {Duygu Ataman},
-  title = {Bianet: A Parallel News Corpus in Turkish, Kurdish and English},
-  booktitle = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
-  year = {2018},
-  month = {may},
-  date = {7-12},
-  location = {Miyazaki, Japan},
-  editor = {Jinhua Du and Mihael Arcan and Qun Liu and Hitoshi Isahara},
-  publisher = {European Language Resources Association (ELRA)},
-  address = {Paris, France},
-  isbn = {979-10-95546-15-3},
-  language = {english}
-}"""
-
-_HOMEPAGE = "http://opus.nlpl.eu/Bianet.php"
-
-
-_LICENSE = "CC-BY-SA-4.0"
-
-_VALID_LANGUAGE_PAIRS = {
-    ("en", "ku"): "http://opus.nlpl.eu/download.php?f=Bianet/v1/moses/en-ku.txt.zip",
-    ("en", "tr"): "http://opus.nlpl.eu/download.php?f=Bianet/v1/moses/en-tr.txt.zip",
-    ("ku", "tr"): "http://opus.nlpl.eu/download.php?f=Bianet/v1/moses/ku-tr.txt.zip",
-}
-
-_VERSION = "1.0.0"
-
-_DESCRIPTION = """\
-A parallel news corpus in Turkish, Kurdish and English.
-Bianet collects 3,214 Turkish articles with their sentence-aligned Kurdish or English translations from the Bianet online newspaper.
-3 languages, 3 bitexts
-total number of files: 6
-total number of tokens: 2.25M
-total number of sentence fragments: 0.14M
-"""
-
-_BASE_NAME = "Bianet.{}-{}.{}"
-
-
-class BianetConfig(datasets.BuilderConfig):
-    """BuilderConfig for Bianet: A parallel news corpus in Turkish, Kurdish and English"""
-
-    def __init__(self, language_pair=(None, None), **kwargs):
-        """BuilderConfig for Bianet: A parallel news corpus in Turkish, Kurdish and English.
-        The first language in `language_pair` should consist of two strings joined by
-        an underscore (e.g. "en-tr").
-        Args:
-            language_pair: pair of languages that will be used for translation.
-            **kwargs: keyword arguments forwarded to super.
-        """
-        name = "%s_to_%s" % (language_pair[0], language_pair[1])
-
-        description = ("Translation dataset from %s to %s or %s to %s.") % (
-            language_pair[0],
-            language_pair[1],
-            language_pair[1],
-            language_pair[0],
-        )
-        super(BianetConfig, self).__init__(
-            name=name, description=description, version=datasets.Version(_VERSION, ""), **kwargs
-        )
-
-        # Validate language pair.
-        assert language_pair in _VALID_LANGUAGE_PAIRS, (
-            "Config language pair (%s, " "%s) not supported"
-        ) % language_pair
-
-        self.language_pair = language_pair
-
-
-class Bianet(datasets.GeneratorBasedBuilder):
-
-    BUILDER_CONFIGS = [
-        BianetConfig(
-            language_pair=pair,
-        )
-        for pair in _VALID_LANGUAGE_PAIRS.keys()
-    ]
-
-    BUILDER_CONFIG_CLASS = BianetConfig
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "translation": datasets.Translation(languages=tuple(self.config.language_pair)),
-                },
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        download_url = _VALID_LANGUAGE_PAIRS.get(tuple(self.config.language_pair))
-        path = dl_manager.download_and_extract(download_url)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"datapath": path},
-            )
-        ]
-
-    def _generate_examples(self, datapath):
-        lang1, lang2 = self.config.language_pair
-        lang1_file = _BASE_NAME.format(lang1, lang2, lang1)
-        lang2_file = _BASE_NAME.format(lang1, lang2, lang2)
-        lang1_path = os.path.join(datapath, lang1_file)
-        lang2_path = os.path.join(datapath, lang2_file)
-
-        with open(lang1_path, encoding="utf-8") as f1, open(lang2_path, encoding="utf-8") as f2:
-            for sentence_counter, (x, y) in enumerate(zip(f1, f2)):
-                x = x.strip()
-                y = y.strip()
-                result = (
-                    sentence_counter,
-                    {
-                        "id": str(sentence_counter),
-                        "translation": {lang1: x, lang2: y},
-                    },
-                )
-                yield result
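For reference, the deleted script's behavior can be reproduced without the `datasets` builder machinery. A rough standalone sketch, reusing the OPUS URL from _VALID_LANGUAGE_PAIRS and the Bianet.{pair}.{lang} file-name template from _BASE_NAME above; the stdlib download and zip handling stand in for `dl_manager.download_and_extract` and are an assumption of this sketch, not part of the original script:

    import io
    import urllib.request
    import zipfile

    # URL and file names taken from the deleted script above.
    url = "http://opus.nlpl.eu/download.php?f=Bianet/v1/moses/en-tr.txt.zip"
    lang1, lang2 = "en", "tr"

    # Download the moses-format archive into memory (assumed stand-in for dl_manager).
    with urllib.request.urlopen(url) as resp:
        archive = zipfile.ZipFile(io.BytesIO(resp.read()))

    # Two line-aligned files, one sentence per line per language.
    src = archive.read("Bianet.{}-{}.{}".format(lang1, lang2, lang1)).decode("utf-8").splitlines()
    tgt = archive.read("Bianet.{}-{}.{}".format(lang1, lang2, lang2)).decode("utf-8").splitlines()

    # Mirror _generate_examples: pair the lines and emit id + translation dicts.
    examples = [
        {"id": str(i), "translation": {lang1: x.strip(), lang2: y.strip()}}
        for i, (x, y) in enumerate(zip(src, tgt))
    ]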