holylovenia committed
Commit 93f9a52 · verified · 1 Parent(s): ca2f503

Upload cc_aligned_sent.py with huggingface_hub

Files changed (1)
  1. cc_aligned_sent.py +167 -0
cc_aligned_sent.py ADDED
@@ -0,0 +1,167 @@
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ from datasets.download.download_manager import DownloadManager
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = r"""
+ @inproceedings{chaudhary-etal-2019-low,
+     title = "Low-Resource Corpus Filtering Using Multilingual Sentence Embeddings",
+     author = "Chaudhary, Vishrav and
+         Tang, Yuqing and
+         Guzm{\'a}n, Francisco and
+         Schwenk, Holger and
+         Koehn, Philipp",
+     editor = "Bojar, Ond{\v{r}}ej and
+         Chatterjee, Rajen and
+         Federmann, Christian and
+         Fishel, Mark and
+         Graham, Yvette and
+         Haddow, Barry and
+         Huck, Matthias and
+         Yepes, Antonio Jimeno and
+         Koehn, Philipp and
+         Martins, Andr{\'e} and
+         Monz, Christof and
+         Negri, Matteo and
+         N{\'e}v{\'e}ol, Aur{\'e}lie and
+         Neves, Mariana and
+         Post, Matt and
+         Turchi, Marco and
+         Verspoor, Karin",
+     booktitle = "Proceedings of the Fourth Conference on Machine Translation (Volume 3: Shared Task Papers, Day 2)",
+     month = aug,
+     year = "2019",
+     address = "Florence, Italy",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/W19-5435",
+     doi = "10.18653/v1/W19-5435",
+     pages = "261--266",
+ }
+ """
+
+ _LOCAL = False
+ _LANGUAGES = ["ind", "jav", "sun", "tha", "vie", "zlm", "lao", "khm", "mya", "ceb"]
+ _DATASETNAME = "cc_aligned_sent"
+ _DESCRIPTION = """\
+ This dataset contains the sentence pairs extracted from CC-Aligned document
+ pairs using similarity scores of LASER embeddings (minimum similarity 1.04,
+ sorted by decreasing similarity score). Languages not covered by LASER are
+ excluded.
+ """
+
+ _HOMEPAGE = "https://www2.statmt.org/cc-aligned/"
+ _LICENSE = Licenses.UNKNOWN.value
+ _URL = "https://data.statmt.org/cc-aligned/sentence-aligned/"
+
+ _SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
+ _SOURCE_VERSION = "1.0.0"
+ _SEACROWD_VERSION = "2024.06.20"
+
+ # Locale codes as they appear in the CC-Aligned download file names.
+ _SUBSETS = ["id_ID", "jv_ID", "su_ID", "th_TH", "vi_VN", "ms_MY", "lo_LA", "km_KH", "my_MM", "cx_PH"]
+
+
+ class CCAlignedSentencesDataset(datasets.GeneratorBasedBuilder):
+     """CC Aligned Sentences dataset by Chaudhary et al. (2019)"""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     SEACROWD_SCHEMA_NAME = "t2t"
+
+     # Add configurations for loading the dataset per language.
+     dataset_names = sorted([f"{_DATASETNAME}_{subset}" for subset in _SUBSETS])
+     BUILDER_CONFIGS = []
+     for name in dataset_names:
+         source_config = SEACrowdConfig(
+             name=f"{name}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=name,
+         )
+         BUILDER_CONFIGS.append(source_config)
+         seacrowd_config = SEACrowdConfig(
+             name=f"{name}_seacrowd_{SEACROWD_SCHEMA_NAME}",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
+             subset_id=name,
+         )
+         BUILDER_CONFIGS.append(seacrowd_config)
+
+     # Choose the first subset (alphabetically) as the default configuration.
+     first_subset = sorted(_SUBSETS)[0]
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_{first_subset}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "Source_Sentence": datasets.Value("string"),
+                     "Target_Sentence": datasets.Value("string"),
+                     "LASER_similarity": datasets.Value("float64"),
+                 }
+             )
+
+         if self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+             features = schemas.text_to_text.features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
+         """Return SplitGenerators."""
+
+         # Helper for parsing config and URL names.
+         def _split_at_n(text: str, n: int) -> Tuple[str, str]:
+             """Split text at the n-th underscore."""
+             return ("_".join(text.split("_")[:n]), "_".join(text.split("_")[n:]))
+
+         # Recover the subset from the config name, e.g.
+         # "cc_aligned_sent_id_ID_source" -> "id_ID".
+         _, subset = _split_at_n(_split_at_n(self.config.name, 5)[0], 3)
+
+         # Build the URL. For cx_PH, the source and target languages are reversed.
+         (source_lang, target_lang) = (subset, "en_XX") if subset == "cx_PH" else ("en_XX", subset)
+         url = _URL + f"{source_lang}-{target_lang}.tsv.xz"
+         filepath = dl_manager.download_and_extract(url)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": filepath,
+                     "source_lang": source_lang,
+                     "target_lang": target_lang,
+                 },
+             )
+         ]
+
+     def _generate_examples(self, filepath: Path, source_lang: str, target_lang: str) -> Tuple[int, Dict]:
+         """Yield examples as (key, example) tuples."""
+         with open(filepath, encoding="utf-8") as file:
+             for idx, row in enumerate(file):
+                 # Each row holds a source sentence, a target sentence, and a
+                 # LASER similarity score, separated by tabs.
+                 text_1, text_2, score = row.strip().split("\t")
+                 if self.config.schema == "source":
+                     example = {
+                         "id": str(idx),
+                         "Source_Sentence": text_1,
+                         "Target_Sentence": text_2,
+                         "LASER_similarity": float(score),
+                     }
+                 if self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+                     example = {
+                         "id": str(idx),
+                         "text_1": text_1,
+                         "text_2": text_2,
+                         "text_1_name": source_lang,
+                         "text_2_name": target_lang,
+                     }
+                 yield idx, example
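
For context, a minimal usage sketch, not part of the uploaded file: it assumes the script is saved locally as cc_aligned_sent.py with the seacrowd utilities it imports installed, uses the config naming scheme built in BUILDER_CONFIGS above, and passes trust_remote_code=True, which recent versions of datasets require for script-based loaders.

    import datasets

    # Config names follow the pattern built in BUILDER_CONFIGS:
    #   "cc_aligned_sent_<subset>_source" or
    #   "cc_aligned_sent_<subset>_seacrowd_t2t"
    dset = datasets.load_dataset(
        "cc_aligned_sent.py",  # hypothetical local path to the script above
        name="cc_aligned_sent_id_ID_source",
        trust_remote_code=True,
    )

    # The loader defines a single "train" split; in the source schema each row
    # carries id, Source_Sentence, Target_Sentence, and LASER_similarity.
    print(dset["train"][0])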