# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Script borrowed and adapted from https://huggingface.co/datasets/wmt16.

# Lint as: python3
"""WMT: Translate dataset."""


import codecs
import functools
import glob
import gzip
import itertools
import os
import re
import xml.etree.ElementTree as ElementTree

import datasets


logger = datasets.logging.get_logger(__name__)


_DESCRIPTION = """\
Translation dataset based on the data from statmt.org.

Versions exist for different years using a combination of data
sources. The base `wmt` allows you to create a custom dataset by choosing
your own data/language pair. This can be done as follows:

```python
from datasets import inspect_dataset, load_dataset_builder

inspect_dataset("wmt16", "path/to/scripts")
builder = load_dataset_builder(
    "path/to/scripts/wmt_utils.py",
    language_pair=("fr", "de"),
    subsets={
        datasets.Split.TRAIN: ["commoncrawl_frde"],
        datasets.Split.VALIDATION: ["euelections_dev2019"],
    },
)

# Standard version
builder.download_and_prepare()
ds = builder.as_dataset()

# Streamable version
ds = builder.as_streaming_dataset()
```

"""


class SubDataset:
    """Class to keep track of information on a sub-dataset of WMT."""

    def __init__(self, name, target, sources, url, path, manual_dl_files=None):
        """Sub-dataset of WMT.

        Args:
          name: `string`, a unique dataset identifier.
          target: `string`, the target language code.
          sources: `set<string>`, the set of source language codes.
          url: `string` or `(string, string)`, URL(s) or URL template(s) specifying
            where to download the raw data from. If two strings are provided, the
            first is used for the source language and the second for the target.
            Template strings can either contain '{src}' placeholders that will be
            filled in with the source language code, '{0}' and '{1}' placeholders
            that will be filled in with the source and target language codes in
            alphabetical order, or all 3.
          path: `string` or `(string, string)`, path(s) or path template(s)
            specifying the path to the raw data relative to the root of the
            downloaded archive. If two strings are provided, the dataset is assumed
            to be made up of parallel text files, the first being the source and the
            second the target. If one string is provided, both languages are assumed
            to be stored within the same file and the extension is used to determine
            how to parse it. Template strings should be formatted the same as in
            `url`.
          manual_dl_files: `list<string>` (optional), the list of files that must
            be manually downloaded to the data directory.
        """
        self._paths = (path,) if isinstance(path, str) else path
        self._urls = (url,) if isinstance(url, str) else url
        self._manual_dl_files = manual_dl_files if manual_dl_files else []
        self.name = name
        self.target = target
        self.sources = set(sources)

    def _inject_language(self, src, strings):
        """Injects languages into (potentially) template strings."""
        if src not in self.sources:
            raise ValueError(f"Invalid source for '{self.name}': {src}")

        def _format_string(s):
            if "{0}" in s and "{1}" and "{src}" in s:
                return s.format(*sorted([src, self.target]), src=src)
            elif "{0}" in s and "{1}" in s:
                return s.format(*sorted([src, self.target]))
            elif "{src}" in s:
                return s.format(src=src)
            else:
                return s

        return [_format_string(s) for s in strings]

    def get_url(self, src):
        return self._inject_language(src, self._urls)

    def get_manual_dl_files(self, src):
        return self._inject_language(src, self._manual_dl_files)

    def get_path(self, src):
        return self._inject_language(src, self._paths)
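
# A minimal sketch (hypothetical values) of how the template expansion above
# behaves once the placeholders are filled in:
#
#   sub = SubDataset(
#       name="demo", target="es", sources={"aym"},
#       url="https://example.org/{0}-{1}.zip",
#       path=("train.{src}-es.{src}", "train.{src}-es.es"),
#   )
#   sub.get_url("aym")   # -> ["https://example.org/aym-es.zip"]
#   sub.get_path("aym")  # -> ["train.aym-es.aym", "train.aym-es.es"]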


# Subsets used in the training sets for AmericasNLP 2021 Shared Task on Machine Translation.
_TRAIN_SUBSETS = [
    SubDataset(
        name="americasnlp2021",
        target="es",
        sources={"aym", "bzd", "cni", "gn", "nah", "oto", "quy", "shp"},
        url="https://huggingface.co/datasets/vgaraujov/americasnlp-mt-21/resolve/main/data/americasnlp2021.zip",
        path=("train.{src}-es.{src}", "train.{src}-es.es"),
    ),
]

_DEV_SUBSETS = [
    SubDataset(
        name="dev2021",
        target="es",
        sources={"aym", "bzd", "cni", "gn", "nah", "oto", "quy", "shp"},
        url="https://huggingface.co/datasets/vgaraujov/americasnlp-mt-21/resolve/main/data/americasnlp2021.zip",
        path=("dev.{src}-es.{src}", "dev.{src}-es.es"),
    ),
    SubDataset(
        name="test2021",
        target="es",
        sources={"aym", "bzd", "cni", "gn", "nah", "oto", "quy", "shp"},
        url="https://huggingface.co/datasets/vgaraujov/americasnlp-mt-21/resolve/main/data/test2021.zip",
        path=("test.{src}", "test.es"),
    ),
]

DATASET_MAP = {dataset.name: dataset for dataset in _TRAIN_SUBSETS + _DEV_SUBSETS}
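
# A hedged usage sketch mirroring the `_DESCRIPTION` example above (the script
# path is a placeholder): building the Aymara-Spanish pair from the subsets
# defined in this file:
#
#   builder = datasets.load_dataset_builder(
#       "path/to/this_script.py",
#       language_pair=("aym", "es"),
#       subsets={
#           datasets.Split.TRAIN: ["americasnlp2021"],
#           datasets.Split.VALIDATION: ["dev2021"],
#           datasets.Split.TEST: ["test2021"],
#       },
#   )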


class WmtConfig(datasets.BuilderConfig):
    """BuilderConfig for WMT."""

    def __init__(self, url=None, citation=None, description=None, language_pair=(None, None), subsets=None, **kwargs):
        """BuilderConfig for WMT.

        Args:
          url: The reference URL for the dataset.
          citation: The paper citation for the dataset.
          description: The description of the dataset.
          language_pair: pair of languages that will be used for translation. Should
            contain two-letter language codes. For example: ("en", "de").
          subsets: Dict[split, list[str]]. List of the subsets to use for each
            split. Note that WMT subclasses overwrite this parameter.
          **kwargs: keyword arguments forwarded to super.
        """
        name = "%s-%s" % (language_pair[0], language_pair[1])
        if "name" in kwargs:  # Add name suffix for custom configs
            name += "." + kwargs.pop("name")

        super(WmtConfig, self).__init__(name=name, description=description, **kwargs)

        self.url = url or "http://www.statmt.org"
        self.citation = citation
        self.language_pair = language_pair
        self.subsets = subsets

        # TODO(PVP): remove when manual dir works
        # +++++++++++++++++++++
        if language_pair[1] in ["cs", "hi", "ru"]:
            raise NotImplementedError(f"The dataset for {language_pair[1]}-en is currently not fully supported.")
        # +++++++++++++++++++++


class Wmt(datasets.GeneratorBasedBuilder):
    """WMT translation dataset."""

    BUILDER_CONFIG_CLASS = WmtConfig

    def __init__(self, *args, **kwargs):
        super(Wmt, self).__init__(*args, **kwargs)

    @property
    def _subsets(self):
        """Subsets that make up each split of the dataset."""
        raise NotImplementedError("This is a abstract method")

    @property
    def subsets(self):
        """Subsets that make up each split of the dataset for the language pair."""
        source, target = self.config.language_pair
        filtered_subsets = {}
        subsets = self._subsets if self.config.subsets is None else self.config.subsets
        for split, ss_names in subsets.items():
            filtered_subsets[split] = []
            for ss_name in ss_names:
                dataset = DATASET_MAP[ss_name]
                if dataset.target != target or source not in dataset.sources:
                    logger.info("Skipping sub-dataset that does not include language pair: %s", ss_name)
                else:
                    filtered_subsets[split].append(ss_name)
        logger.info("Using sub-datasets: %s", filtered_subsets)
        return filtered_subsets

    def _info(self):
        src, target = self.config.language_pair
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=self.config.language_pair)}
            ),
            supervised_keys=(src, target),
            homepage=self.config.url,
            citation=self.config.citation,
        )

    def _vocab_text_gen(self, split_subsets, extraction_map, language):
        for _, ex in self._generate_examples(split_subsets, extraction_map, with_translation=False):
            yield ex[language]

    def _split_generators(self, dl_manager):
        source, _ = self.config.language_pair
        manual_paths_dict = {}
        urls_to_download = {}
        for ss_name in itertools.chain.from_iterable(self.subsets.values()):

            # get dataset
            dataset = DATASET_MAP[ss_name]
            if dataset.get_manual_dl_files(source):
                # TODO(PVP): following two lines skip configs that are incomplete for now
                # +++++++++++++++++++++
                logger.info("Skipping {dataset.name} for now. Incomplete dataset for {self.config.name}")
                continue
                # +++++++++++++++++++++

                manual_dl_files = dataset.get_manual_dl_files(source)
                manual_paths = [
                    os.path.join(os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), fname)
                    for fname in manual_dl_files
                ]
                assert all(
                    os.path.exists(path) for path in manual_paths
                ), f"For {dataset.name}, you must manually download the following file(s) from {dataset.get_url(source)} and place them in {dl_manager.manual_dir}: {', '.join(manual_dl_files)}"

                # set manual path for correct subset
                manual_paths_dict[ss_name] = manual_paths
            else:
                urls_to_download[ss_name] = dataset.get_url(source)

        # Download and extract files from URLs.
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        # Extract manually downloaded files.
        manual_files = dl_manager.extract(manual_paths_dict)
        extraction_map = dict(downloaded_files, **manual_files)

        for language in self.config.language_pair:
            self._vocab_text_gen(self.subsets[datasets.Split.TRAIN], extraction_map, language)

        return [
            datasets.SplitGenerator(  # pylint:disable=g-complex-comprehension
                name=split, gen_kwargs={"split_subsets": split_subsets, "extraction_map": extraction_map}
            )
            for split, split_subsets in self.subsets.items()
        ]

    def _generate_examples(self, split_subsets, extraction_map, with_translation=True):
        """Returns the examples in the raw (text) form."""
        source, _ = self.config.language_pair

        def _get_local_paths(dataset, extract_dirs):
            rel_paths = dataset.get_path(source)
            if len(extract_dirs) == 1:
                extract_dirs = extract_dirs * len(rel_paths)
            return [
                os.path.join(ex_dir, rel_path) if rel_path else ex_dir
                for ex_dir, rel_path in zip(extract_dirs, rel_paths)
            ]

        def _get_filenames(dataset):
            rel_paths = dataset.get_path(source)
            urls = dataset.get_url(source)
            if len(urls) == 1:
                urls = urls * len(rel_paths)
            return [rel_path if rel_path else os.path.basename(url) for url, rel_path in zip(urls, rel_paths)]

        for ss_name in split_subsets:
            # TODO(PVP) remove following five lines when manual data works
            # +++++++++++++++++++++
            dataset = DATASET_MAP[ss_name]
            source, _ = self.config.language_pair
            if dataset.get_manual_dl_files(source):
                logger.info(f"Skipping {dataset.name} for now. Incomplete dataset for {self.config.name}")
                continue
            # +++++++++++++++++++++

            logger.info("Generating examples from: %s", ss_name)
            dataset = DATASET_MAP[ss_name]
            extract_dirs = extraction_map[ss_name]
            files = _get_local_paths(dataset, extract_dirs)
            filenames = _get_filenames(dataset)

            sub_generator_args = tuple(files)

            if ss_name.startswith("czeng"):
                if ss_name.endswith("16pre"):
                    sub_generator = functools.partial(_parse_tsv, language_pair=("en", "cs"))
                    sub_generator_args += tuple(filenames)
                else:
                    sub_generator = _parse_czeng
            elif ss_name == "hindencorp_01":
                sub_generator = _parse_hindencorp
            elif len(files) == 2:
                if ss_name.endswith("_frde"):
                    sub_generator = _parse_frde_bitext
                else:
                    sub_generator = _parse_parallel_sentences
                    sub_generator_args += tuple(filenames)
            elif len(files) == 1:
                fname = filenames[0]
                # Note: Due to formatting used by `download_manager`, the file
                # extension may not be at the end of the file path.
                if ".tsv" in fname:
                    sub_generator = _parse_tsv
                    sub_generator_args += tuple(filenames)
                elif (
                    ss_name.startswith("newscommentary_v14")
                    or ss_name.startswith("europarl_v9")
                    or ss_name.startswith("wikititles_v1")
                ):
                    sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair)
                    sub_generator_args += tuple(filenames)
                elif "tmx" in fname or ss_name.startswith("paracrawl_v3"):
                    sub_generator = _parse_tmx
                elif ss_name.startswith("wikiheadlines"):
                    sub_generator = _parse_wikiheadlines
                else:
                    raise ValueError("Unsupported file format: %s" % fname)
            else:
                raise ValueError("Invalid number of files: %d" % len(files))

            for sub_key, ex in sub_generator(*sub_generator_args):
                if not all(ex.values()):
                    continue
                # TODO(adarob): Add subset feature.
                # ex["subset"] = subset
                key = f"{ss_name}/{sub_key}"
                if with_translation:
                    ex = {"translation": ex}
                yield key, ex


def _parse_parallel_sentences(f1, f2, filename1, filename2):
    """Returns examples from parallel SGML or text files, which may be gzipped."""

    def _parse_text(path, original_filename):
        """Returns the sentences from a single text file, which may be gzipped."""
        split_path = original_filename.split(".")

        if split_path[-1] == "gz":
            lang = split_path[-2]

            def gen():
                with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
                    for line in g:
                        yield line.decode("utf-8").rstrip()

            return gen(), lang

        if split_path[-1] == "txt":
            # CWMT
            lang = split_path[-2].split("_")[-1]
            lang = "zh" if lang in ("ch", "cn", "c[hn]") else lang
        else:
            lang = split_path[-1]

        def gen():
            with open(path, "rb") as f:
                for line in f:
                    yield line.decode("utf-8").rstrip()

        return gen(), lang

    def _parse_sgm(path, original_filename):
        """Returns sentences from a single SGML file."""
        lang = original_filename.split(".")[-2]
        # Note: We can't use the XML parser since some of the files are badly
        # formatted.
        seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")

        def gen():
            with open(path, encoding="utf-8") as f:
                for line in f:
                    seg_match = re.match(seg_re, line)
                    if seg_match:
                        assert len(seg_match.groups()) == 1
                        yield seg_match.groups()[0]

        return gen(), lang

    parse_file = _parse_sgm if os.path.basename(f1).endswith(".sgm") else _parse_text

    # Some datasets (e.g., CWMT) contain multiple parallel files specified with
    # a wildcard. We sort both sets to align them and parse them one by one.
    f1_files = sorted(glob.glob(f1))
    f2_files = sorted(glob.glob(f2))

    assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2)
    assert len(f1_files) == len(f2_files), "Number of files does not match: %d vs %d for %s vs %s." % (
        len(f1_files),
        len(f2_files),
        f1,
        f2,
    )

    for f_id, (f1_i, f2_i) in enumerate(zip(f1_files, f2_files)):
        l1_sentences, l1 = parse_file(f1_i, filename1)
        l2_sentences, l2 = parse_file(f2_i, filename2)

        for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)):
            key = f"{f_id}/{line_id}"
            yield key, {l1: s1, l2: s2}
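
# For context, `_parse_sgm` above extracts lines of the form (illustrative):
#   <seg id="1">Some sentence here.</seg>
# and yields the text between the tags, paired line by line across the two files.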


def _parse_frde_bitext(fr_path, de_path):
    with open(fr_path, encoding="utf-8") as fr_f:
        with open(de_path, encoding="utf-8") as de_f:
            for line_id, (s1, s2) in enumerate(zip(fr_f, de_f)):
                yield line_id, {"fr": s1.rstrip(), "de": s2.rstrip()}


def _parse_tmx(path):
    """Generates examples from TMX file."""

    def _get_tuv_lang(tuv):
        for k, v in tuv.items():
            if k.endswith("}lang"):
                return v
        raise AssertionError("Language not found in `tuv` attributes.")

    def _get_tuv_seg(tuv):
        segs = tuv.findall("seg")
        assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
        return segs[0].text

    with open(path, "rb") as f:
        # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
        utf_f = codecs.getreader("utf-8")(f)
        for line_id, (_, elem) in enumerate(ElementTree.iterparse(utf_f)):
            if elem.tag == "tu":
                yield line_id, {_get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")}
                elem.clear()
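
# For reference, a `tu` element in a TMX file typically looks like (illustrative):
#   <tu>
#     <tuv xml:lang="en"><seg>Hello</seg></tuv>
#     <tuv xml:lang="es"><seg>Hola</seg></tuv>
#   </tu>
# which the parser above yields as {"en": "Hello", "es": "Hola"}.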


def _parse_tsv(path, filename, language_pair=None):
    """Generates examples from TSV file."""
    if language_pair is None:
        lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", filename)
        assert lang_match is not None, "Invalid TSV filename: %s" % filename
        l1, l2 = lang_match.groups()
    else:
        l1, l2 = language_pair
    with open(path, encoding="utf-8") as f:
        for j, line in enumerate(f):
            cols = line.split("\t")
            if len(cols) != 2:
                logger.warning("Skipping line %d in TSV (%s) with %d != 2 columns.", j, path, len(cols))
                continue
            s1, s2 = cols
            yield j, {l1: s1.strip(), l2: s2.strip()}
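
# Example (hypothetical): for a file named "data.es-en.tsv" containing the line
# "Hola mundo\tHello world", the parser above yields
# {"es": "Hola mundo", "en": "Hello world"}.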


def _parse_wikiheadlines(path):
    """Generates examples from Wikiheadlines dataset file."""
    lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
    assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
    l1, l2 = lang_match.groups()
    with open(path, encoding="utf-8") as f:
        for line_id, line in enumerate(f):
            s1, s2 = line.split("|||")
            yield line_id, {l1: s1.strip(), l2: s2.strip()}
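
# Example (hypothetical): a file named "headlines.hi-en" with the line
# "namaste ||| hello" yields {"hi": "namaste", "en": "hello"}.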


def _parse_czeng(*paths, **kwargs):
    """Generates examples from CzEng v1.6, with optional filtering for v1.7."""
    filter_path = kwargs.get("filter_path", None)
    if filter_path:
        re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
        with open(filter_path, encoding="utf-8") as f:
            bad_blocks = {blk for blk in re.search(r"qw{([\s\d]*)}", f.read()).groups()[0].split()}
        logger.info("Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.", len(bad_blocks))

    for path in paths:
        for gz_path in sorted(glob.glob(path)):
            with open(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
                filename = os.path.basename(gz_path)
                for line_id, line in enumerate(f):
                    line = line.decode("utf-8")  # required for py3
                    if not line.strip():
                        continue
                    id_, unused_score, cs, en = line.split("\t")
                    if filter_path:
                        block_match = re.match(re_block, id_)
                        if block_match and block_match.groups()[0] in bad_blocks:
                            continue
                    sub_key = f"{filename}/{line_id}"
                    yield sub_key, {
                        "cs": cs.strip(),
                        "en": en.strip(),
                    }


def _parse_hindencorp(path):
    with open(path, encoding="utf-8") as f:
        for line_id, line in enumerate(f):
            split_line = line.split("\t")
            if len(split_line) != 5:
                logger.warning("Skipping invalid HindEnCorp line: %s", line)
                continue
            yield line_id, {"translation": {"en": split_line[3].strip(), "hi": split_line[4].strip()}}