albertvillanova (HF staff) committed
Commit 4e4e3c4
1 Parent(s): 96ee9ec

Convert dataset to Parquet (#3)


- Convert dataset to Parquet (3cde639d097ad06a5ab73e715b4a682317388511)
- Delete loading script (d1a454f7336d33b8d6133979138f175a927d4ea6)
- Delete legacy dataset_infos.json (190b813af59de781d1fb39fb989141134bf91a49)

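With this change the `en-pt` config is served as a Parquet shard resolved from the README metadata, so `load_dataset` no longer needs to execute `capes.py`. A minimal sketch of loading the dataset after the conversion (the `capes` repo id and `en-pt` config name are taken from this commit; the printed output is illustrative):

```python
from datasets import load_dataset

# Loads the Parquet-backed train split; no loading script is executed.
ds = load_dataset("capes", "en-pt", split="train")
print(ds[0]["translation"])  # e.g. {"en": "...", "pt": "..."}
```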
README.md CHANGED
@@ -23,6 +23,7 @@ tags:
 - dissertation-abstracts-translation
 - theses-translation
 dataset_info:
+  config_name: en-pt
   features:
   - name: translation
     dtype:
@@ -30,13 +31,18 @@ dataset_info:
         languages:
         - en
         - pt
-  config_name: en-pt
   splits:
   - name: train
-    num_bytes: 472484364
+    num_bytes: 472483436
     num_examples: 1157610
-  download_size: 162229298
-  dataset_size: 472484364
+  download_size: 285468020
+  dataset_size: 472483436
+configs:
+- config_name: en-pt
+  data_files:
+  - split: train
+    path: en-pt/train-*
+  default: true
 ---
 
 # Dataset Card for CAPES
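The added `configs` block is what lets the Hub map the `en-pt` config to its Parquet shards by glob pattern. A minimal sketch of the equivalent explicit mapping, assuming the `en-pt/train-*` files have been fetched into the working directory:

```python
from datasets import load_dataset

# The generic "parquet" builder with the same data_files mapping that the
# README's `configs` metadata declares for the en-pt config.
ds = load_dataset("parquet", data_files={"train": "en-pt/train-*"}, split="train")
```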
capes.py DELETED
@@ -1,98 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Capes: Parallel corpus of theses and dissertation abstracts in Portuguese and English from CAPES"""
-
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{soares2018parallel,
-  title={A Parallel Corpus of Theses and Dissertations Abstracts},
-  author={Soares, Felipe and Yamashita, Gabrielli Harumi and Anzanello, Michel Jose},
-  booktitle={International Conference on Computational Processing of the Portuguese Language},
-  pages={345--352},
-  year={2018},
-  organization={Springer}
-}
-"""
-
-
-_DESCRIPTION = """\
-A parallel corpus of theses and dissertations abstracts in English and Portuguese were collected from the \
-CAPES website (Coordenação de Aperfeiçoamento de Pessoal de Nível Superior) - Brazil. \
-The corpus is sentence aligned for all language pairs. Approximately 240,000 documents were \
-collected and aligned using the Hunalign algorithm.
-"""
-
-
-_HOMEPAGE = "https://sites.google.com/view/felipe-soares/datasets#h.p_kxOR6EhHm2a6"
-
-_URL = "https://ndownloader.figstatic.com/files/14015837"
-
-
-class Capes(datasets.GeneratorBasedBuilder):
-    """Capes: Parallel corpus of theses and dissertation abstracts in Portuguese and English from CAPES"""
-
-    VERSION = datasets.Version("1.0.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="en-pt",
-            version=datasets.Version("1.0.0"),
-            description="Parallel corpus of theses and dissertation abstracts in Portuguese and English from CAPES",
-        )
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {"translation": datasets.features.Translation(languages=self.config.name.split("-"))}
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        archive = dl_manager.download(_URL)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "source_file": "en_pt.en",
-                    "target_file": "en_pt.pt",
-                    "src_files": dl_manager.iter_archive(archive),
-                    "tgt_files": dl_manager.iter_archive(archive),
-                },
-            ),
-        ]
-
-    def _generate_examples(self, source_file, target_file, src_files, tgt_files):
-        source, target = tuple(self.config.name.split("-"))
-        for src_path, src_f in src_files:
-            if src_path == source_file:
-                for tgt_path, tgt_f in tgt_files:
-                    if tgt_path == target_file:
-                        for idx, (l1, l2) in enumerate(zip(src_f, tgt_f)):
-                            l1 = l1.decode("utf-8").strip()
-                            l2 = l2.decode("utf-8").strip()
-                            if l1 and l2:
-                                result = {"translation": {source: l1, target: l2}}
-                                yield idx, result
-                        break
-                break
dataset_infos.json DELETED
@@ -1 +0,0 @@
-{"en-pt": {"description": "A parallel corpus of theses and dissertations abstracts in English and Portuguese were collected from the CAPES website (Coordena\u00e7\u00e3o de Aperfei\u00e7oamento de Pessoal de N\u00edvel Superior) - Brazil. The corpus is sentence aligned for all language pairs. Approximately 240,000 documents were collected and aligned using the Hunalign algorithm.\n", "citation": "@inproceedings{soares2018parallel,\n title={A Parallel Corpus of Theses and Dissertations Abstracts},\n author={Soares, Felipe and Yamashita, Gabrielli Harumi and Anzanello, Michel Jose},\n booktitle={International Conference on Computational Processing of the Portuguese Language},\n pages={345--352},\n year={2018},\n organization={Springer}\n}\n", "homepage": "https://sites.google.com/view/felipe-soares/datasets#h.p_kxOR6EhHm2a6", "license": "", "features": {"translation": {"languages": ["en", "pt"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "capes", "config_name": "en-pt", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 472484364, "num_examples": 1157610, "dataset_name": "capes"}}, "download_checksums": {"https://ndownloader.figstatic.com/files/14015837": {"num_bytes": 162229298, "checksum": "08e5739e78cd5b68ca6b29507f2a746fd3a5fbdec8dde2700a4141030d21e143"}}, "download_size": 162229298, "post_processing_size": null, "dataset_size": 472484364, "size_in_bytes": 634713662}}
en-pt/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8f2e5cacec1504c02b08b453f7cdddaeb652772c9800359ea2fccec46d52cfc
+size 285468020
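The added file is a Git LFS pointer; the actual Parquet shard (285,468,020 bytes, matching the README's new `download_size`) lives in LFS storage. A sketch of reading it directly, assuming the `hf://` fsspec filesystem provided by `huggingface_hub` and a `datasets/capes` repo path:

```python
import pandas as pd

# Reads the single train shard straight from the Hub via fsspec.
df = pd.read_parquet("hf://datasets/capes/en-pt/train-00000-of-00001.parquet")
print(len(df))  # 1,157,610 rows per the README metadata
```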