andreaschandra committed on
Commit
84d218b
1 Parent(s): e60797d
Files changed (3)
  1. README.md +146 -1
  2. dataset_infos.json +1 -0
  3. inglish.py +74 -0
README.md CHANGED
@@ -1,3 +1,148 @@
  ---
- license: cc-by-4.0
  ---
  ---
+ annotations_creators:
+ - machine-generated
+ language:
+ - id
+ - en
+ language_creators:
+ - machine-generated
+ license:
+ - cc-by-4.0
+ multilinguality:
+ - translation
+ pretty_name: 'Inglish: Indonesian English Machine Translation Dataset'
+ size_categories:
+ - 10K<n<100K
+ source_datasets:
+ - original
+ tags:
+ - indonesian
+ - english
+ - translation
+ task_categories:
+ - translation
+ task_ids: []
  ---
+
+ # Dataset Card for Inglish: Indonesian English Machine Translation Dataset
+
+ ## Table of Contents
+ - [Table of Contents](#table-of-contents)
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+   - [Contributions](#contributions)
+
+ ## Dataset Description
+
+ - **Homepage:** https://github.com/jakartaresearch
+ - **Repository:** https://github.com/jakartaresearch/hf-datasets
+ - **Paper:**
+ - **Leaderboard:**
+ - **Point of Contact:**
+
+ ### Dataset Summary
+
+ This dataset is built as a playground for beginners to build a translation model for Indonesian and English.
+
+ ### Supported Tasks and Leaderboards
+
+ [More Information Needed]
+
+ ### Languages
+
+ Indonesian (`id`) and English (`en`).
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ [More Information Needed]
+
+ ### Data Fields
+
+ - `english`: a `string` with the English side of the sentence pair.
+ - `indonesian`: a `string` with the Indonesian side of the sentence pair.
+
+ ### Data Splits
+
+ | Split      | Examples |
+ | ---------- | -------: |
+ | train      |    8,701 |
+ | validation |    2,901 |
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ [More Information Needed]
+
+ ### Source Data
+
+ #### Initial Data Collection and Normalization
+
+ [More Information Needed]
+
+ #### Who are the source language producers?
+
+ [More Information Needed]
+
+ ### Annotations
+
+ #### Annotation process
+
+ [More Information Needed]
+
+ #### Who are the annotators?
+
+ [More Information Needed]
+
+ ### Personal and Sensitive Information
+
+ [More Information Needed]
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ [More Information Needed]
+
+ ### Discussion of Biases
+
+ [More Information Needed]
+
+ ### Other Known Limitations
+
+ [More Information Needed]
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ [More Information Needed]
+
+ ### Licensing Information
+
+ The dataset is released under a [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/) license.
+
+ ### Citation Information
+
+ [More Information Needed]
+
+ ### Contributions
+
+ Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "This dataset is built as a playground for beginners to build a translation model for Indonesian and English.\n", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"english": {"dtype": "string", "id": null, "_type": "Value"}, "indonesian": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "inglish", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2162458, "num_examples": 8701, "dataset_name": "inglish"}, "validation": {"name": "validation", "num_bytes": 717672, "num_examples": 2901, "dataset_name": "inglish"}}, "download_checksums": {"https://media.githubusercontent.com/media/jakartaresearch/hf-datasets/main/inglish/train.csv": {"num_bytes": 2149052, "checksum": "09107edc5fc9e751f6ab15f1aeff3603377662b83d391a3a019a3c786992ceba"}, "https://media.githubusercontent.com/media/jakartaresearch/hf-datasets/main/inglish/validation.csv": {"num_bytes": 713149, "checksum": "7212330f04e9090bf8094f84295a465b431ca4124e632c9f085889b7b3b62120"}}, "download_size": 2862201, "post_processing_size": null, "dataset_size": 2880130, "size_in_bytes": 5742331}}
inglish.py ADDED
@@ -0,0 +1,74 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Inglish: An Indonesian-English Translation Dataset."""
+
+
+ import csv
+
+ import datasets
+
+ _DESCRIPTION = """\
+ This dataset is built as a playground for beginners to build a translation model for Indonesian and English.
+ """
+
+ _HOMEPAGE = "https://github.com/jakartaresearch"
+
+ # The HuggingFace Datasets library doesn't host the data; these URLs point to the original files.
+ _TRAIN_URL = "https://media.githubusercontent.com/media/jakartaresearch/hf-datasets/main/inglish/train.csv"
+ _VAL_URL = "https://media.githubusercontent.com/media/jakartaresearch/hf-datasets/main/inglish/validation.csv"
+
+
+ class Inglish(datasets.GeneratorBasedBuilder):
+     """Inglish: An Indonesian-English Translation Dataset."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "english": datasets.Value("string"),
+                 "indonesian": datasets.Value("string"),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Download both CSVs; `download_and_extract` returns local cache paths.
+         train_path = dl_manager.download_and_extract(_TRAIN_URL)
+         val_path = dl_manager.download_and_extract(_VAL_URL)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path}),
+         ]
+
+     # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
+     def _generate_examples(self, filepath):
+         """Generate examples from a CSV file with `english,indonesian` columns."""
+         with open(filepath, encoding="utf-8") as csv_file:
+             csv_reader = csv.reader(csv_file, delimiter=",")
+             next(csv_reader)  # skip the header row
+             for id_, row in enumerate(csv_reader):
+                 english, indo = row
+                 yield id_, {"english": english, "indonesian": indo}
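A quick way to smoke-test the script above is to point `load_dataset` at the local file; a sketch, assuming the script is saved as `inglish.py` in the working directory and a `datasets` version that still supports loading scripts (pre-3.0):

```python
from datasets import load_dataset

# Loads via the local script; the two CSVs are fetched from
# _TRAIN_URL and _VAL_URL and cached by the download manager.
ds = load_dataset("./inglish.py")

assert set(ds.keys()) == {"train", "validation"}
print(ds["train"].features)  # {'english': Value('string'), 'indonesian': Value('string')}
```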