# imdb-javanese/imdb-javanese.py
"""Javanese IMDB movie reviews dataset."""
from __future__ import absolute_import, division, print_function
import csv
import os
import datasets
_CITATION = """\
@InProceedings{maas-EtAl:2011:ACL-HLT2011,
author = {Maas, Andrew L. and Daly, Raymond E. and Pham, Peter T. and Huang, Dan and Ng, Andrew Y. and Potts, Christopher},
title = {Learning Word Vectors for Sentiment Analysis},
booktitle = {Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies},
month = {June},
year = {2011},
address = {Portland, Oregon, USA},
publisher = {Association for Computational Linguistics},
pages = {142--150},
url = {http://www.aclweb.org/anthology/P11-1015}
}
"""
_DESCRIPTION = """
Large Movie Review Dataset translated to Javanese.
This is a dataset for binary sentiment classification containing substantially
more data than previous benchmark datasets. We provide a set of 25,000 highly
polar movie reviews for training, and 25,000 for testing. There is additional
unlabeled data for use as well. We translated the original IMDB Dataset to
Javanese using the multi-lingual MarianMT Transformer model from
`Helsinki-NLP/opus-mt-en-mul`.
"""
_URL = "https://github.com/w11wo/javanese-nlp/blob/main/imdb-javanese/javanese_imdb_csv.zip?raw=true"
_HOMEPAGE = "https://github.com/w11wo/javanese-nlp"


class JavaneseImdbReviews(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Javanese-translated IMDB movie reviews."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    # "0" and "1" mark the two sentiment classes of the labeled
                    # splits; "-1" is used for the unlabeled (unsupervised) examples.
                    "label": datasets.ClassLabel(names=["0", "1", "-1"]),
                }
            ),
            citation=_CITATION,
            homepage=_HOMEPAGE,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train, test, and unsupervised splits."""
        # Download and extract the zip archive containing the three CSV files.
        dl_path = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(dl_path, "javanese_imdb_train.csv")
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(dl_path, "javanese_imdb_test.csv")
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split("unsupervised"),
                gen_kwargs={
                    "filepath": os.path.join(dl_path, "javanese_imdb_unsup.csv")
                },
            ),
        ]
    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples read from one split's CSV file."""
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter=",")
            for id_, row in enumerate(reader):
                # Skip the CSV header row.
                if id_ == 0:
                    continue
                yield id_, {"label": row[0], "text": row[1]}