# political/political.py
import os
from pathlib import Path
import datasets
from typing import List
STYLE, CLASSIFIER = "style", "classifier"
_CITATION = """\
@inproceedings{style_transfer_acl18,
title={Style Transfer Through Back-Translation},
author={Prabhumoye, Shrimai and Tsvetkov, Yulia and Salakhutdinov, Ruslan and Black, Alan W},
year={2018},
booktitle={Proc. ACL}
}
"""
_DESCRIPTION = """\
Political slant transfer dataset. Contains two classes of political tweets written by Democratic and Republican politicians. The dataset can be used for style transfer and classification tasks.
"""
_HOMEPAGE = "https://github.com/shrimai/Style-Transfer-Through-Back-Translation"
_LICENSE = "" # could not find.
class PoliticalDataset(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.0.0")
DEFAULT_CONFIG_NAME = STYLE
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name=STYLE,
version=VERSION,
description="Political Tweets Dataset, used for Style Transfer tasks.",
),
datasets.BuilderConfig(
name=CLASSIFIER,
version=VERSION,
            description="Political Tweets Dataset, used for classification tasks.",
),
]
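    # Usage sketch (assuming this script and the political_data/ directory sit
    # side by side; the local path below is illustrative, and depending on the
    # installed `datasets` version, trust_remote_code=True may be required):
    #
    #     ds = datasets.load_dataset("political.py", name="style")
    #     ds = datasets.load_dataset("political.py", name="classifier")
    #
    # `name` selects one of the BuilderConfigs above; omitting it falls back to
    # DEFAULT_CONFIG_NAME ("style").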
def _info(self):
features = datasets.Features(
{
"text": datasets.Value("string"),
"label": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=("text", "label"),
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
data_dir = "political_data"
splits: List[datasets.SplitGenerator] = []
if self.config.name == STYLE:
splits.append(
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepaths": [
os.path.join(data_dir, "republican_only.train.en"),
os.path.join(data_dir, "democratic_only.train.en"),
],
"split": "train",
},
)
)
else:
splits.append(
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepaths": [os.path.join(data_dir, "classtrain.txt")],
"split": "train",
},
)
)
splits += [
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepaths": [
os.path.join(data_dir, "republican_only.dev.en"),
os.path.join(data_dir, "democratic_only.dev.en"),
],
"split": "dev",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepaths": [
os.path.join(data_dir, "republican_only.test.en"),
os.path.join(data_dir, "democratic_only.test.en"),
],
"split": "test",
},
),
]
return splits
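    # Expected local layout, inferred from the file names above (the data is not
    # fetched through dl_manager; it must already exist under ./political_data
    # relative to the working directory):
    #
    #     political_data/
    #         classtrain.txt                                        # classifier train split
    #         republican_only.train.en, democratic_only.train.en    # style train split
    #         republican_only.dev.en,   democratic_only.dev.en      # validation (both configs)
    #         republican_only.test.en,  democratic_only.test.en     # test (both configs)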
    def _generate_examples(self, filepaths: List[str], split: str):
        key = 0
        for filepath in filepaths:
            filename = Path(filepath).name
            # for the style files the label is the filename prefix,
            # e.g. "republican_only.test.en" -> "republican".
            label = filename.split(".")[0].split("_")[0]
            with open(filepath, encoding="utf-8") as f:
                for row in f:
                    text = row.strip()
                    if split != "test":
                        # train/dev lines carry the label as their first token.
                        tokens = text.split()
                        label, text = tokens[0], " ".join(tokens[1:])
                    yield key, {"text": text, "label": label}
                    key += 1  # keys must stay unique across all input files
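    # Parsing sketch for _generate_examples above (the example sentences are made up):
    #
    #     train/dev line "republican thank you for your support"
    #         -> {"text": "thank you for your support", "label": "republican"}
    #     test line "thank you for your support" in republican_only.test.en
    #         -> {"text": "thank you for your support", "label": "republican"}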
if __name__ == "__main__":
    # quick smoke test: stream the "classifier" config and walk the train split
    from tqdm import tqdm

    dataset = PoliticalDataset(config_name="classifier")
    dataset = dataset.as_streaming_dataset()
    print(dataset)
    for row in tqdm(dataset["train"]):
        pass  # iterating is enough to exercise _generate_examples
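    # Optional sketch: tally the label distribution of the streamed train split
    # (assumes the raw files are already present under political_data/).
    from collections import Counter

    label_counts = Counter(row["label"] for row in dataset["train"])
    print(label_counts)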