"""STAN small dataset by Bansal et al.."""
|
|
|
|
import datasets
|
|
import pandas as pd
|
|
import ast
|
|
|
|
_CITATION = """
|
|
@misc{bansal2015deep,
|
|
title={Towards Deep Semantic Analysis Of Hashtags},
|
|
author={Piyush Bansal and Romil Bansal and Vasudeva Varma},
|
|
year={2015},
|
|
eprint={1501.03210},
|
|
archivePrefix={arXiv},
|
|
primaryClass={cs.IR}
|
|
}
|
|
"""
|
|
|
|
_DESCRIPTION = """
|
|
Manually Annotated Stanford Sentiment Analysis Dataset by Bansal et al..
|
|
"""
|
|
_URLS = {
    "test": "https://raw.githubusercontent.com/ruanchaves/hashformers/master/datasets/stan_small.csv"
}


class StanSmall(datasets.GeneratorBasedBuilder):

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "hashtag": datasets.Value("string"),
                    "segmentation": datasets.Value("string"),
                    "alternatives": datasets.Sequence(
                        {
                            "segmentation": datasets.Value("string")
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/mounicam/hashtag_master",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):

        def get_segmentation(row):
            # Copy the whitespace of the first gold truth into the hashtag,
            # while keeping the hashtag's original casing.
            needle = row["hashtags"]
            haystack = row["goldtruths"][0].strip()
            output = ""
            iterator = iter(haystack)
            for char in needle:
                output += char
                while True:
                    try:
                        next_char = next(iterator)
                        if next_char.lower() == char.lower():
                            break
                        elif next_char.isspace():
                            # A space found in the gold truth is inserted right
                            # before the character that was just appended.
                            output = output[0:-1] + next_char + output[-1]
                    except StopIteration:
                        break
            return output

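        # Hypothetical illustration (the hashtag is invented, not taken from the
        # dataset): for the hashtag "ThankYouSachin" with first gold truth
        # "thank you sachin", get_segmentation returns "Thank You Sachin", i.e.
        # the hashtag's casing with the gold truth's whitespace positions.
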
        def get_alternatives(row, segmentation):
            # Keep every distinct gold truth whose exact string differs from the
            # chosen segmentation as an alternative segmentation.
            alts = list(set([x.strip() for x in row["goldtruths"]]))
            alts = [x for x in alts if x != segmentation]
            alts = [{"segmentation": x} for x in alts]
            return alts

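        # Continuing the hypothetical example: with gold truths
        # ["thank you sachin", "thankyou sachin"] and segmentation
        # "Thank You Sachin", both gold truths survive the exact-string filter,
        # so the alternatives contain {"segmentation": "thank you sachin"} and
        # {"segmentation": "thankyou sachin"}.
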
        # Each row of the CSV stores its gold truths as a Python list literal,
        # hence the ast.literal_eval call.
        records = pd.read_csv(filepath).to_dict("records")
        records = [
            {"hashtags": row["hashtags"], "goldtruths": ast.literal_eval(row["goldtruths"])}
            for row in records
        ]
        for idx, row in enumerate(records):
            segmentation = get_segmentation(row)
            alternatives = get_alternatives(row, segmentation)
            yield idx, {
                "index": idx,
                "hashtag": row["hashtags"],
                "segmentation": segmentation,
                "alternatives": alternatives,
            }
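

# Minimal usage sketch (not part of the original script). Assuming this file is
# saved locally as "stan_small.py", the test split could be loaded with the
# `datasets` library; recent `datasets` releases may additionally require
# `trust_remote_code=True` when loading a script-based dataset.
#
#     import datasets
#     stan = datasets.load_dataset("stan_small.py", split="test")
#     print(stan[0]["hashtag"], "->", stan[0]["segmentation"])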