Datasets:
File size: 3,879 Bytes
a85b37c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 |
"""Test-Stanford dataset by Bansal et al.."""
import datasets
import pandas as pd
# BibTeX entry for the paper that introduced this dataset (arXiv:1501.03210).
_CITATION = """
@misc{bansal2015deep,
title={Towards Deep Semantic Analysis Of Hashtags},
author={Piyush Bansal and Romil Bansal and Vasudeva Varma},
year={2015},
eprint={1501.03210},
archivePrefix={arXiv},
primaryClass={cs.IR}
}
"""
# Short human-readable summary surfaced in the dataset card / `DatasetInfo`.
_DESCRIPTION = """
Manually Annotated Stanford Sentiment Analysis Dataset by Bansal et al..
"""
# Remote location of the raw data, keyed by split name (only a test split exists).
_URLS = {
"test": "https://raw.githubusercontent.com/ruanchaves/hashformers/master/datasets/Test-Stanford.txt"
}
class TestStanford(datasets.GeneratorBasedBuilder):
    """Builder for the Test-Stanford hashtag segmentation dataset.

    Each example is one hashtag together with its ranked candidate
    segmentations and (when annotated) the position of the gold
    segmentation among the candidates.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return dataset metadata: features schema, description, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "hashtag": datasets.Value("string"),
                    "segmentation": datasets.Value("string"),
                    "gold_position": datasets.Value("int32"),
                    "rank": datasets.Sequence(
                        {
                            "position": datasets.Value("int32"),
                            "candidate": datasets.Value("string"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the raw file and expose it as a single TEST split."""
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs, one example per hashtag.

        The raw TSV has one row per (hashtag, candidate) pair; consecutive
        rows sharing a hashtag are merged into a single example.

        Args:
            filepath: Local path to the downloaded tab-separated file with
                columns id, hashtag, candidate, label. Values in the raw
                file are wrapped in single quotes.

        Yields:
            Tuple of (running index, example dict) matching the features
            declared in ``_info``.
        """
        names = ["id", "hashtag", "candidate", "label"]
        df = pd.read_csv(filepath, sep="\t", skiprows=1, header=None,
                         names=names)
        # Strip the wrapping single quotes and surrounding whitespace from
        # every column except the numeric label.
        for col in names[0:-1]:
            df[col] = df[col].apply(lambda x: x.strip("'").strip())
        records = df.to_dict('records')

        # Group consecutive rows that share the same hashtag.
        output = []
        current_hashtag = None
        group_id = None
        candidates = []
        labels = []

        def flush_group():
            # Append the accumulated group to `output`; the initial
            # placeholder group (current_hashtag is None) is skipped.
            if current_hashtag:
                output.append(
                    {
                        "hashtag": current_hashtag,
                        "candidate": candidates,
                        "id": group_id,
                        "label": labels,
                    }
                )

        for row in records:
            if current_hashtag != row["hashtag"]:
                flush_group()
                current_hashtag = row["hashtag"]
                candidates = [row["candidate"]]
                group_id = int(row["id"])
                labels = [int(row["label"])]
            else:
                candidates.append(row["candidate"])
                labels.append(int(row["label"]))
        # BUG FIX: the original implementation never flushed the final
        # hashtag group after the loop, silently dropping the last example.
        flush_group()

        def get_gold_position(row):
            # 0-based position of the gold candidate, or None if no
            # candidate is labeled 1.
            try:
                return row["label"].index(1)
            except ValueError:
                return None

        def get_rank(row):
            # 1-based ranked list of candidate segmentations.
            return [
                {"position": idx + 1, "candidate": item}
                for idx, item in enumerate(row["candidate"])
            ]

        def get_segmentation(row):
            # Gold segmentation string, or None when no gold is annotated.
            try:
                gold_idx = row["label"].index(1)
                return row["candidate"][gold_idx]
            except ValueError:
                return None

        for idx, row in enumerate(output):
            yield idx, {
                "index": int(row["id"]),
                "hashtag": row["hashtag"],
                "segmentation": get_segmentation(row),
                "gold_position": get_gold_position(row),
                "rank": get_rank(row),
            }