# analogy_questions/add_new_analogy_2.py (commit c18e48a, "add scan")
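"""Build the `scan` analogy split from relbert/scientific_and_creative_analogy.

For every concept mapping in the source data the script generates
multiple-choice analogy questions, splits them into 10% validation / 90% test
(stratified into metaphor vs. non-metaphor groups), writes the two splits to
dataset/scan/{valid,test}.jsonl, and prints a markdown summary table.
"""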
import json
import os
from itertools import combinations
from random import seed, randint, shuffle

import pandas as pd
from datasets import load_dataset


def get_stats(filename):
    """Return (number of questions, distinct choice sizes, number of relation groups) for a JSONL file."""
    with open(filename) as f:
        _data = [json.loads(i) for i in f.read().splitlines()]
    return len(_data), list(set([len(i['choice']) for i in _data])), len(list(set([i['prefix'] for i in _data])))
def create_analogy(_data):
    """Convert each concept mapping into a set of multiple-choice analogy questions."""
    analogy_data = []
    seed(12)
    for i in _data:
        # Drop duplicated source/target terms while keeping the pairing aligned.
        source = []
        target = []
        for s, t in zip(i['source'], i['target']):
            if s not in source and t not in target:
                source.append(s)
                target.append(t)
        assert len(source) == len(target), f"{len(source)} != {len(target)}"
        all_combinations = list(combinations(range(len(source)), 2))
        for n, (q_h_id, q_t_id) in enumerate(all_combinations):
            # Distractors are the target pairs of the other combinations; the
            # correct target pair is inserted at a random index, which is
            # recorded as the answer.
            choice = [[target[x], target[y]] for m, (x, y) in enumerate(all_combinations) if m != n]
            answer_id = randint(0, len(source) - 1)
            choice = choice[:answer_id] + [[target[q_h_id], target[q_t_id]]] + choice[answer_id:]
            assert choice[answer_id] == [target[q_h_id], target[q_t_id]]
            analogy_data.append({
                "stem": [source[q_h_id], source[q_t_id]],
                "choice": choice,
                "answer": answer_id,
                "prefix": i["type"]
            })
    return analogy_data
# Build analogy questions from the test split of the source dataset.
data = load_dataset("relbert/scientific_and_creative_analogy", split='test')
data = create_analogy(data)

# Split into metaphor and non-metaphor questions, then take 10% of each for validation.
data_m = [i for i in data if i['prefix'] == 'metaphor']
data_s = [i for i in data if i['prefix'] != 'metaphor']
seed(12)
shuffle(data_m)
shuffle(data_s)
validation = data_s[:int(0.1 * len(data_s))] + data_m[:int(0.1 * len(data_m))]
test = data_s[int(0.1 * len(data_s)):] + data_m[int(0.1 * len(data_m)):]

# Write the two splits as JSONL.
os.makedirs("dataset/scan", exist_ok=True)
with open("dataset/scan/valid.jsonl", "w") as f:
    f.write("\n".join([json.dumps(i) for i in validation]))
with open("dataset/scan/test.jsonl", "w") as f:
    f.write("\n".join([json.dumps(i) for i in test]))
# Summarise the new split as a markdown table for the dataset card.
t_size, t_num_choice, t_relation_type = get_stats("dataset/scan/test.jsonl")
v_size, v_num_choice, v_relation_type = get_stats("dataset/scan/valid.jsonl")
stat = [{
    "name": "`scan`",
    "Size (valid/test)": f"{v_size}/{t_size}",
    "Num of choice (valid/test)": f"{','.join([str(n) for n in v_num_choice])}/{','.join([str(n) for n in t_num_choice])}",
    "Num of relation group (valid/test)": f"{v_relation_type}/{t_relation_type}",
    "Original Reference": "[relbert/scientific_and_creative_analogy](https://huggingface.co/datasets/relbert/scientific_and_creative_analogy)"
}]
print(pd.DataFrame(stat).to_markdown(index=False))
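
# --- Optional sanity check: a minimal sketch, not part of the original release script ---
# Reloads the files written above and verifies that every record has a two-term
# stem, two-term choices, and an in-range answer index.
for split_file in ["dataset/scan/valid.jsonl", "dataset/scan/test.jsonl"]:
    with open(split_file) as f:
        records = [json.loads(line) for line in f.read().splitlines()]
    for r in records:
        assert len(r["stem"]) == 2 and all(len(c) == 2 for c in r["choice"])
        assert 0 <= r["answer"] < len(r["choice"]), f"answer index out of range in {split_file}"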