# relentless/experiments/baseline_fasttext.py
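"""fastText baseline for the RelEntLess dataset (cardiffnlp/relentless).

Each candidate pair is embedded as the difference of its two fastText term
vectors and compared to the prototypical example pairs by cosine similarity;
candidates are then ranked by the max/min/mean similarity to those anchors,
and Spearman's correlation with the gold ranks is reported per relation type.
"""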
import json
import os
import zipfile
from statistics import mean

import requests
import pandas as pd
from gensim.models import fasttext
from datasets import load_dataset
# load fasttext
def load_model():
    os.makedirs('./cache', exist_ok=True)
    path = './cache/crawl-300d-2M-subword.bin'
    if not os.path.exists(path):
        # download and unpack the pre-trained subword vectors on first use
        url = 'https://dl.fbaipublicfiles.com/fasttext/vectors-english/crawl-300d-2M-subword.zip'
        filename = os.path.basename(url)
        _path = f"./cache/{filename}"
        with open(_path, "wb") as f:
            r = requests.get(url)
            f.write(r.content)
        with zipfile.ZipFile(_path, 'r') as zip_ref:
            zip_ref.extractall("./cache")
        os.remove(_path)
    return fasttext.load_facebook_model(path)
def cosine_similarity(a, b):
    # plain-Python cosine similarity; cast to float so the scores stay JSON-serializable
    norm_a = sum(map(lambda x: x * x, a)) ** 0.5
    norm_b = sum(map(lambda x: x * x, b)) ** 0.5
    return float(sum(map(lambda x: x[0] * x[1], zip(a, b))) / (norm_a * norm_b))
def get_vector(_model, _word_a, _word_b):
    # represent a pair as the offset (difference) of its two term embeddings;
    # index through `.wv` so this also works with gensim >= 4
    # return np.mean([_model.wv[_x] for _x in _word_a.split(" ")], axis=0) - np.mean([_model.wv[_x] for _x in _word_b.split(" ")], axis=0)
    return _model.wv[_word_a] - _model.wv[_word_b]
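# e.g. (hypothetical pairs): the offset for ("paris", "france") should lie close to
# the offset for ("rome", "italy"), so their cosine similarity should be high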
# load dataset
data = load_dataset("cardiffnlp/relentless", split="test")
full_result = []
os.makedirs("results/word_embedding/fasttext", exist_ok=True)
scorer = None
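# score each relation type; per-relation similarities are cached as JSONL,
# so the large fastText model is only loaded when a cache file is missing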
for d in data:
    ppl_file = f"results/word_embedding/fasttext/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl"
    if not os.path.exists(ppl_file):
        if scorer is None:
            scorer = load_model()
        anchor_embeddings = [get_vector(scorer, a, b) for a, b in d['prototypical_examples']]
        option_embeddings = [get_vector(scorer, x, y) for x, y in d['pairs']]
        similarity = [[cosine_similarity(a, b) for b in anchor_embeddings] for a in option_embeddings]
        output = [{"similarity": s} for s in similarity]
        with open(ppl_file, "w") as f:
            f.write("\n".join([json.dumps(i) for i in output]))
    with open(ppl_file) as f:
        similarity = [json.loads(i)['similarity'] for i in f.read().split("\n") if len(i) > 0]
    true_rank = d['ranks']
    assert len(true_rank) == len(similarity), f"Mismatch in number of examples: {len(true_rank)} vs {len(similarity)}"
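    # aggregate each candidate's similarities to the anchor set with max / min / mean,
    # then convert the aggregated scores to ranks (1 = most similar)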
    prediction = [max(s) for s in similarity]
    rank_map = {p: n for n, p in enumerate(sorted(prediction, reverse=True), 1)}
    prediction_max = [rank_map[p] for p in prediction]

    prediction = [min(s) for s in similarity]
    rank_map = {p: n for n, p in enumerate(sorted(prediction, reverse=True), 1)}
    prediction_min = [rank_map[p] for p in prediction]

    prediction = [mean(s) for s in similarity]
    rank_map = {p: n for n, p in enumerate(sorted(prediction, reverse=True), 1)}
    prediction_mean = [rank_map[p] for p in prediction]
    tmp = pd.DataFrame([true_rank, prediction_max, prediction_min, prediction_mean]).T
    cor_max = tmp.corr("spearman").values[0, 1]
    cor_min = tmp.corr("spearman").values[0, 2]
    cor_mean = tmp.corr("spearman").values[0, 3]
    # only the max-aggregation correlation is reported; cor_min / cor_mean are kept for inspection
    full_result.append({"model": "fastText\\textsubscript{pair}", "relation_type": d['relation_type'], "correlation": cor_max})
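# assemble the per-relation correlations into one table: rows = model, columns = relation type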
df = pd.DataFrame(full_result)
df = df.pivot(columns="relation_type", index="model", values="correlation")
df['average'] = df.mean(axis=1)
df.to_csv("results/word_embedding/fasttext.csv")
df = (100 * df).round()
print(df.to_markdown())
print(df.to_latex())
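# Expected usage (assuming the repo layout implied by this file's path):
#   python experiments/baseline_fasttext.py
# This writes per-relation similarity caches under results/word_embedding/fasttext/
# and the aggregated Spearman correlations to results/word_embedding/fasttext.csv.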