Datasets:

Languages:
English
Size:
< 1K
ArXiv:
Libraries:
Datasets
License:
asahi417 committed on
Commit
1bb8d13
1 Parent(s): cfcb034
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. experiments/analysis/correlation/after.all.csv +9 -0
  2. experiments/analysis/correlation/after.is_competitor-rival_of.csv +9 -0
  3. experiments/analysis/correlation/after.is_friend-ally_of.csv +9 -0
  4. experiments/analysis/correlation/after.is_influenced_by.csv +9 -0
  5. experiments/analysis/correlation/after.is_known_for.csv +9 -0
  6. experiments/analysis/correlation/after.is_similar_to.csv +9 -0
  7. experiments/analysis/correlation/before.all.csv +9 -0
  8. experiments/analysis/correlation/before.is_competitor-rival_of.csv +9 -0
  9. experiments/analysis/correlation/before.is_friend-ally_of.csv +9 -0
  10. experiments/analysis/correlation/before.is_influenced_by.csv +9 -0
  11. experiments/analysis/correlation/before.is_known_for.csv +9 -0
  12. experiments/analysis/correlation/before.is_similar_to.csv +9 -0
  13. experiments/analysis/flan_ul2_additional_analysis.py +38 -0
  14. experiments/analysis/flan_ul2_additional_analysis/comp.csv +90 -0
  15. experiments/analysis/flan_ul2_additional_analysis/frie.csv +93 -0
  16. experiments/analysis/flan_ul2_additional_analysis/infl.csv +94 -0
  17. experiments/analysis/flan_ul2_additional_analysis/know.csv +109 -0
  18. experiments/analysis/flan_ul2_additional_analysis/simi.csv +94 -0
  19. experiments/analysis/get_correlation.py +59 -0
  20. experiments/analysis/get_correlation_before.py +58 -0
  21. experiments/analysis/get_error_in_top_bottom.py +163 -0
  22. experiments/analysis/get_qualitative.py +95 -0
  23. experiments/analysis/get_statistics.py +24 -0
  24. experiments/analysis/qualitative/lc.30.csv +16 -0
  25. experiments/analysis/qualitative/lc.30.format.csv +51 -0
  26. experiments/analysis/qualitative/lc.31.csv +16 -0
  27. experiments/analysis/qualitative/lc.31.format.csv +48 -0
  28. experiments/analysis/qualitative/qa.30.csv +16 -0
  29. experiments/analysis/qualitative/qa.30.format.csv +20 -0
  30. experiments/analysis/qualitative/qa.31.csv +16 -0
  31. experiments/analysis/qualitative/qa.31.format.csv +53 -0
  32. experiments/baseline_fasttext.py +93 -0
  33. experiments/baseline_fasttext_zeroshot.py +71 -0
  34. experiments/baseline_gpt4.py +100 -0
  35. experiments/baseline_lm_lc.py +95 -0
  36. experiments/baseline_lm_lc_fewshot.py +90 -0
  37. experiments/baseline_lm_lc_zeroshot.py +74 -0
  38. experiments/baseline_lm_qa.py +92 -0
  39. experiments/baseline_lm_qa_fewshot.py +83 -0
  40. experiments/baseline_lm_qa_zeroshot.py +75 -0
  41. experiments/baseline_oracle.py +30 -0
  42. experiments/baseline_relbert.py +69 -0
  43. experiments/baseline_relbert_misc.py +67 -0
  44. experiments/baseline_validation_lc.py +75 -0
  45. experiments/baseline_validation_qa.py +72 -0
  46. experiments/figures/fewshots/lc.average.fewshot.landscape.png +3 -0
  47. experiments/figures/fewshots/lc.average.fewshot.png +3 -0
  48. experiments/figures/fewshots/lc.is_competitor-rival_of.fewshot.landscape.png +3 -0
  49. experiments/figures/fewshots/lc.is_competitor-rival_of.fewshot.png +3 -0
  50. experiments/figures/fewshots/lc.is_friend-ally_of.fewshot.landscape.png +3 -0
experiments/analysis/correlation/after.all.csv ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ ,A,B,C,D,E,F,G,Others
2
+ A,100,61,82,68,72,75,74,83
3
+ B,61,100,61,60,63,59,60,66
4
+ C,82,61,100,72,69,75,74,83
5
+ D,68,60,72,100,68,69,70,77
6
+ E,72,63,69,68,100,69,72,76
7
+ F,75,59,75,69,69,100,70,78
8
+ G,74,60,74,70,72,70,100,79
9
+ Avg,76,66,76,72,73,74,74,78
experiments/analysis/correlation/after.is_competitor-rival_of.csv ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ ,A,B,C,D,E,F,G,Others
2
+ A,100.0,56.54319390515396,79.87133431279439,70.95818750915267,74.03711383203164,78.69931852275197,78.87638094632628,86.08225526308935
3
+ B,56.54319390515396,100.0,48.04659857958964,39.17693516383341,59.64637012600053,58.85352304643422,51.618426893789746,56.86820300674108
4
+ C,79.87133431279439,48.04659857958964,100.0,76.54855391331245,67.6085519390185,74.60666967684703,75.55332383117188,81.0788877824353
5
+ D,70.95818750915267,39.17693516383341,76.54855391331245,100.0,54.06490863378692,67.8861938889971,69.76801705759928,75.8361454463992
6
+ E,74.03711383203164,59.64637012600053,67.6085519390185,54.06490863378692,100.0,70.09210566043514,67.33088872309497,74.86739649782788
7
+ F,78.69931852275197,58.85352304643422,74.60666967684703,67.8861938889971,70.09210566043514,100.0,66.34696159870124,80.42278452805233
8
+ G,78.87638094632628,51.618426893789746,75.55332383117188,69.76801705759928,67.33088872309497,66.34696159870124,100.0,79.33053104999443
9
+ Avg,76.99793271831585,59.12643538782879,74.60500460753342,68.34325659524026,70.39713413062397,73.78353891345239,72.78485700724049,76.35517193921994
experiments/analysis/correlation/after.is_friend-ally_of.csv ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ ,A,B,C,D,E,F,G,Others
2
+ A,100.0,73.5158066035359,85.12066706809678,69.40314867296632,73.66824765822612,78.1088578701807,73.39476096101995,86.8532003252799
3
+ B,73.5158066035359,100.0,74.93994575530773,53.67391003422357,63.95426568650334,72.91672265302968,65.73772236823466,76.24715921844862
4
+ C,85.12066706809678,74.93994575530773,100.0,69.1201397730712,71.65542383420254,77.33646092998272,74.26522935021809,87.95166180614709
5
+ D,69.40314867296632,53.67391003422357,69.1201397730712,100.0,62.95972780275258,60.291767924280904,66.01345780309363,70.5188423648465
6
+ E,73.66824765822612,63.95426568650334,71.65542383420254,62.95972780275258,100.0,66.68150265645419,70.60536454057583,75.61675189464589
7
+ F,78.1088578701807,72.91672265302968,77.33646092998272,60.291767924280904,66.68150265645419,100.0,74.95839266319567,80.06289154527263
8
+ G,73.39476096101995,65.73772236823466,74.26522935021809,66.01345780309363,70.60536454057583,74.95839266319567,100.0,78.39054390307113
9
+ Avg,79.03021269057511,72.10548187154784,78.9196952444113,68.78030743005546,72.78921888267351,75.75624352816055,74.99641824090541,79.37729300824454
experiments/analysis/correlation/after.is_influenced_by.csv ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ ,A,B,C,D,E,F,G,Others
2
+ A,100.0,51.94171331444277,76.64686404594195,68.55138766065575,68.67476656701089,60.56268637566388,71.07198336389125,76.65846418330784
3
+ B,51.94171331444277,100.0,56.216149636956715,63.88710689660275,49.767674630280254,34.211316819741576,55.5705129270886,56.22898643303505
4
+ C,76.64686404594195,56.216149636956715,100.0,74.50188749125914,69.52037492991374,70.19678111002037,76.59617234660793,84.5912792667659
5
+ D,68.55138766065575,63.88710689660275,74.50188749125914,100.0,65.21611910474178,53.97509331865549,71.07394893761723,76.96075529482714
6
+ E,68.67476656701089,49.767674630280254,69.52037492991374,65.21611910474178,100.0,65.44921765837152,71.50287989073686,70.9076829805752
7
+ F,60.56268637566388,34.211316819741576,70.19678111002037,53.97509331865549,65.44921765837152,100.0,62.66192236851534,63.29379817092439
8
+ G,71.07198336389125,55.5705129270886,76.59617234660793,71.07394893761723,71.50287989073686,62.66192236851534,100.0,78.37981277607685
9
+ Avg,71.06420018965807,58.79921060358753,74.81117565152854,71.02936334421888,70.01871896872215,63.86528823585259,72.63963140492247,72.43153987221605
experiments/analysis/correlation/after.is_known_for.csv ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ ,A,B,C,D,E,F,G,Others
2
+ A,100.0,74.74442833532684,84.13532982131655,78.11282022609298,79.7217723101429,80.27736779799007,78.06582585030257,88.44840275453632
3
+ B,74.74442833532684,100.0,71.88484545113157,70.96478619650367,73.26309074151189,65.84376066947098,70.95702296183376,76.45141083803824
4
+ C,84.13532982131655,71.88484545113157,100.0,78.34676361771689,77.61657753751035,76.34426804007906,80.7906110355711,88.67600784268357
5
+ D,78.11282022609298,70.96478619650367,78.34676361771689,100.0,76.21394151356353,81.9798764256365,75.33292538603865,83.16422966540911
6
+ E,79.7217723101429,73.26309074151189,77.61657753751035,76.21394151356353,100.0,71.81655636035532,76.05645487800281,80.843952632741
7
+ F,80.27736779799007,65.84376066947098,76.34426804007906,81.9798764256365,71.81655636035532,100.0,72.46069739803418,81.11414255091312
8
+ G,78.06582585030257,70.95702296183376,80.7906110355711,75.33292538603865,76.05645487800281,72.46069739803418,100.0,82.87601809487577
9
+ Avg,82.15107776302456,75.37970490796839,81.3026279290465,80.13587333793603,79.24119904872668,78.38893238450945,79.09479107282615,83.08202348274244
experiments/analysis/correlation/after.is_similar_to.csv ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ ,A,B,C,D,E,F,G,Others
2
+ A,100.0,60.00975228758444,81.54101511133754,74.43602738182507,77.56590405414345,78.39753384441264,73.41096267200922,82.58933416066628
3
+ B,60.00975228758444,100.0,63.40923847092774,66.93455347751886,64.98314809483378,60.945408506618904,62.85800772150512,70.3739329356405
4
+ C,81.54101511133754,63.40923847092774,100.0,74.89990701550816,74.51949080720523,74.1472801283226,70.62798083887134,80.30647168681931
5
+ D,74.43602738182507,66.93455347751886,74.89990701550816,100.0,76.8730463177485,77.64379432171697,73.59632760275106,84.47353824438342
6
+ E,77.56590405414345,64.98314809483378,74.51949080720523,76.8730463177485,100.0,74.62112320369897,77.74721316545488,82.78509781266935
7
+ F,78.39753384441264,60.945408506618904,74.1472801283226,77.64379432171697,74.62112320369897,100.0,73.7701157624435,79.73809286146313
8
+ G,73.41096267200922,62.85800772150512,70.62798083887134,73.59632760275106,77.74721316545488,73.7701157624435,100.0,78.98319006106121
9
+ Avg,77.90874219304463,68.4485869369984,77.02070176745323,77.7690937310098,78.04427509186927,77.07503653817336,76.00151539471929,79.89280825181473
experiments/analysis/correlation/before.all.csv ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ ,A,B,C,D,E,F,G,Others
2
+ A,100,52,78,60,62,69,67,79
3
+ B,52,100,52,48,50,49,48,58
4
+ C,78,52,100,62,56,69,68,78
5
+ D,60,48,62,100,52,56,59,67
6
+ E,62,50,56,52,100,58,60,66
7
+ F,69,49,69,56,58,100,63,73
8
+ G,67,48,68,59,60,63,100,73
9
+ Avg,70,57,69,62,62,66,66,71
experiments/analysis/correlation/before.is_competitor-rival_of.csv ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ ,A,B,C,D,E,F,G,Others
2
+ A,100.0,52.28721031777685,73.01988202167145,61.86751259067569,59.02327389906606,69.62464900519876,74.58759543955621,81.14875407290214
3
+ B,52.28721031777685,100.0,47.78332950150525,33.57173333983089,51.487850121438896,47.45439315170696,51.1517480290777,55.33264230791658
4
+ C,73.01988202167145,47.78332950150525,100.0,64.98466726607829,46.820061474869846,65.96915924507293,65.14892715557822,72.93086309794178
5
+ D,61.86751259067569,33.57173333983089,64.98466726607829,100.0,27.065771191857724,58.48419600706666,59.06263159011146,64.35814337603051
6
+ E,59.02327389906606,51.487850121438896,46.820061474869846,27.065771191857724,100.0,57.565084203818515,50.06001864754375,57.88758692047003
7
+ F,69.62464900519876,47.45439315170696,65.96915924507293,58.48419600706666,57.565084203818515,100.0,64.22063751846045,75.01817026033358
8
+ G,74.58759543955621,51.1517480290777,65.14892715557822,59.06263159011146,50.06001864754375,64.22063751846045,100.0,75.46874464331518
9
+ Avg,70.058589039135,54.819466351619496,66.24657523782516,57.862358855088665,56.0031513626564,66.18830273304631,66.31879405433254,68.87784352555855
experiments/analysis/correlation/before.is_friend-ally_of.csv ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ ,A,B,C,D,E,F,G,Others
2
+ A,100.0,64.00716670487974,80.96929991489021,60.91711038747562,51.07234033415702,69.93055219508739,61.54228944153153,80.81008710100899
3
+ B,64.00716670487974,100.0,64.6463769759354,39.18445396059515,45.28171497683371,63.865856548521215,54.85618995287416,66.84599833129245
4
+ C,80.96929991489021,64.6463769759354,100.0,60.98343341269177,49.48409115451416,69.15399781710762,67.14087372027983,81.05528565973273
5
+ D,60.91711038747562,39.18445396059515,60.98343341269177,100.0,39.803858867062125,37.87022510983166,57.10120713061978,59.73916828666306
6
+ E,51.07234033415702,45.28171497683371,49.48409115451416,39.803858867062125,100.0,46.676485112587955,56.43076386123579,56.737579516621196
7
+ F,69.93055219508739,63.865856548521215,69.15399781710762,37.87022510983166,46.676485112587955,100.0,61.7547119793853,71.06535432862361
8
+ G,61.54228944153153,54.85618995287416,67.14087372027983,57.10120713061978,56.43076386123579,61.7547119793853,100.0,73.2101891094599
9
+ Avg,69.77696556828879,61.6916798742342,70.33972471363128,56.551469838325154,55.535607758055825,64.17883268036016,65.54657658370377,69.92338033334313
experiments/analysis/correlation/before.is_influenced_by.csv ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ ,A,B,C,D,E,F,G,Others
2
+ A,100.0,41.13916123239298,72.7891745489556,60.38517263617589,65.39476441892661,56.59326764561505,67.2494054690764,74.89564177488828
3
+ B,41.13916123239298,100.0,42.52077993986884,47.19483292507752,34.29897205100569,25.691451698984398,38.022325185482174,44.75182675614367
4
+ C,72.7891745489556,42.52077993986884,100.0,58.52916677892595,59.20916995743405,64.6044634829157,69.52603806039939,77.81298422977686
5
+ D,60.38517263617589,47.19483292507752,58.52916677892595,100.0,53.728902194119485,45.629522263837686,61.88801859214723,64.90467803921561
6
+ E,65.39476441892661,34.29897205100569,59.20916995743405,53.728902194119485,100.0,58.54579111991007,63.97748570260461,64.69585985116137
7
+ F,56.59326764561505,25.691451698984398,64.6044634829157,45.629522263837686,58.54579111991007,100.0,59.49798850743312,60.774236785423696
8
+ G,67.2494054690764,38.022325185482174,69.52603806039939,61.88801859214723,63.97748570260461,59.49798850743312,100.0,72.96323307529103
9
+ Avg,66.22156370730609,46.98107471897309,66.73982753835708,61.05080219861197,62.16501220628579,58.651783531242295,65.73732307387755,65.82835150170007
experiments/analysis/correlation/before.is_known_for.csv ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ ,A,B,C,D,E,F,G,Others
2
+ A,100.0,67.60542521315183,78.99258414561156,70.60729823080707,74.68943205270077,73.99844745656023,70.94236998891486,85.0291674589494
3
+ B,67.60542521315183,100.0,64.40711519776913,61.948046919934484,60.83803213187071,57.95402107485319,61.0893228550051,68.76444945558835
4
+ C,78.99258414561156,64.40711519776913,100.0,72.5243047524468,71.08735855760287,71.72798299872653,78.79869372505223,86.64419989131211
5
+ D,70.60729823080707,61.948046919934484,72.5243047524468,100.0,64.16978480034184,74.0475244254116,64.46888949548037,76.86855504489868
6
+ E,74.68943205270077,60.83803213187071,71.08735855760287,64.16978480034184,100.0,62.80562081530741,67.78495226346166,74.65875054250918
7
+ F,73.99844745656023,57.95402107485319,71.72798299872653,74.0475244254116,62.80562081530741,100.0,67.67681353308181,77.38669659002932
8
+ G,70.94236998891486,61.0893228550051,78.79869372505223,64.46888949548037,67.78495226346166,67.67681353308181,100.0,78.22620278337227
9
+ Avg,76.69079386967805,67.6917090560835,76.79114848245845,72.53797837491746,71.62502580304076,72.60148718627725,72.96586312299944,78.22543168095133
experiments/analysis/correlation/before.is_similar_to.csv ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ ,A,B,C,D,E,F,G,Others
2
+ A,100.0,50.788390527450986,81.3309571392445,65.93758595023561,71.13540455548613,76.2911846293017,64.67431705394428,81.95818829358748
3
+ B,50.788390527450986,100.0,50.29789364051304,46.1791780099053,49.29530173722065,45.834119144143784,44.635319406219466,56.74584338567736
4
+ C,81.3309571392445,50.29789364051304,100.0,66.78448204534237,68.81163175266447,71.55517763803671,65.09737826880126,80.70066572829978
5
+ D,65.93758595023561,46.1791780099053,66.78448204534237,100.0,63.17852638209246,62.87725600577802,59.207656007848755,72.62927605237593
6
+ E,71.13540455548613,49.29530173722065,68.81163175266447,63.17852638209246,100.0,63.1726268025905,62.37114999056743,75.09891932367825
7
+ F,76.2911846293017,45.834119144143784,71.55517763803671,62.87725600577802,63.1726268025905,100.0,64.02868701383065,74.86624945198784
8
+ G,64.67431705394428,44.635319406219466,65.09737826880126,59.207656007848755,62.37114999056743,64.02868701383065,100.0,70.49994963795264
9
+ Avg,72.8796914079519,55.29002892363617,71.98250292637177,66.30924062874321,68.28066303151738,69.10843589052591,65.71635824874456,73.21415598193705
experiments/analysis/flan_ul2_additional_analysis.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import json
from glob import glob
import pandas as pd
from datasets import load_dataset

# Target directory for the per-relation comparison CSVs written later.
os.makedirs("analysis/flan_ul2_additional_analysis", exist_ok=True)

# Gold data: one record per relation type, keyed by 'relation_type'.
data = load_dataset("cardiffnlp/relentless_full", split="test")
data = {i['relation_type']: i for i in data}


def _load_perplexities(pattern):
    """Load per-relation perplexity lists from JSONL result files.

    Parameters
    ----------
    pattern : str
        Glob pattern matching the ``ppl.*.jsonl`` result files.

    Returns
    -------
    dict
        Maps the relation-type key (recovered from the file name, e.g.
        ``ppl.is_friend__ally_of.jsonl`` -> ``friend/ally of``) to the
        list of per-example perplexity floats.
    """
    out = {}
    for path in glob(pattern):
        r = os.path.basename(path).replace("__", "/").replace("_", " ").replace("ppl.", "").replace("is ", "").replace(".jsonl", "")
        with open(path) as f:
            # Skip blank lines: a JSONL file normally ends with a newline,
            # and the original f.read().split("\n") produced a trailing ""
            # that made json.loads raise JSONDecodeError.
            out[r] = [json.loads(line)['perplexity'] for line in f if line.strip()]
    return out


# Zero-shot and few-shot (1 shot, 1 seed) predictions for flan-ul2.
pred_zero = _load_perplexities("results/lm_qa_zeroshot/flan-ul2/*.jsonl")
pred_few = _load_perplexities("results/lm_qa_1shots_1seed/flan-ul2/*.jsonl")
24
def get_rank(score):
    """Map each score to its index in the ascending sort of *score*.

    Duplicate scores collapse to a single dictionary entry, so tied
    values all receive the highest sorted position among the ties.
    """
    position = {}
    for idx, val in enumerate(sorted(score)):
        # later duplicates overwrite earlier ones -> ties share the max index
        position[val] = idx
    return [position[val] for val in score]
27
+
28
# For every relation type, write a side-by-side table of gold scores/ranks
# against the few-shot and zero-shot model perplexities.
for relation, record in data.items():
    columns = {
        "pairs": record['pairs'],
        "score_fewshot": pred_few[relation],
        "score_zeroshot": pred_zero[relation],
        "score_true": record["scores_mean"],
        "rank_fewshot": get_rank(pred_few[relation]),
        "rank_zeroshot": get_rank(pred_zero[relation]),
        "rank_true": record["ranks"],
    }
    # File name is the first four characters of the relation type
    # (e.g. "comp", "frie", "infl", "know", "simi").
    pd.DataFrame(columns).to_csv(
        f"analysis/flan_ul2_additional_analysis/{relation[:4]}.csv", index=False)
experiments/analysis/flan_ul2_additional_analysis/comp.csv ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ pairs,score_fewshot,score_zeroshot,score_true,rank_fewshot,rank_zeroshot,rank_true
2
+ "['Jeremy Corbyn', 'Keir Starmer']",2.254284692564132,1.4222025149293618,4.142857074737549,59,58,37
3
+ "['EuroBasket 2022', 'Murad III']",4.4401195005779455,2.3454037993300583,1.2857142686843872,78,76,84
4
+ "['Bugatti', 'Lamborghini']",1.4160010444318634,1.1581703913669972,2.857142925262451,31,29,60
5
+ "['Apple', 'Microsoft']",1.2913670974598686,1.116980313466796,4.857142925262451,16,20,11
6
+ "['Lee Chong Wei', 'Lin Dan']",1.63429984614617,1.2366011532822951,4.142857074737549,40,37,37
7
+ "['Microsoft', 'Google']",1.1447724456222377,1.0906696582667166,4.4285712242126465,8,15,24
8
+ "['Samsung', 'Pitbull']",4.140153957741774,2.8062127386926976,1.2857142686843872,74,81,84
9
+ "['Cristiano Ronaldo', 'Lionel Messi']",1.3742245807821651,1.076442896033145,4.857142925262451,28,14,11
10
+ "['Germany', 'Austria']",2.2091957310404364,1.1516096930453659,2.2857143878936768,58,27,67
11
+ "['Dyson', 'Dualit']",1.8600962574891424,1.4066400229807077,2.7142856121063232,47,57,64
12
+ "['Netflix', 'Disney Plus']",1.3845683515726805,1.233603087311407,4.5714287757873535,30,36,19
13
+ "['PyTorch', 'TensorFlow']",1.3366302997102721,1.2808253965315133,4.4285712242126465,20,44,24
14
+ "['Dell', 'HP']",1.1219571043441705,1.0416585847044884,5.0,5,3,3
15
+ "['Sanpellegrino', 'Volvic']",1.338617055933703,1.3447474634198389,3.5714285373687744,21,52,51
16
+ "['Mikhail Khodorkovsky', 'Hezbollah']",3.7707901731903886,2.4170384926741626,1.4285714626312256,73,78,77
17
+ "['British Airways', 'Aer Lingus']",1.270211940245148,1.1037129775740366,4.142857074737549,14,18,37
18
+ "['Microsoft Teams', 'Slack']",1.164694234892354,1.0557504472617725,4.857142925262451,9,11,11
19
+ "['Federal Reserve Board', 'Bank of England']",2.578587076949289,1.8141170517333125,2.857142925262451,61,69,60
20
+ "['American Psycho', 'Chihuahua']",4.464672718711256,1.9418134795261126,1.2857142686843872,79,71,84
21
+ "['Mars', 'Snickers']",1.8501941549035406,1.4256724727393357,3.4285714626312256,46,59,54
22
+ "['ASML', 'LAM Research']",1.7783270160683868,1.8641223203372959,4.142857074737549,44,70,37
23
+ "['Jake Paul', 'Tangled']",4.551227932548134,2.396226404066843,1.2857142686843872,80,77,84
24
+ "['Nikhita Khrushchev', 'Leonid Brezhnev']",3.266073908627386,1.4714526914342227,3.4285714626312256,71,61,54
25
+ "['Razer', 'Dell']",1.2391593524126838,1.0495182657911724,4.142857074737549,12,9,37
26
+ "['Spotify', 'Apple']",1.3442945939889763,1.1535822949864265,4.0,22,28,43
27
+ "['Kourtney Kardashian', 'Jenna Fischer']",4.280694537598255,2.1953277578209183,2.142857074737549,76,74,69
28
+ "['Twitter', 'WhatsApp']",1.6244904643929405,1.4379288274666722,3.0,39,60,58
29
+ "['Manchester United', 'Arsenal']",1.3605588591560613,1.0723828530634452,4.142857074737549,26,13,37
30
+ "['WeChat', 'WhatsApp']",1.484841699252194,1.2416368997951348,4.4285712242126465,33,38,24
31
+ "['Saudi Arabia', 'Israel']",1.6174798554892467,1.1452641071432148,4.285714149475098,38,24,29
32
+ "['Bashar al-Assad', 'Christianity']",3.550410604642534,2.1672732394398393,1.2857142686843872,72,73,84
33
+ "['Didier Deschamps', 'Scott Adkins']",2.867155455724534,1.7735129268342826,1.4285714626312256,66,68,77
34
+ "['Lionel Messi', 'Kylian Mbappé']",1.7657343743251988,1.2661665197380474,2.857142925262451,43,43,60
35
+ "['Louis Philippe', 'Peter England']",2.19019561111919,1.351565809742223,2.0,56,53,71
36
+ "['Steve Jobs', 'Atlanta']",5.904232434605679,3.540538692893887,1.2857142686843872,85,88,84
37
+ "['Bella Hadid', 'Choi Woo-shik']",5.338253953051818,2.6104166628925474,1.4285714626312256,83,80,77
38
+ "['Mali', 'Frances McDormand']",4.69652347063031,2.884401969312326,1.2857142686843872,81,83,84
39
+ "['Coca-Cola Company', 'Pepsi']",1.181473897259919,1.0438806211708804,5.0,10,5,3
40
+ "['Khabib Nurmagomedov', 'Conor McGregor']",1.1193633368491687,1.0354933291081991,4.857142925262451,4,1,11
41
+ "['Twitter', 'Facebook']",1.3590241391406035,1.1952468266881626,4.285714149475098,25,33,29
42
+ "['Cardiff University', 'Swansea University']",1.7168344574402135,1.203303933183536,4.142857074737549,42,34,37
43
+ "['Isaac Newton', 'Gottfried Leibniz']",2.6001425931445246,1.3553361325661701,4.285714149475098,62,54,29
44
+ "['Casio', 'Texas Instruments']",1.5263560718426743,1.1835395036039162,4.142857074737549,36,32,37
45
+ "['Arsenal', 'Tottenham Hotspur']",1.4652092020840817,1.0591720732323784,4.857142925262451,32,12,11
46
+ "['Nintendo', 'Xbox']",1.2938302987723913,1.0998882844596016,4.285714149475098,17,17,29
47
+ "['H&M', 'Zalora']",1.5132357286656546,1.359639423903739,4.0,35,55,43
48
+ "['Serena Williams', 'Andy Murray']",1.357703886660535,1.0459443108429878,2.2857143878936768,23,7,67
49
+ "['Liverpool FC', 'Manchester United']",1.2320421654673464,1.0449462333912818,5.0,11,6,3
50
+ "['Apple', 'Samsung']",1.1382160153053889,1.04769222313876,4.857142925262451,7,8,11
51
+ "['Expedia', 'Trivago']",1.2754892404597642,1.1483506089912217,2.857142925262451,15,26,60
52
+ "['Heathrow Airport', 'Gatwick Airport']",1.310057344200426,1.1256792359957808,3.857142925262451,18,21,46
53
+ "['Mario', 'Bowser']",2.1051866586258243,1.2529699295998475,4.5714287757873535,52,41,19
54
+ "['US', 'China']",2.1978322480965753,1.2532725062626966,4.857142925262451,57,42,11
55
+ "['Olympic Games', 'Helicobacter pylori']",6.8756513675674,3.3511833150794743,1.2857142686843872,87,86,84
56
+ "['BMW', 'Mercedes-Benz']",1.1019890189307078,1.036298820976043,4.857142925262451,1,2,11
57
+ "['Blur', 'Oasis']",2.109694036558229,1.3052386699396863,4.4285712242126465,53,46,24
58
+ "['Israel', 'Palestine']",1.5110198418932492,1.1457000668046726,5.0,34,25,3
59
+ "['Toshiba', 'LG']",1.3579084816318956,1.163162197987383,4.4285712242126465,24,30,24
60
+ "['Apple', 'Rolex']",2.117420121610843,1.55752033597564,2.7142856121063232,54,66,64
61
+ "['Tesla', 'Skoda']",1.5706529356751258,1.4840326440929967,3.4285714626312256,37,63,54
62
+ "['Thomas Jefferson', 'Alexander Hamilton']",2.669107721335398,1.322868915838166,4.142857074737549,63,49,37
63
+ "['Chester FC', 'Wrexham FC']",1.3745051107817812,1.1432810093172319,4.5714287757873535,29,23,19
64
+ "['Line of Duty', 'CSI']",2.990469070715555,1.517484179767383,3.7142856121063232,69,65,48
65
+ "['Gladiator', 'Imelda Staunton']",5.403125729441755,3.4826074368903637,1.2857142686843872,84,87,84
66
+ "['UK', 'France']",2.975482406888533,1.3125181354740607,2.5714285373687744,68,48,66
67
+ "['Nike', 'Adidas']",1.1153827300638572,1.0528324898144443,4.857142925262451,2,10,11
68
+ "['Alain Prost', 'Ayrton Senna']",1.3662056774325724,1.1037874996569803,4.285714149475098,27,19,29
69
+ "['Manchester City', 'Manchester United']",1.1175792528145987,1.042690282562067,4.857142925262451,3,4,11
70
+ "['BBC', 'The Guardian']",1.9134159077022395,1.3234590754937028,3.7142856121063232,49,50,48
71
+ "['Amazon', 'Ebay']",1.2634007233737188,1.1408217207337337,4.5714287757873535,13,22,19
72
+ "['Sir Alex Ferguson', 'Jose Mourinho']",1.7097426451365767,1.3362973630326773,3.2857143878936768,41,51,56
73
+ "['ASEAN', 'Helen Hunt']",6.710575637440718,3.295957018142525,1.2857142686843872,86,85,84
74
+ "['Hans Zimmer', 'John Williams']",2.130025039273922,1.245920637785629,3.2857143878936768,55,39,56
75
+ "['Noel Gallagher', 'Liam Gallaguer']",1.959691514461484,1.2467042808942606,4.0,50,40,43
76
+ "[""McDonald's"", 'Burger King']",1.1362206561310968,1.0334133820827456,5.0,6,0,3
77
+ "['Neoclassicism', 'Romanticism']",2.8311254802716017,1.3743393007605533,3.857142925262451,65,56,46
78
+ "['Royal Feast', 'Fast X']",2.0789058943581598,1.309691642758208,1.5714285373687744,51,47,74
79
+ "['Eminem', 'MGK']",1.8182500402863373,1.4744226711936121,4.285714149475098,45,62,29
80
+ "['Sprite', '7 Up']",1.067041902759356,1.212745195228596,5.0,0,35,3
81
+ "['Katharine Hepburn', 'Abrahamic religion']",7.435596112890825,3.0833799818271994,2.0,88,84,71
82
+ "['Martin Luther King Jr.', 'Malcolm X']",2.950741229891417,1.6853565210199075,3.5714285373687744,67,67,51
83
+ "['Ligue 1', 'Hayley Atwell']",4.241591954181677,2.5542050524768216,1.2857142686843872,75,79,84
84
+ "['Vikram', 'Coen brothers']",5.102705427087103,2.8249661298246354,1.5714285373687744,82,82,74
85
+ "['Russia', 'China']",2.2552297511812522,1.3004875998162657,2.7142856121063232,60,45,64
86
+ "['Mohamed Salah', 'Korea']",3.17013754561042,1.955964256984248,1.5714285373687744,70,72,74
87
+ "['Kingston', 'Samsung']",1.898363886873327,1.1773256866493471,3.5714285373687744,48,31,51
88
+ "['AWS', 'GCP']",1.328443870096703,1.0957580666384157,4.714285850524902,19,16,17
89
+ "['Beatles', 'Rolling Stones']",2.720976296395675,1.512356439122218,4.0,64,64,43
90
+ "['John Tyler', 'Whig Party']",4.36023707587642,2.3251733241314745,2.0,77,75,71
experiments/analysis/flan_ul2_additional_analysis/frie.csv ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ pairs,score_fewshot,score_zeroshot,score_true,rank_fewshot,rank_zeroshot,rank_true
2
+ "['Rishi Sunak', 'Leo Varadkar']",2.5072286233955254,1.5328972126014884,4.0,36,38,36
3
+ "['Gondor', 'Rohan']",2.219667699120621,1.3200803632378275,4.285714149475098,27,10,24
4
+ "['FIFA', 'UEFA']",1.8393312611079873,1.577544267205364,3.7142856121063232,6,46,46
5
+ "['Joseph Stalin', 'Josip Broz Tito']",4.4628113646659155,1.7933325159421711,1.8571428060531616,73,57,74
6
+ "['Hillary Clinton', 'Barack Obama']",1.9927435787541081,1.3201421701192426,4.285714149475098,13,11,24
7
+ "['Di Maio', 'Salvini']",2.767160074374971,1.337838952235684,2.4285714626312256,46,13,66
8
+ "['Yahoo', 'Yahoo Japan']",1.8279701757878253,1.3426315225376397,4.5714287757873535,4,14,12
9
+ "['Armenia', 'Azerbaijan']",3.3910081672629055,2.019560002782508,1.2857142686843872,64,69,85
10
+ "['Doja Cat', 'Anthony Albanese']",3.6203144729988717,2.285112189877014,1.2857142686843872,66,77,85
11
+ "['Pedro Sánchez', 'Pablo Iglesias']",3.8441320471956386,1.5763126396850815,3.857142925262451,68,45,41
12
+ "['Islamic State', 'Denys Prokopenko']",7.555810974206813,2.7434138522858,1.8571428060531616,91,84,74
13
+ "['Brazil', 'India']",3.0876062265419786,1.5445218908976759,3.4285714626312256,53,42,54
14
+ "['Extinction Rebellion', 'Greta Thunberg']",4.297444603945184,1.7988221620291822,4.714285850524902,72,58,8
15
+ "['Sicily', 'Calabria']",2.14270818639152,1.7086696805448425,4.0,20,52,36
16
+ "['Oman', 'Iran']",4.655388561859594,1.8344885935861346,3.4285714626312256,79,62,54
17
+ "['Sony', 'ZEISS']",1.7508266335646165,1.4926095846618708,3.857142925262451,2,34,41
18
+ "['FTX', 'Alameda Research']",2.7771603329340837,1.8468702122489273,4.714285850524902,47,64,8
19
+ "['UK', 'Commonwealth']",2.286699100481261,1.3116195554924144,4.714285850524902,29,9,8
20
+ "['Australia', 'New Zealand']",2.0897834071538433,1.1996322618593351,4.857142925262451,15,4,4
21
+ "['Kylo Ren', 'Rey']",4.706397612009928,2.2496602704508817,3.857142925262451,82,76,41
22
+ "['Anne Boleyn', 'Columbia Pictures']",5.904263403618469,3.1023535131377296,1.5714285373687744,88,89,77
23
+ "['KGB', 'CIA']",4.205783551871509,1.9175389761631927,1.1428571939468384,71,66,90
24
+ "['Rishi Sunak', 'Joe Biden']",2.4815011107892357,1.4530706574347767,4.285714149475098,34,32,24
25
+ "['Quentin Tarantino', 'Edgar Wright']",2.118021967225266,1.5630134648523961,4.142857074737549,17,44,31
26
+ "['Keir Starmer', 'Jeremy Corbyn']",2.510536919927856,1.6027956172293558,2.5714285373687744,38,48,65
27
+ "['U.S.S.R.', 'East Germany']",3.1798063453250203,1.7490046671657096,4.5714287757873535,57,54,12
28
+ "['Harry Potter', 'Severus Snape']",3.130258366103835,2.1374828361920906,3.2857143878936768,55,73,59
29
+ "['Tata Motors', 'Jaguar']",2.0430720689925455,1.5440595393249203,4.714285850524902,14,41,8
30
+ "['Kendall Jenner', 'Bergen']",4.046531803418078,2.107965967178699,1.2857142686843872,70,72,85
31
+ "['Thomas Jefferson', 'Kid Cudi']",4.804395949063151,3.5699412974829463,1.4285714626312256,83,91,79
32
+ "['South Korea', 'Japan']",3.281895820214294,1.358307886737753,3.2857143878936768,61,17,59
33
+ "['Spain', 'Portugal']",2.7773444102348748,1.4030445224268684,4.5714287757873535,48,23,12
34
+ "['Liam Gallagher', 'Noel Gallagher']",1.7954047243557467,1.1880720173877841,2.0,3,2,72
35
+ "['France', 'Belgium']",2.68026465536471,1.263730412056352,5.0,43,6,1
36
+ "['Turkish Airlines', 'All Nippon Airways']",1.8306939117170087,1.8020300914000285,3.4285714626312256,5,59,54
37
+ "['Malaysia', 'Singapore']",3.2113783707043635,1.421034738582978,4.4285712242126465,59,28,17
38
+ "['JP Morgan', 'Morgan Stanley']",2.3167926133225145,1.6147051197573883,2.7142856121063232,30,49,63
39
+ "['Sophia Loren', 'Marlon Brando']",3.6334932429281146,1.5278672802298314,2.142857074737549,67,37,69
40
+ "['J.R.R. Tolkien', 'C.S. Lewis']",2.1717632564794833,1.4046480007981625,4.4285712242126465,23,24,17
41
+ "['China', 'North Korea']",4.635741070538269,1.9096033070427534,4.142857074737549,78,65,31
42
+ "['Margaret Thatcher', 'Ronald Reagan']",2.507500623446675,1.3577940001482511,4.4285712242126465,37,16,17
43
+ "['Eva Perón', 'Interpol']",4.584718925489622,3.0137673242361056,1.7142857313156128,77,87,76
44
+ "['UK', 'Ireland']",2.895721576596437,1.3653722695428352,3.7142856121063232,52,18,46
45
+ "['Singapore', 'Israel']",2.6640065695422295,1.4140880138313676,3.857142925262451,41,26,41
46
+ "['Eastern Orthodoxy', 'Oriental Orthodoxy']",2.144459751788138,1.5438050427063432,3.5714285373687744,21,40,49
47
+ "['India', 'US']",2.102631201814095,1.255014507349887,3.4285714626312256,16,5,54
48
+ "['Ed Gein', 'Colonel Sanders']",6.531176872066986,3.4933812565349394,1.2857142686843872,90,90,85
49
+ "['Beatles', 'Rolling Stones']",2.137642861486924,1.9865660915532268,3.4285714626312256,19,67,54
50
+ "['Red Bull', 'GoPro']",1.8860712721093587,1.5047058185996207,4.4285712242126465,7,36,17
51
+ "['HSBC', 'BlackRock']",2.886790634938107,2.0075052819565604,4.142857074737549,51,68,31
52
+ "['Elsa', 'Anna']",2.73324673786207,1.3202414370035342,4.5714287757873535,45,12,12
53
+ "['Macbeth', 'Banquo']",3.2268803106623203,1.594563451823357,2.7142856121063232,60,47,63
54
+ "['Aznar', 'Bush']",2.502721310388531,1.3840087685711524,4.714285850524902,35,20,8
55
+ "['Google', 'Samsung']",1.600535634162664,1.3997592031330308,2.142857074737549,0,22,69
56
+ "['IMF', 'The World Bank']",2.1482203071166244,1.5407179864932719,4.285714149475098,22,39,24
57
+ "['Nikon', 'Tokina']",1.891894533749897,1.8125038240195745,2.2857143878936768,8,61,67
58
+ "['Walter White', 'Gus Fring']",2.704029449327095,1.4999948860022556,2.142857074737549,44,35,69
59
+ "['Ron Weasley', 'Neville Longbottom']",2.47194408147064,1.10628288517074,4.285714149475098,32,0,24
60
+ "['Darth Vader', 'Emperor Palpatine']",4.5518926031726545,2.2899855574798225,3.7142856121063232,76,78,46
61
+ "['Coca-Cola', ""McDonald's""]",2.1347450248912994,1.8090831455918956,4.285714149475098,18,60,24
62
+ "['Instagram', 'WhatsApp']",2.1926051144849423,1.6941523367602,4.142857074737549,25,51,31
63
+ "['Noah Schnapp', 'Galatasaray S.K.']",4.544568142329219,2.8019273827454723,1.0,74,86,91
64
+ "['US', 'Canada']",2.480156982211465,1.197777087535886,4.857142925262451,33,3,4
65
+ "['Bob Marley', 'Abu Bakr']",5.859624246271322,2.649177577551091,1.2857142686843872,87,83,85
66
+ "['Jeff Bezos', 'GitHub']",3.3031318636509273,2.800474123110106,2.142857074737549,62,85,69
67
+ "['Hong Kong', 'HSBC']",2.5867981009933847,1.7907472995174216,3.5714285373687744,40,56,49
68
+ "['United States', 'United Kingdom']",2.204378009757044,1.1452585949143625,5.0,26,1,1
69
+ "['Porter Wagoner', 'Dolly Parton']",1.891955089900802,1.448369263557515,4.142857074737549,9,31,31
70
+ "['Achilles', 'Jonathan Bailey']",3.304829422561598,2.091679459310318,1.2857142686843872,63,71,85
71
+ "['Linus Sebastian', 'Marques Brownlee']",3.1908942558412865,1.4306724113989326,3.4285714626312256,58,30,54
72
+ "['Catherine Zeta-Jones', 'Johnny Knoxville']",5.361291092732239,2.3032680576640763,1.8571428060531616,86,79,74
73
+ "['Amazon', 'Royal Mail']",3.1234512057801505,1.8436059753752663,3.0,54,63,61
74
+ "['The Beatles', 'Queen']",2.6666825236320864,2.535800405044514,3.5714285373687744,42,81,49
75
+ "['Benedict Cumberbatch', 'Hanukkah']",4.5461660579119,2.587972950419903,1.4285714626312256,75,82,79
76
+ "['Huawei', 'China']",3.866617742324421,1.6263823578951324,4.857142925262451,69,50,4
77
+ "['Rishi Sunak', 'Emmanuel Macron']",2.233551767998682,1.4175925450471027,4.0,28,27,36
78
+ "['Microsoft', 'LinkedIn']",1.7256728699669188,1.4224206869370313,4.4285712242126465,1,29,17
79
+ "['Paul Rudd', 'Memento']",4.826123857746366,2.2146243768099922,1.2857142686843872,84,75,85
80
+ "['Russia', 'Georgia']",4.686735971876615,2.189183201168643,1.5714285373687744,80,74,77
81
+ "['Germany', 'France']",3.1350047721376497,1.408355684941454,4.0,56,25,36
82
+ "['Stephen Hawking', 'Brian Cox']",2.1910865064702514,1.481281276570047,3.857142925262451,24,33,41
83
+ "['Jean-Michel Basquiat', 'Andy Warhol']",2.825662976760519,1.387763339914243,4.0,49,21,36
84
+ "['Mark Drakeford', 'Rishi Sunak']",2.850747712232016,1.3819595848830297,3.0,50,19,61
85
+ "['Jürgen Klopp', 'Exo']",3.4804252140185947,2.0334641521139707,1.0,65,70,91
86
+ "['Windows', 'Xbox']",1.9842002773486025,1.3096745859497074,4.285714149475098,12,8,24
87
+ "['Saturn', 'Rachel Bilson']",4.694825694210988,2.3856047480335407,1.2857142686843872,81,80,85
88
+ "['Ottoman Empire', 'Snowpiercer']",6.0673352599523485,3.094959464267752,1.2857142686843872,89,88,85
89
+ "['Johnny Cash', 'Waylon Jennings']",1.945452892566945,1.2764023623156513,4.285714149475098,11,7,24
90
+ "['UN', 'NATO']",1.943108938657291,1.5479037084149063,4.4285712242126465,10,43,17
91
+ "['Boris Johnson', 'Emmanuel Macron']",2.3814063555318072,1.357574493019605,3.2857143878936768,31,15,59
92
+ "['Cersei Lannister', 'Euron Greyjoy']",4.890130547020662,1.789102919795851,3.857142925262451,85,55,41
93
+ "['Japan', 'Taiwan']",2.5580482576144874,1.7482978988801516,3.4285714626312256,39,53,54
experiments/analysis/flan_ul2_additional_analysis/infl.csv ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ pairs,score_fewshot,score_zeroshot,score_true,rank_fewshot,rank_zeroshot,rank_true
2
+ "['Prince Harry', 'Monarchy']",2.206237453023438,1.4828068514166197,4.857142925262451,59,65,3
3
+ "['F1', 'social media']",1.6962462246989474,1.4783565184427614,2.857142925262451,38,62,69
4
+ "['Elton John', 'Elvis Presley']",1.1771033084529128,1.2207688454317978,4.285714149475098,6,5,29
5
+ "['Jack Black', 'Waffen-SS']",2.6838348668032173,1.6618920512992446,1.1428571939468384,76,84,90
6
+ "['Game of Thrones', 'Lord of the Rings']",1.5128835998361514,1.3037945218604563,4.4285712242126465,28,27,21
7
+ "['Canon', 'Nikon']",1.5171157800616262,1.499901101345442,4.142857074737549,29,69,36
8
+ "['Thomas Aquinas', 'Aristotle']",1.4434779088253025,1.2786954208798134,4.5714287757873535,25,21,13
9
+ "['Android', 'iOS']",1.605213878304682,1.383687906634991,4.142857074737549,32,43,36
10
+ "['Charles Baudelaire', 'Tornado MRCA']",3.4752121817587165,2.2726535736087294,1.2857142686843872,86,92,86
11
+ "['trending music', 'TikTok']",3.1950195245345134,1.9175394333401652,4.714285850524902,82,87,7
12
+ "['Xi Jinping', 'Kim Jong-il']",2.4402362308482393,1.4616549945990829,3.2857143878936768,64,60,62
13
+ "['Beethoven', 'romanticism']",1.2263295243780155,1.3262804176032572,3.2857143878936768,12,32,62
14
+ "['Edgar Allan Poe', 'Romanticism']",1.4185346403400776,1.3348436344608527,4.4285712242126465,22,34,21
15
+ "['US', 'NASA']",2.118110339831737,1.4853929568657513,4.0,57,66,44
16
+ "['Wales', 'Westminster']",2.782460410565209,1.4885316048811115,4.285714149475098,78,67,29
17
+ "['Sierra Leone', 'Pulp fiction']",3.5934069934903454,1.5544310550489477,1.2857142686843872,87,72,86
18
+ "['Ethereum', 'Bitcoin']",1.8874822034491636,1.2953061815438591,4.857142925262451,48,23,3
19
+ "['Theresa May', 'David Cameron']",2.949204809089442,1.586599476418079,4.4285712242126465,79,76,21
20
+ "['Anna Delvey', 'Bernie Madoff']",5.623275964002472,2.2394238525833763,3.4285714626312256,92,91,57
21
+ "['Lord of the Rings', 'Beowulf']",1.258581279473116,1.2980656118986473,4.5714287757873535,14,25,13
22
+ "['Andrew Johnson', 'Abraham Lincoln']",1.9723178231973442,1.40498846198677,4.0,52,51,44
23
+ "['LinkedIn', 'Facebook']",1.791620299521638,1.4175498334174674,3.2857143878936768,44,52,62
24
+ "['Picasso', 'Cezanne']",1.1265124619892726,1.2479383152464008,4.5714287757873535,2,11,13
25
+ "['Playstation', 'Xbox']",1.8027918921751096,1.3788052017388595,4.142857074737549,45,42,36
26
+ "['Kevin Spacey', 'Tenerife']",3.031313168135999,1.9604880644970053,1.1428571939468384,80,89,90
27
+ "['Saudi Arabia', 'US']",2.093903574994278,1.2916515768834047,4.0,56,22,44
28
+ "['Fitbit', 'heart rate monitor']",2.606259421042801,1.6164861808299595,4.0,73,80,44
29
+ "['Taiwan', 'China']",2.5733032119077044,1.5468607584026417,4.5714287757873535,72,71,13
30
+ "['Kylian Mbappe', 'Cristiano Ronaldo']",1.3462659406923982,1.3997548229520052,4.5714287757873535,17,48,13
31
+ "['Picasso', 'cubism']",1.1808883481916752,1.2655718661677833,3.857142925262451,7,17,50
32
+ "['Miley Cyrus', 'Lorde']",2.728881194862877,1.4807292942898764,3.4285714626312256,77,64,57
33
+ "['Neymar', 'Alexis Sánchez']",1.4324063715617659,1.3969767151825936,3.5714285373687744,24,47,54
34
+ "['Smashing Pumpkins', 'Beatles']",1.1954389468878004,1.1792361630004708,4.0,9,0,44
35
+ "['Portuguese', 'The Prestige']",2.528953253519461,1.9433309750579217,1.7142857313156128,69,88,78
36
+ "['Luke Evans', 'Ava Gardner']",2.3254817098444387,1.4520768040323637,1.4285714626312256,61,59,81
37
+ "['MacOS', 'Linux']",1.4525116556793338,1.332216987202794,4.0,27,33,44
38
+ "['Transport for Wales', 'National Rail']",2.6428727501051643,1.6550213721066336,4.142857074737549,74,83,36
39
+ "['Herbie Hancock', 'Miles Davis']",1.236797709077148,1.1964928260606924,4.5714287757873535,13,1,13
40
+ "['India', 'Hinduism']",1.955558701368723,1.2196591792656795,4.857142925262451,50,4,3
41
+ "['hip hop', 'jazz']",1.2144741523280185,1.2047165654123286,3.7142856121063232,11,2,52
42
+ "['Bob Dylan', 'Woody Guthrie']",1.1375488736089192,1.2326184562750893,4.285714149475098,4,7,29
43
+ "['Tim Burton', 'German expressionism']",1.6186361198175159,1.369226360059585,4.142857074737549,33,40,36
44
+ "['Eamon de Valera', 'James Napper Tandy']",3.6175586205923973,1.443439450357,2.5714285373687744,88,58,76
45
+ "['Joe Biden', 'Donald Trump']",2.5705219277419085,1.4317740304131183,3.142857074737549,71,55,67
46
+ "[""Shaquille O'Neal"", 'Selim II']",2.4778299176391325,1.4394651784951111,1.2857142686843872,66,57,86
47
+ "['Cobra Kai', 'Anna Chlumsky']",2.534315382245363,1.977163565115634,1.2857142686843872,70,90,86
48
+ "['Harry Potter', 'Wizard of Oz']",1.3981003042913704,1.2769913155132229,2.7142856121063232,21,20,73
49
+ "['Coca-Cola', 'Pepsi']",1.7056819331589494,1.3923309535278097,4.285714149475098,39,46,29
50
+ "['Apple Music', 'Spotify']",1.8163653811137668,1.576047707286359,4.285714149475098,46,74,29
51
+ "['Singaporean food', 'Malaysian food']",1.9713104797427043,1.3594139059930275,3.142857074737549,51,38,67
52
+ "['Guess', 'Gucci']",1.8272982617030182,1.3498799842494749,3.2857143878936768,47,36,62
53
+ "['Commonwealth', 'United Kingdom']",2.089215860680506,1.317790593316562,4.5714287757873535,55,30,13
54
+ "['machine learning', 'Google']",1.543740723498369,1.470958422894975,4.0,30,61,44
55
+ "['Antonio Salazar', 'fascism']",2.680113370491548,1.6184086883675441,3.2857143878936768,75,81,62
56
+ "['Facebook', 'LinkedIn']",1.6300699591575025,1.4374315972141112,2.857142925262451,34,56,69
57
+ "['Hank Williams', 'Jimmie Rodgers']",2.4118345062406004,1.297683920797394,3.857142925262451,63,24,50
58
+ "['painters', 'Stable Diffusion']",2.51175947425801,1.5677620010100897,2.7142856121063232,68,73,73
59
+ "['Walmart', 'Rivaldo']",3.8841022284955686,1.71497201525277,1.2857142686843872,91,85,86
60
+ "['Sauron', 'Shiba Inu']",3.8095395194912576,1.6006748278781522,1.2857142686843872,90,79,86
61
+ "['Bruno Mars', 'James Brown']",1.731650925843459,1.3405051681301698,4.4285712242126465,40,35,21
62
+ "['Brazil', 'Spain']",1.649395943907092,1.365578101472138,2.7142856121063232,35,39,73
63
+ "['heavy metal', 'punk music']",1.1580766665070112,1.2675698013535046,3.142857074737549,5,18,67
64
+ "['Moon', 'Göbekli Tepe']",3.2690171349998085,1.8167444522743377,1.0,83,86,92
65
+ "['English', 'William Shakespeare']",1.7492749009346042,1.3537742603597003,4.4285712242126465,42,37,21
66
+ "['Beatles', 'Alice in Wonderland']",1.1214706356719988,1.2127665918503285,3.7142856121063232,0,3,52
67
+ "['impressionism', 'Edouard Manet']",1.426646255753299,1.2426143845861817,4.714285850524902,23,10,7
68
+ "['Gilbert Gottfried', 'Mike Krieger']",3.2947400117472725,1.5883290396723415,1.2857142686843872,84,77,86
69
+ "['Vladimir Lenin', 'chess']",2.018240039235421,1.6468579185886547,3.4285714626312256,53,82,57
70
+ "['Pepsi', 'Coca-Cola']",1.5754848268216837,1.3216048983146913,4.285714149475098,31,31,29
71
+ "['Alicia Vikander', 'Richard Attenborough']",2.3518508531775946,1.4914094205132749,1.5714285373687744,62,68,79
72
+ "['Luke Bryan', 'Hank Williams']",2.070155943604063,1.369560235671437,2.5714285373687744,54,41,76
73
+ "['Bill Gates', 'Steve Jobs']",1.2081641177318276,1.2628213877198153,3.2857143878936768,10,16,62
74
+ "['Allu Arjun', 'Aaron Ramsey']",3.0687671520089728,1.5093072142863981,1.0,81,70,92
75
+ "['Stephen King', 'Arthur Machen']",1.4464092702112175,1.3031646611706496,4.285714149475098,26,26,29
76
+ "['Bangladesh', 'India']",3.3210378134420866,1.594239101732205,4.0,85,78,44
77
+ "['hamburger', 'Germany']",2.2905567186541202,1.4805603774557137,2.7142856121063232,60,63,73
78
+ "['Plato', 'Socrates']",1.3858528562501022,1.2252417514229652,5.0,19,6,1
79
+ "['Messi', 'Maradona']",1.351277719040998,1.386407854799256,4.714285850524902,18,45,7
80
+ "['Oasis', 'Blur']",1.2649262407605837,1.4267974980604674,3.4285714626312256,16,54,57
81
+ "['Quentin Tarantino', 'Sergio Leone']",1.1819027001246791,1.2378902814139754,4.4285712242126465,8,9,21
82
+ "['European Union', 'Germany']",2.155015437978733,1.3075359721381155,4.4285712242126465,58,28,21
83
+ "['Stephen Foster', 'Thomas Moore']",2.4455984338240087,1.4035468401012576,2.7142856121063232,65,49,73
84
+ "['Hoover', 'Dyson']",1.6885925695480237,1.2489516544535164,4.285714149475098,37,12,29
85
+ "['Mark Rothko', 'Claude Monet']",1.3961933573752359,1.2531560156817456,3.4285714626312256,20,13,57
86
+ "['James Brown', 'Michael Jackson']",1.2634985666005347,1.23633902308324,1.5714285373687744,15,8,79
87
+ "['Windows', 'Linux']",1.7759159585312407,1.3146882104890258,4.0,43,29,44
88
+ "['Radiohead', 'David Bowie']",1.1237945956266344,1.2615418250993753,4.285714149475098,1,15,29
89
+ "['NVIDIA', 'AMD']",1.73533381213156,1.404255266699919,4.142857074737549,41,50,36
90
+ "['India', 'Gandhi']",1.947103791346796,1.385347953173187,4.714285850524902,49,44,7
91
+ "['Viktor Yushchenko', 'Bonnie Wright']",3.6785388656306246,1.5775493447776812,1.4285714626312256,89,75,81
92
+ "['Beethoven', 'Mozart']",1.1355583896963073,1.2548317540889342,4.5714287757873535,3,14,13
93
+ "['Bitcoin', 'blockchain']",1.6690348677283227,1.2696348732034277,3.857142925262451,36,19,50
94
+ "['Hong Kong', 'China']",2.5037429119478887,1.4266732545397631,4.857142925262451,67,53,3
experiments/analysis/flan_ul2_additional_analysis/know.csv ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ pairs,score_fewshot,score_zeroshot,score_true,rank_fewshot,rank_zeroshot,rank_true
2
+ "['Hawaii', 'ukulele']",2.217062208651209,1.6141553237135906,4.4285712242126465,41,50,35
3
+ "['Paris', 'Eiffel Tower']",1.9805088006095575,1.549073112637212,5.0,28,37,3
4
+ "['Sweden', 'pop']",2.5156100347776227,1.599112336564866,3.4285714626312256,63,49,64
5
+ "['Memphis Depay', 'Mendelian inheritance']",4.4230163252838945,2.7539158748346066,1.0,105,105,103
6
+ "['France', 'beaches']",1.749754689567146,1.7211045184289022,2.2857143878936768,13,68,80
7
+ "['France', 'cars']",1.8686154736316878,1.577197902525235,3.142857074737549,19,44,70
8
+ "['Alphabet Inc.', 'Google']",2.0847632937831126,1.4982438958353133,5.0,30,28,3
9
+ "['Vincent Van Gogh', 'The Starry Night']",2.3775528885917723,1.4038491705190446,4.5714287757873535,54,9,26
10
+ "['Italy', 'Hawaiian pizza']",2.3425170924645218,1.6829345832720786,1.2857142686843872,50,62,95
11
+ "['Japan', 'glass product']",2.1269027324918053,1.6285215315723371,2.5714285373687744,34,53,75
12
+ "['France', 'rococo movement']",2.3338117783895433,1.7522039622068803,3.0,49,70,72
13
+ "['Europe', 'The Final Countdown']",3.602069155411361,1.4387832395341067,4.5714287757873535,103,18,26
14
+ "['Sophie Turner', 'Sylvia Plath']",2.358691955426627,1.5106470237582166,1.2857142686843872,52,32,95
15
+ "['Bill Nye', 'scientist']",1.637630581262456,1.3207424886520618,4.4285712242126465,5,0,35
16
+ "['Mercedes Benz', 'build quality']",2.1059498341351617,1.5617058192446298,3.857142925262451,31,42,57
17
+ "['Hawaii', 'beaches']",1.659939013450344,1.5151668359092634,4.857142925262451,7,33,9
18
+ "['Jackson Pollock', 'action painting']",2.13208061424068,1.446368405963585,4.4285712242126465,35,19,35
19
+ "['Andhra Pradesh', 'Martinique']",3.079962763598426,1.9438218153226388,1.0,90,90,103
20
+ "['Keanu Reeves', 'The Matrix']",2.4481907053911445,1.3843872585630363,4.857142925262451,58,5,9
21
+ "['Christopher Columbus', 'India']",2.4675038773077578,2.201138053250345,1.7142857313156128,60,99,88
22
+ "['Picasso', 'Guernica']",2.531926183685954,1.5519262551361566,4.857142925262451,65,39,9
23
+ "['Italy', 'Renaissance']",2.1476788776698,1.5369287637424967,4.4285712242126465,36,34,35
24
+ "['Korea', 'Breakdance']",3.4438237125203486,1.8628185631970635,2.7142856121063232,97,84,73
25
+ "['Spain', 'olive oil']",1.670210365590026,1.5490954571308952,4.5714287757873535,8,38,26
26
+ "['C.S. Lewis', 'The Screwtape Letters']",1.890430724934506,1.4756095615869196,4.4285712242126465,22,24,35
27
+ "['Corsica', 'Napoleon Bonaparte']",2.4831138426360715,1.9022471325455272,4.4285712242126465,62,87,35
28
+ "['Rafael Nadal', 'Ralph Macchio']",3.396177477916396,1.647891184288785,1.0,96,58,103
29
+ "['Walther P38', 'Lupin the Third']",4.513619538352889,2.220320178652019,3.4285714626312256,106,101,64
30
+ "['Richard Harris', 'Curcuma longa']",3.925878661487128,3.0051507411666667,1.0,104,107,103
31
+ "['Smashing Pumpkins', 'Thru the eyes of Ruby']",2.701559478037864,1.617355203052018,3.5714285373687744,73,52,63
32
+ "['India', 'rum']",2.782247470441063,2.0201010411950517,2.4285714626312256,79,92,78
33
+ "['Parasite', 'Jersey']",4.665180668775107,2.0917107526710037,1.2857142686843872,107,96,95
34
+ "['India', 'Gurkhas']",1.891718176412827,1.7755981109002028,2.7142856121063232,23,74,73
35
+ "['Amazon', 'Alexa']",2.3646542660476477,1.5899399391175666,4.142857074737549,53,46,46
36
+ "['Thomas Edison', 'telephone']",1.6915911159454087,1.4076575234169095,3.7142856121063232,10,10,60
37
+ "['Buffon', 'PSG']",3.486656345031013,1.7530930722481544,2.4285714626312256,100,71,78
38
+ "['Alfred Hitchcock', 'horror']",1.7571665259988574,1.3663635836010108,4.142857074737549,15,3,46
39
+ "['William Shakespeare', 'Romeo and Juliet']",1.8295331567106516,1.5081249508332475,4.714285850524902,17,31,17
40
+ "['Thomas Edison', 'light bulb']",1.7418718493081657,1.3653392286358037,4.857142925262451,11,2,9
41
+ "['Michael Jordan', 'Tessa Thompson']",2.1705268702176226,1.5543171907584603,1.8571428060531616,38,40,86
42
+ "['Inglourious Basterds', 'Sergio Busquets']",3.318331366597315,2.1133903510632104,1.0,94,98,103
43
+ "['Boris Johnson', 'Brexit']",1.645082124709204,1.5863673272495546,4.4285712242126465,6,45,35
44
+ "['Georgia', 'Joseph Stalin']",1.967571912844332,1.8488188109955903,3.7142856121063232,27,81,60
45
+ "['China', 'coffee']",2.17544980131227,1.8135234081399012,2.142857074737549,39,80,82
46
+ "['OpenAI', 'ChatGPT']",2.9218799850556576,2.0160923267589936,4.5714287757873535,85,91,26
47
+ "['Tesco', 'groceries']",2.64246557290908,1.4777211558218561,5.0,71,25,3
48
+ "['Amazon', 'cloud computing']",2.778278227603094,1.9068170630915215,3.2857143878936768,78,88,67
49
+ "['Nvidia', 'GPUs']",2.316988987900782,1.4512691228933783,4.714285850524902,48,20,17
50
+ "['Tony Blair', 'Iraq War']",1.6255000076210981,1.7821177773658414,4.285714149475098,4,77,42
51
+ "['Germany', 'techno music']",1.557046019082083,1.629043158489762,3.7142856121063232,3,54,60
52
+ "['Michelangelo', 'Pop Art']",1.914005743262575,1.8118542261041621,2.0,25,79,84
53
+ "['Gilmore Girls', 'OpenAI']",3.4608540643783776,2.1065435226515503,1.0,99,97,103
54
+ "['Ragnarök', 'Little Boy']",3.3044717204594134,1.686534969515163,1.5714285373687744,93,63,89
55
+ "['Pizzagate', 'Hillary Clinton']",2.1153132432461224,1.7795214681654785,3.857142925262451,32,76,57
56
+ "['George Orwell', 'Coming Up for Air']",2.6697120181771408,2.038484059003444,4.0,72,94,52
57
+ "['Italy', 'wine']",1.5255845922312388,1.4373376976319032,4.0,2,16,52
58
+ "['Coca-Cola', 'Pepsi']",2.7243095716470473,1.564914630149368,1.4285714626312256,74,43,91
59
+ "['Spotify', 'Podcasts']",2.225374084336068,1.778620332709591,3.7142856121063232,43,75,60
60
+ "['Valencia', 'paella']",2.2381111940650604,1.6345744732560765,4.5714287757873535,46,56,26
61
+ "['The Office', 'IBM']",2.6144005456289228,1.6322135453268423,1.4285714626312256,70,55,91
62
+ "['Romania', 'Roman Catholicism']",1.7420108747270733,1.4311110891664438,2.142857074737549,12,15,82
63
+ "['George Washington', 'Kiribati']",2.7662484606094733,2.031883419437793,1.0,77,93,103
64
+ "['Charles Bronson', 'Rory McIlroy']",3.372548862460059,1.8027123773986695,1.0,95,78,103
65
+ "['Belgium', 'wine']",1.492993613315117,1.5421924887285943,2.4285714626312256,1,35,78
66
+ "['Luka Modrić', 'Rottweiler']",3.5986081367767677,2.397671526907933,1.0,102,103,103
67
+ "['Switzerland', 'mountains']",2.217240085924926,1.6930428362085401,4.714285850524902,42,65,17
68
+ "[""Assassin's Creed"", 'history']",2.480178565321259,1.6465573298565803,3.2857143878936768,61,57,67
69
+ "['Red Bull', 'energy drinks']",2.555565396718211,1.456624393896625,4.857142925262451,68,21,9
70
+ "['Johnny Cash', 'Ring of Fire']",2.895723647778649,1.6823023432944961,4.714285850524902,84,61,17
71
+ "['Microsoft', 'Xbox']",2.161852035473677,1.4025903288235062,4.285714149475098,37,8,42
72
+ "['Canada', 'maple syrup']",1.8791379262254932,1.558050888058955,4.714285850524902,20,41,17
73
+ "['France', 'cheese']",1.9032974601916173,1.5430241918791174,4.5714287757873535,24,36,26
74
+ "['France', 'beer']",1.799265670988058,1.616702308516782,1.8571428060531616,16,51,86
75
+ "['Nintendo', 'Super Mario Bros.']",2.522732299065512,1.4081862950465882,4.5714287757873535,64,11,26
76
+ "['democracy', 'North Korea']",2.4453375211865613,1.874067353187238,1.0,57,85,103
77
+ "['Apple', 'iPhone']",2.2376724784744693,1.504551474404591,4.857142925262451,45,30,9
78
+ "['Harry Potter', 'Bloomsbury']",2.9940553869252637,1.681124247026932,3.857142925262451,87,60,57
79
+ "['France', 'mountains']",2.5338323480997524,2.0420109399379687,3.2857143878936768,66,95,67
80
+ "['UK', 'rain']",3.4868263466820824,1.8510534368673026,4.4285712242126465,101,82,35
81
+ "['Matt Damon', ""Ocean's Eleven""]",2.850793590408457,1.710292734340407,4.285714149475098,81,67,42
82
+ "['Switzerland', 'banking secrecy']",1.4136448220985323,1.4140492427045632,4.0,0,12,52
83
+ "['Adidas', 'Yeezy Boost']",3.096664103973119,1.425090203853594,4.142857074737549,91,13,46
84
+ "['Portugal', 'Fado']",1.888578072809666,1.4374283843077116,4.142857074737549,21,17,46
85
+ "['Italy', 'tea']",2.9581045597178255,1.853469614720884,1.2857142686843872,86,83,95
86
+ "['Beatles', 'Come Together']",2.200709602117659,1.3725785586556383,4.142857074737549,40,4,46
87
+ "['Afro-Brazilians', 'Capoeira']",2.459139681663665,1.5942359184295292,4.142857074737549,59,47,46
88
+ "['Steve Jobs', 'AirPods']",2.120365087557525,1.9190476817297606,1.4285714626312256,33,89,91
89
+ "['Apple', 'Apple Watch']",2.4272660450931087,1.5014430337590525,4.0,55,29,52
90
+ "['Frank Abagnale Jr', 'doctor']",2.7335836651943284,2.204312972088224,2.142857074737549,75,100,82
91
+ "['Meta', 'Instagram']",3.1003940366119402,1.7586111048690642,4.0,92,73,52
92
+ "['Jeff Goldblum', 'Jurassic Park']",3.063036730163065,1.472164987898691,4.4285712242126465,88,23,35
93
+ "['Leonardo Da Vinci', 'Mona Lisa']",2.3521336157248,1.4838067466083005,5.0,51,26,3
94
+ "['Neil Armstrong', 'Korean War']",2.8713334808494806,2.5360696094371646,1.4285714626312256,82,104,91
95
+ "['France', 'baguette']",1.9159014965314902,1.6694015995141194,4.5714287757873535,26,59,26
96
+ "['Queen', 'Bohemian Rhapsody']",2.894437728771924,1.4253497252771552,4.714285850524902,83,14,17
97
+ "['Pixar', 'Novosibirsk']",2.788777406876352,2.9477767154494834,1.0,80,106,103
98
+ "['Greggs', 'sausage rolls']",3.0708764950960226,1.7009085678770521,4.4285712242126465,89,66,35
99
+ "['Japan', 'sake']",2.2355352210063355,1.7311980801105462,4.0,44,69,52
100
+ "['IKEA', 'food']",2.540346699614517,2.3136462632326893,2.0,67,102,84
101
+ "['William Grant & Sons', 'gin']",2.281484301404387,1.6904876254513095,3.142857074737549,47,64,70
102
+ "['Netherlands', 'tulips']",1.7528172337780288,1.4584905157760355,4.714285850524902,14,22,17
103
+ "['LAMY', 'notebook']",3.4530459984394613,1.887999111649344,3.142857074737549,98,86,70
104
+ "['Harvey Weinstein', 'Miramax']",2.739748544697059,1.754414354986099,4.714285850524902,76,72,17
105
+ "['Scotland', 'whisky']",1.681216536351356,1.399913185634903,4.714285850524902,9,7,17
106
+ "['Apple', 'MacBook']",2.43129398966038,1.3878072634111522,4.5714287757873535,56,6,26
107
+ "['Steve Jobs', 'Apple']",1.8603701274790823,1.495425847102713,5.0,18,27,3
108
+ "['Beatles', 'I Me Mine']",2.57458625549266,1.5983103230707787,2.5714285373687744,69,48,75
109
+ "['Google', 'search engine']",1.9884333243582761,1.355647107117722,4.857142925262451,29,1,9
experiments/analysis/flan_ul2_additional_analysis/simi.csv ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ pairs,score_fewshot,score_zeroshot,score_true,rank_fewshot,rank_zeroshot,rank_true
2
+ "['pill', 'tablet']",3.274008286278982,1.0963185810651779,4.857142925262451,23,0,10
3
+ "['Eduardo Saverin', 'Guinea-Bissau']",19.392038302744776,4.052577211867362,1.0,90,89,90
4
+ "['Dionysus', 'Toyota Corolla']",12.934971548863354,3.4564230570493915,1.1428571939468384,84,87,86
5
+ "['Great Britian', 'British Empire']",3.652190726486786,1.272809779852253,4.5714287757873535,31,28,23
6
+ "['English', 'Persian']",4.674226586789393,1.6148763949051825,2.0,61,57,70
7
+ "['Chess', ""Rubik's Cube""]",4.116549612748183,1.879439692861986,2.142857074737549,47,67,68
8
+ "['Star Wars', 'Star Trek']",2.628031851053477,1.4347979273272462,4.285714149475098,8,50,35
9
+ "['Anne Heche', 'Ponzi scheme']",20.023976104666094,4.175371728372175,1.2857142686843872,91,90,83
10
+ "['Chris Pine', 'Good Friday']",13.089133086102137,3.440924088108119,1.0,85,86,90
11
+ "['Sudocrem', 'Germolene']",5.318910995704305,1.6453597895571181,4.4285712242126465,63,58,30
12
+ "['Yugoslavia', 'Albania']",6.372893737106588,1.777307939350594,3.7142856121063232,71,62,56
13
+ "['Beatles', 'Rolling Stones']",3.3544283622290196,1.1962706778662544,4.714285850524902,25,15,16
14
+ "['decision tree', 'neural network']",2.9065665188216347,1.4315812594673885,2.857142925262451,16,49,64
15
+ "['Suits', 'Law & Order']",3.800693314383067,1.3978820298258887,4.5714287757873535,35,46,23
16
+ "['Titanic', 'Superbad']",4.619188674370307,2.213662292000817,1.7142857313156128,59,78,73
17
+ "['Seleucid Empire', 'Macedonian Empire']",6.419775995593114,1.828381638106454,3.7142856121063232,72,64,56
18
+ "['Doctor Who', 'Timeless']",3.46088500696327,1.4524423096052965,3.7142856121063232,26,53,56
19
+ "['Srebrenica massacre', 'Rock Hudson']",22.27626107039248,9.197721177121391,1.0,92,92,90
20
+ "['Arnold Classic', 'Mr. Olympia']",4.487758083728249,1.428413776493121,4.142857074737549,55,48,41
21
+ "['Italy', 'Superman']",10.978149906422772,2.573955623240689,1.0,82,82,90
22
+ "['Alibaba', 'Amazon']",3.972773665711877,1.3082210072455742,4.4285712242126465,43,40,30
23
+ "['Indiana Jones', 'Star Wars']",4.379989690008676,1.6919490885578246,3.2857143878936768,54,60,61
24
+ "['NQ64', 'Wetherspoons']",4.132125854770325,2.154384332367406,2.5714285373687744,49,77,67
25
+ "['Pepsi', 'Fanta']",4.184848046216775,1.492284581073927,3.4285714626312256,50,54,59
26
+ "['Homebase', 'IKEA']",4.070942383407451,1.3055150387108365,4.714285850524902,44,38,16
27
+ "['ramen', 'udon']",3.2656156801207405,1.3038570428318557,4.142857074737549,22,37,41
28
+ "['England', 'Wales']",3.8088520249291715,1.284211292664953,4.0,36,32,46
29
+ "['sphinx', 'sphynx']",1.6117722505862506,1.29134319803738,2.142857074737549,2,34,68
30
+ "['iPod', 'iPad']",2.39534504274217,1.1745858450607467,3.4285714626312256,6,9,59
31
+ "['Olympic Games', 'X Games']",2.9259438158318263,1.4087563252628894,3.857142925262451,17,47,51
32
+ "[""McDonald's"", 'Burger King']",4.103140194258418,1.1845686476202482,5.0,46,12,3
33
+ "['Minnesota', 'Wisconsin']",3.8835637723211662,1.3668624638306508,4.5714287757873535,40,43,23
34
+ "['Slack', 'Microsoft Teams']",1.010722845182092,1.1083802086359376,4.714285850524902,0,1,16
35
+ "['Peter Phillips', 'Christine Baranski']",5.8545935585758615,1.9770690531727926,1.5714285373687744,67,69,75
36
+ "['Jenna Ortega', 'Anglo-Saxons']",16.313136929836602,4.34116604633945,1.0,87,91,90
37
+ "['Karl Urban', 'France 24']",18.112881550801212,3.737063890873087,1.2857142686843872,89,88,83
38
+ "['Sudan', 'South Sudan']",3.154078642930165,1.1413170766716667,3.857142925262451,21,3,51
39
+ "['Gameboy', 'Nintendo']",2.6422439751681384,1.1561978345889523,2.7142856121063232,9,5,66
40
+ "['Grammy Award', 'Novel Prize']",7.868011909588307,2.000009898204835,3.2857143878936768,76,70,61
41
+ "['George Ezra', 'Lead Belly']",5.546488610761862,1.8679975384088594,3.857142925262451,64,66,51
42
+ "['Cardiff', 'Swansea']",4.374596221358583,1.3684548520556212,4.4285712242126465,53,44,30
43
+ "['Steve Jobs', 'Tim Cook']",2.7787270352070927,1.1722038716948548,3.857142925262451,13,8,51
44
+ "['Counter Strike', 'Rainbow Six']",4.1002465293847585,1.6683302843699688,4.714285850524902,45,59,16
45
+ "['Hawaii', 'Guam']",3.8173372239015793,1.3596249987134326,4.0,37,42,46
46
+ "['Scrabble', 'Jenga']",3.6995412415920823,2.0206657104219063,2.857142925262451,32,71,64
47
+ "['Christmas', 'Easter']",3.770784329523088,1.5852128544625168,3.857142925262451,34,56,51
48
+ "['fusilli', 'rotini']",2.7562585800855093,1.2170543108656977,4.857142925262451,12,18,10
49
+ "['Coachella', 'Woodstock']",2.6805485572072394,1.3026882918541969,4.142857074737549,10,36,41
50
+ "['Avatar', 'Archimedes']",6.2921744604165974,2.0498386840332077,1.0,70,73,90
51
+ "['Shark', 'Bush']",6.076433738970192,2.473817916476143,4.5714287757873535,68,79,23
52
+ "['Eva Braun', 'Phil Jackson']",7.639720036725621,2.073420971334422,1.2857142686843872,74,75,83
53
+ "['Coca-Cola', 'Pepsi']",3.287619058514129,1.1603703122935556,5.0,24,6,3
54
+ "['Joe Burrow', 'Edward Scissorhands']",9.681085432794521,2.7072382051749715,1.4285714626312256,79,83,79
55
+ "['Australia', 'New Zealand']",4.28015926788305,1.2436423214416585,4.857142925262451,51,25,10
56
+ "['Edward I', 'William the Conqueror']",7.00715297830466,1.8079957937284559,4.142857074737549,73,63,41
57
+ "['Frank Sinatra', 'Ella Fitzgerald']",4.323018875025666,1.2618269168231726,4.285714149475098,52,27,35
58
+ "['New York', 'York']",2.0015508023469826,1.2378399248510765,1.5714285373687744,3,22,75
59
+ "['Uzbekistan', 'United States']",7.986094222427021,1.8443678727708666,1.4285714626312256,77,65,79
60
+ "['Red Bull', 'Monster Energy']",3.5782005144687097,1.2350180713452537,4.857142925262451,30,20,10
61
+ "['Champions League', 'Europa League']",2.899351869843832,1.3311126253575225,4.714285850524902,15,41,16
62
+ "['Cerave', 'Nivea']",7.860811825983757,1.7120814151917347,4.5714287757873535,75,61,23
63
+ "['Galaxy', 'iPhone']",3.890342277335262,1.4420422620374922,4.5714287757873535,41,51,23
64
+ "['Mehmet Öz', 'David Schwimmer']",10.394835233042148,2.5171469601925516,1.4285714626312256,81,81,79
65
+ "['Disney', 'Pixar']",2.7475137480770297,1.1827246431069653,5.0,11,11,3
66
+ "['Batman', 'Iron Man']",4.632936802687815,1.4505700925559022,4.285714149475098,60,52,35
67
+ "['Gisele Bündchen', 'Orson Welles']",11.733988200838617,2.482540389029044,1.4285714626312256,83,80,79
68
+ "['Estonia', 'Finland']",4.4944878540495745,1.2753268802230482,4.142857074737549,56,30,41
69
+ "['The Avengers', 'The Justice League']",3.5272328321428605,1.3073785524764345,4.857142925262451,28,39,10
70
+ "['Nicolae Ceaușescu', 'Javier Hernández']",14.816167803611087,2.866841024727357,1.2857142686843872,86,84,83
71
+ "['Adidas', 'Nike']",3.128625488872493,1.1759724560937117,5.0,19,10,3
72
+ "['Java', 'Javascript']",2.201677080836342,1.1638525989865072,3.142857074737549,4,7,63
73
+ "['bourbon', 'Scotch whisky']",3.151192694795426,1.2018479334928887,4.4285712242126465,20,17,30
74
+ "['Alaska', 'Canada']",5.749840363096197,2.051522647924928,4.142857074737549,66,74,41
75
+ "['Spain', 'Italy']",6.114873120356638,1.2304383339125753,4.285714149475098,69,19,35
76
+ "['banana', 'plantain']",3.898063267431944,1.239858409557863,4.714285850524902,42,24,16
77
+ "['Firefox', 'Chrome']",3.5003419555946533,1.2388581155089609,4.857142925262451,27,23,10
78
+ "['Pecorino Romano', 'Parmesan']",3.0545436291513535,1.1561388450582295,4.5714287757873535,18,4,23
79
+ "['Ligue 1', 'Bundesliga']",4.5064748482425,1.23614659258189,5.0,57,21,3
80
+ "['Netflix', 'Amazon Prime Video']",2.3484919883495508,1.1320119721227202,4.5714287757873535,5,2,23
81
+ "['Primark', 'Shein']",4.510338532624208,1.3975065558414137,4.4285712242126465,58,45,30
82
+ "[""Dominos' Pizza"", 'Pizza Hut']",3.769478721730652,1.1959740216706212,4.857142925262451,33,14,10
83
+ "['South Africa', 'Bhagavad Gita']",17.183548050370515,3.0240732935848014,1.0,88,85,90
84
+ "['Germany', 'France']",4.7596090117583785,1.2762333438571687,3.4285714626312256,62,31,59
85
+ "['Kindle', 'Jeff Bezos']",3.876382551537531,1.2458504058572832,1.5714285373687744,39,26,75
86
+ "['Harry Potter', 'Lord of the Rings']",3.8590344817902005,1.5231994501082553,4.142857074737549,38,55,41
87
+ "['Yakutia', 'Turkey']",9.843375743462872,1.9136274793204298,1.8571428060531616,80,68,71
88
+ "['PS5', 'XBox']",2.57469644019121,1.2867747741997058,5.0,7,33,3
89
+ "['Monet', 'Manet']",2.8312851205965224,1.1854059397722165,4.0,14,13,46
90
+ "['Glastonbury', 'Roskilde']",5.702882133219245,2.0309699733249644,4.4285712242126465,65,72,30
91
+ "['Telugu', 'Tamil']",4.131681071938202,1.200778959518929,3.857142925262451,48,16,51
92
+ "['Batman', 'Superman']",3.546625877820798,1.2740821846390384,4.0,29,29,46
93
+ "['cannoli', 'canneloni']",1.5953917799354116,1.3014626400370175,1.8571428060531616,1,35,71
94
+ "['Gerald Ford', 'Duran Duran']",8.6988355575664,2.1149843195939737,1.4285714626312256,78,76,79
experiments/analysis/get_correlation.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Compute inter-annotator Spearman correlation on the post-fix ("after")
annotations and export one correlation table per relation type, plus a
macro-averaged table across all relation types."""
import json
import os
from itertools import permutations, chain
from string import ascii_letters
from statistics import mean
import numpy as np
import pandas as pd
from datasets import load_dataset

os.makedirs("experiments/analysis/correlation", exist_ok=True)
prefix = "after"

# Merge the test and validation annotation files.
with open("data/data_processed.new.test.jsonl") as f:
    data = [json.loads(line) for line in f]
with open("data/data_processed.new.validation.jsonl") as f:
    data += [json.loads(line) for line in f]

# Group the per-pair annotator score vectors by relation type.
tmp = {}
for i in data:
    if i['relation_type'] not in tmp:
        tmp[i['relation_type']] = i['scores_all']
    else:
        tmp[i['relation_type']] = i['scores_all'] + tmp[i['relation_type']]

# Each entry of `scores_all` is one score per annotator for a single pair.
num_annotators = len(list(tmp.values())[0][0])
for r, scores in tmp.items():
    # Pairwise annotator-vs-annotator Spearman correlation (x100); the
    # diagonal (self-correlation) is fixed at 100.
    corr_matrix = np.ones((num_annotators, num_annotators)) * 100
    for a, b in permutations(range(num_annotators), 2):
        score_a = [s[a] for s in scores]
        score_b = [s[b] for s in scores]
        corr_matrix[a][b] = pd.DataFrame([score_a, score_b]).T.corr("spearman").values[0][1] * 100
    annotator_ids = [ascii_letters[i].upper() for i in range(num_annotators)]
    corr_df = pd.DataFrame(corr_matrix, columns=annotator_ids, index=annotator_ids)

    # Leave-one-out agreement: each annotator vs. the mean of all the others.
    corr_df['Others'] = [pd.DataFrame([
        [s[a] for s in scores],
        [mean(_s for _n, _s in enumerate(s) if _n != a) for s in scores]
    ]).T.corr("spearman").values[0][1] * 100 for a in range(num_annotators)]
    corr_df = corr_df.T
    corr_df['Avg'] = corr_df.mean(1)
    corr_df = corr_df.T
    corr_df.to_csv(f"experiments/analysis/correlation/{prefix}.{r.replace(' ', '_').replace('/', '-')}.csv")
    print(r)
    print(corr_df.round(0).astype(int).to_latex())
    print()

# Macro-average the per-relation correlation tables written above.
df = None
for r, scores in tmp.items():
    path = f"experiments/analysis/correlation/{prefix}.{r.replace(' ', '_').replace('/', '-')}.csv"
    if df is None:
        df = pd.read_csv(path, index_col=0)
    else:
        df += pd.read_csv(path, index_col=0)
df = df / len(tmp)  # was hard-coded "/ 5"; derive from the number of relation types
df = df.T
df.pop("Avg")
df['Avg'] = df.mean(1)
df = df.T
print("ALL")
print(df.round(0).astype(int).to_latex())
experiments/analysis/get_correlation_before.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Compute inter-annotator Spearman correlation on the original ("before")
annotations and export one correlation table per relation type, plus a
macro-averaged table across all relation types."""
import json
import os
from itertools import permutations, chain
from string import ascii_letters
from statistics import mean
import numpy as np
import pandas as pd

os.makedirs("experiments/analysis/correlation", exist_ok=True)
prefix = "before"

# Merge the test and validation annotation files.
with open("data/data_processed.test.jsonl") as f:
    data = [json.loads(line) for line in f]
with open("data/data_processed.validation.jsonl") as f:
    data += [json.loads(line) for line in f]

# Group the per-pair annotator score vectors by relation type.
tmp = {}
for i in data:
    if i['relation_type'] not in tmp:
        tmp[i['relation_type']] = i['scores_all']
    else:
        tmp[i['relation_type']] = i['scores_all'] + tmp[i['relation_type']]

# Each entry of `scores_all` is one score per annotator for a single pair.
num_annotators = len(list(tmp.values())[0][0])
for r, scores in tmp.items():
    # Pairwise annotator-vs-annotator Spearman correlation (x100); the
    # diagonal (self-correlation) is fixed at 100.
    corr_matrix = np.ones((num_annotators, num_annotators)) * 100
    for a, b in permutations(range(num_annotators), 2):
        score_a = [s[a] for s in scores]
        score_b = [s[b] for s in scores]
        corr_matrix[a][b] = pd.DataFrame([score_a, score_b]).T.corr("spearman").values[0][1] * 100
    annotator_ids = [ascii_letters[i].upper() for i in range(num_annotators)]
    corr_df = pd.DataFrame(corr_matrix, columns=annotator_ids, index=annotator_ids)

    # Leave-one-out agreement: each annotator vs. the mean of all the others.
    corr_df['Others'] = [pd.DataFrame([
        [s[a] for s in scores],
        [mean(_s for _n, _s in enumerate(s) if _n != a) for s in scores]
    ]).T.corr("spearman").values[0][1] * 100 for a in range(num_annotators)]
    corr_df = corr_df.T
    corr_df['Avg'] = corr_df.mean(1)
    corr_df = corr_df.T
    corr_df.to_csv(f"experiments/analysis/correlation/{prefix}.{r.replace(' ', '_').replace('/', '-')}.csv")
    print(r)
    print(corr_df.round(0).astype(int).to_latex())
    print()

# Macro-average the per-relation correlation tables written above.
df = None
for r, scores in tmp.items():
    path = f"experiments/analysis/correlation/{prefix}.{r.replace(' ', '_').replace('/', '-')}.csv"
    if df is None:
        df = pd.read_csv(path, index_col=0)
    else:
        df += pd.read_csv(path, index_col=0)
df = df / len(tmp)  # was hard-coded "/ 5"; derive from the number of relation types
df = df.T
df.pop("Avg")
df['Avg'] = df.mean(1)
df = df.T
print("ALL")
print(df.round(0).astype(int).to_latex())
experiments/analysis/get_error_in_top_bottom.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Measure how often each LM places pairs in the correct third (top / middle /
bottom) of the gold ranking, and emit the result as a LaTeX table."""
import json
from random import uniform, seed
from statistics import mean
import pandas as pd
from datasets import load_dataset
from scipy.stats import spearmanr

# Model identifiers -> LaTeX pretty names.
# BUG FIX: these must be raw strings. In a plain string "\t" is a TAB
# character, which previously rendered as "Flan-T5<TAB>extsubscript{XXL}"
# in the generated tables/CSVs.
target = {
    "flan-t5-xxl": r"Flan-T5\textsubscript{XXL}",
    "opt-13b": r"OPT\textsubscript{13B}",
    "davinci": r"GPT-3\textsubscript{davinci}",
}
# Relation types -> short headers used in the LaTeX table (order defines
# the column order of the final table).
pretty_name = {
    'is competitor/rival of': "Rival",
    'is friend/ally of': "Ally",
    'is influenced by': "Inf",
    'is known for': "Know",
    'is similar to': "Sim",
    'average': "Avg",
}
def format_text(_x, _y, _z):
    """Render three accuracy values as ``x / y / z`` for a LaTeX cell,
    wrapping the best value in blue and the worst in red.

    BUG FIX vs. the original: ``\\textcolor`` was written in plain strings,
    so ``\\t`` became a TAB character; and the worst value was detected by
    comparing against ``str(min(...))`` *after* the values had already been
    rewritten, which silently broke when best == worst. If all three values
    are equal, the (single) value is colored blue.
    """
    best = max(_x, _y, _z)
    worst = min(_x, _y, _z)

    def _fmt(v):
        # Best takes precedence over worst when they coincide.
        if v == best:
            return r"\textcolor{blue}{" + str(v) + "}"
        if v == worst:
            return r"\textcolor{red}{" + str(v) + "}"
        return str(v)

    return f"{_fmt(_x)} / {_fmt(_y)} / {_fmt(_z)}"
55
+
56
+
57
# Main analysis: for each prompt template and model, compare the model's
# perplexity-based ranking against the gold ranking within the top, middle
# and bottom thirds, then assemble a combined LaTeX table.
with pd.option_context("display.max_colwidth", 1000):
    data = load_dataset("cardiffnlp/relentless_full", split="test")
    table_full = []
    for prompt in ['qa', 'lc']:
        output = []
        for d in data:
            for i in target.keys():
                with open(f"experiments/results/lm_{prompt}/{i}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl") as f:
                    ppl = [json.loads(x)['perplexity'] for x in f.read().split("\n") if len(x) > 0]
                # Rank pairs by perplexity (1 = lowest perplexity).
                # NOTE(review): ties in perplexity collapse to one rank here.
                rank_map = {p: n for n, p in enumerate(sorted(ppl), 1)}
                prediction = [rank_map[p] for p in ppl]

                # Index ranges of the top / middle / bottom thirds by gold rank
                # (p is the percentage of items per third, ~33).
                total_n = len(d['ranks'])
                p = int(total_n / 3)
                top_n = [0, int(total_n * p / 100) + 1]
                top_label = [x for x, _ in sorted(enumerate(d['ranks']), key=lambda x: x[1])[top_n[0]: top_n[1]]]
                bottom_n = [total_n - int(total_n * p / 100), total_n]
                bottom_label = [x for x, _ in sorted(enumerate(d['ranks']), key=lambda x: x[1])[bottom_n[0]: bottom_n[1]]]
                mid_n = [top_n[1], bottom_n[0]]
                mid_label = [x for x, _ in sorted(enumerate(d['ranks']), key=lambda x: x[1])[mid_n[0]: mid_n[1]]]

                # Accuracy = overlap between predicted and gold membership of each third.
                # top third
                top_pred = [x for x, _ in sorted(enumerate(prediction), key=lambda x: x[1])[top_n[0]: top_n[1]]]
                top_acc = len(set(top_pred).intersection(set(top_label))) / len(top_label) * 100
                # middle third
                mid_pred = [x for x, _ in sorted(enumerate(prediction), key=lambda x: x[1])[mid_n[0]: mid_n[1]]]
                mid_acc = len(set(mid_pred).intersection(set(mid_label))) / len(mid_label) * 100
                # bottom third (comment was mislabeled "top" in the original)
                bottom_pred = [x for x, _ in sorted(enumerate(prediction), key=lambda x: x[1])[bottom_n[0]: bottom_n[1]]]
                bottom_acc = len(set(bottom_pred).intersection(set(bottom_label))) / len(bottom_label) * 100

                output.append({"model": i, "relation_type": d['relation_type'], "top": round(top_acc, 1), "bottom": round(bottom_acc, 1), "middle": round(mid_acc, 1)})

        # Append one macro-averaged row per model.
        for i in target.keys():
            output.append({
                "model": i, "relation_type": "average",
                "top": round(mean([o['top'] for o in output if o['model'] == i]), 0),
                "bottom": round(mean([o['bottom'] for o in output if o['model'] == i]), 0),
                "middle": round(mean([o['middle'] for o in output if o['model'] == i]), 0)
            })

        df = pd.DataFrame(output)
        df['accuracy'] = [format_text(x, y, z) for x, y, z in zip(df['top'], df['middle'], df['bottom'])]
        table = df.pivot(index="relation_type", columns="model", values="accuracy")
        table.columns.name = None
        table.index.name = None
        table = table[list(target.keys())]
        table.columns = [target[i] for i in table.columns]
        table.index = [pretty_name[i] for i in table.index]
        table = table.T[list(pretty_name.values())]
        table = table.T
        table = table.to_latex(escape=False)
        # Keep only the body between \midrule and \bottomrule, prefixed
        # with a section header naming the prompt template.
        table = table.split(r"\midrule")[1].split(r"\bottomrule")[0]
        table = r"\multicolumn{4}{l}{\emph{" + prompt.upper() + r" template}} \\ " + table
        table_full.append(table)

    # Raw string here: the original used "\midrule", relying on "\m" not
    # being a recognized escape (a DeprecationWarning in modern Python).
    table_full = r"\midrule".join(table_full)

    print()
    print()
    print(table_full)
experiments/analysis/get_qualitative.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Extract qualitative error examples: pairs a model ranks in the top third
while the gold ranking puts them in the bottom third, and vice versa, then
format them as CSV and a LaTeX table."""
import os
import json
import pandas as pd
from datasets import load_dataset

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
os.makedirs("experiments/analysis/qualitative", exist_ok=True)

# Model identifiers -> LaTeX pretty names.
# BUG FIX: raw strings are required -- "\t" in a plain string is a TAB,
# which previously corrupted the output ("Flan-T5<TAB>extsubscript{XXL}").
target = {
    "flan-t5-xxl": r"Flan-T5\textsubscript{XXL}",
    "opt-13b": r"OPT\textsubscript{13B}",
    "davinci": r"GPT-3\textsubscript{davinci}",
}
pretty_name = {
    'average': "Avg",
    'is competitor/rival of': "Rival",
    'is friend/ally of': "Ally",
    'is influenced by': "Inf",
    'is known for': "Know",
    'is similar to': "Sim",
}
p = 30  # percentile label used only in the output file names
data = load_dataset("cardiffnlp/relentless_full", split="test")
for prompt in ['qa', 'lc']:
    output = []
    for d in data:
        for i in target.keys():
            with open(f"experiments/results/lm_{prompt}/{i}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl") as f:
                ppl = [json.loads(x)['perplexity'] for x in f.read().split("\n") if len(x) > 0]
            # Rank pairs by perplexity (1 = lowest perplexity).
            rank_map = {v: n for n, v in enumerate(sorted(ppl), 1)}
            prediction = [rank_map[v] for v in ppl]

            # Index ranges of the top / middle / bottom thirds by gold rank.
            total_n = len(d['ranks'])
            # BUG FIX: the original reassigned the loop-external `p` here, so
            # the output file name depended on the size of the last relation
            # processed (hence both lc.30.csv and lc.31.csv exist). Use a
            # separate name for the per-relation percentage.
            pct = int(total_n / 3)
            top_n = [0, int(total_n * pct / 100) + 1]
            top_label = [x for x, _ in sorted(enumerate(d['ranks']), key=lambda x: x[1])[top_n[0]: top_n[1]]]
            bottom_n = [total_n - int(total_n * pct / 100), total_n]
            bottom_label = [x for x, _ in sorted(enumerate(d['ranks']), key=lambda x: x[1])[bottom_n[0]: bottom_n[1]]]
            mid_n = [top_n[1], bottom_n[0]]
            mid_label = [x for x, _ in sorted(enumerate(d['ranks']), key=lambda x: x[1])[mid_n[0]: mid_n[1]]]

            # Predicted membership of each third.
            top_pred = [x for x, _ in sorted(enumerate(prediction), key=lambda x: x[1])[top_n[0]: top_n[1]]]
            top_acc = len(set(top_pred).intersection(set(top_label))) / len(top_label) * 100
            mid_pred = [x for x, _ in sorted(enumerate(prediction), key=lambda x: x[1])[mid_n[0]: mid_n[1]]]
            mid_acc = len(set(mid_pred).intersection(set(mid_label))) / len(mid_label) * 100
            bottom_pred = [x for x, _ in sorted(enumerate(prediction), key=lambda x: x[1])[bottom_n[0]: bottom_n[1]]]
            bottom_acc = len(set(bottom_pred).intersection(set(bottom_label))) / len(bottom_label) * 100

            # Record the disagreeing pairs: predicted-top but gold-bottom,
            # and predicted-bottom but gold-top.
            output.append({
                "relation_type": d['relation_type'],
                "model": i,
                "top_pred_and_bottom_gold": [" : ".join(d['pairs'][x]) for x in set(top_pred).intersection(bottom_label)],
                "bottom_pred_and_top_gold": [" : ".join(d['pairs'][x]) for x in set(bottom_pred).intersection(top_label)],
            })

    df = pd.DataFrame(output)
    df.to_csv(f"experiments/analysis/qualitative/{prompt}.{p}.csv", index=False)
    df['relation_type'] = [pretty_name[i] for i in df['relation_type']]
    print(df)

    # Explode the example lists (capped at 4 per cell) into one row per example.
    new_df = []
    for _, i in df.iterrows():
        top_pred_and_bottom_gold = i['top_pred_and_bottom_gold'][:min(len(i['top_pred_and_bottom_gold']), 4)]
        bottom_pred_and_top_gold = i['bottom_pred_and_top_gold'][:min(len(i['bottom_pred_and_top_gold']), 4)]
        for x in range(max(len(bottom_pred_and_top_gold), len(top_pred_and_bottom_gold))):
            if len(top_pred_and_bottom_gold) >= x + 1:
                t = ", ".join(top_pred_and_bottom_gold[x * 1:min(len(top_pred_and_bottom_gold) + 1, (x + 1) * 1)])
            else:
                t = ""
            if len(bottom_pred_and_top_gold) >= x + 1:
                b = ", ".join(bottom_pred_and_top_gold[x * 1:min(len(bottom_pred_and_top_gold) + 1, (x + 1) * 1)])
            else:
                b = ""
            new_df.append({"relation_type": i['relation_type'], "model": i['model'], "top": t, "bottom": b})
    df_new = pd.DataFrame(new_df)
    df_new['model'] = [target[i] for i in df_new['model']]
    df_new = df_new[['model', 'relation_type', 'top', 'bottom']]
    df_new = df_new.sort_values(by=['model', 'relation_type'])
    df_new.to_csv(f"experiments/analysis/qualitative/{prompt}.{p}.format.csv", index=False)
    with pd.option_context("max_colwidth", 1000):
        table = df_new.to_latex(index=False, escape=False)
        table = table.split(r"\midrule")[1].split(r"\bottomrule")[0]
        print(table)
experiments/analysis/get_statistics.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Print dataset-size statistics per relation type, with example pairs from
the top, middle and bottom of the gold ranking (validation split)."""
import pandas as pd
from datasets import load_dataset

data = load_dataset("cardiffnlp/relentless_full")
relation_types = [i['relation_type'] for i in data['test']]
full_stats = []
for r in relation_types:
    # Number of pairs per split for this relation type.
    size = [len([i for i in data[s] if i['relation_type'] == r][0]['pairs']) for s in ['validation', 'test']]
    x = [i for i in data['validation'] if i['relation_type'] == r][0]
    # Sort pairs by gold rank (ascending).
    pairs = sorted(zip(x['pairs'], x['ranks']), key=lambda _x: _x[1])
    top_pairs = [" / ".join(a) for a, _ in pairs[:2]]
    # BUG FIX: the original used pairs[:2] here (copy-paste), which made
    # bottom_pairs identical to top_pairs; take the last two pairs instead.
    bottom_pairs = [" / ".join(a) for a, _ in pairs[-2:]]
    middle_pairs = [" / ".join(a) for a, _ in pairs[int(len(pairs) / 2) - 1:int(len(pairs) / 2) + 1]]
    full_stats += [{
        "relation_type": r,
        "val.": size[0],
        "test": size[1],
        "top_pairs": t,
        "middle_pairs": m,
        "bottom_pairs": b} for t, m, b in zip(top_pairs, middle_pairs, bottom_pairs)]

df = pd.DataFrame(full_stats)
print(df[["relation_type", "val.", "test"]].to_markdown(index=False))
print(df.to_latex(index=False))
experiments/analysis/qualitative/lc.30.csv ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ relation_type,model,top_pred_and_bottom_gold,bottom_pred_and_top_gold
2
+ is competitor/rival of,flan-t5-xxl,['Germany : Austria'],"['Eminem : MGK', 'AWS : GCP']"
3
+ is competitor/rival of,opt-13b,['Bashar al-Assad : Christianity'],['Netflix : Disney Plus']
4
+ is competitor/rival of,davinci,['Serena Williams : Andy Murray'],"['Netflix : Disney Plus', 'Eminem : MGK']"
5
+ is friend/ally of,flan-t5-xxl,"['Liam Gallagher : Noel Gallagher', 'Joseph Stalin : Josip Broz Tito', 'Sophia Loren : Marlon Brando', 'Catherine Zeta-Jones : Johnny Knoxville', 'Armenia : Azerbaijan', 'Russia : Georgia']","['Gondor : Rohan', 'FTX : Alameda Research', 'Red Bull : GoPro', 'Aznar : Bush', 'Windows : Xbox', 'Rishi Sunak : Joe Biden']"
6
+ is friend/ally of,opt-13b,"['Joseph Stalin : Josip Broz Tito', 'Sophia Loren : Marlon Brando', 'Armenia : Azerbaijan']","['FTX : Alameda Research', 'Elsa : Anna', 'Aznar : Bush', 'Windows : Xbox', 'Rishi Sunak : Joe Biden']"
7
+ is friend/ally of,davinci,"['Joseph Stalin : Josip Broz Tito', 'Sophia Loren : Marlon Brando', 'Armenia : Azerbaijan']","['FTX : Alameda Research', 'Elsa : Anna', 'Rishi Sunak : Joe Biden']"
8
+ is influenced by,flan-t5-xxl,"['Joe Biden : Donald Trump', 'Harry Potter : Wizard of Oz', 'Singaporean food : Malaysian food', 'James Brown : Michael Jackson', 'Brazil : Spain']","['Prince Harry : Monarchy', 'trending music : TikTok', 'Hoover : Dyson', 'English : William Shakespeare']"
9
+ is influenced by,opt-13b,"['Alicia Vikander : Richard Attenborough', 'Joe Biden : Donald Trump', 'Harry Potter : Wizard of Oz', 'Singaporean food : Malaysian food', 'James Brown : Michael Jackson']","['Prince Harry : Monarchy', 'trending music : TikTok', 'Stephen King : Arthur Machen', 'Wales : Westminster', 'English : William Shakespeare']"
10
+ is influenced by,davinci,"['Singaporean food : Malaysian food', 'Harry Potter : Wizard of Oz']","['Prince Harry : Monarchy', 'trending music : TikTok', 'English : William Shakespeare']"
11
+ is known for,flan-t5-xxl,"['Michael Jordan : Tessa Thompson', 'Italy : Hawaiian pizza', 'Inglourious Basterds : Sergio Busquets', 'Neil Armstrong : Korean War', 'Italy : tea', 'Coca-Cola : Pepsi', 'Rafael Nadal : Ralph Macchio', 'Romania : Roman Catholicism', 'Charles Bronson : Rory McIlroy']","['Greggs : sausage rolls', 'Thomas Edison : light bulb', 'Canada : maple syrup', 'Harvey Weinstein : Miramax', 'Europe : The Final Countdown', 'OpenAI : ChatGPT', 'UK : rain', 'Spain : olive oil', 'Valencia : paella']"
12
+ is known for,opt-13b,"['Inglourious Basterds : Sergio Busquets', 'Coca-Cola : Pepsi']","['Valencia : paella', 'OpenAI : ChatGPT', 'UK : rain']"
13
+ is known for,davinci,"['Inglourious Basterds : Sergio Busquets', 'Coca-Cola : Pepsi', 'Sophie Turner : Sylvia Plath', 'George Washington : Kiribati']","['Valencia : paella', 'OpenAI : ChatGPT', 'Bill Nye : scientist', 'Nvidia : GPUs']"
14
+ is similar to,flan-t5-xxl,"['Uzbekistan : United States', 'Dionysus : Toyota Corolla', ""Chess : Rubik's Cube""]","['Counter Strike : Rainbow Six', 'fusilli : rotini', 'Primark : Shein', 'PS5 : XBox', 'Cerave : Nivea']"
15
+ is similar to,opt-13b,"['Nicolae Ceaușescu : Javier Hernández', ""Chess : Rubik's Cube""]","['pill : tablet', 'Great Britian : British Empire', 'bourbon : Scotch whisky', 'fusilli : rotini', 'Minnesota : Wisconsin']"
16
+ is similar to,davinci,"['Nicolae Ceaușescu : Javier Hernández', ""Chess : Rubik's Cube""]","['pill : tablet', 'bourbon : Scotch whisky', 'fusilli : rotini', 'Primark : Shein', 'Homebase : IKEA', 'Cerave : Nivea']"
experiments/analysis/qualitative/lc.30.format.csv ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model,relation_type,top,bottom
2
+ Flan-T5 extsubscript{XXL},Ally,Liam Gallagher : Noel Gallagher,Gondor : Rohan
3
+ Flan-T5 extsubscript{XXL},Ally,Joseph Stalin : Josip Broz Tito,FTX : Alameda Research
4
+ Flan-T5 extsubscript{XXL},Ally,Sophia Loren : Marlon Brando,Red Bull : GoPro
5
+ Flan-T5 extsubscript{XXL},Ally,Catherine Zeta-Jones : Johnny Knoxville,Aznar : Bush
6
+ Flan-T5 extsubscript{XXL},Inf,Joe Biden : Donald Trump,Prince Harry : Monarchy
7
+ Flan-T5 extsubscript{XXL},Inf,Harry Potter : Wizard of Oz,trending music : TikTok
8
+ Flan-T5 extsubscript{XXL},Inf,Singaporean food : Malaysian food,Hoover : Dyson
9
+ Flan-T5 extsubscript{XXL},Inf,James Brown : Michael Jackson,English : William Shakespeare
10
+ Flan-T5 extsubscript{XXL},Know,Michael Jordan : Tessa Thompson,Greggs : sausage rolls
11
+ Flan-T5 extsubscript{XXL},Know,Italy : Hawaiian pizza,Thomas Edison : light bulb
12
+ Flan-T5 extsubscript{XXL},Know,Inglourious Basterds : Sergio Busquets,Canada : maple syrup
13
+ Flan-T5 extsubscript{XXL},Know,Neil Armstrong : Korean War,Harvey Weinstein : Miramax
14
+ Flan-T5 extsubscript{XXL},Rival,Germany : Austria,Eminem : MGK
15
+ Flan-T5 extsubscript{XXL},Rival,,AWS : GCP
16
+ Flan-T5 extsubscript{XXL},Sim,Uzbekistan : United States,Counter Strike : Rainbow Six
17
+ Flan-T5 extsubscript{XXL},Sim,Dionysus : Toyota Corolla,fusilli : rotini
18
+ Flan-T5 extsubscript{XXL},Sim,Chess : Rubik's Cube,Primark : Shein
19
+ Flan-T5 extsubscript{XXL},Sim,,PS5 : XBox
20
+ GPT-3 extsubscript{davinci},Ally,Joseph Stalin : Josip Broz Tito,FTX : Alameda Research
21
+ GPT-3 extsubscript{davinci},Ally,Sophia Loren : Marlon Brando,Elsa : Anna
22
+ GPT-3 extsubscript{davinci},Ally,Armenia : Azerbaijan,Rishi Sunak : Joe Biden
23
+ GPT-3 extsubscript{davinci},Inf,Singaporean food : Malaysian food,Prince Harry : Monarchy
24
+ GPT-3 extsubscript{davinci},Inf,Harry Potter : Wizard of Oz,trending music : TikTok
25
+ GPT-3 extsubscript{davinci},Inf,,English : William Shakespeare
26
+ GPT-3 extsubscript{davinci},Know,Inglourious Basterds : Sergio Busquets,Valencia : paella
27
+ GPT-3 extsubscript{davinci},Know,Coca-Cola : Pepsi,OpenAI : ChatGPT
28
+ GPT-3 extsubscript{davinci},Know,Sophie Turner : Sylvia Plath,Bill Nye : scientist
29
+ GPT-3 extsubscript{davinci},Know,George Washington : Kiribati,Nvidia : GPUs
30
+ GPT-3 extsubscript{davinci},Rival,Serena Williams : Andy Murray,Netflix : Disney Plus
31
+ GPT-3 extsubscript{davinci},Rival,,Eminem : MGK
32
+ GPT-3 extsubscript{davinci},Sim,Nicolae Ceaușescu : Javier Hernández,pill : tablet
33
+ GPT-3 extsubscript{davinci},Sim,Chess : Rubik's Cube,bourbon : Scotch whisky
34
+ GPT-3 extsubscript{davinci},Sim,,fusilli : rotini
35
+ GPT-3 extsubscript{davinci},Sim,,Primark : Shein
36
+ OPT extsubscript{13B},Ally,Joseph Stalin : Josip Broz Tito,FTX : Alameda Research
37
+ OPT extsubscript{13B},Ally,Sophia Loren : Marlon Brando,Elsa : Anna
38
+ OPT extsubscript{13B},Ally,Armenia : Azerbaijan,Aznar : Bush
39
+ OPT extsubscript{13B},Ally,,Windows : Xbox
40
+ OPT extsubscript{13B},Inf,Alicia Vikander : Richard Attenborough,Prince Harry : Monarchy
41
+ OPT extsubscript{13B},Inf,Joe Biden : Donald Trump,trending music : TikTok
42
+ OPT extsubscript{13B},Inf,Harry Potter : Wizard of Oz,Stephen King : Arthur Machen
43
+ OPT extsubscript{13B},Inf,Singaporean food : Malaysian food,Wales : Westminster
44
+ OPT extsubscript{13B},Know,Inglourious Basterds : Sergio Busquets,Valencia : paella
45
+ OPT extsubscript{13B},Know,Coca-Cola : Pepsi,OpenAI : ChatGPT
46
+ OPT extsubscript{13B},Know,,UK : rain
47
+ OPT extsubscript{13B},Rival,Bashar al-Assad : Christianity,Netflix : Disney Plus
48
+ OPT extsubscript{13B},Sim,Nicolae Ceaușescu : Javier Hernández,pill : tablet
49
+ OPT extsubscript{13B},Sim,Chess : Rubik's Cube,Great Britian : British Empire
50
+ OPT extsubscript{13B},Sim,,bourbon : Scotch whisky
51
+ OPT extsubscript{13B},Sim,,fusilli : rotini
experiments/analysis/qualitative/lc.31.csv ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ relation_type,model,top_pred_and_bottom_gold,bottom_pred_and_top_gold
2
+ is competitor/rival of,flan-t5-xxl,[],['AWS : GCP']
3
+ is competitor/rival of,opt-13b,[],[]
4
+ is competitor/rival of,davinci,[],['Netflix : Disney Plus']
5
+ is friend/ally of,flan-t5-xxl,"['Liam Gallagher : Noel Gallagher', 'Joseph Stalin : Josip Broz Tito', 'Sophia Loren : Marlon Brando', 'Catherine Zeta-Jones : Johnny Knoxville', 'Armenia : Azerbaijan', 'Russia : Georgia']","['Gondor : Rohan', 'FTX : Alameda Research', 'Red Bull : GoPro', 'Aznar : Bush', 'Windows : Xbox', 'Rishi Sunak : Joe Biden']"
6
+ is friend/ally of,opt-13b,"['Joseph Stalin : Josip Broz Tito', 'Sophia Loren : Marlon Brando', 'Armenia : Azerbaijan']","['FTX : Alameda Research', 'Elsa : Anna', 'Aznar : Bush', 'Windows : Xbox', 'Rishi Sunak : Joe Biden']"
7
+ is friend/ally of,davinci,"['Joseph Stalin : Josip Broz Tito', 'Sophia Loren : Marlon Brando', 'Armenia : Azerbaijan']","['FTX : Alameda Research', 'Elsa : Anna', 'Rishi Sunak : Joe Biden']"
8
+ is influenced by,flan-t5-xxl,"['Joe Biden : Donald Trump', 'Brazil : Spain', 'Harry Potter : Wizard of Oz', 'James Brown : Michael Jackson']","['Prince Harry : Monarchy', 'trending music : TikTok', 'English : William Shakespeare']"
9
+ is influenced by,opt-13b,"['Singaporean food : Malaysian food', 'Joe Biden : Donald Trump', 'James Brown : Michael Jackson', 'Harry Potter : Wizard of Oz']","['Prince Harry : Monarchy', 'trending music : TikTok', 'Wales : Westminster', 'English : William Shakespeare']"
10
+ is influenced by,davinci,"['Singaporean food : Malaysian food', 'Harry Potter : Wizard of Oz']","['Prince Harry : Monarchy', 'trending music : TikTok', 'English : William Shakespeare']"
11
+ is known for,flan-t5-xxl,"['Michael Jordan : Tessa Thompson', 'Italy : Hawaiian pizza', 'Inglourious Basterds : Sergio Busquets', 'Neil Armstrong : Korean War', 'Italy : tea', 'Coca-Cola : Pepsi', 'Rafael Nadal : Ralph Macchio', 'Romania : Roman Catholicism', 'Charles Bronson : Rory McIlroy']","['Red Bull : energy drinks', 'Thomas Edison : light bulb', 'Canada : maple syrup', 'Harvey Weinstein : Miramax', 'Alphabet Inc. : Google', 'Europe : The Final Countdown', 'OpenAI : ChatGPT', 'UK : rain', 'Spain : olive oil', 'Valencia : paella']"
12
+ is known for,opt-13b,"['India : Gurkhas', 'Coca-Cola : Pepsi', 'Inglourious Basterds : Sergio Busquets']","['Valencia : paella', 'OpenAI : ChatGPT', 'UK : rain']"
13
+ is known for,davinci,"['India : Gurkhas', 'Inglourious Basterds : Sergio Busquets', 'Sophie Turner : Sylvia Plath', 'Coca-Cola : Pepsi', 'George Washington : Kiribati']","['Valencia : paella', 'OpenAI : ChatGPT', 'Bill Nye : scientist', 'Nvidia : GPUs']"
14
+ is similar to,flan-t5-xxl,"['Uzbekistan : United States', 'Dionysus : Toyota Corolla', ""Chess : Rubik's Cube""]","['Counter Strike : Rainbow Six', 'fusilli : rotini', 'Shark : Bush', 'PS5 : XBox', 'Cerave : Nivea']"
15
+ is similar to,opt-13b,"['Nicolae Ceaușescu : Javier Hernández', ""Chess : Rubik's Cube""]","['pill : tablet', 'Great Britian : British Empire', 'fusilli : rotini', 'Shark : Bush', 'Minnesota : Wisconsin']"
16
+ is similar to,davinci,['Nicolae Ceaușescu : Javier Hernández'],"['pill : tablet', 'fusilli : rotini', 'Shark : Bush', 'Homebase : IKEA', 'Cerave : Nivea']"
experiments/analysis/qualitative/lc.31.format.csv ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model,relation_type,top,bottom
2
+ Flan-T5 extsubscript{XXL},Ally,Liam Gallagher : Noel Gallagher,Gondor : Rohan
3
+ Flan-T5 extsubscript{XXL},Ally,Joseph Stalin : Josip Broz Tito,FTX : Alameda Research
4
+ Flan-T5 extsubscript{XXL},Ally,Sophia Loren : Marlon Brando,Red Bull : GoPro
5
+ Flan-T5 extsubscript{XXL},Ally,Catherine Zeta-Jones : Johnny Knoxville,Aznar : Bush
6
+ Flan-T5 extsubscript{XXL},Inf,Joe Biden : Donald Trump,Prince Harry : Monarchy
7
+ Flan-T5 extsubscript{XXL},Inf,Brazil : Spain,trending music : TikTok
8
+ Flan-T5 extsubscript{XXL},Inf,Harry Potter : Wizard of Oz,English : William Shakespeare
9
+ Flan-T5 extsubscript{XXL},Inf,James Brown : Michael Jackson,
10
+ Flan-T5 extsubscript{XXL},Know,Michael Jordan : Tessa Thompson,Red Bull : energy drinks
11
+ Flan-T5 extsubscript{XXL},Know,Italy : Hawaiian pizza,Thomas Edison : light bulb
12
+ Flan-T5 extsubscript{XXL},Know,Inglourious Basterds : Sergio Busquets,Canada : maple syrup
13
+ Flan-T5 extsubscript{XXL},Know,Neil Armstrong : Korean War,Harvey Weinstein : Miramax
14
+ Flan-T5 extsubscript{XXL},Rival,,AWS : GCP
15
+ Flan-T5 extsubscript{XXL},Sim,Uzbekistan : United States,Counter Strike : Rainbow Six
16
+ Flan-T5 extsubscript{XXL},Sim,Dionysus : Toyota Corolla,fusilli : rotini
17
+ Flan-T5 extsubscript{XXL},Sim,Chess : Rubik's Cube,Shark : Bush
18
+ Flan-T5 extsubscript{XXL},Sim,,PS5 : XBox
19
+ GPT-3 extsubscript{davinci},Ally,Joseph Stalin : Josip Broz Tito,FTX : Alameda Research
20
+ GPT-3 extsubscript{davinci},Ally,Sophia Loren : Marlon Brando,Elsa : Anna
21
+ GPT-3 extsubscript{davinci},Ally,Armenia : Azerbaijan,Rishi Sunak : Joe Biden
22
+ GPT-3 extsubscript{davinci},Inf,Singaporean food : Malaysian food,Prince Harry : Monarchy
23
+ GPT-3 extsubscript{davinci},Inf,Harry Potter : Wizard of Oz,trending music : TikTok
24
+ GPT-3 extsubscript{davinci},Inf,,English : William Shakespeare
25
+ GPT-3 extsubscript{davinci},Know,India : Gurkhas,Valencia : paella
26
+ GPT-3 extsubscript{davinci},Know,Inglourious Basterds : Sergio Busquets,OpenAI : ChatGPT
27
+ GPT-3 extsubscript{davinci},Know,Sophie Turner : Sylvia Plath,Bill Nye : scientist
28
+ GPT-3 extsubscript{davinci},Know,Coca-Cola : Pepsi,Nvidia : GPUs
29
+ GPT-3 extsubscript{davinci},Rival,,Netflix : Disney Plus
30
+ GPT-3 extsubscript{davinci},Sim,Nicolae Ceaușescu : Javier Hernández,pill : tablet
31
+ GPT-3 extsubscript{davinci},Sim,,fusilli : rotini
32
+ GPT-3 extsubscript{davinci},Sim,,Shark : Bush
33
+ GPT-3 extsubscript{davinci},Sim,,Homebase : IKEA
34
+ OPT extsubscript{13B},Ally,Joseph Stalin : Josip Broz Tito,FTX : Alameda Research
35
+ OPT extsubscript{13B},Ally,Sophia Loren : Marlon Brando,Elsa : Anna
36
+ OPT extsubscript{13B},Ally,Armenia : Azerbaijan,Aznar : Bush
37
+ OPT extsubscript{13B},Ally,,Windows : Xbox
38
+ OPT extsubscript{13B},Inf,Singaporean food : Malaysian food,Prince Harry : Monarchy
39
+ OPT extsubscript{13B},Inf,Joe Biden : Donald Trump,trending music : TikTok
40
+ OPT extsubscript{13B},Inf,James Brown : Michael Jackson,Wales : Westminster
41
+ OPT extsubscript{13B},Inf,Harry Potter : Wizard of Oz,English : William Shakespeare
42
+ OPT extsubscript{13B},Know,India : Gurkhas,Valencia : paella
43
+ OPT extsubscript{13B},Know,Coca-Cola : Pepsi,OpenAI : ChatGPT
44
+ OPT extsubscript{13B},Know,Inglourious Basterds : Sergio Busquets,UK : rain
45
+ OPT extsubscript{13B},Sim,Nicolae Ceaușescu : Javier Hernández,pill : tablet
46
+ OPT extsubscript{13B},Sim,Chess : Rubik's Cube,Great Britian : British Empire
47
+ OPT extsubscript{13B},Sim,,fusilli : rotini
48
+ OPT extsubscript{13B},Sim,,Shark : Bush
experiments/analysis/qualitative/qa.30.csv ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ relation_type,model,top_pred_and_bottom_gold,bottom_pred_and_top_gold
2
+ is competitor/rival of,flan-t5-xxl,[],['Isaac Newton : Gottfried Leibniz']
3
+ is competitor/rival of,opt-13b,"['Serena Williams : Andy Murray', 'Olympic Games : Helicobacter pylori', 'Mikhail Khodorkovsky : Hezbollah', 'Bashar al-Assad : Christianity']","['Netflix : Disney Plus', 'AWS : GCP']"
4
+ is competitor/rival of,davinci,"['Serena Williams : Andy Murray', 'Olympic Games : Helicobacter pylori', 'Bashar al-Assad : Christianity']","['Netflix : Disney Plus', 'Eminem : MGK', 'Saudi Arabia : Israel']"
5
+ is friend/ally of,flan-t5-xxl,"['Keir Starmer : Jeremy Corbyn', 'Liam Gallagher : Noel Gallagher', 'Russia : Georgia', 'Armenia : Azerbaijan']","['Ron Weasley : Neville Longbottom', 'Elsa : Anna', 'Aznar : Bush', 'Rishi Sunak : Joe Biden']"
6
+ is friend/ally of,opt-13b,"['Joseph Stalin : Josip Broz Tito', 'Sophia Loren : Marlon Brando', 'Armenia : Azerbaijan']","['Microsoft : LinkedIn', 'FTX : Alameda Research', 'Red Bull : GoPro', 'Aznar : Bush', 'Windows : Xbox', 'Rishi Sunak : Joe Biden']"
7
+ is friend/ally of,davinci,"['Walter White : Gus Fring', 'Joseph Stalin : Josip Broz Tito', 'Sophia Loren : Marlon Brando', 'Armenia : Azerbaijan']","['FTX : Alameda Research', 'Elsa : Anna', 'Aznar : Bush', 'Windows : Xbox', 'Rishi Sunak : Joe Biden']"
8
+ is influenced by,flan-t5-xxl,"['Luke Bryan : Hank Williams', 'Harry Potter : Wizard of Oz', 'Singaporean food : Malaysian food', 'James Brown : Michael Jackson', 'heavy metal : punk music']","['Prince Harry : Monarchy', 'Pepsi : Coca-Cola', 'trending music : TikTok', 'Wales : Westminster', 'Coca-Cola : Pepsi', 'Ethereum : Bitcoin', 'Apple Music : Spotify']"
9
+ is influenced by,opt-13b,"['Alicia Vikander : Richard Attenborough', 'Joe Biden : Donald Trump', 'Harry Potter : Wizard of Oz', 'Singaporean food : Malaysian food', 'James Brown : Michael Jackson']","['Prince Harry : Monarchy', 'trending music : TikTok', 'Stephen King : Arthur Machen', 'Wales : Westminster', 'European Union : Germany', 'Commonwealth : United Kingdom', 'English : William Shakespeare']"
10
+ is influenced by,davinci,"['Singaporean food : Malaysian food', 'James Brown : Michael Jackson', 'Alicia Vikander : Richard Attenborough', 'Harry Potter : Wizard of Oz']","['Prince Harry : Monarchy', 'trending music : TikTok', 'Stephen King : Arthur Machen', 'Commonwealth : United Kingdom', 'English : William Shakespeare']"
11
+ is known for,flan-t5-xxl,"['Romania : Roman Catholicism', 'Belgium : wine']","['Harvey Weinstein : Miramax', 'France : cheese', 'Boris Johnson : Brexit', 'Europe : The Final Countdown', 'Jeff Goldblum : Jurassic Park']"
12
+ is known for,opt-13b,"['Inglourious Basterds : Sergio Busquets', 'Sophie Turner : Sylvia Plath', 'Steve Jobs : AirPods', 'Coca-Cola : Pepsi', 'Rafael Nadal : Ralph Macchio', 'Pixar : Novosibirsk', 'Belgium : wine']","['Europe : The Final Countdown', 'OpenAI : ChatGPT', 'UK : rain', 'Jackson Pollock : action painting', 'Spain : olive oil', 'Valencia : paella']"
13
+ is known for,davinci,"['Inglourious Basterds : Sergio Busquets', 'Rafael Nadal : Ralph Macchio', 'Coca-Cola : Pepsi', 'Sophie Turner : Sylvia Plath']","['Valencia : paella', 'OpenAI : ChatGPT', 'UK : rain', 'Spain : olive oil']"
14
+ is similar to,flan-t5-xxl,"['New York : York', 'cannoli : canneloni', 'sphinx : sphynx', 'Gameboy : Nintendo']",['Suits : Law & Order']
15
+ is similar to,opt-13b,"['Gisele Bündchen : Orson Welles', 'Eduardo Saverin : Guinea-Bissau', 'Nicolae Ceaușescu : Javier Hernández', ""Chess : Rubik's Cube"", 'sphinx : sphynx']","['pill : tablet', 'Great Britian : British Empire', 'bourbon : Scotch whisky', 'fusilli : rotini', 'Minnesota : Wisconsin']"
16
+ is similar to,davinci,"['Gisele Bündchen : Orson Welles', 'Eduardo Saverin : Guinea-Bissau', 'Nicolae Ceaușescu : Javier Hernández', ""Chess : Rubik's Cube"", 'sphinx : sphynx']","['pill : tablet', 'Great Britian : British Empire', 'fusilli : rotini', 'Primark : Shein', 'PS5 : XBox', 'Cerave : Nivea', 'Minnesota : Wisconsin']"
experiments/analysis/qualitative/qa.30.format.csv ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ relation_type,model,top,bottom
2
+ Rival,OPT extsubscript{13B},"Serena Williams : Andy Murray, Olympic Games : Helicobacter pylori","Netflix : Disney Plus, AWS : GCP"
3
+ Rival,GPT-3 extsubscript{davinci},"Serena Williams : Andy Murray, Olympic Games : Helicobacter pylori","Netflix : Disney Plus, Eminem : MGK"
4
+ Ally,Flan-T5 extsubscript{XXL},"Keir Starmer : Jeremy Corbyn, Liam Gallagher : Noel Gallagher","Ron Weasley : Neville Longbottom, Elsa : Anna"
5
+ Ally,OPT extsubscript{13B},"Joseph Stalin : Josip Broz Tito, Sophia Loren : Marlon Brando","Microsoft : LinkedIn, FTX : Alameda Research"
6
+ Ally,OPT extsubscript{13B},Armenia : Azerbaijan,"Red Bull : GoPro, Aznar : Bush"
7
+ Ally,GPT-3 extsubscript{davinci},"Walter White : Gus Fring, Joseph Stalin : Josip Broz Tito","FTX : Alameda Research, Elsa : Anna"
8
+ Inf,Flan-T5 extsubscript{XXL},"Luke Bryan : Hank Williams, Harry Potter : Wizard of Oz","Prince Harry : Monarchy, Pepsi : Coca-Cola"
9
+ Inf,Flan-T5 extsubscript{XXL},"Singaporean food : Malaysian food, James Brown : Michael Jackson","trending music : TikTok, Wales : Westminster"
10
+ Inf,OPT extsubscript{13B},"Alicia Vikander : Richard Attenborough, Joe Biden : Donald Trump","Prince Harry : Monarchy, trending music : TikTok"
11
+ Inf,OPT extsubscript{13B},"Harry Potter : Wizard of Oz, Singaporean food : Malaysian food","Stephen King : Arthur Machen, Wales : Westminster"
12
+ Inf,GPT-3 extsubscript{davinci},"Singaporean food : Malaysian food, James Brown : Michael Jackson","Prince Harry : Monarchy, trending music : TikTok"
13
+ Know,Flan-T5 extsubscript{XXL},"Romania : Roman Catholicism, Belgium : wine","Harvey Weinstein : Miramax, France : cheese"
14
+ Know,OPT extsubscript{13B},"Inglourious Basterds : Sergio Busquets, Sophie Turner : Sylvia Plath","Europe : The Final Countdown, OpenAI : ChatGPT"
15
+ Know,OPT extsubscript{13B},"Steve Jobs : AirPods, Coca-Cola : Pepsi","UK : rain, Jackson Pollock : action painting"
16
+ Know,GPT-3 extsubscript{davinci},"Inglourious Basterds : Sergio Busquets, Rafael Nadal : Ralph Macchio","Valencia : paella, OpenAI : ChatGPT"
17
+ Sim,Flan-T5 extsubscript{XXL},"New York : York, cannoli : canneloni",
18
+ Sim,OPT extsubscript{13B},"Gisele Bündchen : Orson Welles, Eduardo Saverin : Guinea-Bissau","pill : tablet, Great Britian : British Empire"
19
+ Sim,GPT-3 extsubscript{davinci},"Gisele Bündchen : Orson Welles, Eduardo Saverin : Guinea-Bissau","pill : tablet, Great Britian : British Empire"
20
+ Sim,GPT-3 extsubscript{davinci},"Nicolae Ceaușescu : Javier Hernández, Chess : Rubik's Cube","fusilli : rotini, Primark : Shein"
experiments/analysis/qualitative/qa.31.csv ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ relation_type,model,top_pred_and_bottom_gold,bottom_pred_and_top_gold
2
+ is competitor/rival of,flan-t5-xxl,[],[]
3
+ is competitor/rival of,opt-13b,"['Bashar al-Assad : Christianity', 'Olympic Games : Helicobacter pylori', 'Serena Williams : Andy Murray', 'Mikhail Khodorkovsky : Hezbollah']","['Netflix : Disney Plus', 'AWS : GCP']"
4
+ is competitor/rival of,davinci,['Olympic Games : Helicobacter pylori'],['Netflix : Disney Plus']
5
+ is friend/ally of,flan-t5-xxl,"['Liam Gallagher : Noel Gallagher', 'Russia : Georgia', 'Armenia : Azerbaijan']","['Elsa : Anna', 'Aznar : Bush', 'Rishi Sunak : Joe Biden']"
6
+ is friend/ally of,opt-13b,"['Joseph Stalin : Josip Broz Tito', 'Sophia Loren : Marlon Brando', 'Armenia : Azerbaijan']","['Microsoft : LinkedIn', 'FTX : Alameda Research', 'Red Bull : GoPro', 'Aznar : Bush', 'Windows : Xbox', 'Rishi Sunak : Joe Biden']"
7
+ is friend/ally of,davinci,"['Walter White : Gus Fring', 'Joseph Stalin : Josip Broz Tito', 'Sophia Loren : Marlon Brando', 'Armenia : Azerbaijan']","['FTX : Alameda Research', 'Elsa : Anna', 'Aznar : Bush', 'Windows : Xbox', 'Rishi Sunak : Joe Biden']"
8
+ is influenced by,flan-t5-xxl,"['Luke Bryan : Hank Williams', 'James Brown : Michael Jackson', 'heavy metal : punk music', 'Harry Potter : Wizard of Oz']","['Prince Harry : Monarchy', 'trending music : TikTok', 'Wales : Westminster', 'Coca-Cola : Pepsi', 'Ethereum : Bitcoin', 'Apple Music : Spotify']"
9
+ is influenced by,opt-13b,"['Alicia Vikander : Richard Attenborough', 'Joe Biden : Donald Trump', 'Harry Potter : Wizard of Oz', 'Singaporean food : Malaysian food', 'James Brown : Michael Jackson']","['Prince Harry : Monarchy', 'trending music : TikTok', 'Wales : Westminster', 'European Union : Germany', 'Commonwealth : United Kingdom', 'English : William Shakespeare']"
10
+ is influenced by,davinci,"['Singaporean food : Malaysian food', 'James Brown : Michael Jackson', 'Alicia Vikander : Richard Attenborough', 'Harry Potter : Wizard of Oz']","['Prince Harry : Monarchy', 'trending music : TikTok', 'Commonwealth : United Kingdom', 'English : William Shakespeare']"
11
+ is known for,flan-t5-xxl,"['India : Gurkhas', 'France : rococo movement', 'Romania : Roman Catholicism', 'Belgium : wine']","['Harvey Weinstein : Miramax', 'France : cheese', 'Boris Johnson : Brexit', 'Europe : The Final Countdown', 'Jeff Goldblum : Jurassic Park']"
12
+ is known for,opt-13b,"['India : Gurkhas', 'Inglourious Basterds : Sergio Busquets', 'Sophie Turner : Sylvia Plath', 'Steve Jobs : AirPods', 'Coca-Cola : Pepsi', 'Rafael Nadal : Ralph Macchio', 'Pixar : Novosibirsk', 'Belgium : wine']","['Europe : The Final Countdown', 'OpenAI : ChatGPT', 'UK : rain', 'Jackson Pollock : action painting', 'Spain : olive oil', 'Valencia : paella']"
13
+ is known for,davinci,"['India : Gurkhas', 'Inglourious Basterds : Sergio Busquets', 'Sophie Turner : Sylvia Plath', 'Coca-Cola : Pepsi', 'Rafael Nadal : Ralph Macchio']","['OpenAI : ChatGPT', 'Bill Nye : scientist', 'UK : rain', 'Spain : olive oil', 'Valencia : paella']"
14
+ is similar to,flan-t5-xxl,"['New York : York', 'cannoli : canneloni', 'sphinx : sphynx', 'Gameboy : Nintendo']","['Shark : Bush', 'Suits : Law & Order']"
15
+ is similar to,opt-13b,"['Gisele Bündchen : Orson Welles', 'Eduardo Saverin : Guinea-Bissau', 'Nicolae Ceaușescu : Javier Hernández', ""Chess : Rubik's Cube"", 'sphinx : sphynx']","['pill : tablet', 'Great Britian : British Empire', 'fusilli : rotini', 'Shark : Bush', 'Minnesota : Wisconsin']"
16
+ is similar to,davinci,"['Gisele Bündchen : Orson Welles', 'Eduardo Saverin : Guinea-Bissau', 'Nicolae Ceaușescu : Javier Hernández', ""Chess : Rubik's Cube"", 'sphinx : sphynx']","['pill : tablet', 'Great Britian : British Empire', 'fusilli : rotini', 'Shark : Bush', 'PS5 : XBox', 'Cerave : Nivea', 'Minnesota : Wisconsin']"
experiments/analysis/qualitative/qa.31.format.csv ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model,relation_type,top,bottom
2
+ Flan-T5 extsubscript{XXL},Ally,Liam Gallagher : Noel Gallagher,Elsa : Anna
3
+ Flan-T5 extsubscript{XXL},Ally,Russia : Georgia,Aznar : Bush
4
+ Flan-T5 extsubscript{XXL},Ally,Armenia : Azerbaijan,Rishi Sunak : Joe Biden
5
+ Flan-T5 extsubscript{XXL},Inf,Luke Bryan : Hank Williams,Prince Harry : Monarchy
6
+ Flan-T5 extsubscript{XXL},Inf,James Brown : Michael Jackson,trending music : TikTok
7
+ Flan-T5 extsubscript{XXL},Inf,heavy metal : punk music,Wales : Westminster
8
+ Flan-T5 extsubscript{XXL},Inf,Harry Potter : Wizard of Oz,Coca-Cola : Pepsi
9
+ Flan-T5 extsubscript{XXL},Know,India : Gurkhas,Harvey Weinstein : Miramax
10
+ Flan-T5 extsubscript{XXL},Know,France : rococo movement,France : cheese
11
+ Flan-T5 extsubscript{XXL},Know,Romania : Roman Catholicism,Boris Johnson : Brexit
12
+ Flan-T5 extsubscript{XXL},Know,Belgium : wine,Europe : The Final Countdown
13
+ Flan-T5 extsubscript{XXL},Sim,New York : York,Shark : Bush
14
+ Flan-T5 extsubscript{XXL},Sim,cannoli : canneloni,Suits : Law & Order
15
+ Flan-T5 extsubscript{XXL},Sim,sphinx : sphynx,
16
+ Flan-T5 extsubscript{XXL},Sim,Gameboy : Nintendo,
17
+ GPT-3 extsubscript{davinci},Ally,Walter White : Gus Fring,FTX : Alameda Research
18
+ GPT-3 extsubscript{davinci},Ally,Joseph Stalin : Josip Broz Tito,Elsa : Anna
19
+ GPT-3 extsubscript{davinci},Ally,Sophia Loren : Marlon Brando,Aznar : Bush
20
+ GPT-3 extsubscript{davinci},Ally,Armenia : Azerbaijan,Windows : Xbox
21
+ GPT-3 extsubscript{davinci},Inf,Singaporean food : Malaysian food,Prince Harry : Monarchy
22
+ GPT-3 extsubscript{davinci},Inf,James Brown : Michael Jackson,trending music : TikTok
23
+ GPT-3 extsubscript{davinci},Inf,Alicia Vikander : Richard Attenborough,Commonwealth : United Kingdom
24
+ GPT-3 extsubscript{davinci},Inf,Harry Potter : Wizard of Oz,English : William Shakespeare
25
+ GPT-3 extsubscript{davinci},Know,India : Gurkhas,OpenAI : ChatGPT
26
+ GPT-3 extsubscript{davinci},Know,Inglourious Basterds : Sergio Busquets,Bill Nye : scientist
27
+ GPT-3 extsubscript{davinci},Know,Sophie Turner : Sylvia Plath,UK : rain
28
+ GPT-3 extsubscript{davinci},Know,Coca-Cola : Pepsi,Spain : olive oil
29
+ GPT-3 extsubscript{davinci},Rival,Olympic Games : Helicobacter pylori,Netflix : Disney Plus
30
+ GPT-3 extsubscript{davinci},Sim,Gisele Bündchen : Orson Welles,pill : tablet
31
+ GPT-3 extsubscript{davinci},Sim,Eduardo Saverin : Guinea-Bissau,Great Britian : British Empire
32
+ GPT-3 extsubscript{davinci},Sim,Nicolae Ceaușescu : Javier Hernández,fusilli : rotini
33
+ GPT-3 extsubscript{davinci},Sim,Chess : Rubik's Cube,Shark : Bush
34
+ OPT extsubscript{13B},Ally,Joseph Stalin : Josip Broz Tito,Microsoft : LinkedIn
35
+ OPT extsubscript{13B},Ally,Sophia Loren : Marlon Brando,FTX : Alameda Research
36
+ OPT extsubscript{13B},Ally,Armenia : Azerbaijan,Red Bull : GoPro
37
+ OPT extsubscript{13B},Ally,,Aznar : Bush
38
+ OPT extsubscript{13B},Inf,Alicia Vikander : Richard Attenborough,Prince Harry : Monarchy
39
+ OPT extsubscript{13B},Inf,Joe Biden : Donald Trump,trending music : TikTok
40
+ OPT extsubscript{13B},Inf,Harry Potter : Wizard of Oz,Wales : Westminster
41
+ OPT extsubscript{13B},Inf,Singaporean food : Malaysian food,European Union : Germany
42
+ OPT extsubscript{13B},Know,India : Gurkhas,Europe : The Final Countdown
43
+ OPT extsubscript{13B},Know,Inglourious Basterds : Sergio Busquets,OpenAI : ChatGPT
44
+ OPT extsubscript{13B},Know,Sophie Turner : Sylvia Plath,UK : rain
45
+ OPT extsubscript{13B},Know,Steve Jobs : AirPods,Jackson Pollock : action painting
46
+ OPT extsubscript{13B},Rival,Bashar al-Assad : Christianity,Netflix : Disney Plus
47
+ OPT extsubscript{13B},Rival,Olympic Games : Helicobacter pylori,AWS : GCP
48
+ OPT extsubscript{13B},Rival,Serena Williams : Andy Murray,
49
+ OPT extsubscript{13B},Rival,Mikhail Khodorkovsky : Hezbollah,
50
+ OPT extsubscript{13B},Sim,Gisele Bündchen : Orson Welles,pill : tablet
51
+ OPT extsubscript{13B},Sim,Eduardo Saverin : Guinea-Bissau,Great Britian : British Empire
52
+ OPT extsubscript{13B},Sim,Nicolae Ceaușescu : Javier Hernández,fusilli : rotini
53
+ OPT extsubscript{13B},Sim,Chess : Rubik's Cube,Shark : Bush
experiments/baseline_fasttext.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import zipfile
2
+ import requests
3
+ import os
4
+ import json
5
+ from statistics import mean
6
+ import pandas as pd
7
+ from gensim.models import fasttext
8
+ from datasets import load_dataset
9
+
10
+
11
# load fasttext
def load_model():
    """Download (once) and load the fastText crawl-300d-2M-subword model.

    The .bin model is cached under ./cache; on the first call the official
    zip archive is downloaded, extracted there, and the archive removed.

    Returns:
        The gensim FastText model loaded via ``load_facebook_model``.
    """
    os.makedirs('./cache', exist_ok=True)
    path = './cache/crawl-300d-2M-subword.bin'
    if not os.path.exists(path):
        url = 'https://dl.fbaipublicfiles.com/fasttext/vectors-english/crawl-300d-2M-subword.zip'
        filename = os.path.basename(url)
        # FIX: interpolate the archive filename; the previous literal
        # placeholder meant `filename` was never used and the archive path
        # was wrong.
        _path = f"./cache/{filename}"
        with open(_path, "wb") as f:
            r = requests.get(url)
            f.write(r.content)
        with zipfile.ZipFile(_path, 'r') as zip_ref:
            zip_ref.extractall("./cache")
        os.remove(_path)
    return fasttext.load_facebook_model(path)
26
+
27
+
28
def cosine_similarity(a, b):
    """Cosine similarity between two equal-length numeric vectors."""
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = sum(x * x for x in a) ** 0.5
    norm_b = sum(y * y for y in b) ** 0.5
    return dot / (norm_a * norm_b)
32
+
33
+
34
def get_vector(_model, _word_a, _word_b):
    """Represent a word pair as the difference of the two word embeddings."""
    return _model[_word_a] - _model[_word_b]
37
+
38
+
39
# load dataset
data = load_dataset("cardiffnlp/relentless_full", split="test")
full_result = []
os.makedirs("./experiments/results/word_embedding/fasttext", exist_ok=True)
scorer = None


def _scores_to_rank(scores):
    # 1-based rank in descending score order (equal scores collapse onto
    # one rank slot, matching the original dict-based mapping)
    order = {s: r for r, s in enumerate(sorted(scores, reverse=True), 1)}
    return [order[s] for s in scores]


for d in data:
    ppl_file = f"experiments/results/word_embedding/fasttext/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl"

    # compute and cache the anchor/option cosine similarities on first run;
    # the fastText model is only loaded when the cache is missing
    if not os.path.exists(ppl_file):
        if scorer is None:
            scorer = load_model()
        anchor_embeddings = [get_vector(scorer, a, b) for a, b in d['positive_examples']]
        option_embeddings = [get_vector(scorer, x, y) for x, y in d['pairs']]
        similarity = [[cosine_similarity(opt, anc) for anc in anchor_embeddings] for opt in option_embeddings]
        with open(ppl_file, "w") as f:
            f.write("\n".join(json.dumps({"similarity": s}) for s in similarity))

    with open(ppl_file) as f:
        similarity = [json.loads(line)['similarity'] for line in f.read().split("\n") if len(line) > 0]

    true_rank = d['ranks']
    assert len(true_rank) == len(similarity), f"Mismatch in number of examples: {len(true_rank)} vs {len(similarity)}"

    # three aggregations of each option's similarities to the anchors
    prediction_max = _scores_to_rank([max(s) for s in similarity])
    prediction_min = _scores_to_rank([min(s) for s in similarity])
    prediction_mean = _scores_to_rank([mean(s) for s in similarity])

    tmp = pd.DataFrame([true_rank, prediction_max, prediction_min, prediction_mean]).T
    cor_max = tmp.corr("spearman").values[0, 1]
    cor_min = tmp.corr("spearman").values[0, 2]
    cor_mean = tmp.corr("spearman").values[0, 3]
    # NOTE(review): "\t" in the label below is a tab escape; "\\textsubscript"
    # was presumably intended for the LaTeX table — confirm against the paper.
    full_result.append({"model": "fastText\textsubscript{pair}", "relation_type": d['relation_type'], "correlation": cor_max})
    # full_result.append({"model": "fasttext (min)", "relation_type": d['relation_type'], "correlation": cor_min})
    # full_result.append({"model": "fastText", "relation_type": d['relation_type'], "correlation": cor_mean})

df = pd.DataFrame(full_result)
df = df.pivot(columns="relation_type", index="model", values="correlation")
df['average'] = df.mean(1)
df.to_csv("experiments/results/word_embedding/fasttext.csv")
df = (100 * df).round()
print(df.to_markdown())
print(df.to_latex())
experiments/baseline_fasttext_zeroshot.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import zipfile
2
+ import requests
3
+ import os
4
+ import json
5
+ import pandas as pd
6
+ from gensim.models import fasttext
7
+ from datasets import load_dataset
8
+
9
+
10
# load fasttext
def load_model():
    """Download (once) and load the fastText crawl-300d-2M-subword model.

    Caches the extracted .bin under ./cache so subsequent runs skip the
    download entirely.
    """
    os.makedirs('./cache', exist_ok=True)
    path = './cache/crawl-300d-2M-subword.bin'
    if not os.path.exists(path):
        url = 'https://dl.fbaipublicfiles.com/fasttext/vectors-english/crawl-300d-2M-subword.zip'
        filename = os.path.basename(url)
        # FIX: the temporary archive path must use the derived filename;
        # it previously contained a literal placeholder, leaving `filename`
        # unused and the download path broken.
        _path = f"./cache/{filename}"
        with open(_path, "wb") as f:
            r = requests.get(url)
            f.write(r.content)
        with zipfile.ZipFile(_path, 'r') as zip_ref:
            zip_ref.extractall("./cache")
        os.remove(_path)
    return fasttext.load_facebook_model(path)
25
+
26
+
27
def cosine_similarity(a, b):
    """Return the cosine of the angle between vectors *a* and *b*."""
    numerator = sum(u * v for u, v in zip(a, b))
    denominator = (sum(u * u for u in a) ** 0.5) * (sum(v * v for v in b) ** 0.5)
    return numerator / denominator
31
+
32
+
33
# load dataset
# Zero-shot baseline: rank candidate pairs purely by the cosine similarity
# of the two words in each pair (no anchor examples used).
data = load_dataset("cardiffnlp/relentless_full", split="test")
full_result = []
os.makedirs("./experiments/results/word_embedding/fasttext_zeroshot", exist_ok=True)
scorer = None
for d in data:
    ppl_file = f"experiments/results/word_embedding/fasttext_zeroshot/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl"

    # compute and cache the per-pair similarities on first run; the model is
    # only loaded when the cache is missing
    if not os.path.exists(ppl_file):
        if scorer is None:
            scorer = load_model()
        similarity = [{"similarity": cosine_similarity(scorer[x], scorer[y])} for x, y in d['pairs']]
        with open(ppl_file, "w") as f:
            f.write("\n".join([json.dumps(i) for i in similarity]))

    with open(ppl_file) as f:
        similarity = [json.loads(i)['similarity'] for i in f.read().split("\n") if len(i) > 0]

    true_rank = d['ranks']
    assert len(true_rank) == len(similarity), f"Mismatch in number of examples: {len(true_rank)} vs {len(similarity)}"
    # FIX: removed a leftover debug `print(similarity)` that dumped the raw
    # similarity list for every record.
    rank_map = {p: n for n, p in enumerate(sorted(similarity, reverse=True), 1)}
    prediction = [rank_map[p] for p in similarity]

    tmp = pd.DataFrame([true_rank, prediction]).T
    corr = tmp.corr("spearman").values[0, 1]
    # NOTE(review): "\t" in the label is a tab escape; "\\textsubscript" was
    # presumably intended for the LaTeX table — confirm before fixing.
    full_result.append({"model": "fastText\textsubscript{word}", "relation_type": d['relation_type'], "correlation": corr})

df = pd.DataFrame(full_result)
df = df.pivot(columns="relation_type", index="model", values="correlation")
df['average'] = df.mean(1)
df.to_csv("experiments/results/word_embedding/fasttext_zeroshot.csv")
df = (100 * df).round()
print(df.to_markdown())
print(df.to_latex())
experiments/baseline_gpt4.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from time import sleep
3
+ import pandas as pd
4
+ import openai
5
+ from datasets import load_dataset
6
+
7
# Test split of the relation-ranking dataset; iterated by the __main__ loop.
data = load_dataset("cardiffnlp/relentless_full", split="test")
openai.api_key = os.getenv("OPENAI_API_KEY", None)
# Short labels used when rendering the final LaTeX result table.
pretty_name = {"is competitor/rival of": "Rival", "is friend/ally of": "Ally", "is influenced by": "Inf", "is known for": "Know", "is similar to": "Sim"}
pretty_model = {"gpt-3.5-turbo": "GPT-3.5", "gpt-4": "GPT-4"}
11
+
12
+
13
def get_reply(model, text):
    """Send a single-turn chat request and return the assistant's message.

    Retries indefinitely with a 10-second back-off on any API error
    (e.g. rate limiting).
    """
    while True:
        try:
            reply = openai.ChatCompletion.create(model=model, messages=[{"role": "user", "content": text}])
        except Exception:
            print('Rate limit exceeded. Waiting for 10 seconds.')
            sleep(10)
        else:
            return reply['choices'][0]['message']['content']
22
+
23
+
24
# Human-readable description of each relation type, interpolated into the prompt.
prompt_dict = {
    "is friend/ally of": "entities that are friends or allies",
    "is competitor/rival of": "entities that are competitors or rivals",
    "is known for": "what entities are known for",
    "is influenced by": "what has influenced different entities",
    "is similar to": "entities that are similar"
}


def get_prompt(_data):
    """Build the sorting instruction prompt for one dataset record.

    The prompt shows the positive examples as a reference list, then asks
    the model to sort the candidate pairs by how well they express the same
    relation.
    """
    rel = prompt_dict[_data["relation_type"]]
    ref = "\n".join(str(pair) for pair in _data["positive_examples"])
    candidates = "\n".join(str(pair) for pair in _data["pairs"])
    prefix = (
        f'Consider the following reference list of {rel}, \n{ref}\n'
        f'Now sort the entity pairs from the following list based on the extent to which they also represent '
        f'{rel} in descending order. Do not include the pairs from the reference list. '
        f'The output should contain all the entity pairs from the following list and no duplicates:\n'
    )
    return f'{prefix}\n\n{candidates}'
41
+
42
+
43
if __name__ == '__main__':
    os.makedirs('experiments/results/chat', exist_ok=True)

    full_result = []  # Spearman correlation per (model, relation_type)
    valid_count = []  # % of candidate pairs the model actually returned
    for target_model in ['gpt-3.5-turbo', 'gpt-4']:

        for d in data:
            # One cached raw completion per model x relation type; only query
            # the API when the cache file is missing.
            output_file = f"experiments/results/chat/{target_model}.{d['relation_type'].replace(' ', '_').replace('/', '-')}.json"
            if not os.path.exists(output_file):
                print(target_model, d['relation_type'])
                i = get_prompt(d)
                out = get_reply(target_model, i)
                with open(output_file, 'w') as f:
                    f.write(out)
            with open(output_file) as f:
                string_pairs = [f'{str(_i)}' for _i in d["pairs"]]
                out = [i for i in f.read().split("\n") if len(i) > 0]
                # out = [str(eval(i)) for i in out]
                new_out = []
                # Parse each reply line back into a canonical "['a', 'b']"
                # string; lines without a well-formed bracketed list are
                # skipped, as are duplicates.
                for i in out:
                    try:
                        i = "[" + i.replace("],", "]").split("[")[1]
                        i = i.split("]")[0] + "]"
                        # NOTE(review): eval() on model output — replies are
                        # locally cached files here, but confirm this is an
                        # acceptable trust boundary.
                        i = str(eval(i))
                        if i not in new_out:
                            new_out.append(i)
                    except Exception:
                        continue
                # Pairs the model omitted (or mangled) are appended at the
                # bottom of the predicted ranking.
                ex = [i for i in string_pairs if i not in new_out]
                valid_n = len(d['pairs']) - len(ex)
                # valid_count.append({"model": target_model, "relation_type": d['relation_type'], "valid": f"{valid_n} ({round(100 * valid_n/len(d['pairs']))}%)"})
                valid_count.append({"model": target_model, "relation_type": d['relation_type'], "valid": 100 * valid_n / len(d['pairs'])})
                new_out = new_out + ex
                # Rank position is simply the 1-based index in the parsed order.
                maps = {x: n + 1 for n, x in enumerate(new_out)}
                prediction = [maps[i] for i in string_pairs]

                true_rank = d['ranks']
                tmp = pd.DataFrame([true_rank, prediction], index=['true', 'pred']).T
                cor = tmp.corr("spearman").values[0, 1]
                full_result.append({"model": target_model, "relation_type": d['relation_type'], "correlation": cor})

    # Correlation table (values in %), one row per model.
    df = pd.DataFrame(full_result)
    df = df.pivot(columns="relation_type", index="model", values="correlation")
    df['Avg'] = df.mean(1)
    df = (df * 100).round(1)

    # Validity table: share of parsable, non-duplicate pairs per cell.
    df_cnt = pd.DataFrame(valid_count)
    df_cnt = df_cnt.pivot(index='model', columns='relation_type')
    df_cnt['Avg'] = df_cnt.mean(1)
    df_cnt = df_cnt.round(1)

    # Merge the two tables into "corr (valid%)" cells with pretty labels.
    df = pd.DataFrame(df.astype(str).values + " (" + df_cnt.astype(str).values + "%)", columns=[pretty_name[c] if c in pretty_name else c for c in df.columns], index=df.index)
    df.index = [pretty_model[m] for m in df.index]
    print(df.to_latex())
    df = df.T
    # df.to_csv("experiments/results/chat/chat.csv")
experiments/baseline_lm_lc.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+
4
+ import pandas as pd
5
+ from datasets import load_dataset
6
+ from lmppl import EncoderDecoderLM, LM, OpenAI
7
+
8
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None)

# List-completion prompt prefix per relation type.
prompt_dict = {
    "is friend/ally of": "Complete the following list with examples of entities that are friends or allies",
    "is competitor/rival of": "Complete the following list with examples of entities that are competitors or rivals",
    "is known for": "Complete the following list with examples of what entities are known for",
    "is influenced by": "Complete the following list with examples of what has influenced different entities",
    "is similar to": "Complete the following list with examples of entities that are similar"
}
data = load_dataset("cardiffnlp/relentless_full", split="test")
full_result = []
# (model id, perplexity scorer class, batch size, pretty LaTeX label).
# NOTE(review): "\t" inside "\textsubscript" is a tab escape in Python;
# "\\textsubscript" was presumably intended for the LaTeX labels — confirm.
for lm, ppl_class, batch, pretty_name in [
    ("t5-small", EncoderDecoderLM, 256, "T5\textsubscript{SMALL}"),
    ("t5-base", EncoderDecoderLM, 128, "T5\textsubscript{BASE}"),
    ("t5-large", EncoderDecoderLM, 32, "T5\textsubscript{LARGE}"),
    ("t5-3b", EncoderDecoderLM, 1, "T5\textsubscript{XL}"),
    ("t5-11b", EncoderDecoderLM, 1, "T5\textsubscript{XXL}"),
    ("google/flan-t5-small", EncoderDecoderLM, 256, "Flan-T5\textsubscript{SMALL}"),
    ("google/flan-t5-base", EncoderDecoderLM, 128, "Flan-T5\textsubscript{BASE}"),
    ("google/flan-t5-large", EncoderDecoderLM, 32, "Flan-T5\textsubscript{LARGE}"),
    ("google/flan-t5-xl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XL}"),
    ("google/flan-t5-xxl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XXL}"),
    ("google/flan-ul2", EncoderDecoderLM, 1, "Flan-UL2"),
    ("facebook/opt-125m", LM, 256, "OPT\textsubscript{125M}"),
    ("facebook/opt-350m", LM, 128, "OPT\textsubscript{350M}"),
    ("facebook/opt-1.3b", LM, 1, "OPT\textsubscript{1.3B}"),
    ("facebook/opt-2.7b", LM, 1, "OPT\textsubscript{2.7B}"),
    ("facebook/opt-6.7b", LM, 1, "OPT\textsubscript{6.7B}"),
    ("facebook/opt-13b", LM, 1, "OPT\textsubscript{13B}"),
    ("facebook/opt-30b", LM, 1, "OPT\textsubscript{30B}"),
    ("facebook/opt-66b", LM, 1, "OPT\textsubscript{66B}"),
    ("facebook/opt-iml-1.3b", LM, 1, "OPT-IML\textsubscript{1.3B}"),
    ("facebook/opt-iml-30b", LM, 1, "OPT-IML\textsubscript{30B}"),
    ("facebook/opt-iml-max-1.3b", LM, 1, "OPT-IML\textsubscript{MAX-1.3B}"),
    ("facebook/opt-iml-max-30b", LM, 1, "OPT-IML\textsubscript{MAX-30B}"),
    ("davinci", OpenAI, None, "GPT-3\textsubscript{davinci}")
]:
    os.makedirs(f"experiments/results/lm_lc/{os.path.basename(lm)}", exist_ok=True)
    scorer = None
    for d in data:
        # One cached perplexity file per model x relation type.
        ppl_file = f"experiments/results/lm_lc/{os.path.basename(lm)}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl"

        if not os.path.exists(ppl_file):

            # Lazily instantiate the scorer only when scores are missing.
            if scorer is None:
                if ppl_class is OpenAI:
                    scorer = ppl_class(OPENAI_API_KEY, model=lm)
                else:
                    scorer = ppl_class(lm, device_map='auto', low_cpu_mem_usage=True, offload_folder=f"./offload_folder/{os.path.basename(lm)}")

            # Prompt = instruction + positive examples as a bullet list;
            # each candidate pair is appended as one more bullet.
            content = "\n".join([f'* ["{a}", "{b}"]' for a, b in d['positive_examples']])
            prompt_input = f"{prompt_dict[d['relation_type']]}:\n{content}"
            if ppl_class is LM:
                prompt_input = [f'{prompt_input}\n* ["{x}", "{y}"]' for x, y in d['pairs']]
                ppl = scorer.get_perplexity(input_texts=prompt_input, batch=batch)
                output = [{"perplexity": p, "input": i, "output": ""} for p, i in zip(ppl, prompt_input)]
            elif ppl_class is EncoderDecoderLM:
                # Encoder-decoder: prompt goes to the encoder, the candidate
                # bullet is the decoder target.
                prompt_output = [f'* ["{x}", "{y}"]' for x, y in d['pairs']]
                ppl = scorer.get_perplexity(input_texts=[prompt_input] * len(prompt_output), output_texts=prompt_output, batch=batch)
                output = [{"perplexity": p, "input": prompt_input, "output": o} for p, o in zip(ppl, prompt_output)]
            else:
                # OpenAI scorer: same concatenated form as decoder-only LMs,
                # but no batch argument.
                prompt_input = [f'{prompt_input}\n* ["{x}", "{y}"]' for x, y in d['pairs']]
                ppl = scorer.get_perplexity(input_texts=prompt_input)
                output = [{"perplexity": p, "input": i, "output": ""} for p, i in zip(ppl, prompt_input)]

            with open(ppl_file, "w") as f:
                f.write("\n".join([json.dumps(i) for i in output]))

        with open(ppl_file) as f:
            ppl = [json.loads(i)['perplexity'] for i in f.read().split("\n") if len(i) > 0]
        true_rank = d['ranks']
        assert len(true_rank) == len(ppl), f"Mismatch in number of examples: {len(true_rank)} vs {len(ppl)}"
        # Lower perplexity = better fit, so rank ascending by perplexity.
        rank_map = {p: n for n, p in enumerate(sorted(ppl), 1)}
        prediction = [rank_map[p] for p in ppl]
        tmp = pd.DataFrame([true_rank, prediction], index=['true', 'pred']).T
        cor = tmp.corr("spearman").values[0, 1]
        full_result.append({"model": pretty_name, "relation_type": d['relation_type'], "correlation": cor})

# Pivot to a model x relation table, preserving the original model order.
df = pd.DataFrame(full_result)
models = df['model'].unique()
print(df)
df = df.pivot(columns="relation_type", index="model", values="correlation")
df = df.T[models].T
df['average'] = df.mean(1)
df.to_csv("experiments/results/lm_lc/lm.csv")
df = (100 * df).round()
print(df.to_markdown())
print(df.to_latex(escape=False))
experiments/baseline_lm_lc_fewshot.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ from random import shuffle, seed
4
+ from itertools import permutations
5
+ import pandas as pd
6
+ from datasets import load_dataset
7
+ from lmppl import EncoderDecoderLM, LM, OpenAI
8
+
9
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None)
10
+ runs = 3
11
+ shots_num = [1, 3]
12
+ prompt_dict = {
13
+ "is friend/ally of": "entities that are friends or allies",
14
+ "is competitor/rival of": "entities that are competitors or rivals",
15
+ "is known for": "examples of what entities are known for",
16
+ "is influenced by": "what has influenced different entities",
17
+ "is similar to": "examples of entities that are similar"
18
+ }
19
+ data = load_dataset("cardiffnlp/relentless_full", split="test")
20
+ shots_ref = {}
21
+ for shots in shots_num:
22
+ all_perms = list(permutations(range(5), shots))
23
+ seed(42)
24
+ shuffle(all_perms)
25
+ shots_ref[shots] = all_perms
26
+
27
+
28
+ full_result = []
29
+ for lm, ppl_class, batch, pretty_name in [
30
+ ("google/flan-ul2", EncoderDecoderLM, 1, "Flan-UL2"),
31
+ ("google/flan-t5-xxl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XXL}"),
32
+ ("facebook/opt-13b", LM, 1, "OPT\textsubscript{13B}"),
33
+ ("davinci", OpenAI, None, "GPT-3\textsubscript{davinci}")
34
+ ]:
35
+ scorer = None
36
+ for shots in shots_num:
37
+ for s in range(runs):
38
+ os.makedirs(f"experiments/results/lm_lc_{shots}shots_{s}seed/{os.path.basename(lm)}", exist_ok=True)
39
+
40
+ for d in data:
41
+ ppl_file = f"experiments/results/lm_lc_{shots}shots_{s}seed/{os.path.basename(lm)}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl"
42
+
43
+ if not os.path.exists(ppl_file):
44
+
45
+ if scorer is None:
46
+ if ppl_class is OpenAI:
47
+ scorer = ppl_class(OPENAI_API_KEY, model=lm)
48
+ else:
49
+ scorer = ppl_class(lm, device_map='auto', low_cpu_mem_usage=True, offload_folder=f"./offload_folder/{os.path.basename(lm)}")
50
+
51
+ demo = [d['positive_examples'][h] for h in list(shots_ref[shots][s])]
52
+ # proto = ",".join([f'["{a}", "{b}"]' for a, b in demo])
53
+ content = "\n".join([f'* ["{a}", "{b}"]' for a, b in demo])
54
+ prompt_input = f"{prompt_dict[d['relation_type']]}:\n{content}"
55
+ if ppl_class is LM:
56
+ prompt_input = [f'{prompt_input}\n* ["{x}", "{y}"]' for x, y in d['pairs']]
57
+ ppl = scorer.get_perplexity(input_texts=prompt_input, batch=batch)
58
+ output = [{"perplexity": p, "input": i, "output": ""} for p, i in zip(ppl, prompt_input)]
59
+ elif ppl_class is EncoderDecoderLM:
60
+ prompt_output = [f'* ["{x}", "{y}"]' for x, y in d['pairs']]
61
+ ppl = scorer.get_perplexity(input_texts=[prompt_input] * len(prompt_output), output_texts=prompt_output, batch=batch)
62
+ output = [{"perplexity": p, "input": prompt_input, "output": o} for p, o in zip(ppl, prompt_output)]
63
+ else:
64
+ prompt_input = [f'{prompt_input}\n* ["{x}", "{y}"]' for x, y in d['pairs']]
65
+ ppl = scorer.get_perplexity(input_texts=prompt_input)
66
+ output = [{"perplexity": p, "input": i, "output": ""} for p, i in zip(ppl, prompt_input)]
67
+
68
+ with open(ppl_file, "w") as f:
69
+ f.write("\n".join([json.dumps(i) for i in output]))
70
+
71
+ with open(ppl_file) as f:
72
+ ppl = [json.loads(i)['perplexity'] for i in f.read().split("\n") if len(i) > 0]
73
+ true_rank = d['ranks']
74
+ assert len(true_rank) == len(ppl), f"Mismatch in number of examples: {len(true_rank)} vs {len(ppl)}"
75
+ rank_map = {p: n for n, p in enumerate(sorted(ppl), 1)}
76
+ prediction = [rank_map[p] for p in ppl]
77
+ tmp = pd.DataFrame([true_rank, prediction], index=['true', 'pred']).T
78
+ cor = tmp.corr("spearman").values[0, 1]
79
+ full_result.append({"model": pretty_name, "shot": shots, "seed": s, "relation_type": d['relation_type'], "correlation": cor})
80
+
81
+ df = pd.DataFrame(full_result)
82
+ models = df['model'].unique()
83
+ df = df.pivot(columns="relation_type", index=["model", "shot", "seed"], values="correlation")
84
+ df = df.T[models].T
85
+ df['average'] = df.mean(1)
86
+ df.to_csv(f"experiments/results/lm_lc_fewshots.csv")
87
+ df = (100 * df).round()
88
+ print(df)
89
+ print(df.to_markdown())
90
+ print(df.to_latex(escape=False))
experiments/baseline_lm_lc_zeroshot.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+
4
+ import pandas as pd
5
+ from datasets import load_dataset
6
+ from lmppl import EncoderDecoderLM, LM, OpenAI
7
+
8
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None)
9
+
10
+ prompt_dict = {
11
+ "is friend/ally of": "entities that are friends or allies",
12
+ "is competitor/rival of": "entities that are competitors or rivals",
13
+ "is known for": "examples of what entities are known for",
14
+ "is influenced by": "what has influenced different entities",
15
+ "is similar to": "examples of entities that are similar"
16
+ }
17
+ data = load_dataset("cardiffnlp/relentless_full", split="test")
18
+ full_result = []
19
+ for lm, ppl_class, batch, pretty_name in [
20
+ ("google/flan-ul2", EncoderDecoderLM, 1, "Flan-UL2"),
21
+ ("google/flan-t5-xxl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XXL}"),
22
+ ("facebook/opt-13b", LM, 1, "OPT\textsubscript{13B}"),
23
+ ("davinci", OpenAI, None, "GPT-3\textsubscript{davinci}")
24
+ ]:
25
+ os.makedirs(f"experiments/results/lm_lc_zeroshot/{os.path.basename(lm)}", exist_ok=True)
26
+ scorer = None
27
+ for d in data:
28
+ ppl_file = f"experiments/results/lm_lc_zeroshot/{os.path.basename(lm)}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl"
29
+
30
+ if not os.path.exists(ppl_file):
31
+
32
+ if scorer is None:
33
+ if ppl_class is OpenAI:
34
+ scorer = ppl_class(OPENAI_API_KEY, model=lm)
35
+ else:
36
+ scorer = ppl_class(lm, device_map='auto', low_cpu_mem_usage=True, offload_folder=f"./offload_folder/{os.path.basename(lm)}")
37
+
38
+ proto = ",".join([f'["{a}", "{b}"]' for a, b in d['positive_examples']])
39
+ prefix = f"Complete the following list with examples of {prompt_dict[d['relation_type']]}"
40
+ if ppl_class is LM or ppl_class is OpenAI:
41
+ prompt_input = [f'{prefix}\n * ["{x}", "{y}"]' for x, y in d['pairs']]
42
+ ppl = scorer.get_perplexity(input_texts=prompt_input, batch=batch)
43
+ output = [{"perplexity": p, "input": i, "output": ""} for p, i in zip(ppl, prompt_input)]
44
+ elif ppl_class is EncoderDecoderLM:
45
+ prompt_input = [f'* ["{x}", "{y}"]' for x, y in d['pairs']]
46
+ ppl = scorer.get_perplexity(input_texts=[prefix]*len(prompt_input), output_texts=prompt_input, batch=batch)
47
+ output = [{"perplexity": p, "input": prefix, "output": o} for p, o in zip(ppl, prompt_input)]
48
+ else:
49
+ raise ValueError(f"Unknown class {ppl_class}")
50
+
51
+ with open(ppl_file, "w") as f:
52
+ f.write("\n".join([json.dumps(i) for i in output]))
53
+
54
+ with open(ppl_file) as f:
55
+ ppl = [json.loads(i)['perplexity'] for i in f.read().split("\n") if len(i) > 0]
56
+ true_rank = d['ranks']
57
+ assert len(true_rank) == len(ppl), f"Mismatch in number of examples: {len(true_rank)} vs {len(ppl)}"
58
+ rank_map = {p: n for n, p in enumerate(sorted(ppl), 1)}
59
+ prediction = [rank_map[p] for p in ppl]
60
+ tmp = pd.DataFrame([true_rank, prediction], index=['true', 'pred']).T
61
+ cor = tmp.corr("spearman").values[0, 1]
62
+ full_result.append({"model": pretty_name, "relation_type": d['relation_type'], "correlation": cor})
63
+
64
+ df = pd.DataFrame(full_result)
65
+ models = df['model'].unique()
66
+ df = df.pivot(columns="relation_type", index="model", values="correlation")
67
+ df = df.T[models].T
68
+ df['average'] = df.mean(1)
69
+ df['shot'] = 0
70
+ df['seed'] = 0
71
+ df.to_csv("experiments/results/lm_lc_zeroshot.csv")
72
+ df = (100 * df).round()
73
+ print(df.to_markdown())
74
+ print(df.to_latex(escape=False))
experiments/baseline_lm_qa.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+
4
+ import pandas as pd
5
+ from datasets import load_dataset
6
+ from lmppl import EncoderDecoderLM, LM, OpenAI
7
+
8
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None)
9
+
10
+ prompt_dict = {
11
+ "is friend/ally of": "entities that are friends or allies",
12
+ "is competitor/rival of": "entities that are competitors or rivals",
13
+ "is known for": "examples of what entities are known for",
14
+ "is influenced by": "what has influenced different entities",
15
+ "is similar to": "examples of entities that are similar"
16
+ }
17
+ data = load_dataset("cardiffnlp/relentless_full", split="test")
18
+ full_result = []
19
+ for lm, ppl_class, batch, pretty_name in [
20
+ ("t5-small", EncoderDecoderLM, 256, "T5\textsubscript{SMALL}"),
21
+ ("t5-base", EncoderDecoderLM, 128, "T5\textsubscript{BASE}"),
22
+ ("t5-large", EncoderDecoderLM, 32, "T5\textsubscript{LARGE}"),
23
+ ("t5-3b", EncoderDecoderLM, 1, "T5\textsubscript{XL}"),
24
+ ("t5-11b", EncoderDecoderLM, 1, "T5\textsubscript{XXL}"),
25
+ ("google/flan-t5-small", EncoderDecoderLM, 256, "Flan-T5\textsubscript{SMALL}"),
26
+ ("google/flan-t5-base", EncoderDecoderLM, 128, "Flan-T5\textsubscript{BASE}"),
27
+ ("google/flan-t5-large", EncoderDecoderLM, 32, "Flan-T5\textsubscript{LARGE}"),
28
+ ("google/flan-t5-xl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XL}"),
29
+ ("google/flan-t5-xxl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XXL}"),
30
+ ("google/flan-ul2", EncoderDecoderLM, 1, "Flan-UL2"),
31
+ ("facebook/opt-125m", LM, 256, "OPT\textsubscript{125M}"),
32
+ ("facebook/opt-350m", LM, 128, "OPT\textsubscript{350M}"),
33
+ ("facebook/opt-1.3b", LM, 1, "OPT\textsubscript{1.3B}"),
34
+ ("facebook/opt-2.7b", LM, 1, "OPT\textsubscript{2.7B}"),
35
+ ("facebook/opt-6.7b", LM, 1, "OPT\textsubscript{6.7B}"),
36
+ ("facebook/opt-13b", LM, 1, "OPT\textsubscript{13B}"),
37
+ ("facebook/opt-30b", LM, 1, "OPT\textsubscript{30B}"),
38
+ ("facebook/opt-66b", LM, 1, "OPT\textsubscript{66B}"),
39
+ ("facebook/opt-iml-1.3b", LM, 1, "OPT-IML\textsubscript{1.3B}"),
40
+ ("facebook/opt-iml-30b", LM, 1, "OPT-IML\textsubscript{30B}"),
41
+ ("facebook/opt-iml-max-1.3b", LM, 1, "OPT-IML\textsubscript{MAX-1.3B}"),
42
+ ("facebook/opt-iml-max-30b", LM, 1, "OPT-IML\textsubscript{MAX-30B}"),
43
+ ("davinci", OpenAI, None, "GPT-3\textsubscript{davinci}")
44
+ ]:
45
+ os.makedirs(f"experiments/results/lm_qa/{os.path.basename(lm)}", exist_ok=True)
46
+ scorer = None
47
+ for d in data:
48
+ ppl_file = f"experiments/results/lm_qa/{os.path.basename(lm)}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl"
49
+
50
+ if not os.path.exists(ppl_file):
51
+
52
+ if scorer is None:
53
+ if ppl_class is OpenAI:
54
+ scorer = ppl_class(OPENAI_API_KEY, model=lm)
55
+ else:
56
+ scorer = ppl_class(lm, device_map='auto', low_cpu_mem_usage=True, offload_folder=f"./offload_folder/{os.path.basename(lm)}")
57
+
58
+ proto = ",".join([f'["{a}", "{b}"]' for a, b in d['positive_examples']])
59
+ prefix = f"Answer the question by yes or no. We know that {proto} are examples of {prompt_dict[d['relation_type']]}."
60
+ if ppl_class is LM or ppl_class is OpenAI:
61
+ prompt_input = [f'{prefix} Are ["{x}", "{y}"] {prompt_dict[d["relation_type"]]} as well?\n yes' for x, y in d['pairs']]
62
+ ppl = scorer.get_perplexity(input_texts=prompt_input, batch=batch)
63
+ output = [{"perplexity": p, "input": i, "output": ""} for p, i in zip(ppl, prompt_input)]
64
+ elif ppl_class is EncoderDecoderLM:
65
+ prompt_input = [f'{prefix} Are ["{x}", "{y}"] {prompt_dict[d["relation_type"]]} as well?' for x, y in d['pairs']]
66
+ ppl = scorer.get_perplexity(input_texts=prompt_input, output_texts=["yes"] * len(prompt_input), batch=batch)
67
+ output = [{"perplexity": p, "input": o, "output": "yes"} for p, o in zip(ppl, prompt_input)]
68
+ else:
69
+ raise ValueError(f"Unknown class {ppl_class}")
70
+
71
+ with open(ppl_file, "w") as f:
72
+ f.write("\n".join([json.dumps(i) for i in output]))
73
+
74
+ with open(ppl_file) as f:
75
+ ppl = [json.loads(i)['perplexity'] for i in f.read().split("\n") if len(i) > 0]
76
+ true_rank = d['ranks']
77
+ assert len(true_rank) == len(ppl), f"Mismatch in number of examples: {len(true_rank)} vs {len(ppl)}"
78
+ rank_map = {p: n for n, p in enumerate(sorted(ppl), 1)}
79
+ prediction = [rank_map[p] for p in ppl]
80
+ tmp = pd.DataFrame([true_rank, prediction], index=['true', 'pred']).T
81
+ cor = tmp.corr("spearman").values[0, 1]
82
+ full_result.append({"model": pretty_name, "relation_type": d['relation_type'], "correlation": cor})
83
+
84
+ df = pd.DataFrame(full_result)
85
+ models = df['model'].unique()
86
+ df = df.pivot(columns="relation_type", index="model", values="correlation")
87
+ df = df.T[models].T
88
+ df['average'] = df.mean(1)
89
+ df.to_csv("experiments/results/lm_qa/lm.csv")
90
+ df = (100 * df).round()
91
+ print(df.to_markdown())
92
+ print(df.to_latex(escape=False))
experiments/baseline_lm_qa_fewshot.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ from random import shuffle, seed
4
+ from itertools import permutations
5
+ import pandas as pd
6
+ from datasets import load_dataset
7
+ from lmppl import EncoderDecoderLM, LM, OpenAI
8
+
9
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None)
10
+ runs = 3
11
+ shots_num = [1, 3]
12
+ prompt_dict = {
13
+ "is friend/ally of": "entities that are friends or allies",
14
+ "is competitor/rival of": "entities that are competitors or rivals",
15
+ "is known for": "examples of what entities are known for",
16
+ "is influenced by": "what has influenced different entities",
17
+ "is similar to": "examples of entities that are similar"
18
+ }
19
+ data = load_dataset("cardiffnlp/relentless_full", split="test")
20
+ shots_ref = {}
21
+ for shots in shots_num:
22
+ all_perms = list(permutations(range(5), shots))
23
+ seed(42)
24
+ shuffle(all_perms)
25
+ shots_ref[shots] = all_perms
26
+
27
+ full_result = []
28
+ for lm, ppl_class, batch, pretty_name in [
29
+ ("google/flan-ul2", EncoderDecoderLM, 1, "Flan-UL2"),
30
+ ("google/flan-t5-xxl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XXL}"),
31
+ ("facebook/opt-13b", LM, 1, "OPT\textsubscript{13B}"),
32
+ ("davinci", OpenAI, None, "GPT-3\textsubscript{davinci}")
33
+ ]:
34
+ scorer = None
35
+ for shots in shots_num:
36
+ for s in range(runs):
37
+ os.makedirs(f"experiments/results/lm_qa_{shots}shots_{s}seed/{os.path.basename(lm)}", exist_ok=True)
38
+ for d in data:
39
+ ppl_file = f"experiments/results/lm_qa_{shots}shots_{s}seed/{os.path.basename(lm)}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl"
40
+
41
+ if not os.path.exists(ppl_file):
42
+ if scorer is None:
43
+ if ppl_class is OpenAI:
44
+ scorer = ppl_class(OPENAI_API_KEY, model=lm)
45
+ else:
46
+ scorer = ppl_class(lm, device_map='auto', low_cpu_mem_usage=True, offload_folder=f"./offload_folder/{os.path.basename(lm)}")
47
+ demo = [d['positive_examples'][h] for h in list(shots_ref[shots][s])]
48
+ proto = ",".join([f'["{a}", "{b}"]' for a, b in demo])
49
+ prefix = f"Answer the question by yes or no. We know that {proto} are examples of {prompt_dict[d['relation_type']]}."
50
+ if ppl_class is LM or ppl_class is OpenAI:
51
+ prompt_input = [f'{prefix} Are ["{x}", "{y}"] {prompt_dict[d["relation_type"]]} as well?\n yes' for x, y in d['pairs']]
52
+ ppl = scorer.get_perplexity(input_texts=prompt_input, batch=batch)
53
+ output = [{"perplexity": p, "input": i, "output": ""} for p, i in zip(ppl, prompt_input)]
54
+ elif ppl_class is EncoderDecoderLM:
55
+ prompt_input = [f'{prefix} Are ["{x}", "{y}"] {prompt_dict[d["relation_type"]]} as well?' for x, y in d['pairs']]
56
+ ppl = scorer.get_perplexity(input_texts=prompt_input, output_texts=["yes"] * len(prompt_input), batch=batch)
57
+ output = [{"perplexity": p, "input": o, "output": "yes"} for p, o in zip(ppl, prompt_input)]
58
+ else:
59
+ raise ValueError(f"Unknown class {ppl_class}")
60
+
61
+ with open(ppl_file, "w") as f:
62
+ f.write("\n".join([json.dumps(i) for i in output]))
63
+
64
+ with open(ppl_file) as f:
65
+ ppl = [json.loads(i)['perplexity'] for i in f.read().split("\n") if len(i) > 0]
66
+ true_rank = d['ranks']
67
+ assert len(true_rank) == len(ppl), f"Mismatch in number of examples: {len(true_rank)} vs {len(ppl)}"
68
+ rank_map = {p: n for n, p in enumerate(sorted(ppl), 1)}
69
+ prediction = [rank_map[p] for p in ppl]
70
+ tmp = pd.DataFrame([true_rank, prediction], index=['true', 'pred']).T
71
+ cor = tmp.corr("spearman").values[0, 1]
72
+ full_result.append({"model": pretty_name, "shot": shots, "seed": s, "relation_type": d['relation_type'], "correlation": cor})
73
+
74
+ df = pd.DataFrame(full_result)
75
+ models = df['model'].unique()
76
+ df = df.pivot(columns="relation_type", index=["model", "shot", "seed"], values="correlation")
77
+ df = df.T[models].T
78
+ df['average'] = df.mean(1)
79
+ df.to_csv(f"experiments/results/lm_qa_fewshots.csv")
80
+ df = (100 * df).round()
81
+ print(df)
82
+ print(df.to_markdown())
83
+ print(df.to_latex(escape=False))
experiments/baseline_lm_qa_zeroshot.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+
4
+ import pandas as pd
5
+ from datasets import load_dataset
6
+ from lmppl import EncoderDecoderLM, LM, OpenAI
7
+
8
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None)
9
+
10
+ prompt_dict = {
11
+ "is friend/ally of": "entities that are friends or allies",
12
+ "is competitor/rival of": "entities that are competitors or rivals",
13
+ "is known for": "examples of what entities are known for",
14
+ "is influenced by": "what has influenced different entities",
15
+ "is similar to": "examples of entities that are similar"
16
+ }
17
+ data = load_dataset("cardiffnlp/relentless_full", split="test")
18
+ full_result = []
19
+ for lm, ppl_class, batch, pretty_name in [
20
+ ("google/flan-ul2", EncoderDecoderLM, 1, "Flan-UL2"),
21
+ ("google/flan-t5-xxl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XXL}"),
22
+ ("facebook/opt-13b", LM, 1, "OPT\textsubscript{13B}"),
23
+ ("davinci", OpenAI, None, "GPT-3\textsubscript{davinci}")
24
+ ]:
25
+ os.makedirs(f"experiments/results/lm_qa_zeroshot/{os.path.basename(lm)}", exist_ok=True)
26
+ scorer = None
27
+ for d in data:
28
+ ppl_file = f"experiments/results/lm_qa_zeroshot/{os.path.basename(lm)}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl"
29
+
30
+ if not os.path.exists(ppl_file):
31
+
32
+ if scorer is None:
33
+ if ppl_class is OpenAI:
34
+ scorer = ppl_class(OPENAI_API_KEY, model=lm)
35
+ else:
36
+ scorer = ppl_class(lm, device_map='auto', low_cpu_mem_usage=True, offload_folder=f"./offload_folder/{os.path.basename(lm)}")
37
+
38
+ proto = ",".join([f'["{a}", "{b}"]' for a, b in d['positive_examples']])
39
+ prefix = f"Answer the question by yes or no."
40
+ # prompt_input = f"{prompt_dict[d['relation_type']]}:\n{content}"
41
+ if ppl_class is LM or ppl_class is OpenAI:
42
+ prompt_input = [f'{prefix} Are ["{x}", "{y}"] {prompt_dict[d["relation_type"]]} as well?\n yes' for x, y in d['pairs']]
43
+ ppl = scorer.get_perplexity(input_texts=prompt_input, batch=batch)
44
+ output = [{"perplexity": p, "input": i, "output": ""} for p, i in zip(ppl, prompt_input)]
45
+ elif ppl_class is EncoderDecoderLM:
46
+ prompt_input = [f'{prefix} Are ["{x}", "{y}"] {prompt_dict[d["relation_type"]]} as well?' for x, y in d['pairs']]
47
+ ppl = scorer.get_perplexity(input_texts=prompt_input, output_texts=["yes"] * len(prompt_input), batch=batch)
48
+ output = [{"perplexity": p, "input": o, "output": "yes"} for p, o in zip(ppl, prompt_input)]
49
+ else:
50
+ raise ValueError(f"Unknown class {ppl_class}")
51
+
52
+ with open(ppl_file, "w") as f:
53
+ f.write("\n".join([json.dumps(i) for i in output]))
54
+
55
+ with open(ppl_file) as f:
56
+ ppl = [json.loads(i)['perplexity'] for i in f.read().split("\n") if len(i) > 0]
57
+ true_rank = d['ranks']
58
+ assert len(true_rank) == len(ppl), f"Mismatch in number of examples: {len(true_rank)} vs {len(ppl)}"
59
+ rank_map = {p: n for n, p in enumerate(sorted(ppl), 1)}
60
+ prediction = [rank_map[p] for p in ppl]
61
+ tmp = pd.DataFrame([true_rank, prediction], index=['true', 'pred']).T
62
+ cor = tmp.corr("spearman").values[0, 1]
63
+ full_result.append({"model": pretty_name, "relation_type": d['relation_type'], "correlation": cor})
64
+
65
+ df = pd.DataFrame(full_result)
66
+ models = df['model'].unique()
67
+ df = df.pivot(columns="relation_type", index="model", values="correlation")
68
+ df = df.T[models].T
69
+ df['average'] = df.mean(1)
70
+ df['shot'] = 0
71
+ df['seed'] = 0
72
+ df.to_csv("experiments/results/lm_qa_zeroshot.csv")
73
+ df = (100 * df).round()
74
+ print(df.to_markdown())
75
+ print(df.to_latex(escape=False))
experiments/baseline_oracle.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from statistics import mean
2
+ import pandas as pd
3
+ from datasets import load_dataset
4
+
5
+ data = load_dataset("cardiffnlp/relentless_full", split='test')
6
+
7
+ cor = []
8
+ for d in data:
9
+ true_rank = sorted(d['ranks'])
10
+ corr_tmp = []
11
+ for a in range(7):
12
+ single_pred = [x[a] for x in d['scores_all']]
13
+ rank_map = {p: n for n, p in enumerate(sorted(single_pred), 1)}
14
+ single_pred = [rank_map[p] for p in single_pred]
15
+
16
+ pred = [mean(_x for n, _x in enumerate(x) if n != a) for x in d['scores_all']]
17
+ rank_map = {p: n for n, p in enumerate(sorted(pred), 1)}
18
+ pred = [rank_map[p] for p in pred]
19
+
20
+ corr_tmp.append(pd.DataFrame([single_pred, pred]).T.corr("spearman").values[1][0])
21
+ cor.append({"relation": d['relation_type'], "Avg.\ of others": mean(corr_tmp)})
22
+
23
+ df = pd.DataFrame(cor)
24
+ df.index = df.pop("relation").values
25
+ df = df.sort_index()
26
+ df = df.T
27
+ df['average'] = df.mean(axis=1).round(1)
28
+ print(df.to_markdown())
29
+ print(df.to_latex())
30
+ df.to_csv("experiments/results/oracle.csv")
experiments/baseline_relbert.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import pandas as pd
4
+ from statistics import mean
5
+ from datasets import load_dataset
6
+ from relbert import RelBERT
7
+
8
+
9
+ def cosine_similarity(a, b):
10
+ norm_a = sum(map(lambda x: x * x, a)) ** 0.5
11
+ norm_b = sum(map(lambda x: x * x, b)) ** 0.5
12
+ return sum(map(lambda x: x[0] * x[1], zip(a, b))) / (norm_a * norm_b)
13
+
14
+
15
+ # load dataset
16
+ data = load_dataset("cardiffnlp/relentless_full", split="test")
17
+ full_result = []
18
+
19
+ for lm in ['base', 'large']:
20
+ os.makedirs(f"./experiments/results/relbert/relbert-roberta-{lm}", exist_ok=True)
21
+ scorer = None
22
+ for d in data:
23
+ ppl_file = f"experiments/results/relbert/relbert-roberta-{lm}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl"
24
+ anchor_embeddings = [(a, b) for a, b in d['positive_examples']]
25
+ option_embeddings = [(x, y) for x, y in d['pairs']]
26
+
27
+ if not os.path.exists(ppl_file):
28
+
29
+ if scorer is None:
30
+ scorer = RelBERT(f"relbert/relbert-roberta-{lm}")
31
+ anchor_embeddings = scorer.get_embedding(d['positive_examples'])
32
+ option_embeddings = scorer.get_embedding(d['pairs'], batch_size=64)
33
+ similarity = [[cosine_similarity(a, b) for b in anchor_embeddings] for a in option_embeddings]
34
+ output = [{"similarity": s} for s in similarity]
35
+ with open(ppl_file, "w") as f:
36
+ f.write("\n".join([json.dumps(i) for i in output]))
37
+
38
+ with open(ppl_file) as f:
39
+ similarity = [json.loads(i)['similarity'] for i in f.read().split("\n") if len(i) > 0]
40
+
41
+ true_rank = d['ranks']
42
+ assert len(true_rank) == len(similarity), f"Mismatch in number of examples: {len(true_rank)} vs {len(similarity)}"
43
+ prediction = [max(s) for s in similarity]
44
+ rank_map = {p: n for n, p in enumerate(sorted(prediction, reverse=True), 1)}
45
+ prediction_max = [rank_map[p] for p in prediction]
46
+
47
+ prediction = [min(s) for s in similarity]
48
+ rank_map = {p: n for n, p in enumerate(sorted(prediction, reverse=True), 1)}
49
+ prediction_min = [rank_map[p] for p in prediction]
50
+
51
+ prediction = [mean(s) for s in similarity]
52
+ rank_map = {p: n for n, p in enumerate(sorted(prediction, reverse=True), 1)}
53
+ prediction_mean = [rank_map[p] for p in prediction]
54
+
55
+ tmp = pd.DataFrame([true_rank, prediction_max, prediction_min, prediction_mean]).T
56
+ cor_max = tmp.corr("spearman").values[0, 1]
57
+ cor_min = tmp.corr("spearman").values[0, 2]
58
+ cor_mean = tmp.corr("spearman").values[0, 3]
59
+ full_result.append({"model": f"RelBERT\textsubscript{'{'}{lm.upper()}{'}'}", "relation_type": d['relation_type'], "correlation": cor_max})
60
+ # full_result.append({"model": f"relbert-roberta-{lm} (min)", "relation_type": d['relation_type'], "correlation": cor_min})
61
+ # full_result.append({"model": f"relbert-roberta-{lm} (mean)", "relation_type": d['relation_type'], "correlation": cor_mean})
62
+
63
+ df = pd.DataFrame(full_result)
64
+ df = df.pivot(columns="relation_type", index="model", values="correlation")
65
+ df['average'] = df.mean(1)
66
+ df.to_csv("experiments/results/relbert/relbert.csv")
67
+ df = (100 * df).round()
68
+ print(df.to_markdown())
69
+ print(df.to_latex())
experiments/baseline_relbert_misc.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import pandas as pd
4
+ from statistics import mean
5
+ from datasets import load_dataset
6
+ from relbert import RelBERT
7
+
8
+
9
+ def cosine_similarity(a, b):
10
+ norm_a = sum(map(lambda x: x * x, a)) ** 0.5
11
+ norm_b = sum(map(lambda x: x * x, b)) ** 0.5
12
+ return sum(map(lambda x: x[0] * x[1], zip(a, b))) / (norm_a * norm_b)
13
+
14
+
15
+ # load dataset
16
+ data = load_dataset("cardiffnlp/relentless_full", split="test")
17
+ full_result = []
18
+
19
+ for lm in ['relbert-roberta-base-nce-t-rex', 'relbert-roberta-base-nce-nell']:
20
+ os.makedirs(f"./experiments/results/relbert/{lm}", exist_ok=True)
21
+ scorer = None
22
+ for d in data:
23
+ ppl_file = f"experiments/results/relbert/{lm}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl"
24
+ anchor_embeddings = [(a, b) for a, b in d['positive_examples']]
25
+ option_embeddings = [(x, y) for x, y in d['pairs']]
26
+
27
+ if not os.path.exists(ppl_file):
28
+
29
+ if scorer is None:
30
+ scorer = RelBERT(f"relbert/{lm}")
31
+ anchor_embeddings = scorer.get_embedding(d['positive_examples'])
32
+ option_embeddings = scorer.get_embedding(d['pairs'], batch_size=64)
33
+ similarity = [[cosine_similarity(a, b) for b in anchor_embeddings] for a in option_embeddings]
34
+ output = [{"similarity": s} for s in similarity]
35
+ with open(ppl_file, "w") as f:
36
+ f.write("\n".join([json.dumps(i) for i in output]))
37
+
38
+ with open(ppl_file) as f:
39
+ similarity = [json.loads(i)['similarity'] for i in f.read().split("\n") if len(i) > 0]
40
+
41
+ true_rank = d['ranks']
42
+ assert len(true_rank) == len(similarity), f"Mismatch in number of examples: {len(true_rank)} vs {len(similarity)}"
43
+ prediction = [max(s) for s in similarity]
44
+ rank_map = {p: n for n, p in enumerate(sorted(prediction, reverse=True), 1)}
45
+ prediction_max = [rank_map[p] for p in prediction]
46
+
47
+ prediction = [min(s) for s in similarity]
48
+ rank_map = {p: n for n, p in enumerate(sorted(prediction, reverse=True), 1)}
49
+ prediction_min = [rank_map[p] for p in prediction]
50
+
51
+ prediction = [mean(s) for s in similarity]
52
+ rank_map = {p: n for n, p in enumerate(sorted(prediction, reverse=True), 1)}
53
+ prediction_mean = [rank_map[p] for p in prediction]
54
+
55
+ tmp = pd.DataFrame([true_rank, prediction_max, prediction_min, prediction_mean]).T
56
+ cor_max = tmp.corr("spearman").values[0, 1]
57
+ cor_min = tmp.corr("spearman").values[0, 2]
58
+ cor_mean = tmp.corr("spearman").values[0, 3]
59
+ full_result.append({"model": f"RelBERT\textsubscript{'{'}{lm.upper()}{'}'}", "relation_type": d['relation_type'], "correlation": cor_max})
60
+
61
+ df = pd.DataFrame(full_result)
62
+ df = df.pivot(columns="relation_type", index="model", values="correlation")
63
+ df['average'] = df.mean(1)
64
+ df.to_csv("experiments/results/relbert/relbert_misc.csv")
65
+ df = (100 * df).round()
66
+ print(df.to_markdown())
67
+ print(df.to_latex())
experiments/baseline_validation_lc.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+
4
+ import pandas as pd
5
+ from datasets import load_dataset
6
+ from lmppl import EncoderDecoderLM, LM, OpenAI
7
+
8
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None)
9
+
10
+ prompt_dict = {
11
+ "is friend/ally of": "Complete the following list with examples of entities that are friends or allies",
12
+ "is competitor/rival of": "Complete the following list with examples of entities that are competitors or rivals",
13
+ "is known for": "Complete the following list with examples of what entities are known for",
14
+ "is influenced by": "Complete the following list with examples of what has influenced different entities",
15
+ "is similar to": "Complete the following list with examples of entities that are similar"
16
+ }
17
+ data = load_dataset("cardiffnlp/relentless_full", split="validation")
18
+ full_result = []
19
+ for lm, ppl_class, batch, pretty_name in [
20
+ ("google/flan-t5-xxl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XXL}"),
21
+ ("google/flan-ul2", EncoderDecoderLM, 1, "Flan-UL2"),
22
+ ("facebook/opt-13b", LM, 1, "OPT\textsubscript{13B}"),
23
+ ("davinci", OpenAI, None, "GPT-3\textsubscript{davinci}")
24
+ ]:
25
+ os.makedirs(f"experiments/results_validation/lm_lc/{os.path.basename(lm)}", exist_ok=True)
26
+ scorer = None
27
+ for d in data:
28
+ ppl_file = f"experiments/results_validation/lm_lc/{os.path.basename(lm)}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl"
29
+
30
+ if not os.path.exists(ppl_file):
31
+
32
+ if scorer is None:
33
+ if ppl_class is OpenAI:
34
+ scorer = ppl_class(OPENAI_API_KEY, model=lm)
35
+ else:
36
+ scorer = ppl_class(lm, device_map='auto', low_cpu_mem_usage=True, offload_folder=f"./offload_folder/{os.path.basename(lm)}")
37
+
38
+ content = "\n".join([f'* ["{a}", "{b}"]' for a, b in d['positive_examples']])
39
+ prompt_input = f"{prompt_dict[d['relation_type']]}:\n{content}"
40
+ if ppl_class is LM:
41
+ prompt_input = [f'{prompt_input}\n* ["{x}", "{y}"]' for x, y in d['pairs']]
42
+ ppl = scorer.get_perplexity(input_texts=prompt_input, batch=batch)
43
+ output = [{"perplexity": p, "input": i, "output": ""} for p, i in zip(ppl, prompt_input)]
44
+ elif ppl_class is EncoderDecoderLM:
45
+ prompt_output = [f'* ["{x}", "{y}"]' for x, y in d['pairs']]
46
+ ppl = scorer.get_perplexity(input_texts=[prompt_input] * len(prompt_output), output_texts=prompt_output, batch=batch)
47
+ output = [{"perplexity": p, "input": prompt_input, "output": o} for p, o in zip(ppl, prompt_output)]
48
+ else:
49
+ prompt_input = [f'{prompt_input}\n* ["{x}", "{y}"]' for x, y in d['pairs']]
50
+ ppl = scorer.get_perplexity(input_texts=prompt_input)
51
+ output = [{"perplexity": p, "input": i, "output": ""} for p, i in zip(ppl, prompt_input)]
52
+
53
+ with open(ppl_file, "w") as f:
54
+ f.write("\n".join([json.dumps(i) for i in output]))
55
+
56
+ with open(ppl_file) as f:
57
+ ppl = [json.loads(i)['perplexity'] for i in f.read().split("\n") if len(i) > 0]
58
+ true_rank = d['ranks']
59
+ assert len(true_rank) == len(ppl), f"Mismatch in number of examples: {len(true_rank)} vs {len(ppl)}"
60
+ rank_map = {p: n for n, p in enumerate(sorted(ppl), 1)}
61
+ prediction = [rank_map[p] for p in ppl]
62
+ tmp = pd.DataFrame([true_rank, prediction], index=['true', 'pred']).T
63
+ cor = tmp.corr("spearman").values[0, 1]
64
+ full_result.append({"model": pretty_name, "relation_type": d['relation_type'], "correlation": cor})
65
+
66
+ df = pd.DataFrame(full_result)
67
+ models = df['model'].unique()
68
+ print(df)
69
+ df = df.pivot(columns="relation_type", index="model", values="correlation")
70
+ df = df.T[models].T
71
+ df['average'] = df.mean(1)
72
+ df.to_csv("experiments/results_validation/lm_lc/lm.csv")
73
+ df = (100 * df).round()
74
+ print(df.to_markdown())
75
+ print(df.to_latex(escape=False))
experiments/baseline_validation_qa.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import json

import pandas as pd
from datasets import load_dataset
from lmppl import EncoderDecoderLM, LM, OpenAI

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None)

# Human-readable description of each relation type, spliced into the QA prompt.
prompt_dict = {
    "is friend/ally of": "entities that are friends or allies",
    "is competitor/rival of": "entities that are competitors or rivals",
    "is known for": "examples of what entities are known for",
    "is influenced by": "what has influenced different entities",
    "is similar to": "examples of entities that are similar"
}
data = load_dataset("cardiffnlp/relentless_full", split="validation")
full_result = []
# NOTE: pretty names are LaTeX; backslashes must be escaped ("\\t" would
# otherwise be interpreted by Python as a TAB character, corrupting
# "\textsubscript{...}" in the CSV/markdown/LaTeX output below.
for lm, ppl_class, batch, pretty_name in [
        ("google/flan-t5-xxl", EncoderDecoderLM, 1, "Flan-T5\\textsubscript{XXL}"),
        ("google/flan-ul2", EncoderDecoderLM, 1, "Flan-UL2"),
        ("facebook/opt-13b", LM, 1, "OPT\\textsubscript{13B}"),
        ("davinci", OpenAI, None, "GPT-3\\textsubscript{davinci}")]:
    os.makedirs(f"experiments/results_validation/lm_qa/{os.path.basename(lm)}", exist_ok=True)
    # Lazily instantiated so cached runs (ppl files already on disk) never load the model.
    scorer = None
    for d in data:
        ppl_file = f"experiments/results_validation/lm_qa/{os.path.basename(lm)}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl"

        if not os.path.exists(ppl_file):

            if scorer is None:
                if ppl_class is OpenAI:
                    scorer = ppl_class(OPENAI_API_KEY, model=lm)
                else:
                    scorer = ppl_class(lm, device_map='auto', low_cpu_mem_usage=True, offload_folder=f"./offload_folder/{os.path.basename(lm)}")

            # Few-shot prefix built from the relation's positive examples.
            proto = ",".join([f'["{a}", "{b}"]' for a, b in d['positive_examples']])
            prefix = f"Answer the question by yes or no. We know that {proto} are examples of {prompt_dict[d['relation_type']]}."
            if ppl_class is LM or ppl_class is OpenAI:
                # Decoder-only models: score the full prompt with " yes" appended.
                prompt_input = [f'{prefix} Are ["{x}", "{y}"] {prompt_dict[d["relation_type"]]} as well?\n yes' for x, y in d['pairs']]
                ppl = scorer.get_perplexity(input_texts=prompt_input, batch=batch)
                output = [{"perplexity": p, "input": i, "output": ""} for p, i in zip(ppl, prompt_input)]
            elif ppl_class is EncoderDecoderLM:
                # Encoder-decoder models: question as input, "yes" as the target text.
                prompt_input = [f'{prefix} Are ["{x}", "{y}"] {prompt_dict[d["relation_type"]]} as well?' for x, y in d['pairs']]
                ppl = scorer.get_perplexity(input_texts=prompt_input, output_texts=["yes"] * len(prompt_input), batch=batch)
                output = [{"perplexity": p, "input": i, "output": "yes"} for p, i in zip(ppl, prompt_input)]
            else:
                raise ValueError(f"Unknown class {ppl_class}")

            with open(ppl_file, "w") as f:
                f.write("\n".join([json.dumps(i) for i in output]))

        # Re-read the cached perplexities and rank-correlate against the gold ranks.
        with open(ppl_file) as f:
            ppl = [json.loads(i)['perplexity'] for i in f.read().split("\n") if len(i) > 0]
        true_rank = d['ranks']
        assert len(true_rank) == len(ppl), f"Mismatch in number of examples: {len(true_rank)} vs {len(ppl)}"
        # Lower perplexity -> better rank (1-indexed).
        rank_map = {p: n for n, p in enumerate(sorted(ppl), 1)}
        prediction = [rank_map[p] for p in ppl]
        tmp = pd.DataFrame([true_rank, prediction], index=['true', 'pred']).T
        cor = tmp.corr("spearman").values[0, 1]
        full_result.append({"model": pretty_name, "relation_type": d['relation_type'], "correlation": cor})

df = pd.DataFrame(full_result)
models = df['model'].unique()
df = df.pivot(columns="relation_type", index="model", values="correlation")
df = df.T[models].T  # restore the original model order lost by pivot's sorting
df['average'] = df.mean(1)
df.to_csv("experiments/results_validation/lm_qa/lm.csv")
df = (100 * df).round()
print(df.to_markdown())
print(df.to_latex(escape=False))
experiments/figures/fewshots/lc.average.fewshot.landscape.png ADDED

Git LFS Details

  • SHA256: d654aa6d034bc6c68c24b88567a8f50660f74e5ae089dffc5d91c222dcd3d3d4
  • Pointer size: 131 Bytes
  • Size of remote file: 174 kB
experiments/figures/fewshots/lc.average.fewshot.png ADDED

Git LFS Details

  • SHA256: 69a53cf98b5af4656e6bc55ec09e0263d936a0c6ddef575c38d208f07017c5cc
  • Pointer size: 131 Bytes
  • Size of remote file: 159 kB
experiments/figures/fewshots/lc.is_competitor-rival_of.fewshot.landscape.png ADDED

Git LFS Details

  • SHA256: 5c8b4e8028af55bacaba21cd03f3313605d84001e32f84941096ad174a37eb54
  • Pointer size: 131 Bytes
  • Size of remote file: 174 kB
experiments/figures/fewshots/lc.is_competitor-rival_of.fewshot.png ADDED

Git LFS Details

  • SHA256: d154d211e817e28c19b70c543b79a07d86b27ed22d40f63d09f79ec93178a53f
  • Pointer size: 131 Bytes
  • Size of remote file: 159 kB
experiments/figures/fewshots/lc.is_friend-ally_of.fewshot.landscape.png ADDED

Git LFS Details

  • SHA256: 1e7b63efe350bc95cc4a16b743aa8920c9e25818ce595c427d7a2d0297c3b9ca
  • Pointer size: 131 Bytes
  • Size of remote file: 167 kB