Datasets:

Languages:
English
Size:
< 1K
ArXiv:
Libraries:
Datasets
License:
asahi417 committed on
Commit
7f5f9d0
1 Parent(s): c3de420

fix the logit overflow caused by pad_token https://github.com/asahi417/lmppl/issues/5

Browse files
experiments/{summary_validation.py → get_qualitative_table.py} RENAMED
@@ -2,6 +2,9 @@ import json
2
  import pandas as pd
3
  from datasets import load_dataset
4
 
 
 
 
5
  data_valid = load_dataset("cardiffnlp/relentless", split="validation")
6
  lc_valid = pd.read_csv("results_validation/lm_lc/lm.csv", index_col=0)
7
  qa_valid = pd.read_csv("results_validation/lm_qa/lm.csv", index_col=0)
@@ -27,6 +30,11 @@ p = 30
27
  table = []
28
  for prompt in ['qa', 'lc']:
29
  for i in target.keys():
 
 
 
 
 
30
  for d in data_test:
31
  with open(f"results/lm_{prompt}/{i}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl") as f:
32
  negative_ppl = sorted([json.loads(x)['perplexity'] * -1 for x in f.read().split("\n") if len(x) > 0], reverse=True)
@@ -48,4 +56,4 @@ for prompt in ['qa', 'lc']:
48
  })
49
 
50
  table = pd.DataFrame(table)
51
- # table.to_csv("results_validation/summary_validation.csv")
 
2
  import pandas as pd
3
  from datasets import load_dataset
4
 
5
+ pd.set_option('display.max_rows', None)
6
+ pd.set_option('display.max_columns', None)
7
+
8
  data_valid = load_dataset("cardiffnlp/relentless", split="validation")
9
  lc_valid = pd.read_csv("results_validation/lm_lc/lm.csv", index_col=0)
10
  qa_valid = pd.read_csv("results_validation/lm_qa/lm.csv", index_col=0)
 
30
  table = []
31
  for prompt in ['qa', 'lc']:
32
  for i in target.keys():
33
+ if i in ['flan-t5-xxl', 'flan-ul2'] and prompt == 'lc':
34
+ continue
35
+ if i in ['opt-13b', 'davinci'] and prompt == 'qa':
36
+ continue
37
+
38
  for d in data_test:
39
  with open(f"results/lm_{prompt}/{i}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl") as f:
40
  negative_ppl = sorted([json.loads(x)['perplexity'] * -1 for x in f.read().split("\n") if len(x) > 0], reverse=True)
 
56
  })
57
 
58
  table = pd.DataFrame(table)
59
+ table.to_csv("results_validation/qualitative.csv", index=False)
experiments/results_validation/qualitative.csv ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ,,top,bottom
2
+ "Flan-T5\textsubscript{XXL}",Ally,Darth Vader:Obi-Wan Kenobi,Maximus Decimus Meridius:Juba
3
+ ,Inf,,Vape:cigarette
4
+ Flan-UL2,Ally,,Maximus Decimus Meridius:Juba
5
+ ,Inf,Rihanna:Stevie Wonder,
6
+ ,Know,Qualcomm:smartphones,
7
+ "OPT\textsubscript{13B}",Rival,,"Wickes:ScrewFix, Russia:US"
8
+ ,Ally,"Darth Vader:Obi-Wan Kenobi, Schindler's List:Baz Luhrmann",Microsoft:OpenAI
9
+ ,Inf,Rihanna:Stevie Wonder,"UK:Winston Churchill, Vape:cigarette"
10
+ ,Sim,Domino's:YO! Sushi,
11
+ "GPT-3\textsubscript{davinci}",Rival,,Wickes:ScrewFix
12
+ ,Ally,Darth Vader:Obi-Wan Kenobi,
13
+ ,Inf,Rihanna:Stevie Wonder,"Vape:cigarette, Liz Truss:Thatcher"
experiments/results_validation/qualitative.xlsx ADDED
Binary file (10.4 kB). View file