Reverted the tokenizer fix (to be resolved manually during testing); added current eval results.
data/code_eval_board.csv
CHANGED
@@ -8,3 +8,6 @@ T,Models,ARC,HellaSwag,MMLU,TruthfulQA,Winogrande,GSM8K,Reference Model
 🔶,upstage/SOLAR-10.7B-Instruct-v1.0,0.11,0.49,0.28,0.36,0.01,0.96,huggyllama/llama-7b
 🔶,AIDC-ai-business/Marcoroni-7B-v3,0.1,0.14,0.2,0.41,0.0,0.95,mistralai/Mistral-7B-v0.1
 🔶,amazon/MistralLite,0.09,0.14,0.2,0.43,0.0,0.73,mistralai/Mistral-7B-v0.1
+🔶,openchat/openchat_3.5,0.13,0.13,0.23,0.45,0.0,0.97
+🔶,meta-math/MetaMath-Mistral-7B,0.08,0.1,0.17,0.42,0.0,0.97
+🔶,teknium/OpenHermes-2.5-Mistral-7B,0.07,0.13,0.23,0.39,0.0,0.96
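As a side note, the appended rows stop after the GSM8K column, so the Reference Model field is left empty for them. A minimal sketch of how that reads back with pandas (the relative path is an assumption about the Space's layout):

import pandas as pd

# Columns: T,Models,ARC,HellaSwag,MMLU,TruthfulQA,Winogrande,GSM8K,Reference Model
board = pd.read_csv("data/code_eval_board.csv")

# The three rows added in this commit omit the trailing Reference Model value,
# so pandas parses that column as NaN for them.
newly_added = board[board["Reference Model"].isna()]
print(newly_added[["Models", "GSM8K"]])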
data/queue.csv
CHANGED
@@ -1,6 +1,4 @@
 Type,Model,ref_model
-🔶 finetuned,openchat/openchat_3.5,mistralai/Mistral-7B-v0.1
-🔶 finetuned,teknium/OpenHermes-2.5-Mistral-7B,mistralai/Mistral-7B-v0.1
 🔶 finetuned,WizardLM/WizardMath-7B-V1.1,mistralai/Mistral-7B-v0.1
 🔶 finetuned,Intel/neural-chat-7b-v3-3,mistralai/Mistral-7B-v0.1
 🔶 finetuned,mistralai/Mistral-7B-Instruct-v0.2,mistralai/Mistral-7B-v0.1
@@ -8,7 +6,6 @@ Type,Model,ref_model
 🔶 finetuned,HuggingFaceH4/zephyr-7b-beta,mistralai/Mistral-7B-v0.1
 🔶 finetuned,berkeley-nest/Starling-LM-7B-alpha,mistralai/Mistral-7B-v0.1
 🔶 finetuned,Open-Orca/Mistral-7B-OpenOrca,mistralai/Mistral-7B-v0.1
-🔶 finetuned,meta-math/MetaMath-Mistral-7B,mistralai/Mistral-7B-v0.1
 🔶 finetuned,microsoft/Orca-2-7b,mistralai/Mistral-7B-v0.1
 🔶 finetuned,01-ai/Yi-6B-200K,mistralai/Mistral-7B-v0.1
 🔶 finetuned,Yhyu13/LMCocktail-10.7B-v1,mistralai/Mistral-7B-v0.1
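The queue edit mirrors the board edit: the three models dropped here (openchat_3.5, OpenHermes-2.5-Mistral-7B, MetaMath-Mistral-7B) are the ones whose scores were appended to code_eval_board.csv above. A rough sketch of that manual move as a script; promote_to_board is a hypothetical helper, not code from this Space:

import pandas as pd

QUEUE_CSV = "data/queue.csv"            # columns: Type,Model,ref_model
BOARD_CSV = "data/code_eval_board.csv"  # columns: T,Models,...,GSM8K,Reference Model

def promote_to_board(model: str, scores: dict) -> None:
    # Hypothetical helper: remove a finished model from the queue
    # and append its scores as a new leaderboard row.
    queue = pd.read_csv(QUEUE_CSV)
    queue[queue["Model"] != model].to_csv(QUEUE_CSV, index=False)

    board = pd.read_csv(BOARD_CSV)
    row = {"T": "🔶", "Models": model, **scores}
    pd.concat([board, pd.DataFrame([row])], ignore_index=True).to_csv(BOARD_CSV, index=False)

# Example mirroring one row added in this commit:
promote_to_board("openchat/openchat_3.5",
                 {"ARC": 0.13, "HellaSwag": 0.13, "MMLU": 0.23,
                  "TruthfulQA": 0.45, "Winogrande": 0.0, "GSM8K": 0.97})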
detect-pretrain-code-contamination/src/run.py
CHANGED
@@ -44,7 +44,7 @@ def load_model(name1,ref_model):
     if name1 not in models:
         model1 = AutoModelForCausalLM.from_pretrained(name1, return_dict=True, device_map='auto')
         model1.eval()
-        tokenizer1 = AutoTokenizer.from_pretrained(
+        tokenizer1 = AutoTokenizer.from_pretrained(name1)
         tokenizer1.pad_token = tokenizer1.eos_token
         models[name1] = model1
         models[name1 + "_tokenizer"] = tokenizer1
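For context, load_model caches a model/tokenizer pair per checkpoint name in a module-level dict. A minimal sketch of that pattern with the reverted tokenizer behaviour (tokenizer loaded from the same checkpoint as the model); the ref_model branch and the return shape are assumptions, since the diff only shows the name1 branch:

from transformers import AutoModelForCausalLM, AutoTokenizer

models = {}  # module-level cache: name -> model, name + "_tokenizer" -> tokenizer

def load_model(name1, ref_model):
    for name in (name1, ref_model):          # ref_model branch assumed symmetric to the diff
        if name not in models:
            model = AutoModelForCausalLM.from_pretrained(name, return_dict=True, device_map='auto')
            model.eval()
            # Reverted behaviour: tokenizer comes from the same checkpoint as the model.
            tokenizer = AutoTokenizer.from_pretrained(name)
            tokenizer.pad_token = tokenizer.eos_token
            models[name] = model
            models[name + "_tokenizer"] = tokenizer
    # Assumed return shape: evaluated model/tokenizer followed by reference model/tokenizer.
    return (models[name1], models[name1 + "_tokenizer"],
            models[ref_model], models[ref_model + "_tokenizer"])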