inflaton committed
Commit 0c50515 · 1 Parent(s): c41fec1
.gitattributes CHANGED
@@ -99,3 +99,5 @@ results/glm-4-9b_lora_sft_bf16-p1.csv filter=lfs diff=lfs merge=lfs -text
 results/llama3-8b_lora_sft_bf16-p1_en.csv filter=lfs diff=lfs merge=lfs -text
 results/mgtv-results_internlm_best.csv filter=lfs diff=lfs merge=lfs -text
 results/test_b-results_r4.csv filter=lfs diff=lfs merge=lfs -text
+results/mgtv-results_colab_p2_gemma2.csv filter=lfs diff=lfs merge=lfs -text
+results/test_b-results_r5.csv filter=lfs diff=lfs merge=lfs -text
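The two new entries route the freshly added result CSVs through Git LFS, so only lightweight pointers enter the repository history. As a sketch (assuming Git LFS is installed locally), entries of this form are normally generated with git lfs track rather than edited by hand:

# appends the matching filter=lfs lines to .gitattributes
git lfs track "results/mgtv-results_colab_p2_gemma2.csv"
git lfs track "results/test_b-results_r5.csv"
git add .gitattributes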
results/mgtv-results_colab_p2_gemma2.csv CHANGED
The diff for this file is too large to render. See raw diff
 
results/test_b-results_r5.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af0267f312e20bb6538edf14702512a635b676dd099cd7e7209f6ee4a60ede2f
+size 23133371
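The added file is committed as a Git LFS pointer: the version line names the pointer spec, oid is the SHA-256 of the real CSV, and size is its byte count (about 23 MB). A minimal sketch for pulling the actual content and checking it against the recorded oid, assuming Git LFS is set up for this clone:

# fetch just this object, then verify its checksum matches the oid above
git lfs pull --include="results/test_b-results_r5.csv"
sha256sum results/test_b-results_r5.csv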
scripts/test-mgtv.sh CHANGED
@@ -24,17 +24,20 @@ export USING_LLAMA_FACTORY=false
 #export ADAPTER_NAME_OR_PATH=llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p2_full_r3/checkpoint-140
 
 export MODEL_NAME=google/gemma-2-9b-it
-export ADAPTER_NAME_OR_PATH=outputs/checkpoint-6000
 export USING_P1_PROMPT_TEMPLATE=false
-
-#export MODEL_NAME=shenzhi-wang/Llama3-8B-Chinese-Chat
-#export ADAPTER_NAME_OR_PATH=llama-factory/saves/llama3-8b/lora/sft_bf16_p1_full_r4/checkpoint-140
-#export USING_P1_PROMPT_TEMPLATE=true
-
 export LOGICAL_REASONING_DATA_PATH=datasets/mgtv
 
-export TEST_DATA=test_b
-export LOGICAL_REASONING_RESULTS_PATH=results/$TEST_DATA-results_r5.csv
+export LOGICAL_REASONING_RESULTS_PATH=results/mgtv-results_colab_p2_gemma2.csv
+export ADAPTER_NAME_OR_PATH=outputs/checkpoint-7000
+echo "Eval $MODEL_NAME with $ADAPTER_NAME_OR_PATH"
+python llm_toolkit/eval_logical_reasoning.py
 
+
+export ADAPTER_NAME_OR_PATH=outputs/checkpoint-12000
 echo "Eval $MODEL_NAME with $ADAPTER_NAME_OR_PATH"
 python llm_toolkit/eval_logical_reasoning.py
+
+export TEST_DATA=test_b
+export LOGICAL_REASONING_RESULTS_PATH=results/$TEST_DATA-results_r6.csv
+echo "Test $MODEL_NAME with $ADAPTER_NAME_OR_PATH"
+python llm_toolkit/eval_logical_reasoning.py
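With these changes the script evaluates google/gemma-2-9b-it twice, first with the outputs/checkpoint-7000 adapter and then with outputs/checkpoint-12000, with LOGICAL_REASONING_RESULTS_PATH pointing at results/mgtv-results_colab_p2_gemma2.csv for both runs; the final invocation keeps checkpoint-12000 but sets TEST_DATA=test_b and writes results/test_b-results_r6.csv. A minimal usage sketch, assuming the adapters exist under outputs/ and datasets/mgtv is populated (the log path is only an example):

# run the full sweep from the repository root
bash scripts/test-mgtv.sh 2>&1 | tee test-mgtv.log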