File size: 1,226 Bytes
7670f2e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e524368
46c075a
7670f2e
e524368
 
7670f2e
46c075a
5006d94
46c075a
 
e524368
 
7670f2e
 
0c50515
6996906
 
 
7670f2e
0c50515
6996906
 
 
0c50515
 
6996906
0c50515
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
#!/bin/sh
#
# Run the logical-reasoning evaluation (llm_toolkit/eval_logical_reasoning.py)
# for google/gemma-2-9b-it with a LoRA adapter checkpoint, on the MGTV
# test_b split. Expects to live one directory below the repository root
# (e.g. scripts/), and cd's to that root before running.
#
# Prints hardware/OS diagnostics first so eval logs are self-describing.

set -eu

BASEDIR=$(dirname "$0")
# Quote the path: unquoted $BASEDIR breaks on directories containing spaces.
cd "$BASEDIR/.." || exit 1
echo "Current Directory:"
pwd

BASEDIR=$(pwd)

# Environment diagnostics — best-effort: a missing tool (e.g. no GPU /
# no nvidia-smi on this host) must not abort the run under 'set -e'.
nvidia-smi || true
uname -a
cat /etc/os-release || true
lscpu || true
grep MemTotal /proc/meminfo || true

#pip install transformers==4.41.2

export LOAD_IN_4BIT=false
#export USING_LLAMA_FACTORY=true
export USING_LLAMA_FACTORY=false

# Earlier experiment (InternLM 2.5) kept for reference:
#export MODEL_NAME=internlm/internlm2_5-7b-chat-1m
# export ADAPTER_NAME_OR_PATH=inflaton-ai/InternLM_2_5-7b_LoRA-Adapter
#export ADAPTER_NAME_OR_PATH=llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p2_full_r3/checkpoint-140

export MODEL_NAME=google/gemma-2-9b-it
export USING_P1_PROMPT_TEMPLATE=false
export LOGICAL_REASONING_DATA_PATH=datasets/mgtv

export LOGICAL_REASONING_RESULTS_PATH=results/mgtv-results_colab_p2_gemma2.csv
#export ADAPTER_NAME_OR_PATH=outputs/checkpoint-7000
#echo "Eval $MODEL_NAME with $ADAPTER_NAME_OR_PATH"
#python llm_toolkit/eval_logical_reasoning.py


export ADAPTER_NAME_OR_PATH=outputs/checkpoint-2500
#echo "Eval $MODEL_NAME with $ADAPTER_NAME_OR_PATH"
#python llm_toolkit/eval_logical_reasoning.py

# Final run: test_b split, results written under results/.
export TEST_DATA=test_b
export LOGICAL_REASONING_RESULTS_PATH=results/$TEST_DATA-results_r7.csv
echo "Test $MODEL_NAME with $ADAPTER_NAME_OR_PATH"
python llm_toolkit/eval_logical_reasoning.py