File size: 1,143 Bytes
09e6035
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
679dc6a
09e6035
 
 
 
 
 
679dc6a
 
 
 
 
 
09e6035
 
 
679dc6a
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
#!/bin/sh
#
# Fine-tune Qwen/Qwen2-72B-Instruct with 4-bit LoRA SFT via LLaMA-Factory on
# the MGTV logical-reasoning dataset, then evaluate the adapters of every
# epoch. Runs two passes (prompt variants p1 and p2), each: tune -> eval.
#
# Required layout (relative to the repo root, i.e. the parent of this
# script's directory):
#   config/<prefix>-p{1,2}.yaml      LLaMA-Factory training configs
#   scripts/tune-lf.sh               tuning wrapper
#   llm_toolkit/eval_logical_reasoning_all_epochs.py
#
# Env vars exported for the child processes:
#   LOGICAL_REASONING_DATA_PATH, MODEL_NAME, MODEL_PREFIX, CONFIG_FILE,
#   LOGICAL_REASONING_RESULTS_PATH, ADAPTER_PATH_BASE

# Abort the pipeline on the first failing command (e.g. a failed tuning run
# must not be followed by evaluation of stale adapters).
set -e

BASEDIR=$(dirname "$0")
# Run everything from the repository root; bail out if the cd fails instead
# of silently operating on the wrong directory.
cd "$BASEDIR/.." || exit 1
echo "Current Directory:"
pwd

BASEDIR=$(pwd)

# Log hardware / OS details for reproducibility. Best-effort: missing tools
# (e.g. nvidia-smi on a non-GPU box) must not abort the run under set -e.
nvidia-smi || true
uname -a || true
cat /etc/os-release || true
lscpu || true
grep MemTotal /proc/meminfo || true

#pip install -r requirements.txt
#cd ../LLaMA-Factory && pip install -e .[torch,bitsandbytes]

export LOGICAL_REASONING_DATA_PATH=datasets/mgtv

export MODEL_NAME=Qwen/Qwen2-72B-Instruct
export MODEL_PREFIX=qwen2_72b_lora_sft_4bit

# --- Pass 1: prompt variant p1 ---
export CONFIG_FILE=config/$MODEL_PREFIX-p1.yaml
echo "Tuning with $CONFIG_FILE"
"$BASEDIR/scripts/tune-lf.sh" "$CONFIG_FILE"

export LOGICAL_REASONING_RESULTS_PATH=results/$MODEL_PREFIX-p1.csv
export ADAPTER_PATH_BASE=llama-factory/saves/qwen2-72b/lora/sft_4bit_p1_full
echo "Eval $MODEL_NAME with $ADAPTER_PATH_BASE"
python llm_toolkit/eval_logical_reasoning_all_epochs.py

# --- Pass 2: prompt variant p2 ---
export CONFIG_FILE=config/$MODEL_PREFIX-p2.yaml
echo "Tuning with $CONFIG_FILE"
"$BASEDIR/scripts/tune-lf.sh" "$CONFIG_FILE"

export LOGICAL_REASONING_RESULTS_PATH=results/$MODEL_PREFIX-p2.csv
export ADAPTER_PATH_BASE=llama-factory/saves/qwen2-72b/lora/sft_4bit_p2_full
echo "Eval $MODEL_NAME with $ADAPTER_PATH_BASE"
python llm_toolkit/eval_logical_reasoning_all_epochs.py