#!/bin/sh
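# scripts/test-mgtv.sh: report the runtime environment, then evaluate a
# fine-tuned model on the MGTV logical-reasoning test set via
# llm_toolkit/eval_logical_reasoning.py.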
BASEDIR=$(dirname "$0")
cd "$BASEDIR/.." || exit 1
echo "Current Directory:"
pwd
BASEDIR=$(pwd)
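# Print GPU, OS, CPU, and memory details for the test environment.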
nvidia-smi
uname -a
cat /etc/os-release
lscpu
grep MemTotal /proc/meminfo
#pip install transformers==4.41.2
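# Evaluation settings: google/gemma-2-9b-it in full precision (no 4-bit
# loading), without LLaMA-Factory, with the P1 prompt template disabled.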
export LOAD_IN_4BIT=false
#export USING_LLAMA_FACTORY=true
export USING_LLAMA_FACTORY=false
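# Alternative InternLM 2.5 base model / LoRA adapters, kept for reference: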
#export MODEL_NAME=internlm/internlm2_5-7b-chat-1m
# export ADAPTER_NAME_OR_PATH=inflaton-ai/InternLM_2_5-7b_LoRA-Adapter
#export ADAPTER_NAME_OR_PATH=llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p2_full_r3/checkpoint-140
export MODEL_NAME=google/gemma-2-9b-it
export USING_P1_PROMPT_TEMPLATE=false
export LOGICAL_REASONING_DATA_PATH=datasets/mgtv
export LOGICAL_REASONING_RESULTS_PATH=results/mgtv-results_colab_p2_gemma2.csv
#export ADAPTER_NAME_OR_PATH=outputs/checkpoint-7000
#echo "Eval $MODEL_NAME with $ADAPTER_NAME_OR_PATH"
#python llm_toolkit/eval_logical_reasoning.py
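# LoRA adapter under test: local checkpoint-2500.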
export ADAPTER_NAME_OR_PATH=outputs/checkpoint-2500
#echo "Eval $MODEL_NAME with $ADAPTER_NAME_OR_PATH"
#python llm_toolkit/eval_logical_reasoning.py
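# Run the evaluation on the test_b split and write results to results/test_b-results_r7.csv.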
export TEST_DATA=test_b
export LOGICAL_REASONING_RESULTS_PATH=results/$TEST_DATA-results_r7.csv
echo "Test $MODEL_NAME with $ADAPTER_NAME_OR_PATH"
python llm_toolkit/eval_logical_reasoning.py