#!/bin/sh

BASEDIR=$(dirname "$0")
cd "$BASEDIR/.."
echo "Current Directory: $(pwd)"
BASEDIR=$(pwd)

# Log GPU, OS, CPU, and memory information
nvidia-smi
uname -a
cat /etc/os-release
lscpu
grep MemTotal /proc/meminfo

# Install dependencies if needed
#pip install -r requirements.txt
#cd ../LLaMA-Factory && pip install -e .[torch,bitsandbytes]

export LOGICAL_REASONING_DATA_PATH=datasets/mgtv
export MODEL_NAME=Qwen/Qwen2-72B-Instruct
export MODEL_PREFIX=qwen2_72b_lora_sft_4bit

# Round 1: tune with the p1 config, then evaluate all saved epochs
export CONFIG_FILE=config/$MODEL_PREFIX-p1.yaml
echo "Tuning with $CONFIG_FILE"
"$BASEDIR/scripts/tune-lf.sh" $CONFIG_FILE

export LOGICAL_REASONING_RESULTS_PATH=results/$MODEL_PREFIX-p1.csv
export ADAPTER_PATH_BASE=llama-factory/saves/qwen2-72b/lora/sft_4bit_p1_full
echo "Eval $MODEL_NAME with $ADAPTER_PATH_BASE"
python llm_toolkit/eval_logical_reasoning_all_epochs.py

# Round 2: tune with the p2 config, then evaluate all saved epochs
export CONFIG_FILE=config/$MODEL_PREFIX-p2.yaml
echo "Tuning with $CONFIG_FILE"
"$BASEDIR/scripts/tune-lf.sh" $CONFIG_FILE

export LOGICAL_REASONING_RESULTS_PATH=results/$MODEL_PREFIX-p2.csv
export ADAPTER_PATH_BASE=llama-factory/saves/qwen2-72b/lora/sft_4bit_p2_full
echo "Eval $MODEL_NAME with $ADAPTER_PATH_BASE"
python llm_toolkit/eval_logical_reasoning_all_epochs.py