# llama-cpp-scripts/bin/llama-hf-to-q6_k.sh
# Author: iandennismiller (commit 48b1acd) — "add script that converts HF torch models to gguf"
#!/bin/bash
#
# Convert a Hugging Face torch model to GGUF f16, then quantize it to Q6_K.
# Usage: llama-hf-to-q6_k.sh <account/model>
#
# Fail fast: abort on errors, unset variables, and mid-pipeline failures,
# so a failed download or conversion doesn't silently cascade.
set -euo pipefail

if [ $# -ne 1 ]; then
    # Diagnostics belong on stderr, not stdout.
    echo "Usage: $0 <hf_name>" >&2
    exit 1
fi
# If ~/.config/llama-hf-to-q6_k.conf does not exist, write a default config
# the user can edit afterwards.
CONFIG_FILE="$HOME/.config/llama-hf-to-q6_k.conf"
if [ ! -f "$CONFIG_FILE" ]; then
    # ~/.config may not exist yet on a fresh machine; without this the
    # redirection below fails.
    mkdir -p "$HOME/.config"
    # Quoted delimiter ('EOF') writes the paths verbatim; the tildes are
    # expanded later, when the assignments are sourced.
    cat <<'EOF' > "$CONFIG_FILE"
MODELS_ROOT=~/.ai/models/llama/
HF_DOWNLOADER=~/.ai/bin/hfdownloader
STORAGE_PATH=~/scratch/hfdownloader
PYTHON3_EXEC=~/.virtualenvs/llama.cpp/bin/python3
QUANTIZE_EXEC=~/Work/llama.cpp/build/bin/quantize
CONVERT_PY=~/Work/llama.cpp/convert.py
EOF
fi
source "$CONFIG_FILE"
# Split "<account>/<model>" into its two components.
HF_NAME=$1
ACCOUNT_NAME=$(echo "$HF_NAME" | cut -d '/' -f 1)
MODEL_NAME=$(echo "$HF_NAME" | cut -d '/' -f 2)
MODEL_NAME_LOWER=$(echo "$MODEL_NAME" | tr '[:upper:]' '[:lower:]')

# Output paths: intermediate f16 GGUF and final Q6_K quantization.
MODEL_F16="$MODELS_ROOT/$ACCOUNT_NAME/$MODEL_NAME/${MODEL_NAME_LOWER}-f16.gguf"
MODEL_Q6_K="$MODELS_ROOT/$ACCOUNT_NAME/$MODEL_NAME/${MODEL_NAME_LOWER}-q6_k.gguf"

# Echo the resolved settings so the run is auditable.
cat <<EOF
HF_NAME: $HF_NAME
ACCOUNT_NAME: $ACCOUNT_NAME
MODELS_ROOT: $MODELS_ROOT
MODEL_NAME: $MODEL_NAME
MODEL_NAME_LOWER: $MODEL_NAME_LOWER
MODEL_F16: $MODEL_F16
MODEL_Q6_K: $MODEL_Q6_K
STORAGE_PATH: $STORAGE_PATH
EOF

# Fetch the model from Hugging Face into the scratch storage area.
"$HF_DOWNLOADER" \
    --model "$HF_NAME" \
    --storage "$STORAGE_PATH"

# BUGFIX: the original line was `mkdir -p $MODELS_ROOT/$HF_NAME"` — a stray
# closing quote with no opener, which paired with the next quote in the file
# and unbalanced quoting for everything after it.
mkdir -p "$MODELS_ROOT/$HF_NAME"
# Locate the first checkpoint shard via a glob instead of parsing `ls`
# output (which breaks on spaces and on multi-shard checkpoints, where it
# would join several paths into one newline-separated string).
# Multi-part torch checkpoints are named *-00001-of-NNNNN*; convert.py
# discovers the remaining shards from the first one.
shards=( "$STORAGE_PATH/${ACCOUNT_NAME}_${MODEL_NAME}"/*00001* )
HF_TORCH_MODEL=${shards[0]}
if [ ! -e "$HF_TORCH_MODEL" ]; then
    echo "error: no downloaded checkpoint matching *00001* under $STORAGE_PATH/${ACCOUNT_NAME}_${MODEL_NAME}" >&2
    exit 1
fi
cat <<EOF
HF_TORCH_MODEL: $HF_TORCH_MODEL
EOF
ls -alFh "$HF_TORCH_MODEL"

# Convert the torch checkpoint to an f16 GGUF.
"$PYTHON3_EXEC" \
    "$CONVERT_PY" \
    --outtype f16 \
    --outfile "$MODEL_F16" \
    "$HF_TORCH_MODEL"
ls -alFh "$MODEL_F16"

# Quantize f16 -> Q6_K.
"$QUANTIZE_EXEC" \
    "$MODEL_F16" \
    "$MODEL_Q6_K" Q6_K
ls -alFh "$MODEL_Q6_K"

# re 'Exception: Expected added token IDs to be sequential'
# https://github.com/ggerganov/llama.cpp/issues/3583