vllm-inference / runner.sh
yusufs's picture
feat(runner.sh): add specific task and code revision
dc19c1d
raw
history blame
1.73 kB
#!/bin/bash
# Launch the vLLM OpenAI-compatible API server for a model selected via the
# MODEL_ID environment variable.
#
# Required env:
#   MODEL_ID  1 -> meta-llama/Llama-3.2-3B-Instruct
#             2 -> sail/Sailor-4B-Chat
#
# Exits 1 with a message on stderr if MODEL_ID is missing or invalid.
set -euo pipefail

# Single source of truth for the listen port (used in the banner and --port).
readonly PORT=7860

# Validate MODEL_ID; ${MODEL_ID:-} keeps the check safe under `set -u`.
if [[ -z "${MODEL_ID:-}" ]]; then
  echo "Error: MODEL_ID is not set." >&2
  exit 1
fi

# Map MODEL_ID to a model name and a pinned HF commit revision, so deployments
# are reproducible even if the upstream repo moves.
case "$MODEL_ID" in
  1)
    MODEL_NAME="meta-llama/Llama-3.2-3B-Instruct"
    MODEL_REV="0cb88a4f764b7a12671c53f0838cd831a0843b95"
    ;;
  2)
    MODEL_NAME="sail/Sailor-4B-Chat"
    MODEL_REV="89a866a7041e6ec023dd462adeca8e28dd53c83e"
    ;;
  *)
    echo "Error: Invalid MODEL_ID. Valid values are 1 or 2." >&2
    exit 1
    ;;
esac
readonly MODEL_NAME MODEL_REV

printf 'Running %s using vLLM OpenAI compatible API Server at port %s\n' "$MODEL_NAME" "$PORT"

# Why seed 42:
# https://medium.com/geekculture/the-story-behind-random-seed-42-in-machine-learning-b838c4ac290a
# "Forty-two," said Deep Thought, with infinite majesty and calm.
# - Douglas Adams, The Hitchhiker's Guide to the Galaxy

# Run the server with the determined values. All three revision flags are
# pinned to the same commit so model weights, code, and tokenizer stay in sync.
# Supported tasks: {'generate', 'embedding'}
# `exec` replaces this shell with the server process so container signals
# (e.g. SIGTERM on shutdown) reach it directly.
exec python -u /app/openai_compatible_api_server.py \
  --model "${MODEL_NAME}" \
  --task generate \
  --revision "${MODEL_REV}" \
  --code-revision "${MODEL_REV}" \
  --tokenizer-revision "${MODEL_REV}" \
  --seed 42 \
  --host 0.0.0.0 \
  --port "${PORT}" \
  --max-num-batched-tokens 32768 \
  --max-model-len 32768 \
  --dtype float16 \
  --enforce-eager \
  --gpu-memory-utilization 0.9