vllm-inference / run.sh
#!/bin/sh
PORT=7860
printf "Running vLLM OpenAI-compatible API server on port %s\n" "$PORT"
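
# Previous configuration (meta-llama/Llama-3.2-3B-Instruct with a 32k context),
# kept commented out for reference: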
#python -u /app/openai_compatible_api_server.py \
# --model meta-llama/Llama-3.2-3B-Instruct \
# --revision 0cb88a4f764b7a12671c53f0838cd831a0843b95 \
# --host 0.0.0.0 \
# --port 7860 \
# --max-num-batched-tokens 32768 \
# --max-model-len 32768 \
# --dtype half \
# --enforce-eager \
# --gpu-memory-utilization 0.85
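
# Launch sail/Sailor-4B-Chat pinned to a specific revision. Half precision and
# eager mode (no CUDA graphs) keep memory overhead low; vLLM may use up to 85%
# of GPU memory. exec replaces the shell so the Python server receives
# container signals (e.g. SIGTERM) directly.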
exec python -u /app/openai_compatible_api_server.py \
    --model sail/Sailor-4B-Chat \
    --revision 89a866a7041e6ec023dd462adeca8e28dd53c83e \
    --host 0.0.0.0 \
    --port "$PORT" \
    --dtype half \
    --enforce-eager \
    --gpu-memory-utilization 0.85
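
# Minimal smoke test once the server is up (a sketch, assuming the wrapper
# script exposes the standard vLLM OpenAI-compatible /v1 routes; adjust
# host/port to match the flags above):
#
#   curl http://localhost:7860/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{
#           "model": "sail/Sailor-4B-Chat",
#           "messages": [{"role": "user", "content": "Hello!"}]
#         }'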