wizardlm-13b-v1-2-q4-0-gguf / start_server.sh
AFischer1985's picture
Update start_server.sh
88bbd56
raw
history blame contribute delete
157 Bytes
#!/bin/sh
# Launch the server for the WizardLM-13B GGUF model (via main.py).
#
# Raise the locked-memory limit so llama.cpp can mlock the model weights
# and keep them resident in RAM. Unprivileged containers often forbid
# raising RLIMIT_MEMLOCK; that is non-fatal — the server still runs,
# mlock just may fail — so warn instead of aborting.
if ! ulimit -l unlimited 2>/dev/null; then
  printf 'warn: could not raise RLIMIT_MEMLOCK (ulimit -l); mlock may fail\n' >&2
fi

# exec replaces this shell with the Python process, so signals from the
# container runtime (e.g. SIGTERM on shutdown) reach the server directly
# instead of a lingering sh parent. -B suppresses .pyc writing.
exec python3 -B main.py

# Alternative: serve the raw GGUF model directly with llama-cpp-python's
# built-in OpenAI-compatible server (bypasses main.py).
#python3 -m llama_cpp.server --model ./model/gguf-model.bin --host 0.0.0.0 --port 2600