#!/bin/bash
# Configure the Ollama server: listen on all interfaces, and only allow
# cross-origin browser requests from projects.blender.org
export OLLAMA_HOST=0.0.0.0
export OLLAMA_ORIGINS=https://projects.blender.org
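# (OLLAMA_ORIGINS takes a comma-separated allowlist, so further origins could
# be appended if needed, e.g. https://projects.blender.org,http://localhost:8080)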
# Start the Ollama service in the background
ollama serve &
# Wait until the server actually answers on its default port instead of
# relying on a fixed delay
until curl -sf http://localhost:11434/ > /dev/null; do
    sleep 1
done
# Download the quantized GGUF model weights from Hugging Face
# (URL quoted so the shell does not treat "?" as a glob character)
curl -fsSL "https://huggingface.co/hugging-quants/Llama-3.2-3B-Instruct-Q8_0-GGUF/resolve/main/llama-3.2-3b-instruct-q8_0.gguf?download=true" -o llama.gguf
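# The Modelfile referenced below is not part of this script; a minimal sketch
# of its assumed contents, pointing at the weights downloaded above:
#   FROM ./llama.gguf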
# Create the model using Ollama
ollama create llama3.2 -f Modelfile
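# Once created, the model can be queried through the Ollama HTTP API,
# for example (prompt text is illustrative only):
#   curl http://localhost:11434/api/generate \
#     -d '{"model": "llama3.2", "prompt": "Hello", "stream": false}'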
# Keep the container running indefinitely
tail -f /dev/null
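# A stricter alternative (an assumption, not part of the original script)
# would be to capture the server PID and `wait` on it, so the container
# exits if the server itself dies:
#   ollama serve & SERVER_PID=$!
#   ...
#   wait "$SERVER_PID"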