File size: 563 Bytes
a8144f6
 
 
 
 
 
 
 
 
 
 
 
 
fc2f6ba
a8144f6
 
fc2f6ba
a8144f6
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
#!/bin/bash
# Container entrypoint: start an Ollama server, fetch the Llama 3.2 3B
# GGUF weights, register them as model "llama3.2", then stay attached
# to the server process so the container exits if the server dies.
set -euo pipefail

# Bind the API to all interfaces and allow browser requests from
# projects.blender.org (CORS origin allow-list read by ollama serve).
export OLLAMA_HOST=0.0.0.0
export OLLAMA_ORIGINS=https://projects.blender.org

readonly MODEL_URL='https://huggingface.co/hugging-quants/Llama-3.2-3B-Instruct-Q8_0-GGUF/resolve/main/llama-3.2-3b-instruct-q8_0.gguf?download=true'
readonly MODEL_FILE=llama.gguf

# Start the Ollama service in the background and remember its PID so we
# can wait on it at the end instead of parking on `tail -f /dev/null`.
ollama serve &
ollama_pid=$!

# Poll the API until it answers instead of sleeping a fixed 10 seconds;
# give up after ~30s so a broken server fails the container fast.
ready=0
for (( i = 0; i < 30; i++ )); do
  if curl -fsS "http://127.0.0.1:11434/" >/dev/null 2>&1; then
    ready=1
    break
  fi
  sleep 1
done
if (( ! ready )); then
  echo "error: ollama server did not become ready within 30s" >&2
  exit 1
fi

# Download the weights only if they are not already present, so a
# container restart does not re-fetch a multi-GB file. The URL is quoted
# because the '?' would otherwise be subject to glob expansion.
if [[ ! -f "$MODEL_FILE" ]]; then
  curl -fSL --retry 3 -o "$MODEL_FILE" "$MODEL_URL"
fi

# Register the weights with Ollama. The Modelfile is expected to ship in
# the image's working directory and reference $MODEL_FILE.
ollama create llama3.2 -f Modelfile

# Block on the server; the container stops (with the server's exit
# status) if ollama serve terminates, letting the orchestrator restart it.
wait "$ollama_pid"