# Start the Ollama service in the background
ollama serve &
echo "Waiting for Ollama server to start..."
sleep 10
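# Optional: rather than a fixed sleep, poll Ollama's HTTP API until it answers.
# A minimal sketch, assuming the default Ollama address (http://localhost:11434)
# and that curl is available in the image; uncomment to use in place of the sleep.
# until curl -sf http://localhost:11434/api/tags > /dev/null; do
#   echo "Still waiting for Ollama..."
#   sleep 1
# done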
# Pull the Ollama model
ollama pull gemma2:2b
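# Optional: confirm the pull succeeded before launching the app.
# A sketch assuming the gemma2:2b tag used above; adjust the pattern if the model changes.
# ollama list | grep -q "gemma2:2b" || { echo "Model pull failed"; exit 1; }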
# Alternative entrypoint: FastAPI app served by uvicorn (disabled)
#exec uvicorn app:app --host 0.0.0.0 --port 7860
# Start the Streamlit application
exec streamlit run app.py --server.port=7860