Tu Nombre committed on
Commit
bc742f7
·
1 Parent(s): 7b0a0e8

Fix Dockerfile for proper model loading and Ollama setup

Browse files
Files changed (1) hide show
  1. Dockerfile +45 -25
Dockerfile CHANGED
@@ -9,38 +9,58 @@ RUN useradd -m -u 1000 user
9
 
10
  WORKDIR /app
11
 
12
- COPY --chown=user ./requirements.txt requirements.txt
13
- RUN pip install --no-cache-dir --upgrade -r requirements.txt
14
-
15
  # Copy application files including templates
16
  COPY --chown=user . /app
17
 
18
- # Create startup script
19
- RUN echo '#!/bin/bash' > /app/start.sh && \
20
- echo 'echo "Iniciando servidor Ollama..."' >> /app/start.sh && \
21
- echo 'ollama serve &' >> /app/start.sh && \
22
- echo 'echo "Esperando a que Ollama esté listo..."' >> /app/start.sh && \
23
- echo 'timeout=60' >> /app/start.sh && \
24
- echo 'while ! nc -z localhost 11434; do' >> /app/start.sh && \
25
- echo ' if [ "$timeout" -le "0" ]; then' >> /app/start.sh && \
26
- echo ' echo "Tiempo de espera agotado para Ollama"' >> /app/start.sh && \
27
- echo ' exit 1' >> /app/start.sh && \
28
- echo ' fi' >> /app/start.sh && \
29
- echo ' echo "Esperando a Ollama... $timeout segundos restantes"' >> /app/start.sh && \
30
- echo ' timeout=$((timeout-1))' >> /app/start.sh && \
31
- echo ' sleep 1' >> /app/start.sh && \
32
- echo 'done' >> /app/start.sh && \
33
- echo 'echo "Descargando modelo desde Hugging Face..."' >> /app/start.sh && \
34
- echo 'wget https://huggingface.co/andresdegante/llama3-papalia-nuevo/resolve/main/model.tar.gz' >> /app/start.sh && \
35
- echo 'ollama import llama3-papalia-nuevo model.tar.gz' >> /app/start.sh && \
36
- echo 'rm model.tar.gz' >> /app/start.sh && \
37
- echo 'echo "Iniciando API..."' >> /app/start.sh && \
38
- echo 'uvicorn app:app --host 0.0.0.0 --port 7860' >> /app/start.sh
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
 
40
  RUN chmod +x /app/start.sh
41
 
42
  USER user
43
  ENV PATH="/home/user/.local/bin:$PATH"
44
 
45
- # Start both Ollama and the FastAPI app
 
 
 
46
  CMD ["/app/start.sh"]
 
9
 
10
  WORKDIR /app
11
 
 
 
 
12
  # Copy application files including templates
13
  COPY --chown=user . /app
14
 
15
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
16
+
17
+ # Create directories
18
+ RUN mkdir -p /home/user/.ollama/models && \
19
+ chown -R user:user /home/user/.ollama
20
+
21
+ # Create startup script with better error handling
22
+ RUN echo '#!/bin/bash\n\
23
+ set -e\n\
24
+ \n\
25
+ echo "Iniciando servidor Ollama..."\n\
26
+ ollama serve &\n\
27
+ \n\
28
+ echo "Esperando a que Ollama esté disponible..."\n\
29
+ for i in $(seq 1 60); do\n\
30
+ if nc -z localhost 11434; then\n\
31
+ echo "Ollama está listo"\n\
32
+ break\n\
33
+ fi\n\
34
+ echo "Esperando a Ollama... $i/60"\n\
35
+ sleep 1\n\
36
+ done\n\
37
+ \n\
38
+ echo "Descargando modelo desde Hugging Face..."\n\
39
+ cd /tmp\n\
40
+ wget -q --show-progress https://huggingface.co/andresdegante/llama3-papalia-nuevo/resolve/main/model.tar.gz\n\
41
+ \n\
42
+ echo "Importando modelo a Ollama..."\n\
43
+ tar xzf model.tar.gz -C /home/user/.ollama/models\n\
44
+ rm model.tar.gz\n\
45
+ \n\
46
+ echo "Verificando modelo..."\n\
47
+ if ! ollama list | grep -q "llama3-papalia-nuevo"; then\n\
48
+ echo "Error: Modelo no encontrado después de la importación"\n\
49
+ exit 1\n\
50
+ fi\n\
51
+ \n\
52
+ echo "Iniciando API..."\n\
53
+ cd /app\n\
54
+ exec uvicorn app:app --host 0.0.0.0 --port 7860\n\
55
+ ' > /app/start.sh
56
 
57
  RUN chmod +x /app/start.sh
58
 
59
  USER user
60
  ENV PATH="/home/user/.local/bin:$PATH"
61
 
62
+ # Health check
63
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
64
+ CMD curl -f http://localhost:7860/health || exit 1
65
+
66
  CMD ["/app/start.sh"]