Tu Nombre committed
Commit bc742f7 · 1 Parent(s): 7b0a0e8

Fix Dockerfile for proper model loading and Ollama setup

Files changed: Dockerfile (+45, -25)
Dockerfile
CHANGED
@@ -9,38 +9,58 @@ RUN useradd -m -u 1000 user
 
 WORKDIR /app
 
-COPY --chown=user ./requirements.txt requirements.txt
-RUN pip install --no-cache-dir --upgrade -r requirements.txt
-
 # Copy application files including templates
 COPY --chown=user . /app
 
-… (removed lines 18-38 of the previous Dockerfile; their content is not recoverable from this rendering)
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+# Create directories
+RUN mkdir -p /home/user/.ollama/models && \
+    chown -R user:user /home/user/.ollama
+
+# Create startup script with better error handling
+RUN echo '#!/bin/bash\n\
+set -e\n\
+\n\
+echo "Iniciando servidor Ollama..."\n\
+ollama serve &\n\
+\n\
+echo "Esperando a que Ollama esté disponible..."\n\
+for i in $(seq 1 60); do\n\
+if nc -z localhost 11434; then\n\
+echo "Ollama está listo"\n\
+break\n\
+fi\n\
+echo "Esperando a Ollama... $i/60"\n\
+sleep 1\n\
+done\n\
+\n\
+echo "Descargando modelo desde Hugging Face..."\n\
+cd /tmp\n\
+wget -q --show-progress https://huggingface.co/andresdegante/llama3-papalia-nuevo/resolve/main/model.tar.gz\n\
+\n\
+echo "Importando modelo a Ollama..."\n\
+tar xzf model.tar.gz -C /home/user/.ollama/models\n\
+rm model.tar.gz\n\
+\n\
+echo "Verificando modelo..."\n\
+if ! ollama list | grep -q "llama3-papalia-nuevo"; then\n\
+echo "Error: Modelo no encontrado después de la importación"\n\
+exit 1\n\
+fi\n\
+\n\
+echo "Iniciando API..."\n\
+cd /app\n\
+exec uvicorn app:app --host 0.0.0.0 --port 7860\n\
+' > /app/start.sh
 
 RUN chmod +x /app/start.sh
 
 USER user
 ENV PATH="/home/user/.local/bin:$PATH"
 
-#…
+# Health check
+HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
+    CMD curl -f http://localhost:7860/health || exit 1
+
 CMD ["/app/start.sh"]
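Note on the new HEALTHCHECK: it probes http://localhost:7860/health with curl, and the startup script launches the API with "uvicorn app:app", so the Space's app.py is expected to expose an ASGI app with a /health route. That route is not part of this commit; the snippet below is only a minimal sketch, assuming FastAPI, of the kind of endpoint the health check relies on. The endpoint name and the Ollama reachability probe are illustrative assumptions, not code taken from the repository.

# Hypothetical excerpt of app.py -- not from this commit; shown only to
# illustrate what the Docker HEALTHCHECK above assumes exists.
import http.client

from fastapi import FastAPI, Response

app = FastAPI()

@app.get("/health")
def health() -> Response:
    # Report healthy only if the local Ollama server still answers on 11434,
    # the same port the startup script waits for before starting the API.
    try:
        conn = http.client.HTTPConnection("localhost", 11434, timeout=2)
        conn.request("GET", "/")
        ok = conn.getresponse().status < 500
    except OSError:
        ok = False
    return Response(status_code=200 if ok else 503)

With an endpoint along these lines, "curl -f http://localhost:7860/health" exits non-zero whenever Ollama is unreachable, which is what the --retries=3 policy in the HEALTHCHECK acts on.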