# syntax=docker/dockerfile:1
# LLMServer / Dockerfile
# Provenance (from the Hugging Face Hub page this was scraped from):
#   uploaded by AurelioAguirre — "Upload 6 files" — commit a189e20 (verified), 1.78 kB
# NOTE(review): the original first lines were raw web-page chrome
# ("raw / history / blame") that would make `docker build` fail;
# they are preserved above as comments.
# Use Python 3.10 as base image for better compatibility with ML libraries
FROM python:3.10-slim

# Set working directory (WORKDIR creates the path if it does not exist)
WORKDIR /app

# Install git in a single layer with `apt-get update` so the package index
# never goes stale (DL3009), and clean the lists in the same layer so they
# don't persist in the image. --no-install-recommends (DL3015) avoids
# pulling in recommended packages we don't need.
RUN apt-get update && \
    apt-get install -y --no-install-recommends git && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
# Pre-create the Hugging Face cache directory and open its permissions.
# NOTE(review): chmod 777 is presumably so an arbitrary runtime UID
# (e.g. the non-root user Hugging Face Spaces assigns) can write the
# cache — confirm before tightening.
RUN mkdir -p /app/.cache/huggingface \
    && chmod 777 /app/.cache/huggingface

# Point both transformers and the wider HF tooling at the writable cache.
ENV TRANSFORMERS_CACHE=/app/.cache/huggingface/hub \
    HF_HOME=/app/.cache/huggingface
# Copy only the dependency manifest first so the (expensive) pip layer is
# reused from cache whenever just the application code changes.
COPY requirements.txt ./

# Install Python dependencies; --no-cache-dir keeps pip's download cache
# out of the image layer.
RUN pip install --no-cache-dir -r requirements.txt

# Now bring in the rest of the application source.
COPY . ./
# Create checkpoints directory with proper permissions (777 so any runtime
# UID — e.g. on Hugging Face Spaces — can write downloaded weights here).
RUN mkdir -p /app/checkpoints && \
    chmod 777 /app/checkpoints

# Build-time Hugging Face token, used only by the model-download step below.
# SECURITY(review): the original also ran `ENV HF_TOKEN=${HF_TOKEN}`, baking
# the secret into the runtime image for anyone who pulls it. That ENV is
# dropped here; an ARG is still exported to RUN instructions in this stage,
# so the conditional download below keeps working. Note `--build-arg`
# values remain visible in `docker history` — for real deployments prefer
# `RUN --mount=type=secret,id=hf_token …`. If the application needs the
# token at runtime, pass it with `docker run -e HF_TOKEN=…` instead.
ARG HF_TOKEN
# Download both models at build time using litgpt, but only when a token
# was supplied; otherwise skip so the build still succeeds and models can
# be fetched separately at runtime.
# The token is read from the environment *inside* Python rather than being
# shell-interpolated into the -c source string: interpolation would break
# on special characters in the token and echo it into the expanded command.
# NOTE(review): 'meta-llama/Llama-2-3b-chat-hf' does not match any known
# Llama-2 chat size (7b/13b/70b) — confirm the intended repo id.
# NOTE(review): `from litgpt.cli import download` — verify this import path
# against the pinned litgpt version in requirements.txt.
RUN if [ -n "$HF_TOKEN" ]; then \
        python -c "import os; from huggingface_hub import login; from litgpt.cli import download; \
login(token=os.environ['HF_TOKEN']); \
download('meta-llama/Llama-2-3b-chat-hf', '/app/checkpoints'); \
download('mistralai/Mistral-7B-Instruct-v0.3', '/app/checkpoints')"; \
    else \
        echo "No Hugging Face token provided. Models will need to be downloaded separately."; \
    fi
# Runtime configuration for the LLM engine server, grouped in one
# instruction for readability (identical resulting environment).
# MODEL_PATH points at the Mistral checkpoint laid out by the download step.
ENV LLM_ENGINE_HOST=0.0.0.0 \
    LLM_ENGINE_PORT=8001 \
    MODEL_PATH=/app/checkpoints/mistralai/Mistral-7B-Instruct-v0.3
# Document the container's listening ports (EXPOSE does not publish them):
#   8001 — FastAPI server
#   7860 — the port Hugging Face Spaces expects
EXPOSE 8001
EXPOSE 7860

# Launch the application. Exec (JSON-array) form keeps Python as PID 1 so
# it receives SIGTERM directly on `docker stop`.
CMD ["python", "main/main.py"]