# Hugging Face Spaces compatible Dockerfile
FROM python:3.9-slim
# Set environment variables
ENV PYTHONDONTWRITEBYTECODE=1 \
PYTHONUNBUFFERED=1 \
PYTHONPATH=/app
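# (PYTHONDONTWRITEBYTECODE skips .pyc files, PYTHONUNBUFFERED flushes logs
# immediately, and PYTHONPATH=/app keeps the app package importable
# regardless of the working directory.)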
# Set work directory
WORKDIR /app
# Install system dependencies
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
curl \
ca-certificates \
wget \
git \
&& rm -rf /var/lib/apt/lists/*
# Install Ollama
RUN curl -fsSL https://ollama.ai/install.sh | sh
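# (The install script provides the ollama CLI and server binary; by default
# the server listens only on 127.0.0.1:11434 inside the container.)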
# Copy requirements first for better caching
COPY requirements.txt .
# Install Python dependencies
RUN pip install --no-cache-dir --upgrade pip \
&& pip install --no-cache-dir -r requirements.txt
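# (Assumption: requirements.txt pins at least fastapi and uvicorn, since the
# startup script below launches uvicorn app.main:app.)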
# Copy application code
COPY app/ ./app/
COPY pytest.ini .
# Create non-root user for security; give it a real home directory because
# Ollama stores models and keys under $HOME/.ollama
RUN groupadd -r appuser && useradd -r -g appuser -m -d /home/appuser appuser \
&& chown -R appuser:appuser /app
# Create startup script
RUN echo '#!/bin/bash\n\
# Start Ollama in background\n\
ollama serve &\n\
\n\
# Wait until the Ollama API (default port 11434) responds\n\
echo "Waiting for Ollama to start..."\n\
until curl -sf http://127.0.0.1:11434/api/tags > /dev/null; do sleep 2; done\n\
\n\
# Pull the model (this will take a few minutes on first run)\n\
echo "Pulling model..."\n\
ollama pull mistral:7b\n\
\n\
# Start the FastAPI app\n\
echo "Starting FastAPI app..."\n\
exec uvicorn app.main:app --host 0.0.0.0 --port 7860' > /app/start.sh \
&& chmod +x /app/start.sh \
&& chown appuser:appuser /app/start.sh
USER appuser
# Expose port (Hugging Face Spaces uses port 7860)
EXPOSE 7860
# Health check (generous start period: the first boot pulls mistral:7b before uvicorn starts)
HEALTHCHECK --interval=30s --timeout=30s --start-period=300s --retries=3 \
CMD curl -f http://localhost:7860/health || exit 1
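# (Assumption: the FastAPI app in app.main exposes a GET /health route that
# returns 200; adjust the path above if the app uses a different endpoint.)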
# Run the startup script
CMD ["/app/start.sh"]