services:
  backend:
    build:
      context: .
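      # Set DOCKER_BACKEND_DOCKERFILE in .env to pick an alternative Dockerfile; defaults to the CUDA build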
      dockerfile: ${DOCKER_BACKEND_DOCKERFILE:-Dockerfile.backend.cuda}
    container_name: second-me-backend
    restart: unless-stopped
    ports:
      - "8002:8002"
      - "8080:8080"
    volumes:
      - ./data:/app/data
      - ./logs:/app/logs
      - ./run:/app/run
      - ./resources:/app/resources
      - ./docker:/app/docker
      - ./.env:/app/.env
      - llama-cpp-build:/app/llama.cpp/build  # Persist the llama.cpp build
    environment:
      # Runtime configuration for the backend
      - LOCAL_APP_PORT=8002            # Port the backend listens on (matches the 8002 port mapping)
      - IN_DOCKER_ENV=1                # Signals to the application that it is running inside Docker
      - PLATFORM=${PLATFORM:-linux}    # Target platform, overridable via .env
      - USE_CUDA=1                     # Enable CUDA acceleration
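    # Map host.docker.internal to the host gateway so the container can reach services on the Docker host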
    extra_hosts:
      - "host.docker.internal:host-gateway"
    deploy:
      resources:
        limits:
          # Set container memory limit to 64GB
          memory: 64G
        reservations:
          # Reserve a 6GB minimum for the backend
          memory: 6G
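          # Expose all host NVIDIA GPUs to the container (requires the NVIDIA Container Toolkit on the host)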
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    networks:
      - second-me-network

  frontend:
    build:
      context: .
      dockerfile: Dockerfile.frontend
    container_name: second-me-frontend
    restart: unless-stopped
    ports:
      - "3000:3000"
    volumes:
      - ./logs:/app/logs
      - ./resources:/app/resources
    environment:
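      # The hostname "backend" resolves to the backend service on the shared Compose network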
      - VITE_API_BASE_URL=http://backend:8002
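    # Controls start order only; Compose does not wait for the backend to be ready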
    depends_on:
      - backend
    deploy:
      resources:
        limits:
          # Set container memory limit to 2GB
          memory: 2G
        reservations:
          # Reserve a 1GB minimum for the frontend
          memory: 1G
    networks:
      - second-me-network

networks:
  second-me-network:
    driver: bridge

volumes:
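  # Persists compiled llama.cpp artifacts so the build survives container recreation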
  llama-cpp-build:
    driver: local