services:
  nemesisBot:
    image: telegrambot
    container_name: telegrambot
    build:
      context: .
      dockerfile: TelegramBot/Dockerfile
    env_file:
      - TelegramBot/.env

  llm-server:
    image: ghcr.io/ggerganov/llama.cpp:server-cuda
    container_name: llm-server
    volumes:
      - ${MODEL_PATH}:/models
    ports:
      - "80:80"
    command: -m /models/${MODEL_NAME} --port 80 --host 0.0.0.0 -n 128 -c 4096 --no-mmap -ngl 50 -fa -np 4
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
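
The compose file reads MODEL_PATH and MODEL_NAME from the environment, typically a .env file placed next to the compose file. A minimal sketch with placeholder values (the actual directory and model filename depend on your setup):

  # .env next to the compose file (example values only)
  MODEL_PATH=/path/to/gguf/models   # host directory mounted into the container at /models
  MODEL_NAME=model-Q4_K_M.gguf      # GGUF file inside MODEL_PATH, passed to the llama.cpp server via -m

With those set, `docker compose up -d --build` builds the bot image and starts both services. On the llama.cpp server side, `-ngl 50` offloads up to 50 layers to the GPU, `-c 4096` sets the context window, `-fa` enables flash attention, and `-np 4` allows four parallel request slots.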