version: "3.3" services: text-generation-webui: build: context: . args: # specify which cuda version your card supports: https://developer.nvidia.com/cuda-gpus TORCH_CUDA_ARCH_LIST: ${TORCH_CUDA_ARCH_LIST} GPTQ_VERSION: ${GPTQ_VERSION} WEBUI_VERSION: ${WEBUI_VERSION} env_file: .env ports: - "${HOST_PORT}:${CONTAINER_PORT}" - "${HOST_API_PORT}:${CONTAINER_API_PORT}" stdin_open: true tty: true volumes: - ./characters:/app/characters - ./extensions:/app/extensions - ./loras:/app/loras - ./models:/app/models - ./presets:/app/presets - ./prompts:/app/prompts - ./softprompts:/app/softprompts - ./training:/app/training deploy: resources: reservations: devices: - driver: nvidia device_ids: ['0'] capabilities: [gpu]