# generated from DAM/ts-TEMPLATE
services:
  immich-machine-learning-ts:
    image: tailscale/tailscale:latest
    hostname: immich-machine-learning
    container_name: immich-machine-learning-ts
    environment:
      - TS_AUTHKEY={{YOUR_TAILSCALE_AUTHKEY}}
      - TS_STATE_DIR=/var/lib/tailscale
      - TS_SERVE_CONFIG=/config/serve.json
    volumes:
      - ./tailscale/tailscale-data:/var/lib/tailscale
      - ./tailscale/config:/config
      - /dev/net/tun:/dev/net/tun
    cap_add:
      - net_admin
      - sys_module
    restart: unless-stopped
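    # TS_SERVE_CONFIG above expects a Tailscale serve config mounted from
    # ./tailscale/config/serve.json. A minimal sketch of what that file might
    # contain, assuming the default Immich ML port 3003 and letting the
    # container substitute ${TS_CERT_DOMAIN}:
    #
    #   {
    #     "TCP": { "443": { "HTTPS": true } },
    #     "Web": {
    #       "${TS_CERT_DOMAIN}:443": {
    #         "Handlers": { "/": { "Proxy": "http://127.0.0.1:3003" } }
    #       }
    #     }
    #   }
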
  immich-machine-learning:
    container_name: immich_machine_learning
    # For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
    # Example tag: ${IMMICH_VERSION:-release}-cuda
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
    # extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
    #   file: hwaccel.ml.yml
    #   service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
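    # For example, to use an NVIDIA GPU the block above would be uncommented as
    # (a sketch, assuming hwaccel.ml.yml from the Immich repo sits next to this file):
    #
    #   extends:
    #     file: hwaccel.ml.yml
    #     service: cuda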
    volumes:
      - model-cache:/cache
    env_file:
      - .env
    network_mode: service:immich-machine-learning-ts
    depends_on:
      - immich-machine-learning-ts
    restart: always
    healthcheck:
      disable: false
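# Because network_mode joins the ML container to the Tailscale sidecar, the ML
# API is reachable over the tailnet via the sidecar's hostname. A hedged example
# of pointing a remote Immich server at it, assuming Tailscale Serve handles
# HTTPS per serve.json and "<your-tailnet>" is your tailnet's MagicDNS suffix
# (newer Immich releases can also set this URL in the admin UI):
#
#   IMMICH_MACHINE_LEARNING_URL=https://immich-machine-learning.<your-tailnet>.ts.net
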
volumes:
  model-cache:
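# Usage sketch, assuming Docker Compose v2 and a real auth key substituted for
# the {{YOUR_TAILSCALE_AUTHKEY}} placeholder:
#
#   docker compose up -d
#   docker compose exec immich-machine-learning-ts tailscale status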