-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdocker-compose-dev.yaml
More file actions
87 lines (78 loc) · 2.2 KB
/
docker-compose-dev.yaml
File metadata and controls
87 lines (78 loc) · 2.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
---
# Development stack for local integration testing.
# Services: Redis Stack, Postgres (with sample init data), Trino, a
# GraphQL-over-Trino gateway, an OpenAPI petstore mock, and Open WebUI.
# Named volume `open-webui` persists Open WebUI state across restarts.
services:
  redis-stack-server:
    image: redis/redis-stack-server:latest
    ports:
      # Non-default host port to avoid clashing with a local Redis on 6379.
      - "9736:6379"

  postgres-samples:
    image: postgres:alpine
    volumes:
      - ./docker/postgres/data:/data
      # Init scripts run only on first boot (empty data directory).
      - ./docker/postgres/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: example
    ports:
      - "5432:5432"

  trino:
    image: trinodb/trino:latest
    ports:
      - "8384:8080"
    volumes:
      - ./docker/trino/etc:/etc/trino

  graphql-trino:
    image: iromu/graphql-trino:latest
    ports:
      - "8886:8080"
    volumes:
      - ./docker/graphql-trino/etc:/etc/graphql-trino
    environment:
      # Service-to-service address: the in-network `trino` hostname, not the
      # host-mapped port.
      SPRING_DATASOURCE_URL: jdbc:trino://trino:8080
    depends_on:
      trino:
        # NOTE(review): service_healthy relies on the trino image shipping a
        # built-in HEALTHCHECK — confirm for the pinned image version.
        condition: service_healthy
        # Restart this service if trino is restarted (Compose >= 2.17).
        restart: true

  petstore:
    image: openapitools/openapi-petstore
    ports:
      - "8334:8080"
    environment:
      SWAGGER_HOST: http://localhost:8334
      SWAGGER_URL: http://localhost:8334
      SWAGGER_BASE_PATH: /v3

  open-webui:
    image: ghcr.io/open-webui/open-webui:main
    volumes:
      - open-webui:/app/backend/data
    ports:
      - "8000:8080"
    environment:
      # Quoted: env values are strings; unquoted False is a YAML boolean and
      # boolean-looking values must be quoted per the Compose spec.
      WEBUI_AUTH: "False"
      OLLAMA_BASE_URL: http://host.docker.internal:11888
    extra_hosts:
      - "host.docker.internal:host-gateway"

  # 1. This is SLOW
  # First time boot needs to download 'llama3' model, 4.7 GB. If docker compose
  # is started by Spring it will fail and stop the container.
  # You have to manually start this container and wait for the model to download.
  #
  # 2. On EVERY application restart (with docker compose), ollama will load
  # again the model into memory. This makes the rest endpoints "hang" until the
  # next message shows on docker:
  #   msg="llama runner started in 149.95 seconds"
  # ollama:
  #   image: ollama/ollama:latest
  #   container_name: ollama
  #   volumes:
  #     - ./.ollama:/root/.ollama
  #   tty: true
  #   entrypoint: [ "/usr/bin/bash", "/root/.ollama/ollama.sh" ]
  #   ports:
  #     - 11434
  #   deploy:
  #     resources:
  #       reservations:
  #         devices:
  #           - driver: nvidia
  #             count: 1
  #             capabilities: [ gpu ]

volumes:
  open-webui: