diff --git a/appwrite/code/.env.example b/appwrite/code/.env.example
index 110cf4d83..71d2007e6 100644
--- a/appwrite/code/.env.example
+++ b/appwrite/code/.env.example
@@ -3,12 +3,17 @@ _APP_LOCALE=en
 _APP_OPTIONS_ABUSE=enabled
 _APP_OPTIONS_FORCE_HTTPS=disabled
 _APP_OPTIONS_FUNCTIONS_FORCE_HTTPS=disabled
+_APP_OPTIONS_ROUTER_FORCE_HTTPS=disabled
 _APP_OPTIONS_ROUTER_PROTECTION=disabled
 _APP_OPENSSL_KEY_V1=your-secret-key
-_APP_DOMAIN=localhost
+_APP_DOMAIN=$(PRIMARY_DOMAIN)
 _APP_CUSTOM_DOMAIN_DENY_LIST=example.com,test.com,app.example.com
 _APP_DOMAIN_FUNCTIONS=functions.localhost
+_APP_DOMAIN_SITES=sites.localhost
 _APP_DOMAIN_TARGET=localhost
+_APP_DOMAIN_TARGET_CNAME=localhost
+_APP_DOMAIN_TARGET_AAAA=::1
+_APP_DOMAIN_TARGET_A=127.0.0.1
 _APP_CONSOLE_WHITELIST_ROOT=enabled
 _APP_CONSOLE_WHITELIST_EMAILS=
 _APP_CONSOLE_WHITELIST_IPS=
@@ -79,12 +84,16 @@ _APP_STORAGE_WASABI_SECRET=
 _APP_STORAGE_WASABI_REGION=eu-central-1
 _APP_STORAGE_WASABI_BUCKET=
 _APP_FUNCTIONS_SIZE_LIMIT=30000000
+_APP_COMPUTE_SIZE_LIMIT=30000000
 _APP_FUNCTIONS_BUILD_SIZE_LIMIT=2000000000
 _APP_FUNCTIONS_TIMEOUT=900
 _APP_FUNCTIONS_BUILD_TIMEOUT=900
+_APP_COMPUTE_BUILD_TIMEOUT=900
 _APP_FUNCTIONS_CONTAINERS=10
 _APP_FUNCTIONS_CPUS=0
+_APP_COMPUTE_CPUS=0
 _APP_FUNCTIONS_MEMORY=0
+_APP_COMPUTE_MEMORY=0
 _APP_FUNCTIONS_MEMORY_SWAP=0
 _APP_FUNCTIONS_RUNTIMES=node-16.0,php-8.0,python-3.9,ruby-3.0
 _APP_EXECUTOR_SECRET=your-secret-key
@@ -92,14 +101,19 @@ _APP_EXECUTOR_HOST=http://exc1/v1
 _APP_EXECUTOR_RUNTIME_NETWORK=appwrite_runtimes
 _APP_FUNCTIONS_ENVS=node-16.0,php-7.4,python-3.9,ruby-3.0
 _APP_FUNCTIONS_INACTIVE_THRESHOLD=60
+_APP_COMPUTE_INACTIVE_THRESHOLD=60
 DOCKERHUB_PULL_USERNAME=
 DOCKERHUB_PULL_PASSWORD=
 DOCKERHUB_PULL_EMAIL=
 OPEN_RUNTIMES_NETWORK=appwrite_runtimes
 _APP_FUNCTIONS_RUNTIMES_NETWORK=runtimes
+_APP_COMPUTE_RUNTIMES_NETWORK=runtimes
 _APP_DOCKER_HUB_USERNAME=
 _APP_DOCKER_HUB_PASSWORD=
 _APP_FUNCTIONS_MAINTENANCE_INTERVAL=3600
+_APP_COMPUTE_MAINTENANCE_INTERVAL=3600
+_APP_SITES_TIMEOUT=900
+_APP_SITES_RUNTIMES=static-1,node-22,flutter-3.29
 _APP_VCS_GITHUB_APP_NAME=
 _APP_VCS_GITHUB_PRIVATE_KEY=
 _APP_VCS_GITHUB_APP_ID=
@@ -108,6 +122,7 @@ _APP_VCS_GITHUB_CLIENT_SECRET=
 _APP_VCS_GITHUB_WEBHOOK_SECRET=
 _APP_MAINTENANCE_INTERVAL=86400
 _APP_MAINTENANCE_DELAY=0
+_APP_MAINTENANCE_START_TIME=00:00
 _APP_MAINTENANCE_RETENTION_CACHE=2592000
 _APP_MAINTENANCE_RETENTION_EXECUTION=1209600
 _APP_MAINTENANCE_RETENTION_AUDIT=1209600
diff --git a/appwrite/code/docker-compose.yml b/appwrite/code/docker-compose.yml
index f3d746a2a..1d8bde4bf 100644
--- a/appwrite/code/docker-compose.yml
+++ b/appwrite/code/docker-compose.yml
@@ -28,7 +28,7 @@ services:
       - appwrite
 
   appwrite:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     <<: *x-logging
    restart: unless-stopped
     networks:
       - appwrite
@@ -49,10 +49,13 @@ services:
       - traefik.http.routers.appwrite_api_https.tls=true
     volumes:
       - appwrite-uploads:/storage/uploads:rw
+      - appwrite-imports:/storage/imports:rw
       - appwrite-cache:/storage/cache:rw
       - appwrite-config:/storage/config:rw
       - appwrite-certificates:/storage/certificates:rw
       - appwrite-functions:/storage/functions:rw
+      - appwrite-sites:/storage/sites:rw
+      - appwrite-builds:/storage/builds:rw
     depends_on:
       - mariadb
       - redis
@@ -74,10 +77,12 @@ services:
       - _APP_OPTIONS_ABUSE
       - _APP_OPTIONS_ROUTER_PROTECTION
       - _APP_OPTIONS_FORCE_HTTPS
-      - _APP_OPTIONS_FUNCTIONS_FORCE_HTTPS
+      - _APP_OPTIONS_ROUTER_FORCE_HTTPS
       - _APP_OPENSSL_KEY_V1
       - _APP_DOMAIN
-      - _APP_DOMAIN_TARGET
+      - _APP_DOMAIN_TARGET_CNAME
+      - _APP_DOMAIN_TARGET_AAAA
+      - _APP_DOMAIN_TARGET_A
      - _APP_DOMAIN_FUNCTIONS
      - _APP_REDIS_HOST
      - _APP_REDIS_PORT
@@ -121,17 +126,21 @@ services:
       - _APP_STORAGE_WASABI_SECRET
       - _APP_STORAGE_WASABI_REGION
       - _APP_STORAGE_WASABI_BUCKET
-      - _APP_FUNCTIONS_SIZE_LIMIT
+      - _APP_COMPUTE_SIZE_LIMIT
       - _APP_FUNCTIONS_TIMEOUT
-      - _APP_FUNCTIONS_BUILD_TIMEOUT
-      - _APP_FUNCTIONS_CPUS
-      - _APP_FUNCTIONS_MEMORY
+      - _APP_SITES_TIMEOUT
+      - _APP_COMPUTE_BUILD_TIMEOUT
+      - _APP_COMPUTE_CPUS
+      - _APP_COMPUTE_MEMORY
       - _APP_FUNCTIONS_RUNTIMES
+      - _APP_SITES_RUNTIMES
+      - _APP_DOMAIN_SITES
       - _APP_EXECUTOR_SECRET
       - _APP_EXECUTOR_HOST
       - _APP_LOGGING_CONFIG
       - _APP_MAINTENANCE_INTERVAL
       - _APP_MAINTENANCE_DELAY
+      - _APP_MAINTENANCE_START_TIME
       - _APP_MAINTENANCE_RETENTION_EXECUTION
       - _APP_MAINTENANCE_RETENTION_CACHE
       - _APP_MAINTENANCE_RETENTION_ABUSE
@@ -153,9 +162,10 @@ services:
       - _APP_MIGRATIONS_FIREBASE_CLIENT_ID
       - _APP_MIGRATIONS_FIREBASE_CLIENT_SECRET
       - _APP_ASSISTANT_OPENAI_API_KEY
+
   appwrite-console:
     <<: *x-logging
-    image: appwrite/console:5.2.58
+    image: appwrite/console:6.0.13
     restart: unless-stopped
     networks:
       - appwrite
@@ -175,7 +185,7 @@ services:
       - traefik.http.routers.appwrite_console_https.tls=true
 
   appwrite-realtime:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     entrypoint: realtime
     <<: *x-logging
     restart: unless-stopped
@@ -217,7 +227,7 @@ services:
       - _APP_LOGGING_CONFIG
 
   appwrite-worker-audits:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     entrypoint: worker-audits
     <<: *x-logging
     restart: unless-stopped
@@ -242,7 +252,7 @@ services:
       - _APP_LOGGING_CONFIG
 
   appwrite-worker-webhooks:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     entrypoint: worker-webhooks
     <<: *x-logging
     restart: unless-stopped
@@ -269,7 +279,7 @@ services:
       - _APP_LOGGING_CONFIG
 
   appwrite-worker-deletes:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     entrypoint: worker-deletes
     <<: *x-logging
     restart: unless-stopped
@@ -282,6 +292,7 @@ services:
       - appwrite-uploads:/storage/uploads:rw
       - appwrite-cache:/storage/cache:rw
       - appwrite-functions:/storage/functions:rw
+      - appwrite-sites:/storage/sites:rw
       - appwrite-builds:/storage/builds:rw
       - appwrite-certificates:/storage/certificates:rw
     environment:
@@ -330,7 +341,7 @@ services:
       - _APP_EMAIL_CERTIFICATES
 
   appwrite-worker-databases:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     entrypoint: worker-databases
     <<: *x-logging
     restart: unless-stopped
@@ -355,7 +366,7 @@ services:
       - _APP_LOGGING_CONFIG
 
   appwrite-worker-builds:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     entrypoint: worker-builds
     <<: *x-logging
     restart: unless-stopped
@@ -366,7 +377,9 @@ services:
       - mariadb
     volumes:
       - appwrite-functions:/storage/functions:rw
+      - appwrite-sites:/storage/sites:rw
       - appwrite-builds:/storage/builds:rw
+      - appwrite-uploads:/storage/uploads:rw
     environment:
       - _APP_ENV
       - _APP_WORKER_PER_CORE
@@ -387,12 +400,13 @@ services:
       - _APP_VCS_GITHUB_PRIVATE_KEY
       - _APP_VCS_GITHUB_APP_ID
       - _APP_FUNCTIONS_TIMEOUT
-      - _APP_FUNCTIONS_BUILD_TIMEOUT
-      - _APP_FUNCTIONS_CPUS
-      - _APP_FUNCTIONS_MEMORY
-      - _APP_FUNCTIONS_SIZE_LIMIT
+      - _APP_SITES_TIMEOUT
+      - _APP_COMPUTE_BUILD_TIMEOUT
+      - _APP_COMPUTE_CPUS
+      - _APP_COMPUTE_MEMORY
+      - _APP_COMPUTE_SIZE_LIMIT
       - _APP_OPTIONS_FORCE_HTTPS
-      - _APP_OPTIONS_FUNCTIONS_FORCE_HTTPS
+      - _APP_OPTIONS_ROUTER_FORCE_HTTPS
       - _APP_DOMAIN
       - _APP_STORAGE_DEVICE
       - _APP_STORAGE_S3_ACCESS_KEY
@@ -416,9 +430,10 @@ services:
       - _APP_STORAGE_WASABI_SECRET
       - _APP_STORAGE_WASABI_REGION
       - _APP_STORAGE_WASABI_BUCKET
+      - _APP_DOMAIN_SITES
 
   appwrite-worker-certificates:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     entrypoint: worker-certificates
     <<: *x-logging
     restart: unless-stopped
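The `_APP_FUNCTIONS_*` → `_APP_COMPUTE_*` renames above (plus the new `_APP_SITES_*` keys) are easy to miss when a deployment carries a hand-edited `.env` across the 1.6 → 1.7 upgrade. A minimal audit sketch, assuming this repo's `appwrite/code` layout and a `.env` that was copied from an older `.env.example`:

```bash
#!/usr/bin/env bash
# Hypothetical post-upgrade check: warn about new keys that are absent from a
# carried-over .env. Key names come from the .env.example diff above.
cd appwrite/code
for key in _APP_COMPUTE_SIZE_LIMIT _APP_COMPUTE_BUILD_TIMEOUT \
           _APP_COMPUTE_CPUS _APP_COMPUTE_MEMORY \
           _APP_COMPUTE_INACTIVE_THRESHOLD _APP_COMPUTE_MAINTENANCE_INTERVAL \
           _APP_SITES_TIMEOUT _APP_SITES_RUNTIMES _APP_DOMAIN_SITES; do
  grep -q "^${key}=" .env || echo "missing: ${key} (copy the default from .env.example)"
done
```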
@@ -435,7 +450,9 @@ services:
       - _APP_WORKER_PER_CORE
       - _APP_OPENSSL_KEY_V1
       - _APP_DOMAIN
-      - _APP_DOMAIN_TARGET
+      - _APP_DOMAIN_TARGET_CNAME
+      - _APP_DOMAIN_TARGET_AAAA
+      - _APP_DOMAIN_TARGET_A
       - _APP_DOMAIN_FUNCTIONS
       - _APP_EMAIL_CERTIFICATES
       - _APP_REDIS_HOST
@@ -450,7 +467,7 @@ services:
       - _APP_LOGGING_CONFIG
 
   appwrite-worker-functions:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     entrypoint: worker-functions
     <<: *x-logging
     restart: unless-stopped
@@ -476,9 +493,10 @@ services:
       - _APP_DB_USER
       - _APP_DB_PASS
       - _APP_FUNCTIONS_TIMEOUT
-      - _APP_FUNCTIONS_BUILD_TIMEOUT
-      - _APP_FUNCTIONS_CPUS
-      - _APP_FUNCTIONS_MEMORY
+      - _APP_SITES_TIMEOUT
+      - _APP_COMPUTE_BUILD_TIMEOUT
+      - _APP_COMPUTE_CPUS
+      - _APP_COMPUTE_MEMORY
       - _APP_EXECUTOR_SECRET
       - _APP_EXECUTOR_HOST
       - _APP_USAGE_STATS
@@ -487,7 +505,7 @@ services:
       - _APP_LOGGING_CONFIG
 
   appwrite-worker-mails:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     entrypoint: worker-mails
     <<: *x-logging
     restart: unless-stopped
@@ -520,7 +538,7 @@ services:
       - _APP_OPTIONS_FORCE_HTTPS
 
   appwrite-worker-messaging:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     entrypoint: worker-messaging
     <<: *x-logging
     restart: unless-stopped
@@ -570,12 +588,14 @@ services:
       - _APP_STORAGE_WASABI_BUCKET
 
   appwrite-worker-migrations:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     entrypoint: worker-migrations
     <<: *x-logging
     restart: unless-stopped
     networks:
       - appwrite
+    volumes:
+      - appwrite-imports:/storage/imports:rw
     depends_on:
       - mariadb
     environment:
@@ -583,7 +603,9 @@ services:
       - _APP_WORKER_PER_CORE
       - _APP_OPENSSL_KEY_V1
       - _APP_DOMAIN
-      - _APP_DOMAIN_TARGET
+      - _APP_DOMAIN_TARGET_CNAME
+      - _APP_DOMAIN_TARGET_AAAA
+      - _APP_DOMAIN_TARGET_A
       - _APP_EMAIL_SECURITY
       - _APP_REDIS_HOST
       - _APP_REDIS_PORT
@@ -599,7 +621,7 @@ services:
       - _APP_MIGRATIONS_FIREBASE_CLIENT_SECRET
 
   appwrite-task-maintenance:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     entrypoint: maintenance
     <<: *x-logging
     restart: unless-stopped
@@ -611,7 +633,9 @@ services:
       - _APP_ENV
       - _APP_WORKER_PER_CORE
       - _APP_DOMAIN
-      - _APP_DOMAIN_TARGET
+      - _APP_DOMAIN_TARGET_CNAME
+      - _APP_DOMAIN_TARGET_AAAA
+      - _APP_DOMAIN_TARGET_A
       - _APP_DOMAIN_FUNCTIONS
       - _APP_OPENSSL_KEY_V1
       - _APP_REDIS_HOST
@@ -633,14 +657,12 @@ services:
       - _APP_MAINTENANCE_RETENTION_SCHEDULES
 
   appwrite-task-stats-resources:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     entrypoint: stats-resources
     <<: *x-logging
+    restart: unless-stopped
     networks:
       - appwrite
-    volumes:
-      - ./app:/usr/src/code/app
-      - ./src:/usr/src/code/src
     depends_on:
       - redis
       - mariadb
@@ -663,7 +685,7 @@ services:
       - _APP_STATS_RESOURCES_INTERVAL
 
   appwrite-worker-stats-resources:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     entrypoint: worker-stats-resources
     <<: *x-logging
     restart: unless-stopped
@@ -690,7 +712,7 @@ services:
       - _APP_STATS_RESOURCES_INTERVAL
 
   appwrite-worker-stats-usage:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     entrypoint: worker-stats-usage
     <<: *x-logging
     restart: unless-stopped
@@ -717,7 +739,7 @@ services:
       - _APP_USAGE_AGGREGATION_INTERVAL
 
   appwrite-task-scheduler-functions:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     entrypoint: schedule-functions
     <<: *x-logging
     restart: unless-stopped
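The recurring `_APP_DOMAIN_TARGET` → `_APP_DOMAIN_TARGET_CNAME`/`_APP_DOMAIN_TARGET_AAAA`/`_APP_DOMAIN_TARGET_A` substitution in these workers means custom-domain validation is now expressed per DNS record type. A quick sanity check before pointing a custom domain at an upgraded instance — the domain below is hypothetical, and the expected answers are whatever your deployment sets for the three variables:

```bash
#!/usr/bin/env bash
# Compare a custom domain's DNS answers against the configured Appwrite targets.
DOMAIN="app.example.com"   # hypothetical custom domain
echo "CNAME -> $(dig +short CNAME "$DOMAIN")  # expect: _APP_DOMAIN_TARGET_CNAME"
echo "A     -> $(dig +short A     "$DOMAIN")  # expect: _APP_DOMAIN_TARGET_A"
echo "AAAA  -> $(dig +short AAAA  "$DOMAIN")  # expect: _APP_DOMAIN_TARGET_AAAA"
```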
@@ -741,7 +763,7 @@ services:
       - _APP_DB_PASS
 
   appwrite-task-scheduler-executions:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     entrypoint: schedule-executions
     <<: *x-logging
     restart: unless-stopped
@@ -765,7 +787,7 @@ services:
       - _APP_DB_PASS
 
   appwrite-task-scheduler-messages:
-    image: appwrite/appwrite:1.6.2
+    image: appwrite/appwrite:1.7.4
     entrypoint: schedule-messages
     <<: *x-logging
     restart: unless-stopped
@@ -797,12 +819,19 @@ services:
     environment:
       - _APP_ASSISTANT_OPENAI_API_KEY
 
+  appwrite-browser:
+    image: appwrite/browser:0.2.4
+    <<: *x-logging
+    restart: unless-stopped
+    networks:
+      - appwrite
+
   openruntimes-executor:
     hostname: exc1
     <<: *x-logging
     restart: unless-stopped
     stop_signal: SIGINT
-    image: openruntimes/executor:0.6.11
+    image: openruntimes/executor:0.7.14
     networks:
       - appwrite
       - runtimes
@@ -810,18 +839,20 @@ services:
       - /var/run/docker.sock:/var/run/docker.sock
       - appwrite-builds:/storage/builds:rw
       - appwrite-functions:/storage/functions:rw
+      - appwrite-sites:/storage/sites:rw
       # Host mount nessessary to share files between executor and runtimes.
       # It's not possible to share mount file between 2 containers without host mount (copying is too slow)
       - /tmp:/tmp:rw
     environment:
-      - OPR_EXECUTOR_INACTIVE_TRESHOLD=$_APP_FUNCTIONS_INACTIVE_THRESHOLD
-      - OPR_EXECUTOR_MAINTENANCE_INTERVAL=$_APP_FUNCTIONS_MAINTENANCE_INTERVAL
-      - OPR_EXECUTOR_NETWORK=$_APP_FUNCTIONS_RUNTIMES_NETWORK
+      - OPR_EXECUTOR_INACTIVE_TRESHOLD=$_APP_COMPUTE_INACTIVE_THRESHOLD
+      - OPR_EXECUTOR_MAINTENANCE_INTERVAL=$_APP_COMPUTE_MAINTENANCE_INTERVAL
+      - OPR_EXECUTOR_NETWORK=$_APP_COMPUTE_RUNTIMES_NETWORK
       - OPR_EXECUTOR_DOCKER_HUB_USERNAME=$_APP_DOCKER_HUB_USERNAME
       - OPR_EXECUTOR_DOCKER_HUB_PASSWORD=$_APP_DOCKER_HUB_PASSWORD
       - OPR_EXECUTOR_ENV=$_APP_ENV
-      - OPR_EXECUTOR_RUNTIMES=$_APP_FUNCTIONS_RUNTIMES
+      - OPR_EXECUTOR_RUNTIMES=$_APP_FUNCTIONS_RUNTIMES,$_APP_SITES_RUNTIMES
       - OPR_EXECUTOR_SECRET=$_APP_EXECUTOR_SECRET
+      - OPR_EXECUTOR_RUNTIME_VERSIONS=v5
       - OPR_EXECUTOR_LOGGING_CONFIG=$_APP_LOGGING_CONFIG
       - OPR_EXECUTOR_STORAGE_DEVICE=$_APP_STORAGE_DEVICE
       - OPR_EXECUTOR_STORAGE_S3_ACCESS_KEY=$_APP_STORAGE_S3_ACCESS_KEY
@@ -895,7 +926,9 @@ volumes:
   appwrite-redis:
   appwrite-cache:
   appwrite-uploads:
+  appwrite-imports:
   appwrite-certificates:
   appwrite-functions:
+  appwrite-sites:
   appwrite-builds:
   appwrite-config:
diff --git a/appwrite/update.js b/appwrite/update.js
index c8308dae5..50e07435d 100644
--- a/appwrite/update.js
+++ b/appwrite/update.js
@@ -11,3 +11,9 @@ await utils.downloadFile(
 
 await utils.removeContainerNames("./code/docker-compose.yml");
 await utils.removePorts("./code/docker-compose.yml");
+
+await utils.searchReplace(
+  "./code/.env.example",
+  "_APP_DOMAIN=localhost",
+  "_APP_DOMAIN=$(PRIMARY_DOMAIN)"
+);
diff --git a/appwrite/update.sh b/appwrite/update.sh
deleted file mode 100644
index 2ae62446e..000000000
--- a/appwrite/update.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-
-if [ ! -d "./repo" ]; then
-  git clone --depth 1 --branch main --single-branch https://github.com/appwrite/appwrite.git repo
-else
-  cd repo
-  git pull
-  cd ..
-fi
-
-curl -s https://appwrite.io/install/compose > ./code/docker-compose.yml
-curl -s https://appwrite.io/install/env > ./code/.env.example
-
-
diff --git a/dify/code/.env.example b/dify/code/.env.example
index aacef0e1f..226a7b732 100644
--- a/dify/code/.env.example
+++ b/dify/code/.env.example
@@ -34,7 +34,7 @@ APP_API_URL=
 
 # used to display WebAPP API Base Url to the front-end.
 # If empty, it is the same domain.
 # Example: https://app.dify.ai
-APP_WEB_URL=
+APP_WEB_URL=https://$(PRIMARY_DOMAIN)
 
 # File preview or download Url prefix.
 # used to display File preview or download Url to the front-end or as Multi-model inputs;
@@ -47,6 +47,16 @@ APP_WEB_URL=
 # ensuring port 5001 is externally accessible (see docker-compose.yaml).
 FILES_URL=
 
+# INTERNAL_FILES_URL is used for plugin daemon communication within Docker network.
+# Set this to the internal Docker service URL for proper plugin file access.
+# Example: INTERNAL_FILES_URL=http://api:5001
+INTERNAL_FILES_URL=
+
+# Ensure UTF-8 encoding
+LANG=en_US.UTF-8
+LC_ALL=en_US.UTF-8
+PYTHONIOENCODING=utf-8
+
 # ------------------------------
 # Server Configuration
 # ------------------------------
@@ -81,7 +91,7 @@ ENABLE_REQUEST_LOGGING=False
 # A secret key that is used for securely signing the session cookie
 # and encrypting sensitive information on the database.
 # You can generate a strong key using `openssl rand -base64 42`.
-SECRET_KEY=sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U
+SECRET_KEY=
 
 # Password for admin user initialization.
 # If left unset, admin user will not be prompted for a password
@@ -205,10 +215,16 @@ DB_DATABASE=dify
 # The size of the database connection pool.
 # The default is 30 connections, which can be appropriately increased.
 SQLALCHEMY_POOL_SIZE=30
+# The default is 10 connections, which allows temporary overflow beyond the pool size.
+SQLALCHEMY_MAX_OVERFLOW=10
 # Database connection pool recycling time, the default is 3600 seconds.
 SQLALCHEMY_POOL_RECYCLE=3600
 # Whether to print SQL, default is false.
 SQLALCHEMY_ECHO=false
+# If True, will test connections for liveness upon each checkout
+SQLALCHEMY_POOL_PRE_PING=false
+# Whether to enable the Last in first out option or use default FIFO queue if is false
+SQLALCHEMY_POOL_USE_LIFO=false
 
 # Maximum number of connections to the database
 # Default is 100
@@ -250,6 +266,15 @@ REDIS_PORT=6379
 REDIS_USERNAME=
 REDIS_PASSWORD=difyai123456
 REDIS_USE_SSL=false
+# SSL configuration for Redis (when REDIS_USE_SSL=true)
+REDIS_SSL_CERT_REQS=CERT_NONE
+# Options: CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
+REDIS_SSL_CA_CERTS=
+# Path to CA certificate file for SSL verification
+REDIS_SSL_CERTFILE=
+# Path to client certificate file for SSL authentication
+REDIS_SSL_KEYFILE=
+# Path to client private key file for SSL authentication
 REDIS_DB=0
 
 # Whether to use Redis Sentinel mode.
@@ -274,17 +299,20 @@ REDIS_CLUSTERS_PASSWORD=
 # Celery Configuration
 # ------------------------------
 
-# Use redis as the broker, and redis db 1 for celery broker.
-# Format as follows: `redis://<redis_username>:<redis_password>@<redis_host>:<redis_port>/<redis_database>`
+# Use standalone redis as the broker, and redis db 1 for celery broker. (redis_username is usually set by defualt as empty)
+# Format as follows: `redis://<redis_username>:<redis_password>@<redis_host>:<redis_port>/<redis_database>`.
 # Example: redis://:difyai123456@redis:6379/1
-# If use Redis Sentinel, format as follows: `sentinel://<sentinel_username>:<sentinel_password>@<sentinel_host>:<sentinel_port>/<redis_database>`
-# Example: sentinel://localhost:26379/1;sentinel://localhost:26380/1;sentinel://localhost:26381/1
+# If use Redis Sentinel, format as follows: `sentinel://<sentinel_username>:<sentinel_password>@<sentinel_host>:<sentinel_port>/<redis_database>`
+# For high availability, you can configure multiple Sentinel nodes (if provided) separated by semicolons like below example:
+# Example: sentinel://:difyai123456@localhost:26379/1;sentinel://:difyai12345@localhost:26379/1;sentinel://:difyai12345@localhost:26379/1
 CELERY_BROKER_URL=redis://:difyai123456@redis:6379/1
+CELERY_BACKEND=redis
 BROKER_USE_SSL=false
 
 # If you are using Redis Sentinel for high availability, configure the following settings.
 CELERY_USE_SENTINEL=false
 CELERY_SENTINEL_MASTER_NAME=
+CELERY_SENTINEL_PASSWORD=
 CELERY_SENTINEL_SOCKET_TIMEOUT=0.1
 
 # ------------------------------
@@ -316,6 +344,25 @@ OPENDAL_SCHEME=fs
 # Configurations for OpenDAL Local File System.
 OPENDAL_FS_ROOT=storage
 
+# ClickZetta Volume Configuration (for storage backend)
+# To use ClickZetta Volume as storage backend, set STORAGE_TYPE=clickzetta-volume
+# Note: ClickZetta Volume will reuse the existing CLICKZETTA_* connection parameters
+
+# Volume type selection (three types available):
+# - user: Personal/small team use, simple config, user-level permissions
+# - table: Enterprise multi-tenant, smart routing, table-level + user-level permissions
+# - external: Data lake integration, external storage connection, volume-level + storage-level permissions
+CLICKZETTA_VOLUME_TYPE=user
+
+# External Volume name (required only when TYPE=external)
+CLICKZETTA_VOLUME_NAME=
+
+# Table Volume table prefix (used only when TYPE=table)
+CLICKZETTA_VOLUME_TABLE_PREFIX=dataset_
+
+# Dify file directory prefix (isolates from other apps, recommended to keep default)
+CLICKZETTA_VOLUME_DIFY_PREFIX=dify_km
+
 # S3 Configuration
 #
 S3_ENDPOINT=
@@ -399,8 +446,10 @@ SUPABASE_URL=your-server-url
 # ------------------------------
 
 # The type of vector store to use.
-# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`, `opengauss`, `tablestore`,`vastbase`,`tidb`,`tidb_on_qdrant`,`baidu`,`lindorm`,`huawei_cloud`,`upstash`.
+# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`, `opengauss`, `tablestore`,`vastbase`,`tidb`,`tidb_on_qdrant`,`baidu`,`lindorm`,`huawei_cloud`,`upstash`, `matrixone`, `clickzetta`.
 VECTOR_STORE=weaviate
+# Prefix used to create collection name in vector database
+VECTOR_INDEX_NAME_PREFIX=Vector_index
 
 # The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
 WEAVIATE_ENDPOINT=http://weaviate:8080
@@ -412,6 +461,7 @@ QDRANT_API_KEY=difyai123456
 QDRANT_CLIENT_TIMEOUT=20
 QDRANT_GRPC_ENABLED=false
 QDRANT_GRPC_PORT=6334
+QDRANT_REPLICATION_FACTOR=1
 
 # Milvus configuration. Only available when VECTOR_STORE is `milvus`.
 # The milvus uri.
@@ -489,6 +539,13 @@ TIDB_VECTOR_USER=
 TIDB_VECTOR_PASSWORD=
 TIDB_VECTOR_DATABASE=dify
 
+# Matrixone vector configurations.
+MATRIXONE_HOST=matrixone
+MATRIXONE_PORT=6001
+MATRIXONE_USER=dump
+MATRIXONE_PASSWORD=111
+MATRIXONE_DATABASE=dify
+
 # Tidb on qdrant configuration, only available when VECTOR_STORE is `tidb_on_qdrant`
 TIDB_ON_QDRANT_URL=http://127.0.0.1
 TIDB_ON_QDRANT_API_KEY=dify
@@ -531,6 +588,7 @@ RELYT_DATABASE=postgres
 OPENSEARCH_HOST=opensearch
 OPENSEARCH_PORT=9200
 OPENSEARCH_SECURE=true
+OPENSEARCH_VERIFY_CERTS=true
 OPENSEARCH_AUTH_METHOD=basic
 OPENSEARCH_USER=admin
 OPENSEARCH_PASSWORD=admin
@@ -555,6 +613,17 @@ ELASTICSEARCH_USERNAME=elastic
 ELASTICSEARCH_PASSWORD=elastic
 KIBANA_PORT=5601
 
+# Using ElasticSearch Cloud Serverless, or not.
+ELASTICSEARCH_USE_CLOUD=false
+ELASTICSEARCH_CLOUD_URL=YOUR-ELASTICSEARCH_CLOUD_URL
+ELASTICSEARCH_API_KEY=YOUR-ELASTICSEARCH_API_KEY
+
+ELASTICSEARCH_VERIFY_CERTS=False
+ELASTICSEARCH_CA_CERTS=
+ELASTICSEARCH_REQUEST_TIMEOUT=100000
+ELASTICSEARCH_RETRY_ON_TIMEOUT=True
+ELASTICSEARCH_MAX_RETRIES=10
+
 # baidu vector configurations, only available when VECTOR_STORE is `baidu`
 BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287
 BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS=30000
@@ -614,6 +683,21 @@ TABLESTORE_ENDPOINT=https://instance-name.cn-hangzhou.ots.aliyuncs.com
 TABLESTORE_INSTANCE_NAME=instance-name
 TABLESTORE_ACCESS_KEY_ID=xxx
 TABLESTORE_ACCESS_KEY_SECRET=xxx
+TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE=false
+
+# Clickzetta configuration, only available when VECTOR_STORE is `clickzetta`
+CLICKZETTA_USERNAME=
+CLICKZETTA_PASSWORD=
+CLICKZETTA_INSTANCE=
+CLICKZETTA_SERVICE=api.clickzetta.com
+CLICKZETTA_WORKSPACE=quick_start
+CLICKZETTA_VCLUSTER=default_ap
+CLICKZETTA_SCHEMA=dify
+CLICKZETTA_BATCH_SIZE=100
+CLICKZETTA_ENABLE_INVERTED_INDEX=true
+CLICKZETTA_ANALYZER_TYPE=chinese
+CLICKZETTA_ANALYZER_MODE=smart
+CLICKZETTA_VECTOR_DISTANCE_FUNCTION=cosine_distance
 
 # ------------------------------
 # Knowledge Configuration
@@ -695,6 +779,12 @@ API_SENTRY_PROFILES_SAMPLE_RATE=1.0
 # If not set, Sentry error reporting will be disabled.
 WEB_SENTRY_DSN=
 
+# Plugin_daemon Service Sentry DSN address, default is empty, when empty,
+# all monitoring information is not reported to Sentry.
+# If not set, Sentry error reporting will be disabled.
+PLUGIN_SENTRY_ENABLED=false
+PLUGIN_SENTRY_DSN=
+
 # ------------------------------
 # Notion Integration Configuration
 # Variables can be obtained by applying for Notion integration: https://www.notion.so/my-integrations
@@ -717,10 +807,11 @@ NOTION_INTERNAL_SECRET=
 # Mail related configuration
 # ------------------------------
 
-# Mail type, support: resend, smtp
+# Mail type, support: resend, smtp, sendgrid
 MAIL_TYPE=resend
 
 # Default send from email address, if not specified
+# If using SendGrid, use the 'from' field for authentication if necessary.
 MAIL_DEFAULT_SEND_FROM=
 
 # API-Key for the Resend email provider, used when MAIL_TYPE is `resend`.
@@ -736,6 +827,9 @@ SMTP_PASSWORD=
 SMTP_USE_TLS=true
 SMTP_OPPORTUNISTIC_TLS=false
 
+# Sendgid configuration
+SENDGRID_API_KEY=
+
 # ------------------------------
 # Others Configuration
 # ------------------------------
@@ -749,6 +843,8 @@ INVITE_EXPIRY_HOURS=72
 
 # Reset password token valid time (minutes),
 RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5
+CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES=5
+OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES=5
 
 # The sandbox service endpoint.
 CODE_EXECUTION_ENDPOINT=http://sandbox:8194
@@ -780,11 +876,41 @@ WORKFLOW_FILE_UPLOAD_LIMIT=10
 # hybrid: Save new data to object storage, read from both object storage and RDBMS
 WORKFLOW_NODE_EXECUTION_STORAGE=rdbms
 
+# Repository configuration
+# Core workflow execution repository implementation
+# Options:
+#   - core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository (default)
+#   - core.repositories.celery_workflow_execution_repository.CeleryWorkflowExecutionRepository
+CORE_WORKFLOW_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository
+
+# Core workflow node execution repository implementation
+# Options:
+#   - core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository (default)
+#   - core.repositories.celery_workflow_node_execution_repository.CeleryWorkflowNodeExecutionRepository
+CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository
+
+# API workflow run repository implementation
+API_WORKFLOW_RUN_REPOSITORY=repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository
+
+# API workflow node execution repository implementation
+API_WORKFLOW_NODE_EXECUTION_REPOSITORY=repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository
+
+# Workflow log cleanup configuration
+# Enable automatic cleanup of workflow run logs to manage database size
+WORKFLOW_LOG_CLEANUP_ENABLED=false
+# Number of days to retain workflow run logs (default: 30 days)
+WORKFLOW_LOG_RETENTION_DAYS=30
+# Batch size for workflow log cleanup operations (default: 100)
+WORKFLOW_LOG_CLEANUP_BATCH_SIZE=100
+
 # HTTP request node in workflow configuration
 HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760
 HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576
 HTTP_REQUEST_NODE_SSL_VERIFY=True
 
+# Respect X-* headers to redirect clients
+RESPECT_XFORWARD_HEADERS_ENABLED=false
+
 # SSRF Proxy server HTTP URL
 SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128
 # SSRF Proxy server HTTPS URL
@@ -800,7 +926,7 @@ MAX_TOOLS_NUM=10
 MAX_PARALLEL_LIMIT=10
 
 # The maximum number of iterations for agent setting
-MAX_ITERATIONS_NUM=5
+MAX_ITERATIONS_NUM=99
 
 # ------------------------------
 # Environment Variables for web Service
@@ -809,11 +935,18 @@ MAX_ITERATIONS_NUM=5
 # The timeout for the text generation in millisecond
 TEXT_GENERATION_TIMEOUT_MS=60000
 
+# Allow rendering unsafe URLs which have "data:" scheme.
+ALLOW_UNSAFE_DATA_SCHEME=false
+
+# Maximum number of tree depth in the workflow
+MAX_TREE_DEPTH=50
+
 # ------------------------------
 # Environment Variables for db Service
 # ------------------------------
 
-PGUSER=${DB_USERNAME}
+# The name of the default postgres user.
+POSTGRES_USER=${DB_USERNAME}
 # The password for the default postgres user.
 POSTGRES_PASSWORD=${DB_PASSWORD}
 # The name of the default postgres database.
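The `PGUSER` → `POSTGRES_USER` switch here matters because the two variables behave differently: `POSTGRES_USER` is read by the official `postgres` image at first initialization to create the superuser, while `PGUSER` is only a libpq client default. A sketch of a quick verification, assuming the stack is already up and the commands run from `dify/code`:

```bash
#!/usr/bin/env bash
# Confirm the db container received the renamed variable and accepts connections.
docker compose exec db env | grep '^POSTGRES_'
docker compose exec db pg_isready -U "${POSTGRES_USER:-postgres}" -d "${POSTGRES_DB:-dify}"
```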
@@ -940,7 +1073,7 @@ NGINX_SSL_PROTOCOLS=TLSv1.1 TLSv1.2 TLSv1.3
 
 # Nginx performance tuning
 NGINX_WORKER_PROCESSES=auto
-NGINX_CLIENT_MAX_BODY_SIZE=15M
+NGINX_CLIENT_MAX_BODY_SIZE=100M
 NGINX_KEEPALIVE_TIMEOUT=65
 
 # Proxy settings
@@ -1049,13 +1182,16 @@ MARKETPLACE_API_URL=https://marketplace.dify.ai
 
 FORCE_VERIFYING_SIGNATURE=true
 
+PLUGIN_STDIO_BUFFER_SIZE=1024
+PLUGIN_STDIO_MAX_BUFFER_SIZE=5242880
+
 PLUGIN_PYTHON_ENV_INIT_TIMEOUT=120
 PLUGIN_MAX_EXECUTION_TIMEOUT=600
 # PIP_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple
 PIP_MIRROR_URL=
 
 # https://github.com/langgenius/dify-plugin-daemon/blob/main/.env.example
-# Plugin storage type, local aws_s3 tencent_cos azure_blob aliyun_oss
+# Plugin storage type, local aws_s3 tencent_cos azure_blob aliyun_oss volcengine_tos
 PLUGIN_STORAGE_TYPE=local
 PLUGIN_STORAGE_LOCAL_ROOT=/app/storage
 PLUGIN_WORKING_PATH=/app/storage/cwd
@@ -1065,6 +1201,7 @@ PLUGIN_MEDIA_CACHE_PATH=assets
 # Plugin oss bucket
 PLUGIN_STORAGE_OSS_BUCKET=
 # Plugin oss s3 credentials
+PLUGIN_S3_USE_AWS=false
 PLUGIN_S3_USE_AWS_MANAGED_IAM=false
 PLUGIN_S3_ENDPOINT=
 PLUGIN_S3_USE_PATH_STYLE=false
@@ -1085,11 +1222,18 @@ PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID=
 PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET=
 PLUGIN_ALIYUN_OSS_AUTH_VERSION=v4
 PLUGIN_ALIYUN_OSS_PATH=
+# Plugin oss volcengine tos
+PLUGIN_VOLCENGINE_TOS_ENDPOINT=
+PLUGIN_VOLCENGINE_TOS_ACCESS_KEY=
+PLUGIN_VOLCENGINE_TOS_SECRET_KEY=
+PLUGIN_VOLCENGINE_TOS_REGION=
 
 # ------------------------------
 # OTLP Collector Configuration
 # ------------------------------
 ENABLE_OTEL=false
+OTLP_TRACE_ENDPOINT=
+OTLP_METRIC_ENDPOINT=
 OTLP_BASE_ENDPOINT=http://localhost:4318
 OTLP_API_KEY=
 OTEL_EXPORTER_OTLP_PROTOCOL=
@@ -1104,3 +1248,24 @@ OTEL_METRIC_EXPORT_TIMEOUT=30000
 
 # Prevent Clickjacking
 ALLOW_EMBED=false
+
+# Dataset queue monitor configuration
+QUEUE_MONITOR_THRESHOLD=200
+# You can configure multiple ones, separated by commas. eg: test1@dify.ai,test2@dify.ai
+QUEUE_MONITOR_ALERT_EMAILS=
+# Monitor interval in minutes, default is 30 minutes
+QUEUE_MONITOR_INTERVAL=30
+
+# Swagger UI configuration
+SWAGGER_UI_ENABLED=true
+SWAGGER_UI_PATH=/swagger-ui.html
+
+# Celery schedule tasks configuration
+ENABLE_CLEAN_EMBEDDING_CACHE_TASK=false
+ENABLE_CLEAN_UNUSED_DATASETS_TASK=false
+ENABLE_CREATE_TIDB_SERVERLESS_TASK=false
+ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK=false
+ENABLE_CLEAN_MESSAGES=false
+ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK=false
+ENABLE_DATASETS_QUEUE_MONITOR=false
+ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK=true
diff --git a/dify/code/README.md b/dify/code/README.md
index 22dfe2c91..b5c46eb9f 100644
--- a/dify/code/README.md
+++ b/dify/code/README.md
@@ -4,7 +4,7 @@
 
 ### What's Updated
 
-- **Certbot Container**: `docker-compose.yaml` now contains `certbot` for managing SSL certificates. This container automatically renews certificates and ensures secure HTTPS connections.
+- **Certbot Container**: `docker-compose.yaml` now contains `certbot` for managing SSL certificates. This container automatically renews certificates and ensures secure HTTPS connections.\
  For more information, refer `docker/certbot/README.md`.
 
 - **Persistent Environment Variables**: Environment variables are now managed through a `.env` file, ensuring that your configurations persist across deployments.
@@ -13,43 +13,44 @@ Welcome to the new `docker` directory for deploying Dify using Docker Compose. T
T > The `.env` file is a crucial component in Docker and Docker Compose environments, serving as a centralized configuration file where you can define environment variables that are accessible to the containers at runtime. This file simplifies the management of environment settings across different stages of development, testing, and production, providing consistency and ease of configuration to deployments. - **Unified Vector Database Services**: All vector database services are now managed from a single Docker Compose file `docker-compose.yaml`. You can switch between different vector databases by setting the `VECTOR_STORE` environment variable in your `.env` file. + - **Mandatory .env File**: A `.env` file is now required to run `docker compose up`. This file is crucial for configuring your deployment and for any custom settings to persist through upgrades. ### How to Deploy Dify with `docker-compose.yaml` 1. **Prerequisites**: Ensure Docker and Docker Compose are installed on your system. -2. **Environment Setup**: - - Navigate to the `docker` directory. - - Copy the `.env.example` file to a new file named `.env` by running `cp .env.example .env`. - - Customize the `.env` file as needed. Refer to the `.env.example` file for detailed configuration options. -3. **Running the Services**: - - Execute `docker compose up` from the `docker` directory to start the services. - - To specify a vector database, set the `VECTOR_STORE` variable in your `.env` file to your desired vector database service, such as `milvus`, `weaviate`, or `opensearch`. -4. **SSL Certificate Setup**: - - Refer `docker/certbot/README.md` to set up SSL certificates using Certbot. -5. **OpenTelemetry Collector Setup**: +1. **Environment Setup**: + - Navigate to the `docker` directory. + - Copy the `.env.example` file to a new file named `.env` by running `cp .env.example .env`. + - Customize the `.env` file as needed. Refer to the `.env.example` file for detailed configuration options. +1. **Running the Services**: + - Execute `docker compose up` from the `docker` directory to start the services. + - To specify a vector database, set the `VECTOR_STORE` variable in your `.env` file to your desired vector database service, such as `milvus`, `weaviate`, or `opensearch`. +1. **SSL Certificate Setup**: + - Refer `docker/certbot/README.md` to set up SSL certificates using Certbot. +1. **OpenTelemetry Collector Setup**: - Change `ENABLE_OTEL` to `true` in `.env`. - Configure `OTLP_BASE_ENDPOINT` properly. ### How to Deploy Middleware for Developing Dify 1. **Middleware Setup**: - - Use the `docker-compose.middleware.yaml` for setting up essential middleware services like databases and caches. - - Navigate to the `docker` directory. - - Ensure the `middleware.env` file is created by running `cp middleware.env.example middleware.env` (refer to the `middleware.env.example` file). -2. **Running Middleware Services**: - - Navigate to the `docker` directory. - - Execute `docker compose -f docker-compose.middleware.yaml --profile weaviate -p dify up -d` to start the middleware services. (Change the profile to other vector database if you are not using weaviate) + - Use the `docker-compose.middleware.yaml` for setting up essential middleware services like databases and caches. + - Navigate to the `docker` directory. + - Ensure the `middleware.env` file is created by running `cp middleware.env.example middleware.env` (refer to the `middleware.env.example` file). +1. **Running Middleware Services**: + - Navigate to the `docker` directory. 
+   - Execute `docker compose -f docker-compose.middleware.yaml --profile weaviate -p dify up -d` to start the middleware services. (Change the profile to other vector database if you are not using weaviate)
 
 ### Migration for Existing Users
 
 For users migrating from the `docker-legacy` setup:
 
 1. **Review Changes**: Familiarize yourself with the new `.env` configuration and Docker Compose setup.
-2. **Transfer Customizations**:
-    - If you have customized configurations such as `docker-compose.yaml`, `ssrf_proxy/squid.conf`, or `nginx/conf.d/default.conf`, you will need to reflect these changes in the `.env` file you create.
-3. **Data Migration**:
-    - Ensure that data from services like databases and caches is backed up and migrated appropriately to the new structure if necessary.
+1. **Transfer Customizations**:
+   - If you have customized configurations such as `docker-compose.yaml`, `ssrf_proxy/squid.conf`, or `nginx/conf.d/default.conf`, you will need to reflect these changes in the `.env` file you create.
+1. **Data Migration**:
+   - Ensure that data from services like databases and caches is backed up and migrated appropriately to the new structure if necessary.
 
 ### Overview of `.env`
 
@@ -64,39 +65,49 @@ For users migrating from the `docker-legacy` setup:
 The `.env.example` file provided in the Docker setup is extensive and covers a wide range of configuration options. It is structured into several sections, each pertaining to different aspects of the application and its services. Here are some of the key sections and variables:
 
 1. **Common Variables**:
-    - `CONSOLE_API_URL`, `SERVICE_API_URL`: URLs for different API services.
-    - `APP_WEB_URL`: Frontend application URL.
-    - `FILES_URL`: Base URL for file downloads and previews.
-2. **Server Configuration**:
-    - `LOG_LEVEL`, `DEBUG`, `FLASK_DEBUG`: Logging and debug settings.
-    - `SECRET_KEY`: A key for encrypting session cookies and other sensitive data.
+   - `CONSOLE_API_URL`, `SERVICE_API_URL`: URLs for different API services.
+   - `APP_WEB_URL`: Frontend application URL.
+   - `FILES_URL`: Base URL for file downloads and previews.
+
+1. **Server Configuration**:
+
+   - `LOG_LEVEL`, `DEBUG`, `FLASK_DEBUG`: Logging and debug settings.
+   - `SECRET_KEY`: A key for encrypting session cookies and other sensitive data.
+
+1. **Database Configuration**:
+
+   - `DB_USERNAME`, `DB_PASSWORD`, `DB_HOST`, `DB_PORT`, `DB_DATABASE`: PostgreSQL database credentials and connection details.
+
+1. **Redis Configuration**:
+
+   - `REDIS_HOST`, `REDIS_PORT`, `REDIS_PASSWORD`: Redis server connection settings.
+
+1. **Celery Configuration**:
+
+   - `CELERY_BROKER_URL`: Configuration for Celery message broker.
+
+1. **Storage Configuration**:
+
+   - `STORAGE_TYPE`, `S3_BUCKET_NAME`, `AZURE_BLOB_ACCOUNT_NAME`: Settings for file storage options like local, S3, Azure Blob, etc.
+
+1. **Vector Database Configuration**:
-6. **Storage Configuration**: - - `STORAGE_TYPE`, `S3_BUCKET_NAME`, `AZURE_BLOB_ACCOUNT_NAME`: Settings for file storage options like local, S3, Azure Blob, etc. +1. **OpenTelemetry Configuration**: -7. **Vector Database Configuration**: - - `VECTOR_STORE`: Type of vector database (e.g., `weaviate`, `milvus`). - - Specific settings for each vector store like `WEAVIATE_ENDPOINT`, `MILVUS_URI`. + - `ENABLE_OTEL`: Enable OpenTelemetry collector in api. + - `OTLP_BASE_ENDPOINT`: Endpoint for your OTLP exporter. -8. **CORS Configuration**: - - `WEB_API_CORS_ALLOW_ORIGINS`, `CONSOLE_CORS_ALLOW_ORIGINS`: Settings for cross-origin resource sharing. +1. **Other Service-Specific Environment Variables**: -9. **OpenTelemetry Configuration**: - - `ENABLE_OTEL`: Enable OpenTelemetry collector in api. - - `OTLP_BASE_ENDPOINT`: Endpoint for your OTLP exporter. - -10. **Other Service-Specific Environment Variables**: - - Each service like `nginx`, `redis`, `db`, and vector databases have specific environment variables that are directly referenced in the `docker-compose.yaml`. + - Each service like `nginx`, `redis`, `db`, and vector databases have specific environment variables that are directly referenced in the `docker-compose.yaml`. ### Additional Information diff --git a/dify/code/certbot/README.md b/dify/code/certbot/README.md index 21be34b33..62b1eee39 100644 --- a/dify/code/certbot/README.md +++ b/dify/code/certbot/README.md @@ -2,12 +2,12 @@ ## Short description -docker compose certbot configurations with Backward compatibility (without certbot container). +docker compose certbot configurations with Backward compatibility (without certbot container).\ Use `docker compose --profile certbot up` to use this features. ## The simplest way for launching new servers with SSL certificates -1. Get letsencrypt certs +1. Get letsencrypt certs\ set `.env` values ```properties NGINX_SSL_CERT_FILENAME=fullchain.pem @@ -25,7 +25,7 @@ Use `docker compose --profile certbot up` to use this features. ```shell docker compose exec -it certbot /bin/sh /update-cert.sh ``` -2. Edit `.env` file and `docker compose --profile certbot up` again. +1. Edit `.env` file and `docker compose --profile certbot up` again.\ set `.env` value additionally ```properties NGINX_HTTPS_ENABLED=true @@ -34,7 +34,7 @@ Use `docker compose --profile certbot up` to use this features. ```shell docker compose --profile certbot up -d --no-deps --force-recreate nginx ``` - Then you can access your serve with HTTPS. + Then you can access your serve with HTTPS.\ [https://your_domain.com](https://your_domain.com) ## SSL certificates renewal diff --git a/dify/code/docker-compose-template.yaml b/dify/code/docker-compose-template.yaml index ceb32e4ab..0e695e4fc 100644 --- a/dify/code/docker-compose-template.yaml +++ b/dify/code/docker-compose-template.yaml @@ -2,7 +2,7 @@ x-shared-env: &shared-api-worker-env services: # API service api: - image: langgenius/dify-api:1.4.0 + image: langgenius/dify-api:1.8.0 restart: always environment: # Use the shared environment variables. @@ -31,7 +31,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:1.4.0 + image: langgenius/dify-api:1.8.0 restart: always environment: # Use the shared environment variables. @@ -55,9 +55,28 @@ services: - ssrf_proxy_network - default + # worker_beat service + # Celery beat for scheduling periodic tasks. 
+  worker_beat:
+    image: langgenius/dify-api:1.8.0
+    restart: always
+    environment:
+      # Use the shared environment variables.
+      <<: *shared-api-worker-env
+      # Startup mode, 'worker_beat' starts the Celery beat for scheduling periodic tasks.
+      MODE: beat
+    depends_on:
+      db:
+        condition: service_healthy
+      redis:
+        condition: service_started
+    networks:
+      - ssrf_proxy_network
+      - default
+
   # Frontend web application.
   web:
-    image: langgenius/dify-web:1.4.0
+    image: langgenius/dify-web:1.8.0
     restart: always
     environment:
       CONSOLE_API_URL: ${CONSOLE_API_URL:-}
@@ -67,6 +86,7 @@ services:
       TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
       CSP_WHITELIST: ${CSP_WHITELIST:-}
       ALLOW_EMBED: ${ALLOW_EMBED:-false}
+      ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false}
       MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
       MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai}
       TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
@@ -75,7 +95,8 @@ services:
       LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100}
       MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10}
       MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10}
-      MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-5}
+      MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99}
+      MAX_TREE_DEPTH: ${MAX_TREE_DEPTH:-50}
       ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true}
       ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true}
       ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true}
@@ -84,7 +105,7 @@ services:
     image: postgres:15-alpine
     restart: always
     environment:
-      PGUSER: ${PGUSER:-postgres}
+      POSTGRES_USER: ${POSTGRES_USER:-postgres}
       POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
       POSTGRES_DB: ${POSTGRES_DB:-dify}
       PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
@@ -142,7 +163,7 @@ services:
 
   # plugin daemon
   plugin_daemon:
-    image: langgenius/dify-plugin-daemon:0.0.10-local
+    image: langgenius/dify-plugin-daemon:0.2.0-local
     restart: always
     environment:
       # Use the shared environment variables.
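The new `worker_beat` service runs the same API image with `MODE: beat`, i.e. a Celery beat scheduler process; the periodic jobs it would enqueue stay opt-in behind the `ENABLE_*_TASK` flags added to `.env`. A minimal bring-up sketch (service name as defined above):

```bash
#!/usr/bin/env bash
# Start only the scheduler and watch its startup output.
docker compose up -d worker_beat
docker compose logs -f worker_beat
```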
@@ -160,6 +181,8 @@ services:
       FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
       PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
       PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
+      PLUGIN_STDIO_BUFFER_SIZE: ${PLUGIN_STDIO_BUFFER_SIZE:-1024}
+      PLUGIN_STDIO_MAX_BUFFER_SIZE: ${PLUGIN_STDIO_MAX_BUFFER_SIZE:-5242880}
       PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
       PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local}
       PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage}
@@ -168,6 +191,7 @@ services:
       PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets}
       PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-}
       S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false}
+      S3_USE_AWS: ${PLUGIN_S3_USE_AWS:-false}
       S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-}
       S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false}
       AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-}
@@ -184,6 +208,12 @@ services:
       ALIYUN_OSS_ACCESS_KEY_SECRET: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET:-}
       ALIYUN_OSS_AUTH_VERSION: ${PLUGIN_ALIYUN_OSS_AUTH_VERSION:-v4}
       ALIYUN_OSS_PATH: ${PLUGIN_ALIYUN_OSS_PATH:-}
+      VOLCENGINE_TOS_ENDPOINT: ${PLUGIN_VOLCENGINE_TOS_ENDPOINT:-}
+      VOLCENGINE_TOS_ACCESS_KEY: ${PLUGIN_VOLCENGINE_TOS_ACCESS_KEY:-}
+      VOLCENGINE_TOS_SECRET_KEY: ${PLUGIN_VOLCENGINE_TOS_SECRET_KEY:-}
+      VOLCENGINE_TOS_REGION: ${PLUGIN_VOLCENGINE_TOS_REGION:-}
+      SENTRY_ENABLED: ${PLUGIN_SENTRY_ENABLED:-false}
+      SENTRY_DSN: ${PLUGIN_SENTRY_DSN:-}
     ports:
       - "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
     volumes:
@@ -260,7 +290,7 @@ services:
       NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
       NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
       NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
-      NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
+      NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-100M}
       NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
       NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
       NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
@@ -430,7 +460,7 @@ services:
 
   # OceanBase vector database
   oceanbase:
-    image: oceanbase/oceanbase-ce:4.3.5.1-101000042025031818
+    image: oceanbase/oceanbase-ce:4.3.5-lts
     container_name: oceanbase
     profiles:
       - oceanbase
@@ -444,9 +474,16 @@ services:
       OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
       OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
       OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
-      MODE: MINI
+      OB_SERVER_IP: 127.0.0.1
+      MODE: mini
     ports:
       - "${OCEANBASE_VECTOR_PORT:-2881}:2881"
+    healthcheck:
+      test: [ 'CMD-SHELL', 'obclient -h127.0.0.1 -P2881 -uroot@test -p$${OB_TENANT_PASSWORD} -e "SELECT 1;"' ]
+      interval: 10s
+      retries: 30
+      start_period: 30s
+      timeout: 10s
 
   # Oracle vector database
   oracle:
@@ -506,7 +543,7 @@ services:
 
   milvus-standalone:
     container_name: milvus-standalone
-    image: milvusdb/milvus:v2.5.0-beta
+    image: milvusdb/milvus:v2.5.15
     profiles:
       - milvus
     command: [ 'milvus', 'run', 'standalone' ]
@@ -605,6 +642,18 @@ services:
     ports:
       - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}
 
+  # Matrixone vector store.
+  matrixone:
+    hostname: matrixone
+    image: matrixorigin/matrixone:2.1.1
+    profiles:
+      - matrixone
+    restart: always
+    volumes:
+      - ./volumes/matrixone/data:/mo-data
+    ports:
+      - ${MATRIXONE_PORT:-6001}:${MATRIXONE_PORT:-6001}
+
   # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
   # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
   elasticsearch:
diff --git a/dify/code/docker-compose.middleware.yaml b/dify/code/docker-compose.middleware.yaml
index 8690a5092..9f7cc7258 100644
--- a/dify/code/docker-compose.middleware.yaml
+++ b/dify/code/docker-compose.middleware.yaml
@@ -20,7 +20,7 @@ services:
     ports:
       - "${EXPOSE_POSTGRES_PORT:-5432}:5432"
     healthcheck:
-      test: [ "CMD", "pg_isready" ]
+      test: [ 'CMD', 'pg_isready', '-h', 'db', '-U', '${PGUSER:-postgres}', '-d', '${POSTGRES_DB:-dify}' ]
       interval: 1s
       timeout: 3s
       retries: 30
@@ -71,7 +71,7 @@ services:
 
   # plugin daemon
   plugin_daemon:
-    image: langgenius/dify-plugin-daemon:0.0.10-local
+    image: langgenius/dify-plugin-daemon:0.2.0-local
    restart: always
     env_file:
       - ./middleware.env
@@ -104,6 +104,7 @@ services:
       PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages}
       PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets}
       PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-}
+      S3_USE_AWS: ${PLUGIN_S3_USE_AWS:-false}
       S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false}
       S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-}
       S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false}
@@ -121,6 +122,10 @@ services:
       ALIYUN_OSS_ACCESS_KEY_SECRET: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET:-}
       ALIYUN_OSS_AUTH_VERSION: ${PLUGIN_ALIYUN_OSS_AUTH_VERSION:-v4}
       ALIYUN_OSS_PATH: ${PLUGIN_ALIYUN_OSS_PATH:-}
+      VOLCENGINE_TOS_ENDPOINT: ${PLUGIN_VOLCENGINE_TOS_ENDPOINT:-}
+      VOLCENGINE_TOS_ACCESS_KEY: ${PLUGIN_VOLCENGINE_TOS_ACCESS_KEY:-}
+      VOLCENGINE_TOS_SECRET_KEY: ${PLUGIN_VOLCENGINE_TOS_SECRET_KEY:-}
+      VOLCENGINE_TOS_REGION: ${PLUGIN_VOLCENGINE_TOS_REGION:-}
     ports:
       - "${EXPOSE_PLUGIN_DAEMON_PORT:-5002}:${PLUGIN_DAEMON_PORT:-5002}"
       - "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
diff --git a/dify/code/docker-compose.yaml b/dify/code/docker-compose.yaml
index 90dd29cec..7f24227a1 100644
--- a/dify/code/docker-compose.yaml
+++ b/dify/code/docker-compose.yaml
@@ -11,6 +11,10 @@ x-shared-env: &shared-api-worker-env
   APP_API_URL: ${APP_API_URL:-}
   APP_WEB_URL: ${APP_WEB_URL:-}
   FILES_URL: ${FILES_URL:-}
+  INTERNAL_FILES_URL: ${INTERNAL_FILES_URL:-}
+  LANG: ${LANG:-en_US.UTF-8}
+  LC_ALL: ${LC_ALL:-en_US.UTF-8}
+  PYTHONIOENCODING: ${PYTHONIOENCODING:-utf-8}
   LOG_LEVEL: ${LOG_LEVEL:-INFO}
   LOG_FILE: ${LOG_FILE:-/app/logs/server.log}
   LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20}
@@ -53,8 +57,11 @@ x-shared-env: &shared-api-worker-env
   DB_PORT: ${DB_PORT:-5432}
   DB_DATABASE: ${DB_DATABASE:-dify}
   SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30}
+  SQLALCHEMY_MAX_OVERFLOW: ${SQLALCHEMY_MAX_OVERFLOW:-10}
   SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600}
   SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false}
+  SQLALCHEMY_POOL_PRE_PING: ${SQLALCHEMY_POOL_PRE_PING:-false}
+  SQLALCHEMY_POOL_USE_LIFO: ${SQLALCHEMY_POOL_USE_LIFO:-false}
   POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-100}
   POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-128MB}
   POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB}
@@ -65,6 +72,10 @@ x-shared-env: &shared-api-worker-env
   REDIS_USERNAME: ${REDIS_USERNAME:-}
   REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456}
   REDIS_USE_SSL: ${REDIS_USE_SSL:-false}
+  REDIS_SSL_CERT_REQS: ${REDIS_SSL_CERT_REQS:-CERT_NONE}
+  REDIS_SSL_CA_CERTS: ${REDIS_SSL_CA_CERTS:-}
+  REDIS_SSL_CERTFILE: ${REDIS_SSL_CERTFILE:-}
+  REDIS_SSL_KEYFILE: ${REDIS_SSL_KEYFILE:-}
   REDIS_DB: ${REDIS_DB:-0}
   REDIS_USE_SENTINEL: ${REDIS_USE_SENTINEL:-false}
   REDIS_SENTINELS: ${REDIS_SENTINELS:-}
@@ -76,15 +87,21 @@ x-shared-env: &shared-api-worker-env
   REDIS_CLUSTERS: ${REDIS_CLUSTERS:-}
   REDIS_CLUSTERS_PASSWORD: ${REDIS_CLUSTERS_PASSWORD:-}
   CELERY_BROKER_URL: ${CELERY_BROKER_URL:-redis://:difyai123456@redis:6379/1}
+  CELERY_BACKEND: ${CELERY_BACKEND:-redis}
   BROKER_USE_SSL: ${BROKER_USE_SSL:-false}
   CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false}
   CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-}
+  CELERY_SENTINEL_PASSWORD: ${CELERY_SENTINEL_PASSWORD:-}
   CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1}
   WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*}
   CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*}
   STORAGE_TYPE: ${STORAGE_TYPE:-opendal}
   OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs}
   OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage}
+  CLICKZETTA_VOLUME_TYPE: ${CLICKZETTA_VOLUME_TYPE:-user}
+  CLICKZETTA_VOLUME_NAME: ${CLICKZETTA_VOLUME_NAME:-}
+  CLICKZETTA_VOLUME_TABLE_PREFIX: ${CLICKZETTA_VOLUME_TABLE_PREFIX:-dataset_}
+  CLICKZETTA_VOLUME_DIFY_PREFIX: ${CLICKZETTA_VOLUME_DIFY_PREFIX:-dify_km}
   S3_ENDPOINT: ${S3_ENDPOINT:-}
   S3_REGION: ${S3_REGION:-us-east-1}
   S3_BUCKET_NAME: ${S3_BUCKET_NAME:-difyai}
@@ -131,6 +148,7 @@ x-shared-env: &shared-api-worker-env
   SUPABASE_API_KEY: ${SUPABASE_API_KEY:-your-access-key}
   SUPABASE_URL: ${SUPABASE_URL:-your-server-url}
   VECTOR_STORE: ${VECTOR_STORE:-weaviate}
+  VECTOR_INDEX_NAME_PREFIX: ${VECTOR_INDEX_NAME_PREFIX:-Vector_index}
   WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080}
   WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
   QDRANT_URL: ${QDRANT_URL:-http://qdrant:6333}
@@ -138,6 +156,7 @@ x-shared-env: &shared-api-worker-env
   QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20}
   QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED:-false}
   QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334}
+  QDRANT_REPLICATION_FACTOR: ${QDRANT_REPLICATION_FACTOR:-1}
   MILVUS_URI: ${MILVUS_URI:-http://host.docker.internal:19530}
   MILVUS_DATABASE: ${MILVUS_DATABASE:-}
   MILVUS_TOKEN: ${MILVUS_TOKEN:-}
@@ -194,6 +213,11 @@ x-shared-env: &shared-api-worker-env
   TIDB_VECTOR_USER: ${TIDB_VECTOR_USER:-}
   TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD:-}
   TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE:-dify}
+  MATRIXONE_HOST: ${MATRIXONE_HOST:-matrixone}
+  MATRIXONE_PORT: ${MATRIXONE_PORT:-6001}
+  MATRIXONE_USER: ${MATRIXONE_USER:-dump}
+  MATRIXONE_PASSWORD: ${MATRIXONE_PASSWORD:-111}
+  MATRIXONE_DATABASE: ${MATRIXONE_DATABASE:-dify}
   TIDB_ON_QDRANT_URL: ${TIDB_ON_QDRANT_URL:-http://127.0.0.1}
   TIDB_ON_QDRANT_API_KEY: ${TIDB_ON_QDRANT_API_KEY:-dify}
   TIDB_ON_QDRANT_CLIENT_TIMEOUT: ${TIDB_ON_QDRANT_CLIENT_TIMEOUT:-20}
@@ -227,6 +251,7 @@ x-shared-env: &shared-api-worker-env
   OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch}
   OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200}
   OPENSEARCH_SECURE: ${OPENSEARCH_SECURE:-true}
+  OPENSEARCH_VERIFY_CERTS: ${OPENSEARCH_VERIFY_CERTS:-true}
   OPENSEARCH_AUTH_METHOD: ${OPENSEARCH_AUTH_METHOD:-basic}
   OPENSEARCH_USER: ${OPENSEARCH_USER:-admin}
   OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD:-admin}
@@ -245,6 +270,14 @@ x-shared-env: &shared-api-worker-env
   ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic}
   ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
   KIBANA_PORT: ${KIBANA_PORT:-5601}
+  ELASTICSEARCH_USE_CLOUD: ${ELASTICSEARCH_USE_CLOUD:-false}
+  ELASTICSEARCH_CLOUD_URL: ${ELASTICSEARCH_CLOUD_URL:-YOUR-ELASTICSEARCH_CLOUD_URL}
+  ELASTICSEARCH_API_KEY: ${ELASTICSEARCH_API_KEY:-YOUR-ELASTICSEARCH_API_KEY}
+  ELASTICSEARCH_VERIFY_CERTS: ${ELASTICSEARCH_VERIFY_CERTS:-False}
+  ELASTICSEARCH_CA_CERTS: ${ELASTICSEARCH_CA_CERTS:-}
+  ELASTICSEARCH_REQUEST_TIMEOUT: ${ELASTICSEARCH_REQUEST_TIMEOUT:-100000}
+  ELASTICSEARCH_RETRY_ON_TIMEOUT: ${ELASTICSEARCH_RETRY_ON_TIMEOUT:-True}
+  ELASTICSEARCH_MAX_RETRIES: ${ELASTICSEARCH_MAX_RETRIES:-10}
   BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287}
   BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000}
   BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root}
@@ -288,6 +321,19 @@ x-shared-env: &shared-api-worker-env
   TABLESTORE_INSTANCE_NAME: ${TABLESTORE_INSTANCE_NAME:-instance-name}
   TABLESTORE_ACCESS_KEY_ID: ${TABLESTORE_ACCESS_KEY_ID:-xxx}
   TABLESTORE_ACCESS_KEY_SECRET: ${TABLESTORE_ACCESS_KEY_SECRET:-xxx}
+  TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE: ${TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE:-false}
+  CLICKZETTA_USERNAME: ${CLICKZETTA_USERNAME:-}
+  CLICKZETTA_PASSWORD: ${CLICKZETTA_PASSWORD:-}
+  CLICKZETTA_INSTANCE: ${CLICKZETTA_INSTANCE:-}
+  CLICKZETTA_SERVICE: ${CLICKZETTA_SERVICE:-api.clickzetta.com}
+  CLICKZETTA_WORKSPACE: ${CLICKZETTA_WORKSPACE:-quick_start}
+  CLICKZETTA_VCLUSTER: ${CLICKZETTA_VCLUSTER:-default_ap}
+  CLICKZETTA_SCHEMA: ${CLICKZETTA_SCHEMA:-dify}
+  CLICKZETTA_BATCH_SIZE: ${CLICKZETTA_BATCH_SIZE:-100}
+  CLICKZETTA_ENABLE_INVERTED_INDEX: ${CLICKZETTA_ENABLE_INVERTED_INDEX:-true}
+  CLICKZETTA_ANALYZER_TYPE: ${CLICKZETTA_ANALYZER_TYPE:-chinese}
+  CLICKZETTA_ANALYZER_MODE: ${CLICKZETTA_ANALYZER_MODE:-smart}
+  CLICKZETTA_VECTOR_DISTANCE_FUNCTION: ${CLICKZETTA_VECTOR_DISTANCE_FUNCTION:-cosine_distance}
   UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15}
   UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5}
   ETL_TYPE: ${ETL_TYPE:-dify}
@@ -306,6 +352,8 @@ x-shared-env: &shared-api-worker-env
   API_SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
   API_SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
   WEB_SENTRY_DSN: ${WEB_SENTRY_DSN:-}
+  PLUGIN_SENTRY_ENABLED: ${PLUGIN_SENTRY_ENABLED:-false}
+  PLUGIN_SENTRY_DSN: ${PLUGIN_SENTRY_DSN:-}
   NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public}
   NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-}
   NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-}
@@ -320,9 +368,12 @@ x-shared-env: &shared-api-worker-env
   SMTP_PASSWORD: ${SMTP_PASSWORD:-}
   SMTP_USE_TLS: ${SMTP_USE_TLS:-true}
   SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false}
+  SENDGRID_API_KEY: ${SENDGRID_API_KEY:-}
   INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000}
   INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72}
   RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5}
+  CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES: ${CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES:-5}
+  OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES: ${OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES:-5}
   CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194}
   CODE_EXECUTION_API_KEY: ${CODE_EXECUTION_API_KEY:-dify-sandbox}
   CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807}
@@ -344,17 +395,27 @@ x-shared-env: &shared-api-worker-env
   WORKFLOW_PARALLEL_DEPTH_LIMIT: ${WORKFLOW_PARALLEL_DEPTH_LIMIT:-3}
   WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10}
   WORKFLOW_NODE_EXECUTION_STORAGE: ${WORKFLOW_NODE_EXECUTION_STORAGE:-rdbms}
+  CORE_WORKFLOW_EXECUTION_REPOSITORY: ${CORE_WORKFLOW_EXECUTION_REPOSITORY:-core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository}
+  CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY: ${CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY:-core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository}
+  API_WORKFLOW_RUN_REPOSITORY: ${API_WORKFLOW_RUN_REPOSITORY:-repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository}
+  API_WORKFLOW_NODE_EXECUTION_REPOSITORY: ${API_WORKFLOW_NODE_EXECUTION_REPOSITORY:-repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository}
+  WORKFLOW_LOG_CLEANUP_ENABLED: ${WORKFLOW_LOG_CLEANUP_ENABLED:-false}
+  WORKFLOW_LOG_RETENTION_DAYS: ${WORKFLOW_LOG_RETENTION_DAYS:-30}
+  WORKFLOW_LOG_CLEANUP_BATCH_SIZE: ${WORKFLOW_LOG_CLEANUP_BATCH_SIZE:-100}
   HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760}
   HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576}
   HTTP_REQUEST_NODE_SSL_VERIFY: ${HTTP_REQUEST_NODE_SSL_VERIFY:-True}
+  RESPECT_XFORWARD_HEADERS_ENABLED: ${RESPECT_XFORWARD_HEADERS_ENABLED:-false}
   SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128}
   SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128}
   LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100}
   MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10}
   MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10}
-  MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-5}
+  MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99}
   TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
-  PGUSER: ${PGUSER:-${DB_USERNAME}}
+  ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false}
+  MAX_TREE_DEPTH: ${MAX_TREE_DEPTH:-50}
+  POSTGRES_USER: ${POSTGRES_USER:-${DB_USERNAME}}
   POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-${DB_PASSWORD}}
   POSTGRES_DB: ${POSTGRES_DB:-${DB_DATABASE}}
   PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
@@ -410,7 +471,7 @@ x-shared-env: &shared-api-worker-env
   NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
   NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
   NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
-  NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
+  NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-100M}
   NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
   NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
   NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
@@ -455,6 +516,8 @@ x-shared-env: &shared-api-worker-env
   MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true}
   MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
   FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
+  PLUGIN_STDIO_BUFFER_SIZE: ${PLUGIN_STDIO_BUFFER_SIZE:-1024}
+  PLUGIN_STDIO_MAX_BUFFER_SIZE: ${PLUGIN_STDIO_MAX_BUFFER_SIZE:-5242880}
   PLUGIN_PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
   PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
   PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
@@ -465,6 +528,7 @@ x-shared-env: &shared-api-worker-env
   PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages}
   PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets}
   PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-}
+  PLUGIN_S3_USE_AWS: ${PLUGIN_S3_USE_AWS:-false}
   PLUGIN_S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false}
   PLUGIN_S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-}
${PLUGIN_S3_USE_PATH_STYLE:-false} @@ -482,7 +546,13 @@ x-shared-env: &shared-api-worker-env PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET:-} PLUGIN_ALIYUN_OSS_AUTH_VERSION: ${PLUGIN_ALIYUN_OSS_AUTH_VERSION:-v4} PLUGIN_ALIYUN_OSS_PATH: ${PLUGIN_ALIYUN_OSS_PATH:-} + PLUGIN_VOLCENGINE_TOS_ENDPOINT: ${PLUGIN_VOLCENGINE_TOS_ENDPOINT:-} + PLUGIN_VOLCENGINE_TOS_ACCESS_KEY: ${PLUGIN_VOLCENGINE_TOS_ACCESS_KEY:-} + PLUGIN_VOLCENGINE_TOS_SECRET_KEY: ${PLUGIN_VOLCENGINE_TOS_SECRET_KEY:-} + PLUGIN_VOLCENGINE_TOS_REGION: ${PLUGIN_VOLCENGINE_TOS_REGION:-} ENABLE_OTEL: ${ENABLE_OTEL:-false} + OTLP_TRACE_ENDPOINT: ${OTLP_TRACE_ENDPOINT:-} + OTLP_METRIC_ENDPOINT: ${OTLP_METRIC_ENDPOINT:-} OTLP_BASE_ENDPOINT: ${OTLP_BASE_ENDPOINT:-http://localhost:4318} OTLP_API_KEY: ${OTLP_API_KEY:-} OTEL_EXPORTER_OTLP_PROTOCOL: ${OTEL_EXPORTER_OTLP_PROTOCOL:-} @@ -495,11 +565,24 @@ x-shared-env: &shared-api-worker-env OTEL_BATCH_EXPORT_TIMEOUT: ${OTEL_BATCH_EXPORT_TIMEOUT:-10000} OTEL_METRIC_EXPORT_TIMEOUT: ${OTEL_METRIC_EXPORT_TIMEOUT:-30000} ALLOW_EMBED: ${ALLOW_EMBED:-false} + QUEUE_MONITOR_THRESHOLD: ${QUEUE_MONITOR_THRESHOLD:-200} + QUEUE_MONITOR_ALERT_EMAILS: ${QUEUE_MONITOR_ALERT_EMAILS:-} + QUEUE_MONITOR_INTERVAL: ${QUEUE_MONITOR_INTERVAL:-30} + SWAGGER_UI_ENABLED: ${SWAGGER_UI_ENABLED:-true} + SWAGGER_UI_PATH: ${SWAGGER_UI_PATH:-/swagger-ui.html} + ENABLE_CLEAN_EMBEDDING_CACHE_TASK: ${ENABLE_CLEAN_EMBEDDING_CACHE_TASK:-false} + ENABLE_CLEAN_UNUSED_DATASETS_TASK: ${ENABLE_CLEAN_UNUSED_DATASETS_TASK:-false} + ENABLE_CREATE_TIDB_SERVERLESS_TASK: ${ENABLE_CREATE_TIDB_SERVERLESS_TASK:-false} + ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK: ${ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK:-false} + ENABLE_CLEAN_MESSAGES: ${ENABLE_CLEAN_MESSAGES:-false} + ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK: ${ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK:-false} + ENABLE_DATASETS_QUEUE_MONITOR: ${ENABLE_DATASETS_QUEUE_MONITOR:-false} + ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK: ${ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK:-true} services: # API service api: - image: langgenius/dify-api:1.4.0 + image: langgenius/dify-api:1.8.0 restart: always environment: # Use the shared environment variables. @@ -528,7 +611,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:1.4.0 + image: langgenius/dify-api:1.8.0 restart: always environment: # Use the shared environment variables. @@ -552,9 +635,28 @@ services: - ssrf_proxy_network - default + # worker_beat service + # Celery beat for scheduling periodic tasks. + worker_beat: + image: langgenius/dify-api:1.8.0 + restart: always + environment: + # Use the shared environment variables. + <<: *shared-api-worker-env + # Startup mode, 'worker_beat' starts the Celery beat for scheduling periodic tasks. + MODE: beat + depends_on: + db: + condition: service_healthy + redis: + condition: service_started + networks: + - ssrf_proxy_network + - default + # Frontend web application. 
web: - image: langgenius/dify-web:1.4.0 + image: langgenius/dify-web:1.8.0 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} @@ -564,6 +666,7 @@ services: TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} CSP_WHITELIST: ${CSP_WHITELIST:-} ALLOW_EMBED: ${ALLOW_EMBED:-false} + ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false} MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai} TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-} @@ -572,7 +675,8 @@ services: LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100} MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10} MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10} - MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-5} + MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99} + MAX_TREE_DEPTH: ${MAX_TREE_DEPTH:-50} ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true} ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true} ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true} @@ -581,7 +685,7 @@ services: image: postgres:15-alpine restart: always environment: - PGUSER: ${PGUSER:-postgres} + POSTGRES_USER: ${POSTGRES_USER:-postgres} POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456} POSTGRES_DB: ${POSTGRES_DB:-dify} PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata} @@ -649,7 +753,7 @@ services: # plugin daemon plugin_daemon: - image: langgenius/dify-plugin-daemon:0.0.10-local + image: langgenius/dify-plugin-daemon:0.2.0-local restart: always environment: # Use the shared environment variables. @@ -667,6 +771,8 @@ services: FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true} PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120} PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600} + PLUGIN_STDIO_BUFFER_SIZE: ${PLUGIN_STDIO_BUFFER_SIZE:-1024} + PLUGIN_STDIO_MAX_BUFFER_SIZE: ${PLUGIN_STDIO_MAX_BUFFER_SIZE:-5242880} PIP_MIRROR_URL: ${PIP_MIRROR_URL:-} PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local} PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage} @@ -675,6 +781,7 @@ services: PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets} PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-} S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false} + S3_USE_AWS: ${PLUGIN_S3_USE_AWS:-false} S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-} S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false} AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-} @@ -691,6 +798,12 @@ services: ALIYUN_OSS_ACCESS_KEY_SECRET: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET:-} ALIYUN_OSS_AUTH_VERSION: ${PLUGIN_ALIYUN_OSS_AUTH_VERSION:-v4} ALIYUN_OSS_PATH: ${PLUGIN_ALIYUN_OSS_PATH:-} + VOLCENGINE_TOS_ENDPOINT: ${PLUGIN_VOLCENGINE_TOS_ENDPOINT:-} + VOLCENGINE_TOS_ACCESS_KEY: ${PLUGIN_VOLCENGINE_TOS_ACCESS_KEY:-} + VOLCENGINE_TOS_SECRET_KEY: ${PLUGIN_VOLCENGINE_TOS_SECRET_KEY:-} + VOLCENGINE_TOS_REGION: ${PLUGIN_VOLCENGINE_TOS_REGION:-} + SENTRY_ENABLED: ${PLUGIN_SENTRY_ENABLED:-false} + SENTRY_DSN: ${PLUGIN_SENTRY_DSN:-} volumes: - ./volumes/plugin_daemon:/app/storage depends_on: @@ -779,7 +892,7 @@ services: NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key} NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3} NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto} - NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M} + NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-100M} NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65} NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s} 
NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s} @@ -949,7 +1062,7 @@ services: # OceanBase vector database oceanbase: - image: oceanbase/oceanbase-ce:4.3.5.1-101000042025031818 + image: oceanbase/oceanbase-ce:4.3.5-lts profiles: - oceanbase restart: always @@ -962,7 +1075,19 @@ services: OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai} - MODE: MINI + OB_SERVER_IP: 127.0.0.1 + MODE: mini + healthcheck: + test: + [ + 'CMD-SHELL', + 'obclient -h127.0.0.1 -P2881 -uroot@test -p$${OB_TENANT_PASSWORD} -e + "SELECT 1;"' + ] + interval: 10s + retries: 30 + start_period: 30s + timeout: 10s # Oracle vector database oracle: @@ -1020,7 +1145,7 @@ services: - milvus milvus-standalone: - image: milvusdb/milvus:v2.5.0-beta + image: milvusdb/milvus:v2.5.15 profiles: - milvus command: [ 'milvus', 'run', 'standalone' ] @@ -1110,6 +1235,16 @@ services: - ./volumes/myscale/log:/var/log/clickhouse-server - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml + # Matrixone vector store. + matrixone: + hostname: matrixone + image: matrixorigin/matrixone:2.1.1 + profiles: + - matrixone + restart: always + volumes: + - ./volumes/matrixone/data:/mo-data + # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites elasticsearch: diff --git a/dify/code/middleware.env.example b/dify/code/middleware.env.example index 2437026ee..2eba62f59 100644 --- a/dify/code/middleware.env.example +++ b/dify/code/middleware.env.example @@ -1,7 +1,7 @@ # ------------------------------ # Environment Variables for db Service # ------------------------------ -PGUSER=postgres +POSTGRES_USER=postgres # The password for the default postgres user. POSTGRES_PASSWORD=difyai123456 # The name of the default postgres database. 
@@ -109,7 +109,7 @@ EXPOSE_PLUGIN_DEBUGGING_HOST=localhost
 EXPOSE_PLUGIN_DEBUGGING_PORT=5003
 
 PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1
-PLUGIN_DIFY_INNER_API_URL=http://api:5001
+PLUGIN_DIFY_INNER_API_URL=http://host.docker.internal:5001
 
 MARKETPLACE_ENABLED=true
 MARKETPLACE_API_URL=https://marketplace.dify.ai
@@ -133,6 +133,7 @@ PLUGIN_MEDIA_CACHE_PATH=assets
 PLUGIN_STORAGE_OSS_BUCKET=
 # Plugin oss s3 credentials
 PLUGIN_S3_USE_AWS_MANAGED_IAM=false
+PLUGIN_S3_USE_AWS=false
 PLUGIN_S3_ENDPOINT=
 PLUGIN_S3_USE_PATH_STYLE=false
 PLUGIN_AWS_ACCESS_KEY=
@@ -152,3 +153,8 @@ PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID=
 PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET=
 PLUGIN_ALIYUN_OSS_AUTH_VERSION=v4
 PLUGIN_ALIYUN_OSS_PATH=
+# Plugin oss volcengine tos
+PLUGIN_VOLCENGINE_TOS_ENDPOINT=
+PLUGIN_VOLCENGINE_TOS_ACCESS_KEY=
+PLUGIN_VOLCENGINE_TOS_SECRET_KEY=
+PLUGIN_VOLCENGINE_TOS_REGION=
diff --git a/dify/code/nginx/conf.d/default.conf.template b/dify/code/nginx/conf.d/default.conf.template
index a458412d1..48d7da8cf 100644
--- a/dify/code/nginx/conf.d/default.conf.template
+++ b/dify/code/nginx/conf.d/default.conf.template
@@ -39,7 +39,10 @@ server {
     proxy_pass http://web:3000;
     include proxy.conf;
   }
-
+  location /mcp {
+    proxy_pass http://api:5001;
+    include proxy.conf;
+  }
   # placeholder for acme challenge location
   location ${ACME_CHALLENGE_LOCATION}
diff --git a/dify/update.js b/dify/update.js
index faff6b0be..48d1867a2 100644
--- a/dify/update.js
+++ b/dify/update.js
@@ -4,3 +4,15 @@ await utils.cloneOrPullRepo({ repo: "https://github.com/langgenius/dify.git" });
 await utils.copyDir("./repo/docker", "./code");
 await utils.removeContainerNames("./code/docker-compose.yaml");
 await utils.removePorts("./code/docker-compose.yaml");
+
+await utils.searchReplace(
+  "./code/.env.example",
+  "APP_WEB_URL=",
+  "APP_WEB_URL=https://$(PRIMARY_DOMAIN)"
+);
+
+await utils.searchReplace(
+  "./code/.env.example",
+  "SECRET_KEY=sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U",
+  "SECRET_KEY="
+);
diff --git a/dify/update.sh b/dify/update.sh
deleted file mode 100644
index 433d23c2d..000000000
--- a/dify/update.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-
-if [ ! -d "./repo" ]; then
-  git clone --depth 1 --branch main --single-branch https://github.com/langgenius/dify.git repo
-else
-  cd repo
-  git pull
-  cd ..
-fi
-
-cp -r ./repo/docker/. ./code
-
-
diff --git a/plane/README.md b/plane/README.md
index 86611bc18..ce423fb4c 100644
--- a/plane/README.md
+++ b/plane/README.md
@@ -1,4 +1,42 @@
-# Plane
+# Plane - Project Management Script
 
-- copied from https://github.com/makeplane/plane
-- removed `ports`
+This directory contains the Plane project management tool deployment for Easypanel.
+
+## Update Process
+
+To update Plane to the latest version, follow these steps in order:
+
+### Step 1: Run the Setup Script for Upgrade
+
+First, run the `setup.sh` script and choose the upgrade option:
+
+```
+./setup.sh
+```
+
+When prompted, select option **5** for "Upgrade". This will:
+
+* Check for the latest available release
+* Download the latest stable version
+
+### Step 2: Run the Update Script
+
+After the setup script completes successfully, run the `update.js` script:
+
+```
+node update.js
+```
+
+This script will:
+
+* Remove the existing `code` directory and rename the newly fetched directory to `code`
+* Rename `plane.env` to `.env.example`
+* Rename `docker-compose.yaml` to `docker-compose.yml`
+* Remove container names and ports from docker-compose.yml
+* Update environment variables to use Easypanel's `PRIMARY_DOMAIN` variable
+
+## Important Notes
+
+* **Always run setup.sh first** - This ensures you get the latest official Plane release
+* **Then run update.js** - This applies Easypanel-specific customizations
+* The update process will preserve your existing data and configuration
\ No newline at end of file
diff --git a/plane/code/.env.example b/plane/code/.env.example
index 78031a4ac..fd92cb43c 100644
--- a/plane/code/.env.example
+++ b/plane/code/.env.example
@@ -1,5 +1,6 @@
-APP_DOMAIN=localhost
-APP_RELEASE=stable
+APP_DOMAIN=$(PRIMARY_DOMAIN)
+APP_RELEASE=v0.28.0
+SSL=false
 
 WEB_REPLICAS=1
 SPACE_REPLICAS=1
@@ -9,10 +10,12 @@ WORKER_REPLICAS=1
 BEAT_WORKER_REPLICAS=1
 LIVE_REPLICAS=1
 
-NGINX_PORT=80
-WEB_URL=http://${APP_DOMAIN}
+LISTEN_HTTP_PORT=80
+LISTEN_HTTPS_PORT=443
+
+WEB_URL=https://$(PRIMARY_DOMAIN)
 DEBUG=0
-CORS_ALLOWED_ORIGINS=http://${APP_DOMAIN}
+CORS_ALLOWED_ORIGINS=https://$(PRIMARY_DOMAIN)
 API_BASE_URL=http://api:8000
 
 #DB SETTINGS
@@ -38,6 +41,19 @@ RABBITMQ_PASSWORD=plane
 RABBITMQ_VHOST=plane
 AMQP_URL=
 
+# If an SSL cert is to be generated, set CERT_EMAIL="email"
+CERT_ACME_CA=https://acme-v02.api.letsencrypt.org/directory
+TRUSTED_PROXIES=0.0.0.0/0
+SITE_ADDRESS=:80
+CERT_EMAIL=
+
+
+
+# For DNS-challenge-based certificate generation, set CERT_ACME_DNS and CERT_EMAIL
+# CERT_ACME_DNS="acme_dns"
+CERT_ACME_DNS=
+
+
 # Secret Key
 SECRET_KEY=60gp0byfz2dvffa45cxl20p1scy9xbpf6d8c5y0geejgkyp1b5
 
@@ -61,3 +77,6 @@ MINIO_ENDPOINT_SSL=0
 
 # API key rate limit
 API_KEY_RATE_LIMIT=60/minute
+DOCKERHUB_USER=artifacts.plane.so/makeplane
+PULL_POLICY=if_not_present
+CUSTOM_BUILD=false
diff --git a/plane/code/README.md b/plane/code/README.md
deleted file mode 100644
index e5a8089e6..000000000
--- a/plane/code/README.md
+++ /dev/null
@@ -1,605 +0,0 @@
-# Self Hosting
-
-In this guide, we will walk you through the process of setting up a self-hosted environment. Self-hosting allows you to have full control over your applications and data. It's a great way to ensure privacy, control, and customization.
-
-We will cover two main options for setting up your self-hosted environment: using a cloud server or using your desktop. For the cloud server, we will use an AWS EC2 instance. For the desktop, we will use Docker to create a local environment.
-
-Let's get started!
-
-## Setting up Docker Environment
-
-### Option 1 - Using Cloud Server
-
-Best way to start is to create EC2 machine on AWS. It must have minimum of 2vCPU and 4GB RAM.
-
-Run the below command to install docker engine.
-
-`curl -fsSL https://get.docker.com | sh -`
-
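The one-liner above pipes Docker's convenience script straight into a shell. If you would rather inspect it first, the same install works as two explicit steps; a minimal equivalent sketch:

```bash
# Fetch Docker's convenience install script to a file, review it, then run it
curl -fsSL https://get.docker.com -o get-docker.sh
less get-docker.sh   # optional: read the script before executing it
sh ./get-docker.sh
```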
----
-
-### Option 2 - Using Desktop
-
-#### For Mac
-
-1. Download Docker Desktop for Mac from the Docker Hub.
-2. Double-click the downloaded `.dmg` file and drag the Docker app icon to the Applications folder.
-3. Open Docker Desktop from the Applications folder. You might be asked to provide your system password to install additional software.
-
-#### For Windows:
-
-1. Download Docker Desktop for Windows from the Docker Hub.
-2. Run the installer and follow the instructions. You might be asked to enable Hyper-V and "Containers" Windows features.
-3. Open Docker Desktop. You might be asked to log out and log back in, or restart your machine, for changes to take effect.
-
-After installation, you can verify the installation by opening a terminal (Command Prompt on Windows, Terminal app on Mac) and running the command `docker --version`. This should display the installed version of Docker.
-
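Beyond checking the version string, a quick way to confirm the Docker daemon can pull and run containers end to end is the standard `hello-world` image; a minimal check:

```bash
docker --version              # prints the installed client version
docker run --rm hello-world   # pulls a tiny test image and runs it once
```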
- ---- - -## Installing Plane - -Installing plane is a very easy and minimal step process. - -### Prerequisite - -- Docker installed and running -- OS with bash scripting enabled (Ubuntu, Linux AMI, macos). Windows systems need to have [gitbash](https://git-scm.com/download/win) -- User context used must have access to docker services. In most cases, use sudo su to switch as root user -- Use the terminal (or gitbash) window to run all the future steps - -### Downloading Latest Release - -``` -mkdir plane-selfhost - -cd plane-selfhost -``` - -#### For *Docker Compose* based setup - -``` -curl -fsSL -o setup.sh https://github.com/makeplane/plane/releases/latest/download/setup.sh - -chmod +x setup.sh -``` - -#### For *Docker Swarm* based setup - -``` -curl -fsSL -o setup.sh https://github.com/makeplane/plane/releases/latest/download/swarm.sh - -chmod +x setup.sh -``` - ---- - -### Proceed with setup - -Above steps will set you ready to install and start plane services. - -Lets get started by running the `./setup.sh` command. - -This will prompt you with the below options. - -#### Docker Compose -```bash -Select an Action you want to perform: - 1) Install (x86_64) - 2) Start - 3) Stop - 4) Restart - 5) Upgrade - 6) View Logs - 7) Backup Data - 8) Exit - -Action [2]: 1 -``` - -For the 1st time setup, type "1" as action input. - -This will create a folder `plane-app` and will download 2 files inside that - -- `docker-compose.yaml` -- `plane.env` - -Again the `options [1-8]` will be popped up, and this time hit `8` to exit. - -#### Docker Swarm - -```bash -Select an Action you want to perform: - 1) Deploy Stack - 2) Remove Stack - 3) View Stack Status - 4) Redeploy Stack - 5) Upgrade - 6) View Logs - 7) Exit - -Action [3]: 1 -``` - -For the 1st time setup, type "1" as action input. - -This will create a create a folder `plane-app` and will download 2 files inside that - -- `docker-compose.yaml` -- `plane.env` - -Again the `options [1-7]` will be popped up, and this time hit `7` to exit. - ---- - -### Continue with setup - Environment Settings - -Before proceeding, we suggest used to review `.env` file and set the values. -Below are the most import keys you must refer to. _You can use any text editor to edit this file_. - -> `NGINX_PORT` - This is default set to `80`. Make sure the port you choose to use is not preoccupied. (e.g `NGINX_PORT=8080`) - -> `WEB_URL` - This is default set to `http://localhost`. Change this to the FQDN you plan to use along with NGINX_PORT (eg. `https://plane.example.com:8080` or `http://[IP-ADDRESS]:8080`) - -> `CORS_ALLOWED_ORIGINS` - This is default set to `http://localhost`. Change this to the FQDN you plan to use along with NGINX_PORT (eg. `https://plane.example.com:8080` or `http://[IP-ADDRESS]:8080`) - -There are many other settings you can play with, but we suggest you configure `EMAIL SETTINGS` as it will enable you to invite your teammates onto the platform. - ---- - -### Continue with setup - Start Server (Docker Compose) - -Lets again run the `./setup.sh` command. You will again be prompted with the below options. This time select `2` to start the sevices - -```bash -Select a Action you want to perform: - 1) Install (x86_64) - 2) Start - 3) Stop - 4) Restart - 5) Upgrade - 6) View Logs - 7) Backup Data - 8) Exit - -Action [2]: 2 -``` - -Expect something like this. -![Downloading docker images](images/download.png) - -Be patient as it might take sometime based on download speed and system configuration. 
If all goes well, you must see something like this - -![Downloading completed](images/started.png) - -This is the confirmation that all images were downloaded and the services are up & running. - -You have successfully self hosted `Plane` instance. Access the application by going to IP or domain you have configured it (e.g `https://plane.example.com:8080` or `http://[IP-ADDRESS]:8080`) - ---- - -### Stopping the Server / Remove Stack - -In case you want to make changes to `plane.env` variables, we suggest you to stop the services before doing that. - -#### Docker Compose - -Lets again run the `./setup.sh` command. You will again be prompted with the below options. This time select `3` to stop the sevices - -```bash -Select a Action you want to perform: - 1) Install (x86_64) - 2) Start - 3) Stop - 4) Restart - 5) Upgrade - 6) View Logs - 7) Backup Data - 8) Exit - -Action [2]: 3 -``` - -If all goes well, you must see something like this - -![Stop Services](images/stopped.png) - -#### Docker Swarm - -Lets again run the `./setup.sh` command. You will again be prompted with the below options. This time select `2` to stop the sevices - -```bash -Select an Action you want to perform: - 1) Deploy Stack - 2) Remove Stack - 3) View Stack Status - 4) Redeploy Stack - 5) Upgrade - 6) View Logs - 7) Exit - -Action [3]: 2 -``` - -If all goes well, you will see the confirmation from docker cli - ---- - -### Restarting the Server / Redeploy Stack - -In case you want to make changes to `plane.env` variables, without stopping the server or you noticed some abnormalies in services, you can restart the services with `RESTART` / `REDEPLOY` option. - -Lets again run the `./setup.sh` command. You will again be prompted with the below options. This time select `4` to restart the sevices - -#### Docker Compose -```bash -Select a Action you want to perform: - 1) Install (x86_64) - 2) Start - 3) Stop - 4) Restart - 5) Upgrade - 6) View Logs - 7) Backup Data - 8) Exit - -Action [2]: 4 -``` - -If all goes well, you must see something like this - -![Restart Services](images/restart.png) - -#### Docker Swarm - -```bash - 1) Deploy Stack - 2) Remove Stack - 3) View Stack Status - 4) Redeploy Stack - 5) Upgrade - 6) View Logs - 7) Exit - -Action [3]: 4 -``` - -If all goes well, you will see the confirmation from docker cli - ---- - -### Upgrading Plane Version - -It is always advised to keep Plane up to date with the latest release. - -Lets again run the `./setup.sh` command. You will again be prompted with the below options. This time select `5` to upgrade the release. - -#### Docker Compose - -```bash -Select a Action you want to perform: - 1) Install (x86_64) - 2) Start - 3) Stop - 4) Restart - 5) Upgrade - 6) View Logs - 7) Backup Data - 8) Exit - -Action [2]: 5 -``` - -By choosing this, it will stop the services and then will download the latest `docker-compose.yaml` and `plane.env`. - -You must expect the below message - -![Alt text](images/upgrade.png) - -Once done, choose `8` to exit from prompt. - -> It is very important for you to validate the `plane.env` for the new changes. - -Once done with making changes in `plane.env` file, jump on to `Start Server` - -#### Docker Swarm - -Lets again run the `./setup.sh` command. You will again be prompted with the below options. This time select `5` to upgrade the release. 
- -```bash - 1) Deploy Stack - 2) Remove Stack - 3) View Stack Status - 4) Redeploy Stack - 5) Upgrade - 6) View Logs - 7) Exit - -Action [3]: 5 -``` - -By choosing this, it will stop the services and then will download the latest `docker-compose.yaml` and `plane.env`. - -Once done, choose `7` to exit from prompt. - -> It is very important for you to validate the `plane.env` for the new changes. - -Once done with making changes in `plane.env` file, jump on to `Redeploy Stack` - ---- - -### View Logs - -There would a time when you might want to check what is happening inside the API, Worker or any other container. - -Lets again run the `./setup.sh` command. You will again be prompted with the below options. - -This time select `6` to view logs. - -#### Docker Compose - -```bash -Select a Action you want to perform: - 1) Install (x86_64) - 2) Start - 3) Stop - 4) Restart - 5) Upgrade - 6) View Logs - 7) Backup Data - 8) Exit - -Action [2]: 6 -``` - -#### Docker Swarm - - -```bash - 1) Deploy Stack - 2) Remove Stack - 3) View Stack Status - 4) Redeploy Stack - 5) Upgrade - 6) View Logs - 7) Exit - -Action [3]: 6 -``` - -#### Service Menu Options for Logs -This will further open sub-menu with list of services -```bash -Select a Service you want to view the logs for: - 1) Web - 2) Space - 3) API - 4) Worker - 5) Beat-Worker - 6) Migrator - 7) Proxy - 8) Redis - 9) Postgres - 10) Minio - 11) RabbitMQ - 0) Back to Main Menu - -Service: 3 -``` - -Select any of the service to view the logs e.g. `3`. Expect something similar to this -```bash -api-1 | Waiting for database... -api-1 | Database available! -api-1 | Waiting for database migrations to complete... -api-1 | Waiting for database migrations to complete... -api-1 | Waiting for database migrations to complete... -api-1 | Waiting for database migrations to complete... -api-1 | Waiting for database migrations to complete... -api-1 | Waiting for database migrations to complete... -api-1 | Waiting for database migrations to complete... -api-1 | No migrations Pending. Starting processes ... -api-1 | Instance registered -api-1 | ENABLE_SIGNUP loaded with value from environment variable. -api-1 | ENABLE_EMAIL_PASSWORD loaded with value from environment variable. -api-1 | ENABLE_MAGIC_LINK_LOGIN loaded with value from environment variable. -api-1 | GOOGLE_CLIENT_ID loaded with value from environment variable. -api-1 | GITHUB_CLIENT_ID loaded with value from environment variable. -api-1 | GITHUB_CLIENT_SECRET loaded with value from environment variable. -api-1 | EMAIL_HOST loaded with value from environment variable. -api-1 | EMAIL_HOST_USER loaded with value from environment variable. -api-1 | EMAIL_HOST_PASSWORD loaded with value from environment variable. -api-1 | EMAIL_PORT loaded with value from environment variable. -api-1 | EMAIL_FROM loaded with value from environment variable. -api-1 | EMAIL_USE_TLS loaded with value from environment variable. -api-1 | EMAIL_USE_SSL loaded with value from environment variable. -api-1 | OPENAI_API_KEY loaded with value from environment variable. -api-1 | GPT_ENGINE loaded with value from environment variable. -api-1 | UNSPLASH_ACCESS_KEY loaded with value from environment variable. -api-1 | Checking bucket... -api-1 | Bucket 'uploads' does not exist. Creating bucket... -api-1 | Bucket 'uploads' created successfully. -api-1 | Public read access policy set for bucket 'uploads'. 
-api-1 | Cache Cleared -api-1 | [2024-05-02 03:56:01 +0000] [1] [INFO] Starting gunicorn 21.2.0 -api-1 | [2024-05-02 03:56:01 +0000] [1] [INFO] Listening at: http://0.0.0.0:8000 (1) -api-1 | [2024-05-02 03:56:01 +0000] [1] [INFO] Using worker: uvicorn.workers.UvicornWorker -api-1 | [2024-05-02 03:56:01 +0000] [25] [INFO] Booting worker with pid: 25 -api-1 | [2024-05-02 03:56:03 +0000] [25] [INFO] Started server process [25] -api-1 | [2024-05-02 03:56:03 +0000] [25] [INFO] Waiting for application startup. -api-1 | [2024-05-02 03:56:03 +0000] [25] [INFO] ASGI 'lifespan' protocol appears unsupported. -api-1 | [2024-05-02 03:56:03 +0000] [25] [INFO] Application startup complete. - -``` - -To exit this, use `CTRL+C` and then you will land on to the main-menu with the list of actions. - -Similarly, you can view the logs of other services. - ---- - -### Backup Data (Docker Compose) - -There would a time when you might want to backup your data from docker volumes to external storage like S3 or drives. - -Lets again run the `./setup.sh` command. You will again be prompted with the below options. This time select `7` to Backup the data. - -```bash -Select a Action you want to perform: - 1) Install (x86_64) - 2) Start - 3) Stop - 4) Restart - 5) Upgrade - 6) View Logs - 7) Backup Data - 8) Exit - -Action [2]: 7 -``` - -In response, you can find the backup folder - -```bash -Backing Up plane-app_pgdata -Backing Up plane-app_redisdata -Backing Up plane-app_uploads - -Backup completed successfully. Backup files are stored in /....../plane-app/backup/20240502-1120 -``` - ---- - -### Restore Data (Docker Compose) - -When you want to restore the previously backed-up data, follow the instructions below. - -1. Make sure that Plane-CE is installed, started, and then stopped. This ensures that the Docker volumes are created. - -1. Download the restore script using the command below. We suggest downloading it in the same folder as `setup.sh`. - - ```bash - curl -fsSL -o restore.sh https://raw.githubusercontent.com/makeplane/plane/master/deploy/selfhost/restore.sh - chmod +x restore.sh - ``` - -1. Execute the command below to restore your data. - - ```bash - ./restore.sh - ``` - - As an example, for a backup folder `/opt/plane-selfhost/plane-app/backup/20240722-0914`, expect the response below: - - ```bash - -------------------------------------------- - ____ _ ///////// - | _ \| | __ _ _ __ ___ ///////// - | |_) | |/ _` | '_ \ / _ \ ///// ///// - | __/| | (_| | | | | __/ ///// ///// - |_| |_|\__,_|_| |_|\___| //// - //// - -------------------------------------------- - Project management tool from the future - -------------------------------------------- - Found /opt/plane-selfhost/plane-app/backup/20240722-0914/pgdata.tar.gz - .....Restoring plane-app_pgdata - .....Successfully restored volume plane-app_pgdata from pgdata.tar.gz - - Found /opt/plane-selfhost/plane-app/backup/20240722-0914/redisdata.tar.gz - .....Restoring plane-app_redisdata - .....Successfully restored volume plane-app_redisdata from redisdata.tar.gz - - Found /opt/plane-selfhost/plane-app/backup/20240722-0914/uploads.tar.gz - .....Restoring plane-app_uploads - .....Successfully restored volume plane-app_uploads from uploads.tar.gz - - - Restore completed successfully. - ``` - -1. Start the Plane instance using `./setup.sh start`. - ---- - -
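As a sanity check before running a restore, you can confirm what a backup folder actually contains; a short sketch, assuming a timestamped folder such as the `plane-app/backup/20240722-0914` example above:

```bash
# List the archives produced by the backup step
ls plane-app/backup/20240722-0914
# expected: pgdata.tar.gz  redisdata.tar.gz  uploads.tar.gz

# Peek inside one archive without extracting it
tar -tzf plane-app/backup/20240722-0914/pgdata.tar.gz | head
```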
-
-### Upgrading from v0.13.2 to v0.14.x
-
- -This is one time activity for users who are upgrading from v0.13.2 to v0.14.0 - -As there has been significant changes to Self Hosting process, this step mainly covers the data migration from current (v0.13.2) docker volumes from newly created volumes - -> Before we begin with migration, make sure your v0.14.0 was started and then stopped. This is required to know the newly created docker volume names. - -Begin with downloading the migration script using below command - -``` - -curl -fsSL -o migrate.sh https://raw.githubusercontent.com/makeplane/plane/master/deploy/selfhost/migration-0.13-0.14.sh - -chmod +x migrate.sh - -``` - -Now run the `./migrate.sh` command and expect the instructions as below - -``` -****************************************************************** - -This script is solely for the migration purpose only. -This is a 1 time migration of volume data from v0.13.2 => v0.14.x - -Assumption: -1. Postgres data volume name ends with _pgdata -2. Minio data volume name ends with _uploads -3. Redis data volume name ends with _redisdata - -Any changes to this script can break the migration. - -Before you proceed, make sure you run the below command -to know the docker volumes - -docker volume ls -q | grep -i "_pgdata" -docker volume ls -q | grep -i "_uploads" -docker volume ls -q | grep -i "_redisdata" - -******************************************************* - -Given below list of REDIS volumes, identify the prefix of source and destination volumes leaving "_redisdata" ---------------------- -plane-app_redisdata -v0132_redisdata - -Provide the Source Volume Prefix : -``` - -**Open another terminal window**, and run the mentioned 3 command. This may be different for users who have changed the volume names in their previous setup (v0.13.2) - -For every command you must see 2 records something like shown in above example of `redisdata` - -To move forward, you would need PREFIX of old setup and new setup. As per above example, `v0132` is the prefix of v0.13.2 and `plane-app` is the prefix of v0.14.0 setup - -**Back to original terminal window**, _Provide the Source Volume Prefix_ and hit ENTER. - -Now you will be prompted to _Provide Destination Volume Prefix_. Provide the value and hit ENTER - -``` -Provide the Source Volume Prefix : v0132 -Provide the Destination Volume Prefix : plane-app -``` - -In case the suffixes are wrong or the mentioned volumes are not found, you will receive the error shown below. The image below displays an error for source volumes. - -![Migrate Error](images/migrate-error.png) - -In case of successful migration, it will be a silent exit without error. - -Now its time to restart v0.14.0 setup. -
\ No newline at end of file diff --git a/plane/code/build.yml b/plane/code/build.yml deleted file mode 100644 index b65d297e9..000000000 --- a/plane/code/build.yml +++ /dev/null @@ -1,30 +0,0 @@ -services: - web: - image: ${DOCKERHUB_USER:-local}/plane-frontend:${APP_RELEASE:-latest} - build: - context: . - dockerfile: ./web/Dockerfile.web - - space: - image: ${DOCKERHUB_USER:-local}/plane-space:${APP_RELEASE:-latest} - build: - context: ./ - dockerfile: ./space/Dockerfile.space - - admin: - image: ${DOCKERHUB_USER:-local}/plane-admin:${APP_RELEASE:-latest} - build: - context: ./ - dockerfile: ./admin/Dockerfile.admin - - api: - image: ${DOCKERHUB_USER:-local}/plane-backend:${APP_RELEASE:-latest} - build: - context: ./apiserver - dockerfile: ./Dockerfile.api - - proxy: - image: ${DOCKERHUB_USER:-local}/plane-proxy:${APP_RELEASE:-latest} - build: - context: ./nginx - dockerfile: ./Dockerfile diff --git a/plane/code/docker-compose.yml b/plane/code/docker-compose.yml index 65fb84e10..284b80ef9 100644 --- a/plane/code/docker-compose.yml +++ b/plane/code/docker-compose.yml @@ -24,9 +24,16 @@ x-aws-s3-env: &aws-s3-env AWS_S3_BUCKET_NAME: ${AWS_S3_BUCKET_NAME:-uploads} x-proxy-env: &proxy-env - NGINX_PORT: ${NGINX_PORT:-80} - BUCKET_NAME: ${AWS_S3_BUCKET_NAME:-uploads} + SSL: ${SSL:-false} + APP_DOMAIN: ${APP_DOMAIN:-localhost} FILE_SIZE_LIMIT: ${FILE_SIZE_LIMIT:-5242880} + CERT_EMAIL: ${CERT_EMAIL} + CERT_ACME_CA: ${CERT_ACME_CA} + CERT_ACME_DNS: ${CERT_ACME_DNS} + LISTEN_HTTP_PORT: ${LISTEN_HTTP_PORT:-80} + LISTEN_HTTPS_PORT: ${LISTEN_HTTPS_PORT:-443} + BUCKET_NAME: ${AWS_S3_BUCKET_NAME:-uploads} + SITE_ADDRESS: ${SITE_ADDRESS:-:80} x-mq-env: # RabbitMQ Settings @@ -55,8 +62,7 @@ x-app-env: &app-env services: web: - image: artifacts.plane.so/makeplane/plane-frontend:${APP_RELEASE:-stable} - command: node web/server.js web + image: artifacts.plane.so/makeplane/plane-frontend:${APP_RELEASE:-v0.28.0} deploy: replicas: ${WEB_REPLICAS:-1} restart_policy: @@ -66,8 +72,7 @@ services: - worker space: - image: artifacts.plane.so/makeplane/plane-space:${APP_RELEASE:-stable} - command: node space/server.js space + image: artifacts.plane.so/makeplane/plane-space:${APP_RELEASE:-v0.28.0} deploy: replicas: ${SPACE_REPLICAS:-1} restart_policy: @@ -78,8 +83,7 @@ services: - web admin: - image: artifacts.plane.so/makeplane/plane-admin:${APP_RELEASE:-stable} - command: node admin/server.js admin + image: artifacts.plane.so/makeplane/plane-admin:${APP_RELEASE:-v0.28.0} deploy: replicas: ${ADMIN_REPLICAS:-1} restart_policy: @@ -89,8 +93,7 @@ services: - web live: - image: artifacts.plane.so/makeplane/plane-live:${APP_RELEASE:-stable} - command: node live/dist/server.js live + image: artifacts.plane.so/makeplane/plane-live:${APP_RELEASE:-v0.28.0} environment: <<: [ *live-env ] deploy: @@ -102,7 +105,7 @@ services: - web api: - image: artifacts.plane.so/makeplane/plane-backend:${APP_RELEASE:-stable} + image: artifacts.plane.so/makeplane/plane-backend:${APP_RELEASE:-v0.28.0} command: ./bin/docker-entrypoint-api.sh deploy: replicas: ${API_REPLICAS:-1} @@ -118,7 +121,7 @@ services: - plane-mq worker: - image: artifacts.plane.so/makeplane/plane-backend:${APP_RELEASE:-stable} + image: artifacts.plane.so/makeplane/plane-backend:${APP_RELEASE:-v0.28.0} command: ./bin/docker-entrypoint-worker.sh deploy: replicas: ${WORKER_REPLICAS:-1} @@ -135,7 +138,7 @@ services: - plane-mq beat-worker: - image: artifacts.plane.so/makeplane/plane-backend:${APP_RELEASE:-stable} + image: 
artifacts.plane.so/makeplane/plane-backend:${APP_RELEASE:-v0.28.0} command: ./bin/docker-entrypoint-beat.sh deploy: replicas: ${BEAT_WORKER_REPLICAS:-1} @@ -152,7 +155,7 @@ services: - plane-mq migrator: - image: artifacts.plane.so/makeplane/plane-backend:${APP_RELEASE:-stable} + image: artifacts.plane.so/makeplane/plane-backend:${APP_RELEASE:-v0.28.0} command: ./bin/docker-entrypoint-migrator.sh deploy: replicas: 1 @@ -214,17 +217,31 @@ services: # Comment this if you already have a reverse proxy running proxy: - image: artifacts.plane.so/makeplane/plane-proxy:${APP_RELEASE:-stable} - environment: - <<: *proxy-env + image: artifacts.plane.so/makeplane/plane-proxy:${APP_RELEASE:-v0.28.0} + command: + [ + "caddy", + "run", + "--config", + "/etc/caddy/Caddyfile", + "--adapter", + "caddyfile" + ] deploy: replicas: 1 restart_policy: condition: on-failure + environment: + <<: *proxy-env + volumes: + - proxy_config:/config + - proxy_data:/data depends_on: - web - api - space + - admin + - live volumes: pgdata: @@ -235,3 +252,5 @@ volumes: logs_beat-worker: logs_migrator: rabbitmq_data: + proxy_config: + proxy_data: diff --git a/plane/code/images/download.png b/plane/code/images/download.png deleted file mode 100644 index bb0d1183e..000000000 Binary files a/plane/code/images/download.png and /dev/null differ diff --git a/plane/code/images/migrate-error.png b/plane/code/images/migrate-error.png deleted file mode 100644 index f42ec441a..000000000 Binary files a/plane/code/images/migrate-error.png and /dev/null differ diff --git a/plane/code/images/restart.png b/plane/code/images/restart.png deleted file mode 100644 index 0387599a0..000000000 Binary files a/plane/code/images/restart.png and /dev/null differ diff --git a/plane/code/images/started.png b/plane/code/images/started.png deleted file mode 100644 index d6a0a0baa..000000000 Binary files a/plane/code/images/started.png and /dev/null differ diff --git a/plane/code/images/stopped.png b/plane/code/images/stopped.png deleted file mode 100644 index 0f5876882..000000000 Binary files a/plane/code/images/stopped.png and /dev/null differ diff --git a/plane/code/images/upgrade.png b/plane/code/images/upgrade.png deleted file mode 100644 index b78fbbb60..000000000 Binary files a/plane/code/images/upgrade.png and /dev/null differ diff --git a/plane/code/migration-0.13-0.14.sh b/plane/code/migration-0.13-0.14.sh deleted file mode 100755 index d03f87780..000000000 --- a/plane/code/migration-0.13-0.14.sh +++ /dev/null @@ -1,118 +0,0 @@ -#!/bin/bash - -echo ' -****************************************************************** - -This script is solely for the migration purpose only. -This is a 1 time migration of volume data from v0.13.2 => v0.14.x - -Assumption: -1. Postgres data volume name ends with _pgdata -2. Minio data volume name ends with _uploads -3. Redis data volume name ends with _redisdata - -Any changes to this script can break the migration. 
- -Before you proceed, make sure you run the below command -to know the docker volumes - -docker volume ls -q | grep -i "_pgdata" -docker volume ls -q | grep -i "_uploads" -docker volume ls -q | grep -i "_redisdata" - -******************************************************* -' - -DOWNLOAD_FOL=./download -rm -rf ${DOWNLOAD_FOL} -mkdir -p ${DOWNLOAD_FOL} - -function volumeExists { - if [ "$(docker volume ls -f name=$1 | awk '{print $NF}' | grep -E '^'$1'$')" ]; then - return 0 - else - return 1 - fi -} - -function readPrefixes(){ - echo '' - echo 'Given below list of REDIS volumes, identify the prefix of source and destination volumes leaving "_redisdata" ' - echo '---------------------' - docker volume ls -q | grep -i "_redisdata" - echo '' - - read -p "Provide the Source Volume Prefix : " SRC_VOL_PREFIX - until [ "$SRC_VOL_PREFIX" ]; do - read -p "Provide the Source Volume Prefix : " SRC_VOL_PREFIX - done - - read -p "Provide the Destination Volume Prefix : " DEST_VOL_PREFIX - until [ "$DEST_VOL_PREFIX" ]; do - read -p "Provide the Source Volume Prefix : " DEST_VOL_PREFIX - done - - echo '' - echo 'Prefix Provided ' - echo " Source : ${SRC_VOL_PREFIX}" - echo " Destination : ${DEST_VOL_PREFIX}" - echo '---------------------------------------' -} - -function migrate(){ - - SRC_VOLUME=${SRC_VOL_PREFIX}_${VOL_NAME_SUFFIX} - DEST_VOLUME=${DEST_VOL_PREFIX}_${VOL_NAME_SUFFIX} - - if volumeExists $SRC_VOLUME; then - if volumeExists $DEST_VOLUME; then - GOOD_TO_GO=1 - else - echo "Destination Volume '$DEST_VOLUME' does not exist" - echo '' - fi - else - echo "Source Volume '$SRC_VOLUME' does not exist" - echo '' - fi - - if [ $GOOD_TO_GO = 1 ]; then - - echo "MIGRATING ${VOL_NAME_SUFFIX} FROM ${SRC_VOLUME} => ${DEST_VOLUME}" - - TEMP_CONTAINER=$(docker run -d -v $SRC_VOLUME:$CONTAINER_VOL_FOLDER busybox true) - docker cp -q $TEMP_CONTAINER:$CONTAINER_VOL_FOLDER ${DOWNLOAD_FOL}/${VOL_NAME_SUFFIX} - docker rm $TEMP_CONTAINER &> /dev/null - - TEMP_CONTAINER=$(docker run -d -v $DEST_VOLUME:$CONTAINER_VOL_FOLDER busybox true) - if [ "$VOL_NAME_SUFFIX" = "pgdata" ]; then - docker cp -q ${DOWNLOAD_FOL}/${VOL_NAME_SUFFIX} $TEMP_CONTAINER:$CONTAINER_VOL_FOLDER/_temp - docker run --rm -v $DEST_VOLUME:$CONTAINER_VOL_FOLDER \ - -e DATA_FOLDER="${CONTAINER_VOL_FOLDER}" \ - busybox /bin/sh -c 'cp -Rf $DATA_FOLDER/_temp/* $DATA_FOLDER ' - else - docker cp -q ${DOWNLOAD_FOL}/${VOL_NAME_SUFFIX} $TEMP_CONTAINER:$CONTAINER_VOL_FOLDER - fi - docker rm $TEMP_CONTAINER &> /dev/null - - echo '' - fi -} - -readPrefixes - -# MIGRATE DB -CONTAINER_VOL_FOLDER=/var/lib/postgresql/data -VOL_NAME_SUFFIX=pgdata -migrate - -# MIGRATE REDIS -CONTAINER_VOL_FOLDER=/data -VOL_NAME_SUFFIX=redisdata -migrate - -# MIGRATE MINIO -CONTAINER_VOL_FOLDER=/export -VOL_NAME_SUFFIX=uploads -migrate - diff --git a/plane/code/restore.sh b/plane/code/restore.sh deleted file mode 100755 index 23b8de6cf..000000000 --- a/plane/code/restore.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash - -function print_header() { -clear - -cat <<"EOF" --------------------------------------------- - ____ _ ///////// -| _ \| | __ _ _ __ ___ ///////// -| |_) | |/ _` | '_ \ / _ \ ///// ///// -| __/| | (_| | | | | __/ ///// ///// -|_| |_|\__,_|_| |_|\___| //// - //// --------------------------------------------- -Project management tool from the future --------------------------------------------- -EOF -} - -function restoreSingleVolume() { - selectedVolume=$1 - backupFolder=$2 - restoreFile=$3 - - docker volume rm "$selectedVolume" > /dev/null 2>&1 - - if [ $? 
-ne 0 ]; then - echo "Error: Failed to remove volume $selectedVolume" - echo "" - return 1 - fi - - docker volume create "$selectedVolume" > /dev/null 2>&1 - if [ $? -ne 0 ]; then - echo "Error: Failed to create volume $selectedVolume" - echo "" - return 1 - fi - - docker run --rm \ - -e TAR_NAME="$restoreFile" \ - -v "$selectedVolume":"/vol" \ - -v "$backupFolder":/backup \ - busybox sh -c 'mkdir -p /restore && tar -xzf "/backup/${TAR_NAME}.tar.gz" -C /restore && mv /restore/${TAR_NAME}/* /vol' - - if [ $? -ne 0 ]; then - echo "Error: Failed to restore volume ${selectedVolume} from ${restoreFile}.tar.gz" - echo "" - return 1 - fi - echo ".....Successfully restored volume $selectedVolume from ${restoreFile}.tar.gz" - echo "" -} - -function restoreData() { - print_header - local BACKUP_FOLDER=${1:-$PWD} - - local dockerServiceStatus - dockerServiceStatus=$($COMPOSE_CMD ls --filter name=plane-app --format=json | jq -r .[0].Status) - local dockerServicePrefix - dockerServicePrefix="running" - - if [[ $dockerServiceStatus == $dockerServicePrefix* ]]; then - echo "Plane App is running. Please STOP the Plane App before restoring data." - exit 1 - fi - - local volume_suffix - volume_suffix="_pgdata|_redisdata|_uploads|_rabbitmq_data" - local volumes - volumes=$(docker volume ls -f "name=plane-app" --format "{{.Name}}" | grep -E "$volume_suffix") - # Check if there are any matching volumes - if [ -z "$volumes" ]; then - echo ".....No volumes found starting with 'plane-app'" - exit 1 - fi - - - for BACKUP_FILE in $BACKUP_FOLDER/*.tar.gz; do - if [ -e "$BACKUP_FILE" ]; then - - local restoreFileName - restoreFileName=$(basename "$BACKUP_FILE") - restoreFileName="${restoreFileName%.tar.gz}" - - local restoreVolName - restoreVolName="plane-app_${restoreFileName}" - echo "Found $BACKUP_FILE" - - local docVol - docVol=$(docker volume ls -f "name=$restoreVolName" --format "{{.Name}}" | grep -E "$volume_suffix") - - if [ -z "$docVol" ]; then - echo "Skipping: No volume found with name $restoreVolName" - else - echo ".....Restoring $docVol" - restoreSingleVolume "$docVol" "$BACKUP_FOLDER" "$restoreFileName" - fi - else - echo "No .tar.gz files found in the current directory." - echo "" - echo "Please provide the path to the backup file." - echo "" - echo "Usage: ./restore.sh /path/to/backup" - exit 1 - fi - done - - echo "" - echo "Restore completed successfully." 
- echo "" -} - -# if docker-compose is installed -if command -v docker-compose &> /dev/null -then - COMPOSE_CMD="docker-compose" -else - COMPOSE_CMD="docker compose" -fi - -restoreData "$@" \ No newline at end of file diff --git a/plane/code/swarm.sh b/plane/code/swarm.sh deleted file mode 100755 index c58f05e51..000000000 --- a/plane/code/swarm.sh +++ /dev/null @@ -1,612 +0,0 @@ -#!/bin/bash - -BRANCH=${BRANCH:-master} -SERVICE_FOLDER=plane-app -SCRIPT_DIR=$PWD -PLANE_INSTALL_DIR=$PWD/$SERVICE_FOLDER -export APP_RELEASE="stable" -export DOCKERHUB_USER=artifacts.plane.so/makeplane - -export GH_REPO=makeplane/plane -export RELEASE_DOWNLOAD_URL="https://github.com/$GH_REPO/releases/download" -export FALLBACK_DOWNLOAD_URL="https://raw.githubusercontent.com/$GH_REPO/$BRANCH/deploy/selfhost" - -OS_NAME=$(uname) - -# Create necessary directories -mkdir -p $PLANE_INSTALL_DIR/archive - -DOCKER_FILE_PATH=$PLANE_INSTALL_DIR/docker-compose.yml -DOCKER_ENV_PATH=$PLANE_INSTALL_DIR/plane.env - -function print_header() { -clear - -cat <<"EOF" --------------------------------------------- - ____ _ ///////// -| _ \| | __ _ _ __ ___ ///////// -| |_) | |/ _` | '_ \ / _ \ ///// ///// -| __/| | (_| | | | | __/ ///// ///// -|_| |_|\__,_|_| |_|\___| //// - //// --------------------------------------------- -Project management tool from the future --------------------------------------------- -EOF -} - -function checkLatestRelease(){ - echo "Checking for the latest release..." >&2 - local latest_release=$(curl -s https://api.github.com/repos/$GH_REPO/releases/latest | grep -o '"tag_name": "[^"]*"' | sed 's/"tag_name": "//;s/"//g') - if [ -z "$latest_release" ]; then - echo "Failed to check for the latest release. Exiting..." >&2 - exit 1 - fi - - echo $latest_release -} - -# Function to read stack name from env file -function readStackName() { - if [ -f "$DOCKER_ENV_PATH" ]; then - local saved_stack_name=$(grep "^STACK_NAME=" "$DOCKER_ENV_PATH" | cut -d'=' -f2) - if [ -n "$saved_stack_name" ]; then - stack_name=$saved_stack_name - return 1 - fi - fi - return 0 -} - -# Function to get stack name (either from env or user input) -function getStackName() { - read -p "Enter stack name [plane]: " input_stack_name - if [ -z "$input_stack_name" ]; then - input_stack_name="plane" - fi - stack_name=$input_stack_name - updateEnvFile "STACK_NAME" "$stack_name" "$DOCKER_ENV_PATH" - echo "Using stack name: $stack_name" -} - -function syncEnvFile(){ - echo "Syncing environment variables..." >&2 - if [ -f "$PLANE_INSTALL_DIR/plane.env.bak" ]; then - # READ keys of plane.env and update the values from plane.env.bak - while IFS= read -r line - do - # ignore if the line is empty or starts with # - if [ -z "$line" ] || [[ $line == \#* ]]; then - continue - fi - key=$(echo "$line" | cut -d'=' -f1) - value=$(getEnvValue "$key" "$PLANE_INSTALL_DIR/plane.env.bak") - if [ -n "$value" ]; then - updateEnvFile "$key" "$value" "$DOCKER_ENV_PATH" - fi - done < "$DOCKER_ENV_PATH" - - value=$(getEnvValue "STACK_NAME" "$PLANE_INSTALL_DIR/plane.env.bak") - if [ -n "$value" ]; then - updateEnvFile "STACK_NAME" "$value" "$DOCKER_ENV_PATH" - fi - fi - echo "Environment variables synced successfully" >&2 - rm -f $PLANE_INSTALL_DIR/plane.env.bak -} - -function getEnvValue() { - local key=$1 - local file=$2 - - if [ -z "$key" ] || [ -z "$file" ]; then - echo "Invalid arguments supplied" - exit 1 - fi - - if [ -f "$file" ]; then - grep -q "^$key=" "$file" - if [ $? 
-eq 0 ]; then - local value - value=$(grep "^$key=" "$file" | cut -d'=' -f2) - echo "$value" - else - echo "" - fi - fi -} - -function updateEnvFile() { - local key=$1 - local value=$2 - local file=$3 - - if [ -z "$key" ] || [ -z "$value" ] || [ -z "$file" ]; then - echo "Invalid arguments supplied" - exit 1 - fi - - if [ -f "$file" ]; then - # check if key exists in the file - grep -q "^$key=" "$file" - if [ $? -ne 0 ]; then - echo "$key=$value" >> "$file" - return - else - if [ "$OS_NAME" == "Darwin" ]; then - value=$(echo "$value" | sed 's/|/\\|/g') - sed -i '' "s|^$key=.*|$key=$value|g" "$file" - else - sed -i "s/^$key=.*/$key=$value/g" "$file" - fi - fi - else - echo "File not found: $file" - exit 1 - fi -} - -function download() { - cd $SCRIPT_DIR || exit 1 - TS=$(date +%s) - if [ -f "$PLANE_INSTALL_DIR/docker-compose.yml" ] - then - mv $PLANE_INSTALL_DIR/docker-compose.yml $PLANE_INSTALL_DIR/archive/$TS.docker-compose.yml - fi - - echo $RELEASE_DOWNLOAD_URL - echo $FALLBACK_DOWNLOAD_URL - echo $APP_RELEASE - - RESPONSE=$(curl -H 'Cache-Control: no-cache, no-store' -s -w "HTTPSTATUS:%{http_code}" "$RELEASE_DOWNLOAD_URL/$APP_RELEASE/docker-compose.yml?$(date +%s)") - BODY=$(echo "$RESPONSE" | sed -e 's/HTTPSTATUS\:.*//g') - STATUS=$(echo "$RESPONSE" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') - - if [ "$STATUS" -eq 200 ]; then - echo "$BODY" > $PLANE_INSTALL_DIR/docker-compose.yml - else - # Fallback to download from the raw github url - RESPONSE=$(curl -H 'Cache-Control: no-cache, no-store' -s -w "HTTPSTATUS:%{http_code}" "$FALLBACK_DOWNLOAD_URL/docker-compose.yml?$(date +%s)") - BODY=$(echo "$RESPONSE" | sed -e 's/HTTPSTATUS\:.*//g') - STATUS=$(echo "$RESPONSE" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') - - if [ "$STATUS" -eq 200 ]; then - echo "$BODY" > $PLANE_INSTALL_DIR/docker-compose.yml - else - echo "Failed to download docker-compose.yml. HTTP Status: $STATUS" - echo "URL: $RELEASE_DOWNLOAD_URL/$APP_RELEASE/docker-compose.yml" - mv $PLANE_INSTALL_DIR/archive/$TS.docker-compose.yml $PLANE_INSTALL_DIR/docker-compose.yml - exit 1 - fi - fi - - RESPONSE=$(curl -H 'Cache-Control: no-cache, no-store' -s -w "HTTPSTATUS:%{http_code}" "$RELEASE_DOWNLOAD_URL/$APP_RELEASE/variables.env?$(date +%s)") - BODY=$(echo "$RESPONSE" | sed -e 's/HTTPSTATUS\:.*//g') - STATUS=$(echo "$RESPONSE" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') - - if [ "$STATUS" -eq 200 ]; then - echo "$BODY" > $PLANE_INSTALL_DIR/variables-upgrade.env - else - # Fallback to download from the raw github url - RESPONSE=$(curl -H 'Cache-Control: no-cache, no-store' -s -w "HTTPSTATUS:%{http_code}" "$FALLBACK_DOWNLOAD_URL/variables.env?$(date +%s)") - BODY=$(echo "$RESPONSE" | sed -e 's/HTTPSTATUS\:.*//g') - STATUS=$(echo "$RESPONSE" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') - - if [ "$STATUS" -eq 200 ]; then - echo "$BODY" > $PLANE_INSTALL_DIR/variables-upgrade.env - else - echo "Failed to download variables.env. HTTP Status: $STATUS" - echo "URL: $RELEASE_DOWNLOAD_URL/$APP_RELEASE/variables.env" - mv $PLANE_INSTALL_DIR/archive/$TS.docker-compose.yml $PLANE_INSTALL_DIR/docker-compose.yml - exit 1 - fi - fi - - if [ -f "$DOCKER_ENV_PATH" ]; - then - cp "$DOCKER_ENV_PATH" "$PLANE_INSTALL_DIR/archive/$TS.env" - cp "$DOCKER_ENV_PATH" "$PLANE_INSTALL_DIR/plane.env.bak" - fi - - mv $PLANE_INSTALL_DIR/variables-upgrade.env $DOCKER_ENV_PATH - - syncEnvFile - - updateEnvFile "APP_RELEASE" "$APP_RELEASE" "$DOCKER_ENV_PATH" - -} -function deployStack() { - # Check if docker compose file and env file exist - if [ ! 
-f "$DOCKER_FILE_PATH" ] || [ ! -f "$DOCKER_ENV_PATH" ]; then - echo "Configuration files not found" - echo "Downloading it now......" - APP_RELEASE=$(checkLatestRelease) - download - fi - if [ -z "$stack_name" ]; then - getStackName - fi - echo "Starting ${stack_name} stack..." - - # Pull envs - if [ -f "$DOCKER_ENV_PATH" ]; then - set -o allexport; source $DOCKER_ENV_PATH; set +o allexport; - else - echo "Environment file not found: $DOCKER_ENV_PATH" - exit 1 - fi - - # Deploy the stack - docker stack deploy -c $DOCKER_FILE_PATH $stack_name - - echo "Waiting for services to be deployed..." - sleep 10 - - # Check migrator service - local migrator_service=$(docker service ls --filter name=${stack_name}_migrator -q) - if [ -n "$migrator_service" ]; then - echo ">> Waiting for Data Migration to finish" - while docker service ls --filter name=${stack_name}_migrator | grep -q "running"; do - echo -n "." - sleep 1 - done - echo "" - - # Get the most recent container for the migrator service - local migrator_container=$(docker ps -a --filter name=${stack_name}_migrator --latest -q) - - if [ -n "$migrator_container" ]; then - # Get the exit code of the container - local exit_code=$(docker inspect --format='{{.State.ExitCode}}' $migrator_container) - - if [ "$exit_code" != "0" ]; then - echo "Server failed to start ❌" - echo "Migration failed with exit code: $exit_code" - echo "Please check the logs for the 'migrator' service and resolve the issue(s)." - echo "Stop the services by running the command: ./swarm.sh stop" - exit 1 - else - echo " Data Migration completed successfully ✅" - fi - else - echo "Warning: Could not find migrator container to check exit status" - fi - fi - - # Check API service - local api_service=$(docker service ls --filter name=${stack_name}_api -q) - while docker service ls --filter name=${stack_name}_api | grep -q "running"; do - local running_container=$(docker ps --filter "name=${stack_name}_api" --filter "status=running" -q) - if [ -n "$running_container" ]; then - if docker container logs $running_container 2>/dev/null | grep -q "Application Startup Complete"; then - break - fi - fi - sleep 2 - done - - if [ -z "$api_service" ]; then - echo "Plane Server failed to start ❌" - echo "Please check the logs for the 'api' service and resolve the issue(s)." - echo "Stop the services by running the command: ./swarm.sh stop" - exit 1 - fi - echo " Plane Server started successfully ✅" - echo "" - echo " You can access the application at $WEB_URL" - echo "" -} - -function removeStack() { - if [ -z "$stack_name" ]; then - echo "Stack name not found" - exit 1 - fi - echo "Removing ${stack_name} stack..." - docker stack rm "$stack_name" - echo "Waiting for services to be removed..." - while docker stack ls | grep -q "$stack_name"; do - sleep 1 - done - sleep 20 - echo "Services stopped successfully ✅" -} - -function viewStatus() { - echo "Checking status of ${stack_name} stack..." - if [ -z "$stack_name" ]; then - echo "Stack name not found" - exit 1 - fi - docker stack ps "$stack_name" -} - -function redeployStack() { - removeStack - echo "ReDeploying ${stack_name} stack..." - deployStack -} - -function upgrade() { - - echo "Checking status of ${stack_name} stack..." 
- if [ -z "$stack_name" ]; then - echo "Stack name not found" - exit 1 - fi - - local latest_release=$(checkLatestRelease) - - echo "" - echo "Current release: $APP_RELEASE" - - if [ "$latest_release" == "$APP_RELEASE" ]; then - echo "" - echo "You are already using the latest release" - exit 0 - fi - - echo "Latest release: $latest_release" - echo "" - - # Check for confirmation to upgrade - echo "Do you want to upgrade to the latest release ($latest_release)?" - read -p "Continue? [y/N]: " confirm - - if [[ ! "$confirm" =~ ^[Yy]$ ]]; then - echo "Exiting..." - exit 0 - fi - - export APP_RELEASE=$latest_release - - # check if stack exists - echo "Upgrading ${stack_name} stack..." - - # check env file and take backup - if [ -f "$DOCKER_ENV_PATH" ]; then - cp "$DOCKER_ENV_PATH" "${DOCKER_ENV_PATH}.bak" - fi - - download - redeployStack -} - -function viewSpecificLogs() { - local service=$1 - - # Input validation - if [ -z "$service" ]; then - echo "Error: Please specify a service name" - return 1 - fi - - # Main loop for service logs - while true; do - # Get all running containers for the service - local running_containers=$(docker ps --filter "name=${stack_name}_${service}" --filter "status=running" -q) - - # If no running containers found, try service logs - if [ -z "$running_containers" ]; then - echo "No running containers found for ${stack_name}_${service}, checking service logs..." - if docker service inspect ${stack_name}_${service} >/dev/null 2>&1; then - echo "Press Ctrl+C or 'q' to exit logs" - docker service logs ${stack_name}_${service} -f - break - else - echo "Error: No running containers or services found for ${stack_name}_${service}" - return 1 - fi - return - fi - - # If multiple containers are running, let user choose - if [ $(echo "$running_containers" | grep -v '^$' | wc -l) -gt 1 ]; then - clear - echo "Multiple containers found for ${stack_name}_${service}:" - local i=1 - # Use regular arrays instead of associative arrays - container_ids=() - container_names=() - - while read -r container_id; do - if [ -n "$container_id" ]; then - local container_name=$(docker inspect --format '{{.Name}}' "$container_id" | sed 's/\///') - container_ids[$i]=$container_id - container_names[$i]=$container_name - echo "[$i] ${container_names[$i]} (${container_ids[$i]})" - i=$((i+1)) - fi - done <<< "$running_containers" - - echo -e "\nPlease select a container number:" - read -r selection - - if [[ "$selection" =~ ^[0-9]+$ ]] && [ -n "${container_ids[$selection]}" ]; then - local selected_container=${container_ids[$selection]} - clear - echo "Showing logs for container: ${container_names[$selection]}" - echo "Press Ctrl+C or 'q' to return to container selection" - - # Start watching logs in the background - docker container logs -f "$selected_container" & - local log_pid=$! - - while true; do - read -r -n 1 input - if [[ $input == "q" ]]; then - kill $log_pid 2>/dev/null - wait $log_pid 2>/dev/null - break - fi - done - clear - else - echo "Error: Invalid selection" - sleep 2 - fi - else - # Single container case - local container_name=$(docker inspect --format '{{.Name}}' "$running_containers" | sed 's/\///') - echo "Showing logs for container: $container_name" - echo "Press Ctrl+C or 'q' to exit logs" - docker container logs -f "$running_containers" & - local log_pid=$! 
- - while true; do - read -r -n 1 input - if [[ $input == "q" ]]; then - kill $log_pid 2>/dev/null - wait $log_pid 2>/dev/null - break - fi - done - break - fi - done -} - -function viewLogs(){ - - ARG_SERVICE_NAME=$2 - if [ -z "$ARG_SERVICE_NAME" ]; - then - echo - echo "Select a Service you want to view the logs for:" - echo " 1) Web" - echo " 2) Space" - echo " 3) API" - echo " 4) Worker" - echo " 5) Beat-Worker" - echo " 6) Migrator" - echo " 7) Proxy" - echo " 8) Redis" - echo " 9) Postgres" - echo " 10) Minio" - echo " 11) RabbitMQ" - echo " 0) Back to Main Menu" - echo - read -p "Service: " DOCKER_SERVICE_NAME - - until (( DOCKER_SERVICE_NAME >= 0 && DOCKER_SERVICE_NAME <= 11 )); do - echo "Invalid selection. Please enter a number between 0 and 11." - read -p "Service: " DOCKER_SERVICE_NAME - done - - if [ -z "$DOCKER_SERVICE_NAME" ]; - then - echo "INVALID SERVICE NAME SUPPLIED" - else - case $DOCKER_SERVICE_NAME in - 1) viewSpecificLogs "web";; - 2) viewSpecificLogs "space";; - 3) viewSpecificLogs "api";; - 4) viewSpecificLogs "worker";; - 5) viewSpecificLogs "beat-worker";; - 6) viewSpecificLogs "migrator";; - 7) viewSpecificLogs "proxy";; - 8) viewSpecificLogs "plane-redis";; - 9) viewSpecificLogs "plane-db";; - 10) viewSpecificLogs "plane-minio";; - 11) viewSpecificLogs "plane-mq";; - 0) askForAction;; - *) echo "INVALID SERVICE NAME SUPPLIED";; - esac - fi - elif [ -n "$ARG_SERVICE_NAME" ]; - then - ARG_SERVICE_NAME=$(echo "$ARG_SERVICE_NAME" | tr '[:upper:]' '[:lower:]') - case $ARG_SERVICE_NAME in - web) viewSpecificLogs "web";; - space) viewSpecificLogs "space";; - api) viewSpecificLogs "api";; - worker) viewSpecificLogs "worker";; - beat-worker) viewSpecificLogs "beat-worker";; - migrator) viewSpecificLogs "migrator";; - proxy) viewSpecificLogs "proxy";; - redis) viewSpecificLogs "plane-redis";; - postgres) viewSpecificLogs "plane-db";; - minio) viewSpecificLogs "plane-minio";; - rabbitmq) viewSpecificLogs "plane-mq";; - *) echo "INVALID SERVICE NAME SUPPLIED";; - esac - else - echo "INVALID SERVICE NAME SUPPLIED" - fi -} - - - -function askForAction() { - # Rest of askForAction remains the same but use $stack_name instead of $STACK_NAME - local DEFAULT_ACTION=$1 - - if [ -z "$DEFAULT_ACTION" ]; then - echo - echo "Select an Action you want to perform:" - echo " 1) Deploy Stack" - echo " 2) Remove Stack" - echo " 3) View Stack Status" - echo " 4) Redeploy Stack" - echo " 5) Upgrade" - echo " 6) View Logs" - echo " 7) Exit" - echo - read -p "Action [3]: " ACTION - until [[ -z "$ACTION" || "$ACTION" =~ ^[1-6]$ ]]; do - echo "$ACTION: invalid selection." 
- read -p "Action [3]: " ACTION - done - - if [ -z "$ACTION" ]; then - ACTION=3 - fi - echo - fi - - if [ "$ACTION" == "1" ] || [ "$DEFAULT_ACTION" == "deploy" ]; then - deployStack - elif [ "$ACTION" == "2" ] || [ "$DEFAULT_ACTION" == "remove" ]; then - removeStack - elif [ "$ACTION" == "3" ] || [ "$DEFAULT_ACTION" == "status" ]; then - viewStatus - elif [ "$ACTION" == "4" ] || [ "$DEFAULT_ACTION" == "redeploy" ]; then - redeployStack - elif [ "$ACTION" == "5" ] || [ "$DEFAULT_ACTION" == "upgrade" ]; then - upgrade - elif [ "$ACTION" == "6" ] || [ "$DEFAULT_ACTION" == "logs" ]; then - viewLogs "$@" - elif [ "$ACTION" == "7" ] || [ "$DEFAULT_ACTION" == "exit" ]; then - exit 0 - else - echo "INVALID ACTION SUPPLIED" - fi -} - -# Initialize stack name at script start - -if [ -z "$stack_name" ]; then - readStackName -fi - -# Sync environment variables -if [ -f "$DOCKER_ENV_PATH" ]; then - DOCKERHUB_USER=$(getEnvValue "DOCKERHUB_USER" "$DOCKER_ENV_PATH") - APP_RELEASE=$(getEnvValue "APP_RELEASE" "$DOCKER_ENV_PATH") - - if [ -z "$DOCKERHUB_USER" ]; then - DOCKERHUB_USER=artifacts.plane.so/makeplane - updateEnvFile "DOCKERHUB_USER" "$DOCKERHUB_USER" "$DOCKER_ENV_PATH" - fi - - if [ -z "$APP_RELEASE" ]; then - APP_RELEASE=stable - updateEnvFile "APP_RELEASE" "$APP_RELEASE" "$DOCKER_ENV_PATH" - fi -fi - - -# Main execution -print_header -askForAction "$@" diff --git a/plane/code/install.sh b/plane/setup.sh similarity index 95% rename from plane/code/install.sh rename to plane/setup.sh index 9f0065f66..3460d35f1 100755 --- a/plane/code/install.sh +++ b/plane/setup.sh @@ -2,14 +2,14 @@ BRANCH=${BRANCH:-master} SCRIPT_DIR=$PWD -SERVICE_FOLDER=plane-app +SERVICE_FOLDER=plane-code PLANE_INSTALL_DIR=$PWD/$SERVICE_FOLDER export APP_RELEASE=stable export DOCKERHUB_USER=artifacts.plane.so/makeplane export PULL_POLICY=${PULL_POLICY:-if_not_present} export GH_REPO=makeplane/plane export RELEASE_DOWNLOAD_URL="https://github.com/$GH_REPO/releases/download" -export FALLBACK_DOWNLOAD_URL="https://raw.githubusercontent.com/$GH_REPO/$BRANCH/deploy/selfhost" +export FALLBACK_DOWNLOAD_URL="https://raw.githubusercontent.com/$GH_REPO/$BRANCH/deployments/cli/community" CPU_ARCH=$(uname -m) OS_NAME=$(uname) @@ -57,7 +57,7 @@ function spinner() { function checkLatestRelease(){ echo "Checking for the latest release..." >&2 - local latest_release=$(curl -s https://api.github.com/repos/$GH_REPO/releases/latest | grep -o '"tag_name": "[^"]*"' | sed 's/"tag_name": "//;s/"//g') + local latest_release=$(curl -fsSL https://api.github.com/repos/$GH_REPO/releases/latest | grep -o '"tag_name": "[^"]*"' | sed 's/"tag_name": "//;s/"//g') if [ -z "$latest_release" ]; then echo "Failed to check for the latest release. Exiting..." 
>&2 exit 1 @@ -196,7 +196,7 @@ function buildYourOwnImage(){ REPO=https://github.com/$GH_REPO.git git clone "$REPO" "$PLANE_TEMP_CODE_DIR" --branch "$BRANCH" --single-branch --depth 1 - cp "$PLANE_TEMP_CODE_DIR/deploy/selfhost/build.yml" "$PLANE_TEMP_CODE_DIR/build.yml" + cp "$PLANE_TEMP_CODE_DIR/deployments/cli/community/build.yml" "$PLANE_TEMP_CODE_DIR/build.yml" cd "$PLANE_TEMP_CODE_DIR" || exit @@ -247,7 +247,7 @@ function download() { mv $PLANE_INSTALL_DIR/docker-compose.yaml $PLANE_INSTALL_DIR/archive/$TS.docker-compose.yaml fi - RESPONSE=$(curl -H 'Cache-Control: no-cache, no-store' -s -w "HTTPSTATUS:%{http_code}" "$RELEASE_DOWNLOAD_URL/$APP_RELEASE/docker-compose.yml?$(date +%s)") + RESPONSE=$(curl -fsSL -H 'Cache-Control: no-cache, no-store' -s -w "HTTPSTATUS:%{http_code}" "$RELEASE_DOWNLOAD_URL/$APP_RELEASE/docker-compose.yml?$(date +%s)") BODY=$(echo "$RESPONSE" | sed -e 's/HTTPSTATUS\:.*//g') STATUS=$(echo "$RESPONSE" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') @@ -255,7 +255,7 @@ function download() { echo "$BODY" > $PLANE_INSTALL_DIR/docker-compose.yaml else # Fallback to download from the raw github url - RESPONSE=$(curl -H 'Cache-Control: no-cache, no-store' -s -w "HTTPSTATUS:%{http_code}" "$FALLBACK_DOWNLOAD_URL/docker-compose.yml?$(date +%s)") + RESPONSE=$(curl -fsSL -H 'Cache-Control: no-cache, no-store' -s -w "HTTPSTATUS:%{http_code}" "$FALLBACK_DOWNLOAD_URL/docker-compose.yml?$(date +%s)") BODY=$(echo "$RESPONSE" | sed -e 's/HTTPSTATUS\:.*//g') STATUS=$(echo "$RESPONSE" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') @@ -269,7 +269,7 @@ function download() { fi fi - RESPONSE=$(curl -H 'Cache-Control: no-cache, no-store' -s -w "HTTPSTATUS:%{http_code}" "$RELEASE_DOWNLOAD_URL/$APP_RELEASE/variables.env?$(date +%s)") + RESPONSE=$(curl -fsSL -H 'Cache-Control: no-cache, no-store' -s -w "HTTPSTATUS:%{http_code}" "$RELEASE_DOWNLOAD_URL/$APP_RELEASE/variables.env?$(date +%s)") BODY=$(echo "$RESPONSE" | sed -e 's/HTTPSTATUS\:.*//g') STATUS=$(echo "$RESPONSE" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') @@ -277,7 +277,7 @@ function download() { echo "$BODY" > $PLANE_INSTALL_DIR/variables-upgrade.env else # Fallback to download from the raw github url - RESPONSE=$(curl -H 'Cache-Control: no-cache, no-store' -s -w "HTTPSTATUS:%{http_code}" "$FALLBACK_DOWNLOAD_URL/variables.env?$(date +%s)") + RESPONSE=$(curl -fsSL -H 'Cache-Control: no-cache, no-store' -s -w "HTTPSTATUS:%{http_code}" "$FALLBACK_DOWNLOAD_URL/variables.env?$(date +%s)") BODY=$(echo "$RESPONSE" | sed -e 's/HTTPSTATUS\:.*//g') STATUS=$(echo "$RESPONSE" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') diff --git a/plane/update.js b/plane/update.js index 5abe74f8b..51317d885 100644 --- a/plane/update.js +++ b/plane/update.js @@ -1,13 +1,28 @@ import utils from "../utils.js"; -await utils.cloneOrPullRepo({ - repo: "https://github.com/makeplane/plane.git", - path: "./repo", - branch: "preview", -}); - -await utils.copyDir("./repo/deploy/selfhost", "./code"); -await utils.renameFile("./code/variables.env", "./code/.env.example"); +await utils.removeDir("./code"); +await utils.renameDir("./plane-code", "./code"); +await utils.removeDir("./code/archive"); +await utils.renameFile("./code/plane.env", "./code/.env.example"); +await utils.renameFile("./code/docker-compose.yaml", "./code/docker-compose.yml"); await utils.removeContainerNames("./code/docker-compose.yml"); await utils.removePorts("./code/docker-compose.yml"); + +await utils.searchReplace( + "./code/.env.example", + "APP_DOMAIN=localhost", + "APP_DOMAIN=$(PRIMARY_DOMAIN)" +); + 
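An aside on the `$(PRIMARY_DOMAIN)` token written by the replacement above (and by the `WEB_URL` and `CORS_ALLOWED_ORIGINS` replacements that follow): the parenthesised form is not Docker Compose variable syntax, so it is presumably expanded by the hosting platform before the env file is used. A minimal sketch of that substitution, assuming a POSIX shell and that `PRIMARY_DOMAIN` is supplied externally:

```bash
# Hypothetical deploy-time expansion of the $(PRIMARY_DOMAIN) token.
# The real substitution happens outside this repo; this only illustrates
# the intended semantics of the placeholder.
PRIMARY_DOMAIN="plane.example.com"  # assumed input from the platform
sed "s|\$(PRIMARY_DOMAIN)|${PRIMARY_DOMAIN}|g" .env.example > .env
grep -E '^(APP_DOMAIN|WEB_URL|CORS_ALLOWED_ORIGINS)=' .env
```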
+await utils.searchReplace( + "./code/.env.example", + "WEB_URL=http://${APP_DOMAIN}", + "WEB_URL=https://$(PRIMARY_DOMAIN)" +); + +await utils.searchReplace( + "./code/.env.example", + "CORS_ALLOWED_ORIGINS=http://${APP_DOMAIN}", + "CORS_ALLOWED_ORIGINS=https://$(PRIMARY_DOMAIN)" +); diff --git a/plane/update.sh b/plane/update.sh deleted file mode 100644 index 3bd731389..000000000 --- a/plane/update.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -if [ ! -d "./repo" ]; then - git clone --depth 1 --branch preview --single-branch https://github.com/makeplane/plane.git repo -else - cd repo - git pull - cd .. -fi - -cp -r ./repo/deploy/selfhost/. ./code -mv ./code/variables.env ./code/.env.example - - diff --git a/supabase/code/.env.example b/supabase/code/.env.example index bb7450087..fcb631072 100644 --- a/supabase/code/.env.example +++ b/supabase/code/.env.example @@ -26,10 +26,17 @@ POSTGRES_PORT=5432 ############ # Supavisor -- Database pooler ############ +# Port Supavisor listens on for transaction pooling connections POOLER_PROXY_PORT_TRANSACTION=6543 +# Maximum number of PostgreSQL connections Supavisor opens per pool POOLER_DEFAULT_POOL_SIZE=20 +# Maximum number of client connections Supavisor accepts per pool POOLER_MAX_CLIENT_CONN=100 +# Unique tenant identifier POOLER_TENANT_ID=your-tenant-id +# Pool size for internal metadata storage used by Supavisor +# This is separate from client connections and used only by Supavisor itself +POOLER_DB_POOL_SIZE=5 ############ @@ -52,7 +59,7 @@ PGRST_DB_SCHEMAS=public,storage,graphql_public ############ ## General -SITE_URL=http://localhost:3000 +SITE_URL=https://$(PRIMARY_DOMAIN) ADDITIONAL_REDIRECT_URLS= JWT_EXPIRY=3600 DISABLE_SIGNUP=false @@ -106,14 +113,14 @@ FUNCTIONS_VERIFY_JWT=false ############ -# Logs - Configuration for Logflare +# Logs - Configuration for Analytics # Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction ############ -LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key - # Change vector.toml sinks to reflect this change -LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key +# these cannot be the same value +LOGFLARE_PUBLIC_ACCESS_TOKEN=your-super-secret-and-long-logflare-key-public +LOGFLARE_PRIVATE_ACCESS_TOKEN=your-super-secret-and-long-logflare-key-private # Docker socket location - this value will differ depending on your OS DOCKER_SOCKET_LOCATION=/var/run/docker.sock diff --git a/supabase/code/dev/docker-compose.dev.yml b/supabase/code/dev/docker-compose.dev.yml index ca19a0ad7..f8b3ba787 100644 --- a/supabase/code/dev/docker-compose.dev.yml +++ b/supabase/code/dev/docker-compose.dev.yml @@ -4,7 +4,7 @@ services: studio: build: context: .. 
- dockerfile: studio/Dockerfile + dockerfile: apps/studio/Dockerfile target: dev ports: - 8082:8082 diff --git a/supabase/code/docker-compose.yml b/supabase/code/docker-compose.yml index a8d8584d9..fe77a0c3f 100644 --- a/supabase/code/docker-compose.yml +++ b/supabase/code/docker-compose.yml @@ -10,7 +10,7 @@ name: supabase services: studio: - image: supabase/studio:2025.05.19-sha-3487831 + image: supabase/studio:2025.06.30-sha-6f5982d restart: unless-stopped healthcheck: test: @@ -41,7 +41,7 @@ services: SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY} AUTH_JWT_SECRET: ${JWT_SECRET} - LOGFLARE_API_KEY: ${LOGFLARE_API_KEY} + LOGFLARE_PRIVATE_ACCESS_TOKEN: ${LOGFLARE_PRIVATE_ACCESS_TOKEN} LOGFLARE_URL: http://analytics:4000 NEXT_PUBLIC_ENABLE_LOGS: true # Comment to use Big Query backend for analytics @@ -75,7 +75,7 @@ services: /docker-entrypoint.sh kong docker-start' auth: - image: supabase/gotrue:v2.172.1 + image: supabase/gotrue:v2.177.0 restart: unless-stopped healthcheck: test: @@ -136,6 +136,12 @@ services: GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP} GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM} + # LINE OAuth Configuration + GOTRUE_EXTERNAL_LINE_ENABLED: ${GOTRUE_EXTERNAL_LINE_ENABLED} + GOTRUE_EXTERNAL_LINE_CHANNEL_ID: ${GOTRUE_EXTERNAL_LINE_CHANNEL_ID} + GOTRUE_EXTERNAL_LINE_CHANNEL_SECRET: ${GOTRUE_EXTERNAL_LINE_CHANNEL_SECRET} + GOTRUE_EXTERNAL_LINE_SCOPE: ${GOTRUE_EXTERNAL_LINE_SCOPE} + GOTRUE_EXTERNAL_LINE_REDIRECT_URI: ${GOTRUE_EXTERNAL_LINE_REDIRECT_URI} # Uncomment to enable custom access token hook. Please see: https://supabase.com/docs/guides/auth/auth-hooks for full list of hooks and additional details about custom_access_token_hook # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED: "true" @@ -221,7 +227,7 @@ services: # To use S3 backed storage: docker compose -f docker-compose.yml -f docker-compose.s3.yml up storage: - image: supabase/storage-api:v1.22.17 + image: supabase/storage-api:v1.25.7 restart: unless-stopped volumes: - ./volumes/storage:/var/lib/storage:z @@ -279,7 +285,7 @@ services: IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION} meta: - image: supabase/postgres-meta:v0.89.0 + image: supabase/postgres-meta:v0.91.0 restart: unless-stopped depends_on: db: @@ -314,7 +320,7 @@ services: command: [ "start", "--main-service", "/home/deno/functions/main" ] analytics: - image: supabase/logflare:1.12.0 + image: supabase/logflare:1.14.2 restart: unless-stopped # Uncomment to use Big Query backend for analytics # volumes: @@ -339,7 +345,8 @@ services: DB_PORT: ${POSTGRES_PORT} DB_PASSWORD: ${POSTGRES_PASSWORD} DB_SCHEMA: _analytics - LOGFLARE_API_KEY: ${LOGFLARE_API_KEY} + LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN} + LOGFLARE_PRIVATE_ACCESS_TOKEN: ${LOGFLARE_PRIVATE_ACCESS_TOKEN} LOGFLARE_SINGLE_TENANT: true LOGFLARE_SUPABASE_MODE: true LOGFLARE_MIN_CLUSTER_SIZE: 1 @@ -421,14 +428,14 @@ services: interval: 5s retries: 3 environment: - LOGFLARE_API_KEY: ${LOGFLARE_API_KEY} + LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN} command: [ "--config", "/etc/vector/vector.yml" ] security_opt: - "label=disable" # Update the DATABASE_URL if you are using an external Postgres database supavisor: - image: supabase/supavisor:2.5.1 + image: supabase/supavisor:2.5.7 restart: unless-stopped volumes: - ./volumes/pooler/pooler.exs:/etc/pooler/pooler.exs:ro,z @@ -456,7 +463,7 @@ services: POSTGRES_PORT: ${POSTGRES_PORT} POSTGRES_DB: ${POSTGRES_DB} POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} - DATABASE_URL: 
ecto://supabase_admin:${POSTGRES_PASSWORD}@db:${POSTGRES_PORT}/_supabase + DATABASE_URL: ecto://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase CLUSTER_POSTGRES: true SECRET_KEY_BASE: ${SECRET_KEY_BASE} VAULT_ENC_KEY: ${VAULT_ENC_KEY} @@ -468,6 +475,7 @@ services: POOLER_DEFAULT_POOL_SIZE: ${POOLER_DEFAULT_POOL_SIZE} POOLER_MAX_CLIENT_CONN: ${POOLER_MAX_CLIENT_CONN} POOLER_POOL_MODE: transaction + DB_POOL_SIZE: ${POOLER_DB_POOL_SIZE} command: [ "/bin/sh", diff --git a/supabase/code/docker-compose.yml.backup b/supabase/code/docker-compose.yml.backup new file mode 100644 index 000000000..b1fbf8641 --- /dev/null +++ b/supabase/code/docker-compose.yml.backup @@ -0,0 +1,482 @@ +# Usage +# Start: docker compose up +# With helpers: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml up +# Stop: docker compose down +# Destroy: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans +# Reset everything: ./reset.sh + +name: supabase + +services: + + studio: + image: supabase/studio:2025.06.30-sha-6f5982d + restart: unless-stopped + healthcheck: + test: + [ + "CMD", + "node", + "-e", + "fetch('http://studio:3000/api/platform/profile').then((r) => {if + (r.status !== 200) throw new Error(r.status)})" + ] + timeout: 10s + interval: 5s + retries: 3 + depends_on: + analytics: + condition: service_healthy + environment: + STUDIO_PG_META_URL: http://meta:8080 + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + + DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION} + DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT} + OPENAI_API_KEY: ${OPENAI_API_KEY:-} + + SUPABASE_URL: http://kong:8000 + SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL} + SUPABASE_ANON_KEY: ${ANON_KEY} + SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY} + AUTH_JWT_SECRET: ${JWT_SECRET} + + LOGFLARE_PRIVATE_ACCESS_TOKEN: ${LOGFLARE_PRIVATE_ACCESS_TOKEN} + LOGFLARE_URL: http://analytics:4000 + NEXT_PUBLIC_ENABLE_LOGS: true + # Comment to use Big Query backend for analytics + NEXT_ANALYTICS_BACKEND_PROVIDER: postgres + # Uncomment to use Big Query backend for analytics + # NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery + + kong: + image: kong:2.8.1 + restart: unless-stopped + volumes: + # https://github.com/supabase/supabase/issues/12661 + - ./volumes/api/kong.yml:/home/kong/temp.yml:ro,z + depends_on: + analytics: + condition: service_healthy + environment: + KONG_DATABASE: "off" + KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml + # https://github.com/supabase/cli/issues/14 + KONG_DNS_ORDER: LAST,A,CNAME + KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth + KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k + KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k + SUPABASE_ANON_KEY: ${ANON_KEY} + SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY} + DASHBOARD_USERNAME: ${DASHBOARD_USERNAME} + DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD} + # https://unix.stackexchange.com/a/294837 + entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && + /docker-entrypoint.sh kong docker-start' + + auth: + image: supabase/gotrue:v2.177.0 + restart: unless-stopped + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://localhost:9999/health" + ] + timeout: 5s + interval: 5s + retries: 3 + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + environment: + GOTRUE_API_HOST: 0.0.0.0 + GOTRUE_API_PORT: 9999 + API_EXTERNAL_URL: ${API_EXTERNAL_URL} + + 
GOTRUE_DB_DRIVER: postgres + GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + + GOTRUE_SITE_URL: ${SITE_URL} + GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS} + GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP} + + GOTRUE_JWT_ADMIN_ROLES: service_role + GOTRUE_JWT_AUD: authenticated + GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated + GOTRUE_JWT_EXP: ${JWT_EXPIRY} + GOTRUE_JWT_SECRET: ${JWT_SECRET} + + GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP} + GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS} + GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM} + + # Uncomment to bypass nonce check in ID Token flow. Commonly set to true when using Google Sign In on mobile. + # GOTRUE_EXTERNAL_SKIP_NONCE_CHECK: true + + # GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true + # GOTRUE_SMTP_MAX_FREQUENCY: 1s + GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL} + GOTRUE_SMTP_HOST: ${SMTP_HOST} + GOTRUE_SMTP_PORT: ${SMTP_PORT} + GOTRUE_SMTP_USER: ${SMTP_USER} + GOTRUE_SMTP_PASS: ${SMTP_PASS} + GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME} + GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE} + GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION} + GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY} + GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE} + + GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP} + GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM} + # Uncomment to enable custom access token hook. Please see: https://supabase.com/docs/guides/auth/auth-hooks for full list of hooks and additional details about custom_access_token_hook + + # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED: "true" + # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI: "pg-functions://postgres/public/custom_access_token_hook" + # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_SECRETS: "" + + # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_ENABLED: "true" + # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/mfa_verification_attempt" + + # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_ENABLED: "true" + # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/password_verification_attempt" + + # GOTRUE_HOOK_SEND_SMS_ENABLED: "false" + # GOTRUE_HOOK_SEND_SMS_URI: "pg-functions://postgres/public/custom_access_token_hook" + # GOTRUE_HOOK_SEND_SMS_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n" + + # GOTRUE_HOOK_SEND_EMAIL_ENABLED: "false" + # GOTRUE_HOOK_SEND_EMAIL_URI: "http://host.docker.internal:54321/functions/v1/email_sender" + # GOTRUE_HOOK_SEND_EMAIL_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n" + + rest: + image: postgrest/postgrest:v12.2.12 + restart: unless-stopped + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + environment: + PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS} + PGRST_DB_ANON_ROLE: anon + PGRST_JWT_SECRET: ${JWT_SECRET} + PGRST_DB_USE_LEGACY_GUCS: "false" + PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET} + PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY} + command: [ "postgrest" ] + + realtime: + # This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain + image: supabase/realtime:v2.34.47 + restart: unless-stopped + depends_on: + db: + # 
Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + healthcheck: + test: + [ + "CMD", + "curl", + "-sSfL", + "--head", + "-o", + "/dev/null", + "-H", + "Authorization: Bearer ${ANON_KEY}", + "http://localhost:4000/api/tenants/realtime-dev/health" + ] + timeout: 5s + interval: 5s + retries: 3 + environment: + PORT: 4000 + DB_HOST: ${POSTGRES_HOST} + DB_PORT: ${POSTGRES_PORT} + DB_USER: supabase_admin + DB_PASSWORD: ${POSTGRES_PASSWORD} + DB_NAME: ${POSTGRES_DB} + DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime' + DB_ENC_KEY: supabaserealtime + API_JWT_SECRET: ${JWT_SECRET} + SECRET_KEY_BASE: ${SECRET_KEY_BASE} + ERL_AFLAGS: -proto_dist inet_tcp + DNS_NODES: "''" + RLIMIT_NOFILE: "10000" + APP_NAME: realtime + SEED_SELF_HOST: true + RUN_JANITOR: true + + # To use S3 backed storage: docker compose -f docker-compose.yml -f docker-compose.s3.yml up + storage: + image: supabase/storage-api:v1.25.7 + restart: unless-stopped + volumes: + - ./volumes/storage:/var/lib/storage:z + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://storage:5000/status" + ] + timeout: 5s + interval: 5s + retries: 3 + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + rest: + condition: service_started + imgproxy: + condition: service_started + environment: + ANON_KEY: ${ANON_KEY} + SERVICE_KEY: ${SERVICE_ROLE_KEY} + POSTGREST_URL: http://rest:3000 + PGRST_JWT_SECRET: ${JWT_SECRET} + DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + FILE_SIZE_LIMIT: 52428800 + STORAGE_BACKEND: file + FILE_STORAGE_BACKEND_PATH: /var/lib/storage + TENANT_ID: stub + # TODO: https://github.com/supabase/storage-api/issues/55 + REGION: stub + GLOBAL_S3_BUCKET: stub + ENABLE_IMAGE_TRANSFORMATION: "true" + IMGPROXY_URL: http://imgproxy:5001 + + imgproxy: + image: darthsim/imgproxy:v3.8.0 + restart: unless-stopped + volumes: + - ./volumes/storage:/var/lib/storage:z + healthcheck: + test: [ "CMD", "imgproxy", "health" ] + timeout: 5s + interval: 5s + retries: 3 + environment: + IMGPROXY_BIND: ":5001" + IMGPROXY_LOCAL_FILESYSTEM_ROOT: / + IMGPROXY_USE_ETAG: "true" + IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION} + + meta: + image: supabase/postgres-meta:v0.91.0 + restart: unless-stopped + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + environment: + PG_META_PORT: 8080 + PG_META_DB_HOST: ${POSTGRES_HOST} + PG_META_DB_PORT: ${POSTGRES_PORT} + PG_META_DB_NAME: ${POSTGRES_DB} + PG_META_DB_USER: supabase_admin + PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD} + + functions: + image: supabase/edge-runtime:v1.67.4 + restart: unless-stopped + volumes: + - ./volumes/functions:/home/deno/functions:Z + depends_on: + analytics: + condition: service_healthy + environment: + JWT_SECRET: ${JWT_SECRET} + SUPABASE_URL: http://kong:8000 + SUPABASE_ANON_KEY: ${ANON_KEY} + SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY} + SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + # TODO: Allow configuring VERIFY_JWT per function. 
This PR might help: https://github.com/supabase/cli/pull/786 + VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}" + command: [ "start", "--main-service", "/home/deno/functions/main" ] + + analytics: + image: supabase/logflare:1.14.2 + restart: unless-stopped + # Uncomment to use Big Query backend for analytics + # volumes: + # - type: bind + # source: ${PWD}/gcloud.json + # target: /opt/app/rel/logflare/bin/gcloud.json + # read_only: true + healthcheck: + test: [ "CMD", "curl", "http://localhost:4000/health" ] + timeout: 5s + interval: 5s + retries: 10 + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + environment: + LOGFLARE_NODE_HOST: 127.0.0.1 + DB_USERNAME: supabase_admin + DB_DATABASE: _supabase + DB_HOSTNAME: ${POSTGRES_HOST} + DB_PORT: ${POSTGRES_PORT} + DB_PASSWORD: ${POSTGRES_PASSWORD} + DB_SCHEMA: _analytics + LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN} + LOGFLARE_PRIVATE_ACCESS_TOKEN: ${LOGFLARE_PRIVATE_ACCESS_TOKEN} + LOGFLARE_SINGLE_TENANT: true + LOGFLARE_SUPABASE_MODE: true + LOGFLARE_MIN_CLUSTER_SIZE: 1 + + # Comment variables to use Big Query backend for analytics + POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase + POSTGRES_BACKEND_SCHEMA: _analytics + LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true + # Uncomment to use Big Query backend for analytics + # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID} + # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER} + + # Comment out everything below this point if you are using an external Postgres database + db: + image: supabase/postgres:15.8.1.060 + restart: unless-stopped + volumes: + - ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z + # Must be superuser to create event trigger + - ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z + # Must be superuser to alter reserved role + - ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z + # Initialize the database settings with JWT_SECRET and JWT_EXP + - ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z + # PGDATA directory is persisted between restarts + - ./volumes/db/data:/var/lib/postgresql/data:Z + # Changes required for internal supabase data such as _analytics + - ./volumes/db/_supabase.sql:/docker-entrypoint-initdb.d/migrations/97-_supabase.sql:Z + # Changes required for Analytics support + - ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z + # Changes required for Pooler support + - ./volumes/db/pooler.sql:/docker-entrypoint-initdb.d/migrations/99-pooler.sql:Z + # Use named volume to persist pgsodium decryption key between restarts + - db-config:/etc/postgresql-custom + healthcheck: + test: [ "CMD", "pg_isready", "-U", "postgres", "-h", "localhost" ] + interval: 5s + timeout: 5s + retries: 10 + depends_on: + vector: + condition: service_healthy + environment: + POSTGRES_HOST: /var/run/postgresql + PGPORT: ${POSTGRES_PORT} + POSTGRES_PORT: ${POSTGRES_PORT} + PGPASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + PGDATABASE: ${POSTGRES_DB} + POSTGRES_DB: ${POSTGRES_DB} + JWT_SECRET: ${JWT_SECRET} + JWT_EXP: ${JWT_EXPIRY} + command: + [ + "postgres", + "-c", + "config_file=/etc/postgresql/postgresql.conf", + "-c", + "log_min_messages=fatal" # prevents Realtime polling queries from appearing in logs + ] + + vector: + image: timberio/vector:0.28.1-alpine + restart: unless-stopped + 
volumes: + - ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro,z + - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro,z + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://vector:9001/health" + ] + timeout: 5s + interval: 5s + retries: 3 + environment: + LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN} + command: [ "--config", "/etc/vector/vector.yml" ] + security_opt: + - "label=disable" + + # Update the DATABASE_URL if you are using an external Postgres database + supavisor: + image: supabase/supavisor:2.5.7 + restart: unless-stopped + volumes: + - ./volumes/pooler/pooler.exs:/etc/pooler/pooler.exs:ro,z + healthcheck: + test: + [ + "CMD", + "curl", + "-sSfL", + "--head", + "-o", + "/dev/null", + "http://127.0.0.1:4000/api/health" + ] + interval: 10s + timeout: 5s + retries: 5 + depends_on: + db: + condition: service_healthy + analytics: + condition: service_healthy + environment: + PORT: 4000 + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + DATABASE_URL: ecto://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase + CLUSTER_POSTGRES: true + SECRET_KEY_BASE: ${SECRET_KEY_BASE} + VAULT_ENC_KEY: ${VAULT_ENC_KEY} + API_JWT_SECRET: ${JWT_SECRET} + METRICS_JWT_SECRET: ${JWT_SECRET} + REGION: local + ERL_AFLAGS: -proto_dist inet_tcp + POOLER_TENANT_ID: ${POOLER_TENANT_ID} + POOLER_DEFAULT_POOL_SIZE: ${POOLER_DEFAULT_POOL_SIZE} + POOLER_MAX_CLIENT_CONN: ${POOLER_MAX_CLIENT_CONN} + POOLER_POOL_MODE: transaction + DB_POOL_SIZE: ${POOLER_DB_POOL_SIZE} + command: + [ + "/bin/sh", + "-c", + "/app/bin/migrate && /app/bin/supavisor eval \"$$(cat + /etc/pooler/pooler.exs)\" && /app/bin/server" + ] + +volumes: + db-config: diff --git a/supabase/code/volumes/logs/vector.yml b/supabase/code/volumes/logs/vector.yml index cce46df43..1c438a8ec 100644 --- a/supabase/code/volumes/logs/vector.yml +++ b/supabase/code/volumes/logs/vector.yml @@ -165,7 +165,9 @@ sinks: method: 'post' request: retry_max_duration_secs: 10 - uri: 'http://analytics:4000/api/logs?source_name=gotrue.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}' + headers: + x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required} + uri: 'http://analytics:4000/api/logs?source_name=gotrue.logs.prod' logflare_realtime: type: 'http' inputs: @@ -175,7 +177,9 @@ sinks: method: 'post' request: retry_max_duration_secs: 10 - uri: 'http://analytics:4000/api/logs?source_name=realtime.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}' + headers: + x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required} + uri: 'http://analytics:4000/api/logs?source_name=realtime.logs.prod' logflare_rest: type: 'http' inputs: @@ -185,7 +189,9 @@ sinks: method: 'post' request: retry_max_duration_secs: 10 - uri: 'http://analytics:4000/api/logs?source_name=postgREST.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}' + headers: + x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required} + uri: 'http://analytics:4000/api/logs?source_name=postgREST.logs.prod' logflare_db: type: 'http' inputs: @@ -195,10 +201,12 @@ sinks: method: 'post' request: retry_max_duration_secs: 10 + headers: + x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required} # We must route the sink through kong because ingesting logs before logflare is fully initialised will # lead to broken queries from studio. 
This works by the assumption that containers are started in the # following order: vector > db > logflare > kong - uri: 'http://kong:8000/analytics/v1/api/logs?source_name=postgres.logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}' + uri: 'http://kong:8000/analytics/v1/api/logs?source_name=postgres.logs' logflare_functions: type: 'http' inputs: @@ -208,7 +216,9 @@ sinks: method: 'post' request: retry_max_duration_secs: 10 - uri: 'http://analytics:4000/api/logs?source_name=deno-relay-logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}' + headers: + x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required} + uri: 'http://analytics:4000/api/logs?source_name=deno-relay-logs' logflare_storage: type: 'http' inputs: @@ -218,7 +228,9 @@ sinks: method: 'post' request: retry_max_duration_secs: 10 - uri: 'http://analytics:4000/api/logs?source_name=storage.logs.prod.2&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}' + headers: + x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required} + uri: 'http://analytics:4000/api/logs?source_name=storage.logs.prod.2' logflare_kong: type: 'http' inputs: @@ -229,4 +241,6 @@ sinks: method: 'post' request: retry_max_duration_secs: 10 - uri: 'http://analytics:4000/api/logs?source_name=cloudflare.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}' + headers: + x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required} + uri: 'http://analytics:4000/api/logs?source_name=cloudflare.logs.prod' diff --git a/supabase/update.js b/supabase/update.js index ca3df4fa7..fdcaa71db 100644 --- a/supabase/update.js +++ b/supabase/update.js @@ -5,3 +5,9 @@ await utils.copyDir("./repo/docker", "./code"); await utils.removeContainerNames("./code/docker-compose.yml"); await utils.removePorts("./code/docker-compose.yml"); + +await utils.searchReplace( + "./code/.env.example", + "SITE_URL=http://localhost:3000", + "SITE_URL=https://$(PRIMARY_DOMAIN)" +); diff --git a/supabase/update.sh b/supabase/update.sh deleted file mode 100644 index 24abd5a02..000000000 --- a/supabase/update.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -if [ ! -d "./repo" ]; then - git clone --depth 1 --branch master --single-branch https://github.com/supabase/supabase repo -else - cd repo - git pull - cd .. -fi - -cp -r ./repo/docker/. ./code diff --git a/twenty/README.md b/twenty/README.md deleted file mode 100644 index 1fa12fb2a..000000000 --- a/twenty/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Twenty - -- copied from https://github.com/twentyhq/twenty -- removed `ports` diff --git a/twenty/code/.env.example b/twenty/code/.env.example deleted file mode 100644 index 941e7dfe0..000000000 --- a/twenty/code/.env.example +++ /dev/null @@ -1,19 +0,0 @@ -TAG=latest - -#PG_DATABASE_USER=postgres -#PG_DATABASE_PASSWORD=replace_me_with_a_strong_password_without_special_characters -#PG_DATABASE_HOST=db -#PG_DATABASE_PORT=5432 -REDIS_URL=redis://redis:6379 - -SERVER_URL=https://$(PRIMARY_DOMAIN) -SIGN_IN_PREFILLED=false - -# Use openssl rand -base64 32 for each secret -APP_SECRET=replace_me_with_a_random_string - -STORAGE_TYPE=local - -# STORAGE_S3_REGION=eu-west3 -# STORAGE_S3_NAME=my-bucket -# STORAGE_S3_ENDPOINT= diff --git a/twenty/code/Makefile b/twenty/code/Makefile deleted file mode 100644 index bdda023dd..000000000 --- a/twenty/code/Makefile +++ /dev/null @@ -1,27 +0,0 @@ -# Makefile for building Twenty CRM docker images. 
-# Set the tag and/or target build platform using make command-line variables assignments. -# -# Optional make variables: -# PLATFORM - defaults to 'linux/amd64' -# TAG - defaults to 'latest' -# -# Example: make -# Example: make PLATFORM=linux/aarch64 TAG=my-tag - -PLATFORM ?= linux/amd64 -TAG ?= latest - -prod-build: - @cd ../.. && docker build -f ./packages/twenty-docker/twenty/Dockerfile --platform $(PLATFORM) --tag twenty:$(TAG) . && cd - - -prod-run: - @docker run -d -p 3000:3000 --name twenty twenty:$(TAG) - -prod-postgres-run: - @docker run -d -p 5432:5432 -e POSTGRES_USER=postgres -e POSTGRES_PASSWORD=postgres --name twenty-postgres twenty-postgres:$(TAG) - -prod-website-build: - @cd ../.. && docker build -f ./packages/twenty-docker/twenty-website/Dockerfile --platform $(PLATFORM) --tag twenty-website:$(TAG) . && cd - - -prod-website-run: - @docker run -d -p 3000:3000 --name twenty-website twenty-website:$(TAG) diff --git a/twenty/code/docker-compose.yml b/twenty/code/docker-compose.yml deleted file mode 100644 index 2c0e1884c..000000000 --- a/twenty/code/docker-compose.yml +++ /dev/null @@ -1,123 +0,0 @@ -name: twenty - -services: - server: - image: twentycrm/twenty:${TAG:-latest} - volumes: - - server-local-data:/app/packages/twenty-server/.local-storage - environment: - NODE_PORT: 3000 - PG_DATABASE_URL: postgres://${PG_DATABASE_USER:-postgres}:${PG_DATABASE_PASSWORD:-postgres}@${PG_DATABASE_HOST:-db}:${PG_DATABASE_PORT:-5432}/default - SERVER_URL: ${SERVER_URL} - REDIS_URL: ${REDIS_URL:-redis://redis:6379} - - STORAGE_TYPE: ${STORAGE_TYPE} - STORAGE_S3_REGION: ${STORAGE_S3_REGION} - STORAGE_S3_NAME: ${STORAGE_S3_NAME} - STORAGE_S3_ENDPOINT: ${STORAGE_S3_ENDPOINT} - - APP_SECRET: ${APP_SECRET:-replace_me_with_a_random_string} - # MESSAGING_PROVIDER_GMAIL_ENABLED: ${MESSAGING_PROVIDER_GMAIL_ENABLED} - # CALENDAR_PROVIDER_GOOGLE_ENABLED: ${CALENDAR_PROVIDER_GOOGLE_ENABLED} - # AUTH_GOOGLE_CLIENT_ID: ${AUTH_GOOGLE_CLIENT_ID} - # AUTH_GOOGLE_CLIENT_SECRET: ${AUTH_GOOGLE_CLIENT_SECRET} - # AUTH_GOOGLE_CALLBACK_URL: ${AUTH_GOOGLE_CALLBACK_URL} - # AUTH_GOOGLE_APIS_CALLBACK_URL: ${AUTH_GOOGLE_APIS_CALLBACK_URL} - - # CALENDAR_PROVIDER_MICROSOFT_ENABLED: ${CALENDAR_PROVIDER_MICROSOFT_ENABLED} - # MESSAGING_PROVIDER_MICROSOFT_ENABLED: ${MESSAGING_PROVIDER_MICROSOFT_ENABLED} - # AUTH_MICROSOFT_ENABLED: ${AUTH_MICROSOFT_ENABLED} - # AUTH_MICROSOFT_CLIENT_ID: ${AUTH_MICROSOFT_CLIENT_ID} - # AUTH_MICROSOFT_CLIENT_SECRET: ${AUTH_MICROSOFT_CLIENT_SECRET} - # AUTH_MICROSOFT_CALLBACK_URL: ${AUTH_MICROSOFT_CALLBACK_URL} - # AUTH_MICROSOFT_APIS_CALLBACK_URL: ${AUTH_MICROSOFT_APIS_CALLBACK_URL} - - # EMAIL_FROM_ADDRESS: ${EMAIL_FROM_ADDRESS:-contact@yourdomain.com} - # EMAIL_FROM_NAME: ${EMAIL_FROM_NAME:-"John from YourDomain"} - # EMAIL_SYSTEM_ADDRESS: ${EMAIL_SYSTEM_ADDRESS:-system@yourdomain.com} - # EMAIL_DRIVER: ${EMAIL_DRIVER:-smtp} - # EMAIL_SMTP_HOST: ${EMAIL_SMTP_HOST:-smtp.gmail.com} - # EMAIL_SMTP_PORT: ${EMAIL_SMTP_PORT:-465} - # EMAIL_SMTP_USER: ${EMAIL_SMTP_USER:-} - # EMAIL_SMTP_PASSWORD: ${EMAIL_SMTP_PASSWORD:-} - - depends_on: - db: - condition: service_healthy - healthcheck: - test: curl --fail http://localhost:3000/healthz - interval: 5s - timeout: 5s - retries: 20 - restart: always - - worker: - image: twentycrm/twenty:${TAG:-latest} - volumes: - - server-local-data:/app/packages/twenty-server/${STORAGE_LOCAL_PATH:-.local-storage} - command: [ "yarn", "worker:prod" ] - environment: - PG_DATABASE_URL: 
postgres://${PG_DATABASE_USER:-postgres}:${PG_DATABASE_PASSWORD:-postgres}@${PG_DATABASE_HOST:-db}:${PG_DATABASE_PORT:-5432}/default - SERVER_URL: ${SERVER_URL} - REDIS_URL: ${REDIS_URL:-redis://redis:6379} - DISABLE_DB_MIGRATIONS: "true" # it already runs on the server - - STORAGE_TYPE: ${STORAGE_TYPE} - STORAGE_S3_REGION: ${STORAGE_S3_REGION} - STORAGE_S3_NAME: ${STORAGE_S3_NAME} - STORAGE_S3_ENDPOINT: ${STORAGE_S3_ENDPOINT} - - APP_SECRET: ${APP_SECRET:-replace_me_with_a_random_string} - # MESSAGING_PROVIDER_GMAIL_ENABLED: ${MESSAGING_PROVIDER_GMAIL_ENABLED} - # CALENDAR_PROVIDER_GOOGLE_ENABLED: ${CALENDAR_PROVIDER_GOOGLE_ENABLED} - # AUTH_GOOGLE_CLIENT_ID: ${AUTH_GOOGLE_CLIENT_ID} - # AUTH_GOOGLE_CLIENT_SECRET: ${AUTH_GOOGLE_CLIENT_SECRET} - # AUTH_GOOGLE_CALLBACK_URL: ${AUTH_GOOGLE_CALLBACK_URL} - # AUTH_GOOGLE_APIS_CALLBACK_URL: ${AUTH_GOOGLE_APIS_CALLBACK_URL} - - # CALENDAR_PROVIDER_MICROSOFT_ENABLED: ${CALENDAR_PROVIDER_MICROSOFT_ENABLED} - # MESSAGING_PROVIDER_MICROSOFT_ENABLED: ${MESSAGING_PROVIDER_MICROSOFT_ENABLED} - # AUTH_MICROSOFT_ENABLED: ${AUTH_MICROSOFT_ENABLED} - # AUTH_MICROSOFT_CLIENT_ID: ${AUTH_MICROSOFT_CLIENT_ID} - # AUTH_MICROSOFT_CLIENT_SECRET: ${AUTH_MICROSOFT_CLIENT_SECRET} - # AUTH_MICROSOFT_CALLBACK_URL: ${AUTH_MICROSOFT_CALLBACK_URL} - # AUTH_MICROSOFT_APIS_CALLBACK_URL: ${AUTH_MICROSOFT_APIS_CALLBACK_URL} - - # EMAIL_FROM_ADDRESS: ${EMAIL_FROM_ADDRESS:-contact@yourdomain.com} - # EMAIL_FROM_NAME: ${EMAIL_FROM_NAME:-"John from YourDomain"} - # EMAIL_SYSTEM_ADDRESS: ${EMAIL_SYSTEM_ADDRESS:-system@yourdomain.com} - # EMAIL_DRIVER: ${EMAIL_DRIVER:-smtp} - # EMAIL_SMTP_HOST: ${EMAIL_SMTP_HOST:-smtp.gmail.com} - # EMAIL_SMTP_PORT: ${EMAIL_SMTP_PORT:-465} - # EMAIL_SMTP_USER: ${EMAIL_SMTP_USER:-} - # EMAIL_SMTP_PASSWORD: ${EMAIL_SMTP_PASSWORD:-} - - depends_on: - db: - condition: service_healthy - server: - condition: service_healthy - restart: always - - db: - image: postgres:16 - volumes: - - db-data:/var/lib/postgresql/data - environment: - POSTGRES_USER: ${PG_DATABASE_USER:-postgres} - POSTGRES_PASSWORD: ${PG_DATABASE_PASSWORD:-postgres} - healthcheck: - test: pg_isready -U ${PG_DATABASE_USER:-postgres} -h localhost -d postgres - interval: 5s - timeout: 5s - retries: 10 - restart: always - - redis: - image: redis - restart: always - command: [ "--maxmemory-policy", "noeviction" ] - -volumes: - db-data: - server-local-data: diff --git a/twenty/code/k8s/README.md b/twenty/code/k8s/README.md deleted file mode 100644 index d538d21ba..000000000 --- a/twenty/code/k8s/README.md +++ /dev/null @@ -1,116 +0,0 @@ -# README -DISCLAIMER: The k8s and podman deployments are not maintained by the core team. -These files are provided and maintained by the community. Twenty core team -maintains support for docker deployment. - -## Overview - -This repository contains Kubernetes manifests and Terraform files to help you deploy and manage the TwentyCRM application. The files are located in the `packages/twenty-docker/k8s` directory. 
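Before the prerequisites listed below, a quick sanity check that the required tools are installed can save a failed deploy; a minimal sketch (the tool names mirror the list that follows, everything else is an assumption):

```bash
# Verify the tools from the Prerequisites section are on PATH.
for tool in kubectl terraform docker; do
  command -v "$tool" >/dev/null 2>&1 || echo "missing: $tool"
done
```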
- -## Prerequisites - -Before using these files, ensure you have the following installed and configured on your system: - -- Kubernetes cluster (e.g., Minikube, EKS, GKE) -- kubectl -- Terraform -- Docker - -## Setup Instructions - -### Step 1: Clone the Repository - -Clone the repository to your local machine: - -``` bash -git clone https://github.com/twentyhq/twenty.git -cd twentycrm/packages/twenty-docker/k8s -``` - -### Step 2: Customize the Manifests and Terraform Files - -**Important:** These files require customization for your specific implementation. Update the placeholders and configurations according to your environment and requirements. - -### Step 3: Deploy with Terraform - -1. Navigate to the Terraform directory: - - ```bash - cd terraform - ``` - -2. Initialize Terraform: - - ```bash - terraform init - ``` - -3. Plan the deployment: - - ```bash - terraform plan - ``` - -4. Apply the deployment: - - ```bash - terraform apply - ``` - -## OR - -### Step 3: Deploy with Kubernetes Manifests - -1. Navigate to the Kubernetes manifests directory: - - ```bash - cd ../k8s - ``` - -2. Create Server Secret - - ``` bash - kubectl create secret generic -n twentycrm tokens --from-literal accessToken=changeme --from-literal loginToken="changeme" --from-literal refreshToken="changeme" --from-literal fileToken="changeme" - ``` - -3. Apply the manifests: - - ```bash - kubectl apply -f . - ``` - -## Customization - -### Kubernetes Manifests - -- **Namespace:** Update the `namespace` in the manifests as needed. -- **Resource Limits:** Adjust the resource limits and requests according to your application's requirements. -- **Environment Variables:** Configure server tokens in the `Secret` command above. - -### Terraform Files - -- **Variables:** Update the variables in the `variables.tf` file to match your environment. -- **Locals:** Update the locals in the `main.tf` file to match your environment. -- **Providers:** Ensure the provider configurations (e.g., AWS, GCP) are correct for your setup. -- **Resources:** Modify the resource definitions as needed to fit your infrastructure. - -## Troubleshooting - -### Common Issues - -- **Connectivity:** Ensure your Kubernetes cluster is accessible and configured correctly. -- **Permissions:** Verify that you have the necessary permissions to deploy resources in your cloud provider. -- **Resource Limits:** Adjust resource limits if you encounter issues related to insufficient resources. - -### Logs and Debugging - -- Use `kubectl logs` to check the logs of your Kubernetes pods. -- Use `terraform show` and `terraform state` to inspect your Terraform state and configurations. - -## Conclusion - -This setup provides a basic structure for deploying the TwentyCRM application using Kubernetes and Terraform. Ensure you thoroughly customize the manifests and Terraform files to suit your specific needs. For any issues or questions, please refer to the official documentation of Kubernetes and Terraform or seek support from your cloud provider. - ---- - -Feel free to contribute and improve this repository by submitting pull requests or opening issues. Happy deploying! 
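To complement the Troubleshooting notes above, a short post-deploy check; the namespace and deployment name come from the manifests deleted below, the rest is a sketch:

```bash
# Confirm the TwentyCRM workloads came up after `kubectl apply -f .`.
kubectl get pods -n twentycrm
kubectl get svc -n twentycrm
# Tail the server logs (deployment name taken from the manifests below).
kubectl logs -n twentycrm deployment/twentycrm-server --tail=50
```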
diff --git a/twenty/code/k8s/manifests/deployment-db.yaml b/twenty/code/k8s/manifests/deployment-db.yaml deleted file mode 100644 index c797972e0..000000000 --- a/twenty/code/k8s/manifests/deployment-db.yaml +++ /dev/null @@ -1,58 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: twentycrm-db - name: twentycrm-db - namespace: twentycrm -spec: - progressDeadlineSeconds: 600 - replicas: 1 - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - type: RollingUpdate - selector: - matchLabels: - app: twentycrm-db - template: - metadata: - labels: - app: twentycrm-db - spec: - volumes: - - name: twentycrm-db-data - persistentVolumeClaim: - claimName: twentycrm-db-pvc - containers: - - name: twentycrm - image: twentycrm/twenty-postgres-spilo:latest - imagePullPolicy: Always - env: - - name: PGUSER_SUPERUSER - value: "postgres" - - name: PGPASSWORD_SUPERUSER - value: "postgres" - - name: SPILO_PROVIDER - value: "local" - - name: ALLOW_NOSSL - value: "true" - ports: - - containerPort: 5432 - name: tcp - protocol: TCP - resources: - requests: - memory: "256Mi" - cpu: "250m" - limits: - memory: "1024Mi" - cpu: "1000m" - stdin: true - tty: true - volumeMounts: - - mountPath: /home/postgres/pgdata - name: twentycrm-db-data - dnsPolicy: ClusterFirst - restartPolicy: Always diff --git a/twenty/code/k8s/manifests/deployment-redis.yaml b/twenty/code/k8s/manifests/deployment-redis.yaml deleted file mode 100644 index bd7aaa3cf..000000000 --- a/twenty/code/k8s/manifests/deployment-redis.yaml +++ /dev/null @@ -1,45 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: twentycrm-redis - name: twentycrm-redis - namespace: twentycrm -spec: - progressDeadlineSeconds: 600 - replicas: 1 - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - type: RollingUpdate - selector: - matchLabels: - app: twentycrm-redis - template: - metadata: - labels: - app: twentycrm-redis - spec: - containers: - - name: redis - image: redis/redis-stack-server:latest - imagePullPolicy: Always - args: ["--maxmemory-policy", "noeviction"] - env: - - name: PORT - value: "6379" - ports: - - containerPort: 6379 - name: redis - protocol: TCP - resources: - requests: - memory: "1024Mi" - cpu: "250m" - limits: - memory: "2048Mi" - cpu: "500m" - - dnsPolicy: ClusterFirst - restartPolicy: Always diff --git a/twenty/code/k8s/manifests/deployment-server.yaml b/twenty/code/k8s/manifests/deployment-server.yaml deleted file mode 100644 index 9d219936e..000000000 --- a/twenty/code/k8s/manifests/deployment-server.yaml +++ /dev/null @@ -1,76 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: twentycrm-server - name: twentycrm-server - namespace: twentycrm -spec: - progressDeadlineSeconds: 600 - replicas: 1 - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - type: RollingUpdate - selector: - matchLabels: - app: twentycrm-server - template: - metadata: - labels: - app: twentycrm-server - spec: - volumes: - - name: twentycrm-server-data - persistentVolumeClaim: - claimName: twentycrm-server-pvc - - name: twentycrm-docker-data - persistentVolumeClaim: - claimName: twentycrm-docker-data-pvc - containers: - - name: twentycrm - image: twentycrm/twenty:latest - imagePullPolicy: Always - env: - - name: NODE_PORT - value: 3000 - - name: SERVER_URL - value: "https://crm.example.com:443" - - name: "PG_DATABASE_URL" - value: "postgres://postgres:postgres@twentycrm-db.twentycrm.svc.cluster.local/default" - - name: "REDIS_URL" - value: 
"redis://twentycrm-redis.twentycrm.svc.cluster.local:6379" - - name: SIGN_IN_PREFILLED - value: "false" - - name: STORAGE_TYPE - value: "local" - - name: "ACCESS_TOKEN_EXPIRES_IN" - value: "7d" - - name: "LOGIN_TOKEN_EXPIRES_IN" - value: "1h" - - name: APP_SECRET - valueFrom: - secretKeyRef: - name: tokens - key: accessToken - ports: - - containerPort: 3000 - name: http-tcp - protocol: TCP - resources: - requests: - memory: "256Mi" - cpu: "250m" - limits: - memory: "1024Mi" - cpu: "1000m" - stdin: true - tty: true - volumeMounts: - - mountPath: /app/docker-data - name: twentycrm-docker-data - - mountPath: /app/packages/twenty-server/.local-storage - name: twentycrm-server-data - dnsPolicy: ClusterFirst - restartPolicy: Always diff --git a/twenty/code/k8s/manifests/deployment-worker.yaml b/twenty/code/k8s/manifests/deployment-worker.yaml deleted file mode 100644 index fbee27781..000000000 --- a/twenty/code/k8s/manifests/deployment-worker.yaml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: twentycrm-worker - name: twentycrm-worker - namespace: twentycrm -spec: - progressDeadlineSeconds: 600 - replicas: 1 - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - type: RollingUpdate - selector: - matchLabels: - app: twentycrm-worker - template: - metadata: - labels: - app: twentycrm-worker - spec: - containers: - - name: twentycrm - image: twentycrm/twenty:latest - imagePullPolicy: Always - env: - - name: SERVER_URL - value: "https://crm.example.com:443" - - name: PG_DATABASE_URL - value: "postgres://postgres:postgres@twentycrm-db.twentycrm.svc.cluster.local/default" - - name: DISABLE_DB_MIGRATIONS - value: "false" # it already runs on the server - - name: STORAGE_TYPE - value: "local" - - name: "REDIS_URL" - value: "redis://twentycrm-redis.twentycrm.svc.cluster.local:6379" - - name: APP_SECRET - valueFrom: - secretKeyRef: - name: tokens - key: accessToken - command: - - yarn - - worker:prod - resources: - requests: - memory: "1024Mi" - cpu: "250m" - limits: - memory: "2048Mi" - cpu: "1000m" - stdin: true - tty: true - dnsPolicy: ClusterFirst - restartPolicy: Always diff --git a/twenty/code/k8s/manifests/ingress.yaml b/twenty/code/k8s/manifests/ingress.yaml deleted file mode 100644 index 0bbae11dd..000000000 --- a/twenty/code/k8s/manifests/ingress.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: twentycrm - namespace: twentycrm - annotations: - nginx.ingress.kubernetes.io/configuration-snippet: | - more_set_headers "X-Forwarded-For $http_x_forwarded_for"; - nginx.ingress.kubernetes.io/force-ssl-redirect: "false" - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/backend-protocol: "HTTP" -spec: - ingressClassName: nginx - rules: - - host: crm.example.com - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: twentycrm-server - port: - name: http-tcp diff --git a/twenty/code/k8s/manifests/pv-db.yaml b/twenty/code/k8s/manifests/pv-db.yaml deleted file mode 100644 index 9caa4ca4d..000000000 --- a/twenty/code/k8s/manifests/pv-db.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: twentycrm-db-pv -spec: - storageClassName: default - capacity: - storage: 10Gi - accessModes: - - ReadWriteOnce - persistentVolumeReclaimPolicy: Retain diff --git a/twenty/code/k8s/manifests/pv-docker-data.yaml b/twenty/code/k8s/manifests/pv-docker-data.yaml deleted file mode 100644 index 95fc52a26..000000000 --- 
a/twenty/code/k8s/manifests/pv-docker-data.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: twentycrm-docker-data-pv -spec: - storageClassName: default - capacity: - storage: 100Mi - accessModes: - - ReadWriteOnce - persistentVolumeReclaimPolicy: Retain diff --git a/twenty/code/k8s/manifests/pv-server.yaml b/twenty/code/k8s/manifests/pv-server.yaml deleted file mode 100644 index 721de7d56..000000000 --- a/twenty/code/k8s/manifests/pv-server.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: twentycrm-server-pv - namespace: twentycrm -spec: - storageClassName: default - capacity: - storage: 10Gi - accessModes: - - ReadWriteOnce - persistentVolumeReclaimPolicy: Retain diff --git a/twenty/code/k8s/manifests/pvc-db.yaml b/twenty/code/k8s/manifests/pvc-db.yaml deleted file mode 100644 index 146596ea1..000000000 --- a/twenty/code/k8s/manifests/pvc-db.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: twentycrm-db-pvc - namespace: twentycrm -spec: - storageClassName: default - volumeName: twentycrm-db-pv - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 10Gi diff --git a/twenty/code/k8s/manifests/pvc-docker-data.yaml b/twenty/code/k8s/manifests/pvc-docker-data.yaml deleted file mode 100644 index 12dd071a7..000000000 --- a/twenty/code/k8s/manifests/pvc-docker-data.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: twentycrm-docker-data-pvc - namespace: twentycrm -spec: - storageClassName: default - volumeName: twentycrm-docker-data-pv - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 100Mi diff --git a/twenty/code/k8s/manifests/pvc-server.yaml b/twenty/code/k8s/manifests/pvc-server.yaml deleted file mode 100644 index f265057cf..000000000 --- a/twenty/code/k8s/manifests/pvc-server.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: twentycrm-server-pvc - namespace: twentycrm -spec: - storageClassName: default - volumeName: twentycrm-server-pv - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 10Gi diff --git a/twenty/code/k8s/manifests/service-db.yaml b/twenty/code/k8s/manifests/service-db.yaml deleted file mode 100644 index 89dbd1464..000000000 --- a/twenty/code/k8s/manifests/service-db.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: twentycrm-db - namespace: twentycrm -spec: - internalTrafficPolicy: Cluster - ports: - - port: 5432 - protocol: TCP - targetPort: 5432 - selector: - app: twentycrm-db - sessionAffinity: ClientIP - sessionAffinityConfig: - clientIP: - timeoutSeconds: 10800 - type: ClusterIP diff --git a/twenty/code/k8s/manifests/service-redis.yaml b/twenty/code/k8s/manifests/service-redis.yaml deleted file mode 100644 index 49f508897..000000000 --- a/twenty/code/k8s/manifests/service-redis.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: twentycrm-redis - namespace: twentycrm -spec: - internalTrafficPolicy: Cluster - ports: - - port: 6379 - protocol: TCP - targetPort: 6379 - selector: - app: twentycrm-redis - sessionAffinity: ClientIP - sessionAffinityConfig: - clientIP: - timeoutSeconds: 10800 - type: ClusterIP diff --git a/twenty/code/k8s/manifests/service-server.yaml b/twenty/code/k8s/manifests/service-server.yaml deleted file mode 100644 index b45b28f31..000000000 --- a/twenty/code/k8s/manifests/service-server.yaml +++ 
/dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: twentycrm-server - namespace: twentycrm -spec: - internalTrafficPolicy: Cluster - ports: - - name: http-tcp - port: 3000 - protocol: TCP - targetPort: 3000 - selector: - app: twentycrm-server - sessionAffinity: ClientIP - sessionAffinityConfig: - clientIP: - timeoutSeconds: 10800 - type: ClusterIP diff --git a/twenty/code/k8s/terraform/.terraform-docs.yml b/twenty/code/k8s/terraform/.terraform-docs.yml deleted file mode 100644 index 792c543f4..000000000 --- a/twenty/code/k8s/terraform/.terraform-docs.yml +++ /dev/null @@ -1,48 +0,0 @@ -formatter: "markdown table" # this is required - -version: "" - -header-from: main.tf - -recursive: - enabled: false - path: modules - -output: - file: "README.md" - mode: inject - template: |- - - # TwentyCRM Terraform Docs - - This file was generated by [terraform-docs](https://terraform-docs.io/), for more information on how to install, configure, and use visit their website. - - To update this `README.md` after changes to the Terraform code in this folder, run: `terraform-docs -c `./.terraform-docs.yml .` - - To make configuration changes to how this doc is generated, see `./.terraform-docs.yml` - - {{ .Content }} - - -output-values: - enabled: false - from: "outputs.tf" - -sort: - enabled: true - by: required - -settings: - anchor: true - color: true - default: true - description: true - escape: true - hide-empty: true - html: true - indent: 2 - lockfile: true - read-comments: true - required: true - sensitive: true - type: true diff --git a/twenty/code/k8s/terraform/README.md b/twenty/code/k8s/terraform/README.md deleted file mode 100644 index 32facfd18..000000000 --- a/twenty/code/k8s/terraform/README.md +++ /dev/null @@ -1,73 +0,0 @@ - -# TwentyCRM Terraform Docs - -This file was generated by [terraform-docs](https://terraform-docs.io/), for more information on how to install, configure, and use visit their website. 
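The invocation quoted just below carries a stray backtick; the intended command, run from this folder and assuming terraform-docs is installed, is:

```bash
# Regenerate README.md from the Terraform sources in this directory,
# using the terraform-docs config that sits alongside it.
terraform-docs -c ./.terraform-docs.yml .
```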
- -To update this `README.md` after changes to the Terraform code in this folder, run: `terraform-docs -c ./.terraform-docs.yml .` - -To make configuration changes to how this doc is generated, see `./.terraform-docs.yml` - -## Requirements - -| Name | Version | -|------|---------| -| [terraform](#requirement\_terraform) | >= 1.9.2 | -| [kubernetes](#requirement\_kubernetes) | >= 2.32.0 | -| [random](#requirement\_random) | >= 3.6.3 | - -## Providers - -| Name | Version | -|------|---------| -| [kubernetes](#provider\_kubernetes) | >= 2.32.0 | -| [random](#provider\_random) | >= 3.6.3 | - -## Resources - -| Name | Type | -|------|------| -| [kubernetes_deployment.twentycrm_db](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource | -| [kubernetes_deployment.twentycrm_redis](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource | -| [kubernetes_deployment.twentycrm_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource | -| [kubernetes_deployment.twentycrm_worker](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource | -| [kubernetes_ingress.twentycrm](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/ingress) | resource | -| [kubernetes_namespace.twentycrm](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | -| [kubernetes_persistent_volume.db](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/persistent_volume) | resource | -| [kubernetes_persistent_volume.docker_data](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/persistent_volume) | resource | -| [kubernetes_persistent_volume.server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/persistent_volume) | resource | -| [kubernetes_persistent_volume_claim.db](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/persistent_volume_claim) | resource | -| [kubernetes_persistent_volume_claim.docker_data](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/persistent_volume_claim) | resource | -| [kubernetes_persistent_volume_claim.server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/persistent_volume_claim) | resource | -| [kubernetes_secret.twentycrm_tokens](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/secret) | resource | -| [kubernetes_service.twentycrm_db](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource | -| [kubernetes_service.twentycrm_redis](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource | -| [kubernetes_service.twentycrm_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource | -| [random_bytes.this](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/bytes) | resource | - -## Inputs - -| Name | Description | Type | Default | Required | -|------|-------------|------|---------|:--------:| -| [twentycrm\_app\_hostname](#input\_twentycrm\_app\_hostname) | The protocol, DNS fully qualified hostname, and port used to access TwentyCRM in your environment.
Ex: https://crm.example.com:443 | `string` | n/a | yes | -| [twentycrm\_pgdb\_admin\_password](#input\_twentycrm\_pgdb\_admin\_password) | TwentyCRM password for the Postgres database. | `string` | n/a | yes | -| [twentycrm\_app\_name](#input\_twentycrm\_app\_name) | A friendly name prefix to use for every component deployed. | `string` | `"twentycrm"` | no | -| [twentycrm\_db\_image](#input\_twentycrm\_db\_image) | TwentyCRM image for database deployment. This defaults to latest. | `string` | `"twentycrm/twenty-postgres-spilo:latest"` | no | -| [twentycrm\_db\_pv\_capacity](#input\_twentycrm\_db\_pv\_capacity) | Storage capacity provisioned for database persistent volume. | `string` | `"10Gi"` | no | -| [twentycrm\_db\_pv\_path](#input\_twentycrm\_db\_pv\_path) | Local path to use to store the physical volume if using local storage on nodes. | `string` | `""` | no | -| [twentycrm\_db\_pvc\_requests](#input\_twentycrm\_db\_pvc\_requests) | Storage capacity reservation for database persistent volume claim. | `string` | `"10Gi"` | no | -| [twentycrm\_db\_replicas](#input\_twentycrm\_db\_replicas) | Number of replicas for the TwentyCRM database deployment. This defaults to 1. | `number` | `1` | no | -| [twentycrm\_docker\_data\_mount\_path](#input\_twentycrm\_docker\_data\_mount\_path) | TwentyCRM mount path for the server's Docker data. Defaults to '/app/docker-data'. | `string` | `"/app/docker-data"` | no | -| [twentycrm\_docker\_data\_pv\_capacity](#input\_twentycrm\_docker\_data\_pv\_capacity) | Storage capacity provisioned for the Docker data persistent volume. | `string` | `"10Gi"` | no | -| [twentycrm\_docker\_data\_pv\_path](#input\_twentycrm\_docker\_data\_pv\_path) | Local path to use to store the physical volume if using local storage on nodes. | `string` | `""` | no | -| [twentycrm\_docker\_data\_pvc\_requests](#input\_twentycrm\_docker\_data\_pvc\_requests) | Storage capacity reservation for the Docker data persistent volume claim. | `string` | `"10Gi"` | no | -| [twentycrm\_namespace](#input\_twentycrm\_namespace) | Namespace for all TwentyCRM resources. | `string` | `"twentycrm"` | no | -| [twentycrm\_redis\_image](#input\_twentycrm\_redis\_image) | TwentyCRM image for Redis deployment. This defaults to latest. | `string` | `"redis/redis-stack-server:latest"` | no | -| [twentycrm\_redis\_replicas](#input\_twentycrm\_redis\_replicas) | Number of replicas for the TwentyCRM Redis deployment. This defaults to 1. | `number` | `1` | no | -| [twentycrm\_server\_data\_mount\_path](#input\_twentycrm\_server\_data\_mount\_path) | TwentyCRM mount path for the server's application data. Defaults to '/app/packages/twenty-server/.local-storage'. | `string` | `"/app/packages/twenty-server/.local-storage"` | no | -| [twentycrm\_server\_image](#input\_twentycrm\_server\_image) | TwentyCRM server image for the server deployment. This defaults to latest. This value is also used for the worker image. | `string` | `"twentycrm/twenty:latest"` | no | -| [twentycrm\_server\_pv\_capacity](#input\_twentycrm\_server\_pv\_capacity) | Storage capacity provisioned for server persistent volume. | `string` | `"10Gi"` | no | -| [twentycrm\_server\_pv\_path](#input\_twentycrm\_server\_pv\_path) | Local path to use to store the physical volume if using local storage on nodes. | `string` | `""` | no | -| [twentycrm\_server\_pvc\_requests](#input\_twentycrm\_server\_pvc\_requests) | Storage capacity reservation for server persistent volume claim.
| `string` | `"10Gi"` | no | -| [twentycrm\_server\_replicas](#input\_twentycrm\_server\_replicas) | Number of replicas for the TwentyCRM server deployment. This defaults to 1. | `number` | `1` | no | -| [twentycrm\_worker\_replicas](#input\_twentycrm\_worker\_replicas) | Number of replicas for the TwentyCRM worker deployment. This defaults to 1. | `number` | `1` | no | - diff --git a/twenty/code/k8s/terraform/deployment-db.tf b/twenty/code/k8s/terraform/deployment-db.tf deleted file mode 100644 index 62c61a298..000000000 --- a/twenty/code/k8s/terraform/deployment-db.tf +++ /dev/null @@ -1,87 +0,0 @@ -resource "kubernetes_deployment" "twentycrm_db" { - metadata { - name = "${var.twentycrm_app_name}-db" - namespace = kubernetes_namespace.twentycrm.metadata.0.name - labels = { - app = "${var.twentycrm_app_name}-db" - } - } - - spec { - replicas = var.twentycrm_db_replicas - selector { - match_labels = { - app = "${var.twentycrm_app_name}-db" - } - } - - strategy { - type = "RollingUpdate" - rolling_update { - max_surge = "1" - max_unavailable = "1" - } - } - - template { - metadata { - labels = { - app = "${var.twentycrm_app_name}-db" - } - } - - spec { - container { - image = var.twentycrm_db_image - name = var.twentycrm_app_name - stdin = true - tty = true - security_context { - allow_privilege_escalation = true - } - - env { - name = "POSTGRES_PASSWORD" - value = var.twentycrm_pgdb_admin_password - } - env { - name = "BITNAMI_DEBUG" - value = true - } - - port { - container_port = 5432 - protocol = "TCP" - } - - resources { - requests = { - cpu = "250m" - memory = "256Mi" - } - limits = { - cpu = "1000m" - memory = "1024Mi" - } - } - - volume_mount { - name = "db-data" - mount_path = "/bitnami/postgresql" - } - } - - volume { - name = "db-data" - - persistent_volume_claim { - claim_name = kubernetes_persistent_volume_claim.db.metadata.0.name - } - } - - dns_policy = "ClusterFirst" - restart_policy = "Always" - } - } - } -} diff --git a/twenty/code/k8s/terraform/deployment-redis.tf b/twenty/code/k8s/terraform/deployment-redis.tf deleted file mode 100644 index d867dac76..000000000 --- a/twenty/code/k8s/terraform/deployment-redis.tf +++ /dev/null @@ -1,60 +0,0 @@ -resource "kubernetes_deployment" "twentycrm_redis" { - metadata { - name = "${var.twentycrm_app_name}-redis" - namespace = kubernetes_namespace.twentycrm.metadata.0.name - - labels = { - app = "${var.twentycrm_app_name}-redis" - } - } - - spec { - replicas = var.twentycrm_redis_replicas - selector { - match_labels = { - app = "${var.twentycrm_app_name}-redis" - } - } - - strategy { - type = "RollingUpdate" - rolling_update { - max_surge = "1" - max_unavailable = "1" - } - } - - template { - metadata { - labels = { - app = "${var.twentycrm_app_name}-redis" - } - } - - spec { - container { - image = var.twentycrm_redis_image - name = "redis" - - port { - container_port = 6379 - protocol = "TCP" - } - - resources { - requests = { - cpu = "250m" - memory = "1024Mi" - } - limits = { - cpu = "500m" - memory = "2048Mi" - } - } - } - dns_policy = "ClusterFirst" - restart_policy = "Always" - } - } - } -} diff --git a/twenty/code/k8s/terraform/deployment-server.tf b/twenty/code/k8s/terraform/deployment-server.tf deleted file mode 100644 index a8bb43b95..000000000 --- a/twenty/code/k8s/terraform/deployment-server.tf +++ /dev/null @@ -1,138 +0,0 @@ -resource "kubernetes_deployment" "twentycrm_server" { - metadata { - name = "${var.twentycrm_app_name}-server" - namespace = kubernetes_namespace.twentycrm.metadata.0.name - labels = { - app = 
"${var.twentycrm_app_name}-server" - } - } - - spec { - replicas = var.twentycrm_server_replicas - selector { - match_labels = { - app = "${var.twentycrm_app_name}-server" - } - } - - strategy { - type = "RollingUpdate" - rolling_update { - max_surge = "1" - max_unavailable = "1" - } - } - - template { - metadata { - labels = { - app = "${var.twentycrm_app_name}-server" - } - } - - spec { - container { - image = var.twentycrm_server_image - name = var.twentycrm_app_name - stdin = true - tty = true - - env { - name = "NODE_PORT" - value = "3000" - } - - env { - name = "SERVER_URL" - value = var.twentycrm_app_hostname - } - - env { - name = "PG_DATABASE_URL" - value = "postgres://twenty:${var.twentycrm_pgdb_admin_password}@${kubernetes_service.twentycrm_db.metadata.0.name}.${kubernetes_namespace.twentycrm.metadata.0.name}.svc.cluster.local/default" - } - env { - name = "REDIS_URL" - value = "redis://${kubernetes_service.twentycrm_redis.metadata.0.name}.${kubernetes_namespace.twentycrm.metadata.0.name}.svc.cluster.local:6379" - } - env { - name = "DISABLE_DB_MIGRATIONS" - value = "false" - } - - env { - name = "STORAGE_TYPE" - value = "local" - } - env { - name = "ACCESS_TOKEN_EXPIRES_IN" - value = "7d" - } - env { - name = "LOGIN_TOKEN_EXPIRES_IN" - value = "1h" - } - env { - name = "APP_SECRET" - value_from { - secret_key_ref { - name = "tokens" - key = "accessToken" - } - } - } - - port { - container_port = 3000 - protocol = "TCP" - } - - resources { - requests = { - cpu = "250m" - memory = "256Mi" - } - limits = { - cpu = "1000m" - memory = "1024Mi" - } - } - - volume_mount { - name = "server-data" - mount_path = var.twentycrm_server_data_mount_path - } - - volume_mount { - name = "docker-data" - mount_path = var.twentycrm_docker_data_mount_path - } - } - - volume { - name = "server-data" - - persistent_volume_claim { - claim_name = kubernetes_persistent_volume_claim.server.metadata.0.name - } - } - - volume { - name = "docker-data" - - persistent_volume_claim { - claim_name = kubernetes_persistent_volume_claim.docker_data.metadata.0.name - } - } - - dns_policy = "ClusterFirst" - restart_policy = "Always" - } - } - } - depends_on = [ - kubernetes_deployment.twentycrm_db, - kubernetes_deployment.twentycrm_redis, - kubernetes_secret.twentycrm_tokens - ] -} diff --git a/twenty/code/k8s/terraform/deployment-worker.tf b/twenty/code/k8s/terraform/deployment-worker.tf deleted file mode 100644 index 9e4da3e8c..000000000 --- a/twenty/code/k8s/terraform/deployment-worker.tf +++ /dev/null @@ -1,99 +0,0 @@ -resource "kubernetes_deployment" "twentycrm_worker" { - metadata { - name = "${var.twentycrm_app_name}-worker" - namespace = kubernetes_namespace.twentycrm.metadata.0.name - labels = { - app = "${var.twentycrm_app_name}-worker" - } - } - - spec { - replicas = var.twentycrm_worker_replicas - selector { - match_labels = { - app = "${var.twentycrm_app_name}-worker" - } - } - - strategy { - type = "RollingUpdate" - rolling_update { - max_surge = "1" - max_unavailable = "1" - } - } - - template { - metadata { - labels = { - app = "${var.twentycrm_app_name}-worker" - } - } - - spec { - container { - image = var.twentycrm_server_image - name = var.twentycrm_app_name - stdin = true - tty = true - command = ["yarn", "worker:prod"] - - env { - name = "SERVER_URL" - value = var.twentycrm_app_hostname - } - - env { - name = "PG_DATABASE_URL" - value = 
"postgres://twenty:${var.twentycrm_pgdb_admin_password}@${kubernetes_service.twentycrm_db.metadata.0.name}.${kubernetes_namespace.twentycrm.metadata.0.name}.svc.cluster.local/default" - } - - env { - name = "REDIS_URL" - value = "redis://${kubernetes_service.twentycrm_redis.metadata.0.name}.${kubernetes_namespace.twentycrm.metadata.0.name}.svc.cluster.local:6379" - } - - env { - name = "DISABLE_DB_MIGRATIONS" - value = "true" #it already runs on the server - } - - env { - name = "STORAGE_TYPE" - value = "local" - } - - env { - name = "APP_SECRET" - value_from { - secret_key_ref { - name = "tokens" - key = "accessToken" - } - } - } - - resources { - requests = { - cpu = "250m" - memory = "1024Mi" - } - limits = { - cpu = "1000m" - memory = "2048Mi" - } - } - } - - dns_policy = "ClusterFirst" - restart_policy = "Always" - } - } - } - depends_on = [ - kubernetes_deployment.twentycrm_db, - kubernetes_deployment.twentycrm_redis, - kubernetes_deployment.twentycrm_server, - kubernetes_secret.twentycrm_tokens, - ] -} diff --git a/twenty/code/k8s/terraform/ingress.tf b/twenty/code/k8s/terraform/ingress.tf deleted file mode 100644 index f8a28779c..000000000 --- a/twenty/code/k8s/terraform/ingress.tf +++ /dev/null @@ -1,30 +0,0 @@ -resource "kubernetes_ingress" "twentycrm" { - wait_for_load_balancer = true - metadata { - name = "${var.twentycrm_app_name}-ingress" - namespace = kubernetes_namespace.twentycrm.metadata.0.name - annotations = { - "kubernetes.io/ingress.class" = "nginx" - "nginx.ingress.kubernetes.io/configuration-snippet" = </dev/null; then - echo -e "\t❌ Docker is not installed or not in PATH. Please install Docker first.\n\t\tSee https://docs.docker.com/get-docker/" - exit 1 -fi -# Check if docker compose plugin is installed -if ! docker compose version &>/dev/null; then - echo -e "\t❌ Docker Compose is not installed or not in PATH (n.b. docker-compose is deprecated)\n\t\tUpdate docker or install docker-compose-plugin\n\t\tOn Linux: sudo apt-get install docker-compose-plugin\n\t\tSee https://docs.docker.com/compose/install/" - exit 1 -fi -# Check if docker is started -if ! docker info &>/dev/null; then - echo -e "\t❌ Docker is not running.\n\t\tPlease start Docker Desktop, Docker or check documentation at https://docs.docker.com/config/daemon/start/" - exit 1 -fi -if ! command -v curl &>/dev/null; then - echo -e "\t❌ Curl is not installed or not in PATH.\n\t\tOn macOS: brew install curl\n\t\tOn Linux: sudo apt install curl" - exit 1 -fi - -# Check if docker compose version is >= 2 -if [ "$(docker compose version --short | cut -d' ' -f3 | cut -d'.' -f1)" -lt 2 ]; then - echo -e "\t❌ Docker Compose is outdated. Please update Docker Compose to version 2 or higher.\n\t\tSee https://docs.docker.com/compose/install/linux/" - exit 1 -fi -# Check if docker-compose is installed, if so issue a warning if version is < 2 -if command -v docker-compose &>/dev/null; then - if [ "$(docker-compose version --short | cut -d' ' -f3 | cut -d'.' -f1)" -lt 2 ]; then - echo -e "\n\t⚠️ 'docker-compose' is installed but outdated. Make sure to use 'docker compose' or to upgrade 'docker-compose' to version 2.\n\t\tSee https://docs.docker.com/compose/install/standalone/\n" - fi -fi - -# Catch errors -set -e -function on_exit { - # $? is the exit status of the last command executed - local exit_status=$? 
- if [ $exit_status -ne 0 ]; then - echo "❌ Something went wrong, exiting: $exit_status" - fi -} -trap on_exit EXIT - -# Use environment variables VERSION and BRANCH, with defaults if not set -version=${VERSION:-$(curl -s "https://hub.docker.com/v2/repositories/twentycrm/twenty/tags" | grep -o '"name":"[^"]*"' | grep -v 'latest' | cut -d'"' -f4 | sort -V | tail -n1)} -branch=${BRANCH:-$(curl -s https://api.github.com/repos/twentyhq/twenty/tags | grep '"name":' | head -n 1 | cut -d '"' -f 4)} - -echo "🚀 Using docker version $version and Github branch $branch" - -dir_name="twenty" -function ask_directory { - read -p "📁 Enter the directory name to setup the project (default: $dir_name): " answer - if [ -n "$answer" ]; then - dir_name=$answer - fi -} - -ask_directory - -while [ -d "$dir_name" ]; do - read -p "🚫 Directory '$dir_name' already exists. Do you want to overwrite it? (y/N) " answer - if [ "$answer" = "y" ]; then - break - else - ask_directory - fi -done - -# Create a directory named twenty -echo "📁 Creating directory '$dir_name'" -mkdir -p "$dir_name" && cd "$dir_name" || { echo "❌ Failed to create/access directory '$dir_name'"; exit 1; } - -# Copy twenty/packages/twenty-docker/docker-compose.yml in it -echo -e "\t• Copying docker-compose.yml" -curl -sLo docker-compose.yml https://raw.githubusercontent.com/twentyhq/twenty/$branch/packages/twenty-docker/docker-compose.yml - -# Copy twenty/packages/twenty-docker/.env.example to .env -echo -e "\t• Setting up .env file" -curl -sLo .env https://raw.githubusercontent.com/twentyhq/twenty/$branch/packages/twenty-docker/.env.example - -# Replace TAG=latest by TAG= -if [[ $(uname) == "Darwin" ]]; then - # Running on macOS - sed -i '' "s/TAG=latest/TAG=$version/g" .env -else - # Assuming Linux - sed -i'' "s/TAG=latest/TAG=$version/g" .env -fi - -# Generate random strings for secrets -echo "# === Randomly generated secret ===" >> .env -echo "APP_SECRET=$(openssl rand -base64 32)" >> .env - -echo "" >> .env -echo "PG_DATABASE_PASSWORD=$(openssl rand -hex 16)" >> .env - -echo -e "\t• .env configuration completed" - -port=3000 -# Check if command nc is available -if command -v nc &> /dev/null; then - # Check if port 3000 is already in use, propose to change it - while nc -zv localhost $port &>/dev/null; do - read -p "🚫 Port $port is already in use. Do you want to use another port? (Y/n) " answer - if [ "$answer" = "n" ]; then - continue - fi - read -p "Enter a new port number: " new_port - if [[ $(uname) == "Darwin" ]]; then - sed -i '' "s/$port:$port/$new_port:$port/g" docker-compose.yml - sed -E -i '' "s|^SERVER_URL=http://localhost:[0-9]+|SERVER_URL=http://localhost:$new_port|g" .env - else - sed -i'' "s/$port:$port/$new_port:$port/g" docker-compose.yml - sed -E -i'' "s|^SERVER_URL=http://localhost:[0-9]+|SERVER_URL=http://localhost:$new_port|g" .env - fi - port=$new_port - done -fi - -# Ask user if they want to start the project -read -p "🚀 Do you want to start the project now? (Y/n) " answer -if [ "$answer" = "n" ]; then - echo "✅ Project setup completed. Run 'docker compose up -d' to start." - exit 0 -else - echo "🐳 Starting Docker containers..." - docker compose up -d - # Check if port is listening - echo "Waiting for server to be healthy, it might take a few minutes while we initialize the database..." - # Tail logs of the server until it's ready - docker compose logs -f server & - pid=$! - while [ ! 
$(docker inspect --format='{{.State.Health.Status}}' twenty-server-1) = "healthy" ]; do - sleep 1 - done - kill $pid - echo "" - echo "✅ Server is up and running" -fi - -function ask_open_browser { - read -p "🌐 Do you want to open the project in your browser? (Y/n) " answer - if [ "$answer" = "n" ]; then - echo "✅ Setup completed. Access your project at http://localhost:$port" - exit 0 - fi -} - -# Ask user if they want to open the project -# Running on macOS -if [[ $(uname) == "Darwin" ]]; then - ask_open_browser - - open "http://localhost:$port" -# Assuming Linux -else - # xdg-open is not installed, we could be running in a non gui environment - if command -v xdg-open >/dev/null 2>&1; then - ask_open_browser - - xdg-open "http://localhost:$port" - else - echo "✅ Setup completed. Your project is available at http://localhost:$port" - fi -fi diff --git a/twenty/code/twenty-postgres-spilo/Dockerfile b/twenty/code/twenty-postgres-spilo/Dockerfile deleted file mode 100644 index 9a84c120d..000000000 --- a/twenty/code/twenty-postgres-spilo/Dockerfile +++ /dev/null @@ -1,68 +0,0 @@ -ARG POSTGRES_VERSION=15 -ARG SPILO_VERSION=3.2-p1 -ARG WRAPPERS_VERSION=0.2.0 - -# Build the mysql_fdw extension -FROM debian:bookworm AS build-mysql_fdw -ARG POSTGRES_VERSION - -ENV DEBIAN_FRONTEND=noninteractive -RUN apt update && \ - apt install -y \ - build-essential \ - git \ - postgresql-server-dev-${POSTGRES_VERSION} \ - default-libmysqlclient-dev && \ - rm -rf /var/lib/apt/lists/* - -# Install mysql_fdw -RUN git clone https://github.com/EnterpriseDB/mysql_fdw.git -WORKDIR /mysql_fdw -RUN make USE_PGXS=1 - - -# Build libssl for wrappers -FROM ubuntu:22.04 AS build-libssl - -ENV DEBIAN_FRONTEND=noninteractive -RUN apt update && \ - apt install -y \ - build-essential \ - git && \ - rm -rf /var/lib/apt/lists/* - -WORKDIR /build -RUN git clone --branch OpenSSL_1_1_1-stable https://github.com/openssl/openssl.git -WORKDIR /build/openssl -RUN ./config && make && make install - - -# Extend the Spilo image with the mysql_fdw extensions -FROM ghcr.io/zalando/spilo-${POSTGRES_VERSION}:${SPILO_VERSION} -ARG POSTGRES_VERSION -ARG WRAPPERS_VERSION -ARG TARGETARCH - -# Install precompiled supabase wrappers extensions -RUN set -eux; \ - ARCH="$(dpkg --print-architecture)"; \ - case "${ARCH}" in \ - aarch64|arm64) TARGETARCH='arm64';; \ - amd64|x86_64) TARGETARCH='amd64';; \ - *) echo "Unsupported arch: ${ARCH}"; exit 1;; \ - esac; - -RUN apt update && apt install default-libmysqlclient-dev -y && rm -rf /var/lib/apt/lists/* - -RUN curl -L "https://github.com/supabase/wrappers/releases/download/v${WRAPPERS_VERSION}/wrappers-v${WRAPPERS_VERSION}-pg${POSTGRES_VERSION}-${TARGETARCH}-linux-gnu.deb" -o wrappers.deb && \ - dpkg --install wrappers.deb && \ - rm wrappers.deb - -COPY --from=build-libssl /usr/local/lib/libssl* /usr/local/lib/libcrypto* /usr/lib/ -COPY --from=build-libssl /usr/local/lib/engines-1.1 /usr/lib/engines-1.1 - -# Copy mysql_fdw -COPY --from=build-mysql_fdw /mysql_fdw/mysql_fdw.so \ - /usr/lib/postgresql/${POSTGRES_VERSION}/lib/mysql_fdw.so -COPY --from=build-mysql_fdw /mysql_fdw/mysql_fdw*.sql /mysql_fdw/mysql_fdw.control /mysql_fdw/mysql_fdw_pushdown.config \ - /usr/share/postgresql/${POSTGRES_VERSION}/extension/ diff --git a/twenty/code/twenty-website/Dockerfile b/twenty/code/twenty-website/Dockerfile deleted file mode 100644 index 3f64a074c..000000000 --- a/twenty/code/twenty-website/Dockerfile +++ /dev/null @@ -1,42 +0,0 @@ -FROM node:18.17.1-alpine as twenty-website-build - - -WORKDIR /app - -COPY 
./package.json . -COPY ./yarn.lock . -COPY ./.yarnrc.yml . -COPY ./.yarn/releases /app/.yarn/releases -COPY ./tools/eslint-rules /app/tools/eslint-rules -COPY ./packages/twenty-ui/package.json /app/packages/twenty-ui/ -COPY ./packages/twenty-shared/package.json /app/packages/twenty-shared/ -COPY ./packages/twenty-website/package.json /app/packages/twenty-website/package.json - -RUN yarn - -ENV KEYSTATIC_GITHUB_CLIENT_ID="" -ENV KEYSTATIC_GITHUB_CLIENT_SECRET="" -ENV KEYSTATIC_SECRET="" -ENV NEXT_PUBLIC_KEYSTATIC_GITHUB_APP_SLUG="" - -COPY ./packages/twenty-ui /app/packages/twenty-ui -COPY ./packages/twenty-website /app/packages/twenty-website -RUN npx nx build twenty-website - -FROM node:18.17.1-alpine as twenty-website - -WORKDIR /app/packages/twenty-website - -COPY --from=twenty-website-build /app /app - -WORKDIR /app/packages/twenty-website - -LABEL org.opencontainers.image.source=https://github.com/twentyhq/twenty -LABEL org.opencontainers.image.description="This image provides a consistent and reproducible environment for the website." - -RUN chown -R 1000 /app - -# Use non root user with uid 1000 -USER 1000 - -CMD ["/bin/sh", "-c", "npx nx start"] \ No newline at end of file diff --git a/twenty/code/twenty/Dockerfile b/twenty/code/twenty/Dockerfile deleted file mode 100644 index 410069433..000000000 --- a/twenty/code/twenty/Dockerfile +++ /dev/null @@ -1,87 +0,0 @@ -# Base image for common dependencies -FROM node:18.17.1-alpine as common-deps - -WORKDIR /app - -# Copy only the necessary files for dependency resolution -COPY ./package.json ./yarn.lock ./.yarnrc.yml ./tsconfig.base.json ./nx.json /app/ -COPY ./.yarn/releases /app/.yarn/releases - -COPY ./.prettierrc /app/ -COPY ./packages/twenty-emails/package.json /app/packages/twenty-emails/ -COPY ./packages/twenty-server/package.json /app/packages/twenty-server/ -COPY ./packages/twenty-server/patches /app/packages/twenty-server/patches -COPY ./packages/twenty-ui/package.json /app/packages/twenty-ui/ -COPY ./packages/twenty-shared/package.json /app/packages/twenty-shared/ -COPY ./packages/twenty-front/package.json /app/packages/twenty-front/ - -# Install all dependencies
RUN yarn && yarn cache clean && npx nx reset - - -# Build the backend -FROM common-deps as twenty-server-build - -# Copy source code after installing dependencies to accelerate subsequent builds -COPY ./packages/twenty-emails /app/packages/twenty-emails -COPY ./packages/twenty-shared /app/packages/twenty-shared -COPY ./packages/twenty-server /app/packages/twenty-server - -RUN npx nx run twenty-server:build -RUN mv /app/packages/twenty-server/dist /app/packages/twenty-server/build -RUN npx nx run twenty-server:build:packageJson -RUN mv /app/packages/twenty-server/dist/package.json /app/packages/twenty-server/package.json -RUN rm -rf /app/packages/twenty-server/dist -RUN mv /app/packages/twenty-server/build /app/packages/twenty-server/dist - -RUN yarn workspaces focus --production twenty-emails twenty-shared twenty-server - - -# Build the frontend -FROM common-deps as twenty-front-build - -ARG REACT_APP_SERVER_BASE_URL - -COPY ./packages/twenty-front /app/packages/twenty-front -COPY ./packages/twenty-ui /app/packages/twenty-ui -COPY ./packages/twenty-shared /app/packages/twenty-shared -RUN npx nx build twenty-front - - -# Final stage: Run the application -FROM node:18.17.1-alpine as twenty - -# Used to run healthcheck in docker -RUN apk add --no-cache curl jq - -RUN npm install -g tsx - -RUN apk add --no-cache postgresql-client - -COPY 
./packages/twenty-docker/twenty/entrypoint.sh /app/entrypoint.sh -RUN chmod +x /app/entrypoint.sh -WORKDIR /app/packages/twenty-server - -ARG REACT_APP_SERVER_BASE_URL -ENV REACT_APP_SERVER_BASE_URL $REACT_APP_SERVER_BASE_URL - -ARG APP_VERSION -ENV APP_VERSION $APP_VERSION - -# Copy built applications from previous stages -COPY --chown=1000 --from=twenty-server-build /app /app -COPY --chown=1000 --from=twenty-server-build /app/packages/twenty-server /app/packages/twenty-server -COPY --chown=1000 --from=twenty-front-build /app/packages/twenty-front/build /app/packages/twenty-server/dist/front - -# Set metadata and labels -LABEL org.opencontainers.image.source=https://github.com/twentyhq/twenty -LABEL org.opencontainers.image.description="This image provides a consistent and reproducible environment for the backend and frontend, ensuring it deploys faster and runs the same way regardless of the deployment environment." - -RUN mkdir -p /app/.local-storage /app/packages/twenty-server/.local-storage && \ - chown -R 1000:1000 /app - -# Use non root user with uid 1000 -USER 1000 - -CMD ["node", "dist/src/main"] -ENTRYPOINT ["/app/entrypoint.sh"] diff --git a/twenty/code/twenty/entrypoint.sh b/twenty/code/twenty/entrypoint.sh deleted file mode 100755 index a7de30a37..000000000 --- a/twenty/code/twenty/entrypoint.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/sh -set -e - -setup_and_migrate_db() { - if [ "${DISABLE_DB_MIGRATIONS}" = "true" ]; then - echo "Database setup and migrations are disabled, skipping..." - return - fi - - echo "Running database setup and migrations..." - PGUSER=$(echo $PG_DATABASE_URL | awk -F '//' '{print $2}' | awk -F ':' '{print $1}') - PGPASS=$(echo $PG_DATABASE_URL | awk -F ':' '{print $3}' | awk -F '@' '{print $1}') - PGHOST=$(echo $PG_DATABASE_URL | awk -F '@' '{print $2}' | awk -F ':' '{print $1}') - PGPORT=$(echo $PG_DATABASE_URL | awk -F ':' '{print $4}' | awk -F '/' '{print $1}') - PGDATABASE=$(echo $PG_DATABASE_URL | awk -F ':' '{print $4}' | awk -F '/' '{print $2}') - - # Creating the database if it doesn't exist - db_count=$(PGPASSWORD=${PGPASS} psql -h ${PGHOST} -p ${PGPORT} -U ${PGUSER} -d postgres -tAc "SELECT COUNT(*) FROM pg_database WHERE datname = '${PGDATABASE}'") - if [ "$db_count" = "0" ]; then - echo "Database ${PGDATABASE} does not exist, creating..." - PGPASSWORD=${PGPASS} psql -h ${PGHOST} -p ${PGPORT} -U ${PGUSER} -d postgres -c "CREATE DATABASE \"${PGDATABASE}\"" - - # Run setup and migration scripts - NODE_OPTIONS="--max-old-space-size=1500" tsx ./scripts/setup-db.ts - fi - - yarn command:prod upgrade - echo "Successfully migrated DB!" 
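# Note: the CREATE DATABASE / setup-db.ts branch above runs only when the
# database does not exist yet; `yarn command:prod upgrade` runs on every start,
# so an existing database is migrated to the current application version.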
-} -setup_and_migrate_db - -# Continue with the original Docker command -exec "$@" diff --git a/twenty/update.js b/twenty/update.js deleted file mode 100644 index b6131b7b6..000000000 --- a/twenty/update.js +++ /dev/null @@ -1,25 +0,0 @@ -import utils from "../utils.js"; - -await utils.cloneOrPullRepo({ repo: "git@github.com:twentyhq/twenty.git" }); -await utils.copyDir("./repo/packages/twenty-docker", "./code"); - -await utils.removeContainerNames("./code/docker-compose.yml"); -await utils.removePorts("./code/docker-compose.yml"); - -await utils.searchReplace( - "./code/.env.example", - "#REDIS_URL=redis://redis:6379", - "REDIS_URL=redis://redis:6379" -); - -await utils.searchReplace( - "./code/.env.example", - "SERVER_URL=http://localhost:3000", - "SERVER_URL=https://$(PRIMARY_DOMAIN)" -); - -await utils.searchReplace( - "./code/.env.example", - "# APP_SECRET=replace_me_with_a_random_string", - "APP_SECRET=replace_me_with_a_random_string" -); diff --git a/twenty/update.sh b/twenty/update.sh deleted file mode 100644 index 468d07655..000000000 --- a/twenty/update.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -if [ ! -d "./repo" ]; then - git clone --depth 1 --branch main --single-branch git@github.com:twentyhq/twenty.git repo -else - cd repo - git pull - cd .. -fi - -cp -r ./repo/packages/twenty-docker/. ./code - - diff --git a/utils.js b/utils.js index d22ef2013..2644f3196 100644 --- a/utils.js +++ b/utils.js @@ -81,6 +81,18 @@ async function searchReplace(path, search, replace) { await fs.promises.writeFile(path, newFile); } +async function removeDir(path) { + console.log(`Removing directory ${path}`); + + await execa("rm", ["-rf", path]); +} + +async function renameDir(src, dest) { + console.log(`Renaming directory ${src} to ${dest}`); + + await execa("mv", [src, dest]); +} + export default { cloneOrPullRepo, removeContainerNames, @@ -89,4 +101,6 @@ export default { downloadFile, renameFile, searchReplace, + removeDir, + renameDir, };
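A usage note on the deleted installer script above: it resolves the Docker tag from Docker Hub and the Git branch from the GitHub API only when the VERSION and BRANCH environment variables are unset, so a pinned, reproducible run might look like the following sketch (the local filename install.sh and the tag value are hypothetical; the script's interactive prompts still apply):

    VERSION=v0.40.0 BRANCH=main bash install.sh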