Troubleshooting Dify on macOS with Docker: Adjusting docker-compose.yaml

Introduction

When running Dify with Docker on macOS, you may encounter issues that prevent it from working properly. This article walks through adjusting the dify/docker/docker-compose.yaml file to resolve these problems.
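
Before overwriting anything, it is worth keeping a copy of the original file so you can roll back if the new configuration misbehaves; for example (the .bak suffix is just a convention):

cp docker/docker-compose.yaml docker/docker-compose.yaml.bak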

Replace the contents of dify/docker/docker-compose.yaml with the following:

docker-compose.yaml

version: '3.8'

x-shared-env: &shared-api-worker-env
  LOG_LEVEL: ${LOG_LEVEL:-INFO}
  LOG_FILE: ${LOG_FILE:-}
  DEBUG: ${DEBUG:-false}
  FLASK_DEBUG: ${FLASK_DEBUG:-false}
  SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U}
  INIT_PASSWORD: ${INIT_PASSWORD:-}
  CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-}
  CONSOLE_API_URL: ${CONSOLE_API_URL:-}
  SERVICE_API_URL: ${SERVICE_API_URL:-}
  APP_WEB_URL: ${APP_WEB_URL:-}
  CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-https://updates.dify.ai}
  OPENAI_API_BASE: ${OPENAI_API_BASE:-https://api.openai.com/v1}
  FILES_URL: ${FILES_URL:-}
  FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300}
  APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0}
  MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true}
  DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION}
  DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0}
  DIFY_PORT: ${DIFY_PORT:-5001}
  SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-}
  SERVER_WORKER_CLASS: ${SERVER_WORKER_CLASS:-}
  CELERY_WORKER_CLASS: ${CELERY_WORKER_CLASS:-}
  GUNICORN_TIMEOUT: ${GUNICORN_TIMEOUT:-360}
  CELERY_WORKER_AMOUNT: ${CELERY_WORKER_AMOUNT:-}
  CELERY_AUTO_SCALE: ${CELERY_AUTO_SCALE:-false}
  CELERY_MAX_WORKERS: ${CELERY_MAX_WORKERS:-}
  CELERY_MIN_WORKERS: ${CELERY_MIN_WORKERS:-}
  API_TOOL_DEFAULT_CONNECT_TIMEOUT: ${API_TOOL_DEFAULT_CONNECT_TIMEOUT:-10}
  API_TOOL_DEFAULT_READ_TIMEOUT: ${API_TOOL_DEFAULT_READ_TIMEOUT:-60}
  DB_USERNAME: ${DB_USERNAME:-postgres}
  DB_PASSWORD: ${DB_PASSWORD:-difyai123456}
  DB_HOST: ${DB_HOST:-db}
  DB_PORT: ${DB_PORT:-5432}
  DB_DATABASE: ${DB_DATABASE:-dify}
  SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30}
  SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600}
  SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false}
  REDIS_HOST: redis
  REDIS_PORT: 6379
  REDIS_USERNAME: ${REDIS_USERNAME:-}
  REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456}
  REDIS_USE_SSL: ${REDIS_USE_SSL:-false}
  REDIS_DB: 0
  REDIS_USE_SENTINEL: ${REDIS_USE_SENTINEL:-false}
  REDIS_SENTINELS: ${REDIS_SENTINELS:-}
  REDIS_SENTINEL_SERVICE_NAME: ${REDIS_SENTINEL_SERVICE_NAME:-}
  REDIS_SENTINEL_USERNAME: ${REDIS_SENTINEL_USERNAME:-}
  REDIS_SENTINEL_PASSWORD: ${REDIS_SENTINEL_PASSWORD:-}
  REDIS_SENTINEL_SOCKET_TIMEOUT: ${REDIS_SENTINEL_SOCKET_TIMEOUT:-}
  CELERY_BROKER_URL: redis://:${REDIS_PASSWORD:-difyai123456}@redis:6379/1
  BROKER_USE_SSL: ${BROKER_USE_SSL:-false}
  CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false}
  CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-}
  CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-}
  WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*}
  CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*}
  STORAGE_TYPE: ${STORAGE_TYPE:-local}
  STORAGE_LOCAL_PATH: storage
  S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false}
  S3_ENDPOINT: ${S3_ENDPOINT:-}
  S3_BUCKET_NAME: ${S3_BUCKET_NAME:-}
  S3_ACCESS_KEY: ${S3_ACCESS_KEY:-}
  S3_SECRET_KEY: ${S3_SECRET_KEY:-}
  S3_REGION: ${S3_REGION:-us-east-1}
  AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-}
  AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-}
  AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-}
  AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-}
  GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-}
  GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-}
  ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-}
  ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-}
  ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-}
  ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-}
  ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-}
  ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4}
  ALIYUN_OSS_PATHS: ${ALIYUN_OSS_PATH:-}
  TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-}
  TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-}
  TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-}
  TENCENT_COS_REGION: ${TENCENT_COS_REGION:-}
  TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-}
  HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-}
  HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-}
  HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-}
  HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-}
  OCI_ENDPOINT: ${OCI_ENDPOINT:-}
  OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-}
  OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-}
  OCI_SECRET_KEY: ${OCI_SECRET_KEY:-}
  OCI_REGION: ${OCI_REGION:-}
  VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-}
  VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-}
  VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-}
  VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-}
  VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-}
  VECTOR_STORE: ${VECTOR_STORE:-weaviate}
  WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080}
  WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
  QDRANT_URL: ${QDRANT_URL:-http://qdrant:6333}
  QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
  QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20}
  QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED:-false}
  QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334}
  MILVUS_URI: ${MILVUS_URI:-http://127.0.0.1:19530}
  MILVUS_TOKEN: ${MILVUS_TOKEN:-}
  MILVUS_USER: ${MILVUS_USER:-root}
  MILVUS_PASSWORD: ${MILVUS_PASSWORD:-Milvus}
  MYSCALE_HOST: ${MYSCALE_HOST:-myscale}
  MYSCALE_PORT: ${MYSCALE_PORT:-8123}
  MYSCALE_USER: ${MYSCALE_USER:-default}
  MYSCALE_PASSWORD: ${MYSCALE_PASSWORD:-}
  MYSCALE_DATABASE: ${MYSCALE_DATABASE:-dify}
  MYSCALE_FTS_PARAMS: ${MYSCALE_FTS_PARAMS:-}
  RELYT_HOST: ${RELYT_HOST:-db}
  RELYT_PORT: ${RELYT_PORT:-5432}
  RELYT_USER: ${RELYT_USER:-postgres}
  RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456}
  RELYT_DATABASE: ${RELYT_DATABASE:-postgres}
  PGVECTOR_HOST: ${PGVECTOR_HOST:-pgvector}
  PGVECTOR_PORT: ${PGVECTOR_PORT:-5432}
  PGVECTOR_USER: ${PGVECTOR_USER:-postgres}
  PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD:-difyai123456}
  PGVECTOR_DATABASE: ${PGVECTOR_DATABASE:-dify}
  TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST:-tidb}
  TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT:-4000}
  TIDB_VECTOR_USER: ${TIDB_VECTOR_USER:-}
  TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD:-}
  TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE:-dify}
  ORACLE_HOST: ${ORACLE_HOST:-oracle}
  ORACLE_PORT: ${ORACLE_PORT:-1521}
  ORACLE_USER: ${ORACLE_USER:-dify}
  ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify}
  ORACLE_DATABASE: ${ORACLE_DATABASE:-FREEPDB1}
  CHROMA_HOST: ${CHROMA_HOST:-127.0.0.1}
  CHROMA_PORT: ${CHROMA_PORT:-8000}
  CHROMA_TENANT: ${CHROMA_TENANT:-default_tenant}
  CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database}
  CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider}
  CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-}
  ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0}
  ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200}
  ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic}
  ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
  KIBANA_PORT: ${KIBANA_PORT:-5601}
  # AnalyticDB configuration
  ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-}
  ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-}
  ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-}
  ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-}
  ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-}
  ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-}
  ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify}
  ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-}
  OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch}
  OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200}
  OPENSEARCH_USER: ${OPENSEARCH_USER:-admin}
  OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD:-admin}
  OPENSEARCH_SECURE: ${OPENSEARCH_SECURE:-true}
  TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL:-http://127.0.0.1}
  TENCENT_VECTOR_DB_API_KEY: ${TENCENT_VECTOR_DB_API_KEY:-dify}
  TENCENT_VECTOR_DB_TIMEOUT: ${TENCENT_VECTOR_DB_TIMEOUT:-30}
  TENCENT_VECTOR_DB_USERNAME: ${TENCENT_VECTOR_DB_USERNAME:-dify}
  TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE:-dify}
  TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD:-1}
  TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS:-2}
  UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15}
  UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5}
  ETL_TYPE: ${ETL_TYPE:-dify}
  UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-}
  MULTIMODAL_SEND_IMAGE_FORMAT: ${MULTIMODAL_SEND_IMAGE_FORMAT:-base64}
  UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10}
  SENTRY_DSN: ${API_SENTRY_DSN:-}
  SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
  SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
  NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public}
  NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-}
  NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-}
  NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-}
  MAIL_TYPE: ${MAIL_TYPE:-resend}
  MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-}
  SMTP_SERVER: ${SMTP_SERVER:-}
  SMTP_PORT: ${SMTP_PORT:-465}
  SMTP_USERNAME: ${SMTP_USERNAME:-}
  SMTP_PASSWORD: ${SMTP_PASSWORD:-}
  SMTP_USE_TLS: ${SMTP_USE_TLS:-true}
  SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false}
  RESEND_API_KEY: ${RESEND_API_KEY:-your-resend-api-key}
  RESEND_API_URL: https://api.resend.com
  INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-1000}
  INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72}
  RESET_PASSWORD_TOKEN_EXPIRY_HOURS: ${RESET_PASSWORD_TOKEN_EXPIRY_HOURS:-24}
  CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194}
  CODE_EXECUTION_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
  CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807}
  CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808}
  CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5}
  CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20}
  CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-80000}
  TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-80000}
  CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30}
  CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30}
  CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000}
  SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128}
  SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128}

services:
  # API service
  api:
    image: langgenius/dify-api:latest
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'api' starts the API server.
      MODE: api
    depends_on:
      - db
      - redis
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    networks:
      - dify-network
    deploy:
      resources:
        limits:
          cpus: '1'
          memory: 1G

  # worker service
  # The Celery worker for processing the queue.
  worker:
    image: langgenius/dify-api:latest
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'worker' starts the Celery worker for processing the queue.
      MODE: worker
    depends_on:
      - db
      - redis
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    networks:
      - dify-network
    deploy:
      resources:
        limits:
          cpus: '1'
          memory: 1G

  # Frontend web application.
  web:
    image: langgenius/dify-web:latest
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}
      APP_API_URL: ${APP_API_URL:-}
      SENTRY_DSN: ${WEB_SENTRY_DSN:-}
      NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
    networks:
      - dify-network

  # The postgres database.
  db:
    image: postgres:15-alpine
    restart: always
    environment:
      PGUSER: ${PGUSER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
      POSTGRES_DB: ${POSTGRES_DB:-dify}
      PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
    command: >
      postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
               -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
               -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
               -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
               -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'      
    volumes:
      - ./volumes/db/data:/var/lib/postgresql/data
    healthcheck:
      test: [ "CMD", "pg_isready" ]
      interval: 1s
      timeout: 3s
      retries: 30
    networks:
      - dify-network
    deploy:
      resources:
        limits:
          cpus: '1'
          memory: 1G

  # The redis cache.
  redis:
    image: redis:7-alpine
    restart: always
    volumes:
      # Mount the redis data directory to the container.
      - ./volumes/redis/data:/data
    # Set the redis password when startup redis server.
    command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
    healthcheck:
      test: [ "CMD", "redis-cli", "ping" ]
    networks:
      - dify-network
    deploy:
      resources:
        limits:
          cpus: '0.5'
          memory: 512M

  # The DifySandbox
  sandbox:
    image: langgenius/dify-sandbox:0.2.7
    restart: always
    environment:
      # The DifySandbox configurations
      # Make sure you are changing this key for your deployment with a strong key.
      # You can generate a strong key using `openssl rand -base64 42`.
      API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
      GIN_MODE: ${SANDBOX_GIN_MODE:-release}
      WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
      ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
      HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
      HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
    volumes:
      - ./volumes/sandbox/dependencies:/dependencies
    healthcheck:
      test: [ "CMD", "curl", "-f", "http://localhost:8194/health" ]
    networks:
      - dify-network
    deploy:
      resources:
        limits:
          cpus: '0.5'
          memory: 512M

  # ssrf_proxy server
  # for more information, please refer to
  # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed
  ssrf_proxy:
    image: ubuntu/squid:latest
    restart: always
    volumes:
      - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
      - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
    entrypoint: [ "sh", "-c", "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
    environment:
      # Please adjust the squid environment variables below to fit your network environment.
      HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
      COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
      REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
      SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
    networks:
      - dify-network

  # Certbot service
  # use `docker-compose --profile certbot up` to start the certbot service.
  certbot:
    image: certbot/certbot
    profiles:
      - certbot
    volumes:
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
      - ./volumes/certbot/logs:/var/log/letsencrypt
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live
      - ./certbot/update-cert.template.txt:/update-cert.template.txt
      - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh
    environment:
      - CERTBOT_EMAIL=${CERTBOT_EMAIL}
      - CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
      - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
    entrypoint: [ "/docker-entrypoint.sh" ]
    command: [ "tail", "-f", "/dev/null" ]
    networks:
      - dify-network

  # The nginx reverse proxy.
  # used for reverse proxying the API service and Web service.
  nginx:
    image: nginx:latest
    restart: always
    volumes:
      - ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
      - ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
      - ./nginx/https.conf.template:/etc/nginx/https.conf.template
      - ./nginx/conf.d:/etc/nginx/conf.d
      - ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
      - ./nginx/ssl:/etc/ssl # cert dir (legacy)
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
    entrypoint: [ "sh", "-c", "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
    environment:
      NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
      NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
      NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
      NGINX_PORT: ${NGINX_PORT:-80}
      # You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
      # and modify the env vars below in .env if NGINX_HTTPS_ENABLED is true.
      NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
      NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
      NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
      NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
      NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
      NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
      NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
      NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
      NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
      CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
    depends_on:
      - api
      - web
    ports:
      - "${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}"
      - "${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}"
    networks:
      - dify-network
    deploy:
      resources:
        limits:
          cpus: '0.5'
          memory: 256M

  # The Weaviate vector store.
  weaviate:
    image: semitechnologies/weaviate:1.19.0
    profiles:
      - ""
      - weaviate
    restart: always
    volumes:
      # Mount the Weaviate data directory to the container.
      - ./volumes/weaviate:/var/lib/weaviate
    environment:
      # The Weaviate configurations
      # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
      PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
      QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
      AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
      DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
      CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
      AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
      AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
      AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
      AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
      AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
    networks:
      - dify-network
    deploy:
      resources:
        limits:
          cpus: '1'
          memory: 2G

  # Qdrant vector store.
  # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
  qdrant:
    image: langgenius/qdrant:v1.7.3
    profiles:
      - qdrant
    restart: always
    volumes:
      - ./volumes/qdrant:/qdrant/storage
    environment:
      QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}

  # The pgvector vector database.
  pgvector:
    image: pgvector/pgvector:pg16
    profiles:
      - pgvector
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
    volumes:
      - ./volumes/pgvector/data:/var/lib/postgresql/data
    healthcheck:
      test: [ "CMD", "pg_isready" ]
      interval: 1s
      timeout: 3s
      retries: 30

  # pgvecto-rs vector store
  pgvecto-rs:
    image: tensorchord/pgvecto-rs:pg16-v0.3.0
    profiles:
      - pgvecto-rs
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
    volumes:
      - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
    healthcheck:
      test: [ "CMD", "pg_isready" ]
      interval: 1s
      timeout: 3s
      retries: 30

  # Chroma vector database
  chroma:
    image: ghcr.io/chroma-core/chroma:0.5.1
    profiles:
      - chroma
    restart: always
    volumes:
      - ./volumes/chroma:/chroma/chroma
    environment:
      CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
      CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
      IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}

  # Oracle vector database
  oracle:
    image: container-registry.oracle.com/database/free:latest
    profiles:
      - oracle
    restart: always
    volumes:
      - source: oradata
        type: volume
        target: /opt/oracle/oradata
      - ./startupscripts:/opt/oracle/scripts/startup
    environment:
      ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
      ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}

  # Milvus vector database services
  etcd:
    container_name: milvus-etcd
    image: quay.io/coreos/etcd:v3.5.5
    profiles:
      - milvus
    environment:
      ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
      ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
      ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
      ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
    volumes:
      - ./volumes/milvus/etcd:/etcd
    command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
    healthcheck:
      test: [ "CMD", "etcdctl", "endpoint", "health" ]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus

  minio:
    container_name: milvus-minio
    image: minio/minio:RELEASE.2023-03-20T20-16-18Z
    profiles:
      - milvus
    environment:
      MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
      MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
    volumes:
      - ./volumes/milvus/minio:/minio_data
    command: minio server /minio_data --console-address ":9001"
    healthcheck:
      test: [ "CMD", "curl", "-f", "http://localhost:9000/minio/health/live" ]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus

  milvus-standalone:
    container_name: milvus-standalone
    image: milvusdb/milvus:v2.3.1
    profiles:
      - milvus
    command: [ "milvus", "run", "standalone" ]
    environment:
      ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
      MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
      common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true}
    volumes:
      - ./volumes/milvus/milvus:/var/lib/milvus
    healthcheck:
      test: [ "CMD", "curl", "-f", "http://localhost:9091/healthz" ]
      interval: 30s
      start_period: 90s
      timeout: 20s
      retries: 3
    depends_on:
      - etcd
      - minio
    ports:
      - 19530:19530
      - 9091:9091
    networks:
      - milvus

  # Opensearch vector database
  opensearch:
    container_name: opensearch
    image: opensearchproject/opensearch:latest
    profiles:
      - opensearch
    environment:
      discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
      bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
      OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
      OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
    ulimits:
      memlock:
        soft: ${OPENSEARCH_MEMLOCK_SOFT:--1}
        hard: ${OPENSEARCH_MEMLOCK_HARD:--1}
      nofile:
        soft: ${OPENSEARCH_NOFILE_SOFT:-65536}
        hard: ${OPENSEARCH_NOFILE_HARD:-65536}
    volumes:
      - ./volumes/opensearch/data:/usr/share/opensearch/data
    networks:
      - opensearch-net

  opensearch-dashboards:
    container_name: opensearch-dashboards
    image: opensearchproject/opensearch-dashboards:latest
    profiles:
      - opensearch
    environment:
      OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
    volumes:
      - ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
    networks:
      - opensearch-net
    depends_on:
      - opensearch

  # MyScale vector database
  myscale:
    container_name: myscale
    image: myscale/myscaledb:1.6.4
    profiles:
      - myscale
    restart: always
    tty: true
    volumes:
      - ./volumes/myscale/data:/var/lib/clickhouse
      - ./volumes/myscale/log:/var/log/clickhouse-server
      - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml
    ports:
      - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}

  # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
    container_name: elasticsearch
    profiles:
      - elasticsearch
    restart: always
    volumes:
      - dify_es01_data:/usr/share/elasticsearch/data
    environment:
      ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
      cluster.name: dify-es-cluster
      node.name: dify-es0
      discovery.type: single-node
      xpack.license.self_generated.type: trial
      xpack.security.enabled: "true"
      xpack.security.enrollment.enabled: "false"
      xpack.security.http.ssl.enabled: "false"
    ports:
      - ${ELASTICSEARCH_PORT:-9200}:9200
    healthcheck:
      test: [ "CMD", "curl", "-s", "http://localhost:9200/_cluster/health?pretty" ]
      interval: 30s
      timeout: 10s
      retries: 50

  # https://www.elastic.co/guide/en/kibana/current/docker.html
  # https://www.elastic.co/guide/en/kibana/current/settings.html
  kibana:
    image: docker.elastic.co/kibana/kibana:8.14.3
    container_name: kibana
    profiles:
      - elasticsearch
    depends_on:
      - elasticsearch
    restart: always
    environment:
      XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
      NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
      XPACK_SECURITY_ENABLED: "true"
      XPACK_SECURITY_ENROLLMENT_ENABLED: "false"
      XPACK_SECURITY_HTTP_SSL_ENABLED: "false"
      XPACK_FLEET_ISAIRGAPPED: "true"
      I18N_LOCALE: zh-CN
      SERVER_PORT: "5601"
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    ports:
      - ${KIBANA_PORT:-5601}:5601
    healthcheck:
      test: [ "CMD-SHELL", "curl -s http://localhost:5601 >/dev/null || exit 1" ]
      interval: 30s
      timeout: 10s
      retries: 3

  # The Unstructured ETL service.
  # (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.)
  unstructured:
    image: downloads.unstructured.io/unstructured-io/unstructured-api:latest
    profiles:
      - unstructured
    restart: always
    volumes:
      - ./volumes/unstructured:/app/data

networks:
  # A network shared by sandbox, api, and ssrf_proxy; it cannot access the outside.
  ssrf_proxy_network:
    driver: bridge
    internal: true
  milvus:
    driver: bridge
  opensearch-net:
    driver: bridge
    internal: true
  dify-network:
    driver: bridge

volumes:
  oradata:
  dify_es01_data:
  db_data:
  redis_data:
  weaviate_data:
  app_storage:
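
A note on customization: every ${VAR:-default} in this file reads from a .env file placed next to docker-compose.yaml, so you can override values there instead of editing the YAML itself. As an illustrative sketch (the values are placeholders; generate your own secret, for example with openssl rand -base64 42 as the sandbox comment above suggests), a minimal .env might contain:

SECRET_KEY=your-generated-secret
DB_PASSWORD=your-strong-db-password
POSTGRES_PASSWORD=your-strong-db-password

Note that DB_PASSWORD (used by the api and worker services) must match POSTGRES_PASSWORD (used to initialize the db service). You can preview the fully rendered configuration with docker-compose config before starting anything.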

Implementation Steps

  1. Open Terminal and navigate to the Dify project directory.
  2. Open the docker/docker-compose.yaml file in a text editor.
  3. Replace its contents with the configuration shown above, adjusting any values (ports, passwords, resource limits) for your environment.
  4. Save the file and restart the Docker containers:
docker-compose down
docker-compose up -d
  5. Check whether Dify is running normally (see the verification commands below).
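
To verify, assuming you run these from the dify/docker directory, list the container states and probe the nginx entry point (EXPOSE_NGINX_PORT, 80 by default):

docker-compose ps
curl -I http://localhost

All services should report Up (or healthy), and curl should receive an HTTP response from nginx.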

Conclusion

By appropriately adjusting the docker-compose.yaml file, you can resolve most issues encountered when running Dify with Docker on macOS. Remember to restart the Docker containers after each configuration change. If problems persist, check the Docker logs for more information:

docker-compose logs
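
To narrow the output to a single service and follow it live, name the service and tail only recent lines, for example:

docker-compose logs -f --tail=100 api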

We hope this guide helps you run Dify smoothly on macOS. If you encounter other issues, don’t hesitate to consult Dify’s official documentation or seek help in the community forums.