# ------------------------------
# Environment Variables for API service & worker
# ------------------------------

# ------------------------------
# Common Variables
# ------------------------------
# The backend URL of the console API,
# used to concatenate the authorization callback.
# If empty, it is the same domain.
# Example: https://api.console.dify.ai
CONSOLE_API_URL=
# The front-end URL of the console web,
# used to concatenate some front-end addresses and for CORS configuration use.
# If empty, it is the same domain.
# Example: https://console.dify.ai
CONSOLE_WEB_URL=
# Service API URL,
# used to display the Service API base URL to the front-end.
# If empty, it is the same domain.
# Example: https://api.dify.ai
SERVICE_API_URL=
# WebApp API backend URL,
# used to declare the back-end URL for the front-end API.
# If empty, it is the same domain.
# Example: https://api.app.dify.ai
APP_API_URL=
# WebApp URL,
# used to display the WebApp API base URL to the front-end.
# If empty, it is the same domain.
# Example: https://app.dify.ai
APP_WEB_URL=
# File preview or download URL prefix,
# used to display file preview or download URLs to the front-end or as multi-modal model inputs.
# The URL is signed and has an expiration time.
FILES_URL=
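# Example (hypothetical domain, not one of the defaults above): an externally reachable file endpoint
# FILES_URL=https://files.example.com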

# ------------------------------
# Server Configuration
# ------------------------------
# The log level for the application.
# Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
LOG_LEVEL=INFO
# Log file path
LOG_FILE=
# Log file max size, in MB
LOG_FILE_MAX_SIZE=20
# Log file max backup count
LOG_FILE_BACKUP_COUNT=5
# Debug mode, default is false.
# It is recommended to turn on this configuration for local development
# to prevent some problems caused by monkey patching.
DEBUG=false
# Flask debug mode. When turned on, it can output trace information at the interface,
# which is convenient for debugging.
FLASK_DEBUG=false
# A secret key used for securely signing the session cookie
# and encrypting sensitive information in the database.
# You can generate a strong key using `openssl rand -base64 42`.
SECRET_KEY=sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U
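# For example (assuming a POSIX shell), you can print a line ready to paste here:
#   echo "SECRET_KEY=$(openssl rand -base64 42)"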
# Password for admin user initialization.
# If left unset, the admin user will not be prompted for a password
# when creating the initial admin account.
INIT_PASSWORD=
# Deployment environment.
# Supported values are `PRODUCTION`, `TESTING`. Default is `PRODUCTION`.
# In the testing environment, a distinct color label is shown on the front-end page,
# indicating that this environment is a testing environment.
DEPLOY_ENV=PRODUCTION
# Whether to enable the version check policy.
# If set to empty, https://updates.dify.ai will be called for the version check.
CHECK_UPDATE_URL=https://updates.dify.ai
# Used to change the OpenAI base address, default is https://api.openai.com/v1.
# When OpenAI cannot be accessed in China, replace it with a domestic mirror address,
# or replace it when a local model provides an OpenAI-compatible API.
OPENAI_API_BASE=https://api.openai.com/v1
# When enabled, migrations will be executed prior to application startup,
# and the application will start after the migrations have completed.
MIGRATION_ENABLED=true
# File Access Time specifies a time interval in seconds during which the file can be accessed.
# The default value is 300 seconds.
FILES_ACCESS_TIMEOUT=300
# Access token expiration time in minutes
ACCESS_TOKEN_EXPIRE_MINUTES=60
# The maximum number of active requests for the application; 0 means unlimited. Should be a non-negative integer.
APP_MAX_ACTIVE_REQUESTS=0

# ------------------------------
# Container Startup Related Configuration
# Only effective when starting with the Docker image or docker-compose.
# ------------------------------
# API service binding address, default: 0.0.0.0, i.e., accessible from all addresses.
DIFY_BIND_ADDRESS=0.0.0.0
# API service binding port number, default 5001.
DIFY_PORT=5001
# The number of API server workers, i.e., the number of gevent workers.
# Formula: number of CPU cores x 2 + 1
# Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers
SERVER_WORKER_AMOUNT=
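# Example: on a 4-core machine, 4 x 2 + 1 = 9, so you could set:
# SERVER_WORKER_AMOUNT=9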
# Defaults to gevent. If using Windows, it can be switched to sync or solo.
SERVER_WORKER_CLASS=
# Similar to SERVER_WORKER_CLASS. Default is gevent.
# If using Windows, it can be switched to sync or solo.
CELERY_WORKER_CLASS=
# Request handling timeout. The default is 200;
# it is recommended to set it to 360 to support a longer SSE connection time.
GUNICORN_TIMEOUT=360
# The number of Celery workers. The default is 1, and it can be set as needed.
CELERY_WORKER_AMOUNT=
# Flag indicating whether to enable autoscaling of Celery workers.
#
# Autoscaling is useful when tasks are CPU intensive and can be dynamically
# allocated and deallocated based on the workload.
#
# When autoscaling is enabled, the maximum and minimum number of workers can
# be specified. The autoscaling algorithm will dynamically adjust the number
# of workers within the specified range.
#
# Default is false (i.e., autoscaling is disabled).
#
# Example:
# CELERY_AUTO_SCALE=true
CELERY_AUTO_SCALE=false
# The maximum number of Celery workers that can be autoscaled.
# This is optional and only used when autoscaling is enabled.
# Default is not set.
CELERY_MAX_WORKERS=
# The minimum number of Celery workers that can be autoscaled.
# This is optional and only used when autoscaling is enabled.
# Default is not set.
CELERY_MIN_WORKERS=
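# Example (hypothetical values): scale dynamically between 2 and 8 workers:
# CELERY_AUTO_SCALE=true
# CELERY_MIN_WORKERS=2
# CELERY_MAX_WORKERS=8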
# API Tool configuration
API_TOOL_DEFAULT_CONNECT_TIMEOUT=10
API_TOOL_DEFAULT_READ_TIMEOUT=60

# ------------------------------
# Database Configuration
# The database uses PostgreSQL. Please use the public schema.
# It is consistent with the configuration in the 'db' service below.
# ------------------------------
DB_USERNAME=postgres
DB_PASSWORD=difyai123456
DB_HOST=db
DB_PORT=5432
DB_DATABASE=dify
# The size of the database connection pool.
# The default is 30 connections, which can be appropriately increased.
SQLALCHEMY_POOL_SIZE=30
# Database connection pool recycling time, the default is 3600 seconds.
SQLALCHEMY_POOL_RECYCLE=3600
# Whether to print SQL, default is false.
SQLALCHEMY_ECHO=false
# Maximum number of connections to the database
# Default is 100
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS
POSTGRES_MAX_CONNECTIONS=100
# Sets the amount of shared memory used for postgres's shared buffers.
# Default is 128MB
# Recommended value: 25% of available memory
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS
POSTGRES_SHARED_BUFFERS=128MB
# Sets the amount of memory used by each database worker for working space.
# Default is 4MB
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM
POSTGRES_WORK_MEM=4MB
# Sets the amount of memory reserved for maintenance activities.
# Default is 64MB
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM
POSTGRES_MAINTENANCE_WORK_MEM=64MB
# Sets the planner's assumption about the effective cache size.
# Default is 4096MB
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE
POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB

# ------------------------------
# Redis Configuration
# This Redis configuration is used for caching and for pub/sub during conversation.
# ------------------------------
REDIS_HOST=redis
REDIS_PORT=6379
REDIS_USERNAME=
REDIS_PASSWORD=difyai123456
REDIS_USE_SSL=false
# Whether to use Redis Sentinel mode.
# If set to true, the application will automatically discover and connect to the master node through Sentinel.
REDIS_USE_SENTINEL=false
# List of Redis Sentinel nodes. If Sentinel mode is enabled, provide at least one Sentinel IP and port.
# Format: `<sentinel1_ip>:<sentinel1_port>,<sentinel2_ip>:<sentinel2_port>,<sentinel3_ip>:<sentinel3_port>`
REDIS_SENTINELS=
REDIS_SENTINEL_SERVICE_NAME=
REDIS_SENTINEL_USERNAME=
REDIS_SENTINEL_PASSWORD=
REDIS_SENTINEL_SOCKET_TIMEOUT=0.1
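# Example (hypothetical hosts): a three-node Sentinel deployment monitoring a master named "mymaster":
# REDIS_USE_SENTINEL=true
# REDIS_SENTINELS=192.168.1.10:26379,192.168.1.11:26379,192.168.1.12:26379
# REDIS_SENTINEL_SERVICE_NAME=mymaster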

# ------------------------------
# Celery Configuration
# ------------------------------
# Use Redis as the broker, and Redis db 1 as the Celery broker database.
# Format: `redis://<redis_username>:<redis_password>@<redis_host>:<redis_port>/<redis_database>`
# Example: redis://:difyai123456@redis:6379/1
# If using Redis Sentinel, the format is: `sentinel://<sentinel_username>:<sentinel_password>@<sentinel_host>:<sentinel_port>/<redis_database>`
# Example: sentinel://localhost:26379/1;sentinel://localhost:26380/1;sentinel://localhost:26381/1
CELERY_BROKER_URL=redis://:difyai123456@redis:6379/1
BROKER_USE_SSL=false
# If you are using Redis Sentinel for high availability, configure the following settings.
CELERY_USE_SENTINEL=false
CELERY_SENTINEL_MASTER_NAME=
CELERY_SENTINEL_SOCKET_TIMEOUT=0.1
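# Example (hypothetical hosts): broker pointed at Sentinel, with the monitored master named "mymaster":
# CELERY_USE_SENTINEL=true
# CELERY_BROKER_URL=sentinel://localhost:26379/1;sentinel://localhost:26380/1;sentinel://localhost:26381/1
# CELERY_SENTINEL_MASTER_NAME=mymaster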

# ------------------------------
# CORS Configuration
# Used to set the front-end cross-domain access policy.
# ------------------------------
# Specifies the allowed origins for cross-origin requests to the Web API,
# e.g. https://dify.app or * for all origins.
WEB_API_CORS_ALLOW_ORIGINS=*
# Specifies the allowed origins for cross-origin requests to the console API,
# e.g. https://cloud.dify.ai or * for all origins.
CONSOLE_CORS_ALLOW_ORIGINS=*
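# Example: if the web app and console are served from the URLs configured above,
# the policy can be tightened to those origins only:
# WEB_API_CORS_ALLOW_ORIGINS=https://app.dify.ai
# CONSOLE_CORS_ALLOW_ORIGINS=https://console.dify.ai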

# ------------------------------
# File Storage Configuration
# ------------------------------
# The type of storage to use for storing user files.
# Supported values are `local`, `s3`, `azure-blob`, `google-storage`, `aliyun-oss`, `tencent-cos`, `huawei-obs`, `volcengine-tos`, `baidu-obs`, `supabase`
# Default: `local`
STORAGE_TYPE=local
STORAGE_LOCAL_PATH=storage
# S3 Configuration
# Whether to use AWS managed IAM roles for authenticating with the S3 service.
# If set to false, the access key and secret key must be provided.
S3_USE_AWS_MANAGED_IAM=false
# The endpoint of the S3 service.
S3_ENDPOINT=
# The region of the S3 service.
S3_REGION=us-east-1
# The name of the S3 bucket to use for storing files.
S3_BUCKET_NAME=difyai
# The access key to use for authenticating with the S3 service.
S3_ACCESS_KEY=
# The secret key to use for authenticating with the S3 service.
S3_SECRET_KEY=
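# Example (hypothetical bucket and credentials): store files in an AWS S3 bucket in us-east-1:
# STORAGE_TYPE=s3
# S3_ENDPOINT=https://s3.us-east-1.amazonaws.com
# S3_REGION=us-east-1
# S3_BUCKET_NAME=difyai
# S3_ACCESS_KEY=your-access-key
# S3_SECRET_KEY=your-secret-key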
# Azure Blob Configuration
# The name of the Azure Blob Storage account to use for storing files.
AZURE_BLOB_ACCOUNT_NAME=difyai
# The access key to use for authenticating with the Azure Blob Storage account.
AZURE_BLOB_ACCOUNT_KEY=difyai
# The name of the Azure Blob Storage container to use for storing files.
AZURE_BLOB_CONTAINER_NAME=difyai-container
# The URL of the Azure Blob Storage account.
AZURE_BLOB_ACCOUNT_URL=https://<your_account_name>.blob.core.windows.net
# Google Storage Configuration
# The name of the Google Storage bucket to use for storing files.
GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name
# The service account JSON key to use for authenticating with the Google Storage service.
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64=your-google-service-account-json-base64-string
# The Alibaba Cloud OSS configurations,
# only available when STORAGE_TYPE is `aliyun-oss`
ALIYUN_OSS_BUCKET_NAME=your-bucket-name
ALIYUN_OSS_ACCESS_KEY=your-access-key
ALIYUN_OSS_SECRET_KEY=your-secret-key
ALIYUN_OSS_ENDPOINT=https://oss-ap-southeast-1-internal.aliyuncs.com
ALIYUN_OSS_REGION=ap-southeast-1
ALIYUN_OSS_AUTH_VERSION=v4
# Don't start with '/'. OSS doesn't support a leading slash in object names.
ALIYUN_OSS_PATH=your-path
# Tencent COS Configuration
# The name of the Tencent COS bucket to use for storing files.
TENCENT_COS_BUCKET_NAME=your-bucket-name
# The secret key to use for authenticating with the Tencent COS service.
TENCENT_COS_SECRET_KEY=your-secret-key
# The secret ID to use for authenticating with the Tencent COS service.
TENCENT_COS_SECRET_ID=your-secret-id
# The region of the Tencent COS service.
TENCENT_COS_REGION=your-region
# The scheme of the Tencent COS service.
TENCENT_COS_SCHEME=your-scheme
# Huawei OBS Configuration
# The name of the Huawei OBS bucket to use for storing files.
HUAWEI_OBS_BUCKET_NAME=your-bucket-name
# The secret key to use for authenticating with the Huawei OBS service.
HUAWEI_OBS_SECRET_KEY=your-secret-key
# The access key to use for authenticating with the Huawei OBS service.
HUAWEI_OBS_ACCESS_KEY=your-access-key
# The server URL of the Huawei OBS service.
HUAWEI_OBS_SERVER=your-server-url
# Volcengine TOS Configuration
# The name of the Volcengine TOS bucket to use for storing files.
VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name
# The secret key to use for authenticating with the Volcengine TOS service.
VOLCENGINE_TOS_SECRET_KEY=your-secret-key
# The access key to use for authenticating with the Volcengine TOS service.
VOLCENGINE_TOS_ACCESS_KEY=your-access-key
# The endpoint of the Volcengine TOS service.
VOLCENGINE_TOS_ENDPOINT=your-server-url
# The region of the Volcengine TOS service.
VOLCENGINE_TOS_REGION=your-region
# Baidu OBS Storage Configuration
# The name of the Baidu OBS bucket to use for storing files.
BAIDU_OBS_BUCKET_NAME=your-bucket-name
# The secret key to use for authenticating with the Baidu OBS service.
BAIDU_OBS_SECRET_KEY=your-secret-key
# The access key to use for authenticating with the Baidu OBS service.
BAIDU_OBS_ACCESS_KEY=your-access-key
# The endpoint of the Baidu OBS service.
BAIDU_OBS_ENDPOINT=your-server-url
# Supabase Storage Configuration
# The name of the Supabase bucket to use for storing files.
SUPABASE_BUCKET_NAME=your-bucket-name
# The API key to use for authenticating with the Supabase service.
SUPABASE_API_KEY=your-access-key
# The project endpoint URL of the Supabase service.
SUPABASE_URL=your-server-url

# ------------------------------
# Vector Database Configuration
# ------------------------------
# The type of vector store to use.
# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `tidb_vector`, `tidb_on_qdrant`, `oracle`, `tencent`, `elasticsearch`, `analyticdb`, `couchbase`, `vikingdb`, `baidu`, `lindorm`, `oceanbase`.
VECTOR_STORE=weaviate
# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
WEAVIATE_ENDPOINT=http://weaviate:8080
# The Weaviate API key.
WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
# The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`.
QDRANT_URL=http://qdrant:6333
# The Qdrant API key.
QDRANT_API_KEY=difyai123456
# The Qdrant client timeout setting.
QDRANT_CLIENT_TIMEOUT=20
# Whether the Qdrant client enables gRPC mode.
QDRANT_GRPC_ENABLED=false
# The Qdrant server gRPC port.
QDRANT_GRPC_PORT=6334
# Milvus configuration. Only available when VECTOR_STORE is `milvus`.
# The Milvus URI.
MILVUS_URI=http://127.0.0.1:19530
# The Milvus token.
MILVUS_TOKEN=
# The Milvus username.
MILVUS_USER=root
# The Milvus password.
MILVUS_PASSWORD=Milvus
# MyScale configuration, only available when VECTOR_STORE is `myscale`
# For multi-language support, please set MYSCALE_FTS_PARAMS referring to:
# https://myscale.com/docs/en/text-search/#understanding-fts-index-parameters
MYSCALE_HOST=myscale
MYSCALE_PORT=8123
MYSCALE_USER=default
MYSCALE_PASSWORD=
MYSCALE_DATABASE=dify
MYSCALE_FTS_PARAMS=
# Couchbase configurations, only available when VECTOR_STORE is `couchbase`
# The connection string must include the hostname defined in the docker-compose file (couchbase-server in this case)
COUCHBASE_CONNECTION_STRING=couchbase://couchbase-server
COUCHBASE_USER=Administrator
COUCHBASE_PASSWORD=password
COUCHBASE_BUCKET_NAME=Embeddings
COUCHBASE_SCOPE_NAME=_default
# pgvector configurations, only available when VECTOR_STORE is `pgvector`
PGVECTOR_HOST=pgvector
PGVECTOR_PORT=5432
PGVECTOR_USER=postgres
PGVECTOR_PASSWORD=difyai123456
PGVECTOR_DATABASE=dify
PGVECTOR_MIN_CONNECTION=1
PGVECTOR_MAX_CONNECTION=5
# pgvecto-rs configurations, only available when VECTOR_STORE is `pgvecto-rs`
PGVECTO_RS_HOST=pgvecto-rs
PGVECTO_RS_PORT=5432
PGVECTO_RS_USER=postgres
PGVECTO_RS_PASSWORD=difyai123456
PGVECTO_RS_DATABASE=dify
# AnalyticDB configurations, only available when VECTOR_STORE is `analyticdb`
ANALYTICDB_KEY_ID=your-ak
ANALYTICDB_KEY_SECRET=your-sk
ANALYTICDB_REGION_ID=cn-hangzhou
ANALYTICDB_INSTANCE_ID=gp-ab123456
ANALYTICDB_ACCOUNT=testaccount
ANALYTICDB_PASSWORD=testpassword
ANALYTICDB_NAMESPACE=dify
ANALYTICDB_NAMESPACE_PASSWORD=difypassword
# TiDB vector configurations, only available when VECTOR_STORE is `tidb`
TIDB_VECTOR_HOST=tidb
TIDB_VECTOR_PORT=4000
TIDB_VECTOR_USER=xxx.root
TIDB_VECTOR_PASSWORD=xxxxxx
TIDB_VECTOR_DATABASE=dify
# TiDB on Qdrant configuration, only available when VECTOR_STORE is `tidb_on_qdrant`
TIDB_ON_QDRANT_URL=http://127.0.0.1
TIDB_ON_QDRANT_API_KEY=dify
TIDB_ON_QDRANT_CLIENT_TIMEOUT=20
TIDB_ON_QDRANT_GRPC_ENABLED=false
TIDB_ON_QDRANT_GRPC_PORT=6334
TIDB_PUBLIC_KEY=dify
TIDB_PRIVATE_KEY=dify
TIDB_API_URL=http://127.0.0.1
TIDB_IAM_API_URL=http://127.0.0.1
TIDB_REGION=regions/aws-us-east-1
TIDB_PROJECT_ID=dify
TIDB_SPEND_LIMIT=100
# Chroma configuration, only available when VECTOR_STORE is `chroma`
CHROMA_HOST=127.0.0.1
CHROMA_PORT=8000
CHROMA_TENANT=default_tenant
CHROMA_DATABASE=default_database
CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider
CHROMA_AUTH_CREDENTIALS=xxxxxx
# Oracle configuration, only available when VECTOR_STORE is `oracle`
ORACLE_HOST=oracle
ORACLE_PORT=1521
ORACLE_USER=dify
ORACLE_PASSWORD=dify
ORACLE_DATABASE=FREEPDB1
# Relyt configurations, only available when VECTOR_STORE is `relyt`
RELYT_HOST=db
RELYT_PORT=5432
RELYT_USER=postgres
RELYT_PASSWORD=difyai123456
RELYT_DATABASE=postgres
# OpenSearch configuration, only available when VECTOR_STORE is `opensearch`
OPENSEARCH_HOST=opensearch
OPENSEARCH_PORT=9200
OPENSEARCH_USER=admin
OPENSEARCH_PASSWORD=admin
OPENSEARCH_SECURE=true
# Tencent vector configurations, only available when VECTOR_STORE is `tencent`
TENCENT_VECTOR_DB_URL=http://127.0.0.1
TENCENT_VECTOR_DB_API_KEY=dify
TENCENT_VECTOR_DB_TIMEOUT=30
TENCENT_VECTOR_DB_USERNAME=dify
TENCENT_VECTOR_DB_DATABASE=dify
TENCENT_VECTOR_DB_SHARD=1
TENCENT_VECTOR_DB_REPLICAS=2
# Elasticsearch configuration, only available when VECTOR_STORE is `elasticsearch`
ELASTICSEARCH_HOST=0.0.0.0
ELASTICSEARCH_PORT=9200
ELASTICSEARCH_USERNAME=elastic
ELASTICSEARCH_PASSWORD=elastic
# Baidu vector configurations, only available when VECTOR_STORE is `baidu`
BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287
BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS=30000
BAIDU_VECTOR_DB_ACCOUNT=root
BAIDU_VECTOR_DB_API_KEY=dify
BAIDU_VECTOR_DB_DATABASE=dify
BAIDU_VECTOR_DB_SHARD=1
BAIDU_VECTOR_DB_REPLICAS=3
# VikingDB configurations, only available when VECTOR_STORE is `vikingdb`
VIKINGDB_ACCESS_KEY=your-ak
VIKINGDB_SECRET_KEY=your-sk
VIKINGDB_REGION=cn-shanghai
VIKINGDB_HOST=api-vikingdb.xxx.volces.com
VIKINGDB_SCHEMA=http
VIKINGDB_CONNECTION_TIMEOUT=30
VIKINGDB_SOCKET_TIMEOUT=30
# Lindorm configuration, only available when VECTOR_STORE is `lindorm`
LINDORM_URL=http://ld-***************-proxy-search-pub.lindorm.aliyuncs.com:30070
LINDORM_USERNAME=username
LINDORM_PASSWORD=password
# OceanBase Vector configuration, only available when VECTOR_STORE is `oceanbase`
OCEANBASE_VECTOR_HOST=oceanbase-vector
OCEANBASE_VECTOR_PORT=2881
OCEANBASE_VECTOR_USER=root@test
OCEANBASE_VECTOR_PASSWORD=
OCEANBASE_VECTOR_DATABASE=test
OCEANBASE_MEMORY_LIMIT=6G

# ------------------------------
# Knowledge Configuration
# ------------------------------
# Upload file size limit, default 15M.
UPLOAD_FILE_SIZE_LIMIT=15
# The maximum number of files that can be uploaded at a time, default 5.
UPLOAD_FILE_BATCH_LIMIT=5
# ETL type, supported values: `dify`, `Unstructured`
# `dify`: Dify's proprietary file extraction scheme
# `Unstructured`: Unstructured.io file extraction scheme
ETL_TYPE=dify
# Unstructured API path; needs to be configured when ETL_TYPE is Unstructured.
# For example: http://unstructured:8000/general/v0/general
UNSTRUCTURED_API_URL=
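# Example: switch to the Unstructured extraction scheme (the URL assumes the bundled `unstructured` container):
# ETL_TYPE=Unstructured
# UNSTRUCTURED_API_URL=http://unstructured:8000/general/v0/general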

# ------------------------------
# Model Configuration
# ------------------------------
# The maximum number of tokens allowed for prompt generation.
# This setting controls the upper limit of tokens that can be used by the LLM
# when generating a prompt in the prompt generation tool.
# Default: 512 tokens.
PROMPT_GENERATION_MAX_TOKENS=512
# The maximum number of tokens allowed for code generation.
# This setting controls the upper limit of tokens that can be used by the LLM
# when generating code in the code generation tool.
# Default: 1024 tokens.
CODE_GENERATION_MAX_TOKENS=1024

# ------------------------------
# Multi-modal Configuration
# ------------------------------
# The format used to send images to multi-modal models;
# the default is base64, and url is optional.
# The call latency in url mode is lower than in base64 mode.
# The more broadly compatible base64 mode is generally recommended.
# If set to url, FILES_URL must be configured as an externally accessible address so that the multi-modal model can access the image.
MULTIMODAL_SEND_IMAGE_FORMAT=base64
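# Example: send images by URL instead, with FILES_URL set to an address the model provider can reach (hypothetical domain):
# MULTIMODAL_SEND_IMAGE_FORMAT=url
# FILES_URL=https://files.example.com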
# Upload image file size limit, default 10M.
UPLOAD_IMAGE_FILE_SIZE_LIMIT=10
# Upload video file size limit, default 100M.
UPLOAD_VIDEO_FILE_SIZE_LIMIT=100
# Upload audio file size limit, default 50M.
UPLOAD_AUDIO_FILE_SIZE_LIMIT=50

# ------------------------------
# Sentry Configuration
# Used for application monitoring and error log tracking.
# ------------------------------
# API service Sentry DSN address, default is empty.
# When empty, no monitoring information is reported to Sentry and Sentry error reporting is disabled.
API_SENTRY_DSN=
# The sample rate for Sentry events reported by the API service; 0.01 means 1%.
API_SENTRY_TRACES_SAMPLE_RATE=1.0
# The sample rate for Sentry profiles reported by the API service; 0.01 means 1%.
API_SENTRY_PROFILES_SAMPLE_RATE=1.0
# Web service Sentry DSN address, default is empty.
# When empty, no monitoring information is reported to Sentry and Sentry error reporting is disabled.
WEB_SENTRY_DSN=

# ------------------------------
# Notion Integration Configuration
# Variables can be obtained by applying for a Notion integration: https://www.notion.so/my-integrations
# ------------------------------
# Configure as "public" or "internal".
# Since Notion's OAuth redirect URL only supports HTTPS,
# if deploying locally, please use Notion's internal integration.
NOTION_INTEGRATION_TYPE=public
# Notion OAuth client secret (used for the public integration type)
NOTION_CLIENT_SECRET=
# Notion OAuth client ID (used for the public integration type)
NOTION_CLIENT_ID=
# Notion internal integration secret.
# If the value of NOTION_INTEGRATION_TYPE is "internal",
# you need to configure this variable.
NOTION_INTERNAL_SECRET=

# ------------------------------
# Mail related configuration
# ------------------------------
# Mail type, supported values: resend, smtp
MAIL_TYPE=resend
# Default sender email address, used if not otherwise specified
MAIL_DEFAULT_SEND_FROM=
# API key for the Resend email provider, used when MAIL_TYPE is `resend`.
RESEND_API_KEY=your-resend-api-key
# SMTP server configuration, used when MAIL_TYPE is `smtp`
SMTP_SERVER=
SMTP_PORT=465
SMTP_USERNAME=
SMTP_PASSWORD=
SMTP_USE_TLS=true
SMTP_OPPORTUNISTIC_TLS=false
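# Example (hypothetical account): send mail through an SMTP server using TLS on port 465:
# MAIL_TYPE=smtp
# MAIL_DEFAULT_SEND_FROM=no-reply@example.com
# SMTP_SERVER=smtp.example.com
# SMTP_PORT=465
# SMTP_USERNAME=no-reply@example.com
# SMTP_PASSWORD=your-smtp-password
# SMTP_USE_TLS=true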

# ------------------------------
# Others Configuration
# ------------------------------
# Maximum length of segmentation tokens for indexing
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=1000
# Member invitation link valid time (hours),
# Default: 72.
INVITE_EXPIRY_HOURS=72
# Reset password token valid time (minutes)
RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5
# The sandbox service endpoint.
CODE_EXECUTION_ENDPOINT=http://sandbox:8194
CODE_MAX_NUMBER=9223372036854775807
CODE_MIN_NUMBER=-9223372036854775808
CODE_MAX_DEPTH=5
CODE_MAX_PRECISION=20
CODE_MAX_STRING_LENGTH=80000
TEMPLATE_TRANSFORM_MAX_LENGTH=80000
CODE_MAX_STRING_ARRAY_LENGTH=30
CODE_MAX_OBJECT_ARRAY_LENGTH=30
CODE_MAX_NUMBER_ARRAY_LENGTH=1000
# Workflow runtime configuration
WORKFLOW_MAX_EXECUTION_STEPS=500
WORKFLOW_MAX_EXECUTION_TIME=1200
WORKFLOW_CALL_MAX_DEPTH=5
MAX_VARIABLE_SIZE=204800
WORKFLOW_FILE_UPLOAD_LIMIT=10
# HTTP request node in workflow configuration
HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760
HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576
# SSRF Proxy server HTTP URL
SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128
# SSRF Proxy server HTTPS URL
SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128

# ------------------------------
# Environment Variables for web Service
# ------------------------------
# The timeout for text generation, in milliseconds
TEXT_GENERATION_TIMEOUT_MS=60000

# ------------------------------
# Environment Variables for db Service
# ------------------------------
PGUSER=${DB_USERNAME}
# The password for the default postgres user.
POSTGRES_PASSWORD=${DB_PASSWORD}
# The name of the default postgres database.
POSTGRES_DB=${DB_DATABASE}
# postgres data directory
PGDATA=/var/lib/postgresql/data/pgdata

# ------------------------------
# Environment Variables for sandbox Service
# ------------------------------
# The API key for the sandbox service
SANDBOX_API_KEY=dify-sandbox
# The mode in which the Gin framework runs
SANDBOX_GIN_MODE=release
# The timeout for the worker in seconds
SANDBOX_WORKER_TIMEOUT=15
# Enable network for the sandbox service
SANDBOX_ENABLE_NETWORK=true
# HTTP proxy URL for SSRF protection
SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128
# HTTPS proxy URL for SSRF protection
SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128
# The port on which the sandbox service runs
SANDBOX_PORT=8194

# ------------------------------
# Environment Variables for weaviate Service
# (only used when VECTOR_STORE is weaviate)
# ------------------------------
WEAVIATE_PERSISTENCE_DATA_PATH=/var/lib/weaviate
WEAVIATE_QUERY_DEFAULTS_LIMIT=25
WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true
WEAVIATE_DEFAULT_VECTORIZER_MODULE=none
WEAVIATE_CLUSTER_HOSTNAME=node1
WEAVIATE_AUTHENTICATION_APIKEY_ENABLED=true
WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
WEAVIATE_AUTHENTICATION_APIKEY_USERS=hello@dify.ai
WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true
WEAVIATE_AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai

# ------------------------------
# Environment Variables for Chroma
# (only used when VECTOR_STORE is chroma)
# ------------------------------
# Authentication credentials for Chroma server
CHROMA_SERVER_AUTHN_CREDENTIALS=difyai123456
# Authentication provider for Chroma server
CHROMA_SERVER_AUTHN_PROVIDER=chromadb.auth.token_authn.TokenAuthenticationServerProvider
# Persistence setting for Chroma server
CHROMA_IS_PERSISTENT=TRUE

# ------------------------------
# Environment Variables for Oracle Service
# (only used when VECTOR_STORE is Oracle)
# ------------------------------
ORACLE_PWD=Dify123456
ORACLE_CHARACTERSET=AL32UTF8

# ------------------------------
# Environment Variables for milvus Service
# (only used when VECTOR_STORE is milvus)
# ------------------------------
# ETCD configuration for auto compaction mode
ETCD_AUTO_COMPACTION_MODE=revision
# ETCD configuration for auto compaction retention in terms of number of revisions
ETCD_AUTO_COMPACTION_RETENTION=1000
# ETCD configuration for backend quota in bytes
ETCD_QUOTA_BACKEND_BYTES=4294967296
# ETCD configuration for the number of changes before triggering a snapshot
ETCD_SNAPSHOT_COUNT=50000
# MinIO access key for authentication
MINIO_ACCESS_KEY=minioadmin
# MinIO secret key for authentication
MINIO_SECRET_KEY=minioadmin
# ETCD service endpoints
ETCD_ENDPOINTS=etcd:2379
# MinIO service address
MINIO_ADDRESS=minio:9000
# Enable or disable security authorization
MILVUS_AUTHORIZATION_ENABLED=true

# ------------------------------
# Environment Variables for pgvector / pgvecto-rs Service
# (only used when VECTOR_STORE is pgvector / pgvecto-rs)
# ------------------------------
PGVECTOR_PGUSER=postgres
# The password for the default postgres user.
PGVECTOR_POSTGRES_PASSWORD=difyai123456
# The name of the default postgres database.
PGVECTOR_POSTGRES_DB=dify
# postgres data directory
PGVECTOR_PGDATA=/var/lib/postgresql/data/pgdata

# ------------------------------
# Environment Variables for opensearch
# (only used when VECTOR_STORE is opensearch)
# ------------------------------
OPENSEARCH_DISCOVERY_TYPE=single-node
OPENSEARCH_BOOTSTRAP_MEMORY_LOCK=true
OPENSEARCH_JAVA_OPTS_MIN=512m
OPENSEARCH_JAVA_OPTS_MAX=1024m
OPENSEARCH_INITIAL_ADMIN_PASSWORD=Qazwsxedc!@#123
OPENSEARCH_MEMLOCK_SOFT=-1
OPENSEARCH_MEMLOCK_HARD=-1
OPENSEARCH_NOFILE_SOFT=65536
OPENSEARCH_NOFILE_HARD=65536

# ------------------------------
# Environment Variables for Nginx reverse proxy
# ------------------------------
NGINX_SERVER_NAME=_
NGINX_HTTPS_ENABLED=false
# HTTP port
NGINX_PORT=80
# SSL settings are only applied when NGINX_HTTPS_ENABLED is true
NGINX_SSL_PORT=443
# If NGINX_HTTPS_ENABLED is true, you are required to add your own SSL certificates/keys to the `./nginx/ssl` directory
# and modify the env vars below accordingly.
NGINX_SSL_CERT_FILENAME=dify.crt
NGINX_SSL_CERT_KEY_FILENAME=dify.key
NGINX_SSL_PROTOCOLS=TLSv1.1 TLSv1.2 TLSv1.3
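# Example: terminate TLS at Nginx with certificate files placed in ./nginx/ssl under the filenames configured above:
# NGINX_HTTPS_ENABLED=true
# NGINX_SSL_CERT_FILENAME=dify.crt
# NGINX_SSL_CERT_KEY_FILENAME=dify.key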
# Nginx performance tuning
NGINX_WORKER_PROCESSES=auto
NGINX_CLIENT_MAX_BODY_SIZE=15M
NGINX_KEEPALIVE_TIMEOUT=65
# Proxy settings
NGINX_PROXY_READ_TIMEOUT=3600s
NGINX_PROXY_SEND_TIMEOUT=3600s
# Set to true to accept requests for /.well-known/acme-challenge/
NGINX_ENABLE_CERTBOT_CHALLENGE=false

# ------------------------------
# Certbot Configuration
# ------------------------------
# Email address (required to get certificates from Let's Encrypt)
CERTBOT_EMAIL=your_email@example.com
# Domain name
CERTBOT_DOMAIN=your_domain.com
# certbot command options
# e.g.: --force-renewal --dry-run --test-cert --debug
CERTBOT_OPTIONS=

# ------------------------------
# Environment Variables for SSRF Proxy
# ------------------------------
SSRF_HTTP_PORT=3128
SSRF_COREDUMP_DIR=/var/spool/squid
SSRF_REVERSE_PROXY_PORT=8194
SSRF_SANDBOX_HOST=sandbox

# ------------------------------
# Docker env var for specifying the vector db type at startup
# (based on the vector db type, the corresponding docker
# compose profile will be used).
# If you want to use unstructured, add ',unstructured' to the end
# (see the example below).
# ------------------------------
COMPOSE_PROFILES=${VECTOR_STORE:-weaviate}
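# Example: run the qdrant profile and also start the unstructured service:
# COMPOSE_PROFILES=qdrant,unstructured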

# ------------------------------
# Docker Compose Service Expose Host Port Configurations
# ------------------------------
EXPOSE_NGINX_PORT=80
EXPOSE_NGINX_SSL_PORT=443

# ----------------------------------------------------------------------------
# ModelProvider & Tool Position Configuration
# Used to specify the model providers and tools that can be used in the app.
# ----------------------------------------------------------------------------
# Pin, include, and exclude tools.
# Use comma-separated values with no spaces between items.
# Example: POSITION_TOOL_PINS=bing,google
POSITION_TOOL_PINS=
POSITION_TOOL_INCLUDES=
POSITION_TOOL_EXCLUDES=
# Pin, include, and exclude model providers.
# Use comma-separated values with no spaces between items.
# Example: POSITION_PROVIDER_PINS=openai,openllm
POSITION_PROVIDER_PINS=
POSITION_PROVIDER_INCLUDES=
POSITION_PROVIDER_EXCLUDES=
# CSP (Content Security Policy) whitelist: https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP
CSP_WHITELIST=