# DO NOT MODIFY THIS FILE. Instead, create a new key.yaml and define OPENAI_API_KEY there.
# Settings in key.yaml take higher priority and will not be committed to git.
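# A minimal key.yaml might look like the following (illustrative sketch; the
# sk-xxx value is a placeholder for your real key):
# OPENAI_API_KEY: "sk-xxx"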
#### Project Path Setting
# WORKSPACE_PATH: "Path for placing output files"
#### if OpenAI
## The official OPENAI_BASE_URL is https://api.openai.com/v1
## If the official OPENAI_BASE_URL is not available, we recommend using [openai-forward](https://github.com/beidongjiedeguang/openai-forward).
## Alternatively, you can configure OPENAI_PROXY to reach the official OPENAI_BASE_URL.
OPENAI_BASE_URL: "https://api.openai.com/v1"
#OPENAI_PROXY: "http://127.0.0.1:8118"
#OPENAI_API_KEY: "YOUR_API_KEY" # set the value to sk-xxx if you self-host an OpenAI-compatible interface for an open LLM model
OPENAI_API_MODEL: "gpt-4-1106-preview"
MAX_TOKENS: 4096
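# RPM is assumed here to mean requests per minute, i.e., a client-side rate limit on API calls.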
RPM: 10
#### if Spark
#SPARK_APPID: "YOUR_APPID"
#SPARK_API_SECRET: "YOUR_APISecret"
#SPARK_API_KEY: "YOUR_APIKey"
#DOMAIN: "generalv2"
#SPARK_URL: "ws://spark-api.xf-yun.com/v2.1/chat"
#### if Anthropic
#ANTHROPIC_API_KEY: "YOUR_API_KEY"
#### if AZURE, check https://github.com/openai/openai-cookbook/blob/main/examples/azure/chat.ipynb
#OPENAI_API_TYPE: "azure"
#OPENAI_BASE_URL: "YOUR_AZURE_ENDPOINT"
#OPENAI_API_KEY: "YOUR_AZURE_API_KEY"
#OPENAI_API_VERSION: "YOUR_AZURE_API_VERSION"
#DEPLOYMENT_NAME: "YOUR_DEPLOYMENT_NAME"
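## Note: an Azure endpoint typically looks like "https://<resource-name>.openai.azure.com/",
## and DEPLOYMENT_NAME is the name of your model deployment in Azure, not the model id.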
#### if zhipuai from `https://open.bigmodel.cn`. You can set it here or export API_KEY="YOUR_API_KEY"
# ZHIPUAI_API_KEY: "YOUR_API_KEY"
#### if Google Gemini from `https://ai.google.dev/`; get an API_KEY from `https://makersuite.google.com/app/apikey`.
#### You can set it here or export GOOGLE_API_KEY="YOUR_API_KEY"
# GEMINI_API_KEY: "YOUR_API_KEY"
#### if use a self-hosted open LLM with an OpenAI-compatible interface
#OPEN_LLM_API_BASE: "http://127.0.0.1:8000/v1"
#OPEN_LLM_API_MODEL: "llama2-13b"
#
#### if use the Fireworks API
#FIREWORKS_API_KEY: "YOUR_API_KEY"
#FIREWORKS_API_BASE: "https://api.fireworks.ai/inference/v1"
#FIREWORKS_API_MODEL: "YOUR_LLM_MODEL" # e.g., accounts/fireworks/models/llama-v2-13b-chat
#### if use a self-hosted open LLM served by ollama
# OLLAMA_API_BASE: http://127.0.0.1:11434/api
# OLLAMA_API_MODEL: llama2
#### for Search
## Supported values: serpapi/google/serper/ddg
#SEARCH_ENGINE: serpapi
## Visit https://serpapi.com/ to get a key.
#SERPAPI_API_KEY: "YOUR_API_KEY"
## Visit https://console.cloud.google.com/apis/credentials to get a key.
#GOOGLE_API_KEY: "YOUR_API_KEY"
## Visit https://programmablesearchengine.google.com/controlpanel/create to get an id.
#GOOGLE_CSE_ID: "YOUR_CSE_ID"
## Visit https://serper.dev/ to get a key.
#SERPER_API_KEY: "YOUR_API_KEY"
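## Note: the `google` engine needs both GOOGLE_API_KEY and GOOGLE_CSE_ID; `ddg` (DuckDuckGo) requires no key.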
#### for web access
## Supported values: playwright/selenium
#WEB_BROWSER_ENGINE: playwright
## Supported values: chromium/firefox/webkit, visit https://playwright.dev/python/docs/api/class-browsertype
#PLAYWRIGHT_BROWSER_TYPE: chromium
## Supported values: chrome/firefox/edge/ie, visit https://www.selenium.dev/documentation/webdriver/browsers/
# SELENIUM_BROWSER_TYPE: chrome
#### for TTS
#AZURE_TTS_SUBSCRIPTION_KEY: "YOUR_API_KEY"
#AZURE_TTS_REGION: "eastus"
#### for Stable Diffusion
## Use an SD service based on https://github.com/AUTOMATIC1111/stable-diffusion-webui
#SD_URL: "YOUR_SD_URL"
#SD_T2I_API: "/sdapi/v1/txt2img"
#### for Execution
#LONG_TERM_MEMORY: false
#### for Mermaid CLI
## If you installed mmdc (Mermaid CLI) only for MetaGPT, enable the following configuration.
#PUPPETEER_CONFIG: "./config/puppeteer-config.json"
#MMDC: "./node_modules/.bin/mmdc"
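## A minimal puppeteer-config.json for mmdc might contain the following (illustrative
## sketch; the browser path is an assumption, adjust it to your install):
## { "executablePath": "/usr/bin/chromium", "args": ["--no-sandbox"] }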
### for calc_usage
# CALC_USAGE: false
### for Research
# MODEL_FOR_RESEARCHER_SUMMARY: gpt-3.5-turbo
# MODEL_FOR_RESEARCHER_REPORT: gpt-3.5-turbo-16k
### choose the engine for Mermaid conversion;
# the default is nodejs, and you can change it to playwright, pyppeteer, or ink
# MERMAID_ENGINE: nodejs
### browser path for the pyppeteer engine; supports Chrome, Chromium, and MS Edge
#PYPPETEER_EXECUTABLE_PATH: "/usr/bin/google-chrome-stable"
### for repairing a non-OpenAI LLM's output when parsing JSON text, if PROMPT_FORMAT=json.
### Because a non-OpenAI LLM's output will not always follow the instructions, this activates a post-processing
### repair operation on the content extracted from the LLM's raw output. Warning: it improves the result but does not fix all cases.
# REPAIR_LLM_OUTPUT: false
# PROMPT_FORMAT: json # json or markdown
DISABLE_LLM_PROVIDER_CHECK: true
STORAGE_TYPE: local # local / s3
# for local storage
LOCAL_ROOT: "storage"
LOCAL_BASE_URL: "storage"
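# With these defaults, files are presumably written under ./storage and served
# with "storage" as the URL path prefix; adjust both values if you relocate the folder.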
# for s3 storage
# S3_ACCESS_KEY: ""
# S3_SECRET_KEY: ""
# S3_ENDPOINT_URL: ""
# S3_BUCKET: ""
# S3_SECURE: false