# Configuration version (required)
version: 1.0.9

# Cache settings: Set to true to enable caching
cache: true

fileStrategy: "firebase"

# Definition of custom endpoints
endpoints:
  custom:
    # Generic reverse-proxy example
    - name: "Reverse Proxy1"
      # For `apiKey` and `baseURL`, you can use environment variables that you define
      # (see the commented example at the end of this file).
      # Known issue: do not name the variable `OPENROUTER_API_KEY`, as that would also
      # override the `openAI` endpoint to use OpenRouter.
      apiKey: "user_provided"
      baseURL: "user_provided"
      models:
        default: ["claude-3-5-sonnet-20240620", "claude-3-opus-20240229", "gpt-4-vision-preview", "gpt-4", "gpt-4o", "gpt-4-1106-preview", "gpt-4-0125-preview", "gpt-4-turbo", "gpt-3.5-turbo", "gpt-3.5-turbo-0613", "gpt-4-32k", "gpt-4-0314", "gpt-4-0613", "chatglm_pro", "chatglm_lite", "glm-4"]
        fetch: true
      titleConvo: false
      titleModel: "gpt-3.5-turbo"
      summarize: false
      summaryModel: "gpt-3.5-turbo"
      forcePrompt: false
      modelDisplayLabel: "Custom1"

    # Daifuku example
    - name: "Daifuku"
      # For `apiKey` and `baseURL`, you can use environment variables that you define
      # (see the commented example at the end of this file).
      # Known issue: do not name the variable `OPENROUTER_API_KEY`, as that would also
      # override the `openAI` endpoint to use OpenRouter.
      apiKey: "user_provided"
      baseURL: "user_provided"
      models:
        default: ["gpt-4", "gpt-4o", "gpt-4-1106-preview", "gpt-4-0125-preview", "gpt-4-turbo-2024-04-09", "gpt-3.5-turbo", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k", "gpt-4-vision-preview"]
        fetch: true
      titleConvo: true
      titleModel: "gpt-3.5-turbo"
      summarize: false
      summaryModel: "gpt-3.5-turbo"
      forcePrompt: false
      modelDisplayLabel: "daifuku"

    # SiliconFlow example
    - name: "SiliconFlow Nvidia"
      apiKey: "user_provided"
      baseURL: "https://api.siliconflow.cn/v1"
      models:
        default: ["nvidia/Llama-3.1-Nemotron-70B-Instruct"]
        fetch: true
      titleConvo: true
      titleModel: "Qwen/Qwen2.5-7B-Instruct"
      summarize: false
      summaryModel: "Qwen/Qwen2.5-7B-Instruct"
      forcePrompt: false
      modelDisplayLabel: "siliconflow"

    # Generic reverse-proxy example
    - name: "Reverse Proxy2"
      apiKey: "user_provided"
      baseURL: "user_provided"
      models:
        default: ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "gpt-4-0125-preview", "gpt-4-0613", "gpt-4-1106-preview", "gpt-4-turbo-2024-04-09", "gpt-4-turbo", "gpt-4o", "gpt-4o-mini-2024-07-18", "gpt-4o-mini", "chatgpt-4o-latest", "claude-3-5-sonnet-20240620", "claude-3-haiku-20240307", "o1-mini", "o1-mini-2024-09-12", "o1-preview", "o1-preview-2024-09-12", "claude-3-haiku", "claude-3-sonnet", "claude-3-opus", "claude-3-opus-20240229"]
        fetch: true
      titleConvo: false
      titleModel: "gpt-3.5-turbo"
      summarize: false
      summaryModel: "gpt-3.5-turbo"
      forcePrompt: false
      modelDisplayLabel: "Custom2"

# See the Custom Configuration Guide for more information:
# https://docs.librechat.ai/install/configuration/custom_config.html
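
# --- Example: environment-variable substitution (sketch) ---
# The endpoint comments above note that `apiKey` and `baseURL` accept environment
# variables instead of the `"user_provided"` placeholder. A minimal sketch, assuming
# you define the variables in your `.env` file; the names `MY_PROXY_API_KEY` and
# `MY_PROXY_BASE_URL` are hypothetical placeholders, not names required by LibreChat:
#
#     apiKey: "${MY_PROXY_API_KEY}"
#     baseURL: "${MY_PROXY_BASE_URL}"
#
# Per the known issue mentioned above, avoid naming the variable `OPENROUTER_API_KEY`,
# since that would also switch the `openAI` endpoint to use OpenRouter.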