# llm/config.yaml — litellm proxy configuration
# Provenance: Hugging Face repo (user Guru-25), commit 3c2ff65 "enable cache",
# 769 bytes. (The original lines here — "raw", "history blame", etc. — were
# web-UI page chrome captured by scraping, not part of the config itself.)
---
# Models exposed by this litellm proxy. Each entry maps a public model_name
# (what clients request) to a GitHub Models backend ("github/<model>").
# The API key is not stored here: the "os.environ/GITHUB_API_KEY" string is
# litellm's indirection syntax, resolved from the environment at runtime.
# NOTE(review): indentation restored to canonical 2-space nesting — the
# extracted source was flush-left, which is not parseable YAML.
model_list:
  - model_name: gpt-4o
    litellm_params:
      model: github/gpt-4o
      api_key: "os.environ/GITHUB_API_KEY"
  - model_name: gpt-4o-mini
    litellm_params:
      model: github/gpt-4o-mini
      api_key: "os.environ/GITHUB_API_KEY"
  - model_name: llama-3.1-405b-instruct
    litellm_params:
      model: github/meta-llama-3.1-405b-instruct
      api_key: "os.environ/GITHUB_API_KEY"
  - model_name: llama-3.1-70b-instruct
    litellm_params:
      model: github/meta-llama-3.1-70b-instruct
      api_key: "os.environ/GITHUB_API_KEY"
  - model_name: llama-3.1-8b-instruct
    litellm_params:
      model: github/meta-llama-3.1-8b-instruct
      api_key: "os.environ/GITHUB_API_KEY"
# Proxy-wide litellm behavior settings.
litellm_settings:
  set_verbose: false  # keep debug logging off; flip to true when troubleshooting
  json_logs: true     # emit structured JSON log lines instead of plain text
  cache: true         # enable litellm response caching