---
# LiteLLM proxy configuration.
# Registers one model alias and configures logging, Redis caching,
# and Langfuse callbacks for request success/failure events.

model_list:
  # Public alias "guru-llama" routed to the GitHub-hosted Llama 3.1 70B model.
  - model_name: guru-llama
    litellm_params:
      model: github/meta-llama-3.1-70b-instruct
      # "os.environ/..." is LiteLLM syntax: resolve the key from the
      # GITHUB_API_KEY environment variable at runtime (no secret in VCS).
      api_key: "os.environ/GITHUB_API_KEY"

litellm_settings:
  set_verbose: false
  # Emit logs as structured JSON.
  json_logs: true
  # Enable response caching backed by Redis; keys are scoped to the
  # "litellm_caching" namespace.
  cache: true
  cache_params:
    type: redis
    namespace: "litellm_caching"
  # Send both successful and failed request traces to Langfuse.
  success_callback: ["langfuse"]
  failure_callback: ["langfuse"]