---
# LiteLLM proxy configuration — model routing for GitHub Copilot and NVIDIA NIM.
# (Pasted 2026-04-16 23:11:07 +02:00; original paste metadata: 38 lines, 1.6 KiB, YAML.)
litellm_settings:
  drop_params: true  # strip unsupported params (e.g. 'thinking') before sending to Copilot

model_list:
  # --- GitHub Copilot provider (auth handled by the github_copilot provider) ---
  - model_name: gpt-4.1
    litellm_params:
      model: github_copilot/gpt-4.1
  - model_name: claude-haiku-4.5
    litellm_params:
      model: github_copilot/claude-haiku-4.5
  - model_name: claude-sonnet-4.6
    litellm_params:
      model: github_copilot/claude-sonnet-4.6
  - model_name: gpt-5.1-codex
    model_info:
      mode: responses  # serve via the Responses API endpoint
    litellm_params:
      model: github_copilot/gpt-5.1-codex
  - model_name: github_copilot/text-embedding-ada-002
    model_info:
      mode: embedding  # embedding model, not chat
    litellm_params:
      model: github_copilot/text-embedding-ada-002

  # --- NVIDIA NIM provider (nvidia_nim/ prefix routes to the NIM API;
  # default api_base is https://integrate.api.nvidia.com/v1/, so it is not set here —
  # an explicit api_base: "" could override that default with an empty value).
  #
  # SECURITY: never commit raw API keys. `os.environ/NVIDIA_NIM_API_KEY` tells
  # LiteLLM to read the key from the environment at runtime. The key previously
  # committed in this file must be considered leaked and rotated.
  - model_name: glm5
    litellm_params:
      model: nvidia_nim/z-ai/glm5
      api_key: os.environ/NVIDIA_NIM_API_KEY
  - model_name: kimi-k2.5
    litellm_params:
      model: nvidia_nim/moonshotai/kimi-k2.5
      api_key: os.environ/NVIDIA_NIM_API_KEY
  - model_name: minimax-m2.7
    litellm_params:
      model: nvidia_nim/minimaxai/minimax-m2.7
      api_key: os.environ/NVIDIA_NIM_API_KEY