diff --git a/config.yaml b/config.yaml
index 66cab70..ec8eb20 100644
--- a/config.yaml
+++ b/config.yaml
@@ -2,22 +2,22 @@ litellm_settings:
   drop_params: true # This strips 'thinking', etc. before sending to Copilot
 model_list:
-#  - model_name: github_copilot/gpt-4
-#    litellm_params:
-#      model: github_copilot/gpt-4
-#  - model_name: github_copilot/claude-sonnet-4.6
-#    litellm_params:
-#      model: github_copilot/claude-sonnet-4.6
-#  - model_name: github_copilot/gpt-5.1-codex
-#    model_info:
-#      mode: responses
-#    litellm_params:
-#      model: github_copilot/gpt-5.1-codex
-#  - model_name: github_copilot/text-embedding-ada-002
-#    model_info:
-#      mode: embedding
-#    litellm_params:
-#      model: github_copilot/text-embedding-ada-002
+  - model_name: gpt-4.1
+    litellm_params:
+      model: github_copilot/gpt-4.1
+  - model_name: claude-sonnet-4.6
+    litellm_params:
+      model: github_copilot/claude-sonnet-4.6
+  - model_name: gpt-5.1-codex
+    model_info:
+      mode: responses
+    litellm_params:
+      model: github_copilot/gpt-5.1-codex
+  - model_name: github_copilot/text-embedding-ada-002
+    model_info:
+      mode: embedding
+    litellm_params:
+      model: github_copilot/text-embedding-ada-002
   - model_name: glm5
     litellm_params:
       model: nvidia_nim/z-ai/glm5 # add nvidia_nim/ prefix to route as Nvidia NIM provider
diff --git a/start_litllm.sh b/start_litllm.sh
index db79220..3521a21 100644
--- a/start_litllm.sh
+++ b/start_litllm.sh
@@ -1 +1,2 @@
+export PORT=4000
 litellm --config config.yaml
\ No newline at end of file