43 changes: 36 additions & 7 deletions openhands_cli/tui/modals/settings/choices.py
@@ -1,20 +1,49 @@
+import litellm
+
 from openhands.sdk.llm import UNVERIFIED_MODELS_EXCLUDING_BEDROCK, VERIFIED_MODELS
 
 
+# Get set of valid litellm provider names for filtering
+# See: https://docs.litellm.ai/docs/providers
+_VALID_LITELLM_PROVIDERS: set[str] = {
+    str(getattr(p, "value", p)) for p in litellm.provider_list
+}
+
+
 def get_provider_options() -> list[tuple[str, str]]:
-    """Get list of available LLM providers."""
-    providers = list(VERIFIED_MODELS.keys()) + list(
-        UNVERIFIED_MODELS_EXCLUDING_BEDROCK.keys()
-    )
-    return [(provider, provider) for provider in providers]
+    """Get list of available LLM providers.
+
+    Includes:
+    - All VERIFIED_MODELS providers (openhands, openai, anthropic, mistral)
+      even if not in litellm.provider_list (e.g. 'openhands' is custom)
+    - UNVERIFIED providers that are known to litellm (filters out invalid
+      "providers" like 'meta-llama', 'Qwen' which are vendor names)
+
+    Sorted alphabetically.
+    """
+    # Verified providers always included (includes custom like 'openhands')
+    verified_providers = set(VERIFIED_MODELS.keys())
+
+    # Unverified providers are filtered to only valid litellm providers
+    unverified_providers = set(UNVERIFIED_MODELS_EXCLUDING_BEDROCK.keys())
+    valid_unverified = unverified_providers & _VALID_LITELLM_PROVIDERS
Collaborator:
@OpenHands investigate where the unverified models come from in the SDK, and make an issue on the SDK repo to clean those up / verify them as best we can; e.g. maybe we should refactor this code to live there.
(Clone yourself agent-sdk from the same org.)


I'm on it! enyst can track my progress at all-hands.dev


Final summary (work completed)

What I investigated

  • Cloned All-Hands-AI/agent-sdk into /workspace/project/agent-sdk.
  • Found the source of “unverified models” in the SDK:
    • openhands-sdk/openhands/sdk/llm/utils/unverified_models.py
    • get_supported_llm_models() aggregates litellm.model_list + litellm.model_cost.keys() (plus optional Bedrock via boto3).
    • get_unverified_models() then naively splits model identifiers on / (or .) and treats the first segment as a provider.
  • Confirmed this is why bogus “providers” appear (e.g. 1024-x-1024, low, medium, us, eu, etc.): LiteLLM includes parameterized/region-prefixed entries in its registries, which the SDK currently misclassifies as providers.
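For illustration, here is a minimal sketch of that first-segment heuristic (editor's illustration of the splitting described above, not the SDK's exact code; the model identifiers are hypothetical stand-ins for the kinds of entries in LiteLLM's registries):

# Sketch of the naive split-on-'/' (or '.') heuristic described above.
# These model ids are hypothetical stand-ins for litellm registry entries.
model_ids = [
    "openai/gpt-4o",                    # first segment is a real provider
    "us.anthropic.claude-3-5-sonnet",   # 'us' is a region prefix, not a provider
    "1024-x-1024/dall-e-3",             # parameterized image-size entry
]

# Treat the first '/'-delimited segment as the provider
providers = {m.replace(".", "/").split("/", 1)[0] for m in model_ids}
print(sorted(providers))  # ['1024-x-1024', 'openai', 'us']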

Issue created on the SDK repo

Checklist against your request

  • Investigated where unverified models come from in the SDK
  • Cloned agent-sdk from the same org
  • Created an issue on the SDK repo proposing cleanup/verification/refactor

Git / code changes

  • No code changes were made in OpenHands-CLI or agent-sdk for this request, so there is no git diff, and nothing to commit/push.

+    # Combine and sort
+    all_valid_providers = sorted(verified_providers | valid_unverified)
+
+    return [(provider, provider) for provider in all_valid_providers]
 
 
 def get_model_options(provider: str) -> list[tuple[str, str]]:
-    """Get list of available models for a provider."""
+    """Get list of available models for a provider, sorted alphabetically."""
     models = VERIFIED_MODELS.get(
         provider, []
     ) + UNVERIFIED_MODELS_EXCLUDING_BEDROCK.get(provider, [])
-    return [(model, model) for model in models]
+
+    # Remove duplicates and sort
+    unique_models = sorted(set(models))
+
+    return [(model, model) for model in unique_models]
 
 
 provider_options = get_provider_options()
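As a sanity check, a quick sketch of how the new filtering behaves (the provider dicts below are illustrative stand-ins, not the SDK's actual data; litellm.provider_list is the same registry the code above consults):

import litellm

# Hypothetical stand-ins for the SDK's provider mappings
VERIFIED_MODELS = {"openhands": ["claude-sonnet-4"], "openai": ["gpt-4o"]}
UNVERIFIED_MODELS_EXCLUDING_BEDROCK = {
    "openrouter": ["google/gemini-3-flash-preview"],
    "meta-llama": ["Llama-3.3-70B"],  # vendor name, not a litellm provider
}

valid = {str(getattr(p, "value", p)) for p in litellm.provider_list}
providers = sorted(
    set(VERIFIED_MODELS) | (set(UNVERIFIED_MODELS_EXCLUDING_BEDROCK) & valid)
)
# 'openhands' is kept (verified, even though litellm doesn't know it);
# 'openrouter' is kept (a real litellm provider); 'meta-llama' is dropped.
print(providers)  # ['openai', 'openhands', 'openrouter']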
9 changes: 5 additions & 4 deletions openhands_cli/tui/modals/settings/utils.py
@@ -79,10 +79,11 @@ def get_full_model_name(self):
             return str(self.custom_model)
 
         model_str = str(self.model)
-        full_model = (
-            f"{self.provider}/{model_str}" if "/" not in model_str else model_str
-        )
-        return full_model
+
+        # Always add provider prefix - litellm requires it for routing,
+        # even if model contains '/' (e.g. "openai/gpt-4.1" from openrouter).
+        # See: https://docs.litellm.ai/docs/providers
+        return f"{self.provider}/{model_str}"
Collaborator:
This makes perfect sense, yes; there are multiple providers like that, which basically proxy to others.



 class SettingsSaveResult(BaseModel):
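To make the behavior change concrete, a minimal sketch (the Settings class here is a hypothetical stand-in; only get_full_model_name mirrors the code above):

from pydantic import BaseModel


class SettingsSketch(BaseModel):
    # Hypothetical stand-in for the real settings model
    provider: str
    model: str

    def get_full_model_name(self) -> str:
        # Always prefix with the provider; proxy providers like openrouter
        # route model ids that already contain '/'.
        return f"{self.provider}/{self.model}"


s = SettingsSketch(provider="openrouter", model="google/gemini-3-flash-preview")
print(s.get_full_model_name())  # openrouter/google/gemini-3-flash-preview

Previously the prefix was skipped whenever the model id already contained '/', which produced ids litellm could not route for proxy providers.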
34 changes: 34 additions & 0 deletions tests/tui/modals/settings/test_settings_utils.py
@@ -132,6 +132,40 @@ def test_preserves_existing_api_key_when_not_provided(
             None,  # advanced base_url cleared
             ("custom_model", "base_url"),
         ),
+        (
+            "basic",
+            "openrouter",
+            "google/gemini-3-flash-preview",
+            "should-be-cleared",
+            "https://advanced.example",
+            # All providers require prefix even for models with '/' in their name
+            # See: https://docs.litellm.ai/docs/providers/openrouter
+            "openrouter/google/gemini-3-flash-preview",
+            None,
+            ("custom_model", "base_url"),
+        ),
+        (
+            "basic",
+            "nvidia_nim",
+            "meta/llama3-70b-instruct",
+            "should-be-cleared",
+            "https://advanced.example",
+            # See: https://docs.litellm.ai/docs/providers/nvidia_nim
+            "nvidia_nim/meta/llama3-70b-instruct",
+            None,
+            ("custom_model", "base_url"),
+        ),
+        (
+            "basic",
+            "deepinfra",
+            "meta-llama/Meta-Llama-3.1-8B-Instruct",
+            "should-be-cleared",
+            "https://advanced.example",
+            # See: https://docs.litellm.ai/docs/providers
+            "deepinfra/meta-llama/Meta-Llama-3.1-8B-Instruct",
+            None,
+            ("custom_model", "base_url"),
+        ),
         (
             "advanced",
             "openai",