fix(model dropdown): stop injecting default_model into unrelated providers — v0.50.89 (PR #672 by @franksong2702)

This commit is contained in:
nesquena-hermes
2026-04-19 04:18:43 +00:00
3 changed files with 69 additions and 18 deletions

View File

@@ -13,7 +13,12 @@
### Added ### Added
- **Searchable model picker** — the model dropdown now has a live search input at the top. Type any part of a model name or ID to filter the list instantly; provider group headers (Anthropic, OpenAI, OpenRouter, etc.) remain visible in filtered results. Includes a clear button, Escape-to-close support, and a "No models found" empty state. i18n strings added for English, Spanish, and zh-CN. (PR #659 by @mmartial) - **Searchable model picker** — the model dropdown now has a live search input at the top. Type any part of a model name or ID to filter the list instantly; provider group headers (Anthropic, OpenAI, OpenRouter, etc.) remain visible in filtered results. Includes a clear button, Escape-to-close support, and a "No models found" empty state. i18n strings added for English, Spanish, and zh-CN. (PR #659 by @mmartial)
## [v0.50.89] — 2026-04-19
### Fixed
- **System Preferences model dropdown no longer misattributes the default model to unrelated providers** — the `/api/models` builder no longer injects the global `default_model` into unknown provider groups such as `Alibaba` or `Minimax-Cn`. When a provider has no real model catalog of its own, it is now omitted from the dropdown instead of showing a misleading placeholder like `gpt-5.4-mini`. If the active provider still needs a default fallback, it is shown in a separate `Default` group rather than being mixed into another provider's models.
## [v0.50.85] — 2026-04-18
### Fixed ### Fixed
- **`_provider_oauth_authenticated()` now respects the `hermes_home` parameter** — the function had a CLI fast path (`hermes_cli.auth.get_auth_status()`) that ignored the caller-supplied `hermes_home` and read from the real system home. On machines where `openai-codex` (or another OAuth provider) was genuinely authenticated, this caused three test assertions to return `True` instead of `False`, regardless of the isolated `tmp_path` the test passed in. Removed the CLI fast path; the function now reads exclusively from `hermes_home/auth.json`, which is both the correct scoped behavior and what the docstring described. No functional change for production (the auth.json path was already the complete fallback). (Fixes pre-existing test_sprint34 failures) - **`_provider_oauth_authenticated()` now respects the `hermes_home` parameter** — the function had a CLI fast path (`hermes_cli.auth.get_auth_status()`) that ignored the caller-supplied `hermes_home` and read from the real system home. On machines where `openai-codex` (or another OAuth provider) was genuinely authenticated, this caused three test assertions to return `True` instead of `False`, regardless of the isolated `tmp_path` the test passed in. Removed the CLI fast path; the function now reads exclusively from `hermes_home/auth.json`, which is both the correct scoped behavior and what the docstring described. No functional change for production (the auth.json path was already the complete fallback). (Fixes pre-existing test_sprint34 failures)
@@ -1641,4 +1646,3 @@ Critical regressions introduced during the server.py split, caught by users and
- **Regression test file added** (`tests/test_regressions.py`): 10 tests, one per introduced bug. These form a permanent regression gate so each class of error can never silently return. - **Regression test file added** (`tests/test_regressions.py`): 10 tests, one per introduced bug. These form a permanent regression gate so each class of error can never silently return.
--- ---

View File

@@ -1123,7 +1123,9 @@ def get_available_models() -> dict:
) )
else: else:
# Unknown provider -- use auto-detected models if available, # Unknown provider -- use auto-detected models if available,
# otherwise fall back to default model placeholder # otherwise skip it for the model dropdown. Do NOT inject the
# global default_model here: that would incorrectly imply the
# provider can serve the default model (e.g. Alibaba -> gpt-5.4-mini).
if auto_detected_models: if auto_detected_models:
groups.append( groups.append(
{ {
@@ -1131,19 +1133,6 @@ def get_available_models() -> dict:
"models": auto_detected_models, "models": auto_detected_models,
} }
) )
else:
if default_model:
groups.append(
{
"provider": provider_name,
"models": [
{
"id": default_model,
"label": default_model.split("/")[-1],
}
],
}
)
else: else:
# No providers detected. Show only the configured default model so the user # No providers detected. Show only the configured default model so the user
# can at least send messages with their current setting. Avoid showing a # can at least send messages with their current setting. Avoid showing a
@@ -1186,7 +1175,14 @@ def get_available_models() -> dict:
injected = True injected = True
break break
if not injected and groups: if not injected and groups:
groups[0]["models"].insert(0, {"id": default_model, "label": label}) # Keep the default isolated rather than polluting the first
# detected provider group.
groups.append(
{
"provider": "Default",
"models": [{"id": default_model, "label": label}],
}
)
elif not groups: elif not groups:
groups.append( groups.append(
{ {

View File

@@ -245,6 +245,10 @@ def _available_models_with_full_cfg(provider, default, base_url):
'default': default, 'default': default,
'base_url': base_url, 'base_url': base_url,
} }
try:
_cfg._cfg_mtime = _cfg.Path(_cfg._get_config_path()).stat().st_mtime
except Exception:
pass
# Clear model-override env vars to prevent the real profile from leaking in # Clear model-override env vars to prevent the real profile from leaking in
_model_env_keys = ('HERMES_MODEL', 'OPENAI_MODEL', 'LLM_MODEL') _model_env_keys = ('HERMES_MODEL', 'OPENAI_MODEL', 'LLM_MODEL')
_saved_env = {k: os.environ.pop(k, None) for k in _model_env_keys} _saved_env = {k: os.environ.pop(k, None) for k in _model_env_keys}
@@ -318,16 +322,59 @@ def test_default_model_lands_under_active_provider_group(monkeypatch):
) )
groups = {g['provider']: [m['id'] for m in g['models']] for g in result['groups']} groups = {g['provider']: [m['id'] for m in g['models']] for g in result['groups']}
assert 'OpenAI Codex' in groups, f"OpenAI Codex group missing: {list(groups)}" assert 'OpenAI Codex' in groups, f"OpenAI Codex group missing: {list(groups)}"
assert 'gpt-5.4' in groups['OpenAI Codex'], ( norm = lambda mid: mid.split('/', 1)[-1].split(':', 1)[-1]
assert 'gpt-5.4' in {norm(mid) for mid in groups['OpenAI Codex']}, (
f"gpt-5.4 not in OpenAI Codex group; contents: {groups['OpenAI Codex']}" f"gpt-5.4 not in OpenAI Codex group; contents: {groups['OpenAI Codex']}"
) )
# And crucially, it must NOT have landed in the alphabetically-first # And crucially, it must NOT have landed in the alphabetically-first
# group (Anthropic) via the fallback path. # group (Anthropic) via the fallback path.
assert 'gpt-5.4' not in groups.get('Anthropic', []), ( assert 'gpt-5.4' not in {norm(mid) for mid in groups.get('Anthropic', [])}, (
f"gpt-5.4 leaked into Anthropic group via fallback: {groups.get('Anthropic')}" f"gpt-5.4 leaked into Anthropic group via fallback: {groups.get('Anthropic')}"
) )
def test_unknown_providers_do_not_inherit_default_model(monkeypatch):
    """Detected providers without their own model catalog must not be filled
    with the global default_model placeholder.

    Regression guard for the bug where Alibaba / Minimax-Cn ended up showing
    gpt-5.4-mini even though those providers do not serve it.
    """
    import sys
    import types

    # Stub out the provider discovery so three providers look authenticated,
    # two of which (alibaba, minimax-cn) have no model catalog of their own.
    providers_stub = types.ModuleType('hermes_cli.models')
    providers_stub.list_available_providers = lambda: [
        {'id': 'openai-codex', 'authenticated': True},
        {'id': 'alibaba', 'authenticated': True},
        {'id': 'minimax-cn', 'authenticated': True},
    ]
    auth_stub = types.ModuleType('hermes_cli.auth')
    auth_stub.get_auth_status = lambda pid: {'key_source': 'env'}
    monkeypatch.setitem(sys.modules, 'hermes_cli.models', providers_stub)
    monkeypatch.setitem(sys.modules, 'hermes_cli.auth', auth_stub)

    result = _available_models_with_full_cfg(
        provider='openai-codex',
        default='gpt-5.4-mini',
        base_url='',
    )

    # Map each provider group to the raw model ids it exposes.
    groups = {}
    for group in result['groups']:
        groups[group['provider']] = [entry['id'] for entry in group['models']]

    def norm(mid):
        # Strip any "vendor/" prefix and "alias:" prefix from a model id.
        return mid.split('/', 1)[-1].split(':', 1)[-1]

    assert 'Alibaba' not in groups, (
        f"Alibaba should not inherit the default model placeholder: {groups}"
    )
    assert 'Minimax-Cn' not in groups, (
        f"Minimax-Cn should not inherit the default model placeholder: {groups}"
    )
    # Defensive re-check: even if the groups exist, none of their models may
    # normalize to the global default.
    suspect_ids = groups.get('Alibaba', []) + groups.get('Minimax-Cn', [])
    assert all(norm(mid) != 'gpt-5.4-mini' for mid in suspect_ids), (
        f"Unknown provider groups still inherited the default model: {groups}"
    )
def test_custom_endpoint_uses_model_config_api_key_for_model_discovery(monkeypatch): def test_custom_endpoint_uses_model_config_api_key_for_model_discovery(monkeypatch):
"""Custom endpoint model discovery must use model.api_key from config.yaml, """Custom endpoint model discovery must use model.api_key from config.yaml,
not only environment variables, otherwise the dropdown collapses to the not only environment variables, otherwise the dropdown collapses to the
@@ -342,6 +389,10 @@ def test_custom_endpoint_uses_model_config_api_key_for_model_discovery(monkeypat
'base_url': 'https://example.test/v1', 'base_url': 'https://example.test/v1',
'api_key': 'sk-test-model-key', 'api_key': 'sk-test-model-key',
} }
try:
_cfg._cfg_mtime = _cfg.Path(_cfg._get_config_path()).stat().st_mtime
except Exception:
pass
_cfg.cfg.pop('providers', None) _cfg.cfg.pop('providers', None)
captured = {} captured = {}