fix: preserve slash model IDs for custom endpoints (fixes #548) — v0.50.53

fix: preserve slash model IDs for custom endpoints (fixes #548) — v0.50.53
This commit is contained in:
nesquena-hermes
2026-04-15 15:13:23 -07:00
committed by GitHub
3 changed files with 14 additions and 5 deletions

View File

@@ -1,5 +1,10 @@
# Hermes Web UI -- Changelog
## [v0.50.53] — 2026-04-15
### Fixed
- **Custom endpoint slash model IDs** — model IDs with vendor prefixes that are intrinsic (e.g. `zai-org/GLM-5.1` on DeepInfra) are now preserved when routing to a custom `base_url` endpoint. Previously, all prefixed IDs were stripped, causing `model_not_found` errors on providers that require the full vendor/model format. Known provider namespaces (`openai/`, `google/`, `anthropic/`, etc.) are still stripped as before. (Fixes #548, PR #549 by @eba8)
## [v0.50.52] — 2026-04-15
### Fixed

View File

@@ -637,10 +637,14 @@ def resolve_model_provider(model_id: str) -> tuple:
# just because the model name contains a slash (e.g. google/gemma-4-26b-a4b).
# The user has explicitly pointed at a base_url, so trust their routing config.
if config_base_url:
# Strip provider prefix (e.g. 'openai/gpt-5.4' -> 'gpt-5.4') so prefixed # Only strip the provider prefix when it's a known provider namespace
# model IDs from previous sessions don't break custom endpoint routing. # (e.g. "openai/gpt-5.4" → "gpt-5.4" for a custom OpenAI-compatible proxy).
bare_model = model_id.split('/', 1)[-1] # Unknown prefixes (e.g. "zai-org/GLM-5.1" on DeepInfra) are intrinsic to
return bare_model, config_provider, config_base_url # the model ID and must be preserved — stripping them causes model_not_found.
if prefix in _PROVIDER_MODELS:
return bare, config_provider, config_base_url
# Unknown prefix (not a named provider) — pass full model_id through.
return model_id, config_provider, config_base_url
# If prefix does NOT match config provider, the user picked a cross-provider model
# from the OpenRouter dropdown (e.g. config=anthropic but picked openai/gpt-5.4-mini).
# In this case always route through openrouter with the full provider/model string.

View File

@@ -553,7 +553,7 @@
<div class="settings-section-title">System</div>
<div class="settings-section-meta">Instance version and access controls.</div>
</div>
<span class="settings-version-badge">v0.50.52</span> <span class="settings-version-badge">v0.50.53</span>
</div>
<div class="settings-field" style="border-top:1px solid var(--border);padding-top:12px;margin-top:8px">
<label for="settingsPassword" data-i18n="settings_label_password">Access Password</label>