fix: route openai-codex live model fetch through agent get_codex_model_ids() (#410)
* fix: route openai-codex live model fetch through agent's get_codex_model_ids() Previously _handle_live_models() grouped openai-codex with openai and sent a request to https://api.openai.com/v1/models, which returns 403 because Codex auth is OAuth-based via chatgpt.com, not a standard API key. The live fetch silently failed and the UI showed only the hardcoded static list. Now: openai-codex has a dedicated early-exit branch that calls hermes_cli.codex_models.get_codex_model_ids() — the same path the agent CLI uses. It resolves models in order: live Codex API (if OAuth token available) > ~/.codex/ local cache > DEFAULT_CODEX_MODELS. This means: - If the user has a valid Codex OAuth session, the UI gets the exact model list their subscription provides (e.g. gpt-5.2, gpt-5.3-codex-spark that aren't in the hardcoded list) - If the OAuth session is expired, falls back to local ~/.codex/ cache - Always has DEFAULT_CODEX_MODELS as final fallback Also: improved label generation for Codex model IDs (GPT-5.4 Mini vs GPT 5 4 Mini). Added 1 structural regression test. * docs: v0.50.30 release — version badge and CHANGELOG --------- Co-authored-by: Nathan Esquenazi <nesquena@gmail.com>
This commit is contained in:
@@ -1,5 +1,14 @@
|
|||||||
# Hermes Web UI -- Changelog
|
# Hermes Web UI -- Changelog
|
||||||
|
|
||||||
|
## [v0.50.30] fix: openai-codex live model fetch routes through agent's get_codex_model_ids()
|
||||||
|
|
||||||
|
`_handle_live_models()` was grouping `openai-codex` with `openai` and sending `GET https://api.openai.com/v1/models` — which returns 403 because Codex auth is OAuth-based via `chatgpt.com`, not a standard API key. The live fetch silently failed, so users only ever saw the hardcoded static list.
|
||||||
|
|
||||||
|
- `api/routes.py`: dedicated early-return branch for `openai-codex` that calls `hermes_cli.codex_models.get_codex_model_ids()` — the same resolver the agent CLI uses. Resolution order: live Codex API (if OAuth token available, hits `chatgpt.com/backend-api/codex/models`) → `~/.codex/` local cache (written by the Codex CLI) → `DEFAULT_CODEX_MODELS` hardcoded fallback. Users with a valid Codex session now get their exact subscription model list including any models not in the hardcoded list.
|
||||||
|
- `api/routes.py`: improved label generation for Codex model IDs (e.g. `gpt-5.4-mini` → `GPT 5.4 Mini`)
|
||||||
|
- `tests/test_opencode_providers.py`: structural regression test verifying the dedicated `openai-codex` branch exists and calls `get_codex_model_ids()`
|
||||||
|
- 1038 tests total (up from 1037)
|
||||||
|
|
||||||
## [v0.50.29] fix: correct tool call card rendering on session load after context compaction (closes #401) (#402)
|
## [v0.50.29] fix: correct tool call card rendering on session load after context compaction (closes #401) (#402)
|
||||||
|
|
||||||
- `static/sessions.js`: replace the flat B9 filter in `loadSession()` with a full sanitization pass that builds `origIdxToSanitizedIdx` — each `session.tool_calls[].assistant_msg_idx` is remapped to the new sanitized-array position as messages are filtered; for tool calls whose empty-assistant host was filtered out, they attach to the nearest prior kept assistant
|
- `static/sessions.js`: replace the flat B9 filter in `loadSession()` with a full sanitization pass that builds `origIdxToSanitizedIdx` — each `session.tool_calls[].assistant_msg_idx` is remapped to the new sanitized-array position as messages are filtered; for tool calls whose empty-assistant host was filtered out, they attach to the nearest prior kept assistant
|
||||||
|
|||||||
@@ -1490,9 +1490,44 @@ def _handle_live_models(handler, parsed):
|
|||||||
except Exception:
|
except Exception:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
# openai-codex: use the agent's get_codex_model_ids() which calls the
|
||||||
|
# correct chatgpt.com/backend-api/codex/models endpoint with the OAuth
|
||||||
|
# token and also falls back to ~/.codex/ local cache and DEFAULT_CODEX_MODELS.
|
||||||
|
# This is the only path that can actually return the user's real Codex model list.
|
||||||
|
if provider == "openai-codex":
|
||||||
|
try:
|
||||||
|
from hermes_cli.codex_models import get_codex_model_ids as _get_codex_ids
|
||||||
|
access_token = None
|
||||||
|
try:
|
||||||
|
from hermes_cli.runtime_provider import resolve_runtime_provider as _rrt
|
||||||
|
rt2 = _rrt(requested="openai-codex")
|
||||||
|
access_token = rt2.get("api_key") or rt2.get("access_token")
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
ids = _get_codex_ids(access_token=access_token)
|
||||||
|
def _codex_label(mid):
|
||||||
|
# e.g. "gpt-5.4-mini" -> "GPT-5.4 Mini"
|
||||||
|
parts = mid.split("-")
|
||||||
|
result = []
|
||||||
|
for p in parts:
|
||||||
|
if p.lower() == "gpt":
|
||||||
|
result.append("GPT")
|
||||||
|
elif p[:1].isdigit():
|
||||||
|
result.append(p) # version numbers unchanged: 5.4, 5.1
|
||||||
|
else:
|
||||||
|
result.append(p.capitalize())
|
||||||
|
return " ".join(result)
|
||||||
|
models_out = [{"id": mid, "label": _codex_label(mid)} for mid in ids if mid]
|
||||||
|
return j(handler, {"provider": provider, "models": models_out,
|
||||||
|
"count": len(models_out)})
|
||||||
|
except Exception as _ce:
|
||||||
|
logger.debug("Codex live model fetch failed: %s", _ce)
|
||||||
|
# Fall through to static list (handled by get_available_models())
|
||||||
|
return j(handler, {"error": str(_ce), "models": []})
|
||||||
|
|
||||||
# Determine the /v1/models endpoint URL
|
# Determine the /v1/models endpoint URL
|
||||||
if not base_url:
|
if not base_url:
|
||||||
if provider in ("openai", "openai-codex", "copilot"):
|
if provider in ("openai", "copilot"):
|
||||||
base_url = "https://api.openai.com/v1"
|
base_url = "https://api.openai.com/v1"
|
||||||
elif provider == "openrouter":
|
elif provider == "openrouter":
|
||||||
base_url = "https://openrouter.ai/api/v1"
|
base_url = "https://openrouter.ai/api/v1"
|
||||||
|
|||||||
@@ -535,7 +535,7 @@
|
|||||||
<div class="settings-section-title">System</div>
|
<div class="settings-section-title">System</div>
|
||||||
<div class="settings-section-meta">Instance version and access controls.</div>
|
<div class="settings-section-meta">Instance version and access controls.</div>
|
||||||
</div>
|
</div>
|
||||||
<span class="settings-version-badge">v0.50.29</span>
|
<span class="settings-version-badge">v0.50.30</span>
|
||||||
</div>
|
</div>
|
||||||
<div class="settings-field" style="border-top:1px solid var(--border);padding-top:12px;margin-top:8px">
|
<div class="settings-field" style="border-top:1px solid var(--border);padding-top:12px;margin-top:8px">
|
||||||
<label for="settingsPassword" data-i18n="settings_label_password">Access Password</label>
|
<label for="settingsPassword" data-i18n="settings_label_password">Access Password</label>
|
||||||
|
|||||||
@@ -84,3 +84,31 @@ def test_openai_codex_display_name():
|
|||||||
"""openai-codex must have a human-readable display name."""
|
"""openai-codex must have a human-readable display name."""
|
||||||
assert "openai-codex" in config._PROVIDER_DISPLAY
|
assert "openai-codex" in config._PROVIDER_DISPLAY
|
||||||
assert config._PROVIDER_DISPLAY["openai-codex"] == "OpenAI Codex"
|
assert config._PROVIDER_DISPLAY["openai-codex"] == "OpenAI Codex"
|
||||||
|
|
||||||
|
|
||||||
|
def test_live_models_handler_uses_codex_agent_path():
    """_handle_live_models for openai-codex must use get_codex_model_ids(), not the
    standard /v1/models endpoint (which returns 403 for OAuth-based Codex auth).

    Verify structurally that the routes.py handler has a dedicated codex branch.
    """
    import pathlib

    routes_src = (pathlib.Path(__file__).parent.parent / "api" / "routes.py").read_text()

    # Must have a dedicated openai-codex branch before any base_url assignment.
    codex_branch = routes_src.find('provider == "openai-codex"')
    assert codex_branch != -1, (
        "_handle_live_models must have a dedicated openai-codex branch "
        "that uses get_codex_model_ids() instead of /v1/models"
    )

    # Must delegate to the agent's get_codex_model_ids resolver.
    assert "get_codex_model_ids" in routes_src, (
        "_handle_live_models must call hermes_cli.codex_models.get_codex_model_ids() "
        "for openai-codex provider"
    )

    # Must NOT route openai-codex through the standard OpenAI base URL
    # (the old bug: openai-codex was grouped with openai and sent to api.openai.com).
    # Check the FIRST occurrence of the base URL: it must either not exist or
    # appear only after the codex early-return branch.  (Searching only from
    # codex_branch onward would miss an occurrence placed before the branch.)
    first_openai_url = routes_src.find('"https://api.openai.com/v1"')
    assert first_openai_url == -1 or first_openai_url > codex_branch, (
        "openai-codex must be handled before the api.openai.com/v1 fallback, "
        "not grouped with it"
    )
|
||||||
|
|||||||
Reference in New Issue
Block a user