diff --git a/api/config.py b/api/config.py
index 45dc2e0..6f7ff9d 100644
--- a/api/config.py
+++ b/api/config.py
@@ -448,6 +448,8 @@ _PROVIDER_DISPLAY = {
     "huggingface": "HuggingFace",
     "alibaba": "Alibaba",
     "ollama": "Ollama",
+    "opencode-zen": "OpenCode Zen",
+    "opencode-go": "OpenCode Go",
     "lmstudio": "LM Studio",
 }
 
@@ -509,6 +511,51 @@ _PROVIDER_MODELS = {
         {"id": "claude-sonnet-4.6", "label": "Claude Sonnet 4.6"},
         {"id": "gemini-2.5-pro", "label": "Gemini 2.5 Pro"},
     ],
+    # OpenCode Zen — curated models via opencode.ai/zen (pay-as-you-go credits)
+    "opencode-zen": [
+        {"id": "gpt-5.4-pro", "label": "GPT-5.4 Pro"},
+        {"id": "gpt-5.4", "label": "GPT-5.4"},
+        {"id": "gpt-5.4-mini", "label": "GPT-5.4 Mini"},
+        {"id": "gpt-5.4-nano", "label": "GPT-5.4 Nano"},
+        {"id": "gpt-5.3-codex", "label": "GPT-5.3 Codex"},
+        {"id": "gpt-5.3-codex-spark", "label": "GPT-5.3 Codex Spark"},
+        {"id": "gpt-5.2", "label": "GPT-5.2"},
+        {"id": "gpt-5.2-codex", "label": "GPT-5.2 Codex"},
+        {"id": "gpt-5.1", "label": "GPT-5.1"},
+        {"id": "gpt-5.1-codex", "label": "GPT-5.1 Codex"},
+        {"id": "gpt-5.1-codex-max", "label": "GPT-5.1 Codex Max"},
+        {"id": "gpt-5.1-codex-mini", "label": "GPT-5.1 Codex Mini"},
+        {"id": "gpt-5", "label": "GPT-5"},
+        {"id": "gpt-5-codex", "label": "GPT-5 Codex"},
+        {"id": "gpt-5-nano", "label": "GPT-5 Nano"},
+        {"id": "claude-opus-4-6", "label": "Claude Opus 4.6"},
+        {"id": "claude-opus-4-5", "label": "Claude Opus 4.5"},
+        {"id": "claude-opus-4-1", "label": "Claude Opus 4.1"},
+        {"id": "claude-sonnet-4-6", "label": "Claude Sonnet 4.6"},
+        {"id": "claude-sonnet-4-5", "label": "Claude Sonnet 4.5"},
+        {"id": "claude-sonnet-4", "label": "Claude Sonnet 4"},
+        {"id": "claude-haiku-4-5", "label": "Claude Haiku 4.5"},
+        {"id": "claude-3-5-haiku", "label": "Claude 3.5 Haiku"},
+        {"id": "gemini-3.1-pro", "label": "Gemini 3.1 Pro"},
+        {"id": "gemini-3-flash", "label": "Gemini 3 Flash"},
+        {"id": "glm-5.1", "label": "GLM-5.1"},
+        {"id": "glm-5", "label": "GLM-5"},
+        {"id": "kimi-k2.5", "label": "Kimi K2.5"},
+        {"id": "minimax-m2.5", "label": "MiniMax M2.5"},
+        {"id": "minimax-m2.5-free", "label": "MiniMax M2.5 Free"},
+        {"id": "nemotron-3-super-free", "label": "Nemotron 3 Super Free"},
+        {"id": "big-pickle", "label": "Big Pickle"},
+    ],
+    # OpenCode Go — flat-rate models via opencode.ai/go ($10/month)
+    "opencode-go": [
+        {"id": "glm-5.1", "label": "GLM-5.1"},
+        {"id": "glm-5", "label": "GLM-5"},
+        {"id": "kimi-k2.5", "label": "Kimi K2.5"},
+        {"id": "mimo-v2-pro", "label": "MiMo V2 Pro"},
+        {"id": "mimo-v2-omni", "label": "MiMo V2 Omni"},
+        {"id": "minimax-m2.7", "label": "MiniMax M2.7"},
+        {"id": "minimax-m2.5", "label": "MiniMax M2.5"},
+    ],
     # 'gemini' is the hermes_cli provider ID for Google AI Studio
     "gemini": [
         {"id": "gemini-2.5-pro", "label": "Gemini 2.5 Pro"},
@@ -710,6 +757,8 @@ def get_available_models() -> dict:
         "GLM_API_KEY",
         "KIMI_API_KEY",
         "DEEPSEEK_API_KEY",
+        "OPENCODE_ZEN_API_KEY",
+        "OPENCODE_GO_API_KEY",
     ):
         val = os.getenv(k)
         if val:
@@ -730,6 +779,10 @@ def get_available_models() -> dict:
         detected_providers.add("minimax")
     if all_env.get("DEEPSEEK_API_KEY"):
         detected_providers.add("deepseek")
+    if all_env.get("OPENCODE_ZEN_API_KEY"):
+        detected_providers.add("opencode-zen")
+    if all_env.get("OPENCODE_GO_API_KEY"):
+        detected_providers.add("opencode-go")
 
     # 3. Fetch models from custom endpoint if base_url is configured
     auto_detected_models = []
diff --git a/tests/test_opencode_providers.py b/tests/test_opencode_providers.py
new file mode 100644
index 0000000..a4efd94
--- /dev/null
+++ b/tests/test_opencode_providers.py
@@ -0,0 +1,70 @@
+"""
+Tests for OpenCode Zen and OpenCode Go provider support.
+Verifies provider registration in display/model catalogs and
+env-var fallback detection.
+"""
+import os
+import sys
+import types
+import api.config as config
+
+
+# ── Provider registration ─────────────────────────────────────────────
+
+def test_opencode_zen_in_provider_display():
+    assert "opencode-zen" in config._PROVIDER_DISPLAY
+    assert config._PROVIDER_DISPLAY["opencode-zen"] == "OpenCode Zen"
+
+
+def test_opencode_go_in_provider_display():
+    assert "opencode-go" in config._PROVIDER_DISPLAY
+    assert config._PROVIDER_DISPLAY["opencode-go"] == "OpenCode Go"
+
+
+def test_opencode_zen_in_provider_models():
+    assert "opencode-zen" in config._PROVIDER_MODELS
+    ids = [m["id"] for m in config._PROVIDER_MODELS["opencode-zen"]]
+    assert "claude-opus-4-6" in ids
+    assert "gpt-5.4-pro" in ids
+    assert "glm-5.1" in ids
+
+
+def test_opencode_go_in_provider_models():
+    assert "opencode-go" in config._PROVIDER_MODELS
+    ids = [m["id"] for m in config._PROVIDER_MODELS["opencode-go"]]
+    assert "glm-5.1" in ids
+    assert "glm-5" in ids
+    assert "mimo-v2-pro" in ids
+
+
+# ── Env-var fallback detection ────────────────────────────────────────
+
+def _models_with_env_key(monkeypatch, env_var, expected_provider_display):
+    """Helper: fake hermes_cli unavailable, set an env var, check detection."""
+    # Force the env-var fallback path by making hermes_cli import fail
+    fake_mod = types.ModuleType("hermes_cli.models")
+    fake_mod.list_available_providers = None  # will raise on call
+    monkeypatch.setitem(sys.modules, "hermes_cli.models", fake_mod)
+    monkeypatch.delattr(fake_mod, "list_available_providers")
+
+    old_cfg = dict(config.cfg)
+    config.cfg["model"] = {}
+    config.cfg.pop("custom_providers", None)
+    monkeypatch.setenv(env_var, "test-key")
+    try:
+        result = config.get_available_models()
+        providers = [g["provider"] for g in result["groups"]]
+        assert expected_provider_display in providers, (
+            f"Expected {expected_provider_display} in {providers}"
+        )
+    finally:
+        config.cfg.clear()
+        config.cfg.update(old_cfg)
+
+
+def test_opencode_zen_detected_via_env_key(monkeypatch):
+    _models_with_env_key(monkeypatch, "OPENCODE_ZEN_API_KEY", "OpenCode Zen")
+
+
+def test_opencode_go_detected_via_env_key(monkeypatch):
+    _models_with_env_key(monkeypatch, "OPENCODE_GO_API_KEY", "OpenCode Go")