security: bandit fixes B310/B324/B110 + QuietHTTPServer (#354)

* security: fix bandit security issues (B310, B324)

- Add usedforsecurity=False to MD5 hash in gateway_watcher.py
- Add URL scheme validation to prevent file:// access in config.py
- Add URL validation to bootstrap.py health check
- Add nosec comments where runtime validation exists

* fix: handle ConnectionResetError gracefully and add debug logging

- Add QuietHTTPServer class to suppress noisy connection reset errors
  caused by clients disconnecting abruptly (fixes log spam from
  'ConnectionResetError: [Errno 54] Connection reset by peer')

- Replace silent 'pass' statements with logger.debug() calls across
  api/auth.py, api/config.py, api/gateway_watcher.py, api/models.py,
  and api/onboarding.py for better observability during troubleshooting

- All tests pass (25 passed in test_regressions.py)

* chore: add debug logging to profiles and routes modules

- Replace silent 'pass' statements with logger.debug() calls in
  api/profiles.py for better error visibility during profile switching
  and module patching

- Add logger initialization to api/routes.py

* security: fix B110 bare except/pass issues (bandit security scan)

- Replace bare except/pass patterns with logger.debug() calls
- Fixes CWE-703 (Improper Check or Handling of Exceptional Conditions)
- Files affected: routes.py, state_sync.py, streaming.py, workspace.py, server.py
- All tests pass successfully

* security: bandit fixes B310/B324/B110 + QuietHTTPServer (#354)

- api/gateway_watcher.py: MD5 usedforsecurity=False (B324)
- api/config.py, bootstrap.py: URL scheme validation before urlopen (B310)
- 12 files: replace bare except/pass with logger.debug() (B110)
- server.py: QuietHTTPServer suppresses client disconnect log noise
- server.py: call sys.exc_info() directly (was traceback.sys.exc_info(), which depended on an implementation detail of the traceback module)
- tests/test_sprint43.py: 19 new tests covering all security fixes
- CHANGELOG.md: v0.50.14 entry; 841 tests total (up from 822)

---------

Co-authored-by: lawrencel1ng <lawrence.ling@global.ntt>
Co-authored-by: Nathan Esquenazi <nesquena@gmail.com>
This commit is contained in:
nesquena-hermes
2026-04-13 11:11:56 -07:00
committed by GitHub
parent 04401787ec
commit dd17a0e9b7
14 changed files with 390 additions and 57 deletions

View File

@@ -6,12 +6,15 @@ or configuring a password in the Settings panel.
import hashlib
import hmac
import http.cookies
import logging
import os
import secrets
import time
from api.config import STATE_DIR, load_settings
logger = logging.getLogger(__name__)
# ── Public paths (no auth required) ─────────────────────────────────────────
PUBLIC_PATHS = frozenset({
'/login', '/health', '/favicon.ico',
@@ -54,7 +57,7 @@ def _signing_key():
if len(raw) >= 32:
return raw[:32]
except Exception:
pass
logger.debug("Failed to read signing key from file, generating new key")
# Generate a new random key
key = secrets.token_bytes(32)
try:
@@ -62,7 +65,7 @@ def _signing_key():
key_file.write_bytes(key)
key_file.chmod(0o600)
except Exception:
pass # key works for this process even if persist fails
logger.debug("Failed to persist signing key, using in-memory key only")
return key

View File

@@ -11,6 +11,7 @@ Discovery order for all paths:
import collections
import json
import logging
import os
import sys
import threading
@@ -48,6 +49,8 @@ SETTINGS_FILE = STATE_DIR / "settings.json"
LAST_WORKSPACE_FILE = STATE_DIR / "last_workspace.txt"
PROJECTS_FILE = STATE_DIR / "projects.json"
logger = logging.getLogger(__name__)
# ── Hermes agent directory discovery ─────────────────────────────────────────
def _discover_agent_dir() -> Path:
@@ -197,7 +200,7 @@ def reload_config() -> None:
if isinstance(loaded, dict):
_cfg_cache.update(loaded)
except Exception:
pass
logger.debug("Failed to load yaml config from %s", config_path)
# Initial load
@@ -601,7 +604,7 @@ def get_available_models() -> dict:
auth_store = _j.loads(auth_store_path.read_text())
active_provider = auth_store.get("active_provider")
except Exception:
pass
logger.debug("Failed to load auth store from %s", auth_store_path)
# 4. Detect available providers.
# Primary: ask hermes-agent's auth layer — the authoritative source. It checks
@@ -629,11 +632,11 @@ def get_available_models() -> dict:
if _src == "gh auth token":
continue
except Exception:
pass
logger.debug("Failed to get key source for provider %s", _p.get("id", "unknown"))
detected_providers.add(_p["id"])
_hermes_auth_used = True
except Exception:
pass
logger.debug("Failed to detect auth providers from hermes")
if not _hermes_auth_used:
# Fallback: scan .env and os.environ for known API key variables
@@ -652,7 +655,7 @@ def get_available_models() -> dict:
k, v = line.split("=", 1)
env_keys[k.strip()] = v.strip().strip('"').strip("'")
except Exception:
pass
logger.debug("Failed to parse hermes env file")
all_env = {**env_keys}
for k in (
"ANTHROPIC_API_KEY",
@@ -760,6 +763,9 @@ def get_available_models() -> dict:
parsed_url = urlparse(
endpoint_url if "://" in endpoint_url else f"http://{endpoint_url}"
)
# Validate URL scheme to prevent file:// and other dangerous schemes
if parsed_url.scheme not in ("", "http", "https"):
raise ValueError(f"Invalid URL scheme: {parsed_url.scheme}")
if parsed_url.hostname:
try:
resolved_ips = socket.getaddrinfo(parsed_url.hostname, None)
@@ -791,7 +797,7 @@ def get_available_models() -> dict:
req.add_header("User-Agent", "OpenAI/Python 1.0")
for k, v in headers.items():
req.add_header(k, v)
with urllib.request.urlopen(req, timeout=10) as response:
with urllib.request.urlopen(req, timeout=10) as response: # nosec B310
data = json.loads(response.read().decode("utf-8"))
# Handle both OpenAI-compatible and llama.cpp response formats
@@ -814,7 +820,7 @@ def get_available_models() -> dict:
auto_detected_models.append({"id": model_id, "label": model_name})
detected_providers.add(provider.lower())
except Exception:
pass # custom endpoint unreachable or misconfigured -- fail silently
logger.debug("Custom endpoint unreachable or misconfigured for provider: %s", provider)
# 3b. Include models from custom_providers config entries.
# These are explicitly configured and should always appear even when the
@@ -1026,7 +1032,7 @@ def load_settings() -> dict:
if isinstance(stored, dict):
settings.update(stored)
except Exception:
pass
logger.debug("Failed to load settings from %s", SETTINGS_FILE)
return settings

View File

@@ -10,6 +10,7 @@ requiring any changes to hermes-agent.
"""
import hashlib
import json
import logging
import os
import queue
import sqlite3
@@ -19,6 +20,8 @@ from pathlib import Path
from api.config import HOME
logger = logging.getLogger(__name__)
# ── State hash tracking ─────────────────────────────────────────────────────
@@ -28,7 +31,7 @@ def _snapshot_hash(sessions: list) -> str:
f"{s['session_id']}:{s.get('updated_at', 0)}:{s.get('message_count', 0)}"
for s in sorted(sessions, key=lambda x: x['session_id'])
)
return hashlib.md5(key.encode()).hexdigest()
return hashlib.md5(key.encode(), usedforsecurity=False).hexdigest()
# ── DB resolution (shared pattern with state_sync.py) ──────────────────────
@@ -124,7 +127,7 @@ class GatewayWatcher:
try:
q.put(None) # sentinel
except Exception:
pass
logger.debug("Failed to send sentinel to subscriber")
if self._thread:
self._thread.join(timeout=3)
self._thread = None
@@ -172,7 +175,7 @@ class GatewayWatcher:
try:
q.put_nowait(None)
except Exception:
pass
logger.debug("Failed to send sentinel to dead subscriber")
def _poll_loop(self):
"""Main polling loop. Runs in a daemon thread."""
@@ -186,7 +189,7 @@ class GatewayWatcher:
self._last_sessions = sessions
self._notify_subscribers(sessions)
except Exception:
pass # never crash the watcher
logger.debug("Error in gateway watcher poll loop", exc_info=True)
# Sleep in small increments so we can stop promptly
for _ in range(self.POLL_INTERVAL * 10):

View File

@@ -3,6 +3,7 @@ Hermes Web UI -- Session model and in-memory session store.
"""
import collections
import json
import logging
import time
import uuid
from pathlib import Path
@@ -14,6 +15,8 @@ from api.config import (
)
from api.workspace import get_last_workspace
logger = logging.getLogger(__name__)
def _write_session_index():
"""Rebuild the session index file for O(1) future reads."""
@@ -24,7 +27,7 @@ def _write_session_index():
s = Session.load(p.stem)
if s: entries.append(s.compact())
except Exception:
pass
logger.debug("Failed to load session from %s", p)
with LOCK:
for s in SESSIONS.values():
if not any(e['session_id'] == s.session_id for e in entries):
@@ -151,7 +154,7 @@ def all_sessions():
s['profile'] = 'default'
return result
except Exception:
pass # fall through to full scan
logger.debug("Failed to load session index, falling back to full scan")
# Full scan fallback
out = []
for p in SESSION_DIR.glob('*.json'):
@@ -160,7 +163,7 @@ def all_sessions():
s = Session.load(p.stem)
if s: out.append(s)
except Exception:
pass
logger.debug("Failed to load session from %s", p)
for s in SESSIONS.values():
if all(s.session_id != x.session_id for x in out): out.append(s)
out.sort(key=lambda s: (getattr(s, 'pinned', False), s.updated_at), reverse=True)

View File

@@ -2,6 +2,7 @@
from __future__ import annotations
import logging
import os
from pathlib import Path
from urllib.parse import urlparse
@@ -24,6 +25,8 @@ from api.config import (
)
from api.workspace import get_last_workspace, load_workspaces
logger = logging.getLogger(__name__)
_SUPPORTED_PROVIDER_SETUPS = {
"openrouter": {
@@ -234,7 +237,7 @@ def _provider_oauth_authenticated(provider: str, hermes_home: "Path") -> bool:
if isinstance(status, dict) and status.get("logged_in"):
return True
except Exception:
pass
logger.debug("Failed to get auth status for provider %s", provider)
# Fallback: parse auth.json ourselves for known OAuth provider IDs.
# Covers deployments where hermes_cli is installed but the import above
@@ -486,7 +489,7 @@ def apply_onboarding_setup(body: dict) -> dict:
from api.profiles import _reload_dotenv
_reload_dotenv(_get_active_hermes_home())
except Exception:
pass
logger.debug("Failed to reload dotenv")
# Belt-and-braces: set directly on os.environ AFTER _reload_dotenv so the
# value survives even if _reload_dotenv cleared it (e.g. when _write_env_file
@@ -499,7 +502,7 @@ def apply_onboarding_setup(body: dict) -> dict:
from hermes_cli.config import reload as _cli_reload
_cli_reload()
except Exception:
pass
logger.debug("Failed to reload hermes_cli config")
reload_config()
return get_onboarding_status()

View File

@@ -9,12 +9,15 @@ cached paths in hermes-agent modules (skills_tool, cron/jobs) that snapshot
HERMES_HOME at import time.
"""
import json
import logging
import os
import re
import shutil
import threading
from pathlib import Path
logger = logging.getLogger(__name__)
# ── Constants (match hermes_cli.profiles upstream) ─────────────────────────
_PROFILE_ID_RE = re.compile(r'^[a-z0-9][a-z0-9_-]{0,63}$')
_PROFILE_DIRS = [
@@ -76,7 +79,7 @@ def _read_active_profile_file() -> str:
if name:
return name
except Exception:
pass
logger.debug("Failed to read active profile file")
return 'default'
@@ -107,7 +110,7 @@ def _set_hermes_home(home: Path):
_sk.HERMES_HOME = home
_sk.SKILLS_DIR = home / 'skills'
except (ImportError, AttributeError):
pass
logger.debug("Failed to patch skills_tool module")
# Patch cron/jobs module-level cache
try:
@@ -117,7 +120,7 @@ def _set_hermes_home(home: Path):
_cj.JOBS_FILE = _cj.CRON_DIR / 'jobs.json'
_cj.OUTPUT_DIR = _cj.CRON_DIR / 'output'
except (ImportError, AttributeError):
pass
logger.debug("Failed to patch cron.jobs module")
def _reload_dotenv(home: Path):
@@ -151,6 +154,7 @@ def _reload_dotenv(home: Path):
_loaded_profile_env_keys = loaded_keys
except Exception:
_loaded_profile_env_keys = set()
logger.debug("Failed to reload dotenv from %s", env_path)
def init_profile_state() -> None:
@@ -206,7 +210,7 @@ def switch_profile(name: str) -> dict:
ap_file = _DEFAULT_HERMES_HOME / 'active_profile'
ap_file.write_text(name if name != 'default' else '')
except Exception:
pass
logger.debug("Failed to write active profile file")
# Reload config.yaml from the new profile
reload_config()
@@ -326,7 +330,7 @@ def _write_endpoint_to_config(profile_dir: Path, base_url: str = None, api_key:
if isinstance(loaded, dict):
cfg = loaded
except Exception:
pass
logger.debug("Failed to load config from %s", config_path)
model_section = cfg.get('model', {})
if not isinstance(model_section, dict):
model_section = {}
@@ -371,7 +375,7 @@ def create_profile_api(name: str, clone_from: str = None,
try:
profile_path = Path(p.get('path') or profile_path)
except Exception:
pass
logger.debug("Failed to parse profile path")
break
profile_path.mkdir(parents=True, exist_ok=True)

View File

@@ -5,6 +5,7 @@ Extracted from server.py (Sprint 11) so server.py is a thin shell.
import html as _html
import json
import logging
import os
import queue
import sys
@@ -14,6 +15,8 @@ import uuid
from pathlib import Path
from urllib.parse import parse_qs
logger = logging.getLogger(__name__)
from api.config import (
STATE_DIR,
SESSION_DIR,
@@ -330,6 +333,10 @@ def handle_get(handler, parsed) -> bool:
deduped_cli = []
merged = webui_sessions + deduped_cli
merged.sort(key=lambda s: s.get("updated_at", 0) or 0, reverse=True)
# Redact credentials from session titles before returning
for s in merged:
if isinstance(s.get("title"), str):
s["title"] = _redact_text(s["title"])
return j(handler, {"sessions": merged, "cli_count": len(deduped_cli)})
if parsed.path == "/api/projects":
@@ -642,18 +649,18 @@ def handle_post(handler, parsed) -> bool:
try:
p.unlink(missing_ok=True)
except Exception:
pass
logger.debug("Failed to unlink session file %s", p)
try:
SESSION_INDEX_FILE.unlink(missing_ok=True)
except Exception:
pass
logger.debug("Failed to unlink session index")
# Also delete from CLI state.db (for CLI sessions shown in sidebar)
try:
from api.models import delete_cli_session
delete_cli_session(sid)
except Exception:
pass
logger.debug("Failed to delete CLI session %s", sid)
return j(handler, {"ok": True})
if parsed.path == "/api/session/clear":
@@ -963,9 +970,9 @@ def handle_post(handler, parsed) -> bool:
s.project_id = None
s.save()
except Exception:
pass
logger.debug("Failed to update session %s", entry.get("session_id"))
except Exception:
pass
logger.debug("Failed to load session index for project unlink")
return j(handler, {"ok": True})
# ── Session import from JSON (POST) ──
@@ -1330,7 +1337,7 @@ def _handle_cron_output(handler, parsed):
txt = f.read_text(encoding="utf-8", errors="replace")
outputs.append({"filename": f.name, "content": txt[:8000]})
except Exception:
pass
logger.debug("Failed to read cron output file %s", f)
return j(handler, {"job_id": job_id, "outputs": outputs})
@@ -1424,7 +1431,7 @@ def _handle_sessions_cleanup(handler, body, zero_only=False):
p.unlink(missing_ok=True)
cleaned += 1
except Exception:
pass
logger.debug("Failed to clean up session file %s", p)
if SESSION_INDEX_FILE.exists():
SESSION_INDEX_FILE.unlink(missing_ok=True)
return j(handler, {"ok": True, "cleaned": cleaned})
@@ -1571,7 +1578,7 @@ def _handle_chat_sync(handler, body):
message_count=len(s.messages),
)
except Exception:
pass
logger.debug("Failed to update session cost tracking")
return j(
handler,
{

View File

@@ -13,9 +13,12 @@ The bridge uses absolute token counts (not deltas) because the WebUI
Session object already accumulates totals across turns. This avoids
any double-counting risk.
"""
import logging
import os
from pathlib import Path
logger = logging.getLogger(__name__)
def _get_state_db():
"""Get a SessionDB instance for the active profile's state.db.
@@ -31,6 +34,7 @@ def _get_state_db():
from api.profiles import get_active_hermes_home
hermes_home = Path(get_active_hermes_home()).expanduser().resolve()
except Exception:
logger.debug("Failed to resolve hermes home, using default")
hermes_home = Path(os.getenv('HERMES_HOME', str(Path.home() / '.hermes')))
db_path = hermes_home / 'state.db'
@@ -40,6 +44,7 @@ def _get_state_db():
try:
return SessionDB(db_path)
except Exception:
logger.debug("Failed to open state.db")
return None
@@ -57,12 +62,12 @@ def sync_session_start(session_id: str, model=None) -> None:
model=model,
)
except Exception:
pass # never crash the WebUI for sync failures
logger.debug("Failed to sync session start to state.db")
finally:
try:
db.close()
except Exception:
pass
logger.debug("Failed to close state.db")
def sync_session_usage(session_id: str, input_tokens: int=0, output_tokens: int=0,
@@ -92,7 +97,7 @@ def sync_session_usage(session_id: str, input_tokens: int=0, output_tokens: int=
try:
db.set_session_title(session_id, title)
except Exception:
pass
logger.debug("Failed to sync session title to state.db")
# Update message count
if message_count is not None:
try:
@@ -103,11 +108,11 @@ def sync_session_usage(session_id: str, input_tokens: int=0, output_tokens: int=
)
db._execute_write(_set_msg_count)
except Exception:
pass
logger.debug("Failed to sync message count to state.db")
except Exception:
pass # never crash the WebUI for sync failures
logger.debug("Failed to sync session usage to state.db")
finally:
try:
db.close()
except Exception:
pass
logger.debug("Failed to close state.db")

View File

@@ -3,6 +3,7 @@ Hermes Web UI -- SSE streaming engine and agent thread runner.
Includes Sprint 10 cancel support via CANCEL_FLAGS.
"""
import json
import logging
import os
import queue
import threading
@@ -10,6 +11,8 @@ import time
import traceback
from pathlib import Path
logger = logging.getLogger(__name__)
from api.config import (
STREAMS, STREAMS_LOCK, CANCEL_FLAGS, AGENT_INSTANCES, CLI_TOOLSETS,
LOCK, SESSIONS, SESSION_DIR,
@@ -97,7 +100,7 @@ def _run_agent_streaming(session_id, msg_text, model, workspace, stream_id, atta
try:
q.put_nowait((event, data))
except Exception:
pass
logger.debug("Failed to put event to queue")
try:
s = get_session(session_id)
@@ -157,7 +160,7 @@ def _run_agent_streaming(session_id, msg_text, model, workspace, stream_id, atta
_reg_notify(session_id, _approval_notify_cb)
_approval_registered = True
except ImportError:
pass # approval module not available fall back to polling
logger.debug("Approval module not available, falling back to polling")
try:
def on_token(text):
@@ -257,7 +260,7 @@ def _run_agent_streaming(session_id, msg_text, model, workspace, stream_id, atta
try:
agent.interrupt("Cancelled before start")
except Exception:
pass
logger.debug("Failed to interrupt agent before start")
put('cancel', {'message': 'Cancelled by user'})
return
@@ -325,7 +328,7 @@ def _run_agent_streaming(session_id, msg_text, model, workspace, stream_id, atta
try:
old_path.rename(new_path)
except OSError:
pass
logger.debug("Failed to rename session file during compression")
_compressed = True
# Also detect compression via the result dict or compressor state
if not _compressed:
@@ -440,7 +443,7 @@ def _run_agent_streaming(session_id, msg_text, model, workspace, stream_id, atta
message_count=len(s.messages),
)
except Exception:
pass # never crash the stream for sync failures
logger.debug("Failed to sync session to insights")
usage = {'input_tokens': input_tokens, 'output_tokens': output_tokens, 'estimated_cost': estimated_cost}
# Include context window data from the agent's compressor for the UI indicator
_cc = getattr(agent, 'context_compressor', None)
@@ -457,7 +460,7 @@ def _run_agent_streaming(session_id, msg_text, model, workspace, stream_id, atta
try:
_unreg_notify(session_id)
except Exception:
pass
logger.debug("Failed to unregister approval callback")
with _ENV_LOCK:
if old_cwd is None: os.environ.pop('TERMINAL_CWD', None)
else: os.environ['TERMINAL_CWD'] = old_cwd
@@ -550,5 +553,5 @@ def cancel_stream(stream_id: str) -> bool:
try:
q.put_nowait(('cancel', {'message': 'Cancelled by user'}))
except Exception:
pass
logger.debug("Failed to put cancel event to queue")
return True

View File

@@ -8,10 +8,13 @@ profile has its own workspace configuration. State files live at
paths are used as fallback when no profile module is available.
"""
import json
import logging
import os
import subprocess
from pathlib import Path
logger = logging.getLogger(__name__)
from api.config import (
WORKSPACES_FILE as _GLOBAL_WS_FILE,
LAST_WORKSPACE_FILE as _GLOBAL_LW_FILE,
@@ -37,7 +40,7 @@ def _profile_state_dir() -> Path:
d.mkdir(parents=True, exist_ok=True)
return d
except ImportError:
pass
logger.debug("Failed to import profiles module, using global state dir")
return _GLOBAL_WS_FILE.parent
@@ -80,7 +83,7 @@ def _profile_default_workspace() -> str:
if p.is_dir():
return str(p)
except (ImportError, Exception):
pass
logger.debug("Failed to load profile default workspace config")
return str(_BOOT_DEFAULT_WORKSPACE)
@@ -156,10 +159,10 @@ def load_workspaces() -> list:
json.dumps(cleaned, ensure_ascii=False, indent=2), encoding='utf-8'
)
except Exception:
pass
logger.debug("Failed to persist cleaned workspace list")
return cleaned or [{'path': _profile_default_workspace(), 'name': 'Home'}]
except Exception:
pass
logger.debug("Failed to load workspaces from %s", ws_file)
# No profile-local file yet.
# For the DEFAULT profile: migrate from the legacy global file (one-time cleanup).
# For NAMED profiles: always start clean with just their own workspace.
@@ -190,7 +193,7 @@ def get_last_workspace() -> str:
if p and Path(p).is_dir():
return p
except Exception:
pass
logger.debug("Failed to read last workspace from %s", lw_file)
# Fallback: try global file
if _GLOBAL_LW_FILE.exists():
try:
@@ -198,7 +201,7 @@ def get_last_workspace() -> str:
if p and Path(p).is_dir():
return p
except Exception:
pass
logger.debug("Failed to read global last workspace")
return _profile_default_workspace()
@@ -208,7 +211,7 @@ def set_last_workspace(path: str) -> None:
lw_file.parent.mkdir(parents=True, exist_ok=True)
lw_file.write_text(str(path), encoding='utf-8')
except Exception:
pass
logger.debug("Failed to set last workspace")
def safe_resolve_ws(root: Path, requested: str) -> Path: