Hermes WebUI v0.1.0 — initial public release
This commit is contained in:
1
api/__init__.py
Normal file
1
api/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Hermes WebUI -- API modules."""
|
||||
273
api/config.py
Normal file
273
api/config.py
Normal file
@@ -0,0 +1,273 @@
|
||||
"""
|
||||
Hermes WebUI -- Shared configuration, constants, and global state.
|
||||
Imported by all other api/* modules and by server.py.
|
||||
|
||||
Discovery order for all paths:
|
||||
1. Explicit environment variable
|
||||
2. Filesystem heuristics (sibling checkout, parent dir, common install locations)
|
||||
3. Hardened defaults relative to $HOME
|
||||
4. Fail loudly with a human-readable fix-it message if required modules are missing
|
||||
"""
|
||||
import collections
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
import uuid
|
||||
from pathlib import Path
|
||||
from urllib.parse import parse_qs, urlparse
|
||||
|
||||
# ── Basic layout ──────────────────────────────────────────────────────────────
HOME = Path.home()
# REPO_ROOT is the directory that contains this file's parent (api/ -> repo root)
REPO_ROOT = Path(__file__).parent.parent.resolve()

# ── Network config (env-overridable) ─────────────────────────────────────────
# Bind address and port for the web UI; defaults are loopback-only.
HOST = os.getenv('HERMES_WEBUI_HOST', '127.0.0.1')
PORT = int(os.getenv('HERMES_WEBUI_PORT', '8787'))

# ── State directory (env-overridable, never inside repo) ──────────────────────
# All mutable state (sessions, workspace registry) lives under this directory
# so a repo checkout stays pristine.
STATE_DIR = Path(os.getenv(
    'HERMES_WEBUI_STATE_DIR',
    str(HOME / '.hermes' / 'webui-mvp')
)).expanduser().resolve()

SESSION_DIR = STATE_DIR / 'sessions'            # one JSON file per session
WORKSPACES_FILE = STATE_DIR / 'workspaces.json' # registered workspace list
SESSION_INDEX_FILE = SESSION_DIR / '_index.json'  # compact session index (api/models.py)
LAST_WORKSPACE_FILE = STATE_DIR / 'last_workspace.txt'  # most recently used workspace path
|
||||
|
||||
# ── Hermes agent directory discovery ─────────────────────────────────────────
|
||||
def _discover_agent_dir() -> 'Path | None':
    """
    Locate the hermes-agent checkout using a multi-strategy search.

    Priority:
      1. HERMES_WEBUI_AGENT_DIR env var -- explicit override always wins
      2. HERMES_HOME / hermes-agent    -- e.g. ~/.hermes/hermes-agent
      3. Sibling of this repo          -- ../hermes-agent
      4. Parent of this repo           -- repo cloned inside hermes-agent/
      5. ~/.hermes/hermes-agent        -- common install path (again as fallback)
      6. ~/hermes-agent                -- simple flat layout

    Returns:
        The first candidate that exists and contains run_agent.py, resolved to
        an absolute Path, or None when nothing matches. (The annotation was
        previously ``-> Path`` even though the miss path returns None.)
    """
    candidates = []

    # 1. Explicit env var -- read once instead of calling os.getenv twice
    env_dir = os.getenv('HERMES_WEBUI_AGENT_DIR')
    if env_dir:
        candidates.append(Path(env_dir).expanduser().resolve())

    # 2. HERMES_HOME / hermes-agent
    hermes_home = os.getenv('HERMES_HOME', str(HOME / '.hermes'))
    candidates.append(Path(hermes_home).expanduser() / 'hermes-agent')

    # 3. Sibling: <repo-root>/../hermes-agent
    candidates.append(REPO_ROOT.parent / 'hermes-agent')

    # 4. Parent is the agent repo itself (repo cloned inside hermes-agent/)
    if (REPO_ROOT.parent / 'run_agent.py').exists():
        candidates.append(REPO_ROOT.parent)

    # 5. ~/.hermes/hermes-agent (explicit common path)
    candidates.append(HOME / '.hermes' / 'hermes-agent')

    # 6. ~/hermes-agent
    candidates.append(HOME / 'hermes-agent')

    # run_agent.py is the marker that proves a directory is a usable checkout.
    for path in candidates:
        if path.exists() and (path / 'run_agent.py').exists():
            return path.resolve()

    return None
|
||||
|
||||
|
||||
def _discover_python(agent_dir: Path) -> str:
    """
    Pick a Python interpreter that has the Hermes agent dependencies installed.

    Search order: the HERMES_WEBUI_PYTHON env var, the agent's own venv
    (POSIX layout, then Windows layout), this repo's local .venv, and
    finally whatever python3/python is found on PATH.
    """
    # Explicit override always wins.
    env_python = os.getenv('HERMES_WEBUI_PYTHON')
    if env_python:
        return env_python

    # Probe the known venv layouts, most specific first.
    venv_candidates = []
    if agent_dir:
        venv_candidates.append(agent_dir / 'venv' / 'bin' / 'python')          # POSIX venv
        venv_candidates.append(agent_dir / 'venv' / 'Scripts' / 'python.exe')  # Windows venv
    venv_candidates.append(REPO_ROOT / '.venv' / 'bin' / 'python')             # repo-local .venv

    for candidate in venv_candidates:
        if candidate.exists():
            return str(candidate)

    # Fall back to whatever interpreter is on PATH.
    import shutil
    for exe_name in ('python3', 'python'):
        located = shutil.which(exe_name)
        if located:
            return located

    return 'python3'
|
||||
|
||||
|
||||
# Run discovery once at import time so every module sees the same result.
_AGENT_DIR = _discover_agent_dir()
PYTHON_EXE = _discover_python(_AGENT_DIR)

# ── Inject agent dir into sys.path so Hermes modules are importable ───────────
# api/streaming.py relies on this to `from run_agent import AIAgent`.
if _AGENT_DIR is not None:
    if str(_AGENT_DIR) not in sys.path:
        sys.path.insert(0, str(_AGENT_DIR))  # prepend so agent modules take priority
    _HERMES_FOUND = True
else:
    # Server still starts; print_startup_config() tells the user how to fix it.
    _HERMES_FOUND = False
|
||||
|
||||
# ── Config file (optional YAML) ──────────────────────────────────────────────
|
||||
# ── Config file (optional YAML) ──────────────────────────────────────────────
# Optional user config; HERMES_CONFIG_PATH overrides the default location.
CONFIG_PATH = Path(os.getenv(
    'HERMES_CONFIG_PATH',
    str(HOME / '.hermes' / 'config.yaml')
)).expanduser()

# Best-effort load: missing PyYAML, a missing file, or a parse error all
# degrade to an empty config rather than blocking startup. The trailing
# `or {}` guards against yaml.safe_load returning None for an empty/blank
# config file, which would crash the later cfg.get(...) calls at import.
try:
    import yaml as _yaml
    cfg = (_yaml.safe_load(CONFIG_PATH.read_text()) if CONFIG_PATH.exists() else {}) or {}
except Exception:
    cfg = {}
|
||||
|
||||
# ── Default workspace discovery ───────────────────────────────────────────────
|
||||
def _discover_default_workspace() -> Path:
    """
    Resolve the default workspace directory.

    Order: HERMES_WEBUI_DEFAULT_WORKSPACE env var, then the conventional
    ~/workspace if it exists, then an isolated fallback under STATE_DIR.
    """
    override = os.getenv('HERMES_WEBUI_DEFAULT_WORKSPACE')
    if override:
        return Path(override).expanduser().resolve()

    conventional = HOME / 'workspace'
    if conventional.exists():
        return conventional.resolve()

    # Isolated fallback that never collides with user data.
    return (STATE_DIR / 'workspace').resolve()
|
||||
|
||||
# Resolved once at import; new sessions default to this workspace and model.
DEFAULT_WORKSPACE = _discover_default_workspace()
DEFAULT_MODEL = os.getenv('HERMES_WEBUI_DEFAULT_MODEL', 'openai/gpt-5.4-mini')
|
||||
|
||||
# ── Startup diagnostics ───────────────────────────────────────────────────────
|
||||
def print_startup_config():
    """Print detected configuration at startup so the user can verify what was found.

    Emits an ANSI-coloured summary of the discovered paths, then -- when the
    Hermes agent directory could not be located -- a human-readable fix-it
    message (module docstring, point 4). Writes to stdout only.
    """
    # ANSI status markers. (A previously-defined 'warn' marker was unused and
    # has been removed.)
    ok = '\033[32m[ok]\033[0m'
    err = '\033[31m[XX]\033[0m'

    lines = [
        '',
        ' Hermes Web UI -- startup config',
        ' --------------------------------',
        f' repo root : {REPO_ROOT}',
        f' agent dir : {_AGENT_DIR if _AGENT_DIR else "NOT FOUND"} {ok if _AGENT_DIR else err}',
        f' python : {PYTHON_EXE}',
        f' state dir : {STATE_DIR}',
        f' workspace : {DEFAULT_WORKSPACE}',
        f' host:port : {HOST}:{PORT}',
        f' config file : {CONFIG_PATH} {"(found)" if CONFIG_PATH.exists() else "(not found, using defaults)"}',
        '',
    ]
    print('\n'.join(lines), flush=True)

    if not _HERMES_FOUND:
        # Fail loudly but keep the server usable for static/UI work.
        print(
            f'{err} Could not find the Hermes agent directory.\n'
            ' The server will start but agent features will not work.\n'
            '\n'
            ' To fix, set one of:\n'
            ' export HERMES_WEBUI_AGENT_DIR=/path/to/hermes-agent\n'
            ' export HERMES_HOME=/path/to/.hermes\n'
            '\n'
            ' Or clone hermes-agent as a sibling of this repo:\n'
            ' git clone <hermes-agent-repo> ../hermes-agent\n',
            flush=True
        )
|
||||
|
||||
def verify_hermes_imports():
    """
    Attempt to import each key Hermes module.

    Returns:
        (ok, missing): ok is True when every required module imports cleanly;
        missing lists the module names whose import raised ImportError.
    """
    required = ['run_agent']
    missing = []
    for module_name in required:
        try:
            __import__(module_name)
        except ImportError:
            missing.append(module_name)
    return (not missing), missing
|
||||
|
||||
# ── Limits ───────────────────────────────────────────────────────────────────
# Largest file the viewer will return (enforced in api/workspace.py).
MAX_FILE_BYTES = 200_000
# Largest accepted upload body (enforced in api/upload.py before parsing).
MAX_UPLOAD_BYTES = 20 * 1024 * 1024

# ── File type maps ───────────────────────────────────────────────────────────
# Extension sets used to decide how the UI treats a file.
IMAGE_EXTS = {'.png', '.jpg', '.jpeg', '.gif', '.svg', '.webp', '.ico', '.bmp'}
MD_EXTS = {'.md', '.markdown', '.mdown'}
CODE_EXTS = {'.py', '.js', '.ts', '.jsx', '.tsx', '.css', '.html', '.json',
             '.yaml', '.yml', '.toml', '.sh', '.bash', '.txt', '.log', '.env',
             '.csv', '.xml', '.sql', '.rs', '.go', '.java', '.c', '.cpp', '.h'}
# Extension -> Content-Type for files served raw.
MIME_MAP = {
    '.png':'image/png', '.jpg':'image/jpeg', '.jpeg':'image/jpeg',
    '.gif':'image/gif', '.svg':'image/svg+xml', '.webp':'image/webp',
    '.ico':'image/x-icon', '.bmp':'image/bmp',
    '.pdf':'application/pdf', '.json':'application/json',
}

# ── Toolsets (from config.yaml or hardcoded default) ─────────────────────────
# Toolsets passed to AIAgent for the 'cli' platform; the config key
# platform_toolsets.cli overrides this default list when present.
CLI_TOOLSETS = cfg.get('platform_toolsets', {}).get('cli', [
    'browser', 'clarify', 'code_execution', 'cronjob', 'delegation', 'file',
    'image_gen', 'memory', 'session_search', 'skills', 'terminal', 'todo',
    'web', 'webhook',
])
|
||||
|
||||
# ── Static file path ─────────────────────────────────────────────────────────
|
||||
# ── Static file path ─────────────────────────────────────────────────────────
# Single-page UI markup served by the HTTP handler.
_INDEX_HTML_PATH = REPO_ROOT / 'static' / 'index.html'

# ── Thread synchronisation ───────────────────────────────────────────────────
LOCK = threading.Lock()          # guards the SESSIONS cache (api/models.py)
SESSIONS_MAX = 100               # LRU capacity of the in-memory session cache
CHAT_LOCK = threading.Lock()
STREAMS: dict = {}               # stream_id -> queue of SSE events (api/streaming.py)
STREAMS_LOCK = threading.Lock()  # guards STREAMS and CANCEL_FLAGS together
CANCEL_FLAGS: dict = {}          # stream_id -> threading.Event set on cancel
SERVER_START_TIME = time.time()  # for uptime reporting
|
||||
|
||||
# ── Thread-local env context ─────────────────────────────────────────────────
|
||||
_thread_ctx = threading.local()
|
||||
|
||||
def _set_thread_env(**kwargs):
|
||||
_thread_ctx.env = kwargs
|
||||
|
||||
def _clear_thread_env():
|
||||
_thread_ctx.env = {}
|
||||
|
||||
# ── Per-session agent locks ───────────────────────────────────────────────────
|
||||
SESSION_AGENT_LOCKS: dict = {}
|
||||
SESSION_AGENT_LOCKS_LOCK = threading.Lock()
|
||||
|
||||
def _get_session_agent_lock(session_id: str) -> threading.Lock:
|
||||
with SESSION_AGENT_LOCKS_LOCK:
|
||||
if session_id not in SESSION_AGENT_LOCKS:
|
||||
SESSION_AGENT_LOCKS[session_id] = threading.Lock()
|
||||
return SESSION_AGENT_LOCKS[session_id]
|
||||
|
||||
# ── SESSIONS in-memory cache (LRU OrderedDict) ───────────────────────────────
# Keyed by session_id; most-recently-used entries sit at the right end.
# Eviction beyond SESSIONS_MAX happens in api/models.py (get_session/new_session).
SESSIONS: collections.OrderedDict = collections.OrderedDict()
|
||||
57
api/helpers.py
Normal file
57
api/helpers.py
Normal file
@@ -0,0 +1,57 @@
|
||||
"""
|
||||
Hermes WebUI -- HTTP helper functions.
|
||||
"""
|
||||
import json as _json
|
||||
from pathlib import Path
|
||||
from api.config import IMAGE_EXTS, MD_EXTS
|
||||
|
||||
|
||||
def require(body: dict, *fields):
    """Validate that every named field is present and non-empty (0 is allowed).

    Raises:
        ValueError: naming all of the missing fields in one clean message.
    """
    absent = []
    for field in fields:
        value = body.get(field)
        # Falsy values count as missing, except a literal 0 which is valid.
        if not value and value != 0:
            absent.append(field)
    if absent:
        raise ValueError(f"Missing required field(s): {', '.join(absent)}")
|
||||
|
||||
|
||||
def bad(handler, msg, status=400):
    """Send *msg* back to the client as a JSON error payload."""
    payload = {'error': msg}
    return j(handler, payload, status=status)
|
||||
|
||||
|
||||
def safe_resolve(root: Path, requested: str) -> Path:
    """Resolve *requested* inside *root*, raising ValueError on path traversal."""
    base = root.resolve()
    candidate = (root / requested).resolve()
    candidate.relative_to(base)  # ValueError here means the path escaped root
    return candidate
|
||||
|
||||
|
||||
def j(handler, payload, status=200):
    """Serialise *payload* as pretty-printed JSON and send it with *status*."""
    encoded = _json.dumps(payload, ensure_ascii=False, indent=2).encode('utf-8')
    handler.send_response(status)
    for header, value in (
        ('Content-Type', 'application/json; charset=utf-8'),
        ('Content-Length', str(len(encoded))),
        ('Cache-Control', 'no-store'),
    ):
        handler.send_header(header, value)
    handler.end_headers()
    handler.wfile.write(encoded)
|
||||
|
||||
|
||||
def t(handler, payload, status=200, content_type='text/plain; charset=utf-8'):
    """Send *payload* as text (or any *content_type*); bytes pass through unencoded."""
    if isinstance(payload, bytes):
        body = payload
    else:
        body = str(payload).encode('utf-8')
    handler.send_response(status)
    handler.send_header('Content-Type', content_type)
    handler.send_header('Content-Length', str(len(body)))
    handler.send_header('Cache-Control', 'no-store')
    handler.end_headers()
    handler.wfile.write(body)
|
||||
|
||||
|
||||
def read_body(handler):
    """Read and JSON-parse a POST request body.

    Returns {} for an empty body, malformed JSON, or a malformed
    Content-Length header. (Previously a non-numeric Content-Length made
    int() raise an uncaught ValueError, turning a bad request into a 500.)
    """
    try:
        length = int(handler.headers.get('Content-Length', 0))
        raw = handler.rfile.read(length) if length else b'{}'
        return _json.loads(raw)
    except Exception:
        return {}
|
||||
114
api/models.py
Normal file
114
api/models.py
Normal file
@@ -0,0 +1,114 @@
|
||||
"""
|
||||
Hermes WebUI -- Session model and in-memory session store.
|
||||
"""
|
||||
import collections
|
||||
import json
|
||||
import time
|
||||
import uuid
|
||||
from pathlib import Path
|
||||
|
||||
from api.config import (
|
||||
SESSION_DIR, SESSION_INDEX_FILE, SESSIONS, SESSIONS_MAX,
|
||||
LOCK, DEFAULT_WORKSPACE, DEFAULT_MODEL
|
||||
)
|
||||
from api.workspace import get_last_workspace
|
||||
|
||||
|
||||
def _write_session_index():
    """Rebuild SESSION_INDEX_FILE from on-disk sessions plus in-memory ones."""
    entries = []
    for session_file in SESSION_DIR.glob('*.json'):
        # Underscore-prefixed files (including the index itself) are private.
        if session_file.name.startswith('_'):
            continue
        try:
            loaded = Session.load(session_file.stem)
            if loaded:
                entries.append(loaded.compact())
        except Exception:
            pass  # skip unreadable/corrupt session files
    with LOCK:
        indexed_ids = {entry['session_id'] for entry in entries}
        for live in SESSIONS.values():
            if live.session_id not in indexed_ids:
                entries.append(live.compact())
    entries.sort(key=lambda entry: entry['updated_at'], reverse=True)
    SESSION_INDEX_FILE.write_text(json.dumps(entries, ensure_ascii=False, indent=2), encoding='utf-8')
|
||||
|
||||
|
||||
class Session:
    """A chat session persisted as one JSON file under SESSION_DIR."""

    def __init__(self, session_id=None, title='Untitled', workspace=str(DEFAULT_WORKSPACE), model=DEFAULT_MODEL, messages=None, created_at=None, updated_at=None, tool_calls=None, **kwargs):
        # **kwargs silently absorbs unknown keys from older session files.
        self.session_id = session_id or uuid.uuid4().hex[:12]
        self.title = title
        self.workspace = str(Path(workspace).expanduser().resolve())
        self.model = model
        self.messages = messages or []
        self.tool_calls = tool_calls or []
        self.created_at = created_at or time.time()
        self.updated_at = updated_at or time.time()

    @property
    def path(self):
        """Location of this session's JSON file on disk."""
        return SESSION_DIR / f'{self.session_id}.json'

    def save(self):
        """Persist to disk (bumping updated_at) and refresh the session index."""
        self.updated_at = time.time()
        self.path.write_text(json.dumps(self.__dict__, ensure_ascii=False, indent=2), encoding='utf-8')
        _write_session_index()

    @classmethod
    def load(cls, sid):
        """Load a session by id, or return None when no file exists."""
        session_file = SESSION_DIR / f'{sid}.json'
        if not session_file.exists():
            return None
        return cls(**json.loads(session_file.read_text(encoding='utf-8')))

    def compact(self):
        """Summary dict used for session lists and the on-disk index."""
        return {
            'session_id': self.session_id,
            'title': self.title,
            'workspace': self.workspace,
            'model': self.model,
            'message_count': len(self.messages),
            'created_at': self.created_at,
            'updated_at': self.updated_at,
        }
|
||||
|
||||
def get_session(sid):
    """Return the session for *sid* from cache or disk; raise KeyError if absent.

    Both cache hits and fresh loads refresh the session's LRU position;
    loads may evict the least-recently-used entries beyond SESSIONS_MAX.
    """
    with LOCK:
        cached = SESSIONS.get(sid)
        if cached is not None:
            SESSIONS.move_to_end(sid)  # LRU: mark as recently used
            return cached
    loaded = Session.load(sid)
    if loaded is None:
        raise KeyError(sid)
    with LOCK:
        SESSIONS[sid] = loaded
        SESSIONS.move_to_end(sid)
        while len(SESSIONS) > SESSIONS_MAX:
            SESSIONS.popitem(last=False)  # evict least recently used
    return loaded
|
||||
|
||||
def new_session(workspace=None, model=None):
    """Create a fresh session, register it in the LRU cache, and persist it."""
    session = Session(
        workspace=workspace or get_last_workspace(),
        model=model or DEFAULT_MODEL,
    )
    with LOCK:
        SESSIONS[session.session_id] = session
        SESSIONS.move_to_end(session.session_id)
        while len(SESSIONS) > SESSIONS_MAX:
            SESSIONS.popitem(last=False)  # evict least recently used
    session.save()
    return session
|
||||
|
||||
def all_sessions():
    """Return compact summaries of every session, newest first.

    Fast path reads the prebuilt index file and overlays in-memory sessions;
    any failure falls back to a full directory scan. Empty 'Untitled'
    sessions are filtered out in both paths.
    """
    # Phase C: try index first for O(1) read; fall back to full scan
    if SESSION_INDEX_FILE.exists():
        try:
            index = json.loads(SESSION_INDEX_FILE.read_text(encoding='utf-8'))
            # Overlay any in-memory sessions that may be newer than the index
            index_map = {s['session_id']: s for s in index}
            with LOCK:
                for s in SESSIONS.values():
                    index_map[s.session_id] = s.compact()
            result = sorted(index_map.values(), key=lambda s: s['updated_at'], reverse=True)
            # Hide empty Untitled sessions from the UI (created by tests, page refreshes, etc.)
            result = [s for s in result if not (s.get('title','Untitled')=='Untitled' and s.get('message_count',0)==0)]
            return result
        except Exception:
            pass  # fall through to full scan
    # Full scan fallback: load every session file, skipping private/corrupt ones.
    out = []
    for p in SESSION_DIR.glob('*.json'):
        if p.name.startswith('_'): continue
        try:
            s = Session.load(p.stem)
            if s: out.append(s)
        except Exception:
            pass
    # Add in-memory sessions not yet written to disk.
    # NOTE(review): this loop reads SESSIONS without holding LOCK, unlike the
    # index path above -- confirm whether that is intentional.
    for s in SESSIONS.values():
        if all(s.session_id != x.session_id for x in out): out.append(s)
    out.sort(key=lambda s: s.updated_at, reverse=True)
    return [s.compact() for s in out if not (s.title=='Untitled' and len(s.messages)==0)]
|
||||
|
||||
|
||||
def title_from(messages, fallback='Untitled'):
    """Derive a session title from the first non-empty user message (max 64 chars)."""
    for message in messages:
        if message.get('role') != 'user':
            continue
        content = message.get('content', '')
        if isinstance(content, list):
            # Structured content: join only the text parts.
            text_parts = [
                part.get('text', '')
                for part in content
                if isinstance(part, dict) and part.get('type') == 'text'
            ]
            content = ' '.join(text_parts)
        stripped = str(content).strip()
        if stripped:
            return stripped[:64]
    return fallback
|
||||
218
api/streaming.py
Normal file
218
api/streaming.py
Normal file
@@ -0,0 +1,218 @@
|
||||
"""
|
||||
Hermes WebUI -- SSE streaming engine and agent thread runner.
|
||||
Includes Sprint 10 cancel support via CANCEL_FLAGS.
|
||||
"""
|
||||
import json
|
||||
import os
|
||||
import queue
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
from pathlib import Path
|
||||
|
||||
from api.config import (
|
||||
STREAMS, STREAMS_LOCK, CANCEL_FLAGS, CLI_TOOLSETS,
|
||||
_get_session_agent_lock, _set_thread_env, _clear_thread_env,
|
||||
)
|
||||
|
||||
# Lazy import to avoid circular deps -- hermes-agent is on sys.path via api/config.py
|
||||
try:
|
||||
from run_agent import AIAgent
|
||||
except ImportError:
|
||||
AIAgent = None
|
||||
from api.models import get_session, title_from
|
||||
from api.workspace import set_last_workspace
|
||||
|
||||
|
||||
def _sse(handler, event, data):
|
||||
"""Write one SSE event to the response stream."""
|
||||
payload = f"event: {event}\ndata: {json.dumps(data, ensure_ascii=False)}\n\n"
|
||||
handler.wfile.write(payload.encode('utf-8'))
|
||||
handler.wfile.flush()
|
||||
|
||||
|
||||
def _run_agent_streaming(session_id, msg_text, model, workspace, stream_id, attachments=None):
    """Run agent in background thread, writing SSE events to STREAMS[stream_id].

    Events pushed onto the stream's queue: 'token' (streamed text deltas),
    'tool' (tool-call progress), 'approval' (pending exec approval), 'cancel',
    'error', and a final 'done' carrying the updated session payload.

    Args:
        session_id: id of the Session being driven.
        msg_text: full user message text (may already include an
            '[Attached files: ...]' suffix -- see the attachment tagging below).
        model: model identifier the agent should run with.
        workspace: workspace directory for this turn; stored on the session.
        stream_id: key into STREAMS/CANCEL_FLAGS for this run.
        attachments: optional list of uploaded filenames to tag onto the
            matching user message for display on reload.
    """
    q = STREAMS.get(stream_id)
    if q is None:
        return  # stream was never registered (or already torn down)

    # Sprint 10: create a cancel event for this stream
    cancel_event = threading.Event()
    with STREAMS_LOCK:
        CANCEL_FLAGS[stream_id] = cancel_event

    def put(event, data):
        # If cancelled, drop all further events except the cancel event itself
        if cancel_event.is_set() and event not in ('cancel', 'error'):
            return
        try:
            q.put_nowait((event, data))
        except Exception:
            pass  # best-effort: a full/closed queue must not kill the run

    try:
        s = get_session(session_id)
        s.workspace = str(Path(workspace).expanduser().resolve())
        s.model = model

        _agent_lock = _get_session_agent_lock(session_id)
        # TD1: set thread-local env context so concurrent sessions don't clobber globals
        # Check for pre-flight cancel (user cancelled before agent even started)
        if cancel_event.is_set():
            put('cancel', {'message': 'Cancelled before start'})
            return

        _set_thread_env(
            TERMINAL_CWD=str(s.workspace),
            HERMES_EXEC_ASK='1',
            HERMES_SESSION_KEY=session_id,
        )
        # Still set process-level env as fallback for tools that bypass thread-local
        with _agent_lock:
            # Snapshot previous values so the finally block can restore exactly.
            old_cwd = os.environ.get('TERMINAL_CWD')
            old_exec_ask = os.environ.get('HERMES_EXEC_ASK')
            old_session_key = os.environ.get('HERMES_SESSION_KEY')
            os.environ['TERMINAL_CWD'] = str(s.workspace)
            os.environ['HERMES_EXEC_ASK'] = '1'
            os.environ['HERMES_SESSION_KEY'] = session_id

            try:
                def on_token(text):
                    if text is None:
                        return  # end-of-stream sentinel
                    put('token', {'text': text})

                def on_tool(name, preview, args):
                    # Snapshot at most 4 args, each truncated to 120 chars, for the UI card.
                    args_snap = {}
                    if isinstance(args, dict):
                        for k, v in list(args.items())[:4]:
                            s2 = str(v); args_snap[k] = s2[:120]+('...' if len(s2)>120 else '')
                    put('tool', {'name': name, 'preview': preview, 'args': args_snap})
                    # also check for pending approval and surface it immediately
                    # NOTE(review): reaches into tools.approval private state
                    # (_pending/_lock) -- confirm those names are stable.
                    from tools.approval import has_pending as _has_pending, _pending, _lock
                    if _has_pending(session_id):
                        with _lock:
                            p = dict(_pending.get(session_id, {}))
                        if p:
                            put('approval', p)

                if AIAgent is None:
                    raise ImportError("AIAgent not available -- check that hermes-agent is on sys.path")
                agent = AIAgent(
                    model=model,
                    platform='cli',
                    quiet_mode=True,
                    enabled_toolsets=CLI_TOOLSETS,
                    session_id=session_id,
                    stream_delta_callback=on_token,
                    tool_progress_callback=on_tool,
                )
                # Prepend workspace context so the agent always knows which directory
                # to use for file operations, regardless of session age or AGENTS.md defaults.
                workspace_ctx = f"[Workspace: {s.workspace}]\n"
                workspace_system_msg = (
                    f"Active workspace at session start: {s.workspace}\n"
                    "Every user message is prefixed with [Workspace: /absolute/path] indicating the "
                    "workspace the user has selected in the web UI at the time they sent that message. "
                    "This tag is the single authoritative source of the active workspace and updates "
                    "with every message. It overrides any prior workspace mentioned in this system "
                    "prompt, memory, or conversation history. Always use the value from the most recent "
                    "[Workspace: ...] tag as your default working directory for ALL file operations: "
                    "write_file, read_file, search_files, terminal workdir, and patch. "
                    "Never fall back to a hardcoded path when this tag is present."
                )
                result = agent.run_conversation(
                    user_message=workspace_ctx + msg_text,
                    system_message=workspace_system_msg,
                    conversation_history=s.messages,
                    task_id=session_id,
                    persist_user_message=msg_text,
                )
                # NOTE(review): assumes run_conversation returns a dict with a
                # 'messages' list -- confirm against run_agent.AIAgent.
                s.messages = result.get('messages') or s.messages
                s.title = title_from(s.messages, s.title)
                # Extract tool call metadata grouped by assistant message index
                # Each tool call gets assistant_msg_idx so the client can render
                # cards inline with the assistant bubble that triggered them.
                tool_calls = []
                pending_names = {}  # tool_call_id -> name
                pending_asst_idx = {}  # tool_call_id -> index in s.messages
                for msg_idx, m in enumerate(s.messages):
                    if m.get('role') == 'assistant':
                        c = m.get('content', '')
                        if isinstance(c, list):
                            for p in c:
                                if isinstance(p, dict) and p.get('type') == 'tool_use':
                                    tid = p.get('id', '')
                                    pending_names[tid] = p.get('name', 'tool')
                                    pending_asst_idx[tid] = msg_idx
                    elif m.get('role') == 'tool':
                        tid = m.get('tool_call_id') or m.get('tool_use_id', '')
                        name = pending_names.get(tid, 'tool')
                        asst_idx = pending_asst_idx.get(tid, -1)
                        raw = str(m.get('content', ''))
                        try:
                            import json as _j2
                            rd = _j2.loads(raw)
                            snippet = str(rd.get('output') or rd.get('result') or rd.get('error') or raw)[:200]
                        except Exception:
                            snippet = raw[:200]  # not JSON: fall back to the raw prefix
                        tool_calls.append({
                            'name': name, 'snippet': snippet, 'tid': tid,
                            'assistant_msg_idx': asst_idx,
                        })
                s.tool_calls = tool_calls
                # Tag the matching user message with attachment filenames for display on reload
                # Only tag a user message whose content relates to this turn's text
                # (msg_text is the full message including the [Attached files: ...] suffix)
                if attachments:
                    for m in reversed(s.messages):
                        if m.get('role') == 'user':
                            content = str(m.get('content', ''))
                            # Match if content is part of the sent message or vice-versa
                            base_text = msg_text.split('\n\n[Attached files:')[0].strip()
                            if base_text[:60] in content or content[:60] in msg_text:
                                m['attachments'] = attachments
                            break
                s.save()
                put('done', {'session': s.compact() | {'messages': s.messages, 'tool_calls': tool_calls}})
            finally:
                # Restore process-level env exactly as it was before this run.
                if old_cwd is None: os.environ.pop('TERMINAL_CWD', None)
                else: os.environ['TERMINAL_CWD'] = old_cwd
                if old_exec_ask is None: os.environ.pop('HERMES_EXEC_ASK', None)
                else: os.environ['HERMES_EXEC_ASK'] = old_exec_ask
                if old_session_key is None: os.environ.pop('HERMES_SESSION_KEY', None)
                else: os.environ['HERMES_SESSION_KEY'] = old_session_key

    except Exception as e:
        put('error', {'message': str(e), 'trace': traceback.format_exc()})
    finally:
        _clear_thread_env()  # TD1: always clear thread-local context
        with STREAMS_LOCK:
            STREAMS.pop(stream_id, None)
            CANCEL_FLAGS.pop(stream_id, None)
|
||||
|
||||
# ============================================================
# NOTE(review): this banner appears to be left over from the
# pre-split server.py -- the do_GET / do_POST routing it
# describes lives in server.py, not in this module. See
# ARCHITECTURE.md section 4.1 for the routing map.
# ============================================================
|
||||
|
||||
|
||||
def cancel_stream(stream_id: str) -> bool:
    """Signal an in-flight stream to cancel. Returns True if the stream existed.

    Sets the stream's cancel flag (when present) and wakes the SSE handler
    by pushing a 'cancel' sentinel onto the stream's queue.
    """
    with STREAMS_LOCK:
        if stream_id not in STREAMS:
            return False
        flag = CANCEL_FLAGS.get(stream_id)
        if flag is not None:
            flag.set()
        # Put a cancel sentinel into the queue so the SSE handler wakes up.
        stream_queue = STREAMS.get(stream_id)
        if stream_queue is not None:
            try:
                stream_queue.put_nowait(('cancel', {'message': 'Cancelled by user'}))
            except Exception:
                pass  # a full queue still counts as cancelled
        return True
|
||||
77
api/upload.py
Normal file
77
api/upload.py
Normal file
@@ -0,0 +1,77 @@
|
||||
"""
|
||||
Hermes WebUI -- File upload: multipart parser and upload handler.
|
||||
"""
|
||||
import re as _re
|
||||
import email.parser
|
||||
from pathlib import Path
|
||||
|
||||
from api.config import MAX_UPLOAD_BYTES
|
||||
from api.helpers import j, bad
|
||||
from api.models import get_session
|
||||
from api.workspace import safe_resolve_ws
|
||||
|
||||
|
||||
def parse_multipart(rfile, content_type, content_length):
    """Parse a multipart/form-data request body into (fields, files).

    Args:
        rfile: binary stream positioned at the start of the body.
        content_type: full Content-Type header value (must carry boundary=...).
        content_length: number of body bytes to read from rfile.

    Returns:
        fields: dict of field name -> decoded text value.
        files: dict of field name -> (filename, raw bytes).

    Raises:
        ValueError: when the Content-Type carries no boundary parameter.
    """
    import re as _re, email.parser as _ep
    m = _re.search(r'boundary=([^;\s]+)', content_type)
    if not m:
        raise ValueError('No boundary in Content-Type')
    boundary = m.group(1).strip('"').encode()
    raw = rfile.read(content_length)
    fields = {}
    files = {}
    # (A previously-computed end_marker variable was unused and has been
    # removed; the closing marker is detected per-part below instead.)
    delimiter = b'--' + boundary
    parts = raw.split(delimiter)
    for part in parts[1:]:
        # A part starting with '--' (after stripping CRLF) is the closing marker.
        stripped = part.lstrip(b'\r\n')
        if stripped.startswith(b'--'):
            break
        # Headers and body are separated by a blank line (CRLF or bare LF).
        sep = b'\r\n\r\n' if b'\r\n\r\n' in part else b'\n\n'
        if sep not in part:
            continue
        header_raw, body = part.split(sep, 1)
        # Drop the trailing newline that belongs to the next delimiter.
        if body.endswith(b'\r\n'):
            body = body[:-2]
        elif body.endswith(b'\n'):
            body = body[:-1]
        header_text = header_raw.lstrip(b'\r\n').decode('utf-8', errors='replace')
        msg = _ep.HeaderParser().parsestr(header_text)
        disp = msg.get('Content-Disposition', '')
        name_m = _re.search(r'name="([^"]*)"', disp)
        file_m = _re.search(r'filename="([^"]*)"', disp)
        if not name_m:
            continue
        name = name_m.group(1)
        if file_m:
            files[name] = (file_m.group(1), body)  # keep file payloads as raw bytes
        else:
            fields[name] = body.decode('utf-8', errors='replace')
    return fields, files
|
||||
|
||||
|
||||
def handle_upload(handler):
    """Accept a multipart file upload and write it into the session's workspace.

    Responds with JSON: {filename, path, size} on success, or an error payload
    with an appropriate HTTP status (413/400/404/500).
    """
    import re as _re, traceback as _tb
    try:
        content_type = handler.headers.get('Content-Type', '')
        content_length = int(handler.headers.get('Content-Length', 0) or 0)
        if content_length > MAX_UPLOAD_BYTES:
            return j(handler, {'error': f'File too large (max {MAX_UPLOAD_BYTES//1024//1024}MB)'}, status=413)

        fields, files = parse_multipart(handler.rfile, content_type, content_length)
        if 'file' not in files:
            return j(handler, {'error': 'No file field in request'}, status=400)
        filename, file_bytes = files['file']
        if not filename:
            return j(handler, {'error': 'No filename in upload'}, status=400)

        try:
            s = get_session(fields.get('session_id', ''))
        except KeyError:
            return j(handler, {'error': 'Session not found'}, status=404)

        # Sanitise the client-supplied name: basename only, safe charset, capped length.
        safe_name = _re.sub(r'[^\w.\-]', '_', Path(filename).name)[:200]
        dest = Path(s.workspace) / safe_name
        dest.write_bytes(file_bytes)
        return j(handler, {'filename': safe_name, 'path': str(dest), 'size': dest.stat().st_size})
    except Exception as e:
        return j(handler, {'error': str(e), 'trace': _tb.format_exc()}, status=500)
|
||||
77
api/workspace.py
Normal file
77
api/workspace.py
Normal file
@@ -0,0 +1,77 @@
|
||||
"""
|
||||
Hermes WebUI -- Workspace and file system helpers.
|
||||
"""
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
from api.config import (
|
||||
WORKSPACES_FILE, LAST_WORKSPACE_FILE, DEFAULT_WORKSPACE,
|
||||
MAX_FILE_BYTES, IMAGE_EXTS, MD_EXTS
|
||||
)
|
||||
|
||||
|
||||
def load_workspaces() -> list:
    """Return the saved workspace list; fall back to a single default entry."""
    if WORKSPACES_FILE.exists():
        try:
            return json.loads(WORKSPACES_FILE.read_text(encoding='utf-8'))
        except Exception:
            pass  # corrupt/unreadable file -> use the default below
    return [{'path': str(DEFAULT_WORKSPACE), 'name': 'default'}]
|
||||
|
||||
|
||||
def save_workspaces(workspaces: list):
    """Persist the workspace list as pretty-printed JSON."""
    serialized = json.dumps(workspaces, ensure_ascii=False, indent=2)
    WORKSPACES_FILE.write_text(serialized, encoding='utf-8')
|
||||
|
||||
|
||||
def get_last_workspace() -> str:
    """Return the most recently used workspace path if it's still a directory."""
    if LAST_WORKSPACE_FILE.exists():
        try:
            remembered = LAST_WORKSPACE_FILE.read_text(encoding='utf-8').strip()
            if remembered and Path(remembered).is_dir():
                return remembered
        except Exception:
            pass  # unreadable file -> fall back to the default
    return str(DEFAULT_WORKSPACE)
|
||||
|
||||
|
||||
def set_last_workspace(path: str):
    """Best-effort persist of the active workspace path; failures are ignored."""
    try:
        LAST_WORKSPACE_FILE.write_text(str(path), encoding='utf-8')
    except Exception:
        pass  # losing the remembered workspace is acceptable
|
||||
|
||||
|
||||
def safe_resolve_ws(root: Path, requested: str) -> Path:
    """Resolve *requested* inside workspace *root*, raising ValueError on traversal.

    Mirrors api.helpers.safe_resolve for workspace paths.
    """
    base = root.resolve()
    resolved = (root / requested).resolve()
    resolved.relative_to(base)  # ValueError here means the path escaped root
    return resolved
|
||||
|
||||
|
||||
def list_dir(workspace: Path, rel='.'):
    """List up to 200 entries of a directory inside *workspace*.

    Directories sort before files, each group case-insensitively by name.
    Raises FileNotFoundError when *rel* is not a directory (and ValueError
    from safe_resolve_ws on traversal attempts).
    """
    target = safe_resolve_ws(workspace, rel)
    if not target.is_dir():
        raise FileNotFoundError(f"Not a directory: {rel}")
    entries = []
    ordered = sorted(target.iterdir(), key=lambda p: (p.is_file(), p.name.lower()))
    for item in ordered:
        entries.append({
            'name': item.name,
            'path': str(item.relative_to(workspace)),
            'type': 'dir' if item.is_dir() else 'file',
            'size': item.stat().st_size if item.is_file() else None,
        })
        if len(entries) >= 200:
            break  # cap the response size
    return entries
|
||||
|
||||
|
||||
def read_file_content(workspace: Path, rel: str):
    """Read a text file inside *workspace*, capped at MAX_FILE_BYTES.

    Returns a dict with 'path', 'content', 'size', and 'lines'; bytes that
    can't be decoded as UTF-8 are replaced. Raises FileNotFoundError for
    non-files and ValueError for oversized files.
    """
    target = safe_resolve_ws(workspace, rel)
    if not target.is_file():
        raise FileNotFoundError(f"Not a file: {rel}")
    size = target.stat().st_size
    if size > MAX_FILE_BYTES:
        raise ValueError(f"File too large ({size} bytes, max {MAX_FILE_BYTES})")
    text = target.read_text(encoding='utf-8', errors='replace')
    return {
        'path': rel,
        'content': text,
        'size': size,
        'lines': text.count('\n') + 1,
    }
|
||||
Reference in New Issue
Block a user