The architecture has been updated

This commit is contained in:
Skyber_2 2026-03-31 23:31:36 +03:00
parent 805f7a017e
commit a01257ead9
1119 changed files with 226 additions and 352 deletions

View file

View file

@ -0,0 +1,70 @@
"""Tests for banner toolset name normalization and skin color usage."""
from unittest.mock import patch
from rich.console import Console
import hermes_cli.banner as banner
import model_tools
import tools.mcp_tool
def test_display_toolset_name_strips_legacy_suffix():
    """Legacy '<name>_tools' identifiers are displayed without the suffix."""
    for raw, shown in [
        ("homeassistant_tools", "homeassistant"),
        ("honcho_tools", "honcho"),
        ("web_tools", "web"),
    ]:
        assert banner._display_toolset_name(raw) == shown
def test_display_toolset_name_preserves_clean_names():
    """Names without the legacy suffix pass through unchanged."""
    for clean in ("browser", "file", "terminal"):
        assert banner._display_toolset_name(clean) == clean
def test_display_toolset_name_handles_empty():
    """Empty or missing names fall back to the 'unknown' placeholder."""
    for blank in ("", None):
        assert banner._display_toolset_name(blank) == "unknown"
def test_build_welcome_banner_uses_normalized_toolset_names():
    """Unavailable toolsets should not have '_tools' appended in banner output."""
    # Data for the mocked availability check: one available toolset, two not.
    unavailable = [
        {"name": "homeassistant", "tools": ["ha_call_service"]},
        {"name": "honcho", "tools": ["honcho_conclude"]},
    ]
    # Maps each tool to its (possibly legacy-suffixed) toolset name.
    toolset_by_tool = {"web_search": "web_tools", "read_file": "file"}
    with (
        patch.object(
            model_tools,
            "check_tool_availability",
            return_value=(["web"], unavailable),
        ),
        patch.object(banner, "get_available_skills", return_value={}),
        patch.object(banner, "get_update_result", return_value=None),
        patch.object(tools.mcp_tool, "get_mcp_status", return_value=[]),
    ):
        # Plain-text recording console so the output can be inspected.
        recorder = Console(
            record=True, force_terminal=False, color_system=None, width=160
        )
        banner.build_welcome_banner(
            console=recorder,
            model="anthropic/test-model",
            cwd="/tmp/project",
            tools=[
                {"function": {"name": "web_search"}},
                {"function": {"name": "read_file"}},
            ],
            get_toolset_for_tool=toolset_by_tool.get,
        )
        rendered = recorder.export_text()
        for label in ("homeassistant:", "honcho:", "web:"):
            assert label in rendered
        for stale in ("homeassistant_tools:", "honcho_tools:", "web_tools:"):
            assert stale not in rendered

View file

@ -0,0 +1,68 @@
"""Tests for banner get_available_skills() — disabled and platform filtering."""
from unittest.mock import patch
import pytest
_MOCK_SKILLS = [
{"name": "skill-a", "description": "A skill", "category": "tools"},
{"name": "skill-b", "description": "B skill", "category": "tools"},
{"name": "skill-c", "description": "C skill", "category": "creative"},
]
def test_get_available_skills_delegates_to_find_all_skills():
    """get_available_skills should call _find_all_skills (which handles filtering)."""
    with patch("tools.skills_tool._find_all_skills", return_value=list(_MOCK_SKILLS)):
        from hermes_cli.banner import get_available_skills

        by_category = get_available_skills()
        assert "tools" in by_category
        assert "creative" in by_category
        assert sorted(by_category["tools"]) == ["skill-a", "skill-b"]
        assert by_category["creative"] == ["skill-c"]
def test_get_available_skills_excludes_disabled():
    """Disabled skills should not appear in the banner count."""
    # _find_all_skills already filters disabled skills, so if we give it
    # a filtered list, get_available_skills should reflect that.
    remaining = [skill for skill in _MOCK_SKILLS if skill["name"] != "skill-b"]
    with patch("tools.skills_tool._find_all_skills", return_value=remaining):
        from hermes_cli.banner import get_available_skills

        grouped = get_available_skills()
        flattened = [name for bucket in grouped.values() for name in bucket]
        assert "skill-b" not in flattened
        assert "skill-a" in flattened
        assert len(flattened) == 2
def test_get_available_skills_empty_when_no_skills():
    """No skills installed returns empty dict."""
    with patch("tools.skills_tool._find_all_skills", return_value=[]):
        from hermes_cli.banner import get_available_skills

        assert get_available_skills() == {}
def test_get_available_skills_handles_import_failure():
    """If _find_all_skills import fails, return empty dict gracefully."""
    with patch("tools.skills_tool._find_all_skills", side_effect=ImportError("boom")):
        from hermes_cli.banner import get_available_skills

        assert get_available_skills() == {}
def test_get_available_skills_null_category_becomes_general():
    """Skills with None category should be grouped under 'general'."""
    uncategorized = [{"name": "orphan-skill", "description": "No cat", "category": None}]
    with patch("tools.skills_tool._find_all_skills", return_value=uncategorized):
        from hermes_cli.banner import get_available_skills

        grouped = get_available_skills()
        assert "general" in grouped
        assert grouped["general"] == ["orphan-skill"]

View file

@ -0,0 +1,77 @@
import sys
def test_top_level_skills_flag_defaults_to_chat(monkeypatch):
    """`hermes -s ...` with no subcommand routes to chat with skills set."""
    import hermes_cli.main as main_mod

    seen = {}

    def record_chat(args):
        seen["skills"] = args.skills
        seen["command"] = args.command

    monkeypatch.setattr(main_mod, "cmd_chat", record_chat)
    monkeypatch.setattr(
        sys, "argv", ["hermes", "-s", "hermes-agent-dev,github-auth"]
    )
    main_mod.main()
    assert seen == {"skills": ["hermes-agent-dev,github-auth"], "command": None}
def test_chat_subcommand_accepts_skills_flag(monkeypatch):
    """`hermes chat -s ... -q ...` surfaces both skills and query on args."""
    import hermes_cli.main as main_mod

    seen = {}

    def record_chat(args):
        seen["skills"] = args.skills
        seen["query"] = args.query

    monkeypatch.setattr(main_mod, "cmd_chat", record_chat)
    monkeypatch.setattr(
        sys, "argv", ["hermes", "chat", "-s", "github-auth", "-q", "hello"]
    )
    main_mod.main()
    assert seen == {"skills": ["github-auth"], "query": "hello"}
def test_continue_worktree_and_skills_flags_work_together(monkeypatch):
    """`hermes -c -w -s ...` combines continue, worktree, and skills flags."""
    import hermes_cli.main as main_mod

    seen = {}

    def record_chat(args):
        seen["continue_last"] = args.continue_last
        seen["worktree"] = args.worktree
        seen["skills"] = args.skills
        seen["command"] = args.command

    monkeypatch.setattr(main_mod, "cmd_chat", record_chat)
    monkeypatch.setattr(
        sys, "argv", ["hermes", "-c", "-w", "-s", "hermes-agent-dev"]
    )
    main_mod.main()
    assert seen == {
        "continue_last": True,
        "worktree": True,
        "skills": ["hermes-agent-dev"],
        "command": "chat",
    }

View file

@ -0,0 +1,340 @@
"""Tests for hermes claw commands."""
from argparse import Namespace
from types import ModuleType
from unittest.mock import MagicMock, patch
import pytest
from hermes_cli import claw as claw_mod
# ---------------------------------------------------------------------------
# _find_migration_script
# ---------------------------------------------------------------------------
class TestFindMigrationScript:
    """Test script discovery in known locations."""

    def test_finds_project_root_script(self, tmp_path):
        # A script present at the project-root location wins outright.
        root_script = tmp_path / "openclaw_to_hermes.py"
        root_script.write_text("# placeholder")
        with patch.object(claw_mod, "_OPENCLAW_SCRIPT", root_script):
            assert claw_mod._find_migration_script() == root_script

    def test_finds_installed_script(self, tmp_path):
        # When the project-root script is absent, fall back to the installed copy.
        installed = tmp_path / "installed.py"
        installed.write_text("# placeholder")
        with (
            patch.object(claw_mod, "_OPENCLAW_SCRIPT", tmp_path / "nonexistent.py"),
            patch.object(claw_mod, "_OPENCLAW_SCRIPT_INSTALLED", installed),
        ):
            assert claw_mod._find_migration_script() == installed

    def test_returns_none_when_missing(self, tmp_path):
        # Neither location exists → discovery reports None.
        with (
            patch.object(claw_mod, "_OPENCLAW_SCRIPT", tmp_path / "a.py"),
            patch.object(claw_mod, "_OPENCLAW_SCRIPT_INSTALLED", tmp_path / "b.py"),
        ):
            assert claw_mod._find_migration_script() is None
# ---------------------------------------------------------------------------
# claw_command routing
# ---------------------------------------------------------------------------
class TestClawCommand:
    """Test the claw_command router."""

    def test_routes_to_migrate(self):
        # A fully-populated migrate Namespace must be forwarded untouched.
        args = Namespace(
            claw_action="migrate", source=None, dry_run=True, preset="full",
            overwrite=False, migrate_secrets=False, workspace_target=None,
            skill_conflict="skip", yes=False,
        )
        with patch.object(claw_mod, "_cmd_migrate") as mock_migrate:
            claw_mod.claw_command(args)
        mock_migrate.assert_called_once_with(args)

    def test_shows_help_for_no_action(self, capsys):
        # No action → the router prints usage text mentioning 'migrate'.
        claw_mod.claw_command(Namespace(claw_action=None))
        assert "migrate" in capsys.readouterr().out
# ---------------------------------------------------------------------------
# _cmd_migrate
# ---------------------------------------------------------------------------
class TestCmdMigrate:
    """Test the migrate command handler."""

    @staticmethod
    def _make_args(source, **overrides):
        """Build a migrate-args Namespace with the defaults used across tests."""
        values = {
            "source": source,
            "dry_run": True,
            "preset": "full",
            "overwrite": False,
            "migrate_secrets": False,
            "workspace_target": None,
            "skill_conflict": "skip",
            "yes": False,
        }
        values.update(overrides)
        return Namespace(**values)

    @staticmethod
    def _fake_module(selected, report):
        """Build a fake openclaw_to_hermes module whose Migrator yields *report*."""
        fake_mod = ModuleType("openclaw_to_hermes")
        fake_mod.resolve_selected_options = MagicMock(return_value=selected)
        fake_migrator = MagicMock()
        fake_migrator.migrate.return_value = report
        fake_mod.Migrator = MagicMock(return_value=fake_migrator)
        return fake_mod

    def test_error_when_source_missing(self, tmp_path, capsys):
        # Source directory does not exist → user-facing error message.
        claw_mod._cmd_migrate(self._make_args(str(tmp_path / "nonexistent")))
        assert "not found" in capsys.readouterr().out

    def test_error_when_script_missing(self, tmp_path, capsys):
        openclaw_dir = tmp_path / ".openclaw"
        openclaw_dir.mkdir()
        with (
            patch.object(claw_mod, "_OPENCLAW_SCRIPT", tmp_path / "a.py"),
            patch.object(claw_mod, "_OPENCLAW_SCRIPT_INSTALLED", tmp_path / "b.py"),
        ):
            claw_mod._cmd_migrate(self._make_args(str(openclaw_dir)))
        assert "Migration script not found" in capsys.readouterr().out

    def test_dry_run_succeeds(self, tmp_path, capsys):
        openclaw_dir = tmp_path / ".openclaw"
        openclaw_dir.mkdir()
        script = tmp_path / "script.py"
        script.write_text("# placeholder")
        fake_mod = self._fake_module(
            {"soul", "memory"},
            {
                "summary": {"migrated": 0, "skipped": 5, "conflict": 0, "error": 0},
                "items": [
                    {"kind": "soul", "status": "skipped", "reason": "Not found"},
                ],
                "preset": "full",
            },
        )
        with (
            patch.object(claw_mod, "_find_migration_script", return_value=script),
            patch.object(claw_mod, "_load_migration_module", return_value=fake_mod),
            patch.object(claw_mod, "get_config_path", return_value=tmp_path / "config.yaml"),
            patch.object(claw_mod, "save_config"),
            patch.object(claw_mod, "load_config", return_value={}),
        ):
            claw_mod._cmd_migrate(self._make_args(str(openclaw_dir)))
        out = capsys.readouterr().out
        assert "Dry Run Results" in out
        assert "5 skipped" in out

    def test_execute_with_confirmation(self, tmp_path, capsys):
        openclaw_dir = tmp_path / ".openclaw"
        openclaw_dir.mkdir()
        config_path = tmp_path / "config.yaml"
        config_path.write_text("agent:\n max_turns: 90\n")
        fake_mod = self._fake_module(
            {"soul"},
            {
                "summary": {"migrated": 2, "skipped": 1, "conflict": 0, "error": 0},
                "items": [
                    {"kind": "soul", "status": "migrated", "destination": str(tmp_path / "SOUL.md")},
                    {"kind": "memory", "status": "migrated", "destination": str(tmp_path / "memories/MEMORY.md")},
                ],
            },
        )
        with (
            patch.object(claw_mod, "_find_migration_script", return_value=tmp_path / "s.py"),
            patch.object(claw_mod, "_load_migration_module", return_value=fake_mod),
            patch.object(claw_mod, "get_config_path", return_value=config_path),
            patch.object(claw_mod, "prompt_yes_no", return_value=True),
        ):
            claw_mod._cmd_migrate(
                self._make_args(str(openclaw_dir), dry_run=False, preset="user-data")
            )
        out = capsys.readouterr().out
        assert "Migration Results" in out
        assert "Migration complete!" in out

    def test_execute_cancelled_by_user(self, tmp_path, capsys):
        openclaw_dir = tmp_path / ".openclaw"
        openclaw_dir.mkdir()
        config_path = tmp_path / "config.yaml"
        config_path.write_text("")
        with (
            patch.object(claw_mod, "_find_migration_script", return_value=tmp_path / "s.py"),
            patch.object(claw_mod, "prompt_yes_no", return_value=False),
        ):
            claw_mod._cmd_migrate(self._make_args(str(openclaw_dir), dry_run=False))
        assert "Migration cancelled" in capsys.readouterr().out

    def test_execute_with_yes_skips_confirmation(self, tmp_path, capsys):
        openclaw_dir = tmp_path / ".openclaw"
        openclaw_dir.mkdir()
        config_path = tmp_path / "config.yaml"
        config_path.write_text("")
        fake_mod = self._fake_module(
            set(),
            {
                "summary": {"migrated": 0, "skipped": 0, "conflict": 0, "error": 0},
                "items": [],
            },
        )
        with (
            patch.object(claw_mod, "_find_migration_script", return_value=tmp_path / "s.py"),
            patch.object(claw_mod, "_load_migration_module", return_value=fake_mod),
            patch.object(claw_mod, "get_config_path", return_value=config_path),
            patch.object(claw_mod, "prompt_yes_no") as mock_prompt,
        ):
            claw_mod._cmd_migrate(
                self._make_args(str(openclaw_dir), dry_run=False, yes=True)
            )
        mock_prompt.assert_not_called()

    def test_handles_migration_error(self, tmp_path, capsys):
        openclaw_dir = tmp_path / ".openclaw"
        openclaw_dir.mkdir()
        config_path = tmp_path / "config.yaml"
        config_path.write_text("")
        with (
            patch.object(claw_mod, "_find_migration_script", return_value=tmp_path / "s.py"),
            patch.object(claw_mod, "_load_migration_module", side_effect=RuntimeError("boom")),
            patch.object(claw_mod, "get_config_path", return_value=config_path),
            patch.object(claw_mod, "save_config"),
            patch.object(claw_mod, "load_config", return_value={}),
        ):
            claw_mod._cmd_migrate(self._make_args(str(openclaw_dir)))
        assert "Migration failed" in capsys.readouterr().out

    def test_full_preset_enables_secrets(self, tmp_path, capsys):
        """The 'full' preset should set migrate_secrets=True automatically."""
        openclaw_dir = tmp_path / ".openclaw"
        openclaw_dir.mkdir()
        fake_mod = self._fake_module(
            set(),
            {
                "summary": {"migrated": 0, "skipped": 0, "conflict": 0, "error": 0},
                "items": [],
            },
        )
        with (
            patch.object(claw_mod, "_find_migration_script", return_value=tmp_path / "s.py"),
            patch.object(claw_mod, "_load_migration_module", return_value=fake_mod),
            patch.object(claw_mod, "get_config_path", return_value=tmp_path / "config.yaml"),
            patch.object(claw_mod, "save_config"),
            patch.object(claw_mod, "load_config", return_value={}),
        ):
            # migrate_secrets is left False, as when not explicitly set by user.
            claw_mod._cmd_migrate(self._make_args(str(openclaw_dir)))
        # Migrator should have been called with migrate_secrets=True
        call_kwargs = fake_mod.Migrator.call_args[1]
        assert call_kwargs["migrate_secrets"] is True
# ---------------------------------------------------------------------------
# _print_migration_report
# ---------------------------------------------------------------------------
class TestPrintMigrationReport:
    """Test the report formatting function."""

    def test_dry_run_report(self, capsys):
        report = {
            "summary": {"migrated": 2, "skipped": 1, "conflict": 1, "error": 0},
            "items": [
                {"kind": "soul", "status": "migrated", "destination": "/home/user/.hermes/SOUL.md"},
                {"kind": "memory", "status": "migrated", "destination": "/home/user/.hermes/memories/MEMORY.md"},
                {"kind": "skills", "status": "conflict", "reason": "already exists"},
                {"kind": "tts-assets", "status": "skipped", "reason": "not found"},
            ],
            "preset": "full",
        }
        claw_mod._print_migration_report(report, dry_run=True)
        out = capsys.readouterr().out
        for expected in ("Dry Run Results", "Would migrate", "2 would migrate", "--dry-run"):
            assert expected in out

    def test_execute_report(self, capsys):
        report = {
            "summary": {"migrated": 3, "skipped": 0, "conflict": 0, "error": 0},
            "items": [
                {"kind": "soul", "status": "migrated", "destination": "/home/user/.hermes/SOUL.md"},
            ],
            "output_dir": "/home/user/.hermes/migration/openclaw/20250312T120000",
        }
        claw_mod._print_migration_report(report, dry_run=False)
        out = capsys.readouterr().out
        for expected in ("Migration Results", "Migrated", "Full report saved to"):
            assert expected in out

    def test_empty_report(self, capsys):
        empty = {
            "summary": {"migrated": 0, "skipped": 0, "conflict": 0, "error": 0},
            "items": [],
        }
        claw_mod._print_migration_report(empty, dry_run=False)
        assert "Nothing to migrate" in capsys.readouterr().out

View file

@ -0,0 +1,107 @@
"""Tests for cmd_update — branch fallback when remote branch doesn't exist."""
import subprocess
from types import SimpleNamespace
from unittest.mock import patch
import pytest
from hermes_cli.main import cmd_update, PROJECT_ROOT
def _make_run_side_effect(branch="main", verify_ok=True, commit_count="0"):
"""Build a side_effect function for subprocess.run that simulates git commands."""
def side_effect(cmd, **kwargs):
joined = " ".join(str(c) for c in cmd)
# git rev-parse --abbrev-ref HEAD (get current branch)
if "rev-parse" in joined and "--abbrev-ref" in joined:
return subprocess.CompletedProcess(cmd, 0, stdout=f"{branch}\n", stderr="")
# git rev-parse --verify origin/{branch} (check remote branch exists)
if "rev-parse" in joined and "--verify" in joined:
rc = 0 if verify_ok else 128
return subprocess.CompletedProcess(cmd, rc, stdout="", stderr="")
# git rev-list HEAD..origin/{branch} --count
if "rev-list" in joined:
return subprocess.CompletedProcess(cmd, 0, stdout=f"{commit_count}\n", stderr="")
# Fallback: return a successful CompletedProcess with empty stdout
return subprocess.CompletedProcess(cmd, 0, stdout="", stderr="")
return side_effect
@pytest.fixture
def mock_args():
    """Bare args namespace — cmd_update does not read any attributes from it."""
    return SimpleNamespace()
class TestCmdUpdateBranchFallback:
    """cmd_update falls back to main when current branch has no remote counterpart."""

    @staticmethod
    def _git_invocations(mock_run):
        """Render each recorded subprocess.run call as one joined command string."""
        return [
            " ".join(str(part) for part in call.args[0])
            for call in mock_run.call_args_list
        ]

    @patch("shutil.which", return_value=None)
    @patch("subprocess.run")
    def test_update_falls_back_to_main_when_branch_not_on_remote(
        self, mock_run, _mock_which, mock_args, capsys
    ):
        mock_run.side_effect = _make_run_side_effect(
            branch="fix/stoicneko", verify_ok=False, commit_count="3"
        )
        cmd_update(mock_args)
        invocations = self._git_invocations(mock_run)
        # rev-list should use origin/main, not origin/fix/stoicneko
        rev_list = [c for c in invocations if "rev-list" in c]
        assert len(rev_list) == 1
        assert "origin/main" in rev_list[0]
        assert "origin/fix/stoicneko" not in rev_list[0]
        # pull should use main, not fix/stoicneko
        pulls = [c for c in invocations if "pull" in c]
        assert len(pulls) == 1
        assert "main" in pulls[0]

    @patch("shutil.which", return_value=None)
    @patch("subprocess.run")
    def test_update_uses_current_branch_when_on_remote(
        self, mock_run, _mock_which, mock_args, capsys
    ):
        mock_run.side_effect = _make_run_side_effect(
            branch="main", verify_ok=True, commit_count="2"
        )
        cmd_update(mock_args)
        invocations = self._git_invocations(mock_run)
        rev_list = [c for c in invocations if "rev-list" in c]
        assert len(rev_list) == 1
        assert "origin/main" in rev_list[0]
        pulls = [c for c in invocations if "pull" in c]
        assert len(pulls) == 1
        assert "main" in pulls[0]

    @patch("shutil.which", return_value=None)
    @patch("subprocess.run")
    def test_update_already_up_to_date(
        self, mock_run, _mock_which, mock_args, capsys
    ):
        mock_run.side_effect = _make_run_side_effect(
            branch="main", verify_ok=True, commit_count="0"
        )
        cmd_update(mock_args)
        assert "Already up to date!" in capsys.readouterr().out
        # Should NOT have called pull
        assert [c for c in self._git_invocations(mock_run) if "pull" in c] == []

View file

@ -0,0 +1,113 @@
"""Tests for _coalesce_session_name_args — multi-word session name merging."""
import pytest
from hermes_cli.main import _coalesce_session_name_args
class TestCoalesceSessionNameArgs:
    """Ensure unquoted multi-word session names are merged into one token."""

    @staticmethod
    def _check(argv, expected):
        """Assert that coalescing *argv* yields exactly *expected*."""
        assert _coalesce_session_name_args(argv) == expected

    # ── -c / --continue ──────────────────────────────────────────────────
    def test_continue_multiword_unquoted(self):
        """hermes -c Pokemon Agent Dev → -c 'Pokemon Agent Dev'"""
        self._check(["-c", "Pokemon", "Agent", "Dev"], ["-c", "Pokemon Agent Dev"])

    def test_continue_long_form_multiword(self):
        """hermes --continue Pokemon Agent Dev"""
        self._check(
            ["--continue", "Pokemon", "Agent", "Dev"],
            ["--continue", "Pokemon Agent Dev"],
        )

    def test_continue_single_word(self):
        """hermes -c MyProject (no merging needed)"""
        self._check(["-c", "MyProject"], ["-c", "MyProject"])

    def test_continue_already_quoted(self):
        """hermes -c 'Pokemon Agent Dev' (shell already merged)"""
        self._check(["-c", "Pokemon Agent Dev"], ["-c", "Pokemon Agent Dev"])

    def test_continue_bare_flag(self):
        """hermes -c (no name — means 'continue latest')"""
        self._check(["-c"], ["-c"])

    def test_continue_followed_by_flag(self):
        """hermes -c -w (no name consumed, -w stays separate)"""
        self._check(["-c", "-w"], ["-c", "-w"])

    def test_continue_multiword_then_flag(self):
        """hermes -c my project -w"""
        self._check(["-c", "my", "project", "-w"], ["-c", "my project", "-w"])

    def test_continue_multiword_then_subcommand(self):
        """hermes -c my project chat -q hello"""
        self._check(
            ["-c", "my", "project", "chat", "-q", "hello"],
            ["-c", "my project", "chat", "-q", "hello"],
        )

    # ── -r / --resume ────────────────────────────────────────────────────
    def test_resume_multiword(self):
        """hermes -r My Session Name"""
        self._check(["-r", "My", "Session", "Name"], ["-r", "My Session Name"])

    def test_resume_long_form_multiword(self):
        """hermes --resume My Session Name"""
        self._check(
            ["--resume", "My", "Session", "Name"], ["--resume", "My Session Name"]
        )

    def test_resume_multiword_then_flag(self):
        """hermes -r My Session -w"""
        self._check(["-r", "My", "Session", "-w"], ["-r", "My Session", "-w"])

    # ── combined flags ───────────────────────────────────────────────────
    def test_worktree_and_continue_multiword(self):
        """hermes -w -c Pokemon Agent Dev (the original failing case)"""
        self._check(
            ["-w", "-c", "Pokemon", "Agent", "Dev"],
            ["-w", "-c", "Pokemon Agent Dev"],
        )

    def test_continue_multiword_and_worktree(self):
        """hermes -c Pokemon Agent Dev -w (order reversed)"""
        self._check(
            ["-c", "Pokemon", "Agent", "Dev", "-w"],
            ["-c", "Pokemon Agent Dev", "-w"],
        )

    # ── passthrough (no session flags) ───────────────────────────────────
    def test_no_session_flags_passthrough(self):
        """hermes -w chat -q hello (nothing to merge)"""
        self._check(["-w", "chat", "-q", "hello"], ["-w", "chat", "-q", "hello"])

    def test_empty_argv(self):
        self._check([], [])

    # ── subcommand boundary ──────────────────────────────────────────────
    def test_stops_at_sessions_subcommand(self):
        """hermes -c my project sessions list → stops before 'sessions'"""
        self._check(
            ["-c", "my", "project", "sessions", "list"],
            ["-c", "my project", "sessions", "list"],
        )

    def test_stops_at_setup_subcommand(self):
        """hermes -c my setup → 'setup' is a subcommand, not part of name"""
        self._check(["-c", "my", "setup"], ["-c", "my", "setup"])

View file

@ -0,0 +1,506 @@
"""Tests for the central command registry and autocomplete."""
from prompt_toolkit.completion import CompleteEvent
from prompt_toolkit.document import Document
from hermes_cli.commands import (
COMMAND_REGISTRY,
COMMANDS,
COMMANDS_BY_CATEGORY,
CommandDef,
GATEWAY_KNOWN_COMMANDS,
SUBCOMMANDS,
SlashCommandAutoSuggest,
SlashCommandCompleter,
gateway_help_lines,
resolve_command,
slack_subcommand_map,
telegram_bot_commands,
)
def _completions(completer: SlashCommandCompleter, text: str):
    """Collect every completion the completer yields for *text* into a list."""
    event = CompleteEvent(completion_requested=True)
    return list(completer.get_completions(Document(text=text), event))
# ---------------------------------------------------------------------------
# CommandDef registry tests
# ---------------------------------------------------------------------------
class TestCommandRegistry:
    def test_registry_is_nonempty(self):
        assert len(COMMAND_REGISTRY) > 30

    def test_every_entry_is_commanddef(self):
        for entry in COMMAND_REGISTRY:
            assert isinstance(entry, CommandDef), f"Unexpected type: {type(entry)}"

    def test_no_duplicate_canonical_names(self):
        names = [cmd.name for cmd in COMMAND_REGISTRY]
        assert len(names) == len(set(names)), f"Duplicate names: {[n for n in names if names.count(n) > 1]}"

    def test_no_alias_collides_with_canonical_name(self):
        """An alias must not shadow another command's canonical name."""
        canonical_names = {cmd.name for cmd in COMMAND_REGISTRY}
        for cmd in COMMAND_REGISTRY:
            for alias in cmd.aliases:
                if alias not in canonical_names:
                    continue
                # reset -> new is intentional (reset IS an alias for new)
                target = next(c for c in COMMAND_REGISTRY if c.name == alias)
                # This should only happen if the alias points to the same entry
                assert resolve_command(alias).name == cmd.name or alias == cmd.name, \
                    f"Alias '{alias}' of '{cmd.name}' shadows canonical '{target.name}'"

    def test_every_entry_has_valid_category(self):
        valid_categories = {"Session", "Configuration", "Tools & Skills", "Info", "Exit"}
        for cmd in COMMAND_REGISTRY:
            assert cmd.category in valid_categories, f"{cmd.name} has invalid category '{cmd.category}'"

    def test_cli_only_and_gateway_only_are_mutually_exclusive(self):
        for cmd in COMMAND_REGISTRY:
            assert not (cmd.cli_only and cmd.gateway_only), \
                f"{cmd.name} cannot be both cli_only and gateway_only"
# ---------------------------------------------------------------------------
# resolve_command tests
# ---------------------------------------------------------------------------
class TestResolveCommand:
    def test_canonical_name_resolves(self):
        for canonical in ("help", "background"):
            assert resolve_command(canonical).name == canonical

    def test_alias_resolves_to_canonical(self):
        alias_targets = {
            "bg": "background",
            "reset": "new",
            "q": "quit",
            "exit": "quit",
            "gateway": "platforms",
            "set-home": "sethome",
            "reload_mcp": "reload-mcp",
        }
        for alias, canonical in alias_targets.items():
            assert resolve_command(alias).name == canonical

    def test_leading_slash_stripped(self):
        assert resolve_command("/help").name == "help"
        assert resolve_command("/bg").name == "background"

    def test_unknown_returns_none(self):
        assert resolve_command("nonexistent") is None
        assert resolve_command("") is None
# ---------------------------------------------------------------------------
# Derived dicts (backwards compat)
# ---------------------------------------------------------------------------
class TestDerivedDicts:
    def test_commands_dict_excludes_gateway_only(self):
        """gateway_only commands should NOT appear in the CLI COMMANDS dict."""
        for cmd in COMMAND_REGISTRY:
            if not cmd.gateway_only:
                continue
            assert f"/{cmd.name}" not in COMMANDS, \
                f"gateway_only command /{cmd.name} should not be in COMMANDS"

    def test_commands_dict_includes_all_cli_commands(self):
        for cmd in COMMAND_REGISTRY:
            if cmd.gateway_only:
                continue
            assert f"/{cmd.name}" in COMMANDS, \
                f"/{cmd.name} missing from COMMANDS dict"

    def test_commands_dict_includes_aliases(self):
        for alias in ("/bg", "/reset", "/q", "/exit", "/reload_mcp", "/gateway"):
            assert alias in COMMANDS

    def test_commands_by_category_covers_all_categories(self):
        registry_categories = {cmd.category for cmd in COMMAND_REGISTRY if not cmd.gateway_only}
        assert set(COMMANDS_BY_CATEGORY.keys()) == registry_categories

    def test_every_command_has_nonempty_description(self):
        for cmd, desc in COMMANDS.items():
            assert isinstance(desc, str) and len(desc) > 0, f"{cmd} has empty description"
# ---------------------------------------------------------------------------
# Gateway helpers
# ---------------------------------------------------------------------------
class TestGatewayKnownCommands:
    def test_excludes_cli_only(self):
        for cmd in COMMAND_REGISTRY:
            if not cmd.cli_only:
                continue
            assert cmd.name not in GATEWAY_KNOWN_COMMANDS, \
                f"cli_only command '{cmd.name}' should not be in GATEWAY_KNOWN_COMMANDS"

    def test_includes_gateway_commands(self):
        for cmd in COMMAND_REGISTRY:
            if cmd.cli_only:
                continue
            assert cmd.name in GATEWAY_KNOWN_COMMANDS
            for alias in cmd.aliases:
                assert alias in GATEWAY_KNOWN_COMMANDS

    def test_bg_alias_in_gateway(self):
        assert "bg" in GATEWAY_KNOWN_COMMANDS
        assert "background" in GATEWAY_KNOWN_COMMANDS

    def test_is_frozenset(self):
        assert isinstance(GATEWAY_KNOWN_COMMANDS, frozenset)
class TestGatewayHelpLines:
    def test_returns_nonempty_list(self):
        assert len(gateway_help_lines()) > 10

    def test_excludes_cli_only_commands(self):
        joined = "\n".join(gateway_help_lines())
        for cmd in COMMAND_REGISTRY:
            if not cmd.cli_only:
                continue
            assert f"`/{cmd.name}" not in joined, \
                f"cli_only command /{cmd.name} should not be in gateway help"

    def test_includes_alias_note_for_bg(self):
        matching = [line for line in gateway_help_lines() if "/background" in line]
        assert len(matching) == 1
        assert "/bg" in matching[0]
class TestTelegramBotCommands:
    def test_returns_list_of_tuples(self):
        pairs = telegram_bot_commands()
        assert len(pairs) > 10
        for name, desc in pairs:
            assert isinstance(name, str)
            assert isinstance(desc, str)

    def test_no_hyphens_in_command_names(self):
        """Telegram does not support hyphens in command names."""
        for name, _ in telegram_bot_commands():
            assert "-" not in name, f"Telegram command '{name}' contains a hyphen"

    def test_excludes_cli_only(self):
        names = {name for name, _ in telegram_bot_commands()}
        for cmd in COMMAND_REGISTRY:
            if not cmd.cli_only:
                continue
            # Telegram names use underscores, so translate before checking.
            assert cmd.name.replace("-", "_") not in names
class TestSlackSubcommandMap:
    def test_returns_dict(self):
        mapping = slack_subcommand_map()
        assert isinstance(mapping, dict)
        assert len(mapping) > 10

    def test_values_are_slash_prefixed(self):
        for key, val in slack_subcommand_map().items():
            assert val.startswith("/"), f"Slack mapping for '{key}' should start with /"

    def test_includes_aliases(self):
        mapping = slack_subcommand_map()
        for alias in ("bg", "reset"):
            assert alias in mapping

    def test_excludes_cli_only(self):
        mapping = slack_subcommand_map()
        for cmd in COMMAND_REGISTRY:
            if cmd.cli_only:
                assert cmd.name not in mapping
# ---------------------------------------------------------------------------
# Autocomplete (SlashCommandCompleter)
# ---------------------------------------------------------------------------
class TestSlashCommandCompleter:
    """Prefix completion, trailing-space behavior and skill-provider handling."""

    # -- basic prefix completion -----------------------------------------
    def test_builtin_prefix_completion_uses_shared_registry(self):
        """'/re' completes against the shared command registry."""
        names = {c.text for c in _completions(SlashCommandCompleter(), "/re")}
        assert "reset" in names
        assert "retry" in names
        assert "reload-mcp" in names

    def test_builtin_completion_display_meta_shows_description(self):
        results = _completions(SlashCommandCompleter(), "/help")
        assert len(results) == 1
        assert results[0].display_meta_text == "Show available commands"

    # -- exact-match trailing space --------------------------------------
    def test_exact_match_completion_adds_trailing_space(self):
        results = _completions(SlashCommandCompleter(), "/help")
        assert [c.text for c in results] == ["help "]

    def test_partial_match_does_not_add_trailing_space(self):
        results = _completions(SlashCommandCompleter(), "/hel")
        assert [c.text for c in results] == ["help"]

    # -- non-slash input returns nothing ---------------------------------
    def test_no_completions_for_non_slash_input(self):
        assert _completions(SlashCommandCompleter(), "help") == []

    def test_no_completions_for_empty_input(self):
        assert _completions(SlashCommandCompleter(), "") == []

    # -- skill commands via provider ------------------------------------
    def test_skill_commands_are_completed_from_provider(self):
        skill_completer = SlashCommandCompleter(
            skill_commands_provider=lambda: {
                "/gif-search": {"description": "Search for GIFs across providers"},
            }
        )
        results = _completions(skill_completer, "/gif")
        assert len(results) == 1
        assert results[0].text == "gif-search"
        assert results[0].display_text == "/gif-search"
        assert results[0].display_meta_text == "⚡ Search for GIFs across providers"

    def test_skill_exact_match_adds_trailing_space(self):
        skill_completer = SlashCommandCompleter(
            skill_commands_provider=lambda: {
                "/gif-search": {"description": "Search for GIFs"},
            }
        )
        results = _completions(skill_completer, "/gif-search")
        assert len(results) == 1
        assert results[0].text == "gif-search "

    def test_no_skill_provider_means_no_skill_completions(self):
        """Default (None) provider should not blow up or add completions."""
        # /gif doesn't match any builtin command
        assert _completions(SlashCommandCompleter(), "/gif") == []

    def test_skill_provider_exception_is_swallowed(self):
        """A broken provider should not crash autocomplete."""
        broken = SlashCommandCompleter(
            skill_commands_provider=lambda: (_ for _ in ()).throw(RuntimeError("boom")),
        )
        # Builtin matches must still be returned, with no exception raised.
        names = {c.text for c in _completions(broken, "/he")}
        assert "help" in names

    def test_skill_description_truncated_at_50_chars(self):
        long_desc = "A" * 80
        truncating = SlashCommandCompleter(
            skill_commands_provider=lambda: {
                "/long-skill": {"description": long_desc},
            }
        )
        results = _completions(truncating, "/long")
        assert len(results) == 1
        # NOTE(review): the expected value here is 50 chars + "..." with no
        # "⚡ " prefix, unlike the short-description case above — confirm
        # whether the prefix is intentionally dropped on truncation.
        assert results[0].display_meta_text == f"{'A' * 50}..."

    def test_skill_missing_description_uses_fallback(self):
        no_desc = SlashCommandCompleter(
            skill_commands_provider=lambda: {
                "/no-desc": {},
            }
        )
        results = _completions(no_desc, "/no-desc")
        assert len(results) == 1
        assert "Skill command" in results[0].display_meta_text
# ── SUBCOMMANDS extraction ──────────────────────────────────────────────
class TestSubcommands:
    """SUBCOMMANDS extraction from the command registry."""

    def test_explicit_subcommands_extracted(self):
        """Commands with explicit subcommands on CommandDef are extracted."""
        assert "/prompt" in SUBCOMMANDS
        assert "clear" in SUBCOMMANDS["/prompt"]

    def test_reasoning_has_subcommands(self):
        assert "/reasoning" in SUBCOMMANDS
        reasoning_subs = SUBCOMMANDS["/reasoning"]
        for expected in ("high", "show", "hide"):
            assert expected in reasoning_subs

    def test_voice_has_subcommands(self):
        assert "/voice" in SUBCOMMANDS
        voice_subs = SUBCOMMANDS["/voice"]
        assert "on" in voice_subs
        assert "off" in voice_subs

    def test_cron_has_subcommands(self):
        assert "/cron" in SUBCOMMANDS
        cron_subs = SUBCOMMANDS["/cron"]
        assert "list" in cron_subs
        assert "add" in cron_subs

    def test_commands_without_subcommands_not_in_dict(self):
        """Plain commands should not appear in SUBCOMMANDS."""
        for plain in ("/help", "/quit", "/clear"):
            assert plain not in SUBCOMMANDS
# ── Subcommand tab completion ───────────────────────────────────────────
class TestSubcommandCompletion:
    """Tab completion of subcommands after the command name."""

    def test_subcommand_completion_after_space(self):
        """Typing '/reasoning ' then Tab should show subcommands."""
        names = {c.text for c in _completions(SlashCommandCompleter(), "/reasoning ")}
        assert "high" in names
        assert "show" in names

    def test_subcommand_prefix_filters(self):
        """Typing '/reasoning sh' should only show 'show'."""
        names = {c.text for c in _completions(SlashCommandCompleter(), "/reasoning sh")}
        assert names == {"show"}

    def test_subcommand_exact_match_suppressed(self):
        """Typing the full subcommand shouldn't re-suggest it."""
        names = {c.text for c in _completions(SlashCommandCompleter(), "/reasoning show")}
        assert "show" not in names

    def test_no_subcommands_for_plain_command(self):
        """Commands without subcommands yield nothing after space."""
        assert _completions(SlashCommandCompleter(), "/help ") == []
# ── Two-stage /model completion ─────────────────────────────────────────
def _model_completer() -> SlashCommandCompleter:
    """Build a completer backed by canned provider/model data for tests."""
    models_by_provider = {
        "anthropic": ["claude-sonnet-4-20250514", "claude-opus-4-20250414"],
        "openrouter": ["anthropic/claude-sonnet-4", "google/gemini-2.5-pro"],
        "nous": ["hermes-3-llama-3.1-405b"],
    }

    def _provider_info() -> dict:
        # Return a fresh dict on every call, mirroring real provider behavior.
        return {
            "current_provider": "openrouter",
            "providers": {
                "anthropic": "Anthropic",
                "openrouter": "OpenRouter",
                "nous": "Nous Research",
            },
            "models_for": lambda provider: models_by_provider.get(provider, []),
        }

    return SlashCommandCompleter(model_completer_provider=_provider_info)
class TestModelCompletion:
    """Two-stage /model completion: providers first, then provider:model."""

    def test_stage1_shows_providers(self):
        names = {c.text for c in _completions(_model_completer(), "/model ")}
        assert "anthropic:" in names
        assert "openrouter:" in names
        assert "nous:" in names

    def test_stage1_current_provider_last(self):
        """The current provider is sorted to the end of the list."""
        ordered = [c.text for c in _completions(_model_completer(), "/model ")]
        assert ordered[-1] == "openrouter:"

    def test_stage1_current_provider_labeled(self):
        matches = [
            c
            for c in _completions(_model_completer(), "/model ")
            if c.text == "openrouter:"
        ]
        if not matches:
            raise AssertionError("openrouter: not found in completions")
        assert "current" in matches[0].display_meta_text.lower()

    def test_stage1_prefix_filters(self):
        names = {c.text for c in _completions(_model_completer(), "/model an")}
        assert names == {"anthropic:"}

    def test_stage2_shows_models(self):
        names = {c.text for c in _completions(_model_completer(), "/model anthropic:")}
        assert "anthropic:claude-sonnet-4-20250514" in names
        assert "anthropic:claude-opus-4-20250414" in names

    def test_stage2_prefix_filters_models(self):
        names = {
            c.text
            for c in _completions(_model_completer(), "/model anthropic:claude-s")
        }
        assert "anthropic:claude-sonnet-4-20250514" in names
        assert "anthropic:claude-opus-4-20250414" not in names

    def test_stage2_no_model_provider_returns_empty(self):
        """Without a model provider, /model completion yields nothing."""
        assert _completions(SlashCommandCompleter(), "/model ") == []
# ── Ghost text (SlashCommandAutoSuggest) ────────────────────────────────
def _suggestion(text: str, completer=None) -> str | None:
    """Return the ghost-text suggestion for *text*, or None if there is none."""

    class _StubBuffer:
        # get_suggestion never inspects the buffer in these tests.
        pass

    suggester = SlashCommandAutoSuggest(completer=completer)
    result = suggester.get_suggestion(_StubBuffer(), Document(text=text))
    return result.text if result else None
class TestGhostText:
    """Ghost-text (inline auto-suggest) behavior for slash commands."""

    def test_command_name_suggestion(self):
        """/he → 'lp'"""
        assert _suggestion("/he") == "lp"

    def test_command_name_suggestion_reasoning(self):
        """/rea → 'soning'"""
        assert _suggestion("/rea") == "soning"

    def test_no_suggestion_for_complete_command(self):
        """A fully typed command gets no ghost text."""
        assert _suggestion("/help") is None

    def test_subcommand_suggestion(self):
        """/reasoning h → 'igh'"""
        assert _suggestion("/reasoning h") == "igh"

    def test_subcommand_suggestion_show(self):
        """/reasoning sh → 'ow'"""
        assert _suggestion("/reasoning sh") == "ow"

    def test_no_suggestion_for_non_slash(self):
        """Plain text never triggers command ghost text."""
        assert _suggestion("hello") is None

    def test_model_stage1_ghost_text(self):
        """/model a → 'nthropic:'"""
        assert _suggestion("/model a", completer=_model_completer()) == "nthropic:"

    def test_model_stage2_ghost_text(self):
        """/model anthropic:cl → rest of first matching model"""
        ghost = _suggestion("/model anthropic:cl", completer=_model_completer())
        assert ghost is not None
        assert ghost.startswith("aude-")

View file

@ -0,0 +1,379 @@
"""Tests for hermes_cli configuration management."""
import os
from pathlib import Path
from unittest.mock import patch, MagicMock
import yaml
from hermes_cli.config import (
DEFAULT_CONFIG,
get_hermes_home,
ensure_hermes_home,
load_config,
load_env,
migrate_config,
save_config,
save_env_value,
save_env_value_secure,
sanitize_env_file,
_sanitize_env_lines,
)
class TestGetHermesHome:
    """Resolution of the Hermes home directory."""

    def test_default_path(self):
        """Without HERMES_HOME set, the default is ~/.hermes."""
        with patch.dict(os.environ, {}, clear=False):
            os.environ.pop("HERMES_HOME", None)
            assert get_hermes_home() == Path.home() / ".hermes"

    def test_env_override(self):
        """HERMES_HOME overrides the default location."""
        with patch.dict(os.environ, {"HERMES_HOME": "/custom/path"}):
            assert get_hermes_home() == Path("/custom/path")
class TestEnsureHermesHome:
    """Directory and default-file creation by ensure_hermes_home."""

    def test_creates_subdirs(self, tmp_path):
        """All expected subdirectories are created."""
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            ensure_hermes_home()
        for subdir in ("cron", "sessions", "logs", "memories"):
            assert (tmp_path / subdir).is_dir()

    def test_creates_default_soul_md_if_missing(self, tmp_path):
        """A non-empty default SOUL.md is written when absent."""
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            ensure_hermes_home()
        soul = tmp_path / "SOUL.md"
        assert soul.exists()
        assert soul.read_text(encoding="utf-8").strip() != ""

    def test_does_not_overwrite_existing_soul_md(self, tmp_path):
        """An existing SOUL.md is preserved byte-for-byte."""
        soul = tmp_path / "SOUL.md"
        soul.write_text("custom soul", encoding="utf-8")
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            ensure_hermes_home()
        assert soul.read_text(encoding="utf-8") == "custom soul"
class TestLoadConfigDefaults:
    """Default values and legacy-key normalization in load_config."""

    def test_returns_defaults_when_no_file(self, tmp_path):
        """With no config.yaml on disk, defaults are returned and normalized."""
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            cfg = load_config()
        assert cfg["model"] == DEFAULT_CONFIG["model"]
        assert cfg["agent"]["max_turns"] == DEFAULT_CONFIG["agent"]["max_turns"]
        # max_turns lives only under the "agent" section, never at the root.
        assert "max_turns" not in cfg
        assert "terminal" in cfg
        assert cfg["terminal"]["backend"] == "local"

    def test_legacy_root_level_max_turns_migrates_to_agent_config(self, tmp_path):
        """A root-level max_turns from old configs moves under agent."""
        (tmp_path / "config.yaml").write_text("max_turns: 42\n")
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            cfg = load_config()
        assert cfg["agent"]["max_turns"] == 42
        assert "max_turns" not in cfg
class TestSaveAndLoadRoundtrip:
    """save_config/load_config round-trips and legacy-key normalization."""

    def test_roundtrip(self, tmp_path):
        """Values written via save_config survive a reload and disk read."""
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            cfg = load_config()
            cfg["model"] = "test/custom-model"
            cfg["agent"]["max_turns"] = 42
            save_config(cfg)
            reloaded = load_config()
        assert reloaded["model"] == "test/custom-model"
        assert reloaded["agent"]["max_turns"] == 42
        on_disk = yaml.safe_load((tmp_path / "config.yaml").read_text())
        assert on_disk["agent"]["max_turns"] == 42
        assert "max_turns" not in on_disk

    def test_save_config_normalizes_legacy_root_level_max_turns(self, tmp_path):
        """A root-level max_turns is moved under agent on save."""
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            save_config({"model": "test/custom-model", "max_turns": 37})
        on_disk = yaml.safe_load((tmp_path / "config.yaml").read_text())
        assert on_disk["agent"]["max_turns"] == 37
        assert "max_turns" not in on_disk

    def test_nested_values_preserved(self, tmp_path):
        """Nested section values survive the save/load round-trip."""
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            cfg = load_config()
            cfg["terminal"]["timeout"] = 999
            save_config(cfg)
            reloaded = load_config()
        assert reloaded["terminal"]["timeout"] == 999
class TestSaveEnvValueSecure:
    """Secret persistence: no leakage to stdout, env sync, file permissions."""

    def test_save_env_value_writes_without_stdout(self, tmp_path, capsys):
        """Saving a secret must never echo anything to stdout/stderr."""
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            save_env_value("TENOR_API_KEY", "sk-test-secret")
            captured = capsys.readouterr()
            assert captured.out == ""
            assert captured.err == ""
            assert load_env()["TENOR_API_KEY"] == "sk-test-secret"

    def test_secure_save_returns_metadata_only(self, tmp_path):
        """The return payload carries metadata, never the secret value."""
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            result = save_env_value_secure("GITHUB_TOKEN", "ghp_test_secret")
        assert result == {
            "success": True,
            "stored_as": "GITHUB_TOKEN",
            "validated": False,
        }
        assert "secret" not in str(result).lower()

    def test_save_env_value_updates_process_environment(self, tmp_path):
        """The saved value is mirrored into os.environ immediately."""
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}, clear=False):
            os.environ.pop("TENOR_API_KEY", None)
            save_env_value("TENOR_API_KEY", "sk-test-secret")
            assert os.environ["TENOR_API_KEY"] == "sk-test-secret"

    def test_save_env_value_hardens_file_permissions_on_posix(self, tmp_path):
        """On POSIX, .env is chmod'd to owner-only read/write (0600)."""
        # NOTE(review): this silently passes on Windows; consider
        # pytest.mark.skipif(os.name == "nt", ...) for an explicit skip report.
        if os.name == "nt":
            return
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            save_env_value("TENOR_API_KEY", "sk-test-secret")
        mode = (tmp_path / ".env").stat().st_mode & 0o777
        assert mode == 0o600
class TestSaveConfigAtomicity:
    """Verify save_config uses atomic writes (tempfile + os.replace)."""

    def test_no_partial_write_on_crash(self, tmp_path):
        """If save_config crashes mid-write, the previous file stays intact."""
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            # Write an initial config
            config = load_config()
            config["model"] = "original-model"
            save_config(config)
            config_path = tmp_path / "config.yaml"
            assert config_path.exists()
            # Simulate a crash during yaml.dump by making atomic_yaml_write's
            # yaml.dump raise after the temp file is created but before replace.
            # (The target "utils.yaml.dump" patches the yaml module as
            # referenced from within the utils module — presumably where
            # atomic_yaml_write lives; confirm against utils.)
            with patch("utils.yaml.dump", side_effect=OSError("disk full")):
                try:
                    config["model"] = "should-not-persist"
                    save_config(config)
                except OSError:
                    pass
            # Original file must still be intact
            reloaded = load_config()
            assert reloaded["model"] == "original-model"

    def test_no_leftover_temp_files(self, tmp_path):
        """Failed writes must clean up their temp files."""
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            config = load_config()
            save_config(config)
            with patch("utils.yaml.dump", side_effect=OSError("disk full")):
                try:
                    save_config(config)
                except OSError:
                    pass
            # No .tmp files should remain
            tmp_files = list(tmp_path.glob(".*config*.tmp"))
            assert tmp_files == []

    def test_atomic_write_creates_valid_yaml(self, tmp_path):
        """The written file must be valid YAML matching the input."""
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            config = load_config()
            config["model"] = "test/atomic-model"
            config["agent"]["max_turns"] = 77
            save_config(config)
            # Read raw YAML to verify it's valid and correct
            config_path = tmp_path / "config.yaml"
            with open(config_path) as f:
                raw = yaml.safe_load(f)
            assert raw["model"] == "test/atomic-model"
            assert raw["agent"]["max_turns"] == 77
class TestSanitizeEnvLines:
    """Tests for .env file corruption repair.

    The repair targets KEY=VALUE pairs that were concatenated onto a single
    line; splitting is only attempted for key names the sanitizer knows about,
    to avoid false positives on arbitrary user variables.
    """

    def test_splits_concatenated_keys(self):
        """Two KEY=VALUE pairs jammed on one line get split."""
        lines = ["ANTHROPIC_API_KEY=sk-ant-xxxOPENAI_BASE_URL=https://api.openai.com/v1\n"]
        result = _sanitize_env_lines(lines)
        assert result == [
            "ANTHROPIC_API_KEY=sk-ant-xxx\n",
            "OPENAI_BASE_URL=https://api.openai.com/v1\n",
        ]

    def test_preserves_clean_file(self):
        """A well-formed .env file passes through unchanged (modulo trailing newlines)."""
        lines = [
            "OPENROUTER_API_KEY=sk-or-xxx\n",
            "FIRECRAWL_API_KEY=fc-xxx\n",
            "# a comment\n",
            "\n",
        ]
        result = _sanitize_env_lines(lines)
        assert result == lines

    def test_preserves_comments_and_blanks(self):
        """Comment and blank lines are never rewritten."""
        lines = ["# comment\n", "\n", "KEY=val\n"]
        result = _sanitize_env_lines(lines)
        assert result == lines

    def test_adds_missing_trailing_newline(self):
        """Lines missing trailing newline get one added."""
        lines = ["FOO_BAR=baz"]
        result = _sanitize_env_lines(lines)
        assert result == ["FOO_BAR=baz\n"]

    def test_three_concatenated_keys(self):
        """Three known keys on one line all get separated."""
        lines = ["FAL_KEY=111FIRECRAWL_API_KEY=222GITHUB_TOKEN=333\n"]
        result = _sanitize_env_lines(lines)
        assert result == [
            "FAL_KEY=111\n",
            "FIRECRAWL_API_KEY=222\n",
            "GITHUB_TOKEN=333\n",
        ]

    def test_value_with_equals_sign_not_split(self):
        """A value containing '=' shouldn't be falsely split (lowercase in value)."""
        lines = ["OPENAI_BASE_URL=https://api.example.com/v1?key=abc123\n"]
        result = _sanitize_env_lines(lines)
        assert result == lines

    def test_unknown_keys_not_split(self):
        """Unknown key names on one line are NOT split (avoids false positives)."""
        lines = ["CUSTOM_VAR=value123OTHER_THING=value456\n"]
        result = _sanitize_env_lines(lines)
        # Unknown keys stay on one line — no false split
        assert len(result) == 1

    def test_value_ending_with_digits_still_splits(self):
        """Concatenation is detected even when value ends with digits."""
        lines = ["OPENROUTER_API_KEY=sk-or-v1-abc123OPENAI_BASE_URL=https://api.openai.com/v1\n"]
        result = _sanitize_env_lines(lines)
        assert len(result) == 2
        assert result[0].startswith("OPENROUTER_API_KEY=")
        assert result[1].startswith("OPENAI_BASE_URL=")

    def test_save_env_value_fixes_corruption_on_write(self, tmp_path):
        """save_env_value sanitizes corrupted lines when writing a new key."""
        env_file = tmp_path / ".env"
        env_file.write_text(
            "ANTHROPIC_API_KEY=sk-antOPENAI_BASE_URL=https://api.openai.com/v1\n"
            "FAL_KEY=existing\n"
        )
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            save_env_value("MESSAGING_CWD", "/tmp")
        content = env_file.read_text()
        lines = content.strip().split("\n")
        # Corrupted line should be split, new key added
        assert "ANTHROPIC_API_KEY=sk-ant" in lines
        assert "OPENAI_BASE_URL=https://api.openai.com/v1" in lines
        assert "MESSAGING_CWD=/tmp" in lines

    def test_sanitize_env_file_returns_fix_count(self, tmp_path):
        """sanitize_env_file reports how many entries were fixed."""
        env_file = tmp_path / ".env"
        env_file.write_text(
            "FAL_KEY=good\n"
            "OPENROUTER_API_KEY=valFIRECRAWL_API_KEY=val2\n"
        )
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            fixes = sanitize_env_file()
        assert fixes > 0
        # Verify file is now clean
        content = env_file.read_text()
        assert "OPENROUTER_API_KEY=val\n" in content
        assert "FIRECRAWL_API_KEY=val2\n" in content

    def test_sanitize_env_file_noop_on_clean_file(self, tmp_path):
        """No changes when file is already clean."""
        env_file = tmp_path / ".env"
        env_file.write_text("GOOD_KEY=good\nOTHER_KEY=other\n")
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            fixes = sanitize_env_file()
        assert fixes == 0
class TestOptionalEnvVarsRegistry:
    """Verify that key env vars are registered in OPTIONAL_ENV_VARS."""

    def test_tavily_api_key_registered(self):
        """TAVILY_API_KEY is listed in OPTIONAL_ENV_VARS."""
        from hermes_cli.config import OPTIONAL_ENV_VARS
        assert "TAVILY_API_KEY" in OPTIONAL_ENV_VARS

    def test_tavily_api_key_is_tool_category(self):
        """TAVILY_API_KEY is in the 'tool' category."""
        from hermes_cli.config import OPTIONAL_ENV_VARS
        entry = OPTIONAL_ENV_VARS["TAVILY_API_KEY"]
        assert entry["category"] == "tool"

    def test_tavily_api_key_is_password(self):
        """TAVILY_API_KEY is marked as password."""
        from hermes_cli.config import OPTIONAL_ENV_VARS
        entry = OPTIONAL_ENV_VARS["TAVILY_API_KEY"]
        assert entry["password"] is True

    def test_tavily_api_key_has_url(self):
        """TAVILY_API_KEY has a URL."""
        from hermes_cli.config import OPTIONAL_ENV_VARS
        entry = OPTIONAL_ENV_VARS["TAVILY_API_KEY"]
        assert entry["url"] == "https://app.tavily.com/home"

    def test_tavily_in_env_vars_by_version(self):
        """TAVILY_API_KEY is listed in ENV_VARS_BY_VERSION."""
        from hermes_cli.config import ENV_VARS_BY_VERSION
        registered = [
            var
            for vars_list in ENV_VARS_BY_VERSION.values()
            for var in vars_list
        ]
        assert "TAVILY_API_KEY" in registered
class TestAnthropicTokenMigration:
    """Test that config version 8→9 clears ANTHROPIC_TOKEN."""

    def _write_config_version(self, tmp_path, version):
        """Write a minimal config.yaml pinned at *version*."""
        import yaml
        payload = yaml.safe_dump({"_config_version": version})
        (tmp_path / "config.yaml").write_text(payload)

    def test_clears_token_on_upgrade_to_v9(self, tmp_path):
        """ANTHROPIC_TOKEN is cleared unconditionally when upgrading to v9."""
        self._write_config_version(tmp_path, 8)
        (tmp_path / ".env").write_text("ANTHROPIC_TOKEN=old-token\n")
        env = {"HERMES_HOME": str(tmp_path), "ANTHROPIC_TOKEN": "old-token"}
        with patch.dict(os.environ, env):
            migrate_config(interactive=False, quiet=True)
            assert load_env().get("ANTHROPIC_TOKEN") == ""

    def test_skips_on_version_9_or_later(self, tmp_path):
        """Already at v9 — ANTHROPIC_TOKEN is not touched."""
        self._write_config_version(tmp_path, 9)
        (tmp_path / ".env").write_text("ANTHROPIC_TOKEN=current-token\n")
        env = {"HERMES_HOME": str(tmp_path), "ANTHROPIC_TOKEN": "current-token"}
        with patch.dict(os.environ, env):
            migrate_config(interactive=False, quiet=True)
            assert load_env().get("ANTHROPIC_TOKEN") == "current-token"

View file

@ -0,0 +1,208 @@
"""Tests for hermes_cli.copilot_auth — Copilot token validation and resolution."""
import os
import pytest
from unittest.mock import patch, MagicMock
class TestTokenValidation:
    """Token type validation."""

    def test_classic_pat_rejected(self):
        """Classic ghp_ PATs are rejected with an explanatory message."""
        from hermes_cli.copilot_auth import validate_copilot_token
        ok, message = validate_copilot_token("ghp_abcdefghijklmnop1234")
        assert ok is False
        assert "Classic Personal Access Tokens" in message
        assert "ghp_" in message

    def test_oauth_token_accepted(self):
        from hermes_cli.copilot_auth import validate_copilot_token
        ok, _message = validate_copilot_token("gho_abcdefghijklmnop1234")
        assert ok is True

    def test_fine_grained_pat_accepted(self):
        from hermes_cli.copilot_auth import validate_copilot_token
        ok, _message = validate_copilot_token("github_pat_abcdefghijklmnop1234")
        assert ok is True

    def test_github_app_token_accepted(self):
        from hermes_cli.copilot_auth import validate_copilot_token
        ok, _message = validate_copilot_token("ghu_abcdefghijklmnop1234")
        assert ok is True

    def test_empty_token_rejected(self):
        from hermes_cli.copilot_auth import validate_copilot_token
        ok, _message = validate_copilot_token("")
        assert ok is False

    def test_is_classic_pat(self):
        """Only the ghp_ prefix is classified as a classic PAT."""
        from hermes_cli.copilot_auth import is_classic_pat
        expectations = {
            "ghp_abc123": True,
            "gho_abc123": False,
            "github_pat_abc": False,
            "": False,
        }
        for token, expected in expectations.items():
            assert is_classic_pat(token) is expected
class TestResolveToken:
    """Token resolution with env var priority."""

    def test_copilot_github_token_first_priority(self, monkeypatch):
        from hermes_cli.copilot_auth import resolve_copilot_token
        monkeypatch.setenv("COPILOT_GITHUB_TOKEN", "gho_copilot_first")
        monkeypatch.setenv("GH_TOKEN", "gho_gh_second")
        monkeypatch.setenv("GITHUB_TOKEN", "gho_github_third")
        assert resolve_copilot_token() == ("gho_copilot_first", "COPILOT_GITHUB_TOKEN")

    def test_gh_token_second_priority(self, monkeypatch):
        from hermes_cli.copilot_auth import resolve_copilot_token
        monkeypatch.delenv("COPILOT_GITHUB_TOKEN", raising=False)
        monkeypatch.setenv("GH_TOKEN", "gho_gh_second")
        monkeypatch.setenv("GITHUB_TOKEN", "gho_github_third")
        assert resolve_copilot_token() == ("gho_gh_second", "GH_TOKEN")

    def test_github_token_third_priority(self, monkeypatch):
        from hermes_cli.copilot_auth import resolve_copilot_token
        for var in ("COPILOT_GITHUB_TOKEN", "GH_TOKEN"):
            monkeypatch.delenv(var, raising=False)
        monkeypatch.setenv("GITHUB_TOKEN", "gho_github_third")
        assert resolve_copilot_token() == ("gho_github_third", "GITHUB_TOKEN")

    def test_classic_pat_in_env_skipped(self, monkeypatch):
        """Classic PATs in env vars should be skipped, not returned."""
        from hermes_cli.copilot_auth import resolve_copilot_token
        monkeypatch.setenv("COPILOT_GITHUB_TOKEN", "ghp_classic_pat_nope")
        monkeypatch.delenv("GH_TOKEN", raising=False)
        monkeypatch.setenv("GITHUB_TOKEN", "gho_valid_oauth")
        # Should skip the ghp_ token and find the gho_ one
        assert resolve_copilot_token() == ("gho_valid_oauth", "GITHUB_TOKEN")

    def test_gh_cli_fallback(self, monkeypatch):
        """With no env vars set, the token comes from `gh auth token`."""
        from hermes_cli.copilot_auth import resolve_copilot_token
        for var in ("COPILOT_GITHUB_TOKEN", "GH_TOKEN", "GITHUB_TOKEN"):
            monkeypatch.delenv(var, raising=False)
        with patch("hermes_cli.copilot_auth._try_gh_cli_token", return_value="gho_from_cli"):
            assert resolve_copilot_token() == ("gho_from_cli", "gh auth token")

    def test_gh_cli_classic_pat_raises(self, monkeypatch):
        """A classic PAT coming from the gh CLI is a hard error."""
        from hermes_cli.copilot_auth import resolve_copilot_token
        for var in ("COPILOT_GITHUB_TOKEN", "GH_TOKEN", "GITHUB_TOKEN"):
            monkeypatch.delenv(var, raising=False)
        with patch("hermes_cli.copilot_auth._try_gh_cli_token", return_value="ghp_classic"):
            with pytest.raises(ValueError, match="classic PAT"):
                resolve_copilot_token()

    def test_no_token_returns_empty(self, monkeypatch):
        """No env vars and no gh CLI token yields an empty result."""
        from hermes_cli.copilot_auth import resolve_copilot_token
        for var in ("COPILOT_GITHUB_TOKEN", "GH_TOKEN", "GITHUB_TOKEN"):
            monkeypatch.delenv(var, raising=False)
        with patch("hermes_cli.copilot_auth._try_gh_cli_token", return_value=None):
            assert resolve_copilot_token() == ("", "")
class TestRequestHeaders:
    """Copilot API header generation."""

    def test_default_headers_include_openai_intent(self):
        from hermes_cli.copilot_auth import copilot_request_headers
        headers = copilot_request_headers()
        assert headers["Openai-Intent"] == "conversation-edits"
        assert headers["User-Agent"] == "HermesAgent/1.0"
        assert "Editor-Version" in headers

    def test_agent_turn_sets_initiator(self):
        from hermes_cli.copilot_auth import copilot_request_headers
        assert copilot_request_headers(is_agent_turn=True)["x-initiator"] == "agent"

    def test_user_turn_sets_initiator(self):
        from hermes_cli.copilot_auth import copilot_request_headers
        assert copilot_request_headers(is_agent_turn=False)["x-initiator"] == "user"

    def test_vision_header(self):
        from hermes_cli.copilot_auth import copilot_request_headers
        vision_headers = copilot_request_headers(is_vision=True)
        assert vision_headers["Copilot-Vision-Request"] == "true"

    def test_no_vision_header_by_default(self):
        from hermes_cli.copilot_auth import copilot_request_headers
        assert "Copilot-Vision-Request" not in copilot_request_headers()
class TestCopilotDefaultHeaders:
    """The models.py copilot_default_headers uses copilot_auth."""

    def test_includes_openai_intent(self):
        from hermes_cli.models import copilot_default_headers
        headers = copilot_default_headers()
        assert "Openai-Intent" in headers
        assert headers["Openai-Intent"] == "conversation-edits"

    def test_includes_x_initiator(self):
        from hermes_cli.models import copilot_default_headers
        assert "x-initiator" in copilot_default_headers()
class TestApiModeSelection:
    """API mode selection matching opencode's shouldUseCopilotResponsesApi."""

    def test_gpt5_uses_responses(self):
        """All 5.1+ GPT variants route to the Responses API."""
        from hermes_cli.models import _should_use_copilot_responses_api
        for model in (
            "gpt-5.4",
            "gpt-5.4-mini",
            "gpt-5.3-codex",
            "gpt-5.2-codex",
            "gpt-5.2",
            "gpt-5.1-codex-max",
        ):
            assert _should_use_copilot_responses_api(model) is True

    def test_gpt5_mini_excluded(self):
        from hermes_cli.models import _should_use_copilot_responses_api
        assert _should_use_copilot_responses_api("gpt-5-mini") is False

    def test_gpt4_uses_chat(self):
        from hermes_cli.models import _should_use_copilot_responses_api
        for model in ("gpt-4.1", "gpt-4o", "gpt-4o-mini"):
            assert _should_use_copilot_responses_api(model) is False

    def test_non_gpt_uses_chat(self):
        from hermes_cli.models import _should_use_copilot_responses_api
        for model in (
            "claude-sonnet-4.6",
            "claude-opus-4.6",
            "gemini-2.5-pro",
            "grok-code-fast-1",
        ):
            assert _should_use_copilot_responses_api(model) is False
class TestEnvVarOrder:
    """PROVIDER_REGISTRY has correct env var order."""

    def test_copilot_env_vars_include_copilot_github_token(self):
        from hermes_cli.auth import PROVIDER_REGISTRY
        env_vars = PROVIDER_REGISTRY["copilot"].api_key_env_vars
        assert "COPILOT_GITHUB_TOKEN" in env_vars
        # COPILOT_GITHUB_TOKEN should be first
        assert env_vars[0] == "COPILOT_GITHUB_TOKEN"

    def test_copilot_env_vars_order_matches_docs(self):
        from hermes_cli.auth import PROVIDER_REGISTRY
        expected = ("COPILOT_GITHUB_TOKEN", "GH_TOKEN", "GITHUB_TOKEN")
        assert PROVIDER_REGISTRY["copilot"].api_key_env_vars == expected

View file

@ -0,0 +1,107 @@
"""Tests for hermes_cli.cron command handling."""
from argparse import Namespace
import pytest
from cron.jobs import create_job, get_job, list_jobs
from hermes_cli.cron import cron_command
@pytest.fixture()
def tmp_cron_dir(tmp_path, monkeypatch):
    """Redirect cron storage paths (jobs, output) into a temp directory."""
    cron_root = tmp_path / "cron"
    monkeypatch.setattr("cron.jobs.CRON_DIR", cron_root)
    monkeypatch.setattr("cron.jobs.JOBS_FILE", cron_root / "jobs.json")
    monkeypatch.setattr("cron.jobs.OUTPUT_DIR", cron_root / "output")
    return tmp_path
class TestCronCommandLifecycle:
    """End-to-end pause/resume/run, edit and create flows for cron_command."""

    def test_pause_resume_run(self, tmp_cron_dir, capsys):
        """pause → resume → run transitions job state and prints confirmations."""
        job = create_job(prompt="Check server status", schedule="every 1h")
        cron_command(Namespace(cron_command="pause", job_id=job["id"]))
        paused = get_job(job["id"])
        assert paused["state"] == "paused"
        cron_command(Namespace(cron_command="resume", job_id=job["id"]))
        resumed = get_job(job["id"])
        assert resumed["state"] == "scheduled"
        # A manual run leaves the job scheduled (it does not consume the job).
        cron_command(Namespace(cron_command="run", job_id=job["id"]))
        triggered = get_job(job["id"])
        assert triggered["state"] == "scheduled"
        out = capsys.readouterr().out
        assert "Paused job" in out
        assert "Resumed job" in out
        assert "Triggered job" in out

    def test_edit_can_replace_and_clear_skills(self, tmp_cron_dir, capsys):
        """Editing replaces the skills list; --clear-skills empties it."""
        job = create_job(
            prompt="Combine skill outputs",
            schedule="every 1h",
            skill="blogwatcher",
        )
        # First edit: replace schedule/prompt/name and the skills list.
        cron_command(
            Namespace(
                cron_command="edit",
                job_id=job["id"],
                schedule="every 2h",
                prompt="Revised prompt",
                name="Edited Job",
                deliver=None,
                repeat=None,
                skill=None,
                skills=["find-nearby", "blogwatcher"],
                clear_skills=False,
            )
        )
        updated = get_job(job["id"])
        assert updated["skills"] == ["find-nearby", "blogwatcher"]
        assert updated["name"] == "Edited Job"
        assert updated["prompt"] == "Revised prompt"
        # "every 2h" is normalized to a minute-based display string.
        assert updated["schedule_display"] == "every 120m"
        # Second edit: all fields None except clear_skills — only skills change.
        cron_command(
            Namespace(
                cron_command="edit",
                job_id=job["id"],
                schedule=None,
                prompt=None,
                name=None,
                deliver=None,
                repeat=None,
                skill=None,
                skills=None,
                clear_skills=True,
            )
        )
        cleared = get_job(job["id"])
        assert cleared["skills"] == []
        assert cleared["skill"] is None
        out = capsys.readouterr().out
        assert "Updated job" in out

    def test_create_with_multiple_skills(self, tmp_cron_dir, capsys):
        """Creating a job with a skills list stores all of them."""
        cron_command(
            Namespace(
                cron_command="create",
                schedule="every 1h",
                prompt="Use both skills",
                name="Skill combo",
                deliver=None,
                repeat=None,
                skill=None,
                skills=["blogwatcher", "find-nearby"],
            )
        )
        out = capsys.readouterr().out
        assert "Created job" in out
        jobs = list_jobs()
        assert len(jobs) == 1
        assert jobs[0]["skills"] == ["blogwatcher", "find-nearby"]
        assert jobs[0]["name"] == "Skill combo"

View file

@ -0,0 +1,138 @@
"""Tests for hermes_cli.doctor."""
import os
import sys
import types
from argparse import Namespace
from types import SimpleNamespace
import pytest
import hermes_cli.doctor as doctor
import hermes_cli.gateway as gateway_cli
from hermes_cli import doctor as doctor_mod
from hermes_cli.doctor import _has_provider_env_config
class TestProviderEnvDetection:
    """Detection of provider configuration in raw .env text."""

    def test_detects_openai_api_key(self):
        """An API key plus base URL counts as provider configuration."""
        env_text = "OPENAI_BASE_URL=http://localhost:1234/v1\nOPENAI_API_KEY=***"
        assert _has_provider_env_config(env_text)

    def test_detects_custom_endpoint_without_openrouter_key(self):
        """A custom base URL alone is enough to count as configured."""
        assert _has_provider_env_config("OPENAI_BASE_URL=http://localhost:8080/v1\n")

    def test_returns_false_when_no_provider_settings(self):
        assert not _has_provider_env_config("TERMINAL_ENV=local\n")
class TestDoctorToolAvailabilityOverrides:
    """Doctor-specific overrides that promote honcho when it is configured."""

    def test_marks_honcho_available_when_configured(self, monkeypatch):
        monkeypatch.setattr(doctor, "_honcho_is_configured_for_doctor", lambda: True)
        entry = {"name": "honcho", "env_vars": [], "tools": ["query_user_context"]}
        available, unavailable = doctor._apply_doctor_tool_availability_overrides([], [entry])
        assert available == ["honcho"]
        assert unavailable == []

    def test_leaves_honcho_unavailable_when_not_configured(self, monkeypatch):
        monkeypatch.setattr(doctor, "_honcho_is_configured_for_doctor", lambda: False)
        entry = {"name": "honcho", "env_vars": [], "tools": ["query_user_context"]}
        available, unavailable = doctor._apply_doctor_tool_availability_overrides([], [entry])
        assert available == []
        assert unavailable == [entry]
class TestHonchoDoctorConfigDetection:
    """_honcho_is_configured_for_doctor() inspects the honcho client config."""

    @staticmethod
    def _patch_config(monkeypatch, **attrs):
        # Replace the config factory with one returning the given attributes.
        monkeypatch.setattr(
            "honcho_integration.client.HonchoClientConfig.from_global_config",
            lambda: SimpleNamespace(**attrs),
        )

    def test_reports_configured_when_enabled_with_api_key(self, monkeypatch):
        self._patch_config(monkeypatch, enabled=True, api_key="***")
        assert doctor._honcho_is_configured_for_doctor()

    def test_reports_not_configured_without_api_key(self, monkeypatch):
        self._patch_config(monkeypatch, enabled=True, api_key="")
        assert not doctor._honcho_is_configured_for_doctor()
def test_run_doctor_sets_interactive_env_for_tool_checks(monkeypatch, tmp_path):
    """Doctor should present CLI-gated tools as available in CLI context."""
    project_root = tmp_path / "project"
    hermes_home = tmp_path / ".hermes"
    for directory in (project_root, hermes_home):
        directory.mkdir()
    monkeypatch.setattr(doctor_mod, "PROJECT_ROOT", project_root)
    monkeypatch.setattr(doctor_mod, "HERMES_HOME", hermes_home)
    monkeypatch.delenv("HERMES_INTERACTIVE", raising=False)

    observed = {}

    def spy_check_tool_availability(*args, **kwargs):
        # Capture the env var as seen during the check, then bail out early.
        observed["interactive"] = os.getenv("HERMES_INTERACTIVE")
        raise SystemExit(0)

    monkeypatch.setitem(
        sys.modules,
        "model_tools",
        types.SimpleNamespace(
            check_tool_availability=spy_check_tool_availability,
            TOOLSET_REQUIREMENTS={},
        ),
    )
    with pytest.raises(SystemExit):
        doctor_mod.run_doctor(Namespace(fix=False))
    assert observed["interactive"] == "1"
def test_check_gateway_service_linger_warns_when_disabled(monkeypatch, tmp_path, capsys):
    """An installed unit plus disabled linger yields a warning and a fix hint."""
    unit_path = tmp_path / "hermes-gateway.service"
    unit_path.write_text("[Unit]\n")
    monkeypatch.setattr(gateway_cli, "is_linux", lambda: True)
    monkeypatch.setattr(gateway_cli, "get_systemd_unit_path", lambda: unit_path)
    monkeypatch.setattr(gateway_cli, "get_systemd_linger_status", lambda: (False, ""))

    issues = []
    doctor._check_gateway_service_linger(issues)

    printed = capsys.readouterr().out
    for expected in ("Gateway Service", "Systemd linger disabled", "loginctl enable-linger"):
        assert expected in printed
    assert issues == [
        "Enable linger for the gateway user service: sudo loginctl enable-linger $USER"
    ]
def test_check_gateway_service_linger_skips_when_service_not_installed(monkeypatch, tmp_path, capsys):
    """No unit file on disk -> the linger check stays completely silent."""
    monkeypatch.setattr(gateway_cli, "is_linux", lambda: True)
    monkeypatch.setattr(gateway_cli, "get_systemd_unit_path", lambda: tmp_path / "missing.service")
    issues = []
    doctor._check_gateway_service_linger(issues)
    assert issues == []
    assert capsys.readouterr().out == ""

View file

@ -0,0 +1,70 @@
import importlib
import os
import sys
from pathlib import Path
from hermes_cli.env_loader import load_hermes_dotenv
def test_user_env_overrides_stale_shell_values(tmp_path, monkeypatch):
    """Values from the user .env win over stale values inherited from the shell."""
    hermes_home = tmp_path / "hermes"
    hermes_home.mkdir()
    env_file = hermes_home / ".env"
    env_file.write_text("OPENAI_BASE_URL=https://new.example/v1\n", encoding="utf-8")
    monkeypatch.setenv("OPENAI_BASE_URL", "https://old.example/v1")

    assert load_hermes_dotenv(hermes_home=hermes_home) == [env_file]
    assert os.getenv("OPENAI_BASE_URL") == "https://new.example/v1"
def test_project_env_overrides_stale_shell_values_when_user_env_missing(tmp_path, monkeypatch):
    """Without a user .env, the project .env still overrides shell values."""
    hermes_home = tmp_path / "hermes"  # intentionally never created
    project_env = tmp_path / ".env"
    project_env.write_text("OPENAI_BASE_URL=https://project.example/v1\n", encoding="utf-8")
    monkeypatch.setenv("OPENAI_BASE_URL", "https://old.example/v1")

    assert load_hermes_dotenv(hermes_home=hermes_home, project_env=project_env) == [project_env]
    assert os.getenv("OPENAI_BASE_URL") == "https://project.example/v1"
def test_user_env_takes_precedence_over_project_env(tmp_path, monkeypatch):
    """User .env wins for shared keys; project .env still fills in the gaps."""
    hermes_home = tmp_path / "hermes"
    hermes_home.mkdir()
    user_env = hermes_home / ".env"
    project_env = tmp_path / ".env"
    user_env.write_text("OPENAI_BASE_URL=https://user.example/v1\n", encoding="utf-8")
    project_env.write_text(
        "OPENAI_BASE_URL=https://project.example/v1\nOPENAI_API_KEY=project-key\n",
        encoding="utf-8",
    )
    monkeypatch.setenv("OPENAI_BASE_URL", "https://old.example/v1")
    monkeypatch.delenv("OPENAI_API_KEY", raising=False)

    loaded = load_hermes_dotenv(hermes_home=hermes_home, project_env=project_env)

    assert loaded == [user_env, project_env]
    assert os.getenv("OPENAI_BASE_URL") == "https://user.example/v1"
    assert os.getenv("OPENAI_API_KEY") == "project-key"
def test_main_import_applies_user_env_over_shell_values(tmp_path, monkeypatch):
    """Importing hermes_cli.main re-applies the user .env over the shell env."""
    hermes_home = tmp_path / "hermes"
    hermes_home.mkdir()
    (hermes_home / ".env").write_text(
        "OPENAI_BASE_URL=https://new.example/v1\nHERMES_INFERENCE_PROVIDER=custom\n",
        encoding="utf-8",
    )
    monkeypatch.setenv("HERMES_HOME", str(hermes_home))
    monkeypatch.setenv("OPENAI_BASE_URL", "https://old.example/v1")
    monkeypatch.setenv("HERMES_INFERENCE_PROVIDER", "openrouter")

    # Drop any cached module so the import-time env loading runs again.
    sys.modules.pop("hermes_cli.main", None)
    importlib.import_module("hermes_cli.main")

    assert os.getenv("OPENAI_BASE_URL") == "https://new.example/v1"
    assert os.getenv("HERMES_INFERENCE_PROVIDER") == "custom"

View file

@ -0,0 +1,254 @@
"""Tests for hermes_cli.gateway."""
import signal
from types import SimpleNamespace
from unittest.mock import patch, call
import hermes_cli.gateway as gateway
class TestSystemdLingerStatus:
    """get_systemd_linger_status() parses loginctl's yes/no answer."""

    def _arrange(self, monkeypatch, loginctl_stdout):
        # Common setup: Linux host, known user, loginctl present, canned output.
        monkeypatch.setattr(gateway, "is_linux", lambda: True)
        monkeypatch.setenv("USER", "alice")
        monkeypatch.setattr("shutil.which", lambda name: "/usr/bin/loginctl")
        monkeypatch.setattr(
            gateway.subprocess,
            "run",
            lambda *args, **kwargs: SimpleNamespace(
                returncode=0, stdout=loginctl_stdout, stderr=""
            ),
        )

    def test_reports_enabled(self, monkeypatch):
        self._arrange(monkeypatch, "yes\n")
        assert gateway.get_systemd_linger_status() == (True, "")

    def test_reports_disabled(self, monkeypatch):
        self._arrange(monkeypatch, "no\n")
        assert gateway.get_systemd_linger_status() == (False, "")
def test_systemd_status_warns_when_linger_disabled(monkeypatch, tmp_path, capsys):
    """Status output mentions disabled linger alongside the running service."""
    unit_path = tmp_path / "hermes-gateway.service"
    unit_path.write_text("[Unit]\n")
    monkeypatch.setattr(gateway, "get_systemd_unit_path", lambda system=False: unit_path)
    monkeypatch.setattr(gateway, "get_systemd_linger_status", lambda: (False, ""))

    def scripted_run(cmd, capture_output=False, text=False, check=False):
        # Only the status and is-active queries are expected here.
        if cmd[:4] == ["systemctl", "--user", "status", gateway.get_service_name()]:
            return SimpleNamespace(returncode=0, stdout="", stderr="")
        if cmd[:3] == ["systemctl", "--user", "is-active"]:
            return SimpleNamespace(returncode=0, stdout="active\n", stderr="")
        raise AssertionError(f"Unexpected command: {cmd}")

    monkeypatch.setattr(gateway.subprocess, "run", scripted_run)
    gateway.systemd_status(deep=False)

    printed = capsys.readouterr().out
    assert "gateway service is running" in printed
    assert "Systemd linger is disabled" in printed
    assert "loginctl enable-linger" in printed
def test_systemd_install_checks_linger_status(monkeypatch, tmp_path, capsys):
    """User-scope install writes the unit, enables it, and checks linger once."""
    unit_path = tmp_path / "systemd" / "user" / "hermes-gateway.service"
    monkeypatch.setattr(gateway, "get_systemd_unit_path", lambda system=False: unit_path)

    executed = []

    def record_run(cmd, check=False, **kwargs):
        executed.append((cmd, check))
        return SimpleNamespace(returncode=0, stdout="", stderr="")

    monkeypatch.setattr(gateway.subprocess, "run", record_run)
    linger_checks = []
    monkeypatch.setattr(gateway, "_ensure_linger_enabled", lambda: linger_checks.append(True))

    gateway.systemd_install(force=False)

    assert unit_path.exists()
    assert [cmd for cmd, _ in executed] == [
        ["systemctl", "--user", "daemon-reload"],
        ["systemctl", "--user", "enable", gateway.get_service_name()],
    ]
    assert linger_checks == [True]
    assert "User service installed and enabled" in capsys.readouterr().out
def test_systemd_install_system_scope_skips_linger_and_uses_systemctl(monkeypatch, tmp_path, capsys):
    """System-scope install uses plain systemctl and never touches linger."""
    unit_path = tmp_path / "etc" / "systemd" / "system" / "hermes-gateway.service"
    monkeypatch.setattr(gateway, "get_systemd_unit_path", lambda system=False: unit_path)
    monkeypatch.setattr(
        gateway,
        "generate_systemd_unit",
        lambda system=False, run_as_user=None: f"scope={system} user={run_as_user}\n",
    )
    monkeypatch.setattr(gateway, "_require_root_for_system_service", lambda action: None)

    executed = []

    def record_run(cmd, check=False, **kwargs):
        executed.append((cmd, check))
        return SimpleNamespace(returncode=0, stdout="", stderr="")

    monkeypatch.setattr(gateway.subprocess, "run", record_run)
    linger_checks = []
    monkeypatch.setattr(gateway, "_ensure_linger_enabled", lambda: linger_checks.append(True))

    gateway.systemd_install(force=False, system=True, run_as_user="alice")

    printed = capsys.readouterr().out
    assert unit_path.exists()
    assert unit_path.read_text(encoding="utf-8") == "scope=True user=alice\n"
    assert [cmd for cmd, _ in executed] == [
        ["systemctl", "daemon-reload"],
        ["systemctl", "enable", gateway.get_service_name()],
    ]
    assert linger_checks == []
    assert "Configured to run as: alice" not in printed  # generated test unit has no User= line
    assert "System service installed and enabled" in printed
def test_conflicting_systemd_units_warning(monkeypatch, tmp_path, capsys):
    """Having both user and system units installed triggers an explicit warning."""
    units = {
        False: tmp_path / "user" / "hermes-gateway.service",
        True: tmp_path / "system" / "hermes-gateway.service",
    }
    for unit in units.values():
        unit.parent.mkdir(parents=True)
        unit.write_text("[Unit]\n", encoding="utf-8")
    monkeypatch.setattr(gateway, "get_systemd_unit_path", lambda system=False: units[system])

    gateway.print_systemd_scope_conflict_warning()

    printed = capsys.readouterr().out
    assert "Both user and system gateway services are installed" in printed
    assert "hermes gateway uninstall" in printed
    assert "--system" in printed
def test_install_linux_gateway_from_setup_system_choice_without_root_prints_followup(monkeypatch, capsys):
    """Choosing system scope without root prints sudo follow-ups instead of installing."""
    monkeypatch.setattr(gateway, "prompt_linux_gateway_install_scope", lambda: "system")
    monkeypatch.setattr(gateway.os, "geteuid", lambda: 1000)
    monkeypatch.setattr(gateway, "_default_system_service_user", lambda: "alice")

    def forbid_install(*args, **kwargs):
        raise AssertionError("should not install")

    monkeypatch.setattr(gateway, "systemd_install", forbid_install)

    result = gateway.install_linux_gateway_from_setup(force=False)

    printed = capsys.readouterr().out
    assert result == ("system", False)
    assert "sudo hermes gateway install --system --run-as-user alice" in printed
    assert "sudo hermes gateway start --system" in printed
def test_install_linux_gateway_from_setup_system_choice_as_root_installs(monkeypatch):
    """As root, the system-scope choice installs immediately with the default user."""
    monkeypatch.setattr(gateway, "prompt_linux_gateway_install_scope", lambda: "system")
    monkeypatch.setattr(gateway.os, "geteuid", lambda: 0)
    monkeypatch.setattr(gateway, "_default_system_service_user", lambda: "alice")

    install_calls = []

    def record_install(force=False, system=False, run_as_user=None):
        install_calls.append((force, system, run_as_user))

    monkeypatch.setattr(gateway, "systemd_install", record_install)

    assert gateway.install_linux_gateway_from_setup(force=True) == ("system", True)
    assert install_calls == [(True, True, "alice")]
# ---------------------------------------------------------------------------
# _wait_for_gateway_exit
# ---------------------------------------------------------------------------
class TestWaitForGatewayExit:
    """PID-based wait with force-kill on timeout.

    Fix: removed two dead `import time as _time` statements — the tests patch
    `time.monotonic`/`time.sleep` by dotted name via monkeypatch and never use
    the `_time` alias.
    """

    def test_returns_immediately_when_no_pid(self, monkeypatch):
        """If get_running_pid returns None, exit instantly."""
        monkeypatch.setattr("gateway.status.get_running_pid", lambda: None)
        # Should return without sleeping at all.
        gateway._wait_for_gateway_exit(timeout=1.0, force_after=0.5)

    def test_returns_when_process_exits_gracefully(self, monkeypatch):
        """Process exits after a couple of polls — no SIGKILL needed."""
        poll_count = 0

        def mock_get_running_pid():
            nonlocal poll_count
            poll_count += 1
            return 12345 if poll_count <= 2 else None

        monkeypatch.setattr("gateway.status.get_running_pid", mock_get_running_pid)
        monkeypatch.setattr("time.sleep", lambda _: None)
        gateway._wait_for_gateway_exit(timeout=10.0, force_after=999.0)
        # Should have polled until None was returned.
        assert poll_count == 3

    def test_force_kills_after_grace_period(self, monkeypatch):
        """When the process doesn't exit, SIGKILL the saved PID."""
        # Simulate monotonic time advancing past force_after.
        call_num = 0

        def fake_monotonic():
            nonlocal call_num
            call_num += 1
            # First two calls: initial deadline + force_deadline setup (time 0)
            # Then each loop iteration advances time
            return call_num * 2.0  # 2, 4, 6, 8, ...

        kills = []

        def mock_kill(pid, sig):
            kills.append((pid, sig))

        # get_running_pid returns the PID until kill is sent, then None
        def mock_get_running_pid():
            return None if kills else 42

        monkeypatch.setattr("time.monotonic", fake_monotonic)
        monkeypatch.setattr("time.sleep", lambda _: None)
        monkeypatch.setattr("gateway.status.get_running_pid", mock_get_running_pid)
        monkeypatch.setattr("os.kill", mock_kill)
        gateway._wait_for_gateway_exit(timeout=10.0, force_after=5.0)
        assert (42, signal.SIGKILL) in kills

    def test_handles_process_already_gone_on_kill(self, monkeypatch):
        """ProcessLookupError during SIGKILL is not fatal."""
        call_num = 0

        def fake_monotonic():
            nonlocal call_num
            call_num += 1
            return call_num * 3.0  # Jump past force_after quickly

        def mock_kill(pid, sig):
            raise ProcessLookupError

        monkeypatch.setattr("time.monotonic", fake_monotonic)
        monkeypatch.setattr("time.sleep", lambda _: None)
        monkeypatch.setattr("gateway.status.get_running_pid", lambda: 99)
        monkeypatch.setattr("os.kill", mock_kill)
        # Should not raise — ProcessLookupError means it's already gone.
        gateway._wait_for_gateway_exit(timeout=10.0, force_after=2.0)

View file

@ -0,0 +1,120 @@
"""Tests for gateway linger auto-enable behavior on headless Linux installs."""
from types import SimpleNamespace
import hermes_cli.gateway as gateway
class TestEnsureLingerEnabled:
    """_ensure_linger_enabled(): already-enabled short-circuits, loginctl path, manual guidance."""

    @staticmethod
    def _base_setup(monkeypatch, linger_file_exists):
        # Linux host, known user, and a fake Path whose exists() is scripted.
        monkeypatch.setattr(gateway, "is_linux", lambda: True)
        monkeypatch.setattr("getpass.getuser", lambda: "testuser")
        monkeypatch.setattr(
            gateway, "Path", lambda _path: SimpleNamespace(exists=lambda: linger_file_exists)
        )

    def test_linger_already_enabled_via_file(self, monkeypatch, capsys):
        self._base_setup(monkeypatch, linger_file_exists=True)
        run_calls = []
        monkeypatch.setattr(
            gateway.subprocess, "run", lambda *args, **kwargs: run_calls.append((args, kwargs))
        )
        gateway._ensure_linger_enabled()
        assert "Systemd linger is enabled" in capsys.readouterr().out
        assert run_calls == []

    def test_status_enabled_skips_enable(self, monkeypatch, capsys):
        self._base_setup(monkeypatch, linger_file_exists=False)
        monkeypatch.setattr(gateway, "get_systemd_linger_status", lambda: (True, ""))
        run_calls = []
        monkeypatch.setattr(
            gateway.subprocess, "run", lambda *args, **kwargs: run_calls.append((args, kwargs))
        )
        gateway._ensure_linger_enabled()
        assert "Systemd linger is enabled" in capsys.readouterr().out
        assert run_calls == []

    def test_loginctl_success_enables_linger(self, monkeypatch, capsys):
        self._base_setup(monkeypatch, linger_file_exists=False)
        monkeypatch.setattr(gateway, "get_systemd_linger_status", lambda: (False, ""))
        monkeypatch.setattr("shutil.which", lambda name: "/usr/bin/loginctl")
        run_calls = []

        def record_run(cmd, capture_output=False, text=False, check=False):
            run_calls.append((cmd, capture_output, text, check))
            return SimpleNamespace(returncode=0, stdout="", stderr="")

        monkeypatch.setattr(gateway.subprocess, "run", record_run)
        gateway._ensure_linger_enabled()
        printed = capsys.readouterr().out
        assert "Enabling linger" in printed
        assert "Linger enabled" in printed
        assert run_calls == [(["loginctl", "enable-linger", "testuser"], True, True, False)]

    def test_missing_loginctl_shows_manual_guidance(self, monkeypatch, capsys):
        self._base_setup(monkeypatch, linger_file_exists=False)
        monkeypatch.setattr(
            gateway, "get_systemd_linger_status", lambda: (None, "loginctl not found")
        )
        monkeypatch.setattr("shutil.which", lambda name: None)
        run_calls = []
        monkeypatch.setattr(
            gateway.subprocess, "run", lambda *args, **kwargs: run_calls.append((args, kwargs))
        )
        gateway._ensure_linger_enabled()
        printed = capsys.readouterr().out
        assert "sudo loginctl enable-linger testuser" in printed
        assert "loginctl not found" in printed
        assert run_calls == []

    def test_loginctl_failure_shows_manual_guidance(self, monkeypatch, capsys):
        self._base_setup(monkeypatch, linger_file_exists=False)
        monkeypatch.setattr(gateway, "get_systemd_linger_status", lambda: (False, ""))
        monkeypatch.setattr("shutil.which", lambda name: "/usr/bin/loginctl")
        monkeypatch.setattr(
            gateway.subprocess,
            "run",
            lambda *args, **kwargs: SimpleNamespace(
                returncode=1, stdout="", stderr="Permission denied"
            ),
        )
        gateway._ensure_linger_enabled()
        printed = capsys.readouterr().out
        assert "sudo loginctl enable-linger testuser" in printed
        assert "Permission denied" in printed
def test_systemd_install_calls_linger_helper(monkeypatch, tmp_path, capsys):
    """systemd_install() must invoke the linger helper exactly once for user scope."""
    unit_path = tmp_path / "systemd" / "user" / "hermes-gateway.service"
    monkeypatch.setattr(gateway, "get_systemd_unit_path", lambda system=False: unit_path)

    systemctl_calls = []

    def record_run(cmd, check=False, **kwargs):
        systemctl_calls.append((cmd, check))
        return SimpleNamespace(returncode=0, stdout="", stderr="")

    monkeypatch.setattr(gateway.subprocess, "run", record_run)
    linger_calls = []
    monkeypatch.setattr(gateway, "_ensure_linger_enabled", lambda: linger_calls.append(True))

    gateway.systemd_install(force=False)

    assert unit_path.exists()
    assert [cmd for cmd, _ in systemctl_calls] == [
        ["systemctl", "--user", "daemon-reload"],
        ["systemctl", "--user", "enable", gateway.get_service_name()],
    ]
    assert linger_calls == [True]
    assert "User service installed and enabled" in capsys.readouterr().out

View file

@ -0,0 +1,22 @@
from hermes_cli.gateway import _runtime_health_lines
def test_runtime_health_lines_include_fatal_platform_and_startup_reason(monkeypatch):
    """Fatal platform errors and the last startup failure both surface as warnings."""
    runtime_status = {
        "gateway_state": "startup_failed",
        "exit_reason": "telegram conflict",
        "platforms": {
            "telegram": {
                "state": "fatal",
                "error_message": "another poller is active",
            }
        },
    }
    monkeypatch.setattr("gateway.status.read_runtime_status", lambda: runtime_status)

    health_lines = _runtime_health_lines()

    assert "⚠ telegram: another poller is active" in health_lines
    assert "⚠ Last startup issue: telegram conflict" in health_lines

View file

@ -0,0 +1,428 @@
"""Tests for gateway service management helpers."""
import os
from types import SimpleNamespace
import hermes_cli.gateway as gateway_cli
class TestSystemdServiceRefresh:
    """install/start/restart rewrite an outdated unit file and reload systemd.

    Fix: the three tests triplicated identical stale-unit setup and call
    recording; the shared scenario is extracted into ``_assert_refreshes_unit``
    so the per-test bodies state only the entry point and expected commands.
    """

    def _assert_refreshes_unit(self, tmp_path, monkeypatch, action, expected_cmds):
        """Shared scenario: a stale unit on disk is rewritten, then systemd is driven.

        ``action`` is the gateway_cli entry point under test; ``expected_cmds``
        are the first two systemctl invocations it must issue.
        """
        unit_path = tmp_path / "hermes-gateway.service"
        unit_path.write_text("old unit\n", encoding="utf-8")
        monkeypatch.setattr(gateway_cli, "get_systemd_unit_path", lambda system=False: unit_path)
        monkeypatch.setattr(
            gateway_cli,
            "generate_systemd_unit",
            lambda system=False, run_as_user=None: "new unit\n",
        )
        calls = []

        def fake_run(cmd, check=True, **kwargs):
            calls.append(cmd)
            return SimpleNamespace(returncode=0, stdout="", stderr="")

        monkeypatch.setattr(gateway_cli.subprocess, "run", fake_run)
        action()
        assert unit_path.read_text(encoding="utf-8") == "new unit\n"
        assert calls[:2] == expected_cmds

    def test_systemd_install_repairs_outdated_unit_without_force(self, tmp_path, monkeypatch):
        self._assert_refreshes_unit(
            tmp_path,
            monkeypatch,
            gateway_cli.systemd_install,
            [
                ["systemctl", "--user", "daemon-reload"],
                ["systemctl", "--user", "enable", gateway_cli.get_service_name()],
            ],
        )

    def test_systemd_start_refreshes_outdated_unit(self, tmp_path, monkeypatch):
        self._assert_refreshes_unit(
            tmp_path,
            monkeypatch,
            gateway_cli.systemd_start,
            [
                ["systemctl", "--user", "daemon-reload"],
                ["systemctl", "--user", "start", gateway_cli.get_service_name()],
            ],
        )

    def test_systemd_restart_refreshes_outdated_unit(self, tmp_path, monkeypatch):
        self._assert_refreshes_unit(
            tmp_path,
            monkeypatch,
            gateway_cli.systemd_restart,
            [
                ["systemctl", "--user", "daemon-reload"],
                ["systemctl", "--user", "restart", gateway_cli.get_service_name()],
            ],
        )
class TestGeneratedSystemdUnits:
    """Properties of the rendered systemd unit text."""

    def test_user_unit_avoids_recursive_execstop_and_uses_extended_stop_timeout(self):
        unit_text = gateway_cli.generate_systemd_unit(system=False)
        assert "ExecStart=" in unit_text
        assert "ExecStop=" not in unit_text
        assert "TimeoutStopSec=60" in unit_text

    def test_user_unit_includes_resolved_node_directory_in_path(self, monkeypatch):
        node_bin = "/home/test/.nvm/versions/node/v24.14.0/bin"
        monkeypatch.setattr(
            gateway_cli.shutil,
            "which",
            lambda cmd: f"{node_bin}/node" if cmd == "node" else None,
        )
        assert node_bin in gateway_cli.generate_systemd_unit(system=False)

    def test_system_unit_avoids_recursive_execstop_and_uses_extended_stop_timeout(self):
        unit_text = gateway_cli.generate_systemd_unit(system=True)
        assert "ExecStart=" in unit_text
        assert "ExecStop=" not in unit_text
        assert "TimeoutStopSec=60" in unit_text
        assert "WantedBy=multi-user.target" in unit_text
class TestGatewayStopCleanup:
    """`gateway stop` sweeps stray manual processes after stopping the service."""

    def test_stop_sweeps_manual_gateway_processes_after_service_stop(self, tmp_path, monkeypatch):
        unit_path = tmp_path / "hermes-gateway.service"
        unit_path.write_text("unit\n", encoding="utf-8")
        monkeypatch.setattr(gateway_cli, "is_linux", lambda: True)
        monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
        monkeypatch.setattr(gateway_cli, "get_systemd_unit_path", lambda system=False: unit_path)

        stop_calls = []
        sweep_calls = []
        monkeypatch.setattr(
            gateway_cli, "systemd_stop", lambda system=False: stop_calls.append("stop")
        )

        def record_sweep(force=False):
            sweep_calls.append(force)
            return 2  # pretend two stray processes were found and killed

        monkeypatch.setattr(gateway_cli, "kill_gateway_processes", record_sweep)

        gateway_cli.gateway_command(SimpleNamespace(gateway_command="stop"))

        assert stop_calls == ["stop"]
        assert sweep_calls == [False]
class TestLaunchdServiceRecovery:
    """macOS launchd plist repair, reload-and-retry, and stale-status reporting."""

    def test_launchd_install_repairs_outdated_plist_without_force(self, tmp_path, monkeypatch):
        plist_path = tmp_path / "ai.hermes.gateway.plist"
        plist_path.write_text("<plist>old content</plist>", encoding="utf-8")
        monkeypatch.setattr(gateway_cli, "get_launchd_plist_path", lambda: plist_path)
        launchctl_calls = []

        def record_run(cmd, check=False, **kwargs):
            launchctl_calls.append(cmd)
            return SimpleNamespace(returncode=0, stdout="", stderr="")

        monkeypatch.setattr(gateway_cli.subprocess, "run", record_run)
        gateway_cli.launchd_install()
        assert "--replace" in plist_path.read_text(encoding="utf-8")
        assert launchctl_calls[:2] == [
            ["launchctl", "unload", str(plist_path)],
            ["launchctl", "load", str(plist_path)],
        ]

    def test_launchd_start_reloads_unloaded_job_and_retries(self, tmp_path, monkeypatch):
        plist_path = tmp_path / "ai.hermes.gateway.plist"
        plist_path.write_text(gateway_cli.generate_launchd_plist(), encoding="utf-8")
        launchctl_calls = []

        def record_run(cmd, check=False, **kwargs):
            launchctl_calls.append(cmd)
            # Fail only the FIRST `launchctl start` so the retry succeeds.
            if cmd == ["launchctl", "start", "ai.hermes.gateway"] and launchctl_calls.count(cmd) == 1:
                raise gateway_cli.subprocess.CalledProcessError(
                    3, cmd, stderr="Could not find service"
                )
            return SimpleNamespace(returncode=0, stdout="", stderr="")

        monkeypatch.setattr(gateway_cli, "get_launchd_plist_path", lambda: plist_path)
        monkeypatch.setattr(gateway_cli.subprocess, "run", record_run)
        gateway_cli.launchd_start()
        assert launchctl_calls == [
            ["launchctl", "start", "ai.hermes.gateway"],
            ["launchctl", "load", str(plist_path)],
            ["launchctl", "start", "ai.hermes.gateway"],
        ]

    def test_launchd_status_reports_local_stale_plist_when_unloaded(self, tmp_path, monkeypatch, capsys):
        plist_path = tmp_path / "ai.hermes.gateway.plist"
        plist_path.write_text("<plist>old content</plist>", encoding="utf-8")
        monkeypatch.setattr(gateway_cli, "get_launchd_plist_path", lambda: plist_path)
        monkeypatch.setattr(
            gateway_cli.subprocess,
            "run",
            lambda *args, **kwargs: SimpleNamespace(
                returncode=113, stdout="", stderr="Could not find service"
            ),
        )
        gateway_cli.launchd_status()
        printed = capsys.readouterr().out
        assert str(plist_path) in printed
        assert "stale" in printed.lower()
        assert "not loaded" in printed.lower()
class TestGatewayServiceDetection:
    """_is_service_running() falls back from user scope to system scope."""

    def test_is_service_running_checks_system_scope_when_user_scope_is_inactive(self, monkeypatch):
        monkeypatch.setattr(gateway_cli, "is_linux", lambda: True)
        monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
        units = {
            False: SimpleNamespace(exists=lambda: True),
            True: SimpleNamespace(exists=lambda: True),
        }
        monkeypatch.setattr(
            gateway_cli, "get_systemd_unit_path", lambda system=False: units[system]
        )

        def scripted_run(cmd, capture_output=True, text=True, **kwargs):
            service = gateway_cli.get_service_name()
            if cmd == ["systemctl", "--user", "is-active", service]:
                return SimpleNamespace(returncode=0, stdout="inactive\n", stderr="")
            if cmd == ["systemctl", "is-active", service]:
                return SimpleNamespace(returncode=0, stdout="active\n", stderr="")
            raise AssertionError(f"Unexpected command: {cmd}")

        monkeypatch.setattr(gateway_cli.subprocess, "run", scripted_run)
        assert gateway_cli._is_service_running() is True
class TestGatewaySystemServiceRouting:
    """gateway_command() routes install/status/restart to the right scope."""

    def test_gateway_install_passes_system_flags(self, monkeypatch):
        monkeypatch.setattr(gateway_cli, "is_linux", lambda: True)
        monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
        install_calls = []

        def record_install(force=False, system=False, run_as_user=None):
            install_calls.append((force, system, run_as_user))

        monkeypatch.setattr(gateway_cli, "systemd_install", record_install)
        gateway_cli.gateway_command(
            SimpleNamespace(gateway_command="install", force=True, system=True, run_as_user="alice")
        )
        assert install_calls == [(True, True, "alice")]

    def test_gateway_status_prefers_system_service_when_only_system_unit_exists(self, monkeypatch):
        monkeypatch.setattr(gateway_cli, "is_linux", lambda: True)
        monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
        units = {
            False: SimpleNamespace(exists=lambda: False),  # user unit absent
            True: SimpleNamespace(exists=lambda: True),  # system unit present
        }
        monkeypatch.setattr(
            gateway_cli, "get_systemd_unit_path", lambda system=False: units[system]
        )
        status_calls = []
        monkeypatch.setattr(
            gateway_cli,
            "systemd_status",
            lambda deep=False, system=False: status_calls.append((deep, system)),
        )
        gateway_cli.gateway_command(
            SimpleNamespace(gateway_command="status", deep=False, system=False)
        )
        assert status_calls == [(False, False)]

    def test_gateway_restart_does_not_fallback_to_foreground_when_launchd_restart_fails(self, tmp_path, monkeypatch):
        plist_path = tmp_path / "ai.hermes.gateway.plist"
        plist_path.write_text("plist\n", encoding="utf-8")
        monkeypatch.setattr(gateway_cli, "is_linux", lambda: False)
        monkeypatch.setattr(gateway_cli, "is_macos", lambda: True)
        monkeypatch.setattr(gateway_cli, "get_launchd_plist_path", lambda: plist_path)

        def failing_restart():
            raise gateway_cli.subprocess.CalledProcessError(
                5, ["launchctl", "start", "ai.hermes.gateway"]
            )

        monkeypatch.setattr(gateway_cli, "launchd_restart", failing_restart)
        foreground_runs = []
        monkeypatch.setattr(
            gateway_cli,
            "run_gateway",
            lambda verbose=False, replace=False: foreground_runs.append((verbose, replace)),
        )
        monkeypatch.setattr(gateway_cli, "kill_gateway_processes", lambda force=False: 0)

        try:
            gateway_cli.gateway_command(SimpleNamespace(gateway_command="restart", system=False))
        except SystemExit as exc:
            assert exc.code == 1
        else:
            raise AssertionError("Expected gateway_command to exit when service restart fails")
        assert foreground_runs == []
class TestDetectVenvDir:
    """Tests for _detect_venv_dir() virtualenv detection."""

    @staticmethod
    def _pretend_no_active_venv(monkeypatch):
        # sys.prefix == sys.base_prefix means "not inside a virtualenv".
        monkeypatch.setattr("sys.prefix", "/usr")
        monkeypatch.setattr("sys.base_prefix", "/usr")

    def test_detects_active_virtualenv_via_sys_prefix(self, tmp_path, monkeypatch):
        active_venv = tmp_path / "my-custom-venv"
        active_venv.mkdir()
        monkeypatch.setattr("sys.prefix", str(active_venv))
        monkeypatch.setattr("sys.base_prefix", "/usr")
        assert gateway_cli._detect_venv_dir() == active_venv

    def test_falls_back_to_dot_venv_directory(self, tmp_path, monkeypatch):
        self._pretend_no_active_venv(monkeypatch)
        monkeypatch.setattr(gateway_cli, "PROJECT_ROOT", tmp_path)
        (tmp_path / ".venv").mkdir()
        assert gateway_cli._detect_venv_dir() == tmp_path / ".venv"

    def test_falls_back_to_venv_directory(self, tmp_path, monkeypatch):
        self._pretend_no_active_venv(monkeypatch)
        monkeypatch.setattr(gateway_cli, "PROJECT_ROOT", tmp_path)
        (tmp_path / "venv").mkdir()
        assert gateway_cli._detect_venv_dir() == tmp_path / "venv"

    def test_prefers_dot_venv_over_venv(self, tmp_path, monkeypatch):
        self._pretend_no_active_venv(monkeypatch)
        monkeypatch.setattr(gateway_cli, "PROJECT_ROOT", tmp_path)
        (tmp_path / ".venv").mkdir()
        (tmp_path / "venv").mkdir()
        assert gateway_cli._detect_venv_dir() == tmp_path / ".venv"

    def test_returns_none_when_no_virtualenv(self, tmp_path, monkeypatch):
        self._pretend_no_active_venv(monkeypatch)
        monkeypatch.setattr(gateway_cli, "PROJECT_ROOT", tmp_path)
        assert gateway_cli._detect_venv_dir() is None
class TestGeneratedUnitUsesDetectedVenv:
    """The rendered unit must point at whichever venv was actually detected."""

    def test_systemd_unit_uses_dot_venv_when_detected(self, tmp_path, monkeypatch):
        detected = tmp_path / ".venv"
        (detected / "bin").mkdir(parents=True)
        monkeypatch.setattr(gateway_cli, "_detect_venv_dir", lambda: detected)
        monkeypatch.setattr(
            gateway_cli, "get_python_path", lambda: str(detected / "bin" / "python")
        )
        unit_text = gateway_cli.generate_systemd_unit(system=False)
        assert f"VIRTUAL_ENV={detected}" in unit_text
        assert f"{detected}/bin" in unit_text
        # Must NOT contain a hardcoded /venv/ path
        assert "/venv/" not in unit_text or "/.venv/" in unit_text
class TestEnsureUserSystemdEnv:
    """Tests for _ensure_user_systemd_env() D-Bus session bus auto-detection."""

    def test_sets_xdg_runtime_dir_when_missing(self, tmp_path, monkeypatch):
        # Start from a clean slate: neither env var present.
        monkeypatch.delenv("XDG_RUNTIME_DIR", raising=False)
        monkeypatch.delenv("DBUS_SESSION_BUS_ADDRESS", raising=False)
        monkeypatch.setattr(os, "getuid", lambda: 42)
        # Patch Path.exists so /run/user/42 appears to exist.
        # Using a FakePath subclass breaks on Python 3.12+ where
        # PosixPath.__new__ ignores the redirected path argument.
        _orig_exists = gateway_cli.Path.exists
        monkeypatch.setattr(
            gateway_cli.Path, "exists",
            lambda self: True if str(self) == "/run/user/42" else _orig_exists(self),
        )
        gateway_cli._ensure_user_systemd_env()
        # The runtime dir is derived from the patched uid: /run/user/<uid>.
        assert os.environ.get("XDG_RUNTIME_DIR") == "/run/user/42"

    def test_sets_dbus_address_when_bus_socket_exists(self, tmp_path, monkeypatch):
        runtime = tmp_path / "runtime"
        runtime.mkdir()
        bus_socket = runtime / "bus"
        bus_socket.touch()  # simulate the socket file
        monkeypatch.setenv("XDG_RUNTIME_DIR", str(runtime))
        monkeypatch.delenv("DBUS_SESSION_BUS_ADDRESS", raising=False)
        monkeypatch.setattr(os, "getuid", lambda: 99)
        gateway_cli._ensure_user_systemd_env()
        # Address points at the socket inside XDG_RUNTIME_DIR.
        assert os.environ["DBUS_SESSION_BUS_ADDRESS"] == f"unix:path={bus_socket}"

    def test_preserves_existing_env_vars(self, monkeypatch):
        # Pre-set values must never be overwritten by auto-detection.
        monkeypatch.setenv("XDG_RUNTIME_DIR", "/custom/runtime")
        monkeypatch.setenv("DBUS_SESSION_BUS_ADDRESS", "unix:path=/custom/bus")
        gateway_cli._ensure_user_systemd_env()
        assert os.environ["XDG_RUNTIME_DIR"] == "/custom/runtime"
        assert os.environ["DBUS_SESSION_BUS_ADDRESS"] == "unix:path=/custom/bus"

    def test_no_dbus_when_bus_socket_missing(self, tmp_path, monkeypatch):
        runtime = tmp_path / "runtime"
        runtime.mkdir()
        # no bus socket created
        monkeypatch.setenv("XDG_RUNTIME_DIR", str(runtime))
        monkeypatch.delenv("DBUS_SESSION_BUS_ADDRESS", raising=False)
        monkeypatch.setattr(os, "getuid", lambda: 99)
        gateway_cli._ensure_user_systemd_env()
        # Without a socket the variable must stay unset.
        assert "DBUS_SESSION_BUS_ADDRESS" not in os.environ

    def test_systemctl_cmd_calls_ensure_for_user_mode(self, monkeypatch):
        # Record whether the env-ensure hook is invoked.
        calls = []
        monkeypatch.setattr(gateway_cli, "_ensure_user_systemd_env", lambda: calls.append("called"))
        result = gateway_cli._systemctl_cmd(system=False)
        assert result == ["systemctl", "--user"]
        assert calls == ["called"]

    def test_systemctl_cmd_skips_ensure_for_system_mode(self, monkeypatch):
        calls = []
        monkeypatch.setattr(gateway_cli, "_ensure_user_systemd_env", lambda: calls.append("called"))
        result = gateway_cli._systemctl_cmd(system=True)
        # System-wide systemctl needs no user session bus.
        assert result == ["systemctl"]
        assert calls == []

View file

@ -0,0 +1,400 @@
"""
Tests for hermes_cli.mcp_config ``hermes mcp`` subcommands.
These tests mock the MCP server connection layer so they run without
any actual MCP servers or API keys.
"""
import argparse
import json
import os
import types
from pathlib import Path
from typing import Any, Dict, List
from unittest.mock import MagicMock, patch, PropertyMock
import pytest
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
@pytest.fixture(autouse=True)
def _isolate_config(tmp_path, monkeypatch):
    """Redirect all config I/O to a temp directory."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    cfg_file = tmp_path / "config.yaml"
    dotenv_file = tmp_path / ".env"
    # Point every config helper at the temp dir so tests never touch the
    # real user config on disk.
    monkeypatch.setattr("hermes_cli.config.get_hermes_home", lambda: tmp_path)
    monkeypatch.setattr("hermes_cli.config.get_config_path", lambda: cfg_file)
    monkeypatch.setattr("hermes_cli.config.get_env_path", lambda: dotenv_file)
    return tmp_path
def _make_args(**kwargs):
    """Build a minimal argparse.Namespace with mcp-subcommand defaults."""
    namespace = argparse.Namespace(
        name="test-server",
        url=None,
        command=None,
        args=None,
        auth=None,
        mcp_action=None,
    )
    # Overlay caller-supplied overrides (and any extra attributes).
    for key, value in kwargs.items():
        setattr(namespace, key, value)
    return namespace
def _seed_config(tmp_path: Path, mcp_servers: dict) -> None:
    """Write a config.yaml with the given mcp_servers."""
    import yaml

    payload = {"mcp_servers": mcp_servers, "_config_version": 9}
    (tmp_path / "config.yaml").write_text(yaml.safe_dump(payload))
class FakeTool:
    """Mimics an MCP tool object returned by the SDK."""

    def __init__(self, name: str, description: str = ""):
        # Only the two attributes the CLI code reads are modelled.
        self.name = name
        self.description = description
# ---------------------------------------------------------------------------
# Tests: cmd_mcp_list
# ---------------------------------------------------------------------------
class TestMcpList:
    """Tests for `hermes mcp list` console output."""

    def test_list_empty_config(self, tmp_path, capsys):
        from hermes_cli.mcp_config import cmd_mcp_list
        cmd_mcp_list()
        out = capsys.readouterr().out
        assert "No MCP servers configured" in out

    def test_list_with_servers(self, tmp_path, capsys):
        # One enabled HTTP server with an include filter, one disabled stdio server.
        _seed_config(tmp_path, {
            "ink": {
                "url": "https://mcp.ml.ink/mcp",
                "enabled": True,
                "tools": {"include": ["create_service", "get_service"]},
            },
            "github": {
                "command": "npx",
                "args": ["@mcp/github"],
                "enabled": False,
            },
        })
        from hermes_cli.mcp_config import cmd_mcp_list
        cmd_mcp_list()
        out = capsys.readouterr().out
        assert "ink" in out
        assert "github" in out
        assert "2 selected" in out  # ink has 2 in include
        assert "disabled" in out  # github is disabled

    def test_list_enabled_default_true(self, tmp_path, capsys):
        """Server without explicit enabled key defaults to enabled."""
        _seed_config(tmp_path, {
            "myserver": {"url": "https://example.com/mcp"},
        })
        from hermes_cli.mcp_config import cmd_mcp_list
        cmd_mcp_list()
        out = capsys.readouterr().out
        assert "myserver" in out
        assert "enabled" in out
# ---------------------------------------------------------------------------
# Tests: cmd_mcp_remove
# ---------------------------------------------------------------------------
class TestMcpRemove:
    """Tests for `hermes mcp remove` (confirmation prompt mocked via input)."""

    def test_remove_existing_server(self, tmp_path, capsys, monkeypatch):
        _seed_config(tmp_path, {
            "myserver": {"url": "https://example.com/mcp"},
        })
        # Auto-confirm the removal prompt.
        monkeypatch.setattr("builtins.input", lambda _: "y")
        from hermes_cli.mcp_config import cmd_mcp_remove
        cmd_mcp_remove(_make_args(name="myserver"))
        out = capsys.readouterr().out
        assert "Removed" in out
        # Verify config updated
        from hermes_cli.config import load_config
        config = load_config()
        assert "myserver" not in config.get("mcp_servers", {})

    def test_remove_nonexistent(self, tmp_path, capsys):
        _seed_config(tmp_path, {})
        from hermes_cli.mcp_config import cmd_mcp_remove
        cmd_mcp_remove(_make_args(name="ghost"))
        out = capsys.readouterr().out
        assert "not found" in out

    def test_remove_cleans_oauth_tokens(self, tmp_path, capsys, monkeypatch):
        _seed_config(tmp_path, {
            "oauth-srv": {"url": "https://example.com/mcp", "auth": "oauth"},
        })
        monkeypatch.setattr("builtins.input", lambda _: "y")
        # Also patch get_hermes_home in the mcp_config module namespace
        monkeypatch.setattr(
            "hermes_cli.mcp_config.get_hermes_home", lambda: tmp_path
        )
        # Create a fake token file
        token_dir = tmp_path / "mcp-tokens"
        token_dir.mkdir()
        token_file = token_dir / "oauth-srv.json"
        token_file.write_text("{}")
        from hermes_cli.mcp_config import cmd_mcp_remove
        cmd_mcp_remove(_make_args(name="oauth-srv"))
        # Removing an OAuth server also deletes its cached token file.
        assert not token_file.exists()
# ---------------------------------------------------------------------------
# Tests: cmd_mcp_add
# ---------------------------------------------------------------------------
class TestMcpAdd:
    """Tests for `hermes mcp add`; the server probe and stdin are mocked."""

    def test_add_no_transport(self, capsys):
        """Must specify --url or --command."""
        from hermes_cli.mcp_config import cmd_mcp_add
        cmd_mcp_add(_make_args(name="bad"))
        out = capsys.readouterr().out
        assert "Must specify" in out

    def test_add_http_server_all_tools(self, tmp_path, capsys, monkeypatch):
        """Add an HTTP server, accept all tools."""
        fake_tools = [
            FakeTool("create_service", "Deploy from repo"),
            FakeTool("list_services", "List all services"),
        ]
        def mock_probe(name, config, **kw):
            return [(t.name, t.description) for t in fake_tools]
        monkeypatch.setattr(
            "hermes_cli.mcp_config._probe_single_server", mock_probe
        )
        # No auth, accept all tools
        inputs = iter(["n", ""])  # no auth needed, enable all
        monkeypatch.setattr("builtins.input", lambda _: next(inputs))
        from hermes_cli.mcp_config import cmd_mcp_add
        cmd_mcp_add(_make_args(name="ink", url="https://mcp.ml.ink/mcp"))
        out = capsys.readouterr().out
        assert "Saved" in out
        assert "2/2 tools" in out
        # Verify config written
        from hermes_cli.config import load_config
        config = load_config()
        assert "ink" in config.get("mcp_servers", {})
        assert config["mcp_servers"]["ink"]["url"] == "https://mcp.ml.ink/mcp"

    def test_add_stdio_server(self, tmp_path, capsys, monkeypatch):
        """Add a stdio server."""
        fake_tools = [FakeTool("search", "Search repos")]
        def mock_probe(name, config, **kw):
            return [(t.name, t.description) for t in fake_tools]
        monkeypatch.setattr(
            "hermes_cli.mcp_config._probe_single_server", mock_probe
        )
        # stdio flow has no auth question; only the tool-selection prompt.
        inputs = iter([""])  # accept all tools
        monkeypatch.setattr("builtins.input", lambda _: next(inputs))
        from hermes_cli.mcp_config import cmd_mcp_add
        cmd_mcp_add(_make_args(
            name="github",
            command="npx",
            args=["@mcp/github"],
        ))
        out = capsys.readouterr().out
        assert "Saved" in out
        from hermes_cli.config import load_config
        config = load_config()
        srv = config["mcp_servers"]["github"]
        assert srv["command"] == "npx"
        assert srv["args"] == ["@mcp/github"]

    def test_add_connection_failure_save_disabled(
        self, tmp_path, capsys, monkeypatch
    ):
        """Failed connection → option to save as disabled."""
        def mock_probe_fail(name, config, **kw):
            raise ConnectionError("Connection refused")
        monkeypatch.setattr(
            "hermes_cli.mcp_config._probe_single_server", mock_probe_fail
        )
        inputs = iter(["n", "y"])  # no auth, yes save disabled
        monkeypatch.setattr("builtins.input", lambda _: next(inputs))
        from hermes_cli.mcp_config import cmd_mcp_add
        cmd_mcp_add(_make_args(name="broken", url="https://bad.host/mcp"))
        out = capsys.readouterr().out
        assert "disabled" in out
        from hermes_cli.config import load_config
        config = load_config()
        # The server is persisted but flagged off.
        assert config["mcp_servers"]["broken"]["enabled"] is False
# ---------------------------------------------------------------------------
# Tests: cmd_mcp_test
# ---------------------------------------------------------------------------
class TestMcpTest:
    """Tests for `hermes mcp test` connectivity checks."""

    def test_test_not_found(self, tmp_path, capsys):
        from hermes_cli.mcp_config import cmd_mcp_test

        _seed_config(tmp_path, {})
        cmd_mcp_test(_make_args(name="ghost"))
        assert "not found" in capsys.readouterr().out

    def test_test_success(self, tmp_path, capsys, monkeypatch):
        from hermes_cli.mcp_config import cmd_mcp_test

        _seed_config(tmp_path, {"ink": {"url": "https://mcp.ml.ink/mcp"}})
        # Probe succeeds with two discovered tools.
        monkeypatch.setattr(
            "hermes_cli.mcp_config._probe_single_server",
            lambda name, config, **kw: [
                ("create_service", "Deploy"),
                ("list_services", "List all"),
            ],
        )
        cmd_mcp_test(_make_args(name="ink"))
        printed = capsys.readouterr().out
        assert "Connected" in printed
        assert "Tools discovered: 2" in printed
# ---------------------------------------------------------------------------
# Tests: env var interpolation
# ---------------------------------------------------------------------------
class TestEnvVarInterpolation:
    """`${VAR}` placeholders in MCP configs resolve from the environment."""

    def test_interpolate_simple(self, monkeypatch):
        from tools.mcp_tool import _interpolate_env_vars

        monkeypatch.setenv("MY_KEY", "secret123")
        assert _interpolate_env_vars("Bearer ${MY_KEY}") == "Bearer secret123"

    def test_interpolate_missing_var(self, monkeypatch):
        from tools.mcp_tool import _interpolate_env_vars

        monkeypatch.delenv("MISSING_VAR", raising=False)
        # Unknown variables stay verbatim rather than being blanked out.
        assert _interpolate_env_vars("Bearer ${MISSING_VAR}") == "Bearer ${MISSING_VAR}"

    def test_interpolate_nested_dict(self, monkeypatch):
        from tools.mcp_tool import _interpolate_env_vars

        monkeypatch.setenv("API_KEY", "abc")
        resolved = _interpolate_env_vars(
            {
                "url": "https://example.com",
                "headers": {"Authorization": "Bearer ${API_KEY}"},
            }
        )
        assert resolved["headers"]["Authorization"] == "Bearer abc"
        assert resolved["url"] == "https://example.com"

    def test_interpolate_list(self, monkeypatch):
        from tools.mcp_tool import _interpolate_env_vars

        monkeypatch.setenv("ARG1", "hello")
        assert _interpolate_env_vars(["${ARG1}", "static"]) == ["hello", "static"]

    def test_interpolate_non_string(self):
        from tools.mcp_tool import _interpolate_env_vars

        # Non-string scalars pass through untouched.
        assert _interpolate_env_vars(42) == 42
        assert _interpolate_env_vars(True) is True
        assert _interpolate_env_vars(None) is None
# ---------------------------------------------------------------------------
# Tests: config helpers
# ---------------------------------------------------------------------------
class TestConfigHelpers:
    """Round-trip tests for the private mcp_servers config helpers."""

    def test_save_and_load_mcp_server(self, tmp_path):
        from hermes_cli.mcp_config import _get_mcp_servers, _save_mcp_server

        _save_mcp_server("mysvr", {"url": "https://example.com/mcp"})
        stored = _get_mcp_servers()
        assert "mysvr" in stored
        assert stored["mysvr"]["url"] == "https://example.com/mcp"

    def test_remove_mcp_server(self, tmp_path):
        from hermes_cli.mcp_config import (
            _get_mcp_servers,
            _remove_mcp_server,
            _save_mcp_server,
        )

        _save_mcp_server("s1", {"command": "test"})
        _save_mcp_server("s2", {"command": "test2"})
        # Removing one server must not disturb its sibling.
        assert _remove_mcp_server("s1") is True
        remaining = _get_mcp_servers()
        assert "s1" not in remaining
        assert "s2" in remaining

    def test_remove_nonexistent(self, tmp_path):
        from hermes_cli.mcp_config import _remove_mcp_server

        assert _remove_mcp_server("ghost") is False

    def test_env_key_for_server(self):
        from hermes_cli.mcp_config import _env_key_for_server

        # Names are upper-cased and dashes become underscores.
        assert _env_key_for_server("ink") == "MCP_INK_API_KEY"
        assert _env_key_for_server("my-server") == "MCP_MY_SERVER_API_KEY"
# ---------------------------------------------------------------------------
# Tests: dispatcher
# ---------------------------------------------------------------------------
class TestDispatcher:
    """mcp_command with no subcommand falls back to the list view."""

    def test_no_action_shows_list(self, tmp_path, capsys):
        from hermes_cli.mcp_config import mcp_command

        _seed_config(tmp_path, {})
        mcp_command(_make_args(mcp_action=None))
        printed = capsys.readouterr().out
        assert "Commands:" in printed or "No MCP servers" in printed

View file

@ -0,0 +1,291 @@
"""Tests for MCP tools interactive configuration in hermes_cli.tools_config."""
from types import SimpleNamespace
from unittest.mock import MagicMock, patch
from hermes_cli.tools_config import _configure_mcp_tools_interactive
# Patch targets: imports happen inside the function body, so patch at source
_PROBE = "tools.mcp_tool.probe_mcp_server_tools"  # tool discovery probe
_CHECKLIST = "hermes_cli.curses_ui.curses_checklist"  # interactive checklist UI
_SAVE = "hermes_cli.tools_config.save_config"  # config persistence
def test_no_mcp_servers_prints_info(capsys):
    """Returns immediately when no MCP servers are configured."""
    _configure_mcp_tools_interactive({})
    assert "No MCP servers configured" in capsys.readouterr().out
def test_all_servers_disabled_prints_info(capsys):
    """Returns immediately when all configured servers have enabled=false."""
    # Both a boolean False and the string "false" count as disabled.
    config = {
        "mcp_servers": {
            "github": {"command": "npx", "enabled": False},
            "slack": {"command": "npx", "enabled": "false"},
        }
    }
    _configure_mcp_tools_interactive(config)
    assert "disabled" in capsys.readouterr().out
def test_probe_failure_shows_warning(capsys):
    """Shows warning when probe returns no tools."""
    config = {"mcp_servers": {"github": {"command": "npx"}}}
    with patch(_PROBE, return_value={}):
        _configure_mcp_tools_interactive(config)
    assert "Could not discover" in capsys.readouterr().out
def test_probe_exception_shows_error(capsys):
    """Shows error when probe raises an exception."""
    config = {"mcp_servers": {"github": {"command": "npx"}}}
    with patch(_PROBE, side_effect=RuntimeError("MCP not installed")):
        _configure_mcp_tools_interactive(config)
    assert "Failed to probe" in capsys.readouterr().out
def test_no_changes_when_checklist_cancelled(capsys):
    """No config changes when user cancels (ESC) the checklist."""
    config = {
        "mcp_servers": {
            "github": {"command": "npx", "args": ["-y", "server-github"]},
        }
    }
    tools = [("create_issue", "Create an issue"), ("search_repos", "Search repos")]
    # NOTE(review): the checklist mock returns {0, 1} — identical to the
    # pre-selected set for a server with no filters — which the function
    # treats as "nothing changed". Presumably cancel surfaces the same way;
    # confirm against curses_checklist's actual cancel return value.
    with patch(_PROBE, return_value={"github": tools}), \
         patch(_CHECKLIST, return_value={0, 1}), \
         patch(_SAVE) as mock_save:
        _configure_mcp_tools_interactive(config)
    mock_save.assert_not_called()
    captured = capsys.readouterr()
    assert "no changes" in captured.out.lower()
def test_disabling_tool_writes_exclude_list(capsys):
    """Unchecking a tool adds it to the exclude list."""
    config = {
        "mcp_servers": {
            "github": {"command": "npx"},
        }
    }
    tools = [
        ("create_issue", "Create an issue"),
        ("delete_repo", "Delete a repo"),
        ("search_repos", "Search repos"),
    ]
    # User unchecks delete_repo (index 1)
    with patch(_PROBE, return_value={"github": tools}), \
         patch(_CHECKLIST, return_value={0, 2}), \
         patch(_SAVE) as mock_save:
        _configure_mcp_tools_interactive(config)
    mock_save.assert_called_once()
    tools_cfg = config["mcp_servers"]["github"]["tools"]
    # Exclude-mode persistence: only the unchecked tool is recorded.
    assert tools_cfg["exclude"] == ["delete_repo"]
    assert "include" not in tools_cfg
def test_enabling_all_clears_filters(capsys):
    """Checking all tools clears both include and exclude lists."""
    config = {
        "mcp_servers": {
            "github": {
                "command": "npx",
                "tools": {"exclude": ["delete_repo"], "include": ["create_issue"]},
            },
        }
    }
    tools = [("create_issue", "Create"), ("delete_repo", "Delete")]
    # User checks all tools — pre_selected would be {0} (include mode),
    # so returning {0, 1} is a change
    with patch(_PROBE, return_value={"github": tools}), \
         patch(_CHECKLIST, return_value={0, 1}), \
         patch(_SAVE) as mock_save:
        _configure_mcp_tools_interactive(config)
    mock_save.assert_called_once()
    tools_cfg = config["mcp_servers"]["github"]["tools"]
    # Full selection → no filtering needed at all.
    assert "exclude" not in tools_cfg
    assert "include" not in tools_cfg
def test_pre_selection_respects_existing_exclude(capsys):
    """Tools in exclude list start unchecked."""
    config = {
        "mcp_servers": {
            "github": {
                "command": "npx",
                "tools": {"exclude": ["delete_repo"]},
            },
        }
    }
    tools = [("create_issue", "Create"), ("delete_repo", "Delete"), ("search", "Search")]
    captured_pre_selected = {}
    # Stub checklist that records what the function pre-selects.
    def fake_checklist(title, labels, pre_selected, **kwargs):
        captured_pre_selected["value"] = set(pre_selected)
        return pre_selected  # No changes
    with patch(_PROBE, return_value={"github": tools}), \
         patch(_CHECKLIST, side_effect=fake_checklist), \
         patch(_SAVE):
        _configure_mcp_tools_interactive(config)
    # create_issue (0) and search (2) should be pre-selected, delete_repo (1) should not
    assert captured_pre_selected["value"] == {0, 2}
def test_pre_selection_respects_existing_include(capsys):
    """Only tools in include list start checked."""
    config = {
        "mcp_servers": {
            "github": {
                "command": "npx",
                "tools": {"include": ["search"]},
            },
        }
    }
    tools = [("create_issue", "Create"), ("delete_repo", "Delete"), ("search", "Search")]
    captured_pre_selected = {}
    # Stub checklist that records what the function pre-selects.
    def fake_checklist(title, labels, pre_selected, **kwargs):
        captured_pre_selected["value"] = set(pre_selected)
        return pre_selected  # No changes
    with patch(_PROBE, return_value={"github": tools}), \
         patch(_CHECKLIST, side_effect=fake_checklist), \
         patch(_SAVE):
        _configure_mcp_tools_interactive(config)
    # Only search (2) should be pre-selected
    assert captured_pre_selected["value"] == {2}
def test_multiple_servers_each_get_checklist(capsys):
    """Each server gets its own checklist."""
    config = {
        "mcp_servers": {
            "github": {"command": "npx"},
            "slack": {"url": "https://mcp.example.com"},
        }
    }
    checklist_calls = []
    # Record each checklist invocation's title to prove one call per server.
    def fake_checklist(title, labels, pre_selected, **kwargs):
        checklist_calls.append(title)
        return pre_selected  # No changes
    with patch(
        _PROBE,
        return_value={
            "github": [("create_issue", "Create")],
            "slack": [("send_message", "Send")],
        },
    ), patch(_CHECKLIST, side_effect=fake_checklist), \
         patch(_SAVE):
        _configure_mcp_tools_interactive(config)
    assert len(checklist_calls) == 2
    assert any("github" in t for t in checklist_calls)
    assert any("slack" in t for t in checklist_calls)
def test_failed_server_shows_warning(capsys):
    """Servers that fail to connect show warnings."""
    config = {
        "mcp_servers": {
            "github": {"command": "npx"},
            "broken": {"command": "nonexistent"},
        }
    }
    # Only github succeeds
    with patch(
        _PROBE, return_value={"github": [("create_issue", "Create")]},
    ), patch(_CHECKLIST, return_value={0}), \
         patch(_SAVE):
        _configure_mcp_tools_interactive(config)
    captured = capsys.readouterr()
    # The missing server's name must appear in the warning output.
    assert "broken" in captured.out
def test_description_truncation_in_labels():
    """Long descriptions are truncated in checklist labels."""
    config = {
        "mcp_servers": {
            "github": {"command": "npx"},
        }
    }
    long_desc = "A" * 100
    captured_labels = {}
    # Stub checklist that captures the rendered labels.
    def fake_checklist(title, labels, pre_selected, **kwargs):
        captured_labels["value"] = labels
        return pre_selected
    with patch(
        _PROBE, return_value={"github": [("my_tool", long_desc)]},
    ), patch(_CHECKLIST, side_effect=fake_checklist), \
         patch(_SAVE):
        _configure_mcp_tools_interactive(config)
    label = captured_labels["value"][0]
    assert "..." in label
    assert len(label) < len(long_desc) + 30  # truncated + tool name + parens
def test_switching_from_include_to_exclude(capsys):
    """When user modifies selection, include list is replaced by exclude list."""
    config = {
        "mcp_servers": {
            "github": {
                "command": "npx",
                "tools": {"include": ["create_issue"]},
            },
        }
    }
    tools = [("create_issue", "Create"), ("search", "Search"), ("delete", "Delete")]
    # User selects create_issue and search (deselects delete)
    # pre_selected would be {0} (only create_issue from include), so {0, 1} is a change
    with patch(_PROBE, return_value={"github": tools}), \
         patch(_CHECKLIST, return_value={0, 1}), \
         patch(_SAVE):
        _configure_mcp_tools_interactive(config)
    tools_cfg = config["mcp_servers"]["github"]["tools"]
    # Persisted in exclude mode; the old include filter is dropped.
    assert tools_cfg["exclude"] == ["delete"]
    assert "include" not in tools_cfg
def test_empty_tools_server_skipped(capsys):
    """Server with no tools shows info message and skips checklist."""
    config = {
        "mcp_servers": {
            "empty": {"command": "npx"},
        }
    }
    checklist_calls = []
    # Record invocations so we can assert the checklist was never shown.
    def fake_checklist(title, labels, pre_selected, **kwargs):
        checklist_calls.append(title)
        return pre_selected
    with patch(_PROBE, return_value={"empty": []}), \
         patch(_CHECKLIST, side_effect=fake_checklist), \
         patch(_SAVE):
        _configure_mcp_tools_interactive(config)
    assert len(checklist_calls) == 0
    captured = capsys.readouterr()
    assert "no tools found" in captured.out

View file

@ -0,0 +1,450 @@
"""Tests for provider-aware `/model` validation in hermes_cli.models."""
from unittest.mock import patch
from hermes_cli.models import (
copilot_model_api_mode,
fetch_github_model_catalog,
curated_models_for_provider,
fetch_api_models,
github_model_reasoning_efforts,
normalize_copilot_model_id,
normalize_provider,
parse_model_input,
probe_api_models,
provider_label,
provider_model_ids,
validate_requested_model,
)
# -- helpers -----------------------------------------------------------------
# Canned catalog returned by the mocked provider /models endpoint.
FAKE_API_MODELS = [
    "anthropic/claude-opus-4.6",
    "anthropic/claude-sonnet-4.5",
    "openai/gpt-5.4-pro",
    "openai/gpt-5.4",
    "google/gemini-3-pro-preview",
]
def _validate(model, provider="openrouter", api_models=FAKE_API_MODELS, **kw):
    """Shortcut: call validate_requested_model with mocked API."""
    resolved = kw.get("base_url", "") or "http://localhost:11434/v1"
    fake_probe = {
        "models": api_models,
        "probed_url": "http://localhost:11434/v1/models",
        "resolved_base_url": resolved,
        "suggested_base_url": None,
        "used_fallback": False,
    }
    with patch("hermes_cli.models.fetch_api_models", return_value=api_models):
        with patch("hermes_cli.models.probe_api_models", return_value=fake_probe):
            return validate_requested_model(model, provider, **kw)
# -- parse_model_input -------------------------------------------------------
class TestParseModelInput:
    """parse_model_input(text, current_provider) -> (provider, model) parsing."""

    def test_plain_model_keeps_current_provider(self):
        provider, model = parse_model_input("anthropic/claude-sonnet-4.5", "openrouter")
        assert provider == "openrouter"
        assert model == "anthropic/claude-sonnet-4.5"

    def test_provider_colon_model_switches_provider(self):
        provider, model = parse_model_input("openrouter:anthropic/claude-sonnet-4.5", "nous")
        assert provider == "openrouter"
        assert model == "anthropic/claude-sonnet-4.5"

    def test_provider_alias_resolved(self):
        # "glm" is an alias for the zai provider.
        provider, model = parse_model_input("glm:glm-5", "openrouter")
        assert provider == "zai"
        assert model == "glm-5"

    def test_no_slash_no_colon_keeps_provider(self):
        provider, model = parse_model_input("gpt-5.4", "openrouter")
        assert provider == "openrouter"
        assert model == "gpt-5.4"

    def test_nous_provider_switch(self):
        provider, model = parse_model_input("nous:hermes-3", "openrouter")
        assert provider == "nous"
        assert model == "hermes-3"

    def test_empty_model_after_colon_keeps_current(self):
        # A trailing colon with no model is passed through verbatim.
        provider, model = parse_model_input("openrouter:", "nous")
        assert provider == "nous"
        assert model == "openrouter:"

    def test_colon_at_start_keeps_current(self):
        provider, model = parse_model_input(":something", "openrouter")
        assert provider == "openrouter"
        assert model == ":something"

    def test_unknown_prefix_colon_not_treated_as_provider(self):
        """Colons are only provider delimiters if the left side is a known provider."""
        provider, model = parse_model_input("anthropic/claude-3.5-sonnet:beta", "openrouter")
        assert provider == "openrouter"
        assert model == "anthropic/claude-3.5-sonnet:beta"

    def test_http_url_not_treated_as_provider(self):
        provider, model = parse_model_input("http://localhost:8080/model", "openrouter")
        assert provider == "openrouter"
        assert model == "http://localhost:8080/model"

    def test_custom_colon_model_single(self):
        """custom:model-name → anonymous custom provider."""
        provider, model = parse_model_input("custom:qwen-2.5", "openrouter")
        assert provider == "custom"
        assert model == "qwen-2.5"

    def test_custom_triple_syntax(self):
        """custom:name:model → named custom provider."""
        provider, model = parse_model_input("custom:local-server:qwen-2.5", "openrouter")
        assert provider == "custom:local-server"
        assert model == "qwen-2.5"

    def test_custom_triple_spaces(self):
        """Triple syntax should handle whitespace."""
        provider, model = parse_model_input("custom: my-server : my-model ", "openrouter")
        assert provider == "custom:my-server"
        assert model == "my-model"

    def test_custom_triple_empty_model_falls_back(self):
        """custom:name: with no model → treated as custom:name (bare)."""
        provider, model = parse_model_input("custom:name:", "openrouter")
        # Empty model after second colon → no triple match, falls through
        assert provider == "custom"
        assert model == "name:"
# -- curated_models_for_provider ---------------------------------------------
class TestCuratedModelsForProvider:
    """Curated model tables keyed by provider name."""

    def test_openrouter_returns_curated_list(self):
        curated = curated_models_for_provider("openrouter")
        assert len(curated) > 0
        # At least one Claude entry is expected in the curated table.
        assert any("claude" in entry[0] for entry in curated)

    def test_zai_returns_glm_models(self):
        curated = curated_models_for_provider("zai")
        assert any("glm" in entry[0] for entry in curated)

    def test_unknown_provider_returns_empty(self):
        assert curated_models_for_provider("totally-unknown") == []
# -- normalize_provider ------------------------------------------------------
class TestNormalizeProvider:
    """Provider-name normalization and aliasing."""

    def test_defaults_to_openrouter(self):
        # Falsy inputs fall back to the default provider.
        for empty in (None, ""):
            assert normalize_provider(empty) == "openrouter"

    def test_known_aliases(self):
        alias_map = {
            "glm": "zai",
            "kimi": "kimi-coding",
            "moonshot": "kimi-coding",
            "github-copilot": "copilot",
        }
        for alias, canonical in alias_map.items():
            assert normalize_provider(alias) == canonical

    def test_case_insensitive(self):
        assert normalize_provider("OpenRouter") == "openrouter"
class TestProviderLabel:
    """Human-readable labels for provider ids."""

    def test_known_labels_and_auto(self):
        expected = {
            "anthropic": "Anthropic",
            "kimi": "Kimi / Moonshot",
            "copilot": "GitHub Copilot",
            "copilot-acp": "GitHub Copilot ACP",
            "auto": "Auto",
        }
        for provider_id, label in expected.items():
            assert provider_label(provider_id) == label

    def test_unknown_provider_preserves_original_name(self):
        assert provider_label("my-custom-provider") == "my-custom-provider"
# -- provider_model_ids ------------------------------------------------------
class TestProviderModelIds:
    """provider_model_ids(): curated catalogs plus the live Copilot catalog."""

    def test_openrouter_returns_curated_list(self):
        ids = provider_model_ids("openrouter")
        assert len(ids) > 0
        # OpenRouter ids are always vendor-prefixed ("vendor/model").
        assert all("/" in mid for mid in ids)

    def test_unknown_provider_returns_empty(self):
        assert provider_model_ids("some-unknown-provider") == []

    def test_zai_returns_glm_models(self):
        assert "glm-5" in provider_model_ids("zai")

    def test_copilot_prefers_live_catalog(self):
        # With a GitHub token available, the live catalog wins over defaults.
        with patch("hermes_cli.auth.resolve_api_key_provider_credentials", return_value={"api_key": "gh-token"}), \
             patch("hermes_cli.models._fetch_github_models", return_value=["gpt-5.4", "claude-sonnet-4.6"]):
            assert provider_model_ids("copilot") == ["gpt-5.4", "claude-sonnet-4.6"]

    def test_copilot_acp_reuses_copilot_catalog(self):
        with patch("hermes_cli.auth.resolve_api_key_provider_credentials", return_value={"api_key": "gh-token"}), \
             patch("hermes_cli.models._fetch_github_models", return_value=["gpt-5.4", "claude-sonnet-4.6"]):
            assert provider_model_ids("copilot-acp") == ["gpt-5.4", "claude-sonnet-4.6"]

    def test_copilot_acp_falls_back_to_copilot_defaults(self):
        # No token and no live catalog → static Copilot defaults.
        with patch("hermes_cli.auth.resolve_api_key_provider_credentials", side_effect=Exception("no token")), \
             patch("hermes_cli.models._fetch_github_models", return_value=None):
            ids = provider_model_ids("copilot-acp")
            assert "gpt-5.4" in ids
            assert "copilot-acp" not in ids
# -- fetch_api_models --------------------------------------------------------
class TestFetchApiModels:
    """Network-layer tests for fetch_api_models / probe_api_models (urlopen mocked)."""

    def test_returns_none_when_no_base_url(self):
        assert fetch_api_models("key", None) is None

    def test_returns_none_on_network_error(self):
        with patch("hermes_cli.models.urllib.request.urlopen", side_effect=Exception("timeout")):
            assert fetch_api_models("key", "https://example.com/v1") is None

    def test_probe_api_models_tries_v1_fallback(self):
        # Minimal context-manager stub standing in for an HTTP response.
        class _Resp:
            def __enter__(self):
                return self
            def __exit__(self, exc_type, exc, tb):
                return False
            def read(self):
                return b'{"data": [{"id": "local-model"}]}'
        calls = []
        # Only the /v1/models URL succeeds; the bare /models URL 404s.
        def _fake_urlopen(req, timeout=5.0):
            calls.append(req.full_url)
            if req.full_url.endswith("/v1/models"):
                return _Resp()
            raise Exception("404")
        with patch("hermes_cli.models.urllib.request.urlopen", side_effect=_fake_urlopen):
            probe = probe_api_models("key", "http://localhost:8000")
        # First tries <base>/models, then falls back to <base>/v1/models.
        assert calls == ["http://localhost:8000/models", "http://localhost:8000/v1/models"]
        assert probe["models"] == ["local-model"]
        assert probe["resolved_base_url"] == "http://localhost:8000/v1"
        assert probe["used_fallback"] is True

    def test_probe_api_models_uses_copilot_catalog(self):
        class _Resp:
            def __enter__(self):
                return self
            def __exit__(self, exc_type, exc, tb):
                return False
            def read(self):
                # Copilot-style catalog: chat models plus an embedding model
                # that must be filtered out.
                return b'{"data": [{"id": "gpt-5.4", "model_picker_enabled": true, "supported_endpoints": ["/responses"], "capabilities": {"type": "chat", "supports": {"reasoning_effort": ["low", "medium", "high"]}}}, {"id": "claude-sonnet-4.6", "model_picker_enabled": true, "supported_endpoints": ["/chat/completions"], "capabilities": {"type": "chat", "supports": {"reasoning_effort": ["low", "medium", "high"]}}}, {"id": "text-embedding-3-small", "model_picker_enabled": true, "capabilities": {"type": "embedding"}}]}'
        with patch("hermes_cli.models.urllib.request.urlopen", return_value=_Resp()) as mock_urlopen:
            probe = probe_api_models("gh-token", "https://api.githubcopilot.com")
        # Copilot base URL is probed at /models directly, no /v1 fallback.
        assert mock_urlopen.call_args[0][0].full_url == "https://api.githubcopilot.com/models"
        assert probe["models"] == ["gpt-5.4", "claude-sonnet-4.6"]
        assert probe["resolved_base_url"] == "https://api.githubcopilot.com"
        assert probe["used_fallback"] is False

    def test_fetch_github_model_catalog_filters_non_chat_models(self):
        class _Resp:
            def __enter__(self):
                return self
            def __exit__(self, exc_type, exc, tb):
                return False
            def read(self):
                return b'{"data": [{"id": "gpt-5.4", "model_picker_enabled": true, "supported_endpoints": ["/responses"], "capabilities": {"type": "chat", "supports": {"reasoning_effort": ["low", "medium", "high"]}}}, {"id": "text-embedding-3-small", "model_picker_enabled": true, "capabilities": {"type": "embedding"}}]}'
        with patch("hermes_cli.models.urllib.request.urlopen", return_value=_Resp()):
            catalog = fetch_github_model_catalog("gh-token")
        assert catalog is not None
        # Only the chat-capable entry survives filtering.
        assert [item["id"] for item in catalog] == ["gpt-5.4"]
class TestGithubReasoningEfforts:
    """Reasoning-effort resolution against the GitHub model catalog."""

    def test_gpt5_supports_minimal_to_high(self):
        """New-style entries expose efforts under capabilities.supports.reasoning_effort."""
        entry = {
            "id": "gpt-5.4",
            "capabilities": {"type": "chat", "supports": {"reasoning_effort": ["low", "medium", "high"]}},
            "supported_endpoints": ["/responses"],
        }
        efforts = github_model_reasoning_efforts("gpt-5.4", catalog=[entry])
        assert efforts == ["low", "medium", "high"]

    def test_legacy_catalog_reasoning_still_supported(self):
        """Old-style catalogs listed capabilities as a flat list of strings."""
        legacy_catalog = [{"id": "openai/o3", "capabilities": ["reasoning"]}]
        efforts = github_model_reasoning_efforts("openai/o3", catalog=legacy_catalog)
        assert efforts == ["low", "medium", "high"]

    def test_non_reasoning_model_returns_empty(self):
        """A chat model without reasoning support yields no effort levels."""
        plain_catalog = [{"id": "gpt-4.1", "capabilities": {"type": "chat", "supports": {}}}]
        assert github_model_reasoning_efforts("gpt-4.1", catalog=plain_catalog) == []
class TestCopilotNormalization:
    """Copilot model-id normalization and API-mode (chat vs responses) selection."""

    def test_normalize_old_github_models_slug(self):
        """Legacy org-prefixed slugs are mapped onto the closest catalog id."""
        catalog = [{"id": "gpt-4.1"}, {"id": "gpt-5.4"}]
        assert normalize_copilot_model_id("openai/gpt-4.1-mini", catalog=catalog) == "gpt-4.1"

    def test_copilot_api_mode_gpt5_uses_responses(self):
        """GPT-5+ models should use Responses API (matching opencode)."""
        for model_id in ("gpt-5.4", "gpt-5.4-mini", "gpt-5.3-codex", "gpt-5.2-codex", "gpt-5.2"):
            assert copilot_model_api_mode(model_id) == "codex_responses"

    def test_copilot_api_mode_gpt5_mini_uses_chat(self):
        """gpt-5-mini is the exception — uses Chat Completions."""
        assert copilot_model_api_mode("gpt-5-mini") == "chat_completions"

    def test_copilot_api_mode_non_gpt5_uses_chat(self):
        """Non-GPT-5 models use Chat Completions."""
        non_gpt5 = (
            "gpt-4.1",
            "gpt-4o",
            "gpt-4o-mini",
            "claude-sonnet-4.6",
            "claude-opus-4.6",
            "gemini-2.5-pro",
        )
        for model_id in non_gpt5:
            assert copilot_model_api_mode(model_id) == "chat_completions"

    def test_copilot_api_mode_with_catalog_both_endpoints(self):
        """When catalog shows both endpoints, model ID pattern wins."""
        entry = {"id": "gpt-5.4", "supported_endpoints": ["/chat/completions", "/responses"]}
        # GPT-5.4 should use responses even though chat/completions is listed
        assert copilot_model_api_mode("gpt-5.4", catalog=[entry]) == "codex_responses"

    def test_copilot_api_mode_with_catalog_only_responses(self):
        """A catalog entry that only lists /responses resolves to the responses mode."""
        entry = {
            "id": "gpt-5.4",
            "supported_endpoints": ["/responses"],
            "capabilities": {"type": "chat"},
        }
        assert copilot_model_api_mode("gpt-5.4", catalog=[entry]) == "codex_responses"
# -- validate — format checks -----------------------------------------------
class TestValidateFormatChecks:
    """Cheap offline format checks that run before any API lookup."""
    def test_empty_model_rejected(self):
        result = _validate("")
        assert result["accepted"] is False
        assert "empty" in result["message"]
    def test_whitespace_only_rejected(self):
        result = _validate(" ")
        assert result["accepted"] is False
    def test_model_with_spaces_rejected(self):
        # Embedded whitespace inside the model id is a hard format error.
        result = _validate("anthropic/ claude-opus")
        assert result["accepted"] is False
    def test_no_slash_model_still_probes_api(self):
        # A bare name (no provider/ prefix) is still checked against the API list.
        result = _validate("gpt-5.4", api_models=["gpt-5.4", "gpt-5.4-pro"])
        assert result["accepted"] is True
        assert result["persist"] is True
    def test_no_slash_model_rejected_if_not_in_api(self):
        # NOTE(review): the name says "rejected", but an unknown bare model is
        # *accepted* with a "not found" warning — consider renaming this test.
        result = _validate("gpt-5.4", api_models=["openai/gpt-5.4"])
        assert result["accepted"] is True
        assert "not found" in result["message"]
# -- validate — API found ----------------------------------------------------
class TestValidateApiFound:
    """Validation when the requested model is present in the provider's API list."""

    def test_model_found_in_api(self):
        outcome = _validate("anthropic/claude-opus-4.6")
        assert outcome["accepted"] is True
        assert outcome["persist"] is True
        assert outcome["recognized"] is True

    def test_model_found_for_custom_endpoint(self):
        outcome = _validate(
            "my-model",
            provider="openrouter",
            api_models=["my-model"],
            base_url="http://localhost:11434/v1",
        )
        for flag in ("accepted", "persist", "recognized"):
            assert outcome[flag] is True
# -- validate — API not found ------------------------------------------------
class TestValidateApiNotFound:
    """Unknown models are accepted with a warning rather than rejected outright."""

    def test_model_not_in_api_accepted_with_warning(self):
        outcome = _validate("anthropic/claude-nonexistent")
        assert outcome["accepted"] is True
        assert outcome["persist"] is True
        assert "not found" in outcome["message"]

    def test_warning_includes_suggestions(self):
        outcome = _validate("anthropic/claude-opus-4.5")
        assert outcome["accepted"] is True
        assert "Similar models" in outcome["message"]
# -- validate — API unreachable — accept and persist everything ----------------
class TestValidateApiFallback:
    """API unreachable (api_models=None): accept and persist everything, with a warning."""
    def test_any_model_accepted_when_api_down(self):
        result = _validate("anthropic/claude-opus-4.6", api_models=None)
        assert result["accepted"] is True
        assert result["persist"] is True
    def test_unknown_model_also_accepted_when_api_down(self):
        """No hardcoded catalog gatekeeping — accept, persist, and warn."""
        result = _validate("anthropic/claude-next-gen", api_models=None)
        assert result["accepted"] is True
        assert result["persist"] is True
        assert "could not reach" in result["message"].lower()
    def test_zai_model_accepted_when_api_down(self):
        result = _validate("glm-5", provider="zai", api_models=None)
        assert result["accepted"] is True
        assert result["persist"] is True
    def test_unknown_provider_accepted_when_api_down(self):
        result = _validate("some-model", provider="totally-unknown", api_models=None)
        assert result["accepted"] is True
        assert result["persist"] is True
    def test_custom_endpoint_warns_with_probed_url_and_v1_hint(self):
        """When the probe fails but a /v1 variant looks likely, the warning names both URLs."""
        with patch(
            "hermes_cli.models.probe_api_models",
            return_value={
                "models": None,  # None means the endpoint did not return a model list
                "probed_url": "http://localhost:8000/v1/models",
                "resolved_base_url": "http://localhost:8000",
                "suggested_base_url": "http://localhost:8000/v1",
                "used_fallback": False,
            },
        ):
            result = validate_requested_model(
                "qwen3",
                "custom",
                api_key="local-key",
                base_url="http://localhost:8000",
            )
        assert result["accepted"] is True
        assert result["persist"] is True
        # The guidance message should surface both the probed URL and the /v1 hint.
        assert "http://localhost:8000/v1/models" in result["message"]
        assert "http://localhost:8000/v1" in result["message"]

View file

@ -0,0 +1,119 @@
"""Tests for the hermes_cli models module."""
from hermes_cli.models import OPENROUTER_MODELS, menu_labels, model_ids, detect_provider_for_model
class TestModelIds:
    """Shape and content guarantees of the model_ids() helper."""

    def test_returns_non_empty_list(self):
        result = model_ids()
        assert isinstance(result, list)
        assert len(result) > 0

    def test_ids_match_models_list(self):
        assert model_ids() == [model_id for model_id, _desc in OPENROUTER_MODELS]

    def test_all_ids_contain_provider_slash(self):
        """Model IDs should follow the provider/model format."""
        for model_id in model_ids():
            assert "/" in model_id, f"Model ID '{model_id}' missing provider/ prefix"

    def test_no_duplicate_ids(self):
        all_ids = model_ids()
        assert len(all_ids) == len(set(all_ids)), "Duplicate model IDs found"
class TestMenuLabels:
    """Formatting of the model-picker menu labels."""

    def test_same_length_as_model_ids(self):
        assert len(menu_labels()) == len(model_ids())

    def test_first_label_marked_recommended(self):
        first_label = menu_labels()[0]
        assert "recommended" in first_label.lower()

    def test_each_label_contains_its_model_id(self):
        for label, model_id in zip(menu_labels(), model_ids()):
            assert model_id in label, f"Label '{label}' doesn't contain model ID '{model_id}'"

    def test_non_recommended_labels_have_no_tag(self):
        """Only the first model should have (recommended)."""
        for label in menu_labels()[1:]:
            assert "recommended" not in label.lower(), f"Unexpected 'recommended' in '{label}'"
class TestOpenRouterModels:
    """Structural sanity checks on the static OPENROUTER_MODELS catalog."""

    def test_structure_is_list_of_tuples(self):
        for entry in OPENROUTER_MODELS:
            assert isinstance(entry, tuple)
            assert len(entry) == 2
            model_id, description = entry
            assert isinstance(model_id, str)
            assert len(model_id) > 0
            assert isinstance(description, str)

    def test_at_least_5_models(self):
        """Sanity check that the models list hasn't been accidentally truncated."""
        assert len(OPENROUTER_MODELS) >= 5
class TestFindOpenrouterSlug:
    """Slug lookup: exact, bare-name, and case-insensitive matching."""

    @staticmethod
    def _lookup(query):
        # Single place for the deferred project import the original repeated per test.
        from hermes_cli.models import _find_openrouter_slug
        return _find_openrouter_slug(query)

    def test_exact_match(self):
        assert self._lookup("anthropic/claude-opus-4.6") == "anthropic/claude-opus-4.6"

    def test_bare_name_match(self):
        assert self._lookup("claude-opus-4.6") == "anthropic/claude-opus-4.6"

    def test_case_insensitive(self):
        assert self._lookup("Anthropic/Claude-Opus-4.6") is not None

    def test_unknown_returns_none(self):
        assert self._lookup("totally-fake-model-xyz") is None
class TestDetectProviderForModel:
    """Provider auto-detection when a model id doesn't belong to the current provider."""

    def test_anthropic_model_detected(self):
        """claude-opus-4-6 should resolve to anthropic provider."""
        detected = detect_provider_for_model("claude-opus-4-6", "openai-codex")
        assert detected is not None
        assert detected[0] == "anthropic"

    def test_deepseek_model_detected(self):
        """deepseek-chat should resolve to deepseek provider."""
        detected = detect_provider_for_model("deepseek-chat", "openai-codex")
        assert detected is not None
        # Provider is deepseek (direct) or openrouter (fallback) depending on creds
        assert detected[0] in ("deepseek", "openrouter")

    def test_current_provider_model_returns_none(self):
        """Models belonging to the current provider should not trigger a switch."""
        assert detect_provider_for_model("gpt-5.3-codex", "openai-codex") is None

    def test_openrouter_slug_match(self):
        """Models in the OpenRouter catalog should be found."""
        detected = detect_provider_for_model("anthropic/claude-opus-4.6", "openai-codex")
        assert detected is not None
        provider, slug = detected[0], detected[1]
        assert provider == "openrouter"
        assert slug == "anthropic/claude-opus-4.6"

    def test_bare_name_gets_openrouter_slug(self):
        """Bare model names should get mapped to full OpenRouter slugs."""
        detected = detect_provider_for_model("claude-opus-4.6", "openai-codex")
        assert detected is not None
        # Should find it on OpenRouter with full slug
        assert detected[1] == "anthropic/claude-opus-4.6"

    def test_unknown_model_returns_none(self):
        """Completely unknown model names should return None."""
        assert detect_provider_for_model("nonexistent-model-xyz", "openai-codex") is None

    def test_aggregator_not_suggested(self):
        """nous/openrouter should never be auto-suggested as target provider."""
        detected = detect_provider_for_model("claude-opus-4-6", "openai-codex")
        assert detected is not None
        assert detected[0] not in ("nous",)  # nous has claude models but shouldn't be suggested

View file

@ -0,0 +1,184 @@
"""Tests for file path autocomplete in the CLI completer."""
import os
from unittest.mock import MagicMock
import pytest
from prompt_toolkit.document import Document
from prompt_toolkit.formatted_text import to_plain_text
from hermes_cli.commands import SlashCommandCompleter, _file_size_label
def _display_names(completions):
    """Return the plain-text display name of each Completion object."""
    return [to_plain_text(item.display) for item in completions]
def _display_metas(completions):
    """Return each Completion's display_meta as plain text ("" when unset)."""
    return ["" if not item.display_meta else to_plain_text(item.display_meta) for item in completions]
@pytest.fixture
def completer():
    """Fresh SlashCommandCompleter instance for each test."""
    return SlashCommandCompleter()
class TestExtractPathWord:
    """_extract_path_word pulls the trailing path-like token from a line, if any."""

    @staticmethod
    def _extract(text):
        return SlashCommandCompleter._extract_path_word(text)

    def test_relative_path(self):
        assert self._extract("look at ./src/main.py") == "./src/main.py"

    def test_home_path(self):
        assert self._extract("edit ~/docs/") == "~/docs/"

    def test_absolute_path(self):
        assert self._extract("read /etc/hosts") == "/etc/hosts"

    def test_parent_path(self):
        assert self._extract("check ../config.yaml") == "../config.yaml"

    def test_path_with_slash_in_middle(self):
        assert self._extract("open src/utils/helpers.py") == "src/utils/helpers.py"

    def test_plain_word_not_path(self):
        assert self._extract("hello world") is None

    def test_empty_string(self):
        assert self._extract("") is None

    def test_single_word_no_slash(self):
        assert self._extract("README.md") is None

    def test_word_after_space(self):
        assert self._extract("fix the bug in ./tools/") == "./tools/"

    def test_just_dot_slash(self):
        assert self._extract("./") == "./"

    def test_just_tilde_slash(self):
        assert self._extract("~/") == "~/"
class TestPathCompletions:
    """Behavior of SlashCommandCompleter._path_completions against real directories."""
    def test_lists_current_directory(self, tmp_path):
        (tmp_path / "file_a.py").touch()
        (tmp_path / "file_b.txt").touch()
        (tmp_path / "subdir").mkdir()
        old_cwd = os.getcwd()
        os.chdir(tmp_path)  # "./" completions are resolved against the CWD
        try:
            completions = list(SlashCommandCompleter._path_completions("./"))
            names = _display_names(completions)
            assert "file_a.py" in names
            assert "file_b.txt" in names
            assert "subdir/" in names
        finally:
            os.chdir(old_cwd)  # always restore CWD so later tests aren't affected
    def test_filters_by_prefix(self, tmp_path):
        (tmp_path / "alpha.py").touch()
        (tmp_path / "beta.py").touch()
        (tmp_path / "alpha_test.py").touch()
        completions = list(SlashCommandCompleter._path_completions(f"{tmp_path}/alpha"))
        names = _display_names(completions)
        assert "alpha.py" in names
        assert "alpha_test.py" in names
        assert "beta.py" not in names
    def test_directories_have_trailing_slash(self, tmp_path):
        (tmp_path / "mydir").mkdir()
        (tmp_path / "myfile.txt").touch()
        completions = list(SlashCommandCompleter._path_completions(f"{tmp_path}/"))
        names = _display_names(completions)
        metas = _display_metas(completions)
        assert "mydir/" in names
        # Directories are tagged with a "dir" meta label.
        idx = names.index("mydir/")
        assert metas[idx] == "dir"
    def test_home_expansion(self, tmp_path, monkeypatch):
        # NOTE(review): relies on $HOME for ~ expansion — POSIX only; on
        # Windows expanduser may consult USERPROFILE instead.
        monkeypatch.setenv("HOME", str(tmp_path))
        (tmp_path / "testfile.md").touch()
        completions = list(SlashCommandCompleter._path_completions("~/test"))
        names = _display_names(completions)
        assert "testfile.md" in names
    def test_nonexistent_dir_returns_empty(self):
        completions = list(SlashCommandCompleter._path_completions("/nonexistent_dir_xyz/"))
        assert completions == []
    def test_respects_limit(self, tmp_path):
        for i in range(50):
            (tmp_path / f"file_{i:03d}.txt").touch()
        completions = list(SlashCommandCompleter._path_completions(f"{tmp_path}/", limit=10))
        assert len(completions) == 10
    def test_case_insensitive_prefix(self, tmp_path):
        (tmp_path / "README.md").touch()
        completions = list(SlashCommandCompleter._path_completions(f"{tmp_path}/read"))
        names = _display_names(completions)
        assert "README.md" in names
class TestIntegration:
    """Test the completer produces path completions via the prompt_toolkit API."""
    def test_slash_commands_still_work(self, completer):
        """Path support must not break the existing slash-command completion."""
        doc = Document("/hel", cursor_position=4)
        event = MagicMock()
        completions = list(completer.get_completions(doc, event))
        names = _display_names(completions)
        assert "/help" in names
    def test_path_completion_triggers_on_dot_slash(self, completer, tmp_path):
        """A './' prefix mid-sentence triggers file completion relative to CWD."""
        (tmp_path / "test.py").touch()
        old_cwd = os.getcwd()
        os.chdir(tmp_path)
        try:
            doc = Document("edit ./te", cursor_position=9)
            event = MagicMock()
            completions = list(completer.get_completions(doc, event))
            names = _display_names(completions)
            assert "test.py" in names
        finally:
            os.chdir(old_cwd)  # restore CWD for subsequent tests
    def test_no_completion_for_plain_words(self, completer):
        """Ordinary prose without a path-like token yields no completions."""
        doc = Document("hello world", cursor_position=11)
        event = MagicMock()
        completions = list(completer.get_completions(doc, event))
        assert completions == []
    def test_absolute_path_triggers_completion(self, completer):
        # NOTE(review): depends on /etc/hosts existing — Linux/macOS only;
        # consider a skipif marker for non-POSIX platforms.
        doc = Document("check /etc/hos", cursor_position=14)
        event = MagicMock()
        completions = list(completer.get_completions(doc, event))
        names = _display_names(completions)
        # /etc/hosts should exist on Linux
        assert any("host" in n.lower() for n in names)
class TestFileSizeLabel:
    """Human-readable size labels produced by _file_size_label."""

    def test_bytes(self, tmp_path):
        target = tmp_path / "small.txt"
        target.write_text("hi")
        assert _file_size_label(str(target)) == "2B"

    def test_kilobytes(self, tmp_path):
        target = tmp_path / "medium.txt"
        target.write_bytes(b"x" * 2048)
        assert _file_size_label(str(target)) == "2K"

    def test_megabytes(self, tmp_path):
        target = tmp_path / "large.bin"
        target.write_bytes(b"x" * (2 * 1024 * 1024))
        assert _file_size_label(str(target)) == "2.0M"

    def test_nonexistent(self):
        assert _file_size_label("/nonexistent_xyz") == ""

View file

@ -0,0 +1,48 @@
"""Tests for CLI placeholder text in config/setup output."""
import os
from argparse import Namespace
from unittest.mock import patch
import pytest
from hermes_cli.config import config_command, show_config
from hermes_cli.setup import _print_setup_summary
def test_config_set_usage_marks_placeholders(capsys):
    """`config set` without key/value exits 1 and prints its usage string verbatim."""
    args = Namespace(config_command="set", key=None, value=None)
    with pytest.raises(SystemExit) as excinfo:
        config_command(args)
    assert excinfo.value.code == 1
    printed = capsys.readouterr().out
    assert "Usage: hermes config set <key> <value>" in printed
def test_config_unknown_command_help_marks_placeholders(capsys):
    """An unknown config subcommand exits 1 and shows the command summary."""
    with pytest.raises(SystemExit) as excinfo:
        config_command(Namespace(config_command="wat"))
    assert excinfo.value.code == 1
    printed = capsys.readouterr().out
    assert "hermes config set <key> <value> Set a config value" in printed
def test_show_config_marks_placeholders(tmp_path, capsys):
    """show_config output keeps the <key> <value> placeholders literal."""
    with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
        show_config()
    printed = capsys.readouterr().out
    assert "hermes config set <key> <value>" in printed
def test_setup_summary_marks_placeholders(tmp_path, capsys):
    """The setup summary keeps the <key> <value> placeholders literal."""
    with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
        _print_setup_summary({"tts": {"provider": "edge"}}, tmp_path)
    printed = capsys.readouterr().out
    assert "hermes config set <key> <value>" in printed

View file

@ -0,0 +1,542 @@
"""Tests for the interactive session browser (`hermes sessions browse`).
Covers:
- _session_browse_picker logic (curses mocked, fallback tested)
- cmd_sessions 'browse' action integration
- Argument parser registration
"""
import os
import time
from unittest.mock import MagicMock, patch, call
import pytest
from hermes_cli.main import _session_browse_picker
# ─── Sample session data ──────────────────────────────────────────────────────
def _make_sessions(n=5):
"""Generate a list of fake rich-session dicts."""
now = time.time()
sessions = []
for i in range(n):
sessions.append({
"id": f"20260308_{i:06d}_abcdef",
"source": "cli" if i % 2 == 0 else "telegram",
"model": "test/model",
"title": f"Session {i}" if i % 3 != 0 else None,
"preview": f"Hello from session {i}",
"last_active": now - i * 3600,
"started_at": now - i * 3600 - 60,
"message_count": (i + 1) * 5,
})
return sessions
SAMPLE_SESSIONS = _make_sessions(5)  # prebuilt sample data; tests shown here build their own via _make_sessions()
# ─── _session_browse_picker ──────────────────────────────────────────────────
class TestSessionBrowsePicker:
    """Tests for the _session_browse_picker function.

    Most tests force the plain numbered-list fallback path by making
    ``import curses`` raise inside the picker (see _force_curses_fallback).
    """

    @staticmethod
    def _force_curses_fallback():
        """Return a patcher that makes ``import curses`` raise ImportError.

        The returned ``patch`` context manager replaces ``builtins.__import__``
        so the picker cannot import curses and must use its numbered-list
        fallback UI. Previously this boilerplate was copy-pasted in every test.
        """
        import builtins
        real_import = builtins.__import__

        def _blocked_import(name, *args, **kwargs):
            if name == "curses":
                raise ImportError("no curses")
            return real_import(name, *args, **kwargs)

        return patch.object(builtins, "__import__", side_effect=_blocked_import)

    def test_empty_sessions_returns_none(self, capsys):
        """An empty session list yields None and a friendly message."""
        result = _session_browse_picker([])
        assert result is None
        assert "No sessions found" in capsys.readouterr().out

    def test_returns_none_when_no_sessions(self, capsys):
        # NOTE(review): duplicates test_empty_sessions_returns_none; kept so the
        # suite's collected test names stay stable.
        result = _session_browse_picker([])
        assert result is None

    def test_fallback_mode_valid_selection(self):
        """When curses is unavailable, fallback numbered list should work."""
        sessions = _make_sessions(3)
        with self._force_curses_fallback(), patch("builtins.input", return_value="2"):
            result = _session_browse_picker(sessions)
        assert result == sessions[1]["id"]

    def test_fallback_mode_cancel_q(self):
        """Entering 'q' in fallback mode cancels."""
        sessions = _make_sessions(3)
        with self._force_curses_fallback(), patch("builtins.input", return_value="q"):
            result = _session_browse_picker(sessions)
        assert result is None

    def test_fallback_mode_cancel_empty(self):
        """Entering empty string in fallback mode cancels."""
        sessions = _make_sessions(3)
        with self._force_curses_fallback(), patch("builtins.input", return_value=""):
            result = _session_browse_picker(sessions)
        assert result is None

    def test_fallback_mode_invalid_then_valid(self):
        """Invalid selection followed by valid one works."""
        sessions = _make_sessions(3)
        with self._force_curses_fallback(), patch("builtins.input", side_effect=["99", "1"]):
            result = _session_browse_picker(sessions)
        assert result == sessions[0]["id"]

    def test_fallback_mode_keyboard_interrupt(self):
        """KeyboardInterrupt in fallback mode returns None."""
        sessions = _make_sessions(3)
        with self._force_curses_fallback(), patch("builtins.input", side_effect=KeyboardInterrupt):
            result = _session_browse_picker(sessions)
        assert result is None

    def test_fallback_displays_all_sessions(self, capsys):
        """Fallback mode should display all session entries."""
        sessions = _make_sessions(4)
        with self._force_curses_fallback(), patch("builtins.input", return_value="q"):
            _session_browse_picker(sessions)
        output = capsys.readouterr().out
        # All 4 numbered entries should be shown
        for marker in ("1.", "2.", "3.", "4."):
            assert marker in output

    def test_fallback_shows_title_over_preview(self, capsys):
        """When a session has a title, show it instead of the preview."""
        sessions = [{
            "id": "test_001",
            "source": "cli",
            "title": "My Cool Project",
            "preview": "some preview text",
            "last_active": time.time(),
        }]
        with self._force_curses_fallback(), patch("builtins.input", return_value="q"):
            _session_browse_picker(sessions)
        assert "My Cool Project" in capsys.readouterr().out

    def test_fallback_shows_preview_when_no_title(self, capsys):
        """When no title, show preview."""
        sessions = [{
            "id": "test_002",
            "source": "cli",
            "title": None,
            "preview": "Hello world test message",
            "last_active": time.time(),
        }]
        with self._force_curses_fallback(), patch("builtins.input", return_value="q"):
            _session_browse_picker(sessions)
        assert "Hello world test message" in capsys.readouterr().out

    def test_fallback_shows_id_when_no_title_or_preview(self, capsys):
        """When neither title nor preview, show session ID."""
        sessions = [{
            "id": "test_003_fallback",
            "source": "cli",
            "title": None,
            "preview": "",
            "last_active": time.time(),
        }]
        with self._force_curses_fallback(), patch("builtins.input", return_value="q"):
            _session_browse_picker(sessions)
        assert "test_003_fallback" in capsys.readouterr().out
# ─── Curses-based picker (mocked curses) ────────────────────────────────────
class TestCursesBrowse:
    """Tests for the curses-based interactive picker via simulated key sequences.

    NOTE(review): several tests re-import curses locally only for KEY_* constants;
    test_q_cancels and test_q_quits_when_no_filter_active exercise the same keys.
    """
    def _run_with_keys(self, sessions, key_sequence):
        """Simulate running the curses picker with a given key sequence."""
        import curses  # ensures the module exists so patch("curses.wrapper") resolves
        # Build a mock stdscr that returns keys from the sequence
        mock_stdscr = MagicMock()
        mock_stdscr.getmaxyx.return_value = (30, 120)  # generous 30x120 terminal
        mock_stdscr.getch.side_effect = key_sequence
        # Capture what curses.wrapper receives and call it with our mock
        with patch("curses.wrapper") as mock_wrapper:
            # When wrapper is called, invoke the function with our mock stdscr
            def run_inner(func):
                try:
                    func(mock_stdscr)
                except StopIteration:
                    pass  # key sequence exhausted
            mock_wrapper.side_effect = run_inner
            with patch("curses.curs_set"):
                with patch("curses.has_colors", return_value=False):
                    return _session_browse_picker(sessions)
    def test_enter_selects_first_session(self):
        """Enter (keycode 10) with no navigation picks the first entry."""
        sessions = _make_sessions(3)
        result = self._run_with_keys(sessions, [10])  # Enter key
        assert result == sessions[0]["id"]
    def test_down_then_enter_selects_second(self):
        import curses
        sessions = _make_sessions(3)
        result = self._run_with_keys(sessions, [curses.KEY_DOWN, 10])
        assert result == sessions[1]["id"]
    def test_down_down_enter_selects_third(self):
        import curses
        sessions = _make_sessions(5)
        result = self._run_with_keys(sessions, [curses.KEY_DOWN, curses.KEY_DOWN, 10])
        assert result == sessions[2]["id"]
    def test_up_wraps_to_last(self):
        """Moving up from the first entry wraps the cursor to the last one."""
        import curses
        sessions = _make_sessions(3)
        result = self._run_with_keys(sessions, [curses.KEY_UP, 10])
        assert result == sessions[2]["id"]
    def test_escape_cancels(self):
        sessions = _make_sessions(3)
        result = self._run_with_keys(sessions, [27])  # Esc
        assert result is None
    def test_q_cancels(self):
        sessions = _make_sessions(3)
        result = self._run_with_keys(sessions, [ord('q')])
        assert result is None
    def test_type_to_filter_then_enter(self):
        """Typing characters filters the list, Enter selects from filtered."""
        import curses
        sessions = [
            {"id": "s1", "source": "cli", "title": "Alpha project", "preview": "", "last_active": time.time()},
            {"id": "s2", "source": "cli", "title": "Beta project", "preview": "", "last_active": time.time()},
            {"id": "s3", "source": "cli", "title": "Gamma project", "preview": "", "last_active": time.time()},
        ]
        # Type "Beta" then Enter — should select s2
        keys = [ord(c) for c in "Beta"] + [10]
        result = self._run_with_keys(sessions, keys)
        assert result == "s2"
    def test_filter_no_match_enter_does_nothing(self):
        """When filter produces no results, Enter shouldn't select."""
        sessions = _make_sessions(3)
        keys = [ord(c) for c in "zzzznonexistent"] + [10]
        result = self._run_with_keys(sessions, keys)
        assert result is None
    def test_backspace_removes_filter_char(self):
        """Backspace removes the last character from the filter."""
        import curses
        sessions = [
            {"id": "s1", "source": "cli", "title": "Alpha", "preview": "", "last_active": time.time()},
            {"id": "s2", "source": "cli", "title": "Beta", "preview": "", "last_active": time.time()},
        ]
        # Type "Bet", backspace, backspace, backspace (clears filter), then Enter (selects first)
        keys = [ord('B'), ord('e'), ord('t'), 127, 127, 127, 10]
        result = self._run_with_keys(sessions, keys)
        assert result == "s1"
    def test_escape_clears_filter_first(self):
        """First Esc clears the search text, second Esc exits."""
        import curses
        sessions = _make_sessions(3)
        # Type "ab" then Esc (clears filter) then Enter (selects first)
        keys = [ord('a'), ord('b'), 27, 10]
        result = self._run_with_keys(sessions, keys)
        assert result == sessions[0]["id"]
    def test_filter_matches_preview(self):
        """Typing should match against session preview text."""
        sessions = [
            {"id": "s1", "source": "cli", "title": None, "preview": "Set up Minecraft server", "last_active": time.time()},
            {"id": "s2", "source": "cli", "title": None, "preview": "Review PR 438", "last_active": time.time()},
        ]
        keys = [ord(c) for c in "Mine"] + [10]
        result = self._run_with_keys(sessions, keys)
        assert result == "s1"
    def test_filter_matches_source(self):
        """Typing a source name should filter by source."""
        sessions = [
            {"id": "s1", "source": "telegram", "title": "TG session", "preview": "", "last_active": time.time()},
            {"id": "s2", "source": "cli", "title": "CLI session", "preview": "", "last_active": time.time()},
        ]
        keys = [ord(c) for c in "telegram"] + [10]
        result = self._run_with_keys(sessions, keys)
        assert result == "s1"
    def test_q_quits_when_no_filter_active(self):
        """When no search text is active, 'q' should quit (not filter)."""
        sessions = _make_sessions(3)
        result = self._run_with_keys(sessions, [ord('q')])
        assert result is None
    def test_q_types_into_filter_when_filter_active(self):
        """When search text is already active, 'q' should add to filter, not quit."""
        sessions = [
            {"id": "s1", "source": "cli", "title": "the sequel", "preview": "", "last_active": time.time()},
            {"id": "s2", "source": "cli", "title": "other thing", "preview": "", "last_active": time.time()},
        ]
        # Type "se" first (activates filter, matches "the sequel")
        # Then type "q" — should add 'q' to filter (filter="seq"), NOT quit
        # "seq" still matches "the sequel" → Enter selects it
        keys = [ord('s'), ord('e'), ord('q'), 10]
        result = self._run_with_keys(sessions, keys)
        assert result == "s1"  # "the sequel" matches "seq"
# ─── Argument parser registration ──────────────────────────────────────────
class TestSessionBrowseArgparse:
    """Verify the 'browse' subcommand is properly registered.

    NOTE(review): these are smoke checks only — neither test actually parses
    "sessions browse" or inspects the --limit default; consider building the
    parser via a factory so it can be asserted against directly.
    """
    def test_browse_subcommand_exists(self):
        """hermes sessions browse should be parseable."""
        from hermes_cli.main import main as _main_entry
        # We can't run main(), but we can import and test the parser setup
        # by checking that argparse doesn't error on "sessions browse"
        import argparse
        # Re-create the parser portion
        # Instead, let's just verify the import works and the function exists
        from hermes_cli.main import _session_browse_picker
        assert callable(_session_browse_picker)
    def test_browse_default_limit_is_50(self):
        """The default --limit for browse should be 50."""
        # This test verifies at the argparse level
        # We test by running the parse on "sessions browse" args
        # Since we can't easily extract the subparser, verify via the
        # _session_browse_picker accepting large lists
        sessions = _make_sessions(50)
        assert len(sessions) == 50
# ─── Integration: cmd_sessions browse action ────────────────────────────────
class TestCmdSessionsBrowse:
    """Integration tests for the 'browse' action in cmd_sessions."""

    def test_browse_no_sessions_prints_message(self, capsys):
        """When no sessions exist, _session_browse_picker returns None and prints message."""
        assert _session_browse_picker([]) is None
        assert "No sessions found" in capsys.readouterr().out

    def test_browse_with_source_filter(self):
        """The --source flag should be passed to list_sessions_rich."""
        import builtins
        sessions = [
            {"id": "s1", "source": "cli", "title": "CLI only", "preview": "", "last_active": time.time()},
        ]
        real_import = builtins.__import__

        def _no_curses(name, *args, **kwargs):
            # Block curses so the picker falls back to the numbered list.
            if name == "curses":
                raise ImportError("no curses")
            return real_import(name, *args, **kwargs)

        with patch.object(builtins, "__import__", side_effect=_no_curses):
            with patch("builtins.input", return_value="1"):
                assert _session_browse_picker(sessions) == "s1"
# ─── Edge cases ──────────────────────────────────────────────────────────────
class TestEdgeCases:
    """Edge case handling for the session browser."""

    @staticmethod
    def _no_curses():
        """Return a patch context that makes `import curses` fail.

        All tests in this class exercise the plain-input fallback mode, and
        previously duplicated this __import__ stub four times.
        """
        import builtins
        original_import = builtins.__import__

        def mock_import(name, *args, **kwargs):
            if name == "curses":
                raise ImportError("no curses")
            return original_import(name, *args, **kwargs)

        return patch.object(builtins, "__import__", side_effect=mock_import)

    def test_sessions_with_missing_fields(self):
        """Sessions with missing optional fields should not crash."""
        sessions = [
            {"id": "minimal_001", "source": "cli"},  # No title, preview, last_active
        ]
        with self._no_curses():
            with patch("builtins.input", return_value="1"):
                result = _session_browse_picker(sessions)
        assert result == "minimal_001"

    def test_single_session(self):
        """A single session in the list should work fine."""
        sessions = [
            {"id": "only_one", "source": "cli", "title": "Solo", "preview": "", "last_active": time.time()},
        ]
        with self._no_curses():
            with patch("builtins.input", return_value="1"):
                result = _session_browse_picker(sessions)
        assert result == "only_one"

    def test_long_title_truncated_in_fallback(self, capsys):
        """Very long titles should be truncated in fallback mode."""
        sessions = [{
            "id": "long_title_001",
            "source": "cli",
            "title": "A" * 100,
            "preview": "",
            "last_active": time.time(),
        }]
        with self._no_curses():
            with patch("builtins.input", return_value="q"):
                _session_browse_picker(sessions)
        output = capsys.readouterr().out
        # Title should be truncated to 50 chars with "..."
        assert "..." in output

    def test_relative_time_formatting(self, capsys):
        """Verify various time deltas format correctly."""
        now = time.time()
        sessions = [
            {"id": "recent", "source": "cli", "title": None, "preview": "just now test", "last_active": now},
            {"id": "hour_ago", "source": "cli", "title": None, "preview": "hour ago test", "last_active": now - 7200},
            {"id": "days_ago", "source": "cli", "title": None, "preview": "days ago test", "last_active": now - 259200},
        ]
        with self._no_curses():
            with patch("builtins.input", return_value="q"):
                _session_browse_picker(sessions)
        output = capsys.readouterr().out
        assert "just now" in output
        assert "2h ago" in output
        assert "3d ago" in output

View file

@ -0,0 +1,64 @@
import sys
def test_sessions_delete_accepts_unique_id_prefix(monkeypatch, capsys):
    """`sessions delete <prefix> --yes` resolves the prefix, deletes, and closes the DB."""
    import hermes_cli.main as main_mod
    import hermes_state

    seen = {}

    class StubDB:
        def resolve_session_id(self, session_id):
            seen["resolved_from"] = session_id
            return "20260315_092437_c9a6ff"

        def delete_session(self, session_id):
            seen["deleted"] = session_id
            return True

        def close(self):
            seen["closed"] = True

    monkeypatch.setattr(hermes_state, "SessionDB", lambda: StubDB())
    monkeypatch.setattr(
        sys,
        "argv",
        ["hermes", "sessions", "delete", "20260315_092437_c9a6", "--yes"],
    )
    main_mod.main()
    printed = capsys.readouterr().out
    # The prefix is resolved first; the full id is what gets deleted.
    assert seen == {
        "resolved_from": "20260315_092437_c9a6",
        "deleted": "20260315_092437_c9a6ff",
        "closed": True,
    }
    assert "Deleted session '20260315_092437_c9a6ff'." in printed
def test_sessions_delete_reports_not_found_when_prefix_is_unknown(monkeypatch, capsys):
    """An unresolvable prefix prints a not-found message and never calls delete."""
    import hermes_cli.main as main_mod
    import hermes_state

    class StubDB:
        def resolve_session_id(self, session_id):
            # Simulate no session matching the given prefix.
            return None

        def delete_session(self, session_id):
            raise AssertionError("delete_session should not be called when resolution fails")

        def close(self):
            pass

    monkeypatch.setattr(hermes_state, "SessionDB", lambda: StubDB())
    monkeypatch.setattr(
        sys,
        "argv",
        ["hermes", "sessions", "delete", "missing-prefix", "--yes"],
    )
    main_mod.main()
    assert "Session 'missing-prefix' not found." in capsys.readouterr().out

View file

@ -0,0 +1,127 @@
"""Tests for set_config_value — verifying secrets route to .env and config to config.yaml."""
import os
from pathlib import Path
from unittest.mock import patch, call
import pytest
from hermes_cli.config import set_config_value
@pytest.fixture(autouse=True)
def _isolated_hermes_home(tmp_path):
    """Point HERMES_HOME at a temp dir so tests never touch real config."""
    # An empty .env must already exist so secret writes have a file to update.
    (tmp_path / ".env").touch()
    with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
        yield tmp_path
def _read_env(tmp_path):
return (tmp_path / ".env").read_text()
def _read_config(tmp_path):
config_path = tmp_path / "config.yaml"
return config_path.read_text() if config_path.exists() else ""
# ---------------------------------------------------------------------------
# Explicit allowlist keys → .env
# ---------------------------------------------------------------------------
class TestExplicitAllowlist:
    """Keys in the hardcoded allowlist should always go to .env."""

    @pytest.mark.parametrize("key", [
        "OPENROUTER_API_KEY",
        "OPENAI_API_KEY",
        "ANTHROPIC_API_KEY",
        "WANDB_API_KEY",
        "TINKER_API_KEY",
        "HONCHO_API_KEY",
        "FIRECRAWL_API_KEY",
        "BROWSERBASE_API_KEY",
        "FAL_KEY",
        "SUDO_PASSWORD",
        "GITHUB_TOKEN",
        "TELEGRAM_BOT_TOKEN",
        "DISCORD_BOT_TOKEN",
        "SLACK_BOT_TOKEN",
        "SLACK_APP_TOKEN",
    ])
    def test_explicit_key_routes_to_env(self, key, _isolated_hermes_home):
        set_config_value(key, "test-value-123")
        # The secret must land in .env ...
        assert f"{key}=test-value-123" in _read_env(_isolated_hermes_home)
        # ... and never appear in config.yaml.
        assert key not in _read_config(_isolated_hermes_home)
# ---------------------------------------------------------------------------
# Catch-all patterns → .env
# ---------------------------------------------------------------------------
class TestCatchAllPatterns:
    """Any key ending in _API_KEY or _TOKEN should route to .env."""

    @pytest.mark.parametrize("key", [
        "DAYTONA_API_KEY",
        "ELEVENLABS_API_KEY",
        "SOME_FUTURE_SERVICE_API_KEY",
        "MY_CUSTOM_TOKEN",
        "WHATSAPP_BOT_TOKEN",
    ])
    def test_api_key_suffix_routes_to_env(self, key, _isolated_hermes_home):
        set_config_value(key, "secret-456")
        # Suffix match alone is enough to classify the key as a secret.
        assert f"{key}=secret-456" in _read_env(_isolated_hermes_home)
        assert key not in _read_config(_isolated_hermes_home)

    def test_case_insensitive(self, _isolated_hermes_home):
        """Keys should be uppercased regardless of input casing."""
        set_config_value("openai_api_key", "sk-test")
        assert "OPENAI_API_KEY=sk-test" in _read_env(_isolated_hermes_home)

    def test_terminal_ssh_prefix_routes_to_env(self, _isolated_hermes_home):
        set_config_value("TERMINAL_SSH_PORT", "2222")
        assert "TERMINAL_SSH_PORT=2222" in _read_env(_isolated_hermes_home)
# ---------------------------------------------------------------------------
# Non-secret keys → config.yaml
# ---------------------------------------------------------------------------
class TestConfigYamlRouting:
    """Regular config keys should go to config.yaml, NOT .env."""

    def test_simple_key(self, _isolated_hermes_home):
        set_config_value("model", "gpt-4o")
        assert "gpt-4o" in _read_config(_isolated_hermes_home)
        assert "model" not in _read_env(_isolated_hermes_home)

    def test_nested_key(self, _isolated_hermes_home):
        set_config_value("terminal.backend", "docker")
        assert "docker" in _read_config(_isolated_hermes_home)
        assert "terminal" not in _read_env(_isolated_hermes_home)

    def test_terminal_image_goes_to_config(self, _isolated_hermes_home):
        """TERMINAL_DOCKER_IMAGE doesn't match _API_KEY or _TOKEN, so config.yaml."""
        set_config_value("terminal.docker_image", "python:3.12")
        assert "python:3.12" in _read_config(_isolated_hermes_home)

    def test_terminal_docker_cwd_mount_flag_goes_to_config_and_env(self, _isolated_hermes_home):
        set_config_value("terminal.docker_mount_cwd_to_workspace", "true")
        config = _read_config(_isolated_hermes_home)
        env_content = _read_env(_isolated_hermes_home)
        # YAML may quote the boolean, and .env may preserve either casing.
        assert any(
            marker in config
            for marker in (
                "docker_mount_cwd_to_workspace: 'true'",
                "docker_mount_cwd_to_workspace: true",
            )
        )
        assert any(
            marker in env_content
            for marker in (
                "TERMINAL_DOCKER_MOUNT_CWD_TO_WORKSPACE=true",
                "TERMINAL_DOCKER_MOUNT_CWD_TO_WORKSPACE=True",
            )
        )

View file

@ -0,0 +1,178 @@
import json
from hermes_cli.auth import _update_config_for_provider, get_active_provider
from hermes_cli.config import load_config, save_config
from hermes_cli.setup import setup_model_provider
def _maybe_keep_current_tts(question, choices):
if question != "Select TTS provider:":
return None
assert choices[-1].startswith("Keep current (")
return len(choices) - 1
def _clear_provider_env(monkeypatch):
for key in (
"NOUS_API_KEY",
"OPENROUTER_API_KEY",
"OPENAI_BASE_URL",
"OPENAI_API_KEY",
"LLM_MODEL",
):
monkeypatch.delenv(key, raising=False)
def test_nous_oauth_setup_keeps_current_model_when_syncing_disk_provider(
    tmp_path, monkeypatch
):
    """Selecting Nous OAuth during setup must keep the existing default model.

    A fake login writes auth.json and syncs the provider to disk; the model
    menu is then expected to offer — and keep — the current default model.
    """
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    config = load_config()
    def fake_prompt_choice(question, choices, default=0):
        # Provider menu: index 0 selects the Nous provider in this flow.
        if question == "Select your inference provider:":
            return 0
        if question == "Configure vision:":
            return len(choices) - 1
        if question == "Select default model:":
            # The keep-current entry must show this exact model (presumably
            # seeded when the provider config is synced to disk — confirm
            # against _update_config_for_provider).
            assert choices[-1] == "Keep current (anthropic/claude-opus-4.6)"
            return len(choices) - 1
        tts_idx = _maybe_keep_current_tts(question, choices)
        if tts_idx is not None:
            return tts_idx
        raise AssertionError(f"Unexpected prompt_choice call: {question}")
    monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
    monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "")
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
    def _fake_login_nous(*args, **kwargs):
        # Simulate a successful OAuth login: persist auth.json and sync
        # the provider/base_url into the on-disk config.
        auth_path = tmp_path / "auth.json"
        auth_path.write_text(json.dumps({"active_provider": "nous", "providers": {}}))
        _update_config_for_provider("nous", "https://inference.example.com/v1")
    monkeypatch.setattr("hermes_cli.auth._login_nous", _fake_login_nous)
    monkeypatch.setattr(
        "hermes_cli.auth.resolve_nous_runtime_credentials",
        lambda *args, **kwargs: {
            "base_url": "https://inference.example.com/v1",
            "api_key": "nous-key",
        },
    )
    monkeypatch.setattr(
        "hermes_cli.auth.fetch_nous_models",
        lambda *args, **kwargs: ["gemini-3-flash"],
    )
    setup_model_provider(config)
    save_config(config)
    reloaded = load_config()
    # Provider and base URL come from the fake login; the default model
    # must survive the keep-current choice unchanged.
    assert isinstance(reloaded["model"], dict)
    assert reloaded["model"]["provider"] == "nous"
    assert reloaded["model"]["base_url"] == "https://inference.example.com/v1"
    assert reloaded["model"]["default"] == "anthropic/claude-opus-4.6"
def test_custom_setup_clears_active_oauth_provider(tmp_path, monkeypatch):
    """Switching to a custom endpoint must deactivate any active OAuth provider."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    # Pre-seed auth.json with an active Nous OAuth session that setup
    # is expected to clear.
    auth_path = tmp_path / "auth.json"
    auth_path.write_text(json.dumps({"active_provider": "nous", "providers": {}}))
    config = load_config()
    def fake_prompt_choice(question, choices, default=0):
        # Provider menu: index 3 selects the custom-endpoint option.
        if question == "Select your inference provider:":
            return 3
        tts_idx = _maybe_keep_current_tts(question, choices)
        if tts_idx is not None:
            return tts_idx
        raise AssertionError(f"Unexpected prompt_choice call: {question}")
    monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
    # _model_flow_custom uses builtins.input (URL, key, model, context_length)
    input_values = iter([
        "https://custom.example/v1",
        "custom-api-key",
        "custom/model",
        "",  # context_length (blank = auto-detect)
    ])
    monkeypatch.setattr("builtins.input", lambda _prompt="": next(input_values))
    monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
    monkeypatch.setattr("hermes_cli.main._save_custom_provider", lambda *args, **kwargs: None)
    # Pretend the endpoint probe succeeds so the flow completes.
    monkeypatch.setattr(
        "hermes_cli.models.probe_api_models",
        lambda api_key, base_url: {"models": ["m"], "probed_url": base_url + "/models"},
    )
    setup_model_provider(config)
    # Core assertion: switching to custom endpoint clears OAuth provider
    assert get_active_provider() is None
    # _model_flow_custom writes config via its own load/save cycle
    reloaded = load_config()
    if isinstance(reloaded.get("model"), dict):
        assert reloaded["model"].get("provider") == "custom"
        assert reloaded["model"].get("default") == "custom/model"
def test_codex_setup_uses_runtime_access_token_for_live_model_list(tmp_path, monkeypatch):
    """The Codex model list must be fetched with the runtime OAuth access token."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    # Must be set AFTER _clear_provider_env, which deletes OPENROUTER_API_KEY.
    # (A duplicate setenv placed before the clear was dead code and removed.)
    monkeypatch.setenv("OPENROUTER_API_KEY", "or-test-key")
    config = load_config()

    def fake_prompt_choice(question, choices, default=0):
        # Provider menu index 1 selects the Codex provider in this flow;
        # model menu index 0 picks the first model from the fake catalog.
        if question == "Select your inference provider:":
            return 1
        if question == "Select default model:":
            return 0
        tts_idx = _maybe_keep_current_tts(question, choices)
        if tts_idx is not None:
            return tts_idx
        raise AssertionError(f"Unexpected prompt_choice call: {question}")

    monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
    monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "")
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
    monkeypatch.setattr("hermes_cli.auth._login_openai_codex", lambda *args, **kwargs: None)
    monkeypatch.setattr(
        "hermes_cli.auth.resolve_codex_runtime_credentials",
        lambda *args, **kwargs: {
            "base_url": "https://chatgpt.com/backend-api/codex",
            "api_key": "codex-access-token",
        },
    )
    captured = {}

    def _fake_get_codex_model_ids(access_token=None):
        # Record the token so we can assert it came from the runtime credentials.
        captured["access_token"] = access_token
        return ["gpt-5.2-codex", "gpt-5.2"]

    monkeypatch.setattr(
        "hermes_cli.codex_models.get_codex_model_ids",
        _fake_get_codex_model_ids,
    )
    setup_model_provider(config)
    save_config(config)
    reloaded = load_config()
    assert captured["access_token"] == "codex-access-token"
    assert isinstance(reloaded["model"], dict)
    assert reloaded["model"]["provider"] == "openai-codex"
    assert reloaded["model"]["default"] == "gpt-5.2-codex"
    assert reloaded["model"]["base_url"] == "https://chatgpt.com/backend-api/codex"

View file

@ -0,0 +1,473 @@
"""Regression tests for interactive setup provider/model persistence."""
from __future__ import annotations
from hermes_cli.config import load_config, save_config, save_env_value
from hermes_cli.setup import _print_setup_summary, setup_model_provider
def _maybe_keep_current_tts(question, choices):
if question != "Select TTS provider:":
return None
assert choices[-1].startswith("Keep current (")
return len(choices) - 1
def _read_env(home):
env_path = home / ".env"
data = {}
if not env_path.exists():
return data
for line in env_path.read_text().splitlines():
if not line or line.startswith("#") or "=" not in line:
continue
k, v = line.split("=", 1)
data[k] = v
return data
def _clear_provider_env(monkeypatch):
for key in (
"HERMES_INFERENCE_PROVIDER",
"OPENAI_BASE_URL",
"OPENAI_API_KEY",
"OPENROUTER_API_KEY",
"GITHUB_TOKEN",
"GH_TOKEN",
"GLM_API_KEY",
"KIMI_API_KEY",
"MINIMAX_API_KEY",
"MINIMAX_CN_API_KEY",
"ANTHROPIC_TOKEN",
"ANTHROPIC_API_KEY",
):
monkeypatch.delenv(key, raising=False)
def test_setup_keep_current_custom_from_config_does_not_fall_through(tmp_path, monkeypatch):
    """Keep-current custom should not fall through to the generic model menu."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    # Seed env + config with an already-configured custom endpoint.
    save_env_value("OPENAI_BASE_URL", "https://example.invalid/v1")
    save_env_value("OPENAI_API_KEY", "custom-key")
    config = load_config()
    config["model"] = {
        "default": "custom/model",
        "provider": "custom",
        "base_url": "https://example.invalid/v1",
    }
    save_config(config)
    def fake_prompt_choice(question, choices, default=0):
        if question == "Select your inference provider:":
            # The keep-current entry must surface the configured custom URL.
            assert choices[-1] == "Keep current (Custom: https://example.invalid/v1)"
            return len(choices) - 1
        tts_idx = _maybe_keep_current_tts(question, choices)
        if tts_idx is not None:
            return tts_idx
        # Reaching any other prompt (e.g. the model menu) IS the regression.
        raise AssertionError("Model menu should not appear for keep-current custom")
    monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
    monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "")
    monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
    monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None)
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
    setup_model_provider(config)
    save_config(config)
    reloaded = load_config()
    # The custom provider settings must survive the keep-current path intact.
    assert reloaded["model"]["provider"] == "custom"
    assert reloaded["model"]["default"] == "custom/model"
    assert reloaded["model"]["base_url"] == "https://example.invalid/v1"
def test_setup_custom_endpoint_saves_working_v1_base_url(tmp_path, monkeypatch):
    """Custom setup should persist the '/v1' base URL that the probe resolved."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    config = load_config()
    def fake_prompt_choice(question, choices, default=0):
        if question == "Select your inference provider:":
            return 3  # Custom endpoint
        if question == "Configure vision:":
            return len(choices) - 1  # Skip
        tts_idx = _maybe_keep_current_tts(question, choices)
        if tts_idx is not None:
            return tts_idx
        raise AssertionError(f"Unexpected prompt_choice call: {question}")
    # _model_flow_custom uses builtins.input (URL, key, model, context_length)
    input_values = iter([
        "http://localhost:8000",
        "local-key",
        "llm",
        "",  # context_length (blank = auto-detect)
    ])
    monkeypatch.setattr("builtins.input", lambda _prompt="": next(input_values))
    monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
    monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
    monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None)
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
    monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: [])
    monkeypatch.setattr("hermes_cli.main._save_custom_provider", lambda *args, **kwargs: None)
    # Simulate the probe falling back and discovering the working /v1 URL.
    monkeypatch.setattr(
        "hermes_cli.models.probe_api_models",
        lambda api_key, base_url: {
            "models": ["llm"],
            "probed_url": "http://localhost:8000/v1/models",
            "resolved_base_url": "http://localhost:8000/v1",
            "suggested_base_url": "http://localhost:8000/v1",
            "used_fallback": True,
        },
    )
    setup_model_provider(config)
    env = _read_env(tmp_path)
    # _model_flow_custom saves env vars and config to disk — the persisted
    # URL must be the resolved /v1 one, not the raw user input.
    assert env.get("OPENAI_BASE_URL") == "http://localhost:8000/v1"
    assert env.get("OPENAI_API_KEY") == "local-key"
    # The model config is saved as a dict by _model_flow_custom
    reloaded = load_config()
    model_cfg = reloaded.get("model", {})
    if isinstance(model_cfg, dict):
        assert model_cfg.get("provider") == "custom"
        assert model_cfg.get("default") == "llm"
def test_setup_keep_current_config_provider_uses_provider_specific_model_menu(tmp_path, monkeypatch):
    """Keep-current should respect config-backed providers, not fall back to OpenRouter."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    config = load_config()
    config["model"] = {
        "default": "claude-opus-4-6",
        "provider": "anthropic",
    }
    save_config(config)
    captured = {"provider_choices": None, "model_choices": None}

    def fake_prompt_choice(question, choices, default=0):
        if question == "Select your inference provider:":
            captured["provider_choices"] = list(choices)
            assert choices[-1] == "Keep current (Anthropic)"
            return len(choices) - 1
        if question == "Configure vision:":
            # (A tautological `assert question == "Configure vision:"` that
            # the branch condition already guaranteed was removed here.)
            assert choices[-1] == "Skip for now"
            return len(choices) - 1
        if question == "Select default model:":
            captured["model_choices"] = list(choices)
            return len(choices) - 1  # keep current model
        tts_idx = _maybe_keep_current_tts(question, choices)
        if tts_idx is not None:
            return tts_idx
        raise AssertionError(f"Unexpected prompt_choice call: {question}")

    monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
    monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "")
    monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
    monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None)
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
    monkeypatch.setattr("hermes_cli.models.provider_model_ids", lambda provider: [])
    monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: [])
    setup_model_provider(config)
    save_config(config)
    assert captured["provider_choices"] is not None
    assert captured["model_choices"] is not None
    # The model menu must be seeded from the anthropic provider: current
    # model listed first, no OpenRouter-style recommendation injected.
    assert captured["model_choices"][0] == "claude-opus-4-6"
    assert "anthropic/claude-opus-4.6 (recommended)" not in captured["model_choices"]
def test_setup_keep_current_anthropic_can_configure_openai_vision_default(tmp_path, monkeypatch):
    """Keeping the Anthropic provider can still wire up OpenAI-backed vision.

    Choosing vision option 1 should prompt for an OpenAI key and persist the
    OpenAI endpoint plus the default vision model to .env.
    """
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    config = load_config()
    config["model"] = {
        "default": "claude-opus-4-6",
        "provider": "anthropic",
    }
    save_config(config)
    def fake_prompt_choice(question, choices, default=0):
        if question == "Select your inference provider:":
            assert choices[-1] == "Keep current (Anthropic)"
            return len(choices) - 1
        if question == "Configure vision:":
            return 1  # presumably the OpenAI vision backend — confirm against the setup menu
        if question == "Select vision model:":
            assert choices[-1] == "Use default (gpt-4o-mini)"
            return len(choices) - 1
        if question == "Select default model:":
            assert choices[-1] == "Keep current (claude-opus-4-6)"
            return len(choices) - 1
        tts_idx = _maybe_keep_current_tts(question, choices)
        if tts_idx is not None:
            return tts_idx
        raise AssertionError(f"Unexpected prompt_choice call: {question}")
    monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
    # Only the OpenAI API key prompt gets a real answer; everything else is blank.
    monkeypatch.setattr(
        "hermes_cli.setup.prompt",
        lambda message, *args, **kwargs: "sk-openai" if "OpenAI API key" in message else "",
    )
    monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
    monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None)
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
    monkeypatch.setattr("hermes_cli.models.provider_model_ids", lambda provider: [])
    monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: [])
    setup_model_provider(config)
    env = _read_env(tmp_path)
    # The OpenAI vision settings must all land in .env.
    assert env.get("OPENAI_API_KEY") == "sk-openai"
    assert env.get("OPENAI_BASE_URL") == "https://api.openai.com/v1"
    assert env.get("AUXILIARY_VISION_MODEL") == "gpt-4o-mini"
def test_setup_copilot_uses_gh_auth_and_saves_provider(tmp_path, monkeypatch):
    """Copilot setup should rely on gh-auth credentials and persist the provider.

    The gh-sourced token must NOT be written to .env, and picking a
    /responses-capable model should record codex_responses api_mode plus the
    chosen reasoning effort.
    """
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    config = load_config()
    def fake_prompt_choice(question, choices, default=0):
        if question == "Select your inference provider:":
            # Menu position 14 is the Copilot entry.
            assert choices[14] == "GitHub Copilot (uses GITHUB_TOKEN or gh auth token)"
            return 14
        if question == "Select default model:":
            assert "gpt-4.1" in choices
            assert "gpt-5.4" in choices
            return choices.index("gpt-5.4")
        if question == "Select reasoning effort:":
            # Effort menu appears because gpt-5.4 advertises reasoning_effort.
            assert "low" in choices
            assert "high" in choices
            return choices.index("high")
        if question == "Configure vision:":
            return len(choices) - 1
        tts_idx = _maybe_keep_current_tts(question, choices)
        if tts_idx is not None:
            return tts_idx
        raise AssertionError(f"Unexpected prompt_choice call: {question}")
    def fake_prompt(message, *args, **kwargs):
        # gh auth supplies the token, so no free-text prompt should ever fire.
        raise AssertionError(f"Unexpected prompt call: {message}")
    def fake_get_auth_status(provider_id):
        if provider_id == "copilot":
            return {"logged_in": True}
        return {"logged_in": False}
    monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
    monkeypatch.setattr("hermes_cli.setup.prompt", fake_prompt)
    monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
    monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None)
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
    monkeypatch.setattr("hermes_cli.auth.get_auth_status", fake_get_auth_status)
    monkeypatch.setattr(
        "hermes_cli.auth.resolve_api_key_provider_credentials",
        lambda provider_id: {
            "provider": provider_id,
            "api_key": "gh-cli-token",
            "base_url": "https://api.githubcopilot.com",
            "source": "gh auth token",
        },
    )
    # Fake catalog: one chat/completions-only model, one /responses model.
    monkeypatch.setattr(
        "hermes_cli.models.fetch_github_model_catalog",
        lambda api_key: [
            {
                "id": "gpt-4.1",
                "capabilities": {"type": "chat", "supports": {}},
                "supported_endpoints": ["/chat/completions"],
            },
            {
                "id": "gpt-5.4",
                "capabilities": {"type": "chat", "supports": {"reasoning_effort": ["low", "medium", "high"]}},
                "supported_endpoints": ["/responses"],
            },
        ],
    )
    monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: [])
    setup_model_provider(config)
    save_config(config)
    env = _read_env(tmp_path)
    reloaded = load_config()
    # Token came from `gh auth token`, so it must not be persisted to .env.
    assert env.get("GITHUB_TOKEN") is None
    assert reloaded["model"]["provider"] == "copilot"
    assert reloaded["model"]["base_url"] == "https://api.githubcopilot.com"
    assert reloaded["model"]["default"] == "gpt-5.4"
    assert reloaded["model"]["api_mode"] == "codex_responses"
    assert reloaded["agent"]["reasoning_effort"] == "high"
def test_setup_copilot_acp_uses_model_picker_and_saves_provider(tmp_path, monkeypatch):
    """Copilot ACP setup should reuse the Copilot model picker and save acp:// config."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    config = load_config()
    def fake_prompt_choice(question, choices, default=0):
        if question == "Select your inference provider:":
            # Menu position 15 is the Copilot ACP entry.
            assert choices[15] == "GitHub Copilot ACP (spawns `copilot --acp --stdio`)"
            return 15
        if question == "Select default model:":
            assert "gpt-4.1" in choices
            assert "gpt-5.4" in choices
            return choices.index("gpt-5.4")
        if question == "Configure vision:":
            return len(choices) - 1
        tts_idx = _maybe_keep_current_tts(question, choices)
        if tts_idx is not None:
            return tts_idx
        raise AssertionError(f"Unexpected prompt_choice call: {question}")
    def fake_prompt(message, *args, **kwargs):
        # Credentials come from gh auth; no free-text prompt should fire.
        raise AssertionError(f"Unexpected prompt call: {message}")
    monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
    monkeypatch.setattr("hermes_cli.setup.prompt", fake_prompt)
    monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
    monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None)
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
    monkeypatch.setattr("hermes_cli.auth.get_auth_status", lambda provider_id: {"logged_in": provider_id == "copilot-acp"})
    # ACP resolves credentials via the plain "copilot" provider under the hood.
    monkeypatch.setattr(
        "hermes_cli.auth.resolve_api_key_provider_credentials",
        lambda provider_id: {
            "provider": "copilot",
            "api_key": "gh-cli-token",
            "base_url": "https://api.githubcopilot.com",
            "source": "gh auth token",
        },
    )
    monkeypatch.setattr(
        "hermes_cli.models.fetch_github_model_catalog",
        lambda api_key: [
            {
                "id": "gpt-4.1",
                "capabilities": {"type": "chat", "supports": {}},
                "supported_endpoints": ["/chat/completions"],
            },
            {
                "id": "gpt-5.4",
                "capabilities": {"type": "chat", "supports": {"reasoning_effort": ["low", "medium", "high"]}},
                "supported_endpoints": ["/responses"],
            },
        ],
    )
    monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: [])
    setup_model_provider(config)
    save_config(config)
    reloaded = load_config()
    assert reloaded["model"]["provider"] == "copilot-acp"
    assert reloaded["model"]["base_url"] == "acp://copilot"
    assert reloaded["model"]["default"] == "gpt-5.4"
    # Per this expectation, ACP uses chat_completions even though gpt-5.4
    # advertises a /responses endpoint.
    assert reloaded["model"]["api_mode"] == "chat_completions"
def test_setup_switch_custom_to_codex_clears_custom_endpoint_and_updates_config(tmp_path, monkeypatch):
    """Switching from custom to Codex should clear custom endpoint overrides."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    # Seed the on-disk state of a previously configured custom endpoint.
    save_env_value("OPENAI_BASE_URL", "https://example.invalid/v1")
    save_env_value("OPENAI_API_KEY", "sk-custom")
    save_env_value("OPENROUTER_API_KEY", "sk-or")
    config = load_config()
    config["model"] = {
        "default": "custom/model",
        "provider": "custom",
        "base_url": "https://example.invalid/v1",
    }
    save_config(config)
    def fake_prompt_choice(question, choices, default=0):
        if question == "Select your inference provider:":
            return 1  # presumably the Codex entry — confirm against the provider menu order
        if question == "Select default model:":
            return 0
        tts_idx = _maybe_keep_current_tts(question, choices)
        if tts_idx is not None:
            return tts_idx
        raise AssertionError(f"Unexpected prompt_choice call: {question}")
    monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
    monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "")
    monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
    monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None)
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
    monkeypatch.setattr("hermes_cli.auth._login_openai_codex", lambda *args, **kwargs: None)
    monkeypatch.setattr(
        "hermes_cli.auth.resolve_codex_runtime_credentials",
        lambda *args, **kwargs: {
            "base_url": "https://chatgpt.com/backend-api/codex",
            "api_key": "codex-...oken",
        },
    )
    monkeypatch.setattr(
        "hermes_cli.codex_models.get_codex_model_ids",
        lambda **kwargs: ["openai/gpt-5.3-codex", "openai/gpt-5-codex-mini"],
    )
    setup_model_provider(config)
    save_config(config)
    env = _read_env(tmp_path)
    reloaded = load_config()
    # Custom endpoint overrides must be blanked out in .env ...
    assert env.get("OPENAI_BASE_URL") == ""
    assert env.get("OPENAI_API_KEY") == ""
    # ... and the config must now point at the Codex backend.
    assert reloaded["model"]["provider"] == "openai-codex"
    assert reloaded["model"]["default"] == "openai/gpt-5.3-codex"
    assert reloaded["model"]["base_url"] == "https://chatgpt.com/backend-api/codex"
def test_setup_summary_marks_codex_auth_as_vision_available(tmp_path, monkeypatch, capsys):
    """An active Codex OAuth login should satisfy the vision line in the summary."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    # Write an auth.json with Codex tokens directly; no API keys in the env.
    (tmp_path / "auth.json").write_text(
        '{"active_provider":"openai-codex","providers":{"openai-codex":{"tokens":{"access_token": "***", "refresh_token": "***"}}}}'
    )
    # No external binaries available, so discovery must rely on auth.json alone.
    monkeypatch.setattr("shutil.which", lambda _name: None)
    _print_setup_summary(load_config(), tmp_path)
    output = capsys.readouterr().out
    # Vision should be reported as configured (no "missing" hint for it).
    assert "Vision (image analysis)" in output
    assert "missing run 'hermes setup' to configure" not in output
    # MoA still needs an OpenRouter key, which is absent here.
    assert "Mixture of Agents" in output
    assert "missing OPENROUTER_API_KEY" in output
def test_setup_summary_marks_anthropic_auth_as_vision_available(tmp_path, monkeypatch, capsys):
    """An Anthropic API key plus an anthropic vision backend satisfies the vision line."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    monkeypatch.setenv("ANTHROPIC_API_KEY", "sk-ant-api03-key")
    # No external binaries; vision availability comes from the stubbed backend list.
    monkeypatch.setattr("shutil.which", lambda _name: None)
    monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: ["anthropic"])
    _print_setup_summary(load_config(), tmp_path)
    output = capsys.readouterr().out
    assert "Vision (image analysis)" in output
    assert "missing run 'hermes setup' to configure" not in output

View file

@ -0,0 +1,94 @@
"""Tests for non-interactive setup and first-run headless behavior."""
from argparse import Namespace
from unittest.mock import patch
import pytest
def _make_setup_args(**overrides):
return Namespace(
non_interactive=overrides.get("non_interactive", False),
section=overrides.get("section", None),
reset=overrides.get("reset", False),
)
def _make_chat_args(**overrides):
return Namespace(
continue_last=overrides.get("continue_last", None),
resume=overrides.get("resume", None),
model=overrides.get("model", None),
provider=overrides.get("provider", None),
toolsets=overrides.get("toolsets", None),
verbose=overrides.get("verbose", False),
query=overrides.get("query", None),
worktree=overrides.get("worktree", False),
yolo=overrides.get("yolo", False),
pass_session_id=overrides.get("pass_session_id", False),
quiet=overrides.get("quiet", False),
checkpoints=overrides.get("checkpoints", False),
)
class TestNonInteractiveSetup:
    """Verify setup paths exit cleanly in headless/non-interactive environments."""

    def test_non_interactive_flag_skips_wizard(self, capsys):
        """--non-interactive should print guidance and not enter the wizard."""
        from hermes_cli.setup import run_setup_wizard

        args = _make_setup_args(non_interactive=True)
        with (
            patch("hermes_cli.setup.ensure_hermes_home"),
            patch("hermes_cli.setup.load_config", return_value={}),
            patch("hermes_cli.setup.get_hermes_home", return_value="/tmp/.hermes"),
            # Tripwires: hitting either means the wizard did not bail out early.
            patch("hermes_cli.auth.get_active_provider", side_effect=AssertionError("wizard continued")),
            patch("builtins.input", side_effect=AssertionError("input should not be called")),
        ):
            run_setup_wizard(args)
        out = capsys.readouterr().out
        assert "hermes config set model.provider custom" in out

    def test_no_tty_skips_wizard(self, capsys):
        """When stdin has no TTY, the setup wizard should print guidance and return."""
        from hermes_cli.setup import run_setup_wizard

        args = _make_setup_args(non_interactive=False)
        with (
            patch("hermes_cli.setup.ensure_hermes_home"),
            patch("hermes_cli.setup.load_config", return_value={}),
            patch("hermes_cli.setup.get_hermes_home", return_value="/tmp/.hermes"),
            patch("hermes_cli.auth.get_active_provider", side_effect=AssertionError("wizard continued")),
            patch("sys.stdin") as mock_stdin,
            patch("builtins.input", side_effect=AssertionError("input should not be called")),
        ):
            # Simulate a headless environment (no interactive terminal).
            mock_stdin.isatty.return_value = False
            run_setup_wizard(args)
        out = capsys.readouterr().out
        assert "hermes config set model.provider custom" in out

    def test_chat_first_run_headless_skips_setup_prompt(self, capsys):
        """Bare `hermes` should not prompt for input when no provider exists and stdin is headless."""
        from hermes_cli.main import cmd_chat

        args = _make_chat_args()
        with (
            patch("hermes_cli.main._has_any_provider_configured", return_value=False),
            patch("hermes_cli.main.cmd_setup") as mock_setup,
            patch("sys.stdin") as mock_stdin,
            patch("builtins.input", side_effect=AssertionError("input should not be called")),
        ):
            mock_stdin.isatty.return_value = False
            # Expect a clean non-zero exit instead of launching the setup wizard.
            with pytest.raises(SystemExit) as exc:
                cmd_chat(args)
            assert exc.value.code == 1
            mock_setup.assert_not_called()
        out = capsys.readouterr().out
        assert "hermes config set model.provider custom" in out

View file

@ -0,0 +1,287 @@
"""Tests for OpenClaw migration integration in the setup wizard."""
from argparse import Namespace
from types import ModuleType
from unittest.mock import MagicMock, patch
from hermes_cli import setup as setup_mod
# ---------------------------------------------------------------------------
# _offer_openclaw_migration — unit tests
# ---------------------------------------------------------------------------
class TestOfferOpenclawMigration:
    """Test the _offer_openclaw_migration helper in isolation."""

    def test_skips_when_no_openclaw_dir(self, tmp_path):
        """Should return False immediately when ~/.openclaw does not exist."""
        # Path.home() is redirected to tmp_path, which contains no .openclaw dir.
        with patch("hermes_cli.setup.Path.home", return_value=tmp_path):
            assert setup_mod._offer_openclaw_migration(tmp_path / ".hermes") is False

    def test_skips_when_migration_script_missing(self, tmp_path):
        """Should return False when the migration script file is absent."""
        openclaw_dir = tmp_path / ".openclaw"
        openclaw_dir.mkdir()
        with (
            patch("hermes_cli.setup.Path.home", return_value=tmp_path),
            # Point the module-level script path at a file that does not exist.
            patch.object(setup_mod, "_OPENCLAW_SCRIPT", tmp_path / "nonexistent.py"),
        ):
            assert setup_mod._offer_openclaw_migration(tmp_path / ".hermes") is False

    def test_skips_when_user_declines(self, tmp_path):
        """Should return False when user declines the migration prompt."""
        openclaw_dir = tmp_path / ".openclaw"
        openclaw_dir.mkdir()
        script = tmp_path / "openclaw_to_hermes.py"
        script.write_text("# placeholder")
        with (
            patch("hermes_cli.setup.Path.home", return_value=tmp_path),
            patch.object(setup_mod, "_OPENCLAW_SCRIPT", script),
            # User answers "no" at the confirmation prompt.
            patch.object(setup_mod, "prompt_yes_no", return_value=False),
        ):
            assert setup_mod._offer_openclaw_migration(tmp_path / ".hermes") is False

    def test_runs_migration_when_user_accepts(self, tmp_path):
        """Should dynamically load the script and run the Migrator."""
        openclaw_dir = tmp_path / ".openclaw"
        openclaw_dir.mkdir()
        # Create a fake hermes home with config
        hermes_home = tmp_path / ".hermes"
        hermes_home.mkdir()
        config_path = hermes_home / "config.yaml"
        config_path.write_text("agent:\n max_turns: 90\n")
        # Build a fake migration module
        fake_mod = ModuleType("openclaw_to_hermes")
        fake_mod.resolve_selected_options = MagicMock(return_value={"soul", "memory"})
        fake_migrator = MagicMock()
        fake_migrator.migrate.return_value = {
            "summary": {"migrated": 3, "skipped": 1, "conflict": 0, "error": 0},
            "output_dir": str(hermes_home / "migration"),
        }
        fake_mod.Migrator = MagicMock(return_value=fake_migrator)
        script = tmp_path / "openclaw_to_hermes.py"
        script.write_text("# placeholder")
        with (
            patch("hermes_cli.setup.Path.home", return_value=tmp_path),
            patch.object(setup_mod, "_OPENCLAW_SCRIPT", script),
            patch.object(setup_mod, "prompt_yes_no", return_value=True),
            patch.object(setup_mod, "get_config_path", return_value=config_path),
            patch("importlib.util.spec_from_file_location") as mock_spec_fn,
        ):
            # Wire up the fake module loading
            mock_spec = MagicMock()
            mock_spec.loader = MagicMock()
            mock_spec_fn.return_value = mock_spec

            def exec_module(mod):
                # "Executing" the script just grafts the fake callables onto the module.
                mod.resolve_selected_options = fake_mod.resolve_selected_options
                mod.Migrator = fake_mod.Migrator

            mock_spec.loader.exec_module = exec_module
            result = setup_mod._offer_openclaw_migration(hermes_home)
        assert result is True
        # The full preset should be resolved and passed through to the Migrator.
        fake_mod.resolve_selected_options.assert_called_once_with(
            None, None, preset="full"
        )
        fake_mod.Migrator.assert_called_once()
        call_kwargs = fake_mod.Migrator.call_args[1]
        assert call_kwargs["execute"] is True
        assert call_kwargs["overwrite"] is False
        assert call_kwargs["migrate_secrets"] is True
        assert call_kwargs["preset_name"] == "full"
        fake_migrator.migrate.assert_called_once()

    def test_handles_migration_error_gracefully(self, tmp_path):
        """Should catch exceptions and return False."""
        openclaw_dir = tmp_path / ".openclaw"
        openclaw_dir.mkdir()
        hermes_home = tmp_path / ".hermes"
        hermes_home.mkdir()
        config_path = hermes_home / "config.yaml"
        config_path.write_text("")
        script = tmp_path / "openclaw_to_hermes.py"
        script.write_text("# placeholder")
        with (
            patch("hermes_cli.setup.Path.home", return_value=tmp_path),
            patch.object(setup_mod, "_OPENCLAW_SCRIPT", script),
            patch.object(setup_mod, "prompt_yes_no", return_value=True),
            patch.object(setup_mod, "get_config_path", return_value=config_path),
            # Simulate the dynamic import blowing up mid-migration.
            patch(
                "importlib.util.spec_from_file_location",
                side_effect=RuntimeError("boom"),
            ),
        ):
            result = setup_mod._offer_openclaw_migration(hermes_home)
        assert result is False

    def test_creates_config_if_missing(self, tmp_path):
        """Should bootstrap config.yaml before running migration."""
        openclaw_dir = tmp_path / ".openclaw"
        openclaw_dir.mkdir()
        hermes_home = tmp_path / ".hermes"
        hermes_home.mkdir()
        config_path = hermes_home / "config.yaml"
        # config does NOT exist yet
        script = tmp_path / "openclaw_to_hermes.py"
        script.write_text("# placeholder")
        with (
            patch("hermes_cli.setup.Path.home", return_value=tmp_path),
            patch.object(setup_mod, "_OPENCLAW_SCRIPT", script),
            patch.object(setup_mod, "prompt_yes_no", return_value=True),
            patch.object(setup_mod, "get_config_path", return_value=config_path),
            patch.object(setup_mod, "load_config", return_value={"agent": {}}),
            patch.object(setup_mod, "save_config") as mock_save,
            # Abort right after the bootstrap step so only save_config is observed.
            patch(
                "importlib.util.spec_from_file_location",
                side_effect=RuntimeError("stop early"),
            ),
        ):
            setup_mod._offer_openclaw_migration(hermes_home)
        # save_config should have been called to bootstrap the file
        mock_save.assert_called_once_with({"agent": {}})
# ---------------------------------------------------------------------------
# Integration with run_setup_wizard — first-time flow
# ---------------------------------------------------------------------------
def _first_time_args() -> Namespace:
return Namespace(
section=None,
non_interactive=False,
reset=False,
)
class TestSetupWizardOpenclawIntegration:
    """Verify _offer_openclaw_migration is called during first-time setup."""

    def test_migration_offered_during_first_time_setup(self, tmp_path):
        """On first-time setup, _offer_openclaw_migration should be called."""
        args = _first_time_args()
        with (
            patch.object(setup_mod, "ensure_hermes_home"),
            patch.object(setup_mod, "load_config", return_value={}),
            patch.object(setup_mod, "get_hermes_home", return_value=tmp_path),
            # No env keys configured -> first-time path.
            patch.object(setup_mod, "get_env_value", return_value=""),
            patch.object(setup_mod, "is_interactive_stdin", return_value=True),
            patch("hermes_cli.auth.get_active_provider", return_value=None),
            # User presses Enter to start
            patch("builtins.input", return_value=""),
            # Mock the migration offer
            patch.object(
                setup_mod, "_offer_openclaw_migration", return_value=False
            ) as mock_migration,
            # Mock the actual setup sections so they don't run
            patch.object(setup_mod, "setup_model_provider"),
            patch.object(setup_mod, "setup_terminal_backend"),
            patch.object(setup_mod, "setup_agent_settings"),
            patch.object(setup_mod, "setup_gateway"),
            patch.object(setup_mod, "setup_tools"),
            patch.object(setup_mod, "save_config"),
            patch.object(setup_mod, "_print_setup_summary"),
        ):
            setup_mod.run_setup_wizard(args)
        mock_migration.assert_called_once_with(tmp_path)

    def test_migration_reloads_config_on_success(self, tmp_path):
        """When migration returns True, config should be reloaded."""
        args = _first_time_args()
        call_order = []

        def tracking_load_config():
            # Record each load so the post-migration reload can be counted.
            call_order.append("load_config")
            return {}

        with (
            patch.object(setup_mod, "ensure_hermes_home"),
            patch.object(setup_mod, "load_config", side_effect=tracking_load_config),
            patch.object(setup_mod, "get_hermes_home", return_value=tmp_path),
            patch.object(setup_mod, "get_env_value", return_value=""),
            patch.object(setup_mod, "is_interactive_stdin", return_value=True),
            patch("hermes_cli.auth.get_active_provider", return_value=None),
            patch("builtins.input", return_value=""),
            patch.object(setup_mod, "_offer_openclaw_migration", return_value=True),
            patch.object(setup_mod, "setup_model_provider"),
            patch.object(setup_mod, "setup_terminal_backend"),
            patch.object(setup_mod, "setup_agent_settings"),
            patch.object(setup_mod, "setup_gateway"),
            patch.object(setup_mod, "setup_tools"),
            patch.object(setup_mod, "save_config"),
            patch.object(setup_mod, "_print_setup_summary"),
        ):
            setup_mod.run_setup_wizard(args)
        # load_config called twice: once at start, once after migration
        assert call_order.count("load_config") == 2

    def test_reloaded_config_flows_into_remaining_setup_sections(self, tmp_path):
        """The post-migration config (not the stale one) feeds the later sections."""
        args = _first_time_args()
        initial_config = {}
        reloaded_config = {"model": {"provider": "openrouter"}}
        with (
            patch.object(setup_mod, "ensure_hermes_home"),
            # First load returns the stale config, second the migrated one.
            patch.object(
                setup_mod,
                "load_config",
                side_effect=[initial_config, reloaded_config],
            ),
            patch.object(setup_mod, "get_hermes_home", return_value=tmp_path),
            patch.object(setup_mod, "get_env_value", return_value=""),
            patch.object(setup_mod, "is_interactive_stdin", return_value=True),
            patch("hermes_cli.auth.get_active_provider", return_value=None),
            patch("builtins.input", return_value=""),
            patch.object(setup_mod, "_offer_openclaw_migration", return_value=True),
            patch.object(setup_mod, "setup_model_provider") as setup_model_provider,
            patch.object(setup_mod, "setup_terminal_backend"),
            patch.object(setup_mod, "setup_agent_settings"),
            patch.object(setup_mod, "setup_gateway"),
            patch.object(setup_mod, "setup_tools"),
            patch.object(setup_mod, "save_config"),
            patch.object(setup_mod, "_print_setup_summary"),
        ):
            setup_mod.run_setup_wizard(args)
        setup_model_provider.assert_called_once_with(reloaded_config)

    def test_migration_not_offered_for_existing_install(self, tmp_path):
        """Returning users should not see the migration prompt."""
        args = _first_time_args()
        with (
            patch.object(setup_mod, "ensure_hermes_home"),
            patch.object(setup_mod, "load_config", return_value={}),
            patch.object(setup_mod, "get_hermes_home", return_value=tmp_path),
            # An existing OPENROUTER_API_KEY marks this as a returning install.
            patch.object(
                setup_mod,
                "get_env_value",
                side_effect=lambda k: "sk-xxx" if k == "OPENROUTER_API_KEY" else "",
            ),
            patch("hermes_cli.auth.get_active_provider", return_value=None),
            # Returning user picks "Exit"
            patch.object(setup_mod, "prompt_choice", return_value=9),
            patch.object(
                setup_mod, "_offer_openclaw_migration", return_value=False
            ) as mock_migration,
        ):
            setup_mod.run_setup_wizard(args)
        mock_migration.assert_not_called()

View file

@ -0,0 +1,29 @@
from hermes_cli import setup as setup_mod
def test_prompt_choice_uses_curses_helper(monkeypatch):
    """When the curses helper yields an index, prompt_choice passes it through."""
    def fake_helper(question, choices, default=0):
        return 1

    monkeypatch.setattr(setup_mod, "_curses_prompt_choice", fake_helper)
    assert setup_mod.prompt_choice("Pick one", ["a", "b", "c"], default=0) == 1
def test_prompt_choice_falls_back_to_numbered_input(monkeypatch):
    """If the curses helper signals failure (-1), fall back to numbered input()."""
    def failing_helper(question, choices, default=0):
        return -1

    monkeypatch.setattr(setup_mod, "_curses_prompt_choice", failing_helper)
    # The user types "2" (1-based), which maps to index 1.
    monkeypatch.setattr("builtins.input", lambda _prompt="": "2")
    assert setup_mod.prompt_choice("Pick one", ["a", "b", "c"], default=0) == 1
def test_prompt_checklist_uses_shared_curses_checklist(monkeypatch):
    """prompt_checklist should delegate to the shared curses checklist widget."""
    def fake_checklist(title, items, selected, cancel_returns=None):
        return {0, 2}

    monkeypatch.setattr("hermes_cli.curses_ui.curses_checklist", fake_checklist)
    picked = setup_mod.prompt_checklist("Pick tools", ["one", "two", "three"], pre_selected=[1])
    assert picked == [0, 2]

View file

@ -0,0 +1,211 @@
"""Tests for hermes_cli/skills_config.py and skills_tool disabled filtering."""
import pytest
from unittest.mock import patch, MagicMock
# ---------------------------------------------------------------------------
# get_disabled_skills
# ---------------------------------------------------------------------------
class TestGetDisabledSkills:
    """get_disabled_skills: resolve the disabled-skill set from a config dict."""

    def test_empty_config(self):
        from hermes_cli.skills_config import get_disabled_skills

        assert get_disabled_skills({}) == set()

    def test_reads_global_disabled(self):
        from hermes_cli.skills_config import get_disabled_skills

        cfg = {"skills": {"disabled": ["skill-a", "skill-b"]}}
        assert get_disabled_skills(cfg) == {"skill-a", "skill-b"}

    def test_reads_platform_disabled(self):
        from hermes_cli.skills_config import get_disabled_skills

        cfg = {
            "skills": {
                "disabled": ["skill-a"],
                "platform_disabled": {"telegram": ["skill-b"]},
            }
        }
        assert get_disabled_skills(cfg, platform="telegram") == {"skill-b"}

    def test_platform_falls_back_to_global(self):
        from hermes_cli.skills_config import get_disabled_skills

        cfg = {"skills": {"disabled": ["skill-a"]}}
        # No platform_disabled entry for cli, so the global list applies.
        assert get_disabled_skills(cfg, platform="cli") == {"skill-a"}

    def test_missing_skills_key(self):
        from hermes_cli.skills_config import get_disabled_skills

        assert get_disabled_skills({"other": "value"}) == set()

    def test_empty_disabled_list(self):
        from hermes_cli.skills_config import get_disabled_skills

        assert get_disabled_skills({"skills": {"disabled": []}}) == set()
# ---------------------------------------------------------------------------
# save_disabled_skills
# ---------------------------------------------------------------------------
class TestSaveDisabledSkills:
    """save_disabled_skills: persist global and per-platform disabled sets."""

    @patch("hermes_cli.skills_config.save_config")
    def test_saves_global_sorted(self, mock_save):
        from hermes_cli.skills_config import save_disabled_skills

        cfg = {}
        save_disabled_skills(cfg, {"skill-z", "skill-a"})
        # Sets are unordered; the saved list must be sorted for stable output.
        assert cfg["skills"]["disabled"] == ["skill-a", "skill-z"]
        mock_save.assert_called_once()

    @patch("hermes_cli.skills_config.save_config")
    def test_saves_platform_disabled(self, mock_save):
        from hermes_cli.skills_config import save_disabled_skills

        cfg = {}
        save_disabled_skills(cfg, {"skill-x"}, platform="telegram")
        assert cfg["skills"]["platform_disabled"]["telegram"] == ["skill-x"]

    @patch("hermes_cli.skills_config.save_config")
    def test_saves_empty(self, mock_save):
        from hermes_cli.skills_config import save_disabled_skills

        cfg = {"skills": {"disabled": ["skill-a"]}}
        save_disabled_skills(cfg, set())
        assert cfg["skills"]["disabled"] == []

    @patch("hermes_cli.skills_config.save_config")
    def test_creates_skills_key(self, mock_save):
        from hermes_cli.skills_config import save_disabled_skills

        cfg = {}
        save_disabled_skills(cfg, {"skill-x"})
        assert "skills" in cfg
        assert "disabled" in cfg["skills"]
# ---------------------------------------------------------------------------
# _is_skill_disabled
# ---------------------------------------------------------------------------
class TestIsSkillDisabled:
    """_is_skill_disabled: config-driven checks with per-platform overrides."""

    @patch("hermes_cli.config.load_config")
    def test_globally_disabled(self, mock_load):
        """A skill listed in the global disabled list is disabled."""
        mock_load.return_value = {"skills": {"disabled": ["bad-skill"]}}
        from tools.skills_tool import _is_skill_disabled

        assert _is_skill_disabled("bad-skill") is True

    @patch("hermes_cli.config.load_config")
    def test_globally_enabled(self, mock_load):
        """A skill absent from the disabled list stays enabled."""
        mock_load.return_value = {"skills": {"disabled": ["other"]}}
        from tools.skills_tool import _is_skill_disabled

        assert _is_skill_disabled("good-skill") is False

    @patch("hermes_cli.config.load_config")
    def test_platform_disabled(self, mock_load):
        """A platform-specific disabled list applies when that platform is given."""
        mock_load.return_value = {"skills": {
            "disabled": [],
            "platform_disabled": {"telegram": ["tg-skill"]}
        }}
        from tools.skills_tool import _is_skill_disabled

        assert _is_skill_disabled("tg-skill", platform="telegram") is True

    @patch("hermes_cli.config.load_config")
    def test_platform_enabled_overrides_global(self, mock_load):
        """An explicit (even empty) platform list takes precedence over global."""
        mock_load.return_value = {"skills": {
            "disabled": ["skill-a"],
            "platform_disabled": {"telegram": []}
        }}
        from tools.skills_tool import _is_skill_disabled

        # telegram has explicit empty list -> skill-a is NOT disabled for telegram
        assert _is_skill_disabled("skill-a", platform="telegram") is False

    @patch("hermes_cli.config.load_config")
    def test_platform_falls_back_to_global(self, mock_load):
        """Platforms with no override fall back to the global disabled list."""
        mock_load.return_value = {"skills": {"disabled": ["skill-a"]}}
        from tools.skills_tool import _is_skill_disabled

        # no platform_disabled for cli -> global
        assert _is_skill_disabled("skill-a", platform="cli") is True

    @patch("hermes_cli.config.load_config")
    def test_empty_config(self, mock_load):
        """An empty config disables nothing."""
        mock_load.return_value = {}
        from tools.skills_tool import _is_skill_disabled

        assert _is_skill_disabled("any-skill") is False

    @patch("hermes_cli.config.load_config")
    def test_exception_returns_false(self, mock_load):
        """Config read failures fail open: the skill is treated as enabled."""
        mock_load.side_effect = Exception("config error")
        from tools.skills_tool import _is_skill_disabled

        assert _is_skill_disabled("any-skill") is False

    @patch("hermes_cli.config.load_config")
    @patch.dict("os.environ", {"HERMES_PLATFORM": "discord"})
    def test_env_var_platform(self, mock_load):
        """With no explicit platform argument, HERMES_PLATFORM supplies it."""
        mock_load.return_value = {"skills": {
            "platform_disabled": {"discord": ["discord-skill"]}
        }}
        from tools.skills_tool import _is_skill_disabled

        assert _is_skill_disabled("discord-skill") is True
# ---------------------------------------------------------------------------
# _find_all_skills — disabled filtering
# ---------------------------------------------------------------------------
class TestFindAllSkillsFiltering:
    """_find_all_skills should drop disabled skills unless skip_disabled is set.

    The three tests previously duplicated identical on-disk fixture creation and
    SKILLS_DIR mock wiring; both are factored into static helpers below.
    """

    @staticmethod
    def _write_skill(tmp_path):
        """Create a minimal skill directory under tmp_path; return its SKILL.md path."""
        skill_dir = tmp_path / "my-skill"
        skill_dir.mkdir()
        skill_md = skill_dir / "SKILL.md"
        skill_md.write_text("---\nname: my-skill\ndescription: A test skill\n---\nContent")
        return skill_md

    @staticmethod
    def _wire_skills_dir(mock_dir, skill_md):
        """Make the mocked SKILLS_DIR exist and yield exactly one SKILL.md."""
        mock_dir.exists.return_value = True
        mock_dir.rglob.return_value = [skill_md]

    @patch("tools.skills_tool._get_disabled_skill_names", return_value={"my-skill"})
    @patch("tools.skills_tool.skill_matches_platform", return_value=True)
    @patch("tools.skills_tool.SKILLS_DIR")
    def test_disabled_skill_excluded(self, mock_dir, mock_platform, mock_disabled, tmp_path):
        """A skill present in the disabled set must not be returned."""
        self._wire_skills_dir(mock_dir, self._write_skill(tmp_path))
        from tools.skills_tool import _find_all_skills

        skills = _find_all_skills()
        assert not any(s["name"] == "my-skill" for s in skills)

    @patch("tools.skills_tool._get_disabled_skill_names", return_value=set())
    @patch("tools.skills_tool.skill_matches_platform", return_value=True)
    @patch("tools.skills_tool.SKILLS_DIR")
    def test_enabled_skill_included(self, mock_dir, mock_platform, mock_disabled, tmp_path):
        """A skill not in the disabled set is returned normally."""
        self._wire_skills_dir(mock_dir, self._write_skill(tmp_path))
        from tools.skills_tool import _find_all_skills

        skills = _find_all_skills()
        assert any(s["name"] == "my-skill" for s in skills)

    @patch("tools.skills_tool._get_disabled_skill_names", return_value={"my-skill"})
    @patch("tools.skills_tool.skill_matches_platform", return_value=True)
    @patch("tools.skills_tool.SKILLS_DIR")
    def test_skip_disabled_returns_all(self, mock_dir, mock_platform, mock_disabled, tmp_path):
        """skip_disabled=True ignores the disabled set (for config UI)."""
        self._wire_skills_dir(mock_dir, self._write_skill(tmp_path))
        from tools.skills_tool import _find_all_skills

        skills = _find_all_skills(skip_disabled=True)
        assert any(s["name"] == "my-skill" for s in skills)
# ---------------------------------------------------------------------------
# _get_categories
# ---------------------------------------------------------------------------
class TestGetCategories:
    """_get_categories: category extraction from skill dicts."""

    def test_extracts_unique_categories(self):
        from hermes_cli.skills_config import _get_categories

        entries = [
            {"name": "a", "category": "mlops", "description": ""},
            {"name": "b", "category": "coding", "description": ""},
            {"name": "c", "category": "mlops", "description": ""},
        ]
        assert _get_categories(entries) == ["coding", "mlops"]

    def test_none_becomes_uncategorized(self):
        from hermes_cli.skills_config import _get_categories

        entries = [{"name": "a", "category": None, "description": ""}]
        assert "uncategorized" in _get_categories(entries)

View file

@ -0,0 +1,179 @@
from io import StringIO
import pytest
from rich.console import Console
from hermes_cli.skills_hub import do_check, do_list, do_update, handle_skills_slash
class _DummyLockFile:
def __init__(self, installed):
self._installed = installed
def list_installed(self):
return self._installed
@pytest.fixture()
def hub_env(monkeypatch, tmp_path):
    """Point all skills-hub module paths at an isolated tmp tree.

    Returns the hub directory (``<tmp>/skills/.hub``). The previous docstring
    claimed it returned ``(monkeypatch, tmp_path)``, which was wrong.
    """
    import tools.skills_hub as hub

    skills_dir = tmp_path / "skills"
    hub_dir = skills_dir / ".hub"
    overrides = {
        "SKILLS_DIR": skills_dir,
        "HUB_DIR": hub_dir,
        "LOCK_FILE": hub_dir / "lock.json",
        "QUARANTINE_DIR": hub_dir / "quarantine",
        "AUDIT_LOG": hub_dir / "audit.log",
        "TAPS_FILE": hub_dir / "taps.json",
        "INDEX_CACHE_DIR": hub_dir / "index-cache",
    }
    for attr, path in overrides.items():
        monkeypatch.setattr(hub, attr, path)
    return hub_dir
# ---------------------------------------------------------------------------
# Fixtures for common skill setups
# ---------------------------------------------------------------------------
# Canned fixtures shared by the source-classification tests below:
# one hub-installed lock entry, three discoverable skills (one per source),
# and a builtin manifest covering exactly one of them.
_HUB_ENTRY = {"name": "hub-skill", "source": "github", "trust_level": "community"}
_ALL_THREE_SKILLS = [
    {"name": "hub-skill", "category": "x", "description": "hub"},
    {"name": "builtin-skill", "category": "x", "description": "builtin"},
    {"name": "local-skill", "category": "x", "description": "local"},
]
_BUILTIN_MANIFEST = {"builtin-skill": "abc123"}
@pytest.fixture()
def three_source_env(monkeypatch, hub_env):
    """Populate hub/builtin/local skills for source-classification tests."""
    import tools.skills_hub as hub
    import tools.skills_sync as skills_sync
    import tools.skills_tool as skills_tool

    # One hub-installed skill recorded in the lock file...
    monkeypatch.setattr(hub, "HubLockFile", lambda: _DummyLockFile([_HUB_ENTRY]))
    # ...all three skills discoverable on disk (fresh copies per call)...
    monkeypatch.setattr(skills_tool, "_find_all_skills", lambda: list(_ALL_THREE_SKILLS))
    # ...and one of them present in the builtin manifest.
    monkeypatch.setattr(skills_sync, "_read_manifest", lambda: dict(_BUILTIN_MANIFEST))
    return hub_env
def _capture(source_filter: str = "all") -> str:
    """Render do_list into a plain-text buffer and return what it printed."""
    buffer = StringIO()
    plain_console = Console(file=buffer, force_terminal=False, color_system=None)
    do_list(source_filter=source_filter, console=plain_console)
    return buffer.getvalue()
def _capture_check(monkeypatch, results, name=None) -> str:
    """Run do_check with canned update results; return the rendered text."""
    import tools.skills_hub as hub

    monkeypatch.setattr(hub, "check_for_skill_updates", lambda **_kwargs: results)
    buffer = StringIO()
    plain_console = Console(file=buffer, force_terminal=False, color_system=None)
    do_check(name=name, console=plain_console)
    return buffer.getvalue()
def _capture_update(monkeypatch, results) -> tuple[str, list[tuple[str, str, bool]]]:
    """Run do_update with canned results; return (rendered text, recorded installs)."""
    import tools.skills_hub as hub
    import hermes_cli.skills_hub as cli_hub

    sink = StringIO()
    console = Console(file=sink, force_terminal=False, color_system=None)
    installs = []
    monkeypatch.setattr(hub, "check_for_skill_updates", lambda **_kwargs: results)
    # Throwaway lock-file class whose get_installed() reports a fixed install path.
    monkeypatch.setattr(hub, "HubLockFile", lambda: type("L", (), {
        "get_installed": lambda self, name: {"install_path": "category/" + name}
    })())
    # Record (identifier, category, force) instead of actually installing.
    monkeypatch.setattr(cli_hub, "do_install", lambda identifier, category="", force=False, console=None: installs.append((identifier, category, force)))
    do_update(console=console)
    return sink.getvalue(), installs
# ---------------------------------------------------------------------------
# Tests
# ---------------------------------------------------------------------------
def test_do_list_initializes_hub_dir(monkeypatch, hub_env):
    """The first do_list call should create the hub directory scaffolding."""
    import tools.skills_sync as skills_sync
    import tools.skills_tool as skills_tool

    monkeypatch.setattr(skills_tool, "_find_all_skills", lambda: [])
    monkeypatch.setattr(skills_sync, "_read_manifest", lambda: {})
    assert not hub_env.exists()
    _capture()
    assert hub_env.exists()
    assert (hub_env / "lock.json").exists()
    assert (hub_env / "quarantine").is_dir()
    assert (hub_env / "index-cache").is_dir()
def test_do_list_distinguishes_hub_builtin_and_local(three_source_env):
    """All three sources are listed, and the summary line counts them."""
    text = _capture()
    for skill_name in ("hub-skill", "builtin-skill", "local-skill"):
        assert skill_name in text
    assert "1 hub-installed, 1 builtin, 1 local" in text
def test_do_list_filter_local(three_source_env):
    """source_filter='local' hides hub-installed and builtin skills."""
    text = _capture(source_filter="local")
    assert "local-skill" in text
    assert "builtin-skill" not in text
    assert "hub-skill" not in text
def test_do_list_filter_hub(three_source_env):
    """source_filter='hub' hides builtin and local skills."""
    text = _capture(source_filter="hub")
    assert "hub-skill" in text
    assert "builtin-skill" not in text
    assert "local-skill" not in text
def test_do_list_filter_builtin(three_source_env):
    """source_filter='builtin' hides hub-installed and local skills."""
    text = _capture(source_filter="builtin")
    assert "builtin-skill" in text
    assert "hub-skill" not in text
    assert "local-skill" not in text
def test_do_check_reports_available_updates(monkeypatch):
    """Both update_available and up_to_date rows are rendered by do_check."""
    canned = [
        {"name": "hub-skill", "source": "skills.sh", "status": "update_available"},
        {"name": "other-skill", "source": "github", "status": "up_to_date"},
    ]
    text = _capture_check(monkeypatch, canned)
    assert "hub-skill" in text
    assert "update_available" in text
    assert "up_to_date" in text
def test_do_check_handles_no_installed_updates(monkeypatch):
    """With nothing installed from the hub, do_check prints a friendly notice."""
    text = _capture_check(monkeypatch, [])
    assert "No hub-installed skills to check" in text
def test_do_update_reinstalls_outdated_skills(monkeypatch):
    """Only the outdated skill is reinstalled, and with force=True."""
    canned = [
        {"name": "hub-skill", "identifier": "skills-sh/example/repo/hub-skill", "status": "update_available"},
        {"name": "other-skill", "identifier": "github/example/other-skill", "status": "up_to_date"},
    ]
    text, installs = _capture_update(monkeypatch, canned)
    assert installs == [("skills-sh/example/repo/hub-skill", "category", True)]
    assert "Updated 1 skill" in text

View file

@ -0,0 +1,128 @@
"""
Tests for --yes / --force flag separation in `hermes skills install`.
--yes / -y skip_confirm (bypass interactive prompt, needed in TUI mode)
--force force (install despite blocked scan verdict)
Based on PR #1595 by 333Alden333 (salvaged).
"""
import sys
from types import SimpleNamespace
def test_cli_skills_install_yes_sets_skip_confirm(monkeypatch):
    """--yes should set skip_confirm=True but NOT force."""
    from hermes_cli.main import main

    seen = {}

    def fake_skills_command(args):
        seen["identifier"] = args.identifier
        seen["force"] = args.force
        seen["yes"] = args.yes

    monkeypatch.setattr("hermes_cli.skills_hub.skills_command", fake_skills_command)
    argv = ["hermes", "skills", "install", "official/email/agentmail", "--yes"]
    monkeypatch.setattr(sys, "argv", argv)
    main()
    assert seen["identifier"] == "official/email/agentmail"
    assert seen["yes"] is True
    assert seen["force"] is False
def test_cli_skills_install_y_alias(monkeypatch):
    """-y should behave the same as --yes."""
    from hermes_cli.main import main

    seen = {}

    def fake_skills_command(args):
        seen["yes"] = args.yes
        seen["force"] = args.force

    monkeypatch.setattr("hermes_cli.skills_hub.skills_command", fake_skills_command)
    monkeypatch.setattr(sys, "argv", ["hermes", "skills", "install", "test/skill", "-y"])
    main()
    assert seen["yes"] is True
    assert seen["force"] is False
def test_cli_skills_install_force_sets_force(monkeypatch):
    """--force should set force=True but NOT yes."""
    from hermes_cli.main import main

    seen = {}

    def fake_skills_command(args):
        seen["force"] = args.force
        seen["yes"] = args.yes

    monkeypatch.setattr("hermes_cli.skills_hub.skills_command", fake_skills_command)
    monkeypatch.setattr(sys, "argv", ["hermes", "skills", "install", "test/skill", "--force"])
    main()
    assert seen["force"] is True
    assert seen["yes"] is False
def test_cli_skills_install_force_and_yes_together(monkeypatch):
    """--force --yes should set both flags."""
    from hermes_cli.main import main

    seen = {}

    def fake_skills_command(args):
        seen["force"] = args.force
        seen["yes"] = args.yes

    monkeypatch.setattr("hermes_cli.skills_hub.skills_command", fake_skills_command)
    argv = ["hermes", "skills", "install", "test/skill", "--force", "--yes"]
    monkeypatch.setattr(sys, "argv", argv)
    main()
    assert seen["force"] is True
    assert seen["yes"] is True
def test_cli_skills_install_no_flags(monkeypatch):
    """Without flags, both force and yes should be False."""
    from hermes_cli.main import main

    seen = {}

    def fake_skills_command(args):
        seen["force"] = args.force
        seen["yes"] = args.yes

    monkeypatch.setattr("hermes_cli.skills_hub.skills_command", fake_skills_command)
    monkeypatch.setattr(sys, "argv", ["hermes", "skills", "install", "test/skill"])
    main()
    assert seen["force"] is False
    assert seen["yes"] is False

View file

@ -0,0 +1,132 @@
"""
Tests for skip_confirm behavior in /skills install and /skills uninstall.
Verifies that --yes / -y bypasses the interactive confirmation prompt
that hangs inside prompt_toolkit's TUI.
Based on PR #1595 by 333Alden333 (salvaged).
"""
from unittest.mock import patch, MagicMock
import pytest
class TestHandleSkillsSlashInstallFlags:
    """Test flag parsing in handle_skills_slash for install."""

    @staticmethod
    def _run_install(command):
        """Dispatch a slash command with do_install mocked; return its call kwargs."""
        from hermes_cli.skills_hub import handle_skills_slash

        with patch("hermes_cli.skills_hub.do_install") as install_mock:
            handle_skills_slash(command)
        install_mock.assert_called_once()
        _, kwargs = install_mock.call_args
        return kwargs

    def test_yes_flag_sets_skip_confirm(self):
        kwargs = self._run_install("/skills install test/skill --yes")
        assert kwargs.get("skip_confirm") is True
        assert kwargs.get("force") is False

    def test_y_flag_sets_skip_confirm(self):
        kwargs = self._run_install("/skills install test/skill -y")
        assert kwargs.get("skip_confirm") is True

    def test_force_flag_sets_force_not_skip(self):
        kwargs = self._run_install("/skills install test/skill --force")
        assert kwargs.get("force") is True
        assert kwargs.get("skip_confirm") is False

    def test_no_flags(self):
        kwargs = self._run_install("/skills install test/skill")
        assert kwargs.get("force") is False
        assert kwargs.get("skip_confirm") is False
class TestHandleSkillsSlashUninstallFlags:
    """Test flag parsing in handle_skills_slash for uninstall."""

    @staticmethod
    def _run_uninstall(command):
        """Dispatch a slash command with do_uninstall mocked; return its call kwargs."""
        from hermes_cli.skills_hub import handle_skills_slash

        with patch("hermes_cli.skills_hub.do_uninstall") as uninstall_mock:
            handle_skills_slash(command)
        uninstall_mock.assert_called_once()
        _, kwargs = uninstall_mock.call_args
        return kwargs

    def test_yes_flag_sets_skip_confirm(self):
        kwargs = self._run_uninstall("/skills uninstall test-skill --yes")
        assert kwargs.get("skip_confirm") is True

    def test_y_flag_sets_skip_confirm(self):
        kwargs = self._run_uninstall("/skills uninstall test-skill -y")
        assert kwargs.get("skip_confirm") is True

    def test_no_flags(self):
        kwargs = self._run_uninstall("/skills uninstall test-skill")
        assert kwargs.get("skip_confirm", False) is False
class TestDoInstallSkipConfirm:
    """Test that do_install respects the skip_confirm parameter."""

    @patch("hermes_cli.skills_hub.input", return_value="n")
    def test_without_skip_confirm_prompts_user(self, mock_input):
        """Without skip_confirm, input() is the confirmation path.

        The resolve step is forced to fail, so do_install exits before
        reaching the prompt; the call proves the parameter wiring is
        accepted and that no prompt fires on the early-exit path.
        """
        from hermes_cli.skills_hub import do_install

        with patch("hermes_cli.skills_hub._console"), \
                patch("tools.skills_hub.ensure_hub_dirs"), \
                patch("tools.skills_hub.GitHubAuth"), \
                patch("tools.skills_hub.create_source_router"), \
                patch("hermes_cli.skills_hub._resolve_short_name", return_value="test/skill"), \
                patch("hermes_cli.skills_hub._resolve_source_meta_and_bundle") as mock_resolve:
            # Make it return None so we exit early
            mock_resolve.return_value = (None, None, None)
            do_install("test-skill", skip_confirm=False)
            # Early exit: the confirmation prompt must never be reached.
            mock_input.assert_not_called()
class TestDoUninstallSkipConfirm:
    """Test that do_uninstall respects the skip_confirm parameter."""

    def test_skip_confirm_bypasses_input(self):
        """With skip_confirm=True, input() should not be called."""
        from hermes_cli.skills_hub import do_uninstall

        with patch("hermes_cli.skills_hub._console"), \
                patch("tools.skills_hub.uninstall_skill", return_value=(True, "Removed")) as mock_uninstall, \
                patch("builtins.input") as mock_input:
            do_uninstall("test-skill", skip_confirm=True)
            mock_input.assert_not_called()
            mock_uninstall.assert_called_once_with("test-skill")

    def test_without_skip_confirm_calls_input(self):
        """Without skip_confirm, input() should be called."""
        from hermes_cli.skills_hub import do_uninstall

        with patch("hermes_cli.skills_hub._console"), \
                patch("tools.skills_hub.uninstall_skill", return_value=(True, "Removed")), \
                patch("builtins.input", return_value="y") as mock_input:
            do_uninstall("test-skill", skip_confirm=False)
            mock_input.assert_called_once()

    def test_without_skip_confirm_cancel(self):
        """Without skip_confirm, answering 'n' should cancel."""
        from hermes_cli.skills_hub import do_uninstall

        with patch("hermes_cli.skills_hub._console"), \
                patch("tools.skills_hub.uninstall_skill") as mock_uninstall, \
                patch("builtins.input", return_value="n"):
            do_uninstall("test-skill", skip_confirm=False)
            # Declining the prompt must abort before the uninstall call.
            mock_uninstall.assert_not_called()

View file

@ -0,0 +1,35 @@
"""Test that skills subparser doesn't conflict (regression test for #898)."""
import argparse
def test_no_duplicate_skills_subparser():
    """Ensure 'skills' subparser is only registered once to avoid Python 3.11+ crash.

    Python 3.11 changed argparse to raise an exception on duplicate subparser
    names instead of silently overwriting (see CPython #94331).
    This test will fail with:
        argparse.ArgumentError: argument command: conflicting subparser: skills
    if the duplicate 'skills' registration is reintroduced.
    """
    import sys

    # Force a fresh import: the parser (and any duplicate registration) is
    # constructed at module load time, so the cached module must be dropped.
    if 'hermes_cli.main' in sys.modules:
        del sys.modules['hermes_cli.main']
    try:
        import hermes_cli.main  # noqa: F401
    except argparse.ArgumentError as e:
        if "conflicting subparser" in str(e):
            raise AssertionError(
                f"Duplicate subparser detected: {e}. "
                "See issue #898 for details."
            ) from e
        # Any other ArgumentError is a different bug — surface it as-is.
        raise

View file

@ -0,0 +1,314 @@
"""Tests for hermes_cli.skin_engine — the data-driven skin/theme system."""
import json
import os
import pytest
from pathlib import Path
from unittest.mock import patch
@pytest.fixture(autouse=True)
def reset_skin_state():
    """Restore the skin engine to its pristine default state around each test."""
    from hermes_cli import skin_engine

    def _to_default():
        skin_engine._active_skin = None
        skin_engine._active_skin_name = "default"

    _to_default()
    yield
    _to_default()
class TestSkinConfig:
    """Contract of the SkinConfig object returned for the default skin."""

    def test_default_skin_has_required_fields(self):
        from hermes_cli.skin_engine import load_skin

        cfg = load_skin("default")
        assert cfg.name == "default"
        assert cfg.tool_prefix == ""
        for color_key in ("banner_title", "banner_border"):
            assert color_key in cfg.colors
        assert "agent_name" in cfg.branding

    def test_get_color_with_fallback(self):
        from hermes_cli.skin_engine import load_skin

        cfg = load_skin("default")
        assert cfg.get_color("banner_title") == "#FFD700"
        assert cfg.get_color("nonexistent", "#000") == "#000"

    def test_get_branding_with_fallback(self):
        from hermes_cli.skin_engine import load_skin

        cfg = load_skin("default")
        assert cfg.get_branding("agent_name") == "Hermes Agent"
        assert cfg.get_branding("nonexistent", "fallback") == "fallback"

    def test_get_spinner_list_empty_for_default(self):
        from hermes_cli.skin_engine import load_skin

        cfg = load_skin("default")
        # The default skin ships no custom spinner configuration.
        for list_key in ("waiting_faces", "thinking_verbs"):
            assert cfg.get_spinner_list(list_key) == []

    def test_get_spinner_wings_empty_for_default(self):
        from hermes_cli.skin_engine import load_skin

        cfg = load_skin("default")
        assert cfg.get_spinner_wings() == []
class TestBuiltinSkins:
    """Pin the colors, branding, and spinner config shipped with built-in skins."""

    def test_ares_skin_loads(self):
        from hermes_cli.skin_engine import load_skin
        skin = load_skin("ares")
        assert skin.name == "ares"
        # NOTE(review): an empty tool_prefix here conflicts with the display
        # tests that expect a distinctive ares prefix — a non-ASCII glyph may
        # have been lost in transit; confirm against the ares skin definition.
        assert skin.tool_prefix == ""
        # Exact hex values are part of the skin's contract with the TUI.
        assert skin.get_color("banner_border") == "#9F1C1C"
        assert skin.get_color("response_border") == "#C7A96B"
        assert skin.get_color("session_label") == "#C7A96B"
        assert skin.get_color("session_border") == "#6E584B"
        assert skin.get_branding("agent_name") == "Ares Agent"

    def test_ares_has_spinner_customization(self):
        from hermes_cli.skin_engine import load_skin
        skin = load_skin("ares")
        # Unlike the default skin, ares overrides all three spinner lists.
        assert len(skin.get_spinner_list("waiting_faces")) > 0
        assert len(skin.get_spinner_list("thinking_faces")) > 0
        assert len(skin.get_spinner_list("thinking_verbs")) > 0
        wings = skin.get_spinner_wings()
        assert len(wings) > 0
        # Each wings entry is a 2-tuple (presumably left/right glyph pair).
        assert isinstance(wings[0], tuple)
        assert len(wings[0]) == 2

    def test_mono_skin_loads(self):
        from hermes_cli.skin_engine import load_skin
        skin = load_skin("mono")
        assert skin.name == "mono"
        assert skin.get_color("banner_title") == "#e6edf3"

    def test_slate_skin_loads(self):
        from hermes_cli.skin_engine import load_skin
        skin = load_skin("slate")
        assert skin.name == "slate"
        assert skin.get_color("banner_title") == "#7eb8f6"

    def test_unknown_skin_falls_back_to_default(self):
        from hermes_cli.skin_engine import load_skin
        skin = load_skin("nonexistent_skin_xyz")
        assert skin.name == "default"

    def test_all_builtin_skins_have_complete_colors(self):
        from hermes_cli.skin_engine import _BUILTIN_SKINS, _build_skin_config
        # Every built-in skin must define the full banner/ui palette so the
        # TUI never falls back to unstyled text.
        required_keys = ["banner_border", "banner_title", "banner_accent",
                         "banner_dim", "banner_text", "ui_accent"]
        for name, data in _BUILTIN_SKINS.items():
            skin = _build_skin_config(data)
            for key in required_keys:
                assert key in skin.colors, f"Skin '{name}' missing color '{key}'"
class TestSkinManagement:
    """Active-skin selection, listing, and config-driven initialization."""

    def test_set_active_skin(self):
        from hermes_cli.skin_engine import set_active_skin, get_active_skin, get_active_skin_name
        skin = set_active_skin("ares")
        assert skin.name == "ares"
        assert get_active_skin_name() == "ares"
        assert get_active_skin().name == "ares"

    def test_get_active_skin_defaults(self):
        from hermes_cli.skin_engine import get_active_skin
        # The autouse fixture resets state, so no skin has been set yet.
        skin = get_active_skin()
        assert skin.name == "default"

    def test_list_skins_includes_builtins(self):
        from hermes_cli.skin_engine import list_skins
        skins = list_skins()
        names = [s["name"] for s in skins]
        assert "default" in names
        assert "ares" in names
        assert "mono" in names
        assert "slate" in names
        # With no user skins directory patched in, everything is builtin.
        for s in skins:
            assert "source" in s
            assert s["source"] == "builtin"

    def test_init_skin_from_config(self):
        from hermes_cli.skin_engine import init_skin_from_config, get_active_skin_name
        init_skin_from_config({"display": {"skin": "ares"}})
        assert get_active_skin_name() == "ares"

    def test_init_skin_from_empty_config(self):
        from hermes_cli.skin_engine import init_skin_from_config, get_active_skin_name
        # Missing display.skin key must fall back to the default skin.
        init_skin_from_config({})
        assert get_active_skin_name() == "default"
class TestUserSkins:
    """Loading and listing skins from the user's skins directory."""

    def test_load_user_skin_from_yaml(self, tmp_path, monkeypatch):
        """A user YAML skin loads, overrides given keys, and inherits the rest."""
        from hermes_cli.skin_engine import load_skin
        # Create a user skin YAML
        skins_dir = tmp_path / "skins"
        skins_dir.mkdir()
        skin_file = skins_dir / "custom.yaml"
        skin_data = {
            "name": "custom",
            "description": "A custom test skin",
            "colors": {"banner_title": "#FF0000"},
            "branding": {"agent_name": "Custom Agent"},
            "tool_prefix": "",
        }
        import yaml
        skin_file.write_text(yaml.dump(skin_data))
        # Redirect the engine's skins directory to the temp one.
        monkeypatch.setattr("hermes_cli.skin_engine._skins_dir", lambda: skins_dir)
        skin = load_skin("custom")
        assert skin.name == "custom"
        assert skin.get_color("banner_title") == "#FF0000"
        assert skin.get_branding("agent_name") == "Custom Agent"
        assert skin.tool_prefix == ""
        # Should inherit defaults for unspecified colors
        assert skin.get_color("banner_border") == "#CD7F32"  # from default

    def test_list_skins_includes_user_skins(self, tmp_path, monkeypatch):
        """User-directory skins appear in list_skins tagged source == 'user'."""
        from hermes_cli.skin_engine import list_skins
        skins_dir = tmp_path / "skins"
        skins_dir.mkdir()
        import yaml
        (skins_dir / "pirate.yaml").write_text(yaml.dump({
            "name": "pirate",
            "description": "Arr matey",
        }))
        monkeypatch.setattr("hermes_cli.skin_engine._skins_dir", lambda: skins_dir)
        skins = list_skins()
        names = [s["name"] for s in skins]
        assert "pirate" in names
        pirate = [s for s in skins if s["name"] == "pirate"][0]
        assert pirate["source"] == "user"
class TestDisplayIntegration:
    """agent.display helpers should pull faces/verbs/prefix from the active skin."""

    def test_get_skin_tool_prefix_default(self):
        from agent.display import get_skin_tool_prefix
        assert get_skin_tool_prefix() == ""

    def test_get_skin_tool_prefix_custom(self):
        from hermes_cli.skin_engine import set_active_skin
        from agent.display import get_skin_tool_prefix
        set_active_skin("ares")
        # NOTE(review): expecting "" for the ares prefix contradicts
        # test_tool_message_uses_skin_prefix below — the prefix glyph was
        # probably lost in transit; confirm the intended ares tool_prefix.
        assert get_skin_tool_prefix() == ""

    def test_get_skin_faces_default(self):
        from agent.display import get_skin_faces, KawaiiSpinner
        faces = get_skin_faces("waiting_faces", KawaiiSpinner.KAWAII_WAITING)
        # Default skin has no custom faces, so should return the default list
        assert faces == KawaiiSpinner.KAWAII_WAITING

    def test_get_skin_faces_ares(self):
        from hermes_cli.skin_engine import set_active_skin
        from agent.display import get_skin_faces, KawaiiSpinner
        set_active_skin("ares")
        faces = get_skin_faces("waiting_faces", KawaiiSpinner.KAWAII_WAITING)
        assert "(⚔)" in faces

    def test_get_skin_verbs_default(self):
        from agent.display import get_skin_verbs, KawaiiSpinner
        verbs = get_skin_verbs()
        assert verbs == KawaiiSpinner.THINKING_VERBS

    def test_get_skin_verbs_ares(self):
        from hermes_cli.skin_engine import set_active_skin
        from agent.display import get_skin_verbs
        set_active_skin("ares")
        verbs = get_skin_verbs()
        assert "forging" in verbs

    def test_tool_message_uses_skin_prefix(self):
        from hermes_cli.skin_engine import set_active_skin
        from agent.display import get_cute_tool_message
        set_active_skin("ares")
        msg = get_cute_tool_message("terminal", {"command": "ls"}, 0.5)
        # NOTE(review): startswith("") is vacuously true and '"" not in msg'
        # is always false, so this test can never pass as written — the two
        # prefix string literals (ares vs default) appear to have been
        # stripped in transit and need to be restored from the skin specs.
        assert msg.startswith("")
        assert "" not in msg

    def test_tool_message_default_prefix(self):
        from agent.display import get_cute_tool_message
        msg = get_cute_tool_message("terminal", {"command": "ls"}, 0.5)
        # NOTE(review): same concern — an empty-string prefix makes this
        # assertion vacuous; the default prefix literal is likely missing.
        assert msg.startswith("")
class TestCliBrandingHelpers:
    """Skin-driven CLI branding strings and prompt_toolkit style overrides."""

    def test_active_prompt_symbol_default(self):
        from hermes_cli.skin_engine import get_active_prompt_symbol
        # NOTE(review): default and ares both expecting " " suggests the
        # distinguishing prompt glyphs were stripped in transit — confirm.
        assert get_active_prompt_symbol() == " "

    def test_active_prompt_symbol_ares(self):
        from hermes_cli.skin_engine import set_active_skin, get_active_prompt_symbol
        set_active_skin("ares")
        assert get_active_prompt_symbol() == " "

    def test_active_help_header_ares(self):
        from hermes_cli.skin_engine import set_active_skin, get_active_help_header
        set_active_skin("ares")
        assert get_active_help_header() == "(⚔) Available Commands"

    def test_active_goodbye_ares(self):
        from hermes_cli.skin_engine import set_active_skin, get_active_goodbye
        set_active_skin("ares")
        assert get_active_goodbye() == "Farewell, warrior! ⚔"

    def test_prompt_toolkit_style_overrides_cover_tui_classes(self):
        from hermes_cli.skin_engine import set_active_skin, get_prompt_toolkit_style_overrides
        set_active_skin("ares")
        overrides = get_prompt_toolkit_style_overrides()
        # Every style class the TUI references must have an override, else
        # prompt_toolkit silently renders that element unstyled.
        required = {
            "input-area",
            "placeholder",
            "prompt",
            "prompt-working",
            "hint",
            "input-rule",
            "image-badge",
            "completion-menu",
            "completion-menu.completion",
            "completion-menu.completion.current",
            "completion-menu.meta.completion",
            "completion-menu.meta.completion.current",
            "clarify-border",
            "clarify-title",
            "clarify-question",
            "clarify-choice",
            "clarify-selected",
            "clarify-active-other",
            "clarify-countdown",
            "sudo-prompt",
            "sudo-border",
            "sudo-title",
            "sudo-text",
            "approval-border",
            "approval-title",
            "approval-desc",
            "approval-cmd",
            "approval-choice",
            "approval-selected",
        }
        assert required.issubset(overrides.keys())

    def test_prompt_toolkit_style_overrides_use_skin_colors(self):
        from hermes_cli.skin_engine import (
            set_active_skin,
            get_active_skin,
            get_prompt_toolkit_style_overrides,
        )
        set_active_skin("ares")
        skin = get_active_skin()
        overrides = get_prompt_toolkit_style_overrides()
        # Overrides must be derived from the skin palette, not hard-coded.
        assert overrides["prompt"] == skin.get_color("prompt")
        assert overrides["input-rule"] == skin.get_color("input_rule")
        assert overrides["clarify-title"] == f"{skin.get_color('banner_title')} bold"
        assert overrides["sudo-prompt"] == f"{skin.get_color('ui_error')} bold"
        assert overrides["approval-title"] == f"{skin.get_color('ui_warn')} bold"

View file

@ -0,0 +1,14 @@
from types import SimpleNamespace
from hermes_cli.status import show_status
def test_show_status_includes_tavily_key(monkeypatch, capsys, tmp_path):
    """Status output lists Tavily with a redacted (head...tail) key."""
    monkeypatch.setenv("TAVILY_API_KEY", "tvly-1234567890abcdef")
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    show_status(SimpleNamespace(all=False, deep=False))
    out = capsys.readouterr().out
    assert "Tavily" in out
    assert "tvly...cdef" in out

View file

@ -0,0 +1,61 @@
"""Tests for hermes_cli.status model/provider display."""
from types import SimpleNamespace
def _patch_common_status_deps(monkeypatch, status_mod, tmp_path, *, openai_base_url=""):
    """Stub filesystem, env, auth, and subprocess lookups used by show_status.

    Keeps the test hermetic: paths point at tmp_path, both auth providers
    report empty status, and the subprocess probe reports 'inactive'.
    Only OPENAI_BASE_URL is configurable via the keyword argument.
    """
    import hermes_cli.auth as auth_mod
    monkeypatch.setattr(status_mod, "get_env_path", lambda: tmp_path / ".env", raising=False)
    monkeypatch.setattr(status_mod, "get_hermes_home", lambda: tmp_path, raising=False)

    def _get_env_value(name: str):
        # Every env value reads as unset except OPENAI_BASE_URL.
        if name == "OPENAI_BASE_URL":
            return openai_base_url
        return ""

    monkeypatch.setattr(status_mod, "get_env_value", _get_env_value, raising=False)
    monkeypatch.setattr(auth_mod, "get_nous_auth_status", lambda: {}, raising=False)
    monkeypatch.setattr(auth_mod, "get_codex_auth_status", lambda: {}, raising=False)
    # returncode 3 with "inactive" stdout mirrors a systemctl is-active probe
    # for a stopped service — presumably what show_status shells out to.
    monkeypatch.setattr(
        status_mod.subprocess,
        "run",
        lambda *args, **kwargs: SimpleNamespace(stdout="inactive\n", returncode=3),
    )
def test_show_status_displays_configured_dict_model_and_provider_label(monkeypatch, capsys, tmp_path):
    """A dict-style model config shows the model id and the provider label."""
    from hermes_cli import status as status_mod
    _patch_common_status_deps(monkeypatch, status_mod, tmp_path)
    # Modern config shape: model is a dict with 'default' and 'provider'.
    monkeypatch.setattr(
        status_mod,
        "load_config",
        lambda: {"model": {"default": "anthropic/claude-sonnet-4", "provider": "anthropic"}},
        raising=False,
    )
    monkeypatch.setattr(status_mod, "resolve_requested_provider", lambda requested=None: "anthropic", raising=False)
    monkeypatch.setattr(status_mod, "resolve_provider", lambda requested=None, **kwargs: "anthropic", raising=False)
    monkeypatch.setattr(status_mod, "provider_label", lambda provider: "Anthropic", raising=False)
    status_mod.show_status(SimpleNamespace(all=False, deep=False))
    out = capsys.readouterr().out
    assert "Model: anthropic/claude-sonnet-4" in out
    assert "Provider: Anthropic" in out
def test_show_status_displays_legacy_string_model_and_custom_endpoint(monkeypatch, capsys, tmp_path):
    """A legacy string model config plus OPENAI_BASE_URL shows a custom endpoint.

    resolve_provider is stubbed to "openrouter", yet the expected output is
    "Custom endpoint" — presumably show_status prefers the custom base URL
    over the resolved provider when OPENAI_BASE_URL is set.
    """
    from hermes_cli import status as status_mod
    _patch_common_status_deps(monkeypatch, status_mod, tmp_path, openai_base_url="http://localhost:8080/v1")
    # Legacy config shape: model is a bare string.
    monkeypatch.setattr(status_mod, "load_config", lambda: {"model": "qwen3:latest"}, raising=False)
    monkeypatch.setattr(status_mod, "resolve_requested_provider", lambda requested=None: "auto", raising=False)
    monkeypatch.setattr(status_mod, "resolve_provider", lambda requested=None, **kwargs: "openrouter", raising=False)
    monkeypatch.setattr(status_mod, "provider_label", lambda provider: "Custom endpoint" if provider == "custom" else provider, raising=False)
    status_mod.show_status(SimpleNamespace(all=False, deep=False))
    out = capsys.readouterr().out
    assert "Model: qwen3:latest" in out
    assert "Provider: Custom endpoint" in out

View file

@ -0,0 +1,206 @@
"""Tests for hermes_cli.tools_config platform tool persistence."""
from unittest.mock import patch
from hermes_cli.tools_config import (
_get_platform_tools,
_platform_toolset_summary,
_save_platform_tools,
_toolset_has_keys,
)
def test_get_platform_tools_uses_default_when_platform_not_configured():
    """An unconfigured platform falls back to a non-empty default selection."""
    assert _get_platform_tools({}, "cli")
def test_get_platform_tools_preserves_explicit_empty_selection():
    """An explicitly empty platform list stays empty, not reverted to defaults."""
    cfg = {"platform_toolsets": {"cli": []}}
    assert _get_platform_tools(cfg, "cli") == set()
def test_platform_toolset_summary_uses_explicit_platform_list():
    """The summary covers exactly the requested platforms and matches lookups."""
    cfg = {}
    summary = _platform_toolset_summary(cfg, platforms=["cli"])
    assert list(summary) == ["cli"]
    assert summary["cli"] == _get_platform_tools(cfg, "cli")
def test_toolset_has_keys_for_vision_accepts_codex_auth(tmp_path, monkeypatch):
    """Vision credentials check accepts Codex OAuth tokens from auth.json.

    All vision-related env credentials are removed so the auth.json tokens
    are the only possible source of a positive answer.
    """
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    # Minimal auth.json with an active openai-codex provider and token pair.
    (tmp_path / "auth.json").write_text(
        '{"active_provider":"openai-codex","providers":{"openai-codex":{"tokens":{"access_token": "codex-...oken","refresh_token": "codex-...oken"}}}}'
    )
    monkeypatch.delenv("OPENROUTER_API_KEY", raising=False)
    monkeypatch.delenv("OPENAI_BASE_URL", raising=False)
    monkeypatch.delenv("OPENAI_API_KEY", raising=False)
    monkeypatch.delenv("AUXILIARY_VISION_PROVIDER", raising=False)
    monkeypatch.delenv("CONTEXT_VISION_PROVIDER", raising=False)
    assert _toolset_has_keys("vision") is True
def test_save_platform_tools_preserves_mcp_server_names():
    """Ensure MCP server names are preserved when saving platform tools.

    Regression test for https://github.com/NousResearch/hermes-agent/issues/1247
    """
    cfg = {
        "platform_toolsets": {
            "cli": ["web", "terminal", "time", "github", "custom-mcp-server"]
        }
    }
    with patch("hermes_cli.tools_config.save_config"):
        _save_platform_tools(cfg, "cli", {"web", "browser"})
    saved = cfg["platform_toolsets"]["cli"]
    # Non-configurable entries survive; selected toolsets are written.
    for kept in ("time", "github", "custom-mcp-server", "web", "browser"):
        assert kept in saved
    # Deselected configurable toolset is dropped.
    assert "terminal" not in saved
def test_save_platform_tools_handles_empty_existing_config():
    """Saving platform tools works when no existing config exists."""
    cfg = {}
    with patch("hermes_cli.tools_config.save_config"):
        _save_platform_tools(cfg, "telegram", {"web", "terminal"})
    assert {"web", "terminal"} <= set(cfg["platform_toolsets"]["telegram"])
def test_save_platform_tools_handles_invalid_existing_config():
    """Saving platform tools works when the existing entry is not a list."""
    cfg = {"platform_toolsets": {"cli": "invalid-string-value"}}
    with patch("hermes_cli.tools_config.save_config"):
        _save_platform_tools(cfg, "cli", {"web"})
    assert "web" in cfg["platform_toolsets"]["cli"]
def test_save_platform_tools_does_not_preserve_platform_default_toolsets():
    """Platform default toolsets (hermes-cli, hermes-telegram, etc.) must NOT
    be preserved across saves.

    These "super" toolsets resolve to ALL tools, so if they survive in the
    config, they silently override any tools the user unchecked. Previously,
    the preserve filter only excluded configurable toolset keys (web, browser,
    terminal, etc.) and treated platform defaults as unknown custom entries
    (like MCP server names), causing them to be kept unconditionally.

    Regression test: user unchecks image_gen and homeassistant via
    ``hermes tools``, but hermes-cli stays in the config and re-enables
    everything on the next read.
    """
    config = {
        "platform_toolsets": {
            "cli": [
                "browser", "clarify", "code_execution", "cronjob",
                "delegation", "file", "hermes-cli",  # <-- the culprit
                "memory", "session_search", "skills", "terminal",
                "todo", "tts", "vision", "web",
            ]
        }
    }
    # User unchecks image_gen, homeassistant, moa — keeps the rest
    new_selection = {
        "browser", "clarify", "code_execution", "cronjob",
        "delegation", "file", "memory", "session_search",
        "skills", "terminal", "todo", "tts", "vision", "web",
    }
    with patch("hermes_cli.tools_config.save_config"):
        _save_platform_tools(config, "cli", new_selection)
    saved = config["platform_toolsets"]["cli"]
    # hermes-cli must NOT survive — it's a platform default, not an MCP server
    assert "hermes-cli" not in saved
    # The individual toolset keys the user selected must be present
    assert "web" in saved
    assert "terminal" in saved
    assert "browser" in saved
    # Tools the user unchecked must NOT be present
    assert "image_gen" not in saved
    assert "homeassistant" not in saved
    assert "moa" not in saved
def test_save_platform_tools_does_not_preserve_hermes_telegram():
    """Same bug for Telegram — hermes-telegram must not be preserved."""
    cfg = {
        "platform_toolsets": {
            "telegram": [
                "browser", "file", "hermes-telegram", "terminal", "web",
            ]
        }
    }
    selection = {"browser", "file", "terminal", "web"}
    with patch("hermes_cli.tools_config.save_config"):
        _save_platform_tools(cfg, "telegram", selection)
    saved = cfg["platform_toolsets"]["telegram"]
    # The platform-default super toolset is stripped; real selections remain.
    assert "hermes-telegram" not in saved
    assert "web" in saved
def test_save_platform_tools_still_preserves_mcp_with_platform_default_present():
    """MCP server names must still be preserved even when platform defaults
    are being stripped out."""
    cfg = {
        "platform_toolsets": {
            "cli": [
                "web", "terminal", "hermes-cli", "my-mcp-server", "github-tools",
            ]
        }
    }
    with patch("hermes_cli.tools_config.save_config"):
        _save_platform_tools(cfg, "cli", {"web", "browser"})
    saved = cfg["platform_toolsets"]["cli"]
    # MCP servers survive the rewrite.
    for mcp_name in ("my-mcp-server", "github-tools"):
        assert mcp_name in saved
    # Platform default stripped.
    assert "hermes-cli" not in saved
    # User selections present; deselected configurable toolset removed.
    assert "web" in saved
    assert "browser" in saved
    assert "terminal" not in saved

View file

@ -0,0 +1,207 @@
"""Tests for hermes tools disable/enable/list command (backend)."""
from argparse import Namespace
from unittest.mock import patch
from hermes_cli.tools_config import tools_disable_enable_command
# ── Built-in toolset disable ────────────────────────────────────────────────
class TestToolsDisableBuiltin:
    """Disabling built-in toolsets via `hermes tools disable`."""

    def test_disable_removes_toolset_from_platform(self):
        config = {"platform_toolsets": {"cli": ["web", "memory", "terminal"]}}
        with patch("hermes_cli.tools_config.load_config", return_value=config), \
             patch("hermes_cli.tools_config.save_config") as mock_save:
            tools_disable_enable_command(Namespace(tools_action="disable", names=["web"], platform="cli"))
            # Inspect the config dict actually handed to save_config.
            saved = mock_save.call_args[0][0]
            assert "web" not in saved["platform_toolsets"]["cli"]
            assert "memory" in saved["platform_toolsets"]["cli"]

    def test_disable_multiple_toolsets(self):
        config = {"platform_toolsets": {"cli": ["web", "memory", "terminal"]}}
        with patch("hermes_cli.tools_config.load_config", return_value=config), \
             patch("hermes_cli.tools_config.save_config") as mock_save:
            tools_disable_enable_command(Namespace(tools_action="disable", names=["web", "memory"], platform="cli"))
            saved = mock_save.call_args[0][0]
            assert "web" not in saved["platform_toolsets"]["cli"]
            assert "memory" not in saved["platform_toolsets"]["cli"]
            assert "terminal" in saved["platform_toolsets"]["cli"]

    def test_disable_already_absent_is_idempotent(self):
        # Disabling a toolset that is not enabled must not error or duplicate.
        config = {"platform_toolsets": {"cli": ["memory"]}}
        with patch("hermes_cli.tools_config.load_config", return_value=config), \
             patch("hermes_cli.tools_config.save_config") as mock_save:
            tools_disable_enable_command(Namespace(tools_action="disable", names=["web"], platform="cli"))
            saved = mock_save.call_args[0][0]
            assert "web" not in saved["platform_toolsets"]["cli"]
# ── Built-in toolset enable ─────────────────────────────────────────────────
class TestToolsEnableBuiltin:
    """Enabling built-in toolsets via `hermes tools enable`."""

    def test_enable_adds_toolset_to_platform(self):
        config = {"platform_toolsets": {"cli": ["memory"]}}
        with patch("hermes_cli.tools_config.load_config", return_value=config), \
             patch("hermes_cli.tools_config.save_config") as mock_save:
            tools_disable_enable_command(Namespace(tools_action="enable", names=["web"], platform="cli"))
            saved = mock_save.call_args[0][0]
            assert "web" in saved["platform_toolsets"]["cli"]

    def test_enable_already_present_is_idempotent(self):
        config = {"platform_toolsets": {"cli": ["web"]}}
        with patch("hermes_cli.tools_config.load_config", return_value=config), \
             patch("hermes_cli.tools_config.save_config") as mock_save:
            tools_disable_enable_command(Namespace(tools_action="enable", names=["web"], platform="cli"))
            saved = mock_save.call_args[0][0]
            # Re-enabling must not duplicate the entry.
            assert saved["platform_toolsets"]["cli"].count("web") == 1
# ── MCP tool disable ────────────────────────────────────────────────────────
class TestToolsDisableMcp:
    """Disabling individual MCP tools via the 'server:tool' syntax."""

    def test_disable_adds_to_exclude_list(self):
        config = {"mcp_servers": {"github": {"command": "npx"}}}
        with patch("hermes_cli.tools_config.load_config", return_value=config), \
             patch("hermes_cli.tools_config.save_config") as mock_save:
            tools_disable_enable_command(
                Namespace(tools_action="disable", names=["github:create_issue"], platform="cli")
            )
            saved = mock_save.call_args[0][0]
            # The tools/exclude structure is created on demand.
            assert "create_issue" in saved["mcp_servers"]["github"]["tools"]["exclude"]

    def test_disable_already_excluded_is_idempotent(self):
        config = {"mcp_servers": {"github": {"tools": {"exclude": ["create_issue"]}}}}
        with patch("hermes_cli.tools_config.load_config", return_value=config), \
             patch("hermes_cli.tools_config.save_config") as mock_save:
            tools_disable_enable_command(
                Namespace(tools_action="disable", names=["github:create_issue"], platform="cli")
            )
            saved = mock_save.call_args[0][0]
            # No duplicate exclude entries on repeat disables.
            assert saved["mcp_servers"]["github"]["tools"]["exclude"].count("create_issue") == 1

    def test_disable_unknown_server_prints_error(self, capsys):
        config = {"mcp_servers": {}}
        with patch("hermes_cli.tools_config.load_config", return_value=config), \
             patch("hermes_cli.tools_config.save_config"):
            tools_disable_enable_command(
                Namespace(tools_action="disable", names=["unknown:tool"], platform="cli")
            )
        out = capsys.readouterr().out
        assert "MCP server 'unknown' not found in config" in out
# ── MCP tool enable ──────────────────────────────────────────────────────────
class TestToolsEnableMcp:
    """Re-enabling individual MCP tools removes them from the exclude list."""

    def test_enable_removes_from_exclude_list(self):
        config = {"mcp_servers": {"github": {"tools": {"exclude": ["create_issue", "delete_branch"]}}}}
        with patch("hermes_cli.tools_config.load_config", return_value=config), \
             patch("hermes_cli.tools_config.save_config") as mock_save:
            tools_disable_enable_command(
                Namespace(tools_action="enable", names=["github:create_issue"], platform="cli")
            )
            saved = mock_save.call_args[0][0]
            # Only the named tool is removed; other exclusions are untouched.
            assert "create_issue" not in saved["mcp_servers"]["github"]["tools"]["exclude"]
            assert "delete_branch" in saved["mcp_servers"]["github"]["tools"]["exclude"]
# ── Mixed targets ────────────────────────────────────────────────────────────
class TestToolsMixedTargets:
    """A single command may mix built-in toolsets and MCP 'server:tool' names."""

    def test_disable_builtin_and_mcp_together(self):
        config = {
            "platform_toolsets": {"cli": ["web", "memory"]},
            "mcp_servers": {"github": {"command": "npx"}},
        }
        with patch("hermes_cli.tools_config.load_config", return_value=config), \
             patch("hermes_cli.tools_config.save_config") as mock_save:
            tools_disable_enable_command(Namespace(
                tools_action="disable",
                names=["web", "github:create_issue"],
                platform="cli",
            ))
            saved = mock_save.call_args[0][0]
            # Both target kinds are applied in one invocation.
            assert "web" not in saved["platform_toolsets"]["cli"]
            assert "create_issue" in saved["mcp_servers"]["github"]["tools"]["exclude"]
# ── List output ──────────────────────────────────────────────────────────────
class TestToolsList:
    """`hermes tools list` output includes enabled toolsets and MCP exclusions."""

    def test_list_shows_enabled_toolsets(self, capsys):
        config = {"platform_toolsets": {"cli": ["web", "memory"]}}
        with patch("hermes_cli.tools_config.load_config", return_value=config):
            tools_disable_enable_command(Namespace(tools_action="list", platform="cli"))
        out = capsys.readouterr().out
        assert "web" in out
        assert "memory" in out

    def test_list_shows_mcp_excluded_tools(self, capsys):
        config = {
            "mcp_servers": {"github": {"tools": {"exclude": ["create_issue"]}}},
        }
        with patch("hermes_cli.tools_config.load_config", return_value=config):
            tools_disable_enable_command(Namespace(tools_action="list", platform="cli"))
        out = capsys.readouterr().out
        # Excluded MCP tools are reported alongside their server name.
        assert "github" in out
        assert "create_issue" in out
# ── Validation ───────────────────────────────────────────────────────────────
class TestToolsValidation:
    """Unknown platforms/toolsets are rejected without corrupting the config."""

    def test_unknown_platform_prints_error(self, capsys):
        config = {}
        with patch("hermes_cli.tools_config.load_config", return_value=config), \
             patch("hermes_cli.tools_config.save_config"):
            tools_disable_enable_command(
                Namespace(tools_action="disable", names=["web"], platform="invalid_platform")
            )
        out = capsys.readouterr().out
        assert "Unknown platform 'invalid_platform'" in out

    def test_unknown_toolset_prints_error(self, capsys):
        config = {"platform_toolsets": {"cli": ["web"]}}
        with patch("hermes_cli.tools_config.load_config", return_value=config), \
             patch("hermes_cli.tools_config.save_config"):
            tools_disable_enable_command(
                Namespace(tools_action="disable", names=["nonexistent_toolset"], platform="cli")
            )
        out = capsys.readouterr().out
        assert "Unknown toolset 'nonexistent_toolset'" in out

    def test_unknown_toolset_does_not_corrupt_config(self):
        config = {"platform_toolsets": {"cli": ["web", "memory"]}}
        with patch("hermes_cli.tools_config.load_config", return_value=config), \
             patch("hermes_cli.tools_config.save_config") as mock_save:
            tools_disable_enable_command(
                Namespace(tools_action="disable", names=["nonexistent_toolset"], platform="cli")
            )
            # Existing entries must survive a rejected request untouched.
            saved = mock_save.call_args[0][0]
            assert "web" in saved["platform_toolsets"]["cli"]
            assert "memory" in saved["platform_toolsets"]["cli"]

    def test_mixed_valid_and_invalid_applies_valid_only(self):
        config = {"platform_toolsets": {"cli": ["web", "memory"]}}
        with patch("hermes_cli.tools_config.load_config", return_value=config), \
             patch("hermes_cli.tools_config.save_config") as mock_save:
            tools_disable_enable_command(
                Namespace(tools_action="disable", names=["web", "bad_toolset"], platform="cli")
            )
            # The valid name is applied; the invalid one is ignored.
            saved = mock_save.call_args[0][0]
            assert "web" not in saved["platform_toolsets"]["cli"]
            assert "memory" in saved["platform_toolsets"]["cli"]

View file

@ -0,0 +1,386 @@
from pathlib import Path
from subprocess import CalledProcessError
from types import SimpleNamespace
import pytest
from hermes_cli import config as hermes_config
from hermes_cli import main as hermes_main
def test_stash_local_changes_if_needed_returns_none_when_tree_clean(monkeypatch, tmp_path):
    """A clean worktree produces no stash and runs only the status probe."""
    calls = []

    def fake_run(cmd, **kwargs):
        calls.append((cmd, kwargs))
        if cmd[-2:] == ["status", "--porcelain"]:
            # Empty porcelain output signals a clean tree.
            return SimpleNamespace(stdout="", returncode=0)
        raise AssertionError(f"unexpected command: {cmd}")

    monkeypatch.setattr(hermes_main.subprocess, "run", fake_run)
    stash_ref = hermes_main._stash_local_changes_if_needed(["git"], tmp_path)
    assert stash_ref is None
    # Exactly one git invocation: the status probe, nothing else.
    assert [cmd[-2:] for cmd, _ in calls] == [["status", "--porcelain"]]
def test_stash_local_changes_if_needed_returns_specific_stash_commit(monkeypatch, tmp_path):
    """A dirty tree is stashed and the concrete stash commit hash is returned."""
    calls = []

    def fake_run(cmd, **kwargs):
        calls.append((cmd, kwargs))
        # 1) Dirty status output triggers the stash path.
        if cmd[-2:] == ["status", "--porcelain"]:
            return SimpleNamespace(stdout=" M hermes_cli/main.py\n?? notes.txt\n", returncode=0)
        # 2) The stash push (including untracked files) succeeds.
        if cmd[1:4] == ["stash", "push", "--include-untracked"]:
            return SimpleNamespace(stdout="Saved working directory\n", returncode=0)
        # 3) rev-parse resolves the fresh stash to a commit hash.
        if cmd[-3:] == ["rev-parse", "--verify", "refs/stash"]:
            return SimpleNamespace(stdout="abc123\n", returncode=0)
        raise AssertionError(f"unexpected command: {cmd}")

    monkeypatch.setattr(hermes_main.subprocess, "run", fake_run)
    stash_ref = hermes_main._stash_local_changes_if_needed(["git"], tmp_path)
    # The returned ref is the resolved commit, and the commands ran in order.
    assert stash_ref == "abc123"
    assert calls[1][0][1:4] == ["stash", "push", "--include-untracked"]
    assert calls[2][0][-3:] == ["rev-parse", "--verify", "refs/stash"]
def test_resolve_stash_selector_returns_matching_entry(monkeypatch, tmp_path):
    """The selector matching a given stash commit hash is found via `git stash list`."""

    def fake_run(cmd, **kwargs):
        assert cmd == ["git", "stash", "list", "--format=%gd %H"]
        listing = "stash@{0} def456\nstash@{1} abc123\n"
        return SimpleNamespace(stdout=listing, returncode=0)

    monkeypatch.setattr(hermes_main.subprocess, "run", fake_run)
    selector = hermes_main._resolve_stash_selector(["git"], tmp_path, "abc123")
    assert selector == "stash@{1}"
def test_restore_stashed_changes_prompts_before_applying(monkeypatch, tmp_path, capsys):
    """Interactive restore prompts first, then applies, checks conflicts, and drops the stash."""
    calls = []

    def fake_run(cmd, **kwargs):
        calls.append((cmd, kwargs))
        if cmd[1:3] == ["stash", "apply"]:
            return SimpleNamespace(stdout="applied\n", stderr="", returncode=0)
        # No unmerged files -> conflict-free apply.
        if cmd[1:3] == ["diff", "--name-only"]:
            return SimpleNamespace(stdout="", stderr="", returncode=0)
        if cmd[1:3] == ["stash", "list"]:
            return SimpleNamespace(stdout="stash@{1} abc123\n", stderr="", returncode=0)
        if cmd[1:3] == ["stash", "drop"]:
            return SimpleNamespace(stdout="dropped\n", stderr="", returncode=0)
        raise AssertionError(f"unexpected command: {cmd}")

    monkeypatch.setattr(hermes_main.subprocess, "run", fake_run)
    # Empty input accepts the default "Y" at the prompt.
    monkeypatch.setattr("builtins.input", lambda: "")
    restored = hermes_main._restore_stashed_changes(["git"], tmp_path, "abc123", prompt_user=True)
    assert restored is True
    # Exact command sequence: apply -> conflict check -> list -> drop.
    assert calls[0][0] == ["git", "stash", "apply", "abc123"]
    assert calls[1][0] == ["git", "diff", "--name-only", "--diff-filter=U"]
    assert calls[2][0] == ["git", "stash", "list", "--format=%gd %H"]
    assert calls[3][0] == ["git", "stash", "drop", "stash@{1}"]
    out = capsys.readouterr().out
    assert "Restore local changes now? [Y/n]" in out
    assert "restored on top of the updated codebase" in out
    assert "git diff" in out
    assert "git status" in out
def test_restore_stashed_changes_can_skip_restore_and_keep_stash(monkeypatch, tmp_path, capsys):
    """Answering 'n' at the prompt leaves the stash untouched and runs no git commands."""
    invoked = []

    def fake_run(cmd, **kwargs):
        invoked.append((cmd, kwargs))
        raise AssertionError(f"unexpected command: {cmd}")

    monkeypatch.setattr(hermes_main.subprocess, "run", fake_run)
    monkeypatch.setattr("builtins.input", lambda: "n")
    outcome = hermes_main._restore_stashed_changes(["git"], tmp_path, "abc123", prompt_user=True)
    assert outcome is False
    assert invoked == []
    output = capsys.readouterr().out
    assert "Restore local changes now? [Y/n]" in output
    assert "Your changes are still preserved in git stash." in output
    assert "git stash apply abc123" in output
def test_restore_stashed_changes_applies_without_prompt_when_disabled(monkeypatch, tmp_path, capsys):
    """With prompt_user=False the stash is applied and dropped without printing a prompt."""
    calls = []

    def fake_run(cmd, **kwargs):
        calls.append((cmd, kwargs))
        if cmd[1:3] == ["stash", "apply"]:
            return SimpleNamespace(stdout="applied\n", stderr="", returncode=0)
        # No unmerged files -> conflict-free apply.
        if cmd[1:3] == ["diff", "--name-only"]:
            return SimpleNamespace(stdout="", stderr="", returncode=0)
        if cmd[1:3] == ["stash", "list"]:
            return SimpleNamespace(stdout="stash@{0} abc123\n", stderr="", returncode=0)
        if cmd[1:3] == ["stash", "drop"]:
            return SimpleNamespace(stdout="dropped\n", stderr="", returncode=0)
        raise AssertionError(f"unexpected command: {cmd}")

    monkeypatch.setattr(hermes_main.subprocess, "run", fake_run)
    restored = hermes_main._restore_stashed_changes(["git"], tmp_path, "abc123", prompt_user=False)
    assert restored is True
    # Same command sequence as the interactive success path.
    assert calls[0][0] == ["git", "stash", "apply", "abc123"]
    assert calls[1][0] == ["git", "diff", "--name-only", "--diff-filter=U"]
    assert calls[2][0] == ["git", "stash", "list", "--format=%gd %H"]
    assert calls[3][0] == ["git", "stash", "drop", "stash@{0}"]
    # No interactive prompt should have been printed.
    assert "Restore local changes now?" not in capsys.readouterr().out
def test_print_stash_cleanup_guidance_with_selector(capsys):
    """Cleanup guidance names the exact stash selector to drop."""
    hermes_main._print_stash_cleanup_guidance("abc123", "stash@{2}")
    output = capsys.readouterr().out
    for expected in (
        "Check `git status` first",
        "git stash list --format='%gd %H %s'",
        "git stash drop stash@{2}",
    ):
        assert expected in output
def test_restore_stashed_changes_keeps_going_when_stash_entry_cannot_be_resolved(monkeypatch, tmp_path, capsys):
    """If the stash list no longer contains our commit, restore still succeeds and guidance is printed."""
    calls = []

    def fake_run(cmd, **kwargs):
        calls.append((cmd, kwargs))
        if cmd[1:3] == ["stash", "apply"]:
            return SimpleNamespace(stdout="applied\n", stderr="", returncode=0)
        if cmd[1:3] == ["diff", "--name-only"]:
            return SimpleNamespace(stdout="", stderr="", returncode=0)
        # Listing shows a different commit (def456), so abc123 cannot be resolved.
        if cmd[1:3] == ["stash", "list"]:
            return SimpleNamespace(stdout="stash@{0} def456\n", stderr="", returncode=0)
        raise AssertionError(f"unexpected command: {cmd}")

    monkeypatch.setattr(hermes_main.subprocess, "run", fake_run)
    restored = hermes_main._restore_stashed_changes(["git"], tmp_path, "abc123", prompt_user=False)
    assert restored is True
    # Exact commands and kwargs: apply, conflict check, then list (no drop attempted).
    assert calls[0] == (["git", "stash", "apply", "abc123"], {"cwd": tmp_path, "capture_output": True, "text": True})
    assert calls[1] == (["git", "diff", "--name-only", "--diff-filter=U"], {"cwd": tmp_path, "capture_output": True, "text": True})
    assert calls[2] == (["git", "stash", "list", "--format=%gd %H"], {"cwd": tmp_path, "capture_output": True, "text": True, "check": True})
    out = capsys.readouterr().out
    assert "couldn't find the stash entry to drop" in out
    assert "stash was left in place" in out
    assert "Check `git status` first" in out
    assert "git stash list --format='%gd %H %s'" in out
    assert "Look for commit abc123" in out
def test_restore_stashed_changes_keeps_going_when_drop_fails(monkeypatch, tmp_path, capsys):
    """A failing `git stash drop` does not abort the restore; cleanup guidance is printed instead."""
    calls = []

    def fake_run(cmd, **kwargs):
        calls.append((cmd, kwargs))
        if cmd[1:3] == ["stash", "apply"]:
            return SimpleNamespace(stdout="applied\n", stderr="", returncode=0)
        if cmd[1:3] == ["diff", "--name-only"]:
            return SimpleNamespace(stdout="", stderr="", returncode=0)
        if cmd[1:3] == ["stash", "list"]:
            return SimpleNamespace(stdout="stash@{0} abc123\n", stderr="", returncode=0)
        # The drop itself fails with a non-zero exit code.
        if cmd[1:3] == ["stash", "drop"]:
            return SimpleNamespace(stdout="", stderr="drop failed\n", returncode=1)
        raise AssertionError(f"unexpected command: {cmd}")

    monkeypatch.setattr(hermes_main.subprocess, "run", fake_run)
    restored = hermes_main._restore_stashed_changes(["git"], tmp_path, "abc123", prompt_user=False)
    assert restored is True
    assert calls[3][0] == ["git", "stash", "drop", "stash@{0}"]
    out = capsys.readouterr().out
    # The failure and its stderr are surfaced, followed by manual-cleanup guidance.
    assert "couldn't drop the saved stash entry" in out
    assert "drop failed" in out
    assert "Check `git status` first" in out
    assert "git stash list --format='%gd %H %s'" in out
    assert "git stash drop stash@{0}" in out
def test_restore_stashed_changes_prompts_before_reset_on_conflict(monkeypatch, tmp_path, capsys):
    """When conflicts occur interactively, user is prompted before reset."""
    calls = []

    def fake_run(cmd, **kwargs):
        calls.append((cmd, kwargs))
        # Apply fails with a conflict (non-zero exit).
        if cmd[1:3] == ["stash", "apply"]:
            return SimpleNamespace(stdout="conflict output\n", stderr="conflict stderr\n", returncode=1)
        # One unmerged file reported.
        if cmd[1:3] == ["diff", "--name-only"]:
            return SimpleNamespace(stdout="hermes_cli/main.py\n", stderr="", returncode=0)
        if cmd[1:3] == ["reset", "--hard"]:
            return SimpleNamespace(stdout="", stderr="", returncode=0)
        raise AssertionError(f"unexpected command: {cmd}")

    monkeypatch.setattr(hermes_main.subprocess, "run", fake_run)
    # "y" answers both the restore prompt and the reset confirmation.
    monkeypatch.setattr("builtins.input", lambda: "y")
    with pytest.raises(SystemExit, match="1"):
        hermes_main._restore_stashed_changes(["git"], tmp_path, "abc123", prompt_user=True)
    out = capsys.readouterr().out
    assert "Conflicted files:" in out
    assert "hermes_cli/main.py" in out
    assert "stashed changes are preserved" in out
    assert "Reset working tree to clean state" in out
    assert "Working tree reset to clean state" in out
    # Exactly one `reset --hard` was issued.
    reset_calls = [c for c, _ in calls if c[1:3] == ["reset", "--hard"]]
    assert len(reset_calls) == 1
def test_restore_stashed_changes_user_declines_reset(monkeypatch, tmp_path, capsys):
    """When user declines reset, working tree is left as-is."""
    calls = []

    def fake_run(cmd, **kwargs):
        calls.append((cmd, kwargs))
        # Apply conflicts; one unmerged file is reported.
        if cmd[1:3] == ["stash", "apply"]:
            return SimpleNamespace(stdout="", stderr="conflict\n", returncode=1)
        if cmd[1:3] == ["diff", "--name-only"]:
            return SimpleNamespace(stdout="cli.py\n", stderr="", returncode=0)
        raise AssertionError(f"unexpected command: {cmd}")

    monkeypatch.setattr(hermes_main.subprocess, "run", fake_run)
    # First input: "y" to restore, second input: "n" to decline reset
    inputs = iter(["y", "n"])
    monkeypatch.setattr("builtins.input", lambda: next(inputs))
    with pytest.raises(SystemExit, match="1"):
        hermes_main._restore_stashed_changes(["git"], tmp_path, "abc123", prompt_user=True)
    out = capsys.readouterr().out
    assert "left as-is" in out
    # No `reset --hard` must have been executed.
    reset_calls = [c for c, _ in calls if c[1:3] == ["reset", "--hard"]]
    assert len(reset_calls) == 0
def test_restore_stashed_changes_auto_resets_non_interactive(monkeypatch, tmp_path, capsys):
    """Non-interactive mode auto-resets without prompting."""
    calls = []

    def fake_run(cmd, **kwargs):
        calls.append((cmd, kwargs))
        if cmd[1:3] == ["stash", "apply"]:
            return SimpleNamespace(stdout="applied\n", stderr="", returncode=0)
        # A file is still unmerged after apply, which counts as a conflict.
        if cmd[1:3] == ["diff", "--name-only"]:
            return SimpleNamespace(stdout="cli.py\n", stderr="", returncode=0)
        if cmd[1:3] == ["reset", "--hard"]:
            return SimpleNamespace(stdout="", stderr="", returncode=0)
        raise AssertionError(f"unexpected command: {cmd}")

    monkeypatch.setattr(hermes_main.subprocess, "run", fake_run)
    with pytest.raises(SystemExit, match="1"):
        hermes_main._restore_stashed_changes(["git"], tmp_path, "abc123", prompt_user=False)
    out = capsys.readouterr().out
    assert "Working tree reset to clean state" in out
    # The reset happened exactly once, with no prompt.
    reset_calls = [c for c, _ in calls if c[1:3] == ["reset", "--hard"]]
    assert len(reset_calls) == 1
def test_stash_local_changes_if_needed_raises_when_stash_ref_missing(monkeypatch, tmp_path):
    """If refs/stash cannot be resolved after a push, the CalledProcessError propagates."""

    def fake_run(cmd, **kwargs):
        if cmd[-2:] == ["status", "--porcelain"]:
            return SimpleNamespace(stdout=" M hermes_cli/main.py\n", returncode=0)
        if cmd[1:4] == ["stash", "push", "--include-untracked"]:
            return SimpleNamespace(stdout="Saved working directory\n", returncode=0)
        if cmd[-3:] == ["rev-parse", "--verify", "refs/stash"]:
            raise CalledProcessError(returncode=128, cmd=cmd)
        raise AssertionError(f"unexpected command: {cmd}")

    monkeypatch.setattr(hermes_main.subprocess, "run", fake_run)
    with pytest.raises(CalledProcessError):
        hermes_main._stash_local_changes_if_needed(["git"], Path(tmp_path))
# ---------------------------------------------------------------------------
# Update uses .[all] with fallback to .
# ---------------------------------------------------------------------------
def _setup_update_mocks(monkeypatch, tmp_path):
    """Common setup for cmd_update tests."""
    # Fake git checkout rooted at tmp_path.
    (tmp_path / ".git").mkdir()
    monkeypatch.setattr(hermes_main, "PROJECT_ROOT", tmp_path)
    # Neutralize stash handling so these tests focus on the install step.
    monkeypatch.setattr(hermes_main, "_stash_local_changes_if_needed", lambda *a, **kw: None)
    monkeypatch.setattr(hermes_main, "_restore_stashed_changes", lambda *a, **kw: True)
    # Config is complete and already at the current version (5 == 5).
    monkeypatch.setattr(hermes_config, "get_missing_env_vars", lambda required_only=True: [])
    monkeypatch.setattr(hermes_config, "get_missing_config_fields", lambda: [])
    monkeypatch.setattr(hermes_config, "check_config_version", lambda: (5, 5))
    monkeypatch.setattr(hermes_config, "migrate_config", lambda **kw: {"env_added": [], "config_added": []})
def test_cmd_update_tries_extras_first_then_falls_back(monkeypatch, tmp_path):
    """When .[all] fails, update should fall back to . instead of aborting."""
    _setup_update_mocks(monkeypatch, tmp_path)
    # Pretend `uv` is installed so the uv install path is taken.
    monkeypatch.setattr("shutil.which", lambda name: "/usr/bin/uv" if name == "uv" else None)
    recorded = []

    def fake_run(cmd, **kwargs):
        recorded.append(cmd)
        # Git plumbing: one commit behind on main, pull succeeds.
        if cmd == ["git", "fetch", "origin"]:
            return SimpleNamespace(stdout="", stderr="", returncode=0)
        if cmd == ["git", "rev-parse", "--abbrev-ref", "HEAD"]:
            return SimpleNamespace(stdout="main\n", stderr="", returncode=0)
        if cmd == ["git", "rev-list", "HEAD..origin/main", "--count"]:
            return SimpleNamespace(stdout="1\n", stderr="", returncode=0)
        if cmd == ["git", "pull", "origin", "main"]:
            return SimpleNamespace(stdout="Updating\n", stderr="", returncode=0)
        # .[all] fails
        if ".[all]" in cmd:
            raise CalledProcessError(returncode=1, cmd=cmd)
        # bare . succeeds
        if cmd == ["/usr/bin/uv", "pip", "install", "-e", ".", "--quiet"]:
            return SimpleNamespace(returncode=0)
        return SimpleNamespace(returncode=0)

    monkeypatch.setattr(hermes_main.subprocess, "run", fake_run)
    hermes_main.cmd_update(SimpleNamespace())
    # Two installs: first the extras variant, then the bare fallback.
    install_cmds = [c for c in recorded if "pip" in c and "install" in c]
    assert len(install_cmds) == 2
    assert ".[all]" in install_cmds[0]
    assert "." in install_cmds[1] and ".[all]" not in install_cmds[1]
def test_cmd_update_succeeds_with_extras(monkeypatch, tmp_path):
    """When .[all] succeeds, no fallback should be attempted."""
    _setup_update_mocks(monkeypatch, tmp_path)
    # Pretend `uv` is installed so the uv install path is taken.
    monkeypatch.setattr("shutil.which", lambda name: "/usr/bin/uv" if name == "uv" else None)
    recorded = []

    def fake_run(cmd, **kwargs):
        recorded.append(cmd)
        # Git plumbing: one commit behind on main, pull succeeds.
        if cmd == ["git", "fetch", "origin"]:
            return SimpleNamespace(stdout="", stderr="", returncode=0)
        if cmd == ["git", "rev-parse", "--abbrev-ref", "HEAD"]:
            return SimpleNamespace(stdout="main\n", stderr="", returncode=0)
        if cmd == ["git", "rev-list", "HEAD..origin/main", "--count"]:
            return SimpleNamespace(stdout="1\n", stderr="", returncode=0)
        if cmd == ["git", "pull", "origin", "main"]:
            return SimpleNamespace(stdout="Updating\n", stderr="", returncode=0)
        # Everything else (including the .[all] install) succeeds.
        return SimpleNamespace(returncode=0)

    monkeypatch.setattr(hermes_main.subprocess, "run", fake_run)
    hermes_main.cmd_update(SimpleNamespace())
    # Only a single install command, and it used the extras spec.
    install_cmds = [c for c in recorded if "pip" in c and "install" in c]
    assert len(install_cmds) == 1
    assert ".[all]" in install_cmds[0]

View file

@ -0,0 +1,135 @@
"""Tests for the update check mechanism in hermes_cli.banner."""
import json
import threading
import time
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
def test_version_string_no_v_prefix():
    """__version__ must be a bare semver string with no 'v' prefix."""
    from hermes_cli import __version__

    has_prefix = __version__.startswith("v")
    assert not has_prefix, f"__version__ should not start with 'v', got {__version__!r}"
def test_check_for_updates_uses_cache(tmp_path):
    """When cache is fresh, check_for_updates should return cached value without calling git."""
    from hermes_cli.banner import check_for_updates

    # Create a fake git repo and fresh cache
    repo_dir = tmp_path / "hermes-agent"
    repo_dir.mkdir()
    (repo_dir / ".git").mkdir()
    cache_file = tmp_path / ".update_check"
    # "ts" is now, so the cache is fresh and "behind" should be served as-is.
    cache_file.write_text(json.dumps({"ts": time.time(), "behind": 3}))
    with patch("hermes_cli.banner.os.getenv", return_value=str(tmp_path)):
        with patch("hermes_cli.banner.subprocess.run") as mock_run:
            result = check_for_updates()
            assert result == 3
            # A cache hit must never shell out to git.
            mock_run.assert_not_called()
def test_check_for_updates_expired_cache(tmp_path):
    """When cache is expired, check_for_updates should call git fetch."""
    from hermes_cli.banner import check_for_updates

    repo_dir = tmp_path / "hermes-agent"
    repo_dir.mkdir()
    (repo_dir / ".git").mkdir()
    # Write an expired cache (timestamp far in the past)
    cache_file = tmp_path / ".update_check"
    cache_file.write_text(json.dumps({"ts": 0, "behind": 1}))
    # git reports 5 commits behind.
    mock_result = MagicMock(returncode=0, stdout="5\n")
    with patch("hermes_cli.banner.os.getenv", return_value=str(tmp_path)):
        with patch("hermes_cli.banner.subprocess.run", return_value=mock_result) as mock_run:
            result = check_for_updates()
            assert result == 5
            assert mock_run.call_count == 2  # git fetch + git rev-list
def test_check_for_updates_no_git_dir(tmp_path):
    """Returns None when .git directory doesn't exist anywhere."""
    import hermes_cli.banner as banner

    # Create a fake banner.py so the fallback path also has no .git
    fake_banner = tmp_path / "hermes_cli" / "banner.py"
    fake_banner.parent.mkdir(parents=True, exist_ok=True)
    fake_banner.touch()
    original = banner.__file__
    try:
        # Point the module at the fake file for the duration of the test.
        banner.__file__ = str(fake_banner)
        with patch("hermes_cli.banner.os.getenv", return_value=str(tmp_path)):
            with patch("hermes_cli.banner.subprocess.run") as mock_run:
                result = banner.check_for_updates()
                assert result is None
                # With no repo anywhere, git must never be invoked.
                mock_run.assert_not_called()
    finally:
        # Always restore the real module path, even on failure.
        banner.__file__ = original
def test_check_for_updates_fallback_to_project_root():
    """Dev install: falls back to Path(__file__).parent.parent when HERMES_HOME has no git repo."""
    import hermes_cli.banner as banner

    project_root = Path(banner.__file__).parent.parent.resolve()
    # Only meaningful when the test itself runs from a git checkout.
    if not (project_root / ".git").exists():
        pytest.skip("Not running from a git checkout")
    # Point HERMES_HOME at a temp dir with no hermes-agent/.git
    import tempfile
    with tempfile.TemporaryDirectory() as td:
        with patch("hermes_cli.banner.os.getenv", return_value=td):
            with patch("hermes_cli.banner.subprocess.run") as mock_run:
                mock_run.return_value = MagicMock(returncode=0, stdout="0\n")
                result = banner.check_for_updates()
                # Should have fallen back to project root and run git commands
                assert mock_run.call_count >= 1
def test_prefetch_non_blocking():
    """prefetch_update_check() should return immediately without blocking."""
    import hermes_cli.banner as banner

    # Reset module state
    banner._update_result = None
    banner._update_check_done = threading.Event()
    with patch.object(banner, "check_for_updates", return_value=5):
        start = time.monotonic()
        banner.prefetch_update_check()
        elapsed = time.monotonic() - start
        # Should return almost immediately (well under 1 second)
        assert elapsed < 1.0
        # Wait for the background thread to finish
        banner._update_check_done.wait(timeout=5)
        # The background thread stored the mocked check result.
        assert banner._update_result == 5
def test_get_update_result_timeout():
    """get_update_result() gives up and returns None when the check never finishes."""
    import hermes_cli.banner as banner

    # Reset module state; the event is never set, simulating a pending check.
    banner._update_result = None
    banner._update_check_done = threading.Event()
    started = time.monotonic()
    outcome = banner.get_update_result(timeout=0.1)
    waited = time.monotonic() - started
    # Waited roughly the timeout, then returned None.
    assert outcome is None
    assert waited < 0.5

View file

@ -0,0 +1,305 @@
"""Tests for cmd_update gateway auto-restart — systemd + launchd coverage.
Ensures ``hermes update`` correctly detects running gateways managed by
systemd (Linux) or launchd (macOS) and restarts/informs the user properly,
rather than leaving zombie processes or telling users to manually restart
when launchd will auto-respawn.
"""
import subprocess
from types import SimpleNamespace
from unittest.mock import patch, MagicMock
import pytest
import hermes_cli.gateway as gateway_cli
from hermes_cli.main import cmd_update
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_run_side_effect(
    branch="main",
    verify_ok=True,
    commit_count="3",
    systemd_active=False,
    launchctl_loaded=False,
):
    """Build a subprocess.run side_effect that simulates git + service commands.

    Args:
        branch: branch name reported by `git rev-parse --abbrev-ref HEAD`.
        verify_ok: whether `git rev-parse --verify` succeeds (rc 0) or fails (rc 128).
        commit_count: commits-behind count reported by `git rev-list ... --count`.
        systemd_active: whether `systemctl --user is-active` reports "active".
        launchctl_loaded: whether `launchctl list` finds the gateway service.
    """

    def side_effect(cmd, **kwargs):
        joined = " ".join(str(c) for c in cmd)
        # git rev-parse --abbrev-ref HEAD
        if "rev-parse" in joined and "--abbrev-ref" in joined:
            return subprocess.CompletedProcess(cmd, 0, stdout=f"{branch}\n", stderr="")
        # git rev-parse --verify origin/{branch}
        if "rev-parse" in joined and "--verify" in joined:
            rc = 0 if verify_ok else 128
            return subprocess.CompletedProcess(cmd, rc, stdout="", stderr="")
        # git rev-list HEAD..origin/{branch} --count
        if "rev-list" in joined:
            return subprocess.CompletedProcess(cmd, 0, stdout=f"{commit_count}\n", stderr="")
        # systemctl --user is-active
        if "systemctl" in joined and "is-active" in joined:
            if systemd_active:
                return subprocess.CompletedProcess(cmd, 0, stdout="active\n", stderr="")
            return subprocess.CompletedProcess(cmd, 3, stdout="inactive\n", stderr="")
        # systemctl --user restart
        if "systemctl" in joined and "restart" in joined:
            return subprocess.CompletedProcess(cmd, 0, stdout="", stderr="")
        # launchctl list ai.hermes.gateway
        if "launchctl" in joined and "list" in joined:
            if launchctl_loaded:
                return subprocess.CompletedProcess(cmd, 0, stdout="PID\tStatus\tLabel\n123\t0\tai.hermes.gateway\n", stderr="")
            return subprocess.CompletedProcess(cmd, 113, stdout="", stderr="Could not find service")
        # Default: every other command silently succeeds.
        return subprocess.CompletedProcess(cmd, 0, stdout="", stderr="")

    return side_effect
@pytest.fixture
def mock_args():
    # Bare argparse-style namespace passed to cmd_update; it reads no attributes here.
    return SimpleNamespace()
# ---------------------------------------------------------------------------
# Launchd plist includes --replace
# ---------------------------------------------------------------------------
class TestLaunchdPlistReplace:
    """The generated launchd plist must include --replace so respawned
    gateways kill stale instances."""

    def test_plist_contains_replace_flag(self):
        """The --replace flag appears somewhere in the rendered plist XML."""
        plist = gateway_cli.generate_launchd_plist()
        assert "--replace" in plist

    def test_plist_program_arguments_order(self):
        """--replace comes after 'run' in the ProgramArguments."""
        plist = gateway_cli.generate_launchd_plist()
        lines = [line.strip() for line in plist.splitlines()]
        # Find 'run' and '--replace' in the string entries
        string_values = [
            line.replace("<string>", "").replace("</string>", "")
            for line in lines
            if "<string>" in line and "</string>" in line
        ]
        assert "run" in string_values
        assert "--replace" in string_values
        run_idx = string_values.index("run")
        replace_idx = string_values.index("--replace")
        # --replace must immediately follow the 'run' subcommand.
        assert replace_idx == run_idx + 1
# ---------------------------------------------------------------------------
# cmd_update — macOS launchd detection
# ---------------------------------------------------------------------------
class TestLaunchdPlistRefresh:
    """refresh_launchd_plist_if_needed rewrites stale plists (like systemd's
    refresh_systemd_unit_if_needed)."""

    def test_refresh_rewrites_stale_plist(self, tmp_path, monkeypatch):
        """A plist whose content differs from the generated one is rewritten and reloaded."""
        plist_path = tmp_path / "ai.hermes.gateway.plist"
        plist_path.write_text("<plist>old content</plist>")
        monkeypatch.setattr(gateway_cli, "get_launchd_plist_path", lambda: plist_path)
        calls = []

        def fake_run(cmd, check=False, **kwargs):
            calls.append(cmd)
            return SimpleNamespace(returncode=0, stdout="", stderr="")

        monkeypatch.setattr(gateway_cli.subprocess, "run", fake_run)
        result = gateway_cli.refresh_launchd_plist_if_needed()
        assert result is True
        # Plist should now contain the generated content (which includes --replace)
        assert "--replace" in plist_path.read_text()
        # Should have unloaded then reloaded
        assert any("unload" in str(c) for c in calls)
        assert any("load" in str(c) for c in calls)

    def test_refresh_skips_when_current(self, tmp_path, monkeypatch):
        """An up-to-date plist is left alone and launchctl is never invoked."""
        plist_path = tmp_path / "ai.hermes.gateway.plist"
        monkeypatch.setattr(gateway_cli, "get_launchd_plist_path", lambda: plist_path)
        # Write the current expected content
        plist_path.write_text(gateway_cli.generate_launchd_plist())
        calls = []
        monkeypatch.setattr(
            gateway_cli.subprocess, "run",
            lambda cmd, **kw: calls.append(cmd) or SimpleNamespace(returncode=0),
        )
        result = gateway_cli.refresh_launchd_plist_if_needed()
        assert result is False
        assert len(calls) == 0  # No launchctl calls needed

    def test_refresh_skips_when_no_plist(self, tmp_path, monkeypatch):
        """A missing plist means there is nothing to refresh."""
        plist_path = tmp_path / "nonexistent.plist"
        monkeypatch.setattr(gateway_cli, "get_launchd_plist_path", lambda: plist_path)
        result = gateway_cli.refresh_launchd_plist_if_needed()
        assert result is False

    def test_launchd_start_calls_refresh(self, tmp_path, monkeypatch):
        """launchd_start refreshes the plist before starting."""
        plist_path = tmp_path / "ai.hermes.gateway.plist"
        plist_path.write_text("<plist>old</plist>")
        monkeypatch.setattr(gateway_cli, "get_launchd_plist_path", lambda: plist_path)
        calls = []

        def fake_run(cmd, check=False, **kwargs):
            calls.append(cmd)
            return SimpleNamespace(returncode=0, stdout="", stderr="")

        monkeypatch.setattr(gateway_cli.subprocess, "run", fake_run)
        gateway_cli.launchd_start()
        # First calls should be refresh (unload/load), then start
        cmd_strs = [" ".join(c) for c in calls]
        assert any("unload" in s for s in cmd_strs)
        assert any("start" in s for s in cmd_strs)
class TestCmdUpdateLaunchdRestart:
    """cmd_update correctly detects and handles launchd on macOS."""

    @patch("shutil.which", return_value=None)
    @patch("subprocess.run")
    def test_update_detects_launchd_and_skips_manual_restart_message(
        self, mock_run, _mock_which, mock_args, capsys, tmp_path, monkeypatch,
    ):
        """When launchd is running the gateway, update should print
        'auto-restart via launchd' instead of 'Restart it with: hermes gateway run'."""
        # Create a fake launchd plist so is_macos + plist.exists() passes
        plist_path = tmp_path / "ai.hermes.gateway.plist"
        plist_path.write_text("<plist/>")
        monkeypatch.setattr(
            gateway_cli, "is_macos", lambda: True,
        )
        monkeypatch.setattr(
            gateway_cli, "get_launchd_plist_path", lambda: plist_path,
        )
        # Simulate: 3 commits behind, and launchctl reports the service loaded.
        mock_run.side_effect = _make_run_side_effect(
            commit_count="3",
            launchctl_loaded=True,
        )
        # Mock get_running_pid to return a PID
        with patch("gateway.status.get_running_pid", return_value=12345), \
                patch("gateway.status.remove_pid_file"):
            cmd_update(mock_args)
        captured = capsys.readouterr().out
        assert "Gateway restarted via launchd" in captured
        assert "Restart it with: hermes gateway run" not in captured
        # Verify launchctl stop + start were called (not manual SIGTERM)
        launchctl_calls = [
            c for c in mock_run.call_args_list
            if len(c.args[0]) > 0 and c.args[0][0] == "launchctl"
        ]
        stop_calls = [c for c in launchctl_calls if "stop" in c.args[0]]
        start_calls = [c for c in launchctl_calls if "start" in c.args[0]]
        assert len(stop_calls) >= 1
        assert len(start_calls) >= 1

    @patch("shutil.which", return_value=None)
    @patch("subprocess.run")
    def test_update_without_launchd_shows_manual_restart(
        self, mock_run, _mock_which, mock_args, capsys, tmp_path, monkeypatch,
    ):
        """When no service manager is running, update should show the manual restart hint."""
        monkeypatch.setattr(
            gateway_cli, "is_macos", lambda: True,
        )
        plist_path = tmp_path / "ai.hermes.gateway.plist"
        # plist does NOT exist — no launchd service
        monkeypatch.setattr(
            gateway_cli, "get_launchd_plist_path", lambda: plist_path,
        )
        mock_run.side_effect = _make_run_side_effect(
            commit_count="3",
            launchctl_loaded=False,
        )
        # os.kill is patched so the manual SIGTERM path is harmless.
        with patch("gateway.status.get_running_pid", return_value=12345), \
                patch("gateway.status.remove_pid_file"), \
                patch("os.kill"):
            cmd_update(mock_args)
        captured = capsys.readouterr().out
        assert "Restart it with: hermes gateway run" in captured
        assert "Gateway restarted via launchd" not in captured

    @patch("shutil.which", return_value=None)
    @patch("subprocess.run")
    def test_update_with_systemd_still_restarts_via_systemd(
        self, mock_run, _mock_which, mock_args, capsys, monkeypatch,
    ):
        """On Linux with systemd active, update should restart via systemctl."""
        monkeypatch.setattr(
            gateway_cli, "is_macos", lambda: False,
        )
        mock_run.side_effect = _make_run_side_effect(
            commit_count="3",
            systemd_active=True,
        )
        with patch("gateway.status.get_running_pid", return_value=12345), \
                patch("gateway.status.remove_pid_file"), \
                patch("os.kill"):
            cmd_update(mock_args)
        captured = capsys.readouterr().out
        assert "Gateway restarted" in captured
        # Verify systemctl restart was called
        restart_calls = [
            c for c in mock_run.call_args_list
            if "restart" in " ".join(str(a) for a in c.args[0])
            and "systemctl" in " ".join(str(a) for a in c.args[0])
        ]
        assert len(restart_calls) == 1

    @patch("shutil.which", return_value=None)
    @patch("subprocess.run")
    def test_update_no_gateway_running_skips_restart(
        self, mock_run, _mock_which, mock_args, capsys, monkeypatch,
    ):
        """When no gateway is running, update should skip the restart section entirely."""
        monkeypatch.setattr(
            gateway_cli, "is_macos", lambda: False,
        )
        mock_run.side_effect = _make_run_side_effect(
            commit_count="3",
            systemd_active=False,
        )
        with patch("gateway.status.get_running_pid", return_value=None):
            cmd_update(mock_args)
        captured = capsys.readouterr().out
        # None of the restart-path messages should appear.
        assert "Stopped gateway" not in captured
        assert "Gateway restarted" not in captured
        assert "Gateway restarted via launchd" not in captured