Merge PR #736: feat(honcho): async writes, memory modes, session title integration, setup CLI

Authored by erosika. Builds on #38 and #243.

Adds async write support, configurable memory modes, context prefetch pipeline,
4 new Honcho tools (honcho_context, honcho_profile, honcho_search, honcho_conclude),
full 'hermes honcho' CLI, session strategies, AI peer identity, recallMode A/B,
gateway lifecycle management, and comprehensive docs.

Cherry-picks fixes from PRs #831/#832 (adavyas).

Co-authored-by: erosika <erosika@users.noreply.github.com>
Co-authored-by: adavyas <adavyas@users.noreply.github.com>
This commit is contained in:
Teknium 2026-03-12 19:05:11 -07:00 committed by GitHub
commit 475dd58a8e
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
26 changed files with 4688 additions and 355 deletions

View file

@@ -110,7 +110,7 @@ DEFAULT_CONFIG = {
"inactivity_timeout": 120,
"record_sessions": False, # Auto-record browser sessions as WebM videos
},
# Filesystem checkpoints — automatic snapshots before destructive file ops.
# When enabled, the agent takes a snapshot of the working directory once per
# conversation turn (on first write_file/patch call). Use /rollback to restore.
@@ -456,7 +456,7 @@ OPTIONAL_ENV_VARS = {
"description": "Honcho API key for AI-native persistent memory",
"prompt": "Honcho API key",
"url": "https://app.honcho.dev",
"tools": ["query_user_context"],
"tools": ["honcho_context"],
"password": True,
"category": "tool",
},
@@ -907,6 +907,36 @@ _COMMENTED_SECTIONS = """
"""
# Template of commented-out config.yaml sections that users can uncomment
# to opt in (security redaction toggle, fallback-model failover).
# NOTE(review): the diff context directly above shows another
# `_COMMENTED_SECTIONS = """…"""` closing just before this line — this plain
# reassignment would discard that earlier content. Confirm whether the file
# intends `+=` (append) here rather than `=`.
_COMMENTED_SECTIONS = """
# ── Security ──────────────────────────────────────────────────────────
# API keys, tokens, and passwords are redacted from tool output by default.
# Set to false to see full values (useful for debugging auth issues).
#
# security:
# redact_secrets: false
# ── Fallback Model ────────────────────────────────────────────────────
# Automatic provider failover when primary is unavailable.
# Uncomment and configure to enable. Triggers on rate limits (429),
# overload (529), service errors (503), or connection failures.
#
# Supported providers:
# openrouter (OPENROUTER_API_KEY) — routes to any model
# openai-codex (OAuth — hermes login) — OpenAI Codex
# nous (OAuth — hermes login) — Nous Portal
# zai (ZAI_API_KEY) — Z.AI / GLM
# kimi-coding (KIMI_API_KEY) — Kimi / Moonshot
# minimax (MINIMAX_API_KEY) — MiniMax
# minimax-cn (MINIMAX_CN_API_KEY) — MiniMax (China)
#
# For custom OpenAI-compatible endpoints, add base_url and api_key_env.
#
# fallback_model:
# provider: openrouter
# model: anthropic/claude-sonnet-4
"""
def save_config(config: Dict[str, Any]):
"""Save configuration to ~/.hermes/config.yaml."""
from utils import atomic_yaml_write

View file

@@ -634,6 +634,40 @@ def run_doctor(args):
else:
check_warn("No GITHUB_TOKEN", "(60 req/hr rate limit — set in ~/.hermes/.env for better rates)")
# =========================================================================
# Honcho memory
# =========================================================================
print()
print(color("◆ Honcho Memory", Colors.CYAN, Colors.BOLD))
try:
    # Imported lazily: the honcho-ai SDK is optional, and ImportError below
    # downgrades the whole section to a warning instead of failing doctor.
    from honcho_integration.client import HonchoClientConfig, GLOBAL_CONFIG_PATH

    hcfg = HonchoClientConfig.from_global_config()
    if not GLOBAL_CONFIG_PATH.exists():
        # No config file at all — point the user at the setup wizard.
        # (Was an f-string with no placeholders; plain literal is correct.)
        check_warn("Honcho config not found", "run: hermes honcho setup")
    elif not hcfg.enabled:
        check_info("Honcho disabled (set enabled: true in ~/.honcho/config.json to activate)")
    elif not hcfg.api_key:
        # Configured and enabled but unusable — counts as a real issue.
        check_fail("Honcho API key not set", "run: hermes honcho setup")
        issues.append("No Honcho API key — run 'hermes honcho setup'")
    else:
        from honcho_integration.client import get_honcho_client, reset_honcho_client

        # Drop any cached client so the probe reflects the current config.
        reset_honcho_client()
        try:
            get_honcho_client(hcfg)
            check_ok(
                "Honcho connected",
                f"workspace={hcfg.workspace_id} mode={hcfg.memory_mode} freq={hcfg.write_frequency}",
            )
        except Exception as _e:
            check_fail("Honcho connection failed", str(_e))
            issues.append(f"Honcho unreachable: {_e}")
except ImportError:
    # SDK not installed — memory integration unavailable but not an error.
    check_warn("honcho-ai not installed", "pip install honcho-ai")
except Exception as _e:
    # Never let the Honcho probe break the rest of `hermes doctor`.
    check_warn("Honcho check failed", str(_e))
# =========================================================================
# Summary
# =========================================================================

View file

@@ -18,6 +18,22 @@ Usage:
hermes cron list # List cron jobs
hermes cron status # Check if cron scheduler is running
hermes doctor # Check configuration and dependencies
hermes honcho setup # Configure Honcho AI memory integration
hermes honcho status # Show Honcho config and connection status
hermes honcho sessions # List directory → session name mappings
hermes honcho map <name> # Map current directory to a session name
hermes honcho peer # Show peer names and dialectic settings
hermes honcho peer --user NAME # Set user peer name
hermes honcho peer --ai NAME # Set AI peer name
hermes honcho peer --reasoning LEVEL # Set dialectic reasoning level
hermes honcho mode # Show current memory mode
hermes honcho mode [hybrid|honcho|local] # Set memory mode
hermes honcho tokens # Show token budget settings
hermes honcho tokens --context N # Set session.context() token cap
hermes honcho tokens --dialectic N # Set dialectic result char cap
hermes honcho identity # Show AI peer identity representation
hermes honcho identity <file> # Seed AI peer identity from a file (SOUL.md etc.)
hermes honcho migrate # Step-by-step migration guide: OpenClaw native → Hermes + Honcho
hermes version # Show version
hermes update # Update to latest version
hermes uninstall # Uninstall Hermes Agent
@@ -2595,6 +2611,94 @@ For more help on a command:
skills_parser.set_defaults(func=cmd_skills)
# =========================================================================
# honcho command
# =========================================================================
# Normal import instead of the discouraged raw __import__("argparse") hack;
# cheap even if argparse is already imported at module level.
import argparse

honcho_parser = subparsers.add_parser(
    "honcho",
    help="Manage Honcho AI memory integration",
    description=(
        "Honcho is a memory layer that persists across sessions.\n\n"
        "Each conversation is stored as a peer interaction in a workspace. "
        "Honcho builds a representation of the user over time — conclusions, "
        "patterns, context — and surfaces the relevant slice at the start of "
        "each turn so Hermes knows who you are without you having to repeat yourself.\n\n"
        "Modes: hybrid (Honcho + local MEMORY.md), honcho (Honcho only), "
        "local (MEMORY.md only). Write frequency is configurable so memory "
        "writes never block the response."
    ),
    # Keep the hand-wrapped newlines in the description above.
    formatter_class=argparse.RawDescriptionHelpFormatter,
)
honcho_subparsers = honcho_parser.add_subparsers(dest="honcho_command")

# Subcommands that take no arguments of their own.
honcho_subparsers.add_parser("setup", help="Interactive setup wizard for Honcho integration")
honcho_subparsers.add_parser("status", help="Show current Honcho config and connection status")
honcho_subparsers.add_parser("sessions", help="List known Honcho session mappings")

# map — associate the current working directory with a named Honcho session.
honcho_map = honcho_subparsers.add_parser(
    "map", help="Map current directory to a Honcho session name (no arg = list mappings)"
)
honcho_map.add_argument(
    "session_name", nargs="?", default=None,
    help="Session name to associate with this directory. Omit to list current mappings.",
)

# peer — show or update peer names and the dialectic reasoning level.
honcho_peer = honcho_subparsers.add_parser(
    "peer", help="Show or update peer names and dialectic reasoning level"
)
honcho_peer.add_argument("--user", metavar="NAME", help="Set user peer name")
honcho_peer.add_argument("--ai", metavar="NAME", help="Set AI peer name")
honcho_peer.add_argument(
    "--reasoning",
    metavar="LEVEL",
    choices=("minimal", "low", "medium", "high", "max"),
    help="Set default dialectic reasoning level (minimal/low/medium/high/max)",
)

# mode — show current memory mode, or switch it when an argument is given.
honcho_mode = honcho_subparsers.add_parser(
    "mode", help="Show or set memory mode (hybrid/honcho/local)"
)
honcho_mode.add_argument(
    "mode", nargs="?", metavar="MODE",
    choices=("hybrid", "honcho", "local"),
    help="Memory mode to set (hybrid/honcho/local). Omit to show current.",
)

# tokens — show or set the per-turn context/dialectic budgets.
honcho_tokens = honcho_subparsers.add_parser(
    "tokens", help="Show or set token budget for context and dialectic"
)
honcho_tokens.add_argument(
    "--context", type=int, metavar="N",
    help="Max tokens Honcho returns from session.context() per turn",
)
honcho_tokens.add_argument(
    "--dialectic", type=int, metavar="N",
    help="Max chars of dialectic result to inject into system prompt",
)

# identity — seed the AI peer's identity from a file, or show it with --show.
honcho_identity = honcho_subparsers.add_parser(
    "identity", help="Seed or show the AI peer's Honcho identity representation"
)
honcho_identity.add_argument(
    "file", nargs="?", default=None,
    help="Path to file to seed from (e.g. SOUL.md). Omit to show usage.",
)
honcho_identity.add_argument(
    "--show", action="store_true",
    help="Show current AI peer representation from Honcho",
)

honcho_subparsers.add_parser(
    "migrate",
    help="Step-by-step migration guide from openclaw-honcho to Hermes Honcho",
)
def cmd_honcho(args):
    """Entry point for `hermes honcho ...` — delegate to the integration CLI."""
    # Lazy import: the honcho integration module is loaded only when the
    # command actually runs.
    from honcho_integration import cli as _honcho_cli

    _honcho_cli.honcho_command(args)

honcho_parser.set_defaults(func=cmd_honcho)
# =========================================================================
# tools command
# =========================================================================