feat(honcho): async memory integration with prefetch pipeline and recallMode

Adds full Honcho memory integration to Hermes:

- Session manager with async background writes, memory modes (honcho/hybrid/local),
  and dialectic prefetch for first-turn context warming
- Agent integration: prefetch pipeline, tool surface gated by recallMode,
  system prompt context injection, SIGTERM/SIGINT flush handlers
- CLI commands: setup, status, mode, tokens, peer, identity, migrate
- recallMode setting (auto | context | tools) for A/B testing retrieval strategies
- Session strategies: per-session, per-repo (git tree root), per-directory, global
- Polymorphic memoryMode config: string shorthand or per-peer object overrides
- 97 tests covering async writes, client config, session resolution, and memory modes
This commit is contained in:
Erosika 2026-03-09 15:58:22 -04:00
parent 8eefbef91c
commit 74c214e957
17 changed files with 2478 additions and 135 deletions

749
honcho_integration/cli.py Normal file
View file

@ -0,0 +1,749 @@
"""CLI commands for Honcho integration management.
Handles: hermes honcho setup | status | sessions | map | peer
"""
from __future__ import annotations
import json
import os
import sys
from pathlib import Path
GLOBAL_CONFIG_PATH = Path.home() / ".honcho" / "config.json"
HOST = "hermes"
def _read_config() -> dict:
    """Load the shared Honcho config from ~/.honcho/config.json.

    Returns:
        The parsed config object, or ``{}`` when the file is missing,
        unreadable, or does not contain a JSON object — callers can always
        treat the result as a plain dict.
    """
    if GLOBAL_CONFIG_PATH.exists():
        try:
            data = json.loads(GLOBAL_CONFIG_PATH.read_text(encoding="utf-8"))
        except (OSError, ValueError):
            # Unreadable or corrupt config is treated as "no config" rather
            # than aborting every CLI command. (json.JSONDecodeError is a
            # ValueError subclass; UnicodeDecodeError as well.)
            return {}
        if isinstance(data, dict):
            return data
    return {}
def _write_config(cfg: dict) -> None:
    """Persist *cfg* to ~/.honcho/config.json, creating parent dirs as needed."""
    GLOBAL_CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(cfg, indent=2, ensure_ascii=False)
    GLOBAL_CONFIG_PATH.write_text(serialized + "\n", encoding="utf-8")
def _prompt(label: str, default: str | None = None, secret: bool = False) -> str:
    """Prompt on stdout and read one answer line from stdin.

    Args:
        label: Prompt text shown to the user.
        default: Value returned when the user enters nothing (shown in brackets).
        secret: Hide the input via getpass when attached to a real TTY.

    Returns:
        The entered value, or *default* (or ``""``) when the input is empty.
    """
    hint = f" [{default}]" if default else ""
    sys.stdout.write(f" {label}{hint}: ")
    sys.stdout.flush()
    if secret and sys.stdin.isatty():
        import getpass
        answer = getpass.getpass(prompt="")
    else:
        # Non-TTY (piped input, test runners) — read plaintext
        answer = sys.stdin.readline().strip()
    if answer:
        return answer
    return default or ""
def _ensure_sdk_installed() -> bool:
    """Check honcho-ai is importable; offer to install if not. Returns True if ready."""
    try:
        import honcho  # noqa: F401
    except ImportError:
        # SDK missing — fall through to the interactive install offer.
        pass
    else:
        return True
    print(" honcho-ai is not installed.")
    answer = _prompt("Install it now? (honcho-ai>=2.0.1)", default="y")
    if answer.lower() not in ("y", "yes"):
        print(" Skipping install. Run: pip install 'honcho-ai>=2.0.1'\n")
        return False
    import subprocess
    print(" Installing honcho-ai...", flush=True)
    result = subprocess.run(
        [sys.executable, "-m", "pip", "install", "honcho-ai>=2.0.1"],
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        print(f" Install failed:\n{result.stderr.strip()}")
        print(" Run manually: pip install 'honcho-ai>=2.0.1'\n")
        return False
    print(" Installed.\n")
    return True
def cmd_setup(args) -> None:
    """Interactive Honcho setup wizard.

    Prompts for API key, peer name, workspace, memory mode, write frequency,
    recall mode and session strategy; writes the result to
    ~/.honcho/config.json, then verifies the connection and prints a summary.

    Args:
        args: Parsed CLI namespace (unused beyond dispatch, kept for a
            uniform command signature).
    """
    cfg = _read_config()
    # NOTE: the separator was previously `"" * 40` (a mangled Unicode char),
    # which printed nothing — restored to a visible rule.
    print("\nHoncho memory setup\n" + "─" * 40)
    print(" Honcho gives Hermes persistent cross-session memory.")
    print(" Config is shared with other hosts at ~/.honcho/config.json\n")
    if not _ensure_sdk_installed():
        return
    # API key — show only the tail of any existing key.
    current_key = cfg.get("apiKey", "")
    masked = f"...{current_key[-8:]}" if len(current_key) > 8 else ("set" if current_key else "not set")
    print(f" Current API key: {masked}")
    new_key = _prompt("Honcho API key (leave blank to keep current)", secret=True)
    if new_key:
        cfg["apiKey"] = new_key
    if not cfg.get("apiKey"):
        print("\n No API key configured. Get one at https://app.honcho.dev")
        print(" Run 'hermes honcho setup' again once you have a key.\n")
        return
    # Peer name
    current_peer = cfg.get("peerName", "")
    new_peer = _prompt("Your name (user peer)", default=current_peer or os.getenv("USER", "user"))
    if new_peer:
        cfg["peerName"] = new_peer
    # Host block
    hosts = cfg.setdefault("hosts", {})
    hermes_host = hosts.setdefault(HOST, {})
    current_workspace = hermes_host.get("workspace") or cfg.get("workspace", "hermes")
    new_workspace = _prompt("Workspace ID", default=current_workspace)
    if new_workspace:
        hermes_host["workspace"] = new_workspace
        # Also update flat workspace if it was the primary one
        if cfg.get("workspace") == current_workspace:
            cfg["workspace"] = new_workspace
    hermes_host.setdefault("aiPeer", HOST)
    # Memory mode
    current_mode = cfg.get("memoryMode", "hybrid")
    print("\n Memory mode options:")
    print(" hybrid — write to both Honcho and local MEMORY.md (default)")
    print(" honcho — Honcho only, skip MEMORY.md writes")
    print(" local — MEMORY.md only, Honcho disabled")
    new_mode = _prompt("Memory mode", default=current_mode)
    if new_mode in ("hybrid", "honcho", "local"):
        cfg["memoryMode"] = new_mode
    else:
        # Anything unrecognized falls back to the safe default.
        cfg["memoryMode"] = "hybrid"
    # Write frequency — accepts an int (every N turns) or a named mode.
    current_wf = str(cfg.get("writeFrequency", "async"))
    print("\n Write frequency options:")
    print(" async — background thread, no token cost (recommended)")
    print(" turn — sync write after every turn")
    print(" session — batch write at session end only")
    print(" N — write every N turns (e.g. 5)")
    new_wf = _prompt("Write frequency", default=current_wf)
    try:
        cfg["writeFrequency"] = int(new_wf)
    except (ValueError, TypeError):
        cfg["writeFrequency"] = new_wf if new_wf in ("async", "turn", "session") else "async"
    # Recall mode
    current_recall = cfg.get("recallMode", "auto")
    print("\n Recall mode options:")
    print(" auto — pre-warmed context + memory tools available (default)")
    print(" context — pre-warmed context only, memory tools suppressed")
    print(" tools — no pre-loaded context, rely on tool calls only")
    new_recall = _prompt("Recall mode", default=current_recall)
    if new_recall in ("auto", "context", "tools"):
        cfg["recallMode"] = new_recall
    # Session strategy
    current_strat = cfg.get("sessionStrategy", "per-session")
    print("\n Session strategy options:")
    print(" per-session — new Honcho session each run, named by Hermes session ID (default)")
    print(" per-repo — one session per git repository (uses repo root name)")
    print(" per-directory — one session per working directory")
    print(" global — single session across all directories")
    new_strat = _prompt("Session strategy", default=current_strat)
    if new_strat in ("per-session", "per-repo", "per-directory", "global"):
        cfg["sessionStrategy"] = new_strat
    cfg.setdefault("enabled", True)
    cfg.setdefault("saveMessages", True)
    _write_config(cfg)
    print(f"\n Config written to {GLOBAL_CONFIG_PATH}")
    # Test connection — reset the singleton first so the fresh config is used.
    print(" Testing connection... ", end="", flush=True)
    try:
        from honcho_integration.client import HonchoClientConfig, get_honcho_client, reset_honcho_client
        reset_honcho_client()
        hcfg = HonchoClientConfig.from_global_config()
        get_honcho_client(hcfg)
        print("OK")
    except Exception as e:
        print(f"FAILED\n Error: {e}")
        return
    print("\n Honcho is ready.")
    print(f" Session: {hcfg.resolve_session_name()}")
    print(f" Workspace: {hcfg.workspace_id}")
    print(f" Peer: {hcfg.peer_name}")
    _mode_str = hcfg.memory_mode
    if hcfg.peer_memory_modes:
        overrides = ", ".join(f"{k}={v}" for k, v in hcfg.peer_memory_modes.items())
        _mode_str = f"{hcfg.memory_mode} (peers: {overrides})"
    print(f" Mode: {_mode_str}")
    print(f" Frequency: {hcfg.write_frequency}")
    print("\n Tools available in chat:")
    print(" query_user_context — ask Honcho a question about you (LLM-synthesized)")
    print(" honcho_search — semantic search over your history (no LLM)")
    print(" honcho_profile — your peer card, key facts (no LLM)")
    print("\n Other commands:")
    print(" hermes honcho status — show full config")
    print(" hermes honcho mode — show or change memory mode")
    print(" hermes honcho tokens — show or set token budgets")
    print(" hermes honcho identity — seed or show AI peer identity")
    print(" hermes honcho map <name> — map this directory to a session name\n")
def cmd_status(args) -> None:
    """Show current Honcho config and connection status.

    Prints resolved config values (key masked) and, when enabled with an API
    key, attempts a live connection check.
    """
    try:
        import honcho  # noqa: F401
    except ImportError:
        print(" honcho-ai is not installed. Run: hermes honcho setup\n")
        return
    cfg = _read_config()
    if not cfg:
        print(" No Honcho config found at ~/.honcho/config.json")
        print(" Run 'hermes honcho setup' to configure.\n")
        return
    try:
        from honcho_integration.client import HonchoClientConfig, get_honcho_client
        hcfg = HonchoClientConfig.from_global_config()
    except Exception as e:
        print(f" Config error: {e}\n")
        return
    api_key = hcfg.api_key or ""
    masked = f"...{api_key[-8:]}" if len(api_key) > 8 else ("set" if api_key else "not set")
    # Fixed: separator was a mangled `"" * 40` that printed nothing.
    print("\nHoncho status\n" + "─" * 40)
    print(f" Enabled: {hcfg.enabled}")
    print(f" API key: {masked}")
    print(f" Workspace: {hcfg.workspace_id}")
    print(f" Host: {hcfg.host}")
    print(f" Config path: {GLOBAL_CONFIG_PATH}")
    print(f" AI peer: {hcfg.ai_peer}")
    print(f" User peer: {hcfg.peer_name or 'not set'}")
    print(f" Session key: {hcfg.resolve_session_name()}")
    print(f" Recall mode: {hcfg.recall_mode}")
    print(f" Memory mode: {hcfg.memory_mode}")
    if hcfg.peer_memory_modes:
        print(" Per-peer modes:")
        for peer, mode in hcfg.peer_memory_modes.items():
            print(f" {peer}: {mode}")
    print(f" Write freq: {hcfg.write_frequency}")
    if hcfg.enabled and hcfg.api_key:
        print("\n Connection... ", end="", flush=True)
        try:
            get_honcho_client(hcfg)
            print("OK\n")
        except Exception as e:
            print(f"FAILED ({e})\n")
    else:
        reason = "disabled" if not hcfg.enabled else "no API key"
        print(f"\n Not connected ({reason})\n")
def cmd_sessions(args) -> None:
    """List known directory → session name mappings."""
    cfg = _read_config()
    sessions = cfg.get("sessions", {})
    if not sessions:
        print(" No session mappings configured.\n")
        print(" Add one with: hermes honcho map <session-name>")
        print(" Or edit ~/.honcho/config.json directly.\n")
        return
    cwd = os.getcwd()
    # Fixed: separator was a mangled `"" * 40` that printed nothing.
    print(f"\nHoncho session mappings ({len(sessions)})\n" + "─" * 40)
    for path, name in sorted(sessions.items()):
        # Flag the mapping for the current working directory. The original
        # conditional had both branches empty — the marker glyph was lost in
        # extraction.
        marker = "  ← current" if path == cwd else ""
        print(f" {name:<30} {path}{marker}")
    print()
def cmd_map(args) -> None:
    """Map current directory to a Honcho session name.

    With no name given, falls back to listing existing mappings. The name is
    sanitized to ``[a-zA-Z0-9_-]`` before being stored.
    """
    if not args.session_name:
        cmd_sessions(args)
        return
    cwd = os.getcwd()
    session_name = args.session_name.strip()
    if not session_name:
        print(" Session name cannot be empty.\n")
        return
    import re
    sanitized = re.sub(r'[^a-zA-Z0-9_-]', '-', session_name).strip('-')
    if sanitized != session_name:
        print(f" Session name sanitized to: {sanitized}")
        session_name = sanitized
    cfg = _read_config()
    cfg.setdefault("sessions", {})[cwd] = session_name
    _write_config(cfg)
    # Fixed: confirmation line lost its arrow in extraction and split the
    # mapping across two lines.
    print(f" Mapped {cwd} → {session_name}\n")
def cmd_peer(args) -> None:
    """Show or update peer names and dialectic reasoning level.

    With no flags, prints the current values. ``--user``/``--ai`` rename the
    peers; ``--reasoning`` sets the host's dialectic reasoning level.
    """
    cfg = _read_config()
    changed = False
    user_name = getattr(args, "user", None)
    ai_name = getattr(args, "ai", None)
    reasoning = getattr(args, "reasoning", None)
    REASONING_LEVELS = ("minimal", "low", "medium", "high", "max")
    if user_name is None and ai_name is None and reasoning is None:
        # Show current values
        hosts = cfg.get("hosts", {})
        hermes = hosts.get(HOST, {})
        # Fixed: separator was a mangled `"" * 40` that printed nothing.
        print("\nHoncho peer config\n" + "─" * 40)
        print(f" User peer: {cfg.get('peerName') or '(not set)'}")
        print(f" AI peer: {hermes.get('aiPeer') or cfg.get('aiPeer') or HOST}")
        lvl = hermes.get("dialecticReasoningLevel") or cfg.get("dialecticReasoningLevel") or "low"
        max_chars = hermes.get("dialecticMaxChars") or cfg.get("dialecticMaxChars") or 600
        print(f" Dialectic level: {lvl} (options: {', '.join(REASONING_LEVELS)})")
        print(f" Dialectic cap: {max_chars} chars\n")
        return
    if user_name is not None:
        cfg["peerName"] = user_name.strip()
        changed = True
        print(f" User peer → {cfg['peerName']}")
    if ai_name is not None:
        cfg.setdefault("hosts", {}).setdefault(HOST, {})["aiPeer"] = ai_name.strip()
        changed = True
        print(f" AI peer → {ai_name.strip()}")
    if reasoning is not None:
        if reasoning not in REASONING_LEVELS:
            # Bail before saving: don't persist partial edits with a bad level.
            print(f" Invalid reasoning level '{reasoning}'. Options: {', '.join(REASONING_LEVELS)}")
            return
        cfg.setdefault("hosts", {}).setdefault(HOST, {})["dialecticReasoningLevel"] = reasoning
        changed = True
        print(f" Dialectic reasoning level → {reasoning}")
    if changed:
        _write_config(cfg)
        print(f" Saved to {GLOBAL_CONFIG_PATH}\n")
def cmd_mode(args) -> None:
    """Show or set the memory mode.

    Without an argument, lists the modes and highlights the effective one
    (host-level value wins over the global). With an argument, persists it
    under the host block.
    """
    MODES = {
        "hybrid": "write to both Honcho and local MEMORY.md (default)",
        "honcho": "Honcho only — MEMORY.md writes disabled",
        "local": "MEMORY.md only — Honcho disabled",
    }
    cfg = _read_config()
    mode_arg = getattr(args, "mode", None)
    if mode_arg is None:
        current = (
            (cfg.get("hosts") or {}).get(HOST, {}).get("memoryMode")
            or cfg.get("memoryMode")
            or "hybrid"
        )
        # Fixed: separator was a mangled `"" * 40` that printed nothing.
        print("\nHoncho memory mode\n" + "─" * 40)
        for m, desc in MODES.items():
            # Fixed: both branches of this conditional were empty — the
            # "current" marker glyph was lost in extraction.
            marker = "  ← current" if m == current else ""
            print(f" {m:<8} {desc}{marker}")
        print("\n Set with: hermes honcho mode [hybrid|honcho|local]\n")
        return
    if mode_arg not in MODES:
        print(f" Invalid mode '{mode_arg}'. Options: {', '.join(MODES)}\n")
        return
    cfg.setdefault("hosts", {}).setdefault(HOST, {})["memoryMode"] = mode_arg
    _write_config(cfg)
    print(f" Memory mode → {mode_arg} ({MODES[mode_arg]})\n")
def cmd_tokens(args) -> None:
    """Show or set token budget settings.

    With no flags, prints the effective context-token and dialectic budgets
    (host-level overrides global). ``--context``/``--dialectic`` persist new
    values under the host block.
    """
    cfg = _read_config()
    hosts = cfg.get("hosts", {})
    hermes = hosts.get(HOST, {})
    context = getattr(args, "context", None)
    dialectic = getattr(args, "dialectic", None)
    if context is None and dialectic is None:
        ctx_tokens = hermes.get("contextTokens") or cfg.get("contextTokens") or "(Honcho default)"
        d_chars = hermes.get("dialecticMaxChars") or cfg.get("dialecticMaxChars") or 600
        d_level = hermes.get("dialecticReasoningLevel") or cfg.get("dialecticReasoningLevel") or "low"
        # Fixed: separator was a mangled `"" * 40` that printed nothing.
        print("\nHoncho token settings\n" + "─" * 40)
        print(f" context tokens: {ctx_tokens}")
        print(" Max tokens Honcho returns from session.context() per turn.")
        print(" Injected into Hermes system prompt — counts against your LLM budget.")
        print(f" dialectic cap: {d_chars} chars")
        print(" Max chars of peer.chat() result injected per turn.")
        print(f" dialectic level: {d_level} (controls Honcho-side inference depth)")
        print("\n Set with: hermes honcho tokens [--context N] [--dialectic N]\n")
        return
    changed = False
    if context is not None:
        cfg.setdefault("hosts", {}).setdefault(HOST, {})["contextTokens"] = context
        print(f" context tokens → {context}")
        changed = True
    if dialectic is not None:
        cfg.setdefault("hosts", {}).setdefault(HOST, {})["dialecticMaxChars"] = dialectic
        print(f" dialectic cap → {dialectic} chars")
        changed = True
    if changed:
        _write_config(cfg)
        print(f" Saved to {GLOBAL_CONFIG_PATH}\n")
def cmd_identity(args) -> None:
    """Seed AI peer identity or show both peer representations.

    ``--show`` prints the user peer card and AI peer representation; a file
    argument seeds the AI peer from that file's contents; neither prints a
    short usage summary.
    """
    cfg = _read_config()
    if not cfg.get("apiKey"):
        print(" No API key configured. Run 'hermes honcho setup' first.\n")
        return
    file_path = getattr(args, "file", None)
    show = getattr(args, "show", False)
    try:
        from honcho_integration.client import HonchoClientConfig, get_honcho_client
        from honcho_integration.session import HonchoSessionManager
        hcfg = HonchoClientConfig.from_global_config()
        client = get_honcho_client(hcfg)
        mgr = HonchoSessionManager(honcho=client, config=hcfg)
        session_key = hcfg.resolve_session_name()
        mgr.get_or_create(session_key)
    except Exception as e:
        print(f" Honcho connection failed: {e}\n")
        return
    if show:
        # ── User peer ────────────────────────────────────────────────────────
        user_card = mgr.get_peer_card(session_key)
        # Fixed: separators below were mangled `"" * 40` that printed nothing.
        print(f"\nUser peer ({hcfg.peer_name or 'not set'})\n" + "─" * 40)
        if user_card:
            for fact in user_card:
                print(f" {fact}")
        else:
            print(" No user peer card yet. Send a few messages to build one.")
        # ── AI peer ──────────────────────────────────────────────────────────
        ai_rep = mgr.get_ai_representation(session_key)
        print(f"\nAI peer ({hcfg.ai_peer})\n" + "─" * 40)
        if ai_rep.get("representation"):
            print(ai_rep["representation"])
        elif ai_rep.get("card"):
            print(ai_rep["card"])
        else:
            print(" No representation built yet.")
            print(" Run 'hermes honcho identity <file>' to seed one.")
        print()
        return
    if not file_path:
        print("\nHoncho identity management\n" + "─" * 40)
        print(f" User peer: {hcfg.peer_name or 'not set'}")
        print(f" AI peer: {hcfg.ai_peer}")
        print()
        print(" hermes honcho identity --show — show both peer representations")
        print(" hermes honcho identity <file> — seed AI peer from SOUL.md or any .md/.txt\n")
        return
    # Path is imported at module level; the old function-local re-import was
    # redundant and has been removed.
    p = Path(file_path).expanduser()
    if not p.exists():
        print(f" File not found: {p}\n")
        return
    content = p.read_text(encoding="utf-8").strip()
    if not content:
        print(f" File is empty: {p}\n")
        return
    source = p.name
    ok = mgr.seed_ai_identity(session_key, content, source=source)
    if ok:
        print(f" Seeded AI peer identity from {p.name} into session '{session_key}'")
        print(f" Honcho will incorporate this into {hcfg.ai_peer}'s representation over time.\n")
    else:
        print(" Failed to seed identity. Check logs for details.\n")
def _open_session_manager():
    """Connect to Honcho and return ``(mgr, session_key)`` for the resolved session.

    Resets the client singleton so a freshly-written config is picked up.
    Raises on config/connection failure so the caller can report it.
    """
    from honcho_integration.client import (
        HonchoClientConfig,
        get_honcho_client,
        reset_honcho_client,
    )
    from honcho_integration.session import HonchoSessionManager
    reset_honcho_client()
    hcfg = HonchoClientConfig.from_global_config()
    client = get_honcho_client(hcfg)
    mgr = HonchoSessionManager(honcho=client, config=hcfg)
    session_key = hcfg.resolve_session_name()
    mgr.get_or_create(session_key)
    return mgr, session_key


def cmd_migrate(args) -> None:
    """Step-by-step migration guide: OpenClaw native memory → Hermes + Honcho.

    Detects OpenClaw memory files in the cwd and ~/.openclaw, walks the user
    through account setup, then optionally uploads user memory files and seeds
    the AI peer identity. The duplicated Honcho connection boilerplate for the
    two upload steps now lives in ``_open_session_manager``.
    """
    # Path is imported at module level; the old function-local re-import was
    # redundant and has been removed.
    # ── Detect OpenClaw native memory files ──────────────────────────────────
    cwd = Path(os.getcwd())
    openclaw_home = Path.home() / ".openclaw"
    # User peer: facts about the user
    user_file_names = ["USER.md", "MEMORY.md"]
    # AI peer: agent identity / configuration
    agent_file_names = ["SOUL.md", "IDENTITY.md", "AGENTS.md", "TOOLS.md", "BOOTSTRAP.md"]
    user_files: list[Path] = []
    agent_files: list[Path] = []
    for name in user_file_names:
        for d in (cwd, openclaw_home):
            p = d / name
            if p.exists() and p not in user_files:
                user_files.append(p)
    for name in agent_file_names:
        for d in (cwd, openclaw_home):
            p = d / name
            if p.exists() and p not in agent_files:
                agent_files.append(p)
    cfg = _read_config()
    has_key = bool(cfg.get("apiKey", ""))
    # Fixed: separator was a mangled `"" * 50` that printed nothing.
    print("\nHoncho migration: OpenClaw native memory → Hermes\n" + "─" * 50)
    print()
    print(" OpenClaw's native memory stores context in local markdown files")
    print(" (USER.md, MEMORY.md, SOUL.md, ...) and injects them via QMD search.")
    print(" Honcho replaces that with a cloud-backed, LLM-observable memory layer:")
    print(" context is retrieved semantically, injected automatically each turn,")
    print(" and enriched by a dialectic reasoning layer that builds over time.")
    print()
    # ── Step 1: Honcho account ────────────────────────────────────────────────
    print("Step 1 Create a Honcho account")
    print()
    if has_key:
        masked = f"...{cfg['apiKey'][-8:]}" if len(cfg["apiKey"]) > 8 else "set"
        print(f" Honcho API key already configured: {masked}")
        print(" Skip to Step 2.")
    else:
        print(" Honcho is a cloud memory service. You need a free account to use it.")
        print()
        print(" 1. Go to https://app.honcho.dev and create an account.")
        print(" 2. Copy your API key from the dashboard.")
        print(" 3. Run: hermes honcho setup")
        print(" This will store the key and create a workspace for this project.")
        print()
        answer = _prompt(" Run 'hermes honcho setup' now?", default="y")
        if answer.lower() in ("y", "yes"):
            cmd_setup(args)
            # Re-read: setup may have just written a key.
            cfg = _read_config()
            has_key = bool(cfg.get("apiKey", ""))
        else:
            print()
            print(" Run 'hermes honcho setup' when ready, then re-run this walkthrough.")
    # ── Step 2: Detected files ────────────────────────────────────────────────
    print()
    print("Step 2 Detected OpenClaw memory files")
    print()
    if user_files or agent_files:
        if user_files:
            print(f" User memory ({len(user_files)} file(s)) — will go to Honcho user peer:")
            for f in user_files:
                print(f" {f}")
        if agent_files:
            print(f" Agent identity ({len(agent_files)} file(s)) — will go to Honcho AI peer:")
            for f in agent_files:
                print(f" {f}")
    else:
        print(" No OpenClaw native memory files found in cwd or ~/.openclaw/.")
        print(" If your files are elsewhere, copy them here before continuing,")
        print(" or seed them manually: hermes honcho identity <path/to/file>")
    # ── Step 3: Migrate user memory ───────────────────────────────────────────
    print()
    print("Step 3 Migrate user memory files → Honcho user peer")
    print()
    print(" USER.md and MEMORY.md contain facts about you that the agent should")
    print(" remember across sessions. Honcho will store these under your user peer")
    print(" and inject relevant excerpts into the system prompt automatically.")
    print()
    if user_files:
        print(f" Found: {', '.join(f.name for f in user_files)}")
        print()
        print(" These are picked up automatically the first time you run 'hermes'")
        print(" with Honcho configured and no prior session history.")
        print(" (Hermes calls migrate_memory_files() on first session init.)")
        print()
        print(" If you want to migrate them now without starting a session:")
        # Fixed: this hint was printed once per detected file (loop variable
        # unused) — the message is static, so print it once.
        print(" hermes honcho migrate — this step handles it interactively")
        if has_key:
            answer = _prompt(" Upload user memory files to Honcho now?", default="y")
            if answer.lower() in ("y", "yes"):
                try:
                    mgr, session_key = _open_session_manager()
                    # Upload from each directory that had user files
                    dirs_with_files = set(str(f.parent) for f in user_files)
                    any_uploaded = False
                    for d in dirs_with_files:
                        if mgr.migrate_memory_files(session_key, d):
                            any_uploaded = True
                    if any_uploaded:
                        print(f" Uploaded user memory files from: {', '.join(dirs_with_files)}")
                    else:
                        print(" Nothing uploaded (files may already be migrated or empty).")
                except Exception as e:
                    print(f" Failed: {e}")
        else:
            print(" Run 'hermes honcho setup' first, then re-run this step.")
    else:
        print(" No user memory files detected. Nothing to migrate here.")
    # ── Step 4: Seed AI identity ──────────────────────────────────────────────
    print()
    print("Step 4 Seed AI identity files → Honcho AI peer")
    print()
    print(" SOUL.md, IDENTITY.md, AGENTS.md, TOOLS.md, BOOTSTRAP.md define the")
    print(" agent's character, capabilities, and behavioral rules. In OpenClaw")
    print(" these are injected via file search at prompt-build time.")
    print()
    print(" In Hermes, they are seeded once into Honcho's AI peer through the")
    print(" observation pipeline. Honcho builds a representation from them and")
    print(" from every subsequent assistant message (observe_me=True). Over time")
    print(" the representation reflects actual behavior, not just declaration.")
    print()
    if agent_files:
        print(f" Found: {', '.join(f.name for f in agent_files)}")
        print()
        if has_key:
            answer = _prompt(" Seed AI identity from all detected files now?", default="y")
            if answer.lower() in ("y", "yes"):
                try:
                    mgr, session_key = _open_session_manager()
                    for f in agent_files:
                        content = f.read_text(encoding="utf-8").strip()
                        if content:
                            ok = mgr.seed_ai_identity(session_key, content, source=f.name)
                            status = "seeded" if ok else "failed"
                            print(f" {f.name}: {status}")
                except Exception as e:
                    print(f" Failed: {e}")
        else:
            print(" Run 'hermes honcho setup' first, then seed manually:")
            for f in agent_files:
                print(f" hermes honcho identity {f}")
    else:
        print(" No agent identity files detected.")
        print(" To seed manually: hermes honcho identity <path/to/SOUL.md>")
    # ── Step 5: What changes ──────────────────────────────────────────────────
    print()
    print("Step 5 What changes vs. OpenClaw native memory")
    print()
    print(" Storage")
    print(" OpenClaw: markdown files on disk, searched via QMD at prompt-build time.")
    print(" Hermes: cloud-backed Honcho peers. Files can stay on disk as source")
    print(" of truth; Honcho holds the live representation.")
    print()
    print(" Context injection")
    print(" OpenClaw: file excerpts injected synchronously before each LLM call.")
    print(" Hermes: Honcho context prefetched async at turn end, injected next turn.")
    print(" First turn has no Honcho context; subsequent turns are loaded.")
    print()
    print(" Memory growth")
    print(" OpenClaw: you edit files manually to update memory.")
    print(" Hermes: Honcho observes every message and updates representations")
    print(" automatically. Files become the seed, not the live store.")
    print()
    print(" Tool surface (available to the agent during conversation)")
    print(" query_user_context — ask Honcho a question, get a synthesized answer (LLM)")
    print(" honcho_search — semantic search over stored context (no LLM)")
    print(" honcho_profile — fast peer card snapshot (no LLM)")
    print()
    print(" Session naming")
    print(" OpenClaw: no persistent session concept — files are global.")
    print(" Hermes: per-session by default — each run gets a new Honcho session")
    print(" Map a custom name: hermes honcho map <session-name>")
    # ── Step 6: Next steps ────────────────────────────────────────────────────
    print()
    print("Step 6 Next steps")
    print()
    if not has_key:
        print(" 1. hermes honcho setup — configure API key (required)")
        print(" 2. hermes honcho migrate — re-run this walkthrough")
    else:
        print(" 1. hermes honcho status — verify Honcho connection")
        print(" 2. hermes — start a session")
        print(" (user memory files auto-uploaded on first turn if not done above)")
        print(" 3. hermes honcho identity --show — verify AI peer representation")
        print(" 4. hermes honcho tokens — tune context and dialectic budgets")
        print(" 5. hermes honcho mode — view or change memory mode")
    print()
def honcho_command(args) -> None:
    """Route `hermes honcho <sub>` to the matching cmd_* handler.

    A missing subcommand runs the setup wizard; unknown names print the list
    of available subcommands.
    """
    sub = getattr(args, "honcho_command", None)
    if sub is None:
        # Bare `hermes honcho` defaults to the setup wizard.
        cmd_setup(args)
        return
    # Handlers follow the cmd_<sub> naming convention, so dispatch by name.
    handler = globals().get(f"cmd_{sub}")
    if handler is None:
        print(f" Unknown honcho command: {sub}")
        print(" Available: setup, status, sessions, map, peer, mode, tokens, identity, migrate\n")
        return
    handler(args)

View file

@ -27,6 +27,30 @@ GLOBAL_CONFIG_PATH = Path.home() / ".honcho" / "config.json"
HOST = "hermes"
def _resolve_memory_mode(
global_val: str | dict,
host_val: str | dict | None,
) -> dict:
"""Parse memoryMode (string or object) into memory_mode + peer_memory_modes.
Resolution order: host-level wins over global.
String form: applies as the default for all peers.
Object form: { "default": "hybrid", "hermes": "honcho", ... }
"default" key sets the fallback; other keys are per-peer overrides.
"""
# Pick the winning value (host beats global)
val = host_val if host_val is not None else global_val
if isinstance(val, dict):
default = val.get("default", "hybrid")
overrides = {k: v for k, v in val.items() if k != "default"}
else:
default = str(val) if val else "hybrid"
overrides = {}
return {"memory_mode": default, "peer_memory_modes": overrides}
@dataclass
class HonchoClientConfig:
"""Configuration for Honcho client, resolved for a specific host."""
@ -42,10 +66,36 @@ class HonchoClientConfig:
# Toggles
enabled: bool = False
save_messages: bool = True
# memoryMode: default for all peers. "hybrid" / "honcho" / "local"
memory_mode: str = "hybrid"
# Per-peer overrides — any named Honcho peer. Override memory_mode when set.
# Config object form: "memoryMode": { "default": "hybrid", "hermes": "honcho" }
peer_memory_modes: dict[str, str] = field(default_factory=dict)
def peer_memory_mode(self, peer_name: str) -> str:
"""Return the effective memory mode for a named peer.
Resolution: per-peer override global memory_mode default.
"""
return self.peer_memory_modes.get(peer_name, self.memory_mode)
# Write frequency: "async" (background thread), "turn" (sync per turn),
# "session" (flush on session end), or int (every N turns)
write_frequency: str | int = "async"
# Prefetch budget
context_tokens: int | None = None
# Dialectic (peer.chat) settings
# reasoning_level: "minimal" | "low" | "medium" | "high" | "max"
# Used as the default; prefetch_dialectic may bump it dynamically.
dialectic_reasoning_level: str = "low"
# Max chars of dialectic result to inject into Hermes system prompt
dialectic_max_chars: int = 600
# Recall mode: how memory retrieval works when Honcho is active.
# "auto" — pre-warmed context + memory tools available (model decides)
# "context" — pre-warmed context only, honcho memory tools removed
# "tools" — no pre-loaded context, rely on tool calls only
recall_mode: str = "auto"
# Session resolution
session_strategy: str = "per-directory"
session_strategy: str = "per-session"
session_peer_prefix: bool = False
sessions: dict[str, str] = field(default_factory=dict)
# Raw global config for anything else consumers need
@ -109,6 +159,17 @@ class HonchoClientConfig:
# Respect explicit setting
enabled = explicit_enabled
# write_frequency: accept int or string
raw_wf = (
host_block.get("writeFrequency")
or raw.get("writeFrequency")
or "async"
)
try:
write_frequency: str | int = int(raw_wf)
except (TypeError, ValueError):
write_frequency = str(raw_wf)
return cls(
host=host,
workspace_id=workspace,
@ -119,31 +180,105 @@ class HonchoClientConfig:
linked_hosts=linked_hosts,
enabled=enabled,
save_messages=raw.get("saveMessages", True),
context_tokens=raw.get("contextTokens") or host_block.get("contextTokens"),
session_strategy=raw.get("sessionStrategy", "per-directory"),
**_resolve_memory_mode(
raw.get("memoryMode", "hybrid"),
host_block.get("memoryMode"),
),
write_frequency=write_frequency,
context_tokens=host_block.get("contextTokens") or raw.get("contextTokens"),
dialectic_reasoning_level=(
host_block.get("dialecticReasoningLevel")
or raw.get("dialecticReasoningLevel")
or "low"
),
dialectic_max_chars=int(
host_block.get("dialecticMaxChars")
or raw.get("dialecticMaxChars")
or 600
),
recall_mode=(
host_block.get("recallMode")
or raw.get("recallMode")
or "auto"
),
session_strategy=raw.get("sessionStrategy", "per-session"),
session_peer_prefix=raw.get("sessionPeerPrefix", False),
sessions=raw.get("sessions", {}),
raw=raw,
)
def resolve_session_name(self, cwd: str | None = None) -> str | None:
"""Resolve session name for a directory.
@staticmethod
def _git_repo_name(cwd: str) -> str | None:
"""Return the git repo root directory name, or None if not in a repo."""
import subprocess
Checks manual overrides first, then derives from directory name.
try:
root = subprocess.run(
["git", "rev-parse", "--show-toplevel"],
capture_output=True, text=True, cwd=cwd, timeout=5,
)
if root.returncode == 0:
return Path(root.stdout.strip()).name
except (OSError, subprocess.TimeoutExpired):
pass
return None
def resolve_session_name(
self,
cwd: str | None = None,
session_title: str | None = None,
session_id: str | None = None,
) -> str | None:
"""Resolve Honcho session name.
Resolution order:
1. Manual directory override from sessions map
2. Hermes session title (from /title command)
3. per-session strategy Hermes session_id ({timestamp}_{hex})
4. per-repo strategy git repo root directory name
5. per-directory strategy directory basename
6. global strategy workspace name
"""
import re
if not cwd:
cwd = os.getcwd()
# Manual override
# Manual override always wins
manual = self.sessions.get(cwd)
if manual:
return manual
# Derive from directory basename
base = Path(cwd).name
if self.session_peer_prefix and self.peer_name:
return f"{self.peer_name}-{base}"
return base
# /title mid-session remap
if session_title:
sanitized = re.sub(r'[^a-zA-Z0-9_-]', '-', session_title).strip('-')
if sanitized:
if self.session_peer_prefix and self.peer_name:
return f"{self.peer_name}-{sanitized}"
return sanitized
# per-session: inherit Hermes session_id (new Honcho session each run)
if self.session_strategy == "per-session" and session_id:
if self.session_peer_prefix and self.peer_name:
return f"{self.peer_name}-{session_id}"
return session_id
# per-repo: one Honcho session per git repository
if self.session_strategy == "per-repo":
base = self._git_repo_name(cwd) or Path(cwd).name
if self.session_peer_prefix and self.peer_name:
return f"{self.peer_name}-{base}"
return base
# per-directory: one Honcho session per working directory
if self.session_strategy in ("per-directory", "per-session"):
base = Path(cwd).name
if self.session_peer_prefix and self.peer_name:
return f"{self.peer_name}-{base}"
return base
# global: single session across all directories
return self.workspace_id
def get_linked_workspaces(self) -> list[str]:
"""Resolve linked host keys to workspace names."""

View file

@ -2,8 +2,10 @@
from __future__ import annotations
import queue
import re
import logging
import threading
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, TYPE_CHECKING
@ -15,6 +17,9 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
# Sentinel to signal the async writer thread to shut down
_ASYNC_SHUTDOWN = object()
@dataclass
class HonchoSession:
@ -80,7 +85,8 @@ class HonchoSessionManager:
Args:
honcho: Optional Honcho client. If not provided, uses the singleton.
context_tokens: Max tokens for context() calls (None = Honcho default).
config: HonchoClientConfig from global config (provides peer_name, ai_peer, etc.).
config: HonchoClientConfig from global config (provides peer_name, ai_peer,
write_frequency, memory_mode, etc.).
"""
self._honcho = honcho
self._context_tokens = context_tokens
@ -89,6 +95,33 @@ class HonchoSessionManager:
self._peers_cache: dict[str, Any] = {}
self._sessions_cache: dict[str, Any] = {}
# Write frequency state
write_frequency = (config.write_frequency if config else "async")
self._write_frequency = write_frequency
self._turn_counter: int = 0
# Prefetch caches: session_key → last result (consumed once per turn)
self._context_cache: dict[str, dict] = {}
self._dialectic_cache: dict[str, str] = {}
self._dialectic_reasoning_level: str = (
config.dialectic_reasoning_level if config else "low"
)
self._dialectic_max_chars: int = (
config.dialectic_max_chars if config else 600
)
# Async write queue — started lazily on first enqueue
self._async_queue: queue.Queue | None = None
self._async_thread: threading.Thread | None = None
if write_frequency == "async":
self._async_queue = queue.Queue()
self._async_thread = threading.Thread(
target=self._async_writer_loop,
name="honcho-async-writer",
daemon=True,
)
self._async_thread.start()
@property
def honcho(self) -> Honcho:
"""Get the Honcho client, initializing if needed."""
@ -125,10 +158,12 @@ class HonchoSessionManager:
session = self.honcho.session(session_id)
# Configure peer observation settings
# Configure peer observation settings.
# observe_me=True for AI peer so Honcho watches what the agent says
# and builds its representation over time — enabling identity formation.
from honcho.session import SessionPeerConfig
user_config = SessionPeerConfig(observe_me=True, observe_others=True)
ai_config = SessionPeerConfig(observe_me=False, observe_others=True)
ai_config = SessionPeerConfig(observe_me=True, observe_others=True)
session.add_peers([(user_peer, user_config), (assistant_peer, ai_config)])
@ -234,16 +269,11 @@ class HonchoSessionManager:
self._cache[key] = session
return session
def save(self, session: HonchoSession) -> None:
"""
Save messages to Honcho.
Syncs only new (unsynced) messages from the local cache.
"""
def _flush_session(self, session: HonchoSession) -> None:
"""Internal: write unsynced messages to Honcho synchronously."""
if not session.messages:
return
# Get the Honcho session and peers
user_peer = self._get_or_create_peer(session.user_peer_id)
assistant_peer = self._get_or_create_peer(session.assistant_peer_id)
honcho_session = self._sessions_cache.get(session.honcho_session_id)
@ -253,9 +283,7 @@ class HonchoSessionManager:
session.honcho_session_id, user_peer, assistant_peer
)
# Only send new messages (those without a '_synced' flag)
new_messages = [m for m in session.messages if not m.get("_synced")]
if not new_messages:
return
@ -274,9 +302,83 @@ class HonchoSessionManager:
msg["_synced"] = False
logger.error("Failed to sync messages to Honcho: %s", e)
# Update cache
self._cache[session.key] = session
def _async_writer_loop(self) -> None:
    """Daemon loop: drain the async write queue until shutdown.

    Blocks on the queue with a 5-second poll, flushes each dequeued
    session, and exits when the shutdown sentinel is received. A
    failed flush is retried once after a short pause; a second
    failure drops that batch with an error log.
    """
    while True:
        try:
            try:
                pending = self._async_queue.get(timeout=5)
            except queue.Empty:
                # Nothing to write this interval — keep polling.
                continue
            if pending is _ASYNC_SHUTDOWN:
                break
            try:
                self._flush_session(pending)
            except Exception as first_err:
                logger.warning("Honcho async write failed, retrying once: %s", first_err)
                import time as _time
                _time.sleep(2)
                try:
                    self._flush_session(pending)
                except Exception as second_err:
                    logger.error("Honcho async write retry failed, dropping batch: %s", second_err)
        except Exception as e:
            # Catch-all so an unexpected error never kills the writer thread.
            logger.error("Honcho async writer error: %s", e)
def save(self, session: HonchoSession) -> None:
"""Save messages to Honcho, respecting write_frequency.
write_frequency modes:
"async" enqueue for background thread (zero blocking, zero token cost)
"turn" flush synchronously every turn
"session" defer until flush_session() is called explicitly
N (int) flush every N turns
"""
self._turn_counter += 1
wf = self._write_frequency
if wf == "async":
if self._async_queue is not None:
self._async_queue.put(session)
elif wf == "turn":
self._flush_session(session)
elif wf == "session":
# Accumulate; caller must call flush_all() at session end
pass
elif isinstance(wf, int) and wf > 0:
if self._turn_counter % wf == 0:
self._flush_session(session)
def flush_all(self) -> None:
    """Flush all pending unsynced messages for all cached sessions.

    Called at session end for "session" write_frequency, or to force
    a sync before process exit regardless of mode.
    """
    for cached in list(self._cache.values()):
        try:
            self._flush_session(cached)
        except Exception as exc:
            logger.error("Honcho flush_all error for %s: %s", cached.key, exc)
    if self._async_queue is None:
        return
    # Synchronously drain anything the background writer has not
    # picked up yet, skipping the shutdown sentinel if queued.
    while not self._async_queue.empty():
        try:
            pending = self._async_queue.get_nowait()
            if pending is not _ASYNC_SHUTDOWN:
                self._flush_session(pending)
        except queue.Empty:
            break
def shutdown(self) -> None:
    """Gracefully stop the async writer: flush, signal, then join."""
    if self._async_queue is None or self._async_thread is None:
        return
    # Flush everything first so the sentinel is the last queue item.
    self.flush_all()
    self._async_queue.put(_ASYNC_SHUTDOWN)
    self._async_thread.join(timeout=10)
def delete(self, key: str) -> bool:
"""Delete a session from local cache."""
if key in self._cache:
@ -305,49 +407,141 @@ class HonchoSessionManager:
# get_or_create will create a fresh session
session = self.get_or_create(new_key)
# Cache under both original key and timestamped key
# Cache under the original key so callers find it by the expected name
self._cache[key] = session
self._cache[new_key] = session
logger.info("Created new session for %s (honcho: %s)", key, session.honcho_session_id)
return session
def get_user_context(self, session_key: str, query: str) -> str:
_REASONING_LEVELS = ("minimal", "low", "medium", "high", "max")
def _dynamic_reasoning_level(self, query: str) -> str:
"""
Query Honcho's dialectic chat for user context.
Pick a reasoning level based on message complexity.
Uses the configured default as a floor; bumps up for longer or
more complex messages so Honcho applies more inference where it matters.
< 120 chars default (typically "low")
120400 chars one level above default (cap at "high")
> 400 chars two levels above default (cap at "high")
"max" is never selected automatically reserve it for explicit config.
"""
levels = self._REASONING_LEVELS
default_idx = levels.index(self._dialectic_reasoning_level) if self._dialectic_reasoning_level in levels else 1
n = len(query)
if n < 120:
bump = 0
elif n < 400:
bump = 1
else:
bump = 2
# Cap at "high" (index 3) for auto-selection
idx = min(default_idx + bump, 3)
return levels[idx]
def dialectic_query(self, session_key: str, query: str, reasoning_level: str | None = None) -> str:
"""
Query Honcho's dialectic endpoint about the user.
Runs an LLM on Honcho's backend against the user peer's full
representation. Higher latency than context() call async via
prefetch_dialectic() to avoid blocking the response.
Args:
session_key: The session key to get context for.
session_key: The session key to query against.
query: Natural language question about the user.
reasoning_level: Override the config default. If None, uses
_dynamic_reasoning_level(query).
Returns:
Honcho's response about the user.
Honcho's synthesized answer, or empty string on failure.
"""
session = self._cache.get(session_key)
if not session:
return "No session found for this context."
return ""
user_peer = self._get_or_create_peer(session.user_peer_id)
level = reasoning_level or self._dynamic_reasoning_level(query)
try:
return user_peer.chat(query)
result = user_peer.chat(query, reasoning_level=level) or ""
# Apply Hermes-side char cap before caching
if result and self._dialectic_max_chars and len(result) > self._dialectic_max_chars:
result = result[:self._dialectic_max_chars].rsplit(" ", 1)[0] + ""
return result
except Exception as e:
logger.error("Failed to get user context from Honcho: %s", e)
return f"Unable to retrieve user context: {e}"
logger.warning("Honcho dialectic query failed: %s", e)
return ""
def prefetch_dialectic(self, session_key: str, query: str) -> None:
    """
    Fire a dialectic_query in a background thread, caching the result.

    Non-blocking. The result is available via pop_dialectic_result()
    on the next call (typically the following turn). Reasoning level
    is selected dynamically based on query complexity.

    Args:
        session_key: The session key to query against.
        query: The user's current message, used as the query.
    """
    def _worker() -> None:
        # Cache only non-empty answers so pop_dialectic_result() can
        # treat a missing key as "nothing ready yet".
        answer = self.dialectic_query(session_key, query)
        if answer:
            self._dialectic_cache[session_key] = answer

    threading.Thread(
        target=_worker,
        name="honcho-dialectic-prefetch",
        daemon=True,
    ).start()
def pop_dialectic_result(self, session_key: str) -> str:
"""
Return and clear the cached dialectic result for this session.
Returns empty string if no result is ready yet.
"""
return self._dialectic_cache.pop(session_key, "")
def prefetch_context(self, session_key: str, user_message: str | None = None) -> None:
    """
    Fire get_prefetch_context in a background thread, caching the result.

    Non-blocking. Consumed next turn via pop_context_result(). This avoids
    a synchronous HTTP round-trip blocking every response.
    """
    def _worker() -> None:
        # Cache only non-empty results so pop_context_result() can
        # signal "nothing ready" with an empty dict.
        ctx = self.get_prefetch_context(session_key, user_message)
        if ctx:
            self._context_cache[session_key] = ctx

    threading.Thread(
        target=_worker,
        name="honcho-context-prefetch",
        daemon=True,
    ).start()
def pop_context_result(self, session_key: str) -> dict[str, str]:
"""
Return and clear the cached context result for this session.
Returns empty dict if no result is ready yet (first turn).
"""
return self._context_cache.pop(session_key, {})
def get_prefetch_context(self, session_key: str, user_message: str | None = None) -> dict[str, str]:
"""
Pre-fetch user context using Honcho's context() method.
Pre-fetch user and AI peer context from Honcho.
Single API call that returns the user's representation
and peer card, using semantic search based on the user's message.
Fetches peer_representation and peer_card for both peers. search_query
is intentionally omitted it would only affect additional excerpts
that this code does not consume, and passing the raw message exposes
conversation content in server access logs.
Args:
session_key: The session key to get context for.
user_message: The user's message for semantic search.
user_message: Unused; kept for call-site compatibility.
Returns:
Dictionary with 'representation' and 'card' keys.
Dictionary with 'representation', 'card', 'ai_representation',
and 'ai_card' keys.
"""
session = self._cache.get(session_key)
if not session:
@ -357,23 +551,35 @@ class HonchoSessionManager:
if not honcho_session:
return {}
result: dict[str, str] = {}
try:
ctx = honcho_session.context(
summary=False,
tokens=self._context_tokens,
peer_target=session.user_peer_id,
search_query=user_message,
peer_perspective=session.assistant_peer_id,
)
# peer_card is list[str] in SDK v2, join for prompt injection
card = ctx.peer_card or []
card_str = "\n".join(card) if isinstance(card, list) else str(card)
return {
"representation": ctx.peer_representation or "",
"card": card_str,
}
result["representation"] = ctx.peer_representation or ""
result["card"] = "\n".join(card) if isinstance(card, list) else str(card)
except Exception as e:
logger.warning("Failed to fetch context from Honcho: %s", e)
return {}
logger.warning("Failed to fetch user context from Honcho: %s", e)
# Also fetch AI peer's own representation so Hermes knows itself.
try:
ai_ctx = honcho_session.context(
summary=False,
tokens=self._context_tokens,
peer_target=session.assistant_peer_id,
peer_perspective=session.user_peer_id,
)
ai_card = ai_ctx.peer_card or []
result["ai_representation"] = ai_ctx.peer_representation or ""
result["ai_card"] = "\n".join(ai_card) if isinstance(ai_card, list) else str(ai_card)
except Exception as e:
logger.debug("Failed to fetch AI peer context from Honcho: %s", e)
return result
def migrate_local_history(self, session_key: str, messages: list[dict[str, Any]]) -> bool:
"""
@ -491,6 +697,7 @@ class HonchoSessionManager:
files = [
("MEMORY.md", "consolidated_memory.md", "Long-term agent notes and preferences"),
("USER.md", "user_profile.md", "User profile and preferences"),
("SOUL.md", "agent_soul.md", "Agent persona and identity configuration"),
]
for filename, upload_name, description in files:
@ -525,6 +732,150 @@ class HonchoSessionManager:
return uploaded
def get_peer_card(self, session_key: str) -> list[str]:
"""
Fetch the user peer's card — a curated list of key facts.
Fast, no LLM reasoning. Returns raw structured facts Honcho has
inferred about the user (name, role, preferences, patterns).
Empty list if unavailable.
"""
session = self._cache.get(session_key)
if not session:
return []
honcho_session = self._sessions_cache.get(session.honcho_session_id)
if not honcho_session:
return []
try:
ctx = honcho_session.context(
summary=False,
tokens=200,
peer_target=session.user_peer_id,
peer_perspective=session.assistant_peer_id,
)
card = ctx.peer_card or []
return card if isinstance(card, list) else [str(card)]
except Exception as e:
logger.debug("Failed to fetch peer card from Honcho: %s", e)
return []
def search_context(self, session_key: str, query: str, max_tokens: int = 800) -> str:
"""
Semantic search over Honcho session context.
Returns raw excerpts ranked by relevance to the query. No LLM
reasoning cheaper and faster than dialectic_query. Good for
factual lookups where the model will do its own synthesis.
Args:
session_key: Session to search against.
query: Search query for semantic matching.
max_tokens: Token budget for returned content.
Returns:
Relevant context excerpts as a string, or empty string if none.
"""
session = self._cache.get(session_key)
if not session:
return ""
honcho_session = self._sessions_cache.get(session.honcho_session_id)
if not honcho_session:
return ""
try:
ctx = honcho_session.context(
summary=False,
tokens=max_tokens,
peer_target=session.user_peer_id,
peer_perspective=session.assistant_peer_id,
search_query=query,
)
parts = []
if ctx.peer_representation:
parts.append(ctx.peer_representation)
card = ctx.peer_card or []
if card:
facts = card if isinstance(card, list) else [str(card)]
parts.append("\n".join(f"- {f}" for f in facts))
return "\n\n".join(parts)
except Exception as e:
logger.debug("Honcho search_context failed: %s", e)
return ""
def seed_ai_identity(self, session_key: str, content: str, source: str = "manual") -> bool:
"""
Seed the AI peer's Honcho representation from text content.
Useful for priming AI identity from SOUL.md, exported chats, or
any structured description. The content is sent as an assistant
peer message so Honcho's reasoning model can incorporate it.
Args:
session_key: The session key to associate with.
content: The identity/persona content to seed.
source: Metadata tag for the source (e.g. "soul_md", "export").
Returns:
True on success, False on failure.
"""
if not content or not content.strip():
return False
session = self._cache.get(session_key)
if not session:
logger.warning("No session cached for '%s', skipping AI seed", session_key)
return False
assistant_peer = self._get_or_create_peer(session.assistant_peer_id)
try:
wrapped = (
f"<ai_identity_seed>\n"
f"<source>{source}</source>\n"
f"\n"
f"{content.strip()}\n"
f"</ai_identity_seed>"
)
assistant_peer.add_message("assistant", wrapped)
logger.info("Seeded AI identity from '%s' into %s", source, session_key)
return True
except Exception as e:
logger.error("Failed to seed AI identity: %s", e)
return False
def get_ai_representation(self, session_key: str) -> dict[str, str]:
"""
Fetch the AI peer's current Honcho representation.
Returns:
Dict with 'representation' and 'card' keys, empty strings if unavailable.
"""
session = self._cache.get(session_key)
if not session:
return {"representation": "", "card": ""}
honcho_session = self._sessions_cache.get(session.honcho_session_id)
if not honcho_session:
return {"representation": "", "card": ""}
try:
ctx = honcho_session.context(
summary=False,
tokens=self._context_tokens,
peer_target=session.assistant_peer_id,
peer_perspective=session.user_peer_id,
)
ai_card = ctx.peer_card or []
return {
"representation": ctx.peer_representation or "",
"card": "\n".join(ai_card) if isinstance(ai_card, list) else str(ai_card),
}
except Exception as e:
logger.debug("Failed to fetch AI representation: %s", e)
return {"representation": "", "card": ""}
def list_sessions(self) -> list[dict[str, Any]]:
"""List all cached sessions."""
return [