The architecture has been updated
This commit is contained in:
parent
805f7a017e
commit
a01257ead9
1119 changed files with 226 additions and 352 deletions
9
hermes_code/honcho_integration/__init__.py
Normal file
9
hermes_code/honcho_integration/__init__.py
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
"""Honcho integration for AI-native memory.
|
||||
|
||||
This package is only active when honcho.enabled=true in config and
|
||||
HONCHO_API_KEY is set. All honcho-ai imports are deferred to avoid
|
||||
ImportError when the package is not installed.
|
||||
|
||||
Named ``honcho_integration`` (not ``honcho``) to avoid shadowing the
|
||||
``honcho`` package installed by the ``honcho-ai`` SDK.
|
||||
"""
|
||||
780
hermes_code/honcho_integration/cli.py
Normal file
780
hermes_code/honcho_integration/cli.py
Normal file
|
|
@ -0,0 +1,780 @@
|
|||
"""CLI commands for Honcho integration management.
|
||||
|
||||
Handles: hermes honcho setup | status | sessions | map | peer
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
from honcho_integration.client import resolve_config_path, GLOBAL_CONFIG_PATH
|
||||
|
||||
HOST = "hermes"
|
||||
|
||||
|
||||
def _config_path() -> Path:
    """Return the active Honcho config path (instance-local or global).

    Thin alias over ``resolve_config_path`` so CLI code has a single
    canonical place to ask for the config location.
    """
    return resolve_config_path()
|
||||
|
||||
|
||||
def _read_config() -> dict:
    """Load the active Honcho config; return {} when absent or unreadable.

    Missing file and malformed JSON are treated identically: the CLI
    degrades to an empty config instead of crashing.
    """
    cfg_file = _config_path()
    if not cfg_file.exists():
        return {}
    try:
        return json.loads(cfg_file.read_text(encoding="utf-8"))
    except Exception:
        # Best-effort read: a corrupt config behaves like no config.
        return {}
|
||||
|
||||
|
||||
def _write_config(cfg: dict, path: Path | None = None) -> None:
|
||||
path = path or _config_path()
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
path.write_text(
|
||||
json.dumps(cfg, indent=2, ensure_ascii=False) + "\n",
|
||||
encoding="utf-8",
|
||||
)
|
||||
|
||||
|
||||
def _resolve_api_key(cfg: dict) -> str:
    """Resolve the Honcho API key: host block -> config root -> env var."""
    host_block = (cfg.get("hosts") or {}).get(HOST) or {}
    for candidate in (
        host_block.get("apiKey"),
        cfg.get("apiKey", ""),
        os.environ.get("HONCHO_API_KEY", ""),
    ):
        if candidate:
            return candidate
    return ""
|
||||
|
||||
|
||||
def _prompt(label: str, default: str | None = None, secret: bool = False) -> str:
    """Prompt on stdout and read one line from stdin.

    Returns the entered value, or *default* (or "") when the user enters
    nothing. With ``secret=True`` on a TTY the input is read via getpass
    so it is not echoed; over a pipe (test runners, scripted input) it
    falls back to a plaintext readline.
    """
    hint = f" [{default}]" if default else ""
    sys.stdout.write(f" {label}{hint}: ")
    sys.stdout.flush()
    if secret and sys.stdin.isatty():
        import getpass
        entered = getpass.getpass(prompt="")
    else:
        # Non-secret, or secret over non-TTY stdin — read plaintext.
        entered = sys.stdin.readline().strip()
    return entered or (default or "")
|
||||
|
||||
|
||||
def _ensure_sdk_installed() -> bool:
    """Check honcho-ai is importable; offer to install if not. Returns True if ready."""
    try:
        import honcho  # noqa: F401
    except ImportError:
        pass
    else:
        return True

    print(" honcho-ai is not installed.")
    answer = _prompt("Install it now? (honcho-ai>=2.0.1)", default="y")
    if answer.lower() not in ("y", "yes"):
        print(" Skipping install. Run: pip install 'honcho-ai>=2.0.1'\n")
        return False

    import subprocess
    print(" Installing honcho-ai...", flush=True)
    proc = subprocess.run(
        [sys.executable, "-m", "pip", "install", "honcho-ai>=2.0.1"],
        capture_output=True,
        text=True,
    )
    if proc.returncode != 0:
        print(f" Install failed:\n{proc.stderr.strip()}")
        print(" Run manually: pip install 'honcho-ai>=2.0.1'\n")
        return False

    print(" Installed.\n")
    return True
|
||||
|
||||
|
||||
def cmd_setup(args) -> None:
    """Interactive Honcho setup wizard.

    Walks the user through API key, peer names, workspace, memory mode,
    write frequency, recall mode and session strategy, writes the result
    to the active config, then runs a live connection test.
    """
    cfg = _read_config()

    active_path = _config_path()
    print("\nHoncho memory setup\n" + "─" * 40)
    print(" Honcho gives Hermes persistent cross-session memory.")
    if active_path != GLOBAL_CONFIG_PATH:
        print(f" Instance config: {active_path}")
    else:
        print(" Config is shared with other hosts at ~/.honcho/config.json")
    print()

    if not _ensure_sdk_installed():
        return

    # All writes go to hosts.hermes — root keys are managed by the user
    # or the honcho CLI only.
    hosts = cfg.setdefault("hosts", {})
    hermes_host = hosts.setdefault(HOST, {})

    # API key — shared credential, lives at root so all hosts can read it.
    current_key = cfg.get("apiKey", "")
    masked = f"...{current_key[-8:]}" if len(current_key) > 8 else ("set" if current_key else "not set")
    print(f" Current API key: {masked}")
    new_key = _prompt("Honcho API key (leave blank to keep current)", secret=True)
    if new_key:
        cfg["apiKey"] = new_key

    if not cfg.get("apiKey", ""):
        print("\n No API key configured. Get your API key at https://app.honcho.dev")
        print(" Run 'hermes honcho setup' again once you have a key.\n")
        return

    # Peer name — the user's identity in Honcho.
    current_peer = hermes_host.get("peerName") or cfg.get("peerName", "")
    new_peer = _prompt("Your name (user peer)", default=current_peer or os.getenv("USER", "user"))
    if new_peer:
        hermes_host["peerName"] = new_peer

    current_workspace = hermes_host.get("workspace") or cfg.get("workspace", "hermes")
    new_workspace = _prompt("Workspace ID", default=current_workspace)
    if new_workspace:
        hermes_host["workspace"] = new_workspace

    hermes_host.setdefault("aiPeer", HOST)

    # Memory mode — invalid input falls back to the default.
    current_mode = hermes_host.get("memoryMode") or cfg.get("memoryMode", "hybrid")
    print("\n Memory mode options:")
    print(" hybrid — write to both Honcho and local MEMORY.md (default)")
    print(" honcho — Honcho only, skip MEMORY.md writes")
    new_mode = _prompt("Memory mode", default=current_mode)
    hermes_host["memoryMode"] = new_mode if new_mode in ("hybrid", "honcho") else "hybrid"

    # Write frequency — either a keyword or an integer turn count.
    current_wf = str(hermes_host.get("writeFrequency") or cfg.get("writeFrequency", "async"))
    print("\n Write frequency options:")
    print(" async — background thread, no token cost (recommended)")
    print(" turn — sync write after every turn")
    print(" session — batch write at session end only")
    print(" N — write every N turns (e.g. 5)")
    new_wf = _prompt("Write frequency", default=current_wf)
    try:
        hermes_host["writeFrequency"] = int(new_wf)
    except (ValueError, TypeError):
        hermes_host["writeFrequency"] = new_wf if new_wf in ("async", "turn", "session") else "async"

    # Recall mode — normalize an unknown stored value to the default.
    _raw_recall = hermes_host.get("recallMode") or cfg.get("recallMode", "hybrid")
    current_recall = _raw_recall if _raw_recall in ("hybrid", "context", "tools") else "hybrid"
    print("\n Recall mode options:")
    print(" hybrid — auto-injected context + Honcho tools available (default)")
    print(" context — auto-injected context only, Honcho tools hidden")
    print(" tools — Honcho tools only, no auto-injected context")
    new_recall = _prompt("Recall mode", default=current_recall)
    if new_recall in ("hybrid", "context", "tools"):
        hermes_host["recallMode"] = new_recall

    # Session strategy — how Honcho sessions map onto Hermes runs.
    current_strat = hermes_host.get("sessionStrategy") or cfg.get("sessionStrategy", "per-directory")
    print("\n Session strategy options:")
    print(" per-directory — one session per working directory (default)")
    print(" per-session — new Honcho session each run, named by Hermes session ID")
    print(" per-repo — one session per git repository (uses repo root name)")
    print(" global — single session across all directories")
    new_strat = _prompt("Session strategy", default=current_strat)
    if new_strat in ("per-session", "per-repo", "per-directory", "global"):
        hermes_host["sessionStrategy"] = new_strat

    hermes_host.setdefault("enabled", True)
    hermes_host.setdefault("saveMessages", True)

    _write_config(cfg)
    print(f"\n Config written to {active_path}")

    # Smoke-test the connection with the freshly written config.
    print(" Testing connection... ", end="", flush=True)
    try:
        from honcho_integration.client import HonchoClientConfig, get_honcho_client, reset_honcho_client
        reset_honcho_client()
        hcfg = HonchoClientConfig.from_global_config()
        get_honcho_client(hcfg)
        print("OK")
    except Exception as e:
        print(f"FAILED\n Error: {e}")
        return

    print("\n Honcho is ready.")
    print(f" Session: {hcfg.resolve_session_name()}")
    print(f" Workspace: {hcfg.workspace_id}")
    print(f" Peer: {hcfg.peer_name}")
    _mode_str = hcfg.memory_mode
    if hcfg.peer_memory_modes:
        overrides = ", ".join(f"{k}={v}" for k, v in hcfg.peer_memory_modes.items())
        _mode_str = f"{hcfg.memory_mode} (peers: {overrides})"
    print(f" Mode: {_mode_str}")
    print(f" Frequency: {hcfg.write_frequency}")
    print("\n Honcho tools available in chat:")
    print(" honcho_context — ask Honcho a question about you (LLM-synthesized)")
    print(" honcho_search — semantic search over your history (no LLM)")
    print(" honcho_profile — your peer card, key facts (no LLM)")
    print(" honcho_conclude — persist a user fact to Honcho memory (no LLM)")
    print("\n Other commands:")
    print(" hermes honcho status — show full config")
    print(" hermes honcho mode — show or change memory mode")
    print(" hermes honcho tokens — show or set token budgets")
    print(" hermes honcho identity — seed or show AI peer identity")
    print(" hermes honcho map <name> — map this directory to a session name\n")
|
||||
|
||||
|
||||
def cmd_status(args) -> None:
    """Show current Honcho config and connection status."""
    # SDK availability gates everything else.
    try:
        import honcho  # noqa: F401
    except ImportError:
        print(" honcho-ai is not installed. Run: hermes honcho setup\n")
        return

    cfg = _read_config()
    active_path = _config_path()

    if not cfg:
        print(f" No Honcho config found at {active_path}")
        print(" Run 'hermes honcho setup' to configure.\n")
        return

    try:
        from honcho_integration.client import HonchoClientConfig, get_honcho_client
        hcfg = HonchoClientConfig.from_global_config()
    except Exception as e:
        print(f" Config error: {e}\n")
        return

    api_key = hcfg.api_key or ""
    masked = f"...{api_key[-8:]}" if len(api_key) > 8 else ("set" if api_key else "not set")

    print("\nHoncho status\n" + "─" * 40)
    print(f" Enabled: {hcfg.enabled}")
    print(f" API key: {masked}")
    print(f" Workspace: {hcfg.workspace_id}")
    print(f" Host: {hcfg.host}")
    print(f" Config path: {active_path}")
    print(f" AI peer: {hcfg.ai_peer}")
    print(f" User peer: {hcfg.peer_name or 'not set'}")
    print(f" Session key: {hcfg.resolve_session_name()}")
    print(f" Recall mode: {hcfg.recall_mode}")
    print(f" Memory mode: {hcfg.memory_mode}")
    if hcfg.peer_memory_modes:
        print(" Per-peer modes:")
        for peer, mode in hcfg.peer_memory_modes.items():
            print(f" {peer}: {mode}")
    print(f" Write freq: {hcfg.write_frequency}")

    # Only attempt a live connection when it can plausibly succeed.
    if not (hcfg.enabled and hcfg.api_key):
        reason = "disabled" if not hcfg.enabled else "no API key"
        print(f"\n Not connected ({reason})\n")
        return

    print("\n Connection... ", end="", flush=True)
    try:
        get_honcho_client(hcfg)
        print("OK\n")
    except Exception as e:
        print(f"FAILED ({e})\n")
|
||||
|
||||
|
||||
def cmd_sessions(args) -> None:
    """List known directory → session name mappings."""
    mappings = _read_config().get("sessions", {})

    if not mappings:
        print(" No session mappings configured.\n")
        print(" Add one with: hermes honcho map <session-name>")
        print(f" Or edit {_config_path()} directly.\n")
        return

    here = os.getcwd()
    print(f"\nHoncho session mappings ({len(mappings)})\n" + "─" * 40)
    # Sorted by directory; the current directory is flagged with an arrow.
    for directory, session in sorted(mappings.items()):
        suffix = " ←" if directory == here else ""
        print(f" {session:<30} {directory}{suffix}")
    print()
|
||||
|
||||
|
||||
def cmd_map(args) -> None:
    """Map current directory to a Honcho session name.

    With no name argument, falls back to listing existing mappings.
    Names are sanitized to ``[a-zA-Z0-9_-]``; a name that sanitizes to
    the empty string is rejected instead of being written.
    """
    if not args.session_name:
        cmd_sessions(args)
        return

    cwd = os.getcwd()
    session_name = args.session_name.strip()

    if not session_name:
        print(" Session name cannot be empty.\n")
        return

    import re
    sanitized = re.sub(r'[^a-zA-Z0-9_-]', '-', session_name).strip('-')
    # Fix: a name made entirely of invalid characters (e.g. "!!!")
    # previously sanitized to "" and was silently stored as an empty
    # session name — reject it instead.
    if not sanitized:
        print(" Session name contains no valid characters (use a-z, 0-9, _ or -).\n")
        return
    if sanitized != session_name:
        print(f" Session name sanitized to: {sanitized}")
        session_name = sanitized

    cfg = _read_config()
    cfg.setdefault("sessions", {})[cwd] = session_name
    _write_config(cfg)
    print(f" Mapped {cwd}\n → {session_name}\n")
|
||||
|
||||
|
||||
def cmd_peer(args) -> None:
    """Show or update peer names and dialectic reasoning level."""
    REASONING_LEVELS = ("minimal", "low", "medium", "high", "max")

    cfg = _read_config()
    user_name = getattr(args, "user", None)
    ai_name = getattr(args, "ai", None)
    reasoning = getattr(args, "reasoning", None)

    if user_name is None and ai_name is None and reasoning is None:
        # No flags given — display the current values and explain them.
        hermes = cfg.get("hosts", {}).get(HOST, {})
        user = hermes.get('peerName') or cfg.get('peerName') or '(not set)'
        ai = hermes.get('aiPeer') or cfg.get('aiPeer') or HOST
        lvl = hermes.get("dialecticReasoningLevel") or cfg.get("dialecticReasoningLevel") or "low"
        max_chars = hermes.get("dialecticMaxChars") or cfg.get("dialecticMaxChars") or 600
        print("\nHoncho peers\n" + "─" * 40)
        print(f" User peer: {user}")
        print(" Your identity in Honcho. Messages you send build this peer's card.")
        print(f" AI peer: {ai}")
        print(" Hermes' identity in Honcho. Seed with 'hermes honcho identity <file>'.")
        print(" Dialectic calls ask this peer questions to warm session context.")
        print()
        print(f" Dialectic reasoning: {lvl} ({', '.join(REASONING_LEVELS)})")
        print(f" Dialectic cap: {max_chars} chars\n")
        return

    changed = False
    host_block = cfg.setdefault("hosts", {}).setdefault(HOST, {})

    if user_name is not None:
        host_block["peerName"] = user_name.strip()
        changed = True
        print(f" User peer → {user_name.strip()}")

    if ai_name is not None:
        host_block["aiPeer"] = ai_name.strip()
        changed = True
        print(f" AI peer → {ai_name.strip()}")

    if reasoning is not None:
        if reasoning not in REASONING_LEVELS:
            # Note: bails out before persisting any earlier changes,
            # matching the CLI's all-or-nothing behavior here.
            print(f" Invalid reasoning level '{reasoning}'. Options: {', '.join(REASONING_LEVELS)}")
            return
        host_block["dialecticReasoningLevel"] = reasoning
        changed = True
        print(f" Dialectic reasoning level → {reasoning}")

    if changed:
        _write_config(cfg)
        print(f" Saved to {_config_path()}\n")
|
||||
|
||||
|
||||
def cmd_mode(args) -> None:
    """Show or set the memory mode."""
    MODES = {
        "hybrid": "write to both Honcho and local MEMORY.md (default)",
        "honcho": "Honcho only — MEMORY.md writes disabled",
    }
    cfg = _read_config()
    mode_arg = getattr(args, "mode", None)

    if mode_arg is None:
        # Display-only path: resolve the effective mode and mark it.
        host_block = (cfg.get("hosts") or {}).get(HOST, {})
        current = host_block.get("memoryMode") or cfg.get("memoryMode") or "hybrid"
        print("\nHoncho memory mode\n" + "─" * 40)
        for name, desc in MODES.items():
            arrow = " ←" if name == current else ""
            print(f" {name:<8} {desc}{arrow}")
        print("\n Set with: hermes honcho mode [hybrid|honcho]\n")
        return

    if mode_arg not in MODES:
        print(f" Invalid mode '{mode_arg}'. Options: {', '.join(MODES)}\n")
        return

    cfg.setdefault("hosts", {}).setdefault(HOST, {})["memoryMode"] = mode_arg
    _write_config(cfg)
    print(f" Memory mode → {mode_arg} ({MODES[mode_arg]})\n")
|
||||
|
||||
|
||||
def cmd_tokens(args) -> None:
    """Show or set token budget settings."""
    cfg = _read_config()
    hermes = cfg.get("hosts", {}).get(HOST, {})

    context = getattr(args, "context", None)
    dialectic = getattr(args, "dialectic", None)

    if context is None and dialectic is None:
        # No flags — print the effective budgets with explanations.
        ctx_tokens = hermes.get("contextTokens") or cfg.get("contextTokens") or "(Honcho default)"
        d_chars = hermes.get("dialecticMaxChars") or cfg.get("dialecticMaxChars") or 600
        d_level = hermes.get("dialecticReasoningLevel") or cfg.get("dialecticReasoningLevel") or "low"
        print("\nHoncho budgets\n" + "─" * 40)
        print()
        print(f" Context {ctx_tokens} tokens")
        print(" Raw memory retrieval. Honcho returns stored facts/history about")
        print(" the user and session, injected directly into the system prompt.")
        print()
        print(f" Dialectic {d_chars} chars, reasoning: {d_level}")
        print(" AI-to-AI inference. Hermes asks Honcho's AI peer a question")
        print(" (e.g. \"what were we working on?\") and Honcho runs its own model")
        print(" to synthesize an answer. Used for first-turn session continuity.")
        print(" Level controls how much reasoning Honcho spends on the answer.")
        print("\n Set with: hermes honcho tokens [--context N] [--dialectic N]\n")
        return

    changed = False
    host_block = cfg.setdefault("hosts", {}).setdefault(HOST, {})
    if context is not None:
        host_block["contextTokens"] = context
        print(f" context tokens → {context}")
        changed = True
    if dialectic is not None:
        host_block["dialecticMaxChars"] = dialectic
        print(f" dialectic cap → {dialectic} chars")
        changed = True

    if changed:
        _write_config(cfg)
        print(f" Saved to {_config_path()}\n")
|
||||
|
||||
|
||||
def cmd_identity(args) -> None:
    """Seed AI peer identity or show both peer representations."""
    cfg = _read_config()
    if not _resolve_api_key(cfg):
        print(" No API key configured. Run 'hermes honcho setup' first.\n")
        return

    file_path = getattr(args, "file", None)
    show = getattr(args, "show", False)

    # Both sub-modes need a live session manager, so connect up front.
    try:
        from honcho_integration.client import HonchoClientConfig, get_honcho_client
        from honcho_integration.session import HonchoSessionManager
        hcfg = HonchoClientConfig.from_global_config()
        client = get_honcho_client(hcfg)
        mgr = HonchoSessionManager(honcho=client, config=hcfg)
        session_key = hcfg.resolve_session_name()
        mgr.get_or_create(session_key)
    except Exception as e:
        print(f" Honcho connection failed: {e}\n")
        return

    if show:
        # ── User peer ────────────────────────────────────────────────────────
        user_card = mgr.get_peer_card(session_key)
        print(f"\nUser peer ({hcfg.peer_name or 'not set'})\n" + "─" * 40)
        if user_card:
            for fact in user_card:
                print(f" {fact}")
        else:
            print(" No user peer card yet. Send a few messages to build one.")

        # ── AI peer ──────────────────────────────────────────────────────────
        ai_rep = mgr.get_ai_representation(session_key)
        print(f"\nAI peer ({hcfg.ai_peer})\n" + "─" * 40)
        if ai_rep.get("representation"):
            print(ai_rep["representation"])
        elif ai_rep.get("card"):
            print(ai_rep["card"])
        else:
            print(" No representation built yet.")
            print(" Run 'hermes honcho identity <file>' to seed one.")
        print()
        return

    if not file_path:
        # Neither --show nor a file: print usage help.
        print("\nHoncho identity management\n" + "─" * 40)
        print(f" User peer: {hcfg.peer_name or 'not set'}")
        print(f" AI peer: {hcfg.ai_peer}")
        print()
        print(" hermes honcho identity --show — show both peer representations")
        print(" hermes honcho identity <file> — seed AI peer from SOUL.md or any .md/.txt\n")
        return

    from pathlib import Path
    seed_file = Path(file_path).expanduser()
    if not seed_file.exists():
        print(f" File not found: {seed_file}\n")
        return

    content = seed_file.read_text(encoding="utf-8").strip()
    if not content:
        print(f" File is empty: {seed_file}\n")
        return

    if mgr.seed_ai_identity(session_key, content, source=seed_file.name):
        print(f" Seeded AI peer identity from {seed_file.name} into session '{session_key}'")
        print(f" Honcho will incorporate this into {hcfg.ai_peer}'s representation over time.\n")
    else:
        print(" Failed to seed identity. Check logs for details.\n")
|
||||
|
||||
|
||||
def _connect_session_manager():
    """Create a session manager bound to the active Honcho config.

    Returns ``(mgr, session_key)``; raises on any connection/config error.
    Shared by the upload and identity-seeding steps of cmd_migrate.
    """
    from honcho_integration.client import (
        HonchoClientConfig,
        get_honcho_client,
        reset_honcho_client,
    )
    from honcho_integration.session import HonchoSessionManager

    reset_honcho_client()
    hcfg = HonchoClientConfig.from_global_config()
    client = get_honcho_client(hcfg)
    mgr = HonchoSessionManager(honcho=client, config=hcfg)
    session_key = hcfg.resolve_session_name()
    mgr.get_or_create(session_key)
    return mgr, session_key


def cmd_migrate(args) -> None:
    """Step-by-step migration guide: OpenClaw native memory → Hermes + Honcho."""
    from pathlib import Path

    # ── Detect OpenClaw native memory files ──────────────────────────────────
    cwd = Path(os.getcwd())
    openclaw_home = Path.home() / ".openclaw"

    # User peer: facts about the user
    user_file_names = ["USER.md", "MEMORY.md"]
    # AI peer: agent identity / configuration
    agent_file_names = ["SOUL.md", "IDENTITY.md", "AGENTS.md", "TOOLS.md", "BOOTSTRAP.md"]

    def _detect(names: list[str]) -> list[Path]:
        # cwd first, then ~/.openclaw; de-duplicated, order preserved.
        found: list[Path] = []
        for name in names:
            for d in (cwd, openclaw_home):
                p = d / name
                if p.exists() and p not in found:
                    found.append(p)
        return found

    user_files = _detect(user_file_names)
    agent_files = _detect(agent_file_names)

    cfg = _read_config()
    has_key = bool(_resolve_api_key(cfg))

    print("\nHoncho migration: OpenClaw native memory → Hermes\n" + "─" * 50)
    print()
    print(" OpenClaw's native memory stores context in local markdown files")
    print(" (USER.md, MEMORY.md, SOUL.md, ...) and injects them via QMD search.")
    print(" Honcho replaces that with a cloud-backed, LLM-observable memory layer:")
    print(" context is retrieved semantically, injected automatically each turn,")
    print(" and enriched by a dialectic reasoning layer that builds over time.")
    print()

    # ── Step 1: Honcho account ────────────────────────────────────────────────
    print("Step 1 Create a Honcho account")
    print()
    if has_key:
        # Fix: the key may come from hosts.hermes or HONCHO_API_KEY, in which
        # case cfg["apiKey"] does not exist — previously this raised KeyError.
        resolved = _resolve_api_key(cfg)
        masked = f"...{resolved[-8:]}" if len(resolved) > 8 else "set"
        print(f" Honcho API key already configured: {masked}")
        print(" Skip to Step 2.")
    else:
        print(" Honcho is a cloud memory service that gives Hermes persistent memory")
        print(" across sessions. You need an API key to use it.")
        print()
        print(" 1. Get your API key at https://app.honcho.dev")
        print(" 2. Run: hermes honcho setup")
        print(" Paste the key when prompted.")
        print()
        answer = _prompt(" Run 'hermes honcho setup' now?", default="y")
        if answer.lower() in ("y", "yes"):
            cmd_setup(args)
            cfg = _read_config()
            # Fix: re-resolve through the full host->root->env chain, not
            # just the root key (cfg.get("apiKey") missed host/env keys).
            has_key = bool(_resolve_api_key(cfg))
        else:
            print()
            print(" Run 'hermes honcho setup' when ready, then re-run this walkthrough.")

    # ── Step 2: Detected files ────────────────────────────────────────────────
    print()
    print("Step 2 Detected OpenClaw memory files")
    print()
    if user_files or agent_files:
        if user_files:
            print(f" User memory ({len(user_files)} file(s)) — will go to Honcho user peer:")
            for f in user_files:
                print(f" {f}")
        if agent_files:
            print(f" Agent identity ({len(agent_files)} file(s)) — will go to Honcho AI peer:")
            for f in agent_files:
                print(f" {f}")
    else:
        print(" No OpenClaw native memory files found in cwd or ~/.openclaw/.")
        print(" If your files are elsewhere, copy them here before continuing,")
        print(" or seed them manually: hermes honcho identity <path/to/file>")

    # ── Step 3: Migrate user memory ───────────────────────────────────────────
    print()
    print("Step 3 Migrate user memory files → Honcho user peer")
    print()
    print(" USER.md and MEMORY.md contain facts about you that the agent should")
    print(" remember across sessions. Honcho will store these under your user peer")
    print(" and inject relevant excerpts into the system prompt automatically.")
    print()
    if user_files:
        print(f" Found: {', '.join(f.name for f in user_files)}")
        print()
        print(" These are picked up automatically the first time you run 'hermes'")
        print(" with Honcho configured and no prior session history.")
        print(" (Hermes calls migrate_memory_files() on first session init.)")
        print()
        print(" If you want to migrate them now without starting a session:")
        # Fix: this hint was printed once per detected file (the loop ignored
        # its variable); print it a single time.
        print(" hermes honcho migrate — this step handles it interactively")
        if has_key:
            answer = _prompt(" Upload user memory files to Honcho now?", default="y")
            if answer.lower() in ("y", "yes"):
                try:
                    mgr, session_key = _connect_session_manager()
                    # Upload from each directory that had user files
                    dirs_with_files = set(str(f.parent) for f in user_files)
                    any_uploaded = False
                    for d in dirs_with_files:
                        if mgr.migrate_memory_files(session_key, d):
                            any_uploaded = True
                    if any_uploaded:
                        print(f" Uploaded user memory files from: {', '.join(dirs_with_files)}")
                    else:
                        print(" Nothing uploaded (files may already be migrated or empty).")
                except Exception as e:
                    print(f" Failed: {e}")
        else:
            print(" Run 'hermes honcho setup' first, then re-run this step.")
    else:
        print(" No user memory files detected. Nothing to migrate here.")

    # ── Step 4: Seed AI identity ──────────────────────────────────────────────
    print()
    print("Step 4 Seed AI identity files → Honcho AI peer")
    print()
    print(" SOUL.md, IDENTITY.md, AGENTS.md, TOOLS.md, BOOTSTRAP.md define the")
    print(" agent's character, capabilities, and behavioral rules. In OpenClaw")
    print(" these are injected via file search at prompt-build time.")
    print()
    print(" In Hermes, they are seeded once into Honcho's AI peer through the")
    print(" observation pipeline. Honcho builds a representation from them and")
    print(" from every subsequent assistant message (observe_me=True). Over time")
    print(" the representation reflects actual behavior, not just declaration.")
    print()
    if agent_files:
        print(f" Found: {', '.join(f.name for f in agent_files)}")
        print()
        if has_key:
            answer = _prompt(" Seed AI identity from all detected files now?", default="y")
            if answer.lower() in ("y", "yes"):
                try:
                    mgr, session_key = _connect_session_manager()
                    for f in agent_files:
                        content = f.read_text(encoding="utf-8").strip()
                        if content:
                            ok = mgr.seed_ai_identity(session_key, content, source=f.name)
                            status = "seeded" if ok else "failed"
                            print(f" {f.name}: {status}")
                except Exception as e:
                    print(f" Failed: {e}")
        else:
            print(" Run 'hermes honcho setup' first, then seed manually:")
            for f in agent_files:
                print(f" hermes honcho identity {f}")
    else:
        print(" No agent identity files detected.")
        print(" To seed manually: hermes honcho identity <path/to/SOUL.md>")

    # ── Step 5: What changes ──────────────────────────────────────────────────
    print()
    print("Step 5 What changes vs. OpenClaw native memory")
    print()
    print(" Storage")
    print(" OpenClaw: markdown files on disk, searched via QMD at prompt-build time.")
    print(" Hermes: cloud-backed Honcho peers. Files can stay on disk as source")
    print(" of truth; Honcho holds the live representation.")
    print()
    print(" Context injection")
    print(" OpenClaw: file excerpts injected synchronously before each LLM call.")
    print(" Hermes: Honcho context fetched async at turn end, injected next turn.")
    print(" First turn has no Honcho context; subsequent turns are loaded.")
    print()
    print(" Memory growth")
    print(" OpenClaw: you edit files manually to update memory.")
    print(" Hermes: Honcho observes every message and updates representations")
    print(" automatically. Files become the seed, not the live store.")
    print()
    print(" Honcho tools (available to the agent during conversation)")
    print(" honcho_context — ask Honcho a question, get a synthesized answer (LLM)")
    print(" honcho_search — semantic search over stored context (no LLM)")
    print(" honcho_profile — fast peer card snapshot (no LLM)")
    print(" honcho_conclude — write a conclusion/fact back to memory (no LLM)")
    print()
    print(" Session naming")
    print(" OpenClaw: no persistent session concept — files are global.")
    print(" Hermes: per-session by default — each run gets its own session")
    print(" Map a custom name: hermes honcho map <session-name>")

    # ── Step 6: Next steps ────────────────────────────────────────────────────
    print()
    print("Step 6 Next steps")
    print()
    if not has_key:
        print(" 1. hermes honcho setup — configure API key (required)")
        print(" 2. hermes honcho migrate — re-run this walkthrough")
    else:
        print(" 1. hermes honcho status — verify Honcho connection")
        print(" 2. hermes — start a session")
        print(" (user memory files auto-uploaded on first turn if not done above)")
        print(" 3. hermes honcho identity --show — verify AI peer representation")
        print(" 4. hermes honcho tokens — tune context and dialectic budgets")
        print(" 5. hermes honcho mode — view or change memory mode")
    print()
|
||||
|
||||
|
||||
def honcho_command(args) -> None:
    """Route ``hermes honcho <sub>`` to its handler.

    A missing subcommand (``hermes honcho``) behaves like ``setup``.
    Unknown subcommands print the list of available ones.
    """
    # Dispatch table replaces the previous if/elif chain — adding a new
    # subcommand is now a one-line change.
    handlers = {
        "setup": cmd_setup,
        "status": cmd_status,
        "sessions": cmd_sessions,
        "map": cmd_map,
        "peer": cmd_peer,
        "mode": cmd_mode,
        "tokens": cmd_tokens,
        "identity": cmd_identity,
        "migrate": cmd_migrate,
    }

    sub = getattr(args, "honcho_command", None)
    if sub is None:
        # No subcommand given: default to setup, matching prior behavior.
        cmd_setup(args)
        return

    handler = handlers.get(sub)
    if handler is None:
        print(f" Unknown honcho command: {sub}")
        print(" Available: setup, status, sessions, map, peer, mode, tokens, identity, migrate\n")
        return

    handler(args)
|
||||
439
hermes_code/honcho_integration/client.py
Normal file
439
hermes_code/honcho_integration/client.py
Normal file
|
|
@ -0,0 +1,439 @@
|
|||
"""Honcho client initialization and configuration.
|
||||
|
||||
Resolution order for config file:
|
||||
1. $HERMES_HOME/honcho.json (instance-local, enables isolated Hermes instances)
|
||||
2. ~/.honcho/config.json (global, shared across all Honcho-enabled apps)
|
||||
3. Environment variables (HONCHO_API_KEY, HONCHO_ENVIRONMENT)
|
||||
|
||||
Resolution order for host-specific settings:
|
||||
1. Explicit host block fields (always win)
|
||||
2. Flat/global fields from config root
|
||||
3. Defaults (host name as workspace/peer)
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import logging
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Any, TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from honcho import Honcho
|
||||
|
||||
logger = logging.getLogger(__name__)

# Global (cross-application) Honcho config file; used as the fallback when no
# instance-local $HERMES_HOME/honcho.json exists — see resolve_config_path().
GLOBAL_CONFIG_PATH = Path.home() / ".honcho" / "config.json"
# Key identifying this application under the config file's "hosts" mapping.
HOST = "hermes"
||||
|
||||
|
||||
def _get_hermes_home() -> Path:
|
||||
"""Get HERMES_HOME without importing hermes_cli (avoids circular deps)."""
|
||||
return Path(os.getenv("HERMES_HOME", Path.home() / ".hermes"))
|
||||
|
||||
|
||||
def resolve_config_path() -> Path:
    """Return the active Honcho config path.

    Prefers the instance-local $HERMES_HOME/honcho.json (enables isolated
    Hermes instances); otherwise falls back to the global
    ~/.honcho/config.json. The global path is returned even when neither
    file exists so first-time setup knows where to write.
    """
    instance_config = _get_hermes_home() / "honcho.json"
    return instance_config if instance_config.exists() else GLOBAL_CONFIG_PATH
|
||||
|
||||
|
||||
_RECALL_MODE_ALIASES = {"auto": "hybrid"}
|
||||
_VALID_RECALL_MODES = {"hybrid", "context", "tools"}
|
||||
|
||||
|
||||
def _normalize_recall_mode(val: str) -> str:
|
||||
"""Normalize legacy recall mode values (e.g. 'auto' → 'hybrid')."""
|
||||
val = _RECALL_MODE_ALIASES.get(val, val)
|
||||
return val if val in _VALID_RECALL_MODES else "hybrid"
|
||||
|
||||
|
||||
def _resolve_memory_mode(
|
||||
global_val: str | dict,
|
||||
host_val: str | dict | None,
|
||||
) -> dict:
|
||||
"""Parse memoryMode (string or object) into memory_mode + peer_memory_modes.
|
||||
|
||||
Resolution order: host-level wins over global.
|
||||
String form: applies as the default for all peers.
|
||||
Object form: { "default": "hybrid", "hermes": "honcho", ... }
|
||||
"default" key sets the fallback; other keys are per-peer overrides.
|
||||
"""
|
||||
# Pick the winning value (host beats global)
|
||||
val = host_val if host_val is not None else global_val
|
||||
|
||||
if isinstance(val, dict):
|
||||
default = val.get("default", "hybrid")
|
||||
overrides = {k: v for k, v in val.items() if k != "default"}
|
||||
else:
|
||||
default = str(val) if val else "hybrid"
|
||||
overrides = {}
|
||||
|
||||
return {"memory_mode": default, "peer_memory_modes": overrides}
|
||||
|
||||
|
||||
@dataclass
class HonchoClientConfig:
    """Configuration for the Honcho client, resolved for a specific host.

    Values are resolved in priority order: explicit host-block fields from the
    config file, then flat/global fields at the config root, then defaults
    (host name as workspace/peer). See ``from_global_config``.

    NOTE: all field declarations are grouped before the methods — the original
    interleaved fields after ``peer_memory_mode``, which works with
    ``@dataclass`` but obscures the constructor signature. Field order is
    unchanged, so positional construction is unaffected.
    """

    host: str = HOST
    workspace_id: str = "hermes"
    api_key: str | None = None
    environment: str = "production"
    # Optional base URL for self-hosted Honcho (overrides environment mapping)
    base_url: str | None = None
    # Identity
    peer_name: str | None = None
    ai_peer: str = "hermes"
    linked_hosts: list[str] = field(default_factory=list)
    # Toggles
    enabled: bool = False
    save_messages: bool = True
    # memoryMode: default for all peers. "hybrid" / "honcho"
    memory_mode: str = "hybrid"
    # Per-peer overrides — any named Honcho peer. Override memory_mode when set.
    # Config object form: "memoryMode": { "default": "hybrid", "hermes": "honcho" }
    peer_memory_modes: dict[str, str] = field(default_factory=dict)
    # Write frequency: "async" (background thread), "turn" (sync per turn),
    # "session" (flush on session end), or int (every N turns)
    write_frequency: str | int = "async"
    # Prefetch budget
    context_tokens: int | None = None
    # Dialectic (peer.chat) settings
    # reasoning_level: "minimal" | "low" | "medium" | "high" | "max"
    # Used as the default; prefetch_dialectic may bump it dynamically.
    dialectic_reasoning_level: str = "low"
    # Max chars of dialectic result to inject into Hermes system prompt
    dialectic_max_chars: int = 600
    # Recall mode: how memory retrieval works when Honcho is active.
    # "hybrid" — auto-injected context + Honcho tools available (model decides)
    # "context" — auto-injected context only, Honcho tools removed
    # "tools" — Honcho tools only, no auto-injected context
    recall_mode: str = "hybrid"
    # Session resolution
    session_strategy: str = "per-directory"
    session_peer_prefix: bool = False
    sessions: dict[str, str] = field(default_factory=dict)
    # Raw global config for anything else consumers need
    raw: dict[str, Any] = field(default_factory=dict)
    # True when Honcho was explicitly configured for this host (hosts.hermes
    # block exists or enabled was set explicitly), vs auto-enabled from a
    # stray HONCHO_API_KEY env var.
    explicitly_configured: bool = False

    def peer_memory_mode(self, peer_name: str) -> str:
        """Return the effective memory mode for a named peer.

        Resolution: per-peer override → global memory_mode default.
        """
        return self.peer_memory_modes.get(peer_name, self.memory_mode)

    @classmethod
    def from_env(cls, workspace_id: str = "hermes") -> HonchoClientConfig:
        """Create config from environment variables (fallback)."""
        api_key = os.environ.get("HONCHO_API_KEY")
        base_url = os.environ.get("HONCHO_BASE_URL", "").strip() or None
        return cls(
            workspace_id=workspace_id,
            api_key=api_key,
            environment=os.environ.get("HONCHO_ENVIRONMENT", "production"),
            base_url=base_url,
            # Auto-enable only when there is actually somewhere to connect.
            enabled=bool(api_key or base_url),
        )

    @classmethod
    def from_global_config(
        cls,
        host: str = HOST,
        config_path: Path | None = None,
    ) -> HonchoClientConfig:
        """Create config from the resolved Honcho config path.

        Resolution: $HERMES_HOME/honcho.json -> ~/.honcho/config.json -> env vars.
        """
        path = config_path or resolve_config_path()
        if not path.exists():
            logger.debug("No global Honcho config at %s, falling back to env", path)
            return cls.from_env()

        try:
            raw = json.loads(path.read_text(encoding="utf-8"))
        except (json.JSONDecodeError, OSError) as e:
            logger.warning("Failed to read %s: %s, falling back to env", path, e)
            return cls.from_env()

        host_block = (raw.get("hosts") or {}).get(host, {})
        # A hosts.<host> block or explicit enabled flag means the user
        # intentionally configured Honcho for this host.
        _explicitly_configured = bool(host_block) or raw.get("enabled") is True

        # Explicit host block fields win, then flat/global, then defaults
        workspace = (
            host_block.get("workspace")
            or raw.get("workspace")
            or host
        )
        ai_peer = (
            host_block.get("aiPeer")
            or raw.get("aiPeer")
            or host
        )
        linked_hosts = host_block.get("linkedHosts", [])

        api_key = (
            host_block.get("apiKey")
            or raw.get("apiKey")
            or os.environ.get("HONCHO_API_KEY")
        )

        environment = (
            host_block.get("environment")
            or raw.get("environment", "production")
        )

        base_url = (
            raw.get("baseUrl")
            or os.environ.get("HONCHO_BASE_URL", "").strip()
            or None
        )

        # Auto-enable when API key or base_url is present (unless explicitly disabled)
        # Host-level enabled wins, then root-level, then auto-enable if key/url exists.
        host_enabled = host_block.get("enabled")
        root_enabled = raw.get("enabled")
        if host_enabled is not None:
            enabled = host_enabled
        elif root_enabled is not None:
            enabled = root_enabled
        else:
            # Not explicitly set anywhere -> auto-enable if API key or base_url exists
            enabled = bool(api_key or base_url)

        # write_frequency: accept int or string
        raw_wf = (
            host_block.get("writeFrequency")
            or raw.get("writeFrequency")
            or "async"
        )
        try:
            write_frequency: str | int = int(raw_wf)
        except (TypeError, ValueError):
            write_frequency = str(raw_wf)

        # saveMessages: host wins (None-aware since False is valid)
        host_save = host_block.get("saveMessages")
        save_messages = host_save if host_save is not None else raw.get("saveMessages", True)

        # sessionStrategy / sessionPeerPrefix: host first, root fallback
        session_strategy = (
            host_block.get("sessionStrategy")
            or raw.get("sessionStrategy", "per-directory")
        )
        host_prefix = host_block.get("sessionPeerPrefix")
        session_peer_prefix = (
            host_prefix if host_prefix is not None
            else raw.get("sessionPeerPrefix", False)
        )

        return cls(
            host=host,
            workspace_id=workspace,
            api_key=api_key,
            environment=environment,
            base_url=base_url,
            peer_name=host_block.get("peerName") or raw.get("peerName"),
            ai_peer=ai_peer,
            linked_hosts=linked_hosts,
            enabled=enabled,
            save_messages=save_messages,
            **_resolve_memory_mode(
                raw.get("memoryMode", "hybrid"),
                host_block.get("memoryMode"),
            ),
            write_frequency=write_frequency,
            context_tokens=host_block.get("contextTokens") or raw.get("contextTokens"),
            dialectic_reasoning_level=(
                host_block.get("dialecticReasoningLevel")
                or raw.get("dialecticReasoningLevel")
                or "low"
            ),
            dialectic_max_chars=int(
                host_block.get("dialecticMaxChars")
                or raw.get("dialecticMaxChars")
                or 600
            ),
            recall_mode=_normalize_recall_mode(
                host_block.get("recallMode")
                or raw.get("recallMode")
                or "hybrid"
            ),
            session_strategy=session_strategy,
            session_peer_prefix=session_peer_prefix,
            sessions=raw.get("sessions", {}),
            raw=raw,
            explicitly_configured=_explicitly_configured,
        )

    @staticmethod
    def _git_repo_name(cwd: str) -> str | None:
        """Return the git repo root directory name, or None if not in a repo."""
        import subprocess

        try:
            root = subprocess.run(
                ["git", "rev-parse", "--show-toplevel"],
                capture_output=True, text=True, cwd=cwd, timeout=5,
            )
            if root.returncode == 0:
                return Path(root.stdout.strip()).name
        except (OSError, subprocess.TimeoutExpired):
            pass
        return None

    def resolve_session_name(
        self,
        cwd: str | None = None,
        session_title: str | None = None,
        session_id: str | None = None,
    ) -> str | None:
        """Resolve Honcho session name.

        Resolution order:
        1. Manual directory override from sessions map
        2. Hermes session title (from /title command)
        3. per-session strategy — Hermes session_id ({timestamp}_{hex})
        4. per-repo strategy — git repo root directory name
        5. per-directory strategy — directory basename
        6. global strategy — workspace name
        """
        import re

        if not cwd:
            cwd = os.getcwd()

        # Manual override always wins
        manual = self.sessions.get(cwd)
        if manual:
            return manual

        # /title mid-session remap
        if session_title:
            sanitized = re.sub(r'[^a-zA-Z0-9_-]', '-', session_title).strip('-')
            if sanitized:
                if self.session_peer_prefix and self.peer_name:
                    return f"{self.peer_name}-{sanitized}"
                return sanitized

        # per-session: inherit Hermes session_id (new Honcho session each run)
        if self.session_strategy == "per-session" and session_id:
            if self.session_peer_prefix and self.peer_name:
                return f"{self.peer_name}-{session_id}"
            return session_id

        # per-repo: one Honcho session per git repository
        if self.session_strategy == "per-repo":
            base = self._git_repo_name(cwd) or Path(cwd).name
            if self.session_peer_prefix and self.peer_name:
                return f"{self.peer_name}-{base}"
            return base

        # per-directory: one Honcho session per working directory (default).
        # per-session with no session_id deliberately falls back here.
        if self.session_strategy in ("per-directory", "per-session"):
            base = Path(cwd).name
            if self.session_peer_prefix and self.peer_name:
                return f"{self.peer_name}-{base}"
            return base

        # global: single session across all directories
        return self.workspace_id

    def get_linked_workspaces(self) -> list[str]:
        """Resolve linked host keys to workspace names (excluding our own)."""
        hosts = self.raw.get("hosts", {})
        workspaces = []
        for host_key in self.linked_hosts:
            block = hosts.get(host_key, {})
            ws = block.get("workspace") or host_key
            if ws != self.workspace_id:
                workspaces.append(ws)
        return workspaces
|
||||
|
||||
|
||||
# Process-wide singleton; reset via reset_honcho_client() in tests.
_honcho_client: Honcho | None = None


def get_honcho_client(config: HonchoClientConfig | None = None) -> Honcho:
    """Return the Honcho client singleton, creating it on first call.

    When no config is provided, loads the resolved config file first,
    falling back to environment variables.

    Raises:
        ValueError: when neither an API key nor a base URL is configured.
        ImportError: when the honcho-ai SDK is not installed.
    """
    global _honcho_client

    if _honcho_client is not None:
        return _honcho_client

    if config is None:
        config = HonchoClientConfig.from_global_config()

    if not (config.api_key or config.base_url):
        raise ValueError(
            "Honcho API key not found. "
            "Get your API key at https://app.honcho.dev, "
            "then run 'hermes honcho setup' or set HONCHO_API_KEY. "
            "For local instances, set HONCHO_BASE_URL instead."
        )

    try:
        from honcho import Honcho
    except ImportError:
        raise ImportError(
            "honcho-ai is required for Honcho integration. "
            "Install it with: pip install honcho-ai"
        )

    # config.yaml honcho.base_url may override the SDK's environment
    # mapping, enabling remote self-hosted Honcho deployments without
    # requiring the server to live on localhost.
    resolved_base_url = config.base_url
    if not resolved_base_url:
        try:
            from hermes_cli.config import load_config

            honcho_section = load_config().get("honcho", {})
            if isinstance(honcho_section, dict):
                resolved_base_url = honcho_section.get("base_url", "").strip() or None
        except Exception:
            # Best-effort: hermes config is optional here.
            pass

    if resolved_base_url:
        logger.info("Initializing Honcho client (base_url: %s, workspace: %s)", resolved_base_url, config.workspace_id)
    else:
        logger.info("Initializing Honcho client (host: %s, workspace: %s)", config.host, config.workspace_id)

    client_kwargs: dict = {
        "workspace_id": config.workspace_id,
        "api_key": config.api_key,
        "environment": config.environment,
    }
    if resolved_base_url:
        client_kwargs["base_url"] = resolved_base_url

    _honcho_client = Honcho(**client_kwargs)
    return _honcho_client
|
||||
|
||||
|
||||
def reset_honcho_client() -> None:
    """Drop the Honcho client singleton so the next get recreates it (testing)."""
    global _honcho_client
    _honcho_client = None
|
||||
991
hermes_code/honcho_integration/session.py
Normal file
991
hermes_code/honcho_integration/session.py
Normal file
|
|
@ -0,0 +1,991 @@
|
|||
"""Honcho-based session management for conversation history."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import queue
|
||||
import re
|
||||
import logging
|
||||
import threading
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from typing import Any, TYPE_CHECKING
|
||||
|
||||
from honcho_integration.client import get_honcho_client
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from honcho import Honcho
|
||||
|
||||
logger = logging.getLogger(__name__)

# Sentinel put on the async write queue to tell the background writer thread
# to exit its loop (see HonchoSessionManager.shutdown()).
_ASYNC_SHUTDOWN = object()
|
||||
|
||||
|
||||
@dataclass
class HonchoSession:
    """
    A conversation session backed by Honcho.

    Provides a local message cache that syncs to Honcho's AI-native memory
    system for user modeling. Message dicts may carry a ``_synced`` flag
    (set by the session manager) marking whether they were written to Honcho.
    """

    key: str  # channel:chat_id
    user_peer_id: str  # Honcho peer ID for the user
    assistant_peer_id: str  # Honcho peer ID for the assistant
    honcho_session_id: str  # Honcho session ID
    messages: list[dict[str, Any]] = field(default_factory=list)
    created_at: datetime = field(default_factory=datetime.now)
    updated_at: datetime = field(default_factory=datetime.now)
    metadata: dict[str, Any] = field(default_factory=dict)

    def add_message(self, role: str, content: str, **kwargs: Any) -> None:
        """Append a message to the local cache and bump updated_at.

        Extra keyword arguments are stored as additional message fields
        (e.g. ``_synced``).
        """
        msg = {
            "role": role,
            "content": content,
            "timestamp": datetime.now().isoformat(),
            **kwargs,
        }
        self.messages.append(msg)
        self.updated_at = datetime.now()

    def get_history(self, max_messages: int = 50) -> list[dict[str, Any]]:
        """Return the most recent messages as LLM-ready {role, content} dicts."""
        # A negative-index slice already handles len(messages) <= max_messages,
        # so the previous explicit length check was redundant.
        return [
            {"role": m["role"], "content": m["content"]}
            for m in self.messages[-max_messages:]
        ]

    def clear(self) -> None:
        """Drop all cached messages and bump updated_at."""
        self.messages = []
        self.updated_at = datetime.now()
|
||||
|
||||
|
||||
class HonchoSessionManager:
|
||||
"""
|
||||
Manages conversation sessions using Honcho.
|
||||
|
||||
Runs alongside hermes' existing SQLite state and file-based memory,
|
||||
adding persistent cross-session user modeling via Honcho's AI-native memory.
|
||||
"""
|
||||
|
||||
    def __init__(
        self,
        honcho: Honcho | None = None,
        context_tokens: int | None = None,
        config: Any | None = None,
    ):
        """
        Initialize the session manager.

        Args:
            honcho: Optional Honcho client. If not provided, uses the singleton.
            context_tokens: Max tokens for context() calls (None = Honcho default).
            config: HonchoClientConfig from global config (provides peer_name, ai_peer,
                write_frequency, memory_mode, etc.).
        """
        self._honcho = honcho
        self._context_tokens = context_tokens
        self._config = config
        # Local HonchoSession wrappers, keyed by session key (channel:chat_id).
        self._cache: dict[str, HonchoSession] = {}
        # Honcho SDK peer handles, keyed by peer id.
        self._peers_cache: dict[str, Any] = {}
        # Honcho SDK session objects, keyed by (sanitized) honcho session id.
        self._sessions_cache: dict[str, Any] = {}

        # Write frequency state ("async" / "turn" / "session" / int — see save())
        write_frequency = (config.write_frequency if config else "async")
        self._write_frequency = write_frequency
        self._turn_counter: int = 0

        # Prefetch caches: session_key → last result (consumed once per turn)
        self._context_cache: dict[str, dict] = {}
        self._dialectic_cache: dict[str, str] = {}
        self._prefetch_cache_lock = threading.Lock()
        self._dialectic_reasoning_level: str = (
            config.dialectic_reasoning_level if config else "low"
        )
        self._dialectic_max_chars: int = (
            config.dialectic_max_chars if config else 600
        )

        # Async write queue + daemon writer thread, created and started here
        # when write_frequency is "async"; both stay None in other modes.
        self._async_queue: queue.Queue | None = None
        self._async_thread: threading.Thread | None = None
        if write_frequency == "async":
            self._async_queue = queue.Queue()
            self._async_thread = threading.Thread(
                target=self._async_writer_loop,
                name="honcho-async-writer",
                daemon=True,
            )
            self._async_thread.start()
|
||||
|
||||
@property
|
||||
def honcho(self) -> Honcho:
|
||||
"""Get the Honcho client, initializing if needed."""
|
||||
if self._honcho is None:
|
||||
self._honcho = get_honcho_client()
|
||||
return self._honcho
|
||||
|
||||
def _get_or_create_peer(self, peer_id: str) -> Any:
|
||||
"""
|
||||
Get or create a Honcho peer.
|
||||
|
||||
Peers are lazy -- no API call until first use.
|
||||
Observation settings are controlled per-session via SessionPeerConfig.
|
||||
"""
|
||||
if peer_id in self._peers_cache:
|
||||
return self._peers_cache[peer_id]
|
||||
|
||||
peer = self.honcho.peer(peer_id)
|
||||
self._peers_cache[peer_id] = peer
|
||||
return peer
|
||||
|
||||
    def _get_or_create_honcho_session(
        self, session_id: str, user_peer: Any, assistant_peer: Any
    ) -> tuple[Any, list]:
        """
        Get or create a Honcho session with peers configured.

        Returns:
            Tuple of (honcho_session, existing_messages). On a cache hit the
            message list is always [] — history was already loaded when the
            session was first created in this process.
        """
        if session_id in self._sessions_cache:
            logger.debug("Honcho session '%s' retrieved from cache", session_id)
            return self._sessions_cache[session_id], []

        session = self.honcho.session(session_id)

        # Configure peer observation settings.
        # observe_me=True for AI peer so Honcho watches what the agent says
        # and builds its representation over time — enabling identity formation.
        from honcho.session import SessionPeerConfig
        user_config = SessionPeerConfig(observe_me=True, observe_others=True)
        ai_config = SessionPeerConfig(observe_me=True, observe_others=True)

        session.add_peers([(user_peer, user_config), (assistant_peer, ai_config)])

        # Load existing messages via context() - single call for messages + metadata
        existing_messages = []
        try:
            ctx = session.context(summary=True, tokens=self._context_tokens)
            existing_messages = ctx.messages or []

            # Verify chronological ordering; messages without a created_at
            # sort first via datetime.min.
            if existing_messages and len(existing_messages) > 1:
                timestamps = [m.created_at for m in existing_messages if m.created_at]
                if timestamps and timestamps != sorted(timestamps):
                    logger.warning(
                        "Honcho messages not chronologically ordered for session '%s', sorting",
                        session_id,
                    )
                    existing_messages = sorted(
                        existing_messages,
                        key=lambda m: m.created_at or datetime.min,
                    )

            if existing_messages:
                logger.info(
                    "Honcho session '%s' retrieved (%d existing messages)",
                    session_id, len(existing_messages),
                )
            else:
                logger.info("Honcho session '%s' created (new)", session_id)
        except Exception as e:
            # Best-effort: a context() failure still yields a usable session,
            # just with no preloaded history.
            logger.warning(
                "Honcho session '%s' loaded (failed to fetch context: %s)",
                session_id, e,
            )

        self._sessions_cache[session_id] = session
        return session, existing_messages
|
||||
|
||||
def _sanitize_id(self, id_str: str) -> str:
|
||||
"""Sanitize an ID to match Honcho's pattern: ^[a-zA-Z0-9_-]+"""
|
||||
return re.sub(r'[^a-zA-Z0-9_-]', '-', id_str)
|
||||
|
||||
    def get_or_create(self, key: str) -> HonchoSession:
        """
        Get an existing session or create a new one.

        Resolves peer IDs (from config when available, otherwise derived from
        the key), creates/loads the Honcho session, and wraps any existing
        Honcho history in a local HonchoSession marked ``_synced``.

        Args:
            key: Session key (usually channel:chat_id).

        Returns:
            The session.
        """
        if key in self._cache:
            logger.debug("Local session cache hit: %s", key)
            return self._cache[key]

        # Use peer names from global config when available
        if self._config and self._config.peer_name:
            user_peer_id = self._sanitize_id(self._config.peer_name)
        else:
            # Fallback: derive from session key ("channel:chat_id")
            parts = key.split(":", 1)
            channel = parts[0] if len(parts) > 1 else "default"
            chat_id = parts[1] if len(parts) > 1 else key
            user_peer_id = self._sanitize_id(f"user-{channel}-{chat_id}")

        assistant_peer_id = (
            self._config.ai_peer if self._config else "hermes-assistant"
        )

        # Sanitize session ID for Honcho
        honcho_session_id = self._sanitize_id(key)

        # Get or create peers
        user_peer = self._get_or_create_peer(user_peer_id)
        assistant_peer = self._get_or_create_peer(assistant_peer_id)

        # Get or create Honcho session (side effect: caches the SDK session)
        honcho_session, existing_messages = self._get_or_create_honcho_session(
            honcho_session_id, user_peer, assistant_peer
        )

        # Convert Honcho messages to local format; anything not authored by
        # the assistant peer is attributed to the user.
        local_messages = []
        for msg in existing_messages:
            role = "assistant" if msg.peer_id == assistant_peer_id else "user"
            local_messages.append({
                "role": role,
                "content": msg.content,
                "timestamp": msg.created_at.isoformat() if msg.created_at else "",
                "_synced": True,  # Already in Honcho
            })

        # Create local session wrapper with existing messages
        session = HonchoSession(
            key=key,
            user_peer_id=user_peer_id,
            assistant_peer_id=assistant_peer_id,
            honcho_session_id=honcho_session_id,
            messages=local_messages,
        )

        self._cache[key] = session
        return session
|
||||
|
||||
    def _flush_session(self, session: HonchoSession) -> bool:
        """Internal: write unsynced messages to Honcho synchronously.

        Returns:
            True when there was nothing to sync or the write succeeded;
            False when the Honcho write failed (the batch stays marked
            unsynced so a later flush retries it).
        """
        if not session.messages:
            return True

        user_peer = self._get_or_create_peer(session.user_peer_id)
        assistant_peer = self._get_or_create_peer(session.assistant_peer_id)
        honcho_session = self._sessions_cache.get(session.honcho_session_id)

        if not honcho_session:
            honcho_session, _ = self._get_or_create_honcho_session(
                session.honcho_session_id, user_peer, assistant_peer
            )

        new_messages = [m for m in session.messages if not m.get("_synced")]
        if not new_messages:
            return True

        # Attribute each message to the peer that authored it.
        honcho_messages = []
        for msg in new_messages:
            peer = user_peer if msg["role"] == "user" else assistant_peer
            honcho_messages.append(peer.message(msg["content"]))

        try:
            honcho_session.add_messages(honcho_messages)
            for msg in new_messages:
                msg["_synced"] = True
            logger.debug("Synced %d messages to Honcho for %s", len(honcho_messages), session.key)
            self._cache[session.key] = session
            return True
        except Exception as e:
            # Leave the batch unsynced so the next flush retries it.
            for msg in new_messages:
                msg["_synced"] = False
            logger.error("Failed to sync messages to Honcho: %s", e)
            self._cache[session.key] = session
            return False
|
||||
|
||||
def _async_writer_loop(self) -> None:
|
||||
"""Background daemon thread: drains the async write queue."""
|
||||
while True:
|
||||
try:
|
||||
item = self._async_queue.get(timeout=5)
|
||||
if item is _ASYNC_SHUTDOWN:
|
||||
break
|
||||
|
||||
first_error: Exception | None = None
|
||||
try:
|
||||
success = self._flush_session(item)
|
||||
except Exception as e:
|
||||
success = False
|
||||
first_error = e
|
||||
|
||||
if success:
|
||||
continue
|
||||
|
||||
if first_error is not None:
|
||||
logger.warning("Honcho async write failed, retrying once: %s", first_error)
|
||||
else:
|
||||
logger.warning("Honcho async write failed, retrying once")
|
||||
|
||||
import time as _time
|
||||
_time.sleep(2)
|
||||
|
||||
try:
|
||||
retry_success = self._flush_session(item)
|
||||
except Exception as e2:
|
||||
logger.error("Honcho async write retry failed, dropping batch: %s", e2)
|
||||
continue
|
||||
|
||||
if not retry_success:
|
||||
logger.error("Honcho async write retry failed, dropping batch")
|
||||
except queue.Empty:
|
||||
continue
|
||||
except Exception as e:
|
||||
logger.error("Honcho async writer error: %s", e)
|
||||
|
||||
def save(self, session: HonchoSession) -> None:
|
||||
"""Save messages to Honcho, respecting write_frequency.
|
||||
|
||||
write_frequency modes:
|
||||
"async" — enqueue for background thread (zero blocking, zero token cost)
|
||||
"turn" — flush synchronously every turn
|
||||
"session" — defer until flush_session() is called explicitly
|
||||
N (int) — flush every N turns
|
||||
"""
|
||||
self._turn_counter += 1
|
||||
wf = self._write_frequency
|
||||
|
||||
if wf == "async":
|
||||
if self._async_queue is not None:
|
||||
self._async_queue.put(session)
|
||||
elif wf == "turn":
|
||||
self._flush_session(session)
|
||||
elif wf == "session":
|
||||
# Accumulate; caller must call flush_all() at session end
|
||||
pass
|
||||
elif isinstance(wf, int) and wf > 0:
|
||||
if self._turn_counter % wf == 0:
|
||||
self._flush_session(session)
|
||||
|
||||
def flush_all(self) -> None:
|
||||
"""Flush all pending unsynced messages for all cached sessions.
|
||||
|
||||
Called at session end for "session" write_frequency, or to force
|
||||
a sync before process exit regardless of mode.
|
||||
"""
|
||||
for session in list(self._cache.values()):
|
||||
try:
|
||||
self._flush_session(session)
|
||||
except Exception as e:
|
||||
logger.error("Honcho flush_all error for %s: %s", session.key, e)
|
||||
|
||||
# Drain async queue synchronously if it exists
|
||||
if self._async_queue is not None:
|
||||
while not self._async_queue.empty():
|
||||
try:
|
||||
item = self._async_queue.get_nowait()
|
||||
if item is not _ASYNC_SHUTDOWN:
|
||||
self._flush_session(item)
|
||||
except queue.Empty:
|
||||
break
|
||||
|
||||
def shutdown(self) -> None:
|
||||
"""Gracefully shut down the async writer thread."""
|
||||
if self._async_queue is not None and self._async_thread is not None:
|
||||
self.flush_all()
|
||||
self._async_queue.put(_ASYNC_SHUTDOWN)
|
||||
self._async_thread.join(timeout=10)
|
||||
|
||||
def delete(self, key: str) -> bool:
|
||||
"""Delete a session from local cache."""
|
||||
if key in self._cache:
|
||||
del self._cache[key]
|
||||
return True
|
||||
return False
|
||||
|
||||
def new_session(self, key: str) -> HonchoSession:
    """
    Create a new session, preserving the old one for user modeling.

    Creates a fresh session with a new ID while keeping the old
    session's data in Honcho for continued user modeling.

    Args:
        key: The stable session key callers use (e.g. "telegram:123456").

    Returns:
        The freshly created session, cached under the original ``key``.
    """
    import time  # local import: only needed for the timestamp suffix

    # Remove old session from caches (but don't delete from Honcho)
    old_session = self._cache.pop(key, None)
    if old_session:
        self._sessions_cache.pop(old_session.honcho_session_id, None)

    # Create new session with timestamp suffix
    timestamp = int(time.time())
    new_key = f"{key}:{timestamp}"

    # get_or_create will create a fresh session
    session = self.get_or_create(new_key)

    # Cache under the original key so callers find it by the expected name
    # NOTE(review): get_or_create presumably also caches this session under
    # new_key, leaving the same object reachable under both keys — confirm
    # that aliasing is intended (flush_all would then visit it twice).
    self._cache[key] = session

    logger.info("Created new session for %s (honcho: %s)", key, session.honcho_session_id)
    return session
|
||||
|
||||
# Ordered ladder of Honcho reasoning levels, cheapest to most expensive.
_REASONING_LEVELS = ("minimal", "low", "medium", "high", "max")

def _dynamic_reasoning_level(self, query: str) -> str:
    """
    Pick a reasoning level based on message complexity.

    Uses the configured default as a floor; bumps up for longer or
    more complex messages so Honcho applies more inference where it matters.

    < 120 chars → default (typically "low")
    120–400 chars → one level above default (cap at "high")
    > 400 chars → two levels above default (cap at "high")

    "max" is never selected automatically — reserve it for explicit config.
    """
    ladder = self._REASONING_LEVELS
    try:
        base = ladder.index(self._dialectic_reasoning_level)
    except ValueError:
        # Unknown configured level: fall back to "low".
        base = 1
    length = len(query)
    step = 0 if length < 120 else (1 if length < 400 else 2)
    # Auto-selection caps at "high" (index 3); "max" stays manual-only.
    return ladder[min(base + step, 3)]
|
||||
|
||||
def dialectic_query(
|
||||
self, session_key: str, query: str,
|
||||
reasoning_level: str | None = None,
|
||||
peer: str = "user",
|
||||
) -> str:
|
||||
"""
|
||||
Query Honcho's dialectic endpoint about a peer.
|
||||
|
||||
Runs an LLM on Honcho's backend against the target peer's full
|
||||
representation. Higher latency than context() — call async via
|
||||
prefetch_dialectic() to avoid blocking the response.
|
||||
|
||||
Args:
|
||||
session_key: The session key to query against.
|
||||
query: Natural language question.
|
||||
reasoning_level: Override the config default. If None, uses
|
||||
_dynamic_reasoning_level(query).
|
||||
peer: Which peer to query — "user" (default) or "ai".
|
||||
|
||||
Returns:
|
||||
Honcho's synthesized answer, or empty string on failure.
|
||||
"""
|
||||
session = self._cache.get(session_key)
|
||||
if not session:
|
||||
return ""
|
||||
|
||||
peer_id = session.assistant_peer_id if peer == "ai" else session.user_peer_id
|
||||
target_peer = self._get_or_create_peer(peer_id)
|
||||
level = reasoning_level or self._dynamic_reasoning_level(query)
|
||||
|
||||
try:
|
||||
result = target_peer.chat(query, reasoning_level=level) or ""
|
||||
# Apply Hermes-side char cap before caching
|
||||
if result and self._dialectic_max_chars and len(result) > self._dialectic_max_chars:
|
||||
result = result[:self._dialectic_max_chars].rsplit(" ", 1)[0] + " …"
|
||||
return result
|
||||
except Exception as e:
|
||||
logger.warning("Honcho dialectic query failed: %s", e)
|
||||
return ""
|
||||
|
||||
def prefetch_dialectic(self, session_key: str, query: str) -> None:
    """
    Fire a dialectic_query in a background thread, caching the result.

    Non-blocking: the answer becomes available through
    pop_dialectic_result() on a later call (typically the next turn).
    The reasoning level is chosen dynamically from query complexity.

    Args:
        session_key: The session key to query against.
        query: The user's current message, used as the query.
    """
    def _worker():
        answer = self.dialectic_query(session_key, query)
        if answer:
            self.set_dialectic_result(session_key, answer)

    threading.Thread(
        target=_worker, name="honcho-dialectic-prefetch", daemon=True
    ).start()
|
||||
|
||||
def set_dialectic_result(self, session_key: str, result: str) -> None:
    """Store a prefetched dialectic result in a thread-safe way."""
    if result:
        # Lock guards the cache shared with the prefetch worker threads.
        with self._prefetch_cache_lock:
            self._dialectic_cache[session_key] = result
|
||||
|
||||
def pop_dialectic_result(self, session_key: str) -> str:
    """
    Return and clear the cached dialectic result for this session.

    Returns empty string if no result is ready yet.
    """
    with self._prefetch_cache_lock:
        cached = self._dialectic_cache.pop(session_key, "")
    return cached
|
||||
|
||||
def prefetch_context(self, session_key: str, user_message: str | None = None) -> None:
|
||||
"""
|
||||
Fire get_prefetch_context in a background thread, caching the result.
|
||||
|
||||
Non-blocking. Consumed next turn via pop_context_result(). This avoids
|
||||
a synchronous HTTP round-trip blocking every response.
|
||||
"""
|
||||
def _run():
|
||||
result = self.get_prefetch_context(session_key, user_message)
|
||||
if result:
|
||||
self.set_context_result(session_key, result)
|
||||
|
||||
t = threading.Thread(target=_run, name="honcho-context-prefetch", daemon=True)
|
||||
t.start()
|
||||
|
||||
def set_context_result(self, session_key: str, result: dict[str, str]) -> None:
    """Store a prefetched context result in a thread-safe way."""
    if result:
        # Lock guards the cache shared with the prefetch worker threads.
        with self._prefetch_cache_lock:
            self._context_cache[session_key] = result
|
||||
|
||||
def pop_context_result(self, session_key: str) -> dict[str, str]:
    """
    Return and clear the cached context result for this session.

    Returns empty dict if no result is ready yet (first turn).
    """
    with self._prefetch_cache_lock:
        cached = self._context_cache.pop(session_key, {})
    return cached
|
||||
|
||||
def get_prefetch_context(self, session_key: str, user_message: str | None = None) -> dict[str, str]:
|
||||
"""
|
||||
Pre-fetch user and AI peer context from Honcho.
|
||||
|
||||
Fetches peer_representation and peer_card for both peers. search_query
|
||||
is intentionally omitted — it would only affect additional excerpts
|
||||
that this code does not consume, and passing the raw message exposes
|
||||
conversation content in server access logs.
|
||||
|
||||
Args:
|
||||
session_key: The session key to get context for.
|
||||
user_message: Unused; kept for call-site compatibility.
|
||||
|
||||
Returns:
|
||||
Dictionary with 'representation', 'card', 'ai_representation',
|
||||
and 'ai_card' keys.
|
||||
"""
|
||||
session = self._cache.get(session_key)
|
||||
if not session:
|
||||
return {}
|
||||
|
||||
honcho_session = self._sessions_cache.get(session.honcho_session_id)
|
||||
if not honcho_session:
|
||||
return {}
|
||||
|
||||
result: dict[str, str] = {}
|
||||
try:
|
||||
ctx = honcho_session.context(
|
||||
summary=False,
|
||||
tokens=self._context_tokens,
|
||||
peer_target=session.user_peer_id,
|
||||
peer_perspective=session.assistant_peer_id,
|
||||
)
|
||||
card = ctx.peer_card or []
|
||||
result["representation"] = ctx.peer_representation or ""
|
||||
result["card"] = "\n".join(card) if isinstance(card, list) else str(card)
|
||||
except Exception as e:
|
||||
logger.warning("Failed to fetch user context from Honcho: %s", e)
|
||||
|
||||
# Also fetch AI peer's own representation so Hermes knows itself.
|
||||
try:
|
||||
ai_ctx = honcho_session.context(
|
||||
summary=False,
|
||||
tokens=self._context_tokens,
|
||||
peer_target=session.assistant_peer_id,
|
||||
peer_perspective=session.user_peer_id,
|
||||
)
|
||||
ai_card = ai_ctx.peer_card or []
|
||||
result["ai_representation"] = ai_ctx.peer_representation or ""
|
||||
result["ai_card"] = "\n".join(ai_card) if isinstance(ai_card, list) else str(ai_card)
|
||||
except Exception as e:
|
||||
logger.debug("Failed to fetch AI peer context from Honcho: %s", e)
|
||||
|
||||
return result
|
||||
|
||||
def migrate_local_history(self, session_key: str, messages: list[dict[str, Any]]) -> bool:
    """
    Upload local session history to Honcho as a file.

    Used when Honcho activates mid-conversation to preserve prior context.

    Args:
        session_key: The session key (e.g., "telegram:123456").
        messages: Local messages (dicts with role, content, timestamp).

    Returns:
        True if upload succeeded, False otherwise.
    """
    sess = self._cache.get(session_key)
    if not sess:
        logger.warning("No local session cached for '%s', skipping migration", session_key)
        return False

    remote = self._sessions_cache.get(sess.honcho_session_id)
    if not remote:
        logger.warning("No Honcho session cached for '%s', skipping migration", session_key)
        return False

    uploader = self._get_or_create_peer(sess.user_peer_id)
    transcript = self._format_migration_transcript(session_key, messages)
    # Timestamp the upload with the first message so Honcho orders it
    # before the live conversation.
    first_ts = messages[0].get("timestamp") if messages else None

    try:
        remote.upload_file(
            file=("prior_history.txt", transcript, "text/plain"),
            peer=uploader,
            metadata={"source": "local_jsonl", "count": len(messages)},
            created_at=first_ts,
        )
    except Exception as e:
        logger.error("Failed to upload local history to Honcho for %s: %s", session_key, e)
        return False
    logger.info("Migrated %d local messages to Honcho for %s", len(messages), session_key)
    return True
|
||||
|
||||
@staticmethod
|
||||
def _format_migration_transcript(session_key: str, messages: list[dict[str, Any]]) -> bytes:
|
||||
"""Format local messages as an XML transcript for Honcho file upload."""
|
||||
timestamps = [m.get("timestamp", "") for m in messages]
|
||||
time_range = f"{timestamps[0]} to {timestamps[-1]}" if timestamps else "unknown"
|
||||
|
||||
lines = [
|
||||
"<prior_conversation_history>",
|
||||
"<context>",
|
||||
"This conversation history occurred BEFORE the Honcho memory system was activated.",
|
||||
"These messages are the preceding elements of this conversation session and should",
|
||||
"be treated as foundational context for all subsequent interactions. The user and",
|
||||
"assistant have already established rapport through these exchanges.",
|
||||
"</context>",
|
||||
"",
|
||||
f'<transcript session_key="{session_key}" message_count="{len(messages)}"',
|
||||
f' time_range="{time_range}">',
|
||||
"",
|
||||
]
|
||||
for msg in messages:
|
||||
ts = msg.get("timestamp", "?")
|
||||
role = msg.get("role", "unknown")
|
||||
content = msg.get("content") or ""
|
||||
lines.append(f"[{ts}] {role}: {content}")
|
||||
|
||||
lines.append("")
|
||||
lines.append("</transcript>")
|
||||
lines.append("</prior_conversation_history>")
|
||||
|
||||
return "\n".join(lines).encode("utf-8")
|
||||
|
||||
def migrate_memory_files(self, session_key: str, memory_dir: str) -> bool:
    """
    Upload MEMORY.md, USER.md, and SOUL.md to Honcho as files.

    Used when Honcho activates on an instance that already has locally
    consolidated memory. Backwards compatible -- skips if files don't exist.

    MEMORY.md and USER.md are attributed to the user peer; SOUL.md
    (agent persona) to the assistant peer.

    Args:
        session_key: The session key to associate files with.
        memory_dir: Path to the memories directory (~/.hermes/memories/).

    Returns:
        True if at least one file was uploaded, False otherwise.
    """
    from pathlib import Path
    memory_path = Path(memory_dir)

    if not memory_path.exists():
        return False

    session = self._cache.get(session_key)
    if not session:
        logger.warning("No local session cached for '%s', skipping memory migration", session_key)
        return False

    honcho_session = self._sessions_cache.get(session.honcho_session_id)
    if not honcho_session:
        logger.warning("No Honcho session cached for '%s', skipping memory migration", session_key)
        return False

    user_peer = self._get_or_create_peer(session.user_peer_id)
    assistant_peer = self._get_or_create_peer(session.assistant_peer_id)

    uploaded = False
    # (local filename, upload name, description, owning peer, peer-kind tag)
    files = [
        (
            "MEMORY.md",
            "consolidated_memory.md",
            "Long-term agent notes and preferences",
            user_peer,
            "user",
        ),
        (
            "USER.md",
            "user_profile.md",
            "User profile and preferences",
            user_peer,
            "user",
        ),
        (
            "SOUL.md",
            "agent_soul.md",
            "Agent persona and identity configuration",
            assistant_peer,
            "ai",
        ),
    ]

    for filename, upload_name, description, target_peer, target_kind in files:
        filepath = memory_path / filename
        if not filepath.exists():
            continue
        content = filepath.read_text(encoding="utf-8").strip()
        if not content:
            # Empty file: nothing worth uploading.
            continue

        # Tagged envelope so Honcho can tell seeded memory apart from
        # organic conversation content.
        wrapped = (
            f"<prior_memory_file>\n"
            f"<context>\n"
            f"This file was consolidated from local conversations BEFORE Honcho was activated.\n"
            f"{description}. Treat as foundational context for this user.\n"
            f"</context>\n"
            f"\n"
            f"{content}\n"
            f"</prior_memory_file>\n"
        )

        try:
            honcho_session.upload_file(
                file=(upload_name, wrapped.encode("utf-8"), "text/plain"),
                peer=target_peer,
                metadata={
                    "source": "local_memory",
                    "original_file": filename,
                    "target_peer": target_kind,
                },
            )
            logger.info(
                "Uploaded %s to Honcho for %s (%s peer)",
                filename,
                session_key,
                target_kind,
            )
            uploaded = True
        except Exception as e:
            # Best-effort per file: one failure doesn't abort the others.
            logger.error("Failed to upload %s to Honcho: %s", filename, e)

    return uploaded
|
||||
|
||||
def get_peer_card(self, session_key: str) -> list[str]:
    """
    Fetch the user peer's card — a curated list of key facts.

    Fast, no LLM reasoning. Returns raw structured facts Honcho has
    inferred about the user (name, role, preferences, patterns).
    Empty list if unavailable.
    """
    sess = self._cache.get(session_key)
    if not sess:
        return []
    remote = self._sessions_cache.get(sess.honcho_session_id)
    if not remote:
        return []

    try:
        ctx = remote.context(
            summary=False,
            tokens=200,  # small budget: the card is a short fact list
            peer_target=sess.user_peer_id,
            peer_perspective=sess.assistant_peer_id,
        )
        facts = ctx.peer_card or []
        return facts if isinstance(facts, list) else [str(facts)]
    except Exception as e:
        logger.debug("Failed to fetch peer card from Honcho: %s", e)
        return []
|
||||
|
||||
def search_context(self, session_key: str, query: str, max_tokens: int = 800) -> str:
    """
    Query-biased context lookup over a Honcho session.

    Passes ``query`` as Honcho's ``search_query`` so the server can bias
    what it returns, then assembles the peer representation plus the peer
    card as a bullet list. Note: despite the name, this does NOT return
    raw message excerpts — only the representation/card fields are
    consumed here. No LLM reasoning — cheaper and faster than
    dialectic_query. Good for factual lookups where the model will do
    its own synthesis.

    Args:
        session_key: Session to search against.
        query: Search query used to bias server-side retrieval.
        max_tokens: Token budget for returned content.

    Returns:
        Representation and card facts joined as a string, or empty
        string if the session is unknown or the request fails.
    """
    session = self._cache.get(session_key)
    if not session:
        return ""

    honcho_session = self._sessions_cache.get(session.honcho_session_id)
    if not honcho_session:
        return ""

    try:
        ctx = honcho_session.context(
            summary=False,
            tokens=max_tokens,
            peer_target=session.user_peer_id,
            peer_perspective=session.assistant_peer_id,
            search_query=query,
        )
        parts = []
        if ctx.peer_representation:
            parts.append(ctx.peer_representation)
        card = ctx.peer_card or []
        if card:
            facts = card if isinstance(card, list) else [str(card)]
            parts.append("\n".join(f"- {f}" for f in facts))
        return "\n\n".join(parts)
    except Exception as e:
        logger.debug("Honcho search_context failed: %s", e)
        return ""
|
||||
|
||||
def create_conclusion(self, session_key: str, content: str) -> bool:
    """Write a conclusion about the user back to Honcho.

    Conclusions are facts the AI peer observes about the user —
    preferences, corrections, clarifications, project context.
    They feed into the user's peer card and representation.

    Args:
        session_key: Session to associate the conclusion with.
        content: The conclusion text (e.g. "User prefers dark mode").

    Returns:
        True on success, False on failure.
    """
    if not content or not content.strip():
        return False

    sess = self._cache.get(session_key)
    if not sess:
        logger.warning("No session cached for '%s', skipping conclusion", session_key)
        return False

    # Conclusions are authored by the assistant peer about the user peer.
    observer = self._get_or_create_peer(sess.assistant_peer_id)
    try:
        observer.conclusions_of(sess.user_peer_id).create([
            {
                "content": content.strip(),
                "session_id": sess.honcho_session_id,
            }
        ])
    except Exception as e:
        logger.error("Failed to create conclusion: %s", e)
        return False
    logger.info("Created conclusion for %s: %s", session_key, content[:80])
    return True
|
||||
|
||||
def seed_ai_identity(self, session_key: str, content: str, source: str = "manual") -> bool:
    """
    Seed the AI peer's Honcho representation from text content.

    Useful for priming AI identity from SOUL.md, exported chats, or
    any structured description. The content is sent as an assistant
    peer message so Honcho's reasoning model can incorporate it.

    Args:
        session_key: The session key to associate with.
        content: The identity/persona content to seed.
        source: Metadata tag for the source (e.g. "soul_md", "export").

    Returns:
        True on success, False on failure.
    """
    if not content or not content.strip():
        return False

    session = self._cache.get(session_key)
    if not session:
        logger.warning("No session cached for '%s', skipping AI seed", session_key)
        return False

    # Check the Honcho session BEFORE resolving the peer: the original
    # order called _get_or_create_peer first, doing (possibly remote)
    # peer-creation work on a path that then bailed out.
    honcho_session = self._sessions_cache.get(session.honcho_session_id)
    if not honcho_session:
        logger.warning("No Honcho session cached for '%s', skipping AI seed", session_key)
        return False

    assistant_peer = self._get_or_create_peer(session.assistant_peer_id)

    try:
        # Tagged envelope so Honcho can distinguish seeded identity
        # from organic conversation messages.
        wrapped = (
            f"<ai_identity_seed>\n"
            f"<source>{source}</source>\n"
            f"\n"
            f"{content.strip()}\n"
            f"</ai_identity_seed>"
        )
        honcho_session.add_messages([assistant_peer.message(wrapped)])
        logger.info("Seeded AI identity from '%s' into %s", source, session_key)
        return True
    except Exception as e:
        logger.error("Failed to seed AI identity: %s", e)
        return False
|
||||
|
||||
def get_ai_representation(self, session_key: str) -> dict[str, str]:
    """
    Fetch the AI peer's current Honcho representation.

    Returns:
        Dict with 'representation' and 'card' keys, empty strings if unavailable.
    """
    sess = self._cache.get(session_key)
    if not sess:
        return {"representation": "", "card": ""}
    remote = self._sessions_cache.get(sess.honcho_session_id)
    if not remote:
        return {"representation": "", "card": ""}

    try:
        # AI peer viewed from the user's perspective.
        ctx = remote.context(
            summary=False,
            tokens=self._context_tokens,
            peer_target=sess.assistant_peer_id,
            peer_perspective=sess.user_peer_id,
        )
        facts = ctx.peer_card or []
        card_text = "\n".join(facts) if isinstance(facts, list) else str(facts)
        return {
            "representation": ctx.peer_representation or "",
            "card": card_text,
        }
    except Exception as e:
        logger.debug("Failed to fetch AI representation: %s", e)
        return {"representation": "", "card": ""}
|
||||
|
||||
def list_sessions(self) -> list[dict[str, Any]]:
    """List all cached sessions."""
    summaries = []
    for sess in self._cache.values():
        summaries.append(
            {
                "key": sess.key,
                "created_at": sess.created_at.isoformat(),
                "updated_at": sess.updated_at.isoformat(),
                "message_count": len(sess.messages),
            }
        )
    return summaries
|
||||
Loading…
Add table
Add a link
Reference in a new issue