fix(honcho): isolate session routing for multi-user gateway (#1500)

Salvaged from PR #1470 by adavyas.

Core fix: Honcho tool calls in a multi-session gateway could route to
the wrong session because honcho_tools.py relied on process-global
state. Now threads session context through the call chain:
  AIAgent._invoke_tool() → handle_function_call() → registry.dispatch()
  → handler **kw → _resolve_session_context()

Changes:
- Add _resolve_session_context() to prefer per-call context over globals
- Plumb honcho_manager + honcho_session_key through handle_function_call
- Add a sync_honcho flag (default True) to run_conversation(); synthetic
  flush turns pass sync_honcho=False to skip Honcho writes
- Pass honcho_session_key through gateway memory flush lifecycle
- Harden gateway PID detection when /proc cmdline is unreadable
- Make interrupt test scripts import-safe for pytest-xdist
- Wrap BibTeX examples in Jekyll raw blocks for docs build
- Fix thread-order-dependent assertion in client lifecycle test
- Expand Honcho docs: session isolation, lifecycle, routing internals

Dropped from original PR:
- Indentation change in _create_request_openai_client that would move
  client creation inside the lock (causes unnecessary contention)

Co-authored-by: adavyas <adavyas@users.noreply.github.com>
This commit is contained in:
Teknium 2026-03-16 00:23:47 -07:00 committed by GitHub
parent eb4f0348e1
commit dd7921d514
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
17 changed files with 522 additions and 252 deletions

View file

@@ -3790,6 +3790,8 @@ class AIAgent:
return handle_function_call(
function_name, function_args, effective_task_id,
enabled_tools=list(self.valid_tool_names) if self.valid_tool_names else None,
honcho_manager=self._honcho,
honcho_session_key=self._honcho_session_key,
)
def _execute_tool_calls_concurrent(self, assistant_message, messages: list, effective_task_id: str, api_call_count: int = 0) -> None:
@@ -4132,6 +4134,8 @@ class AIAgent:
function_result = handle_function_call(
function_name, function_args, effective_task_id,
enabled_tools=list(self.valid_tool_names) if self.valid_tool_names else None,
honcho_manager=self._honcho,
honcho_session_key=self._honcho_session_key,
)
_spinner_result = function_result
except Exception as tool_error:
@@ -4146,6 +4150,8 @@ class AIAgent:
function_result = handle_function_call(
function_name, function_args, effective_task_id,
enabled_tools=list(self.valid_tool_names) if self.valid_tool_names else None,
honcho_manager=self._honcho,
honcho_session_key=self._honcho_session_key,
)
except Exception as tool_error:
function_result = f"Error executing tool '{function_name}': {tool_error}"
@@ -4410,6 +4416,7 @@ class AIAgent:
task_id: str = None,
stream_callback: Optional[callable] = None,
persist_user_message: Optional[str] = None,
sync_honcho: bool = True,
) -> Dict[str, Any]:
"""
Run a complete conversation with tool calling until completion.
@@ -4425,6 +4432,8 @@ class AIAgent:
persist_user_message: Optional clean user message to store in
transcripts/history when user_message contains API-only
synthetic prefixes.
sync_honcho: When False, skip writing the final synthetic turn back
to Honcho or queuing follow-up prefetch work.
Returns:
Dict: Complete conversation result with final response and message history
@@ -5933,7 +5942,7 @@ class AIAgent:
self._persist_session(messages, conversation_history)
# Sync conversation to Honcho for user modeling
if final_response and not interrupted:
if final_response and not interrupted and sync_honcho:
self._honcho_sync(original_user_message, final_response)
self._queue_honcho_prefetch(original_user_message)