fix: backfill model on gateway sessions after agent runs

Gateway sessions end up with model=NULL because the session row is
created before AIAgent is constructed. After the agent responds,
update_session() writes token counts but never fills in the model.

Thread agent.model through _run_agent()'s return dict into
update_session() → update_token_counts().  The SQL uses
COALESCE(model, ?) so it only fills NULL rows — never overwrites
a model already set at creation time (e.g. CLI sessions).

If the agent falls back to a different provider, agent.model is
updated in-place by _try_activate_fallback(), so the recorded value
reflects whichever model actually produced the response.

Fixes #987
This commit is contained in:
ac (sourcetree) 2026-03-11 17:44:37 -07:00 committed by teknium1
parent c1cca65168
commit 2046a4c08c
3 changed files with 13 additions and 6 deletions

View file

@@ -1578,10 +1578,11 @@ class GatewayRunner:
skip_db=agent_persisted,
)
# Update session with actual prompt token count from the agent
# Update session with actual prompt token count and model from the agent
self.session_store.update_session(
session_entry.session_key,
last_prompt_tokens=agent_result.get("last_prompt_tokens", 0),
model=agent_result.get("model"),
)
return response
@@ -3586,6 +3587,7 @@ class GatewayRunner:
"tools": tools_holder[0] or [],
"history_offset": len(agent_history),
"last_prompt_tokens": _last_prompt_toks,
"model": agent_holder[0].model if agent_holder[0] else None,
}
# Scan tool results for MEDIA:<path> tags that need to be delivered
@@ -3648,6 +3650,7 @@ class GatewayRunner:
"tools": tools_holder[0] or [],
"history_offset": len(agent_history),
"last_prompt_tokens": _last_prompt_toks,
"model": agent_holder[0].model if agent_holder[0] else None,
"session_id": effective_session_id,
}