fix: verbose mode shows full untruncated output

* fix(cli): silence tirith prefetch install warnings at startup
* fix: verbose mode now shows full untruncated tool args, results, content, and think blocks

When tool progress is set to 'verbose' (via /verbose or config), the display was
still truncating tool arguments to 100 chars, tool results to 100-200 chars,
assistant content to 100 chars, and think blocks to 5 lines. This defeated the
purpose of verbose mode.

Changes:
- Tool args: show full JSON args (not truncated to log_prefix_chars)
- Tool results: show full result content in both display and debug logs
- Assistant content: show full content during tool-call loops
- Think blocks: show full reasoning text (not truncated to 5 lines/100 chars)
- Auto-enable reasoning display when verbose mode is active
- Fix initial agent creation to respect verbose config (was always quiet_mode=True)
- Updated verbose label to mention think blocks
This commit is contained in:
parent
a56937735e
commit
103f7b1ebc
2 changed files with 52 additions and 24 deletions
27
cli.py
27
cli.py
|
|
@ -1414,7 +1414,7 @@ class HermesCLI:
|
||||||
max_iterations=self.max_turns,
|
max_iterations=self.max_turns,
|
||||||
enabled_toolsets=self.enabled_toolsets,
|
enabled_toolsets=self.enabled_toolsets,
|
||||||
verbose_logging=self.verbose,
|
verbose_logging=self.verbose,
|
||||||
quiet_mode=True,
|
quiet_mode=not self.verbose,
|
||||||
ephemeral_system_prompt=self.system_prompt if self.system_prompt else None,
|
ephemeral_system_prompt=self.system_prompt if self.system_prompt else None,
|
||||||
prefill_messages=self.prefill_messages or None,
|
prefill_messages=self.prefill_messages or None,
|
||||||
reasoning_config=self.reasoning_config,
|
reasoning_config=self.reasoning_config,
|
||||||
|
|
@ -1428,7 +1428,7 @@ class HermesCLI:
|
||||||
platform="cli",
|
platform="cli",
|
||||||
session_db=self._session_db,
|
session_db=self._session_db,
|
||||||
clarify_callback=self._clarify_callback,
|
clarify_callback=self._clarify_callback,
|
||||||
reasoning_callback=self._on_reasoning if self.show_reasoning else None,
|
reasoning_callback=self._on_reasoning if (self.show_reasoning or self.verbose) else None,
|
||||||
honcho_session_key=None, # resolved by run_agent via config sessions map / title
|
honcho_session_key=None, # resolved by run_agent via config sessions map / title
|
||||||
fallback_model=self._fallback_model,
|
fallback_model=self._fallback_model,
|
||||||
thinking_callback=self._on_thinking,
|
thinking_callback=self._on_thinking,
|
||||||
|
|
@ -3285,12 +3285,17 @@ class HermesCLI:
|
||||||
if self.agent:
|
if self.agent:
|
||||||
self.agent.verbose_logging = self.verbose
|
self.agent.verbose_logging = self.verbose
|
||||||
self.agent.quiet_mode = not self.verbose
|
self.agent.quiet_mode = not self.verbose
|
||||||
|
# Auto-enable reasoning display in verbose mode
|
||||||
|
if self.verbose:
|
||||||
|
self.agent.reasoning_callback = self._on_reasoning
|
||||||
|
elif not self.show_reasoning:
|
||||||
|
self.agent.reasoning_callback = None
|
||||||
|
|
||||||
labels = {
|
labels = {
|
||||||
"off": "[dim]Tool progress: OFF[/] — silent mode, just the final response.",
|
"off": "[dim]Tool progress: OFF[/] — silent mode, just the final response.",
|
||||||
"new": "[yellow]Tool progress: NEW[/] — show each new tool (skip repeats).",
|
"new": "[yellow]Tool progress: NEW[/] — show each new tool (skip repeats).",
|
||||||
"all": "[green]Tool progress: ALL[/] — show every tool call.",
|
"all": "[green]Tool progress: ALL[/] — show every tool call.",
|
||||||
"verbose": "[bold green]Tool progress: VERBOSE[/] — full args, results, and debug logs.",
|
"verbose": "[bold green]Tool progress: VERBOSE[/] — full args, results, think blocks, and debug logs.",
|
||||||
}
|
}
|
||||||
self.console.print(labels.get(self.tool_progress_mode, ""))
|
self.console.print(labels.get(self.tool_progress_mode, ""))
|
||||||
|
|
||||||
|
|
@ -3357,13 +3362,17 @@ class HermesCLI:
|
||||||
|
|
||||||
def _on_reasoning(self, reasoning_text: str):
|
def _on_reasoning(self, reasoning_text: str):
|
||||||
"""Callback for intermediate reasoning display during tool-call loops."""
|
"""Callback for intermediate reasoning display during tool-call loops."""
|
||||||
lines = reasoning_text.strip().splitlines()
|
if self.verbose:
|
||||||
if len(lines) > 5:
|
# Verbose mode: show full reasoning text
|
||||||
preview = "\n".join(lines[:5])
|
_cprint(f" {_DIM}[thinking] {reasoning_text.strip()}{_RST}")
|
||||||
preview += f"\n ... ({len(lines) - 5} more lines)"
|
|
||||||
else:
|
else:
|
||||||
preview = reasoning_text.strip()
|
lines = reasoning_text.strip().splitlines()
|
||||||
_cprint(f" {_DIM}[thinking] {preview}{_RST}")
|
if len(lines) > 5:
|
||||||
|
preview = "\n".join(lines[:5])
|
||||||
|
preview += f"\n ... ({len(lines) - 5} more lines)"
|
||||||
|
else:
|
||||||
|
preview = reasoning_text.strip()
|
||||||
|
_cprint(f" {_DIM}[thinking] {preview}{_RST}")
|
||||||
|
|
||||||
def _manual_compress(self):
|
def _manual_compress(self):
|
||||||
"""Manually trigger context compression on the current conversation."""
|
"""Manually trigger context compression on the current conversation."""
|
||||||
|
|
|
||||||
49
run_agent.py
49
run_agent.py
|
|
@ -3345,8 +3345,7 @@ class AIAgent:
|
||||||
reasoning_text = combined or None
|
reasoning_text = combined or None
|
||||||
|
|
||||||
if reasoning_text and self.verbose_logging:
|
if reasoning_text and self.verbose_logging:
|
||||||
preview = reasoning_text[:100] + "..." if len(reasoning_text) > 100 else reasoning_text
|
logging.debug(f"Captured reasoning ({len(reasoning_text)} chars): {reasoning_text}")
|
||||||
logging.debug(f"Captured reasoning ({len(reasoning_text)} chars): {preview}")
|
|
||||||
|
|
||||||
if reasoning_text and self.reasoning_callback:
|
if reasoning_text and self.reasoning_callback:
|
||||||
try:
|
try:
|
||||||
|
|
@ -3823,8 +3822,12 @@ class AIAgent:
|
||||||
print(f" ⚡ Concurrent: {num_tools} tool calls — {tool_names_str}")
|
print(f" ⚡ Concurrent: {num_tools} tool calls — {tool_names_str}")
|
||||||
for i, (tc, name, args) in enumerate(parsed_calls, 1):
|
for i, (tc, name, args) in enumerate(parsed_calls, 1):
|
||||||
args_str = json.dumps(args, ensure_ascii=False)
|
args_str = json.dumps(args, ensure_ascii=False)
|
||||||
args_preview = args_str[:self.log_prefix_chars] + "..." if len(args_str) > self.log_prefix_chars else args_str
|
if self.verbose_logging:
|
||||||
print(f" 📞 Tool {i}: {name}({list(args.keys())}) - {args_preview}")
|
print(f" 📞 Tool {i}: {name}({list(args.keys())})")
|
||||||
|
print(f" Args: {args_str}")
|
||||||
|
else:
|
||||||
|
args_preview = args_str[:self.log_prefix_chars] + "..." if len(args_str) > self.log_prefix_chars else args_str
|
||||||
|
print(f" 📞 Tool {i}: {name}({list(args.keys())}) - {args_preview}")
|
||||||
|
|
||||||
for _, name, args in parsed_calls:
|
for _, name, args in parsed_calls:
|
||||||
if self.tool_progress_callback:
|
if self.tool_progress_callback:
|
||||||
|
|
@ -3889,17 +3892,20 @@ class AIAgent:
|
||||||
logger.warning("Tool %s returned error (%.2fs): %s", function_name, tool_duration, result_preview)
|
logger.warning("Tool %s returned error (%.2fs): %s", function_name, tool_duration, result_preview)
|
||||||
|
|
||||||
if self.verbose_logging:
|
if self.verbose_logging:
|
||||||
result_preview = function_result[:200] if len(function_result) > 200 else function_result
|
|
||||||
logging.debug(f"Tool {function_name} completed in {tool_duration:.2f}s")
|
logging.debug(f"Tool {function_name} completed in {tool_duration:.2f}s")
|
||||||
logging.debug(f"Tool result preview: {result_preview}...")
|
logging.debug(f"Tool result ({len(function_result)} chars): {function_result}")
|
||||||
|
|
||||||
# Print cute message per tool
|
# Print cute message per tool
|
||||||
if self.quiet_mode:
|
if self.quiet_mode:
|
||||||
cute_msg = _get_cute_tool_message_impl(name, args, tool_duration, result=function_result)
|
cute_msg = _get_cute_tool_message_impl(name, args, tool_duration, result=function_result)
|
||||||
print(f" {cute_msg}")
|
print(f" {cute_msg}")
|
||||||
elif not self.quiet_mode:
|
elif not self.quiet_mode:
|
||||||
response_preview = function_result[:self.log_prefix_chars] + "..." if len(function_result) > self.log_prefix_chars else function_result
|
if self.verbose_logging:
|
||||||
print(f" ✅ Tool {i+1} completed in {tool_duration:.2f}s - {response_preview}")
|
print(f" ✅ Tool {i+1} completed in {tool_duration:.2f}s")
|
||||||
|
print(f" Result: {function_result}")
|
||||||
|
else:
|
||||||
|
response_preview = function_result[:self.log_prefix_chars] + "..." if len(function_result) > self.log_prefix_chars else function_result
|
||||||
|
print(f" ✅ Tool {i+1} completed in {tool_duration:.2f}s - {response_preview}")
|
||||||
|
|
||||||
# Truncate oversized results
|
# Truncate oversized results
|
||||||
MAX_TOOL_RESULT_CHARS = 100_000
|
MAX_TOOL_RESULT_CHARS = 100_000
|
||||||
|
|
@ -3975,8 +3981,12 @@ class AIAgent:
|
||||||
|
|
||||||
if not self.quiet_mode:
|
if not self.quiet_mode:
|
||||||
args_str = json.dumps(function_args, ensure_ascii=False)
|
args_str = json.dumps(function_args, ensure_ascii=False)
|
||||||
args_preview = args_str[:self.log_prefix_chars] + "..." if len(args_str) > self.log_prefix_chars else args_str
|
if self.verbose_logging:
|
||||||
print(f" 📞 Tool {i}: {function_name}({list(function_args.keys())}) - {args_preview}")
|
print(f" 📞 Tool {i}: {function_name}({list(function_args.keys())})")
|
||||||
|
print(f" Args: {args_str}")
|
||||||
|
else:
|
||||||
|
args_preview = args_str[:self.log_prefix_chars] + "..." if len(args_str) > self.log_prefix_chars else args_str
|
||||||
|
print(f" 📞 Tool {i}: {function_name}({list(function_args.keys())}) - {args_preview}")
|
||||||
|
|
||||||
if self.tool_progress_callback:
|
if self.tool_progress_callback:
|
||||||
try:
|
try:
|
||||||
|
|
@ -4132,7 +4142,9 @@ class AIAgent:
|
||||||
logger.error("handle_function_call raised for %s: %s", function_name, tool_error, exc_info=True)
|
logger.error("handle_function_call raised for %s: %s", function_name, tool_error, exc_info=True)
|
||||||
tool_duration = time.time() - tool_start_time
|
tool_duration = time.time() - tool_start_time
|
||||||
|
|
||||||
result_preview = function_result[:200] if len(function_result) > 200 else function_result
|
result_preview = function_result if self.verbose_logging else (
|
||||||
|
function_result[:200] if len(function_result) > 200 else function_result
|
||||||
|
)
|
||||||
|
|
||||||
# Log tool errors to the persistent error log so [error] tags
|
# Log tool errors to the persistent error log so [error] tags
|
||||||
# in the UI always have a corresponding detailed entry on disk.
|
# in the UI always have a corresponding detailed entry on disk.
|
||||||
|
|
@ -4142,7 +4154,7 @@ class AIAgent:
|
||||||
|
|
||||||
if self.verbose_logging:
|
if self.verbose_logging:
|
||||||
logging.debug(f"Tool {function_name} completed in {tool_duration:.2f}s")
|
logging.debug(f"Tool {function_name} completed in {tool_duration:.2f}s")
|
||||||
logging.debug(f"Tool result preview: {result_preview}...")
|
logging.debug(f"Tool result ({len(function_result)} chars): {function_result}")
|
||||||
|
|
||||||
# Guard against tools returning absurdly large content that would
|
# Guard against tools returning absurdly large content that would
|
||||||
# blow up the context window. 100K chars ≈ 25K tokens — generous
|
# blow up the context window. 100K chars ≈ 25K tokens — generous
|
||||||
|
|
@ -4165,8 +4177,12 @@ class AIAgent:
|
||||||
messages.append(tool_msg)
|
messages.append(tool_msg)
|
||||||
|
|
||||||
if not self.quiet_mode:
|
if not self.quiet_mode:
|
||||||
response_preview = function_result[:self.log_prefix_chars] + "..." if len(function_result) > self.log_prefix_chars else function_result
|
if self.verbose_logging:
|
||||||
print(f" ✅ Tool {i} completed in {tool_duration:.2f}s - {response_preview}")
|
print(f" ✅ Tool {i} completed in {tool_duration:.2f}s")
|
||||||
|
print(f" Result: {function_result}")
|
||||||
|
else:
|
||||||
|
response_preview = function_result[:self.log_prefix_chars] + "..." if len(function_result) > self.log_prefix_chars else function_result
|
||||||
|
print(f" ✅ Tool {i} completed in {tool_duration:.2f}s - {response_preview}")
|
||||||
|
|
||||||
if self._interrupt_requested and i < len(assistant_message.tool_calls):
|
if self._interrupt_requested and i < len(assistant_message.tool_calls):
|
||||||
remaining = len(assistant_message.tool_calls) - i
|
remaining = len(assistant_message.tool_calls) - i
|
||||||
|
|
@ -5418,7 +5434,10 @@ class AIAgent:
|
||||||
|
|
||||||
# Handle assistant response
|
# Handle assistant response
|
||||||
if assistant_message.content and not self.quiet_mode:
|
if assistant_message.content and not self.quiet_mode:
|
||||||
self._vprint(f"{self.log_prefix}🤖 Assistant: {assistant_message.content[:100]}{'...' if len(assistant_message.content) > 100 else ''}")
|
if self.verbose_logging:
|
||||||
|
self._vprint(f"{self.log_prefix}🤖 Assistant: {assistant_message.content}")
|
||||||
|
else:
|
||||||
|
self._vprint(f"{self.log_prefix}🤖 Assistant: {assistant_message.content[:100]}{'...' if len(assistant_message.content) > 100 else ''}")
|
||||||
|
|
||||||
# Notify progress callback of model's thinking (used by subagent
|
# Notify progress callback of model's thinking (used by subagent
|
||||||
# delegation to relay the child's reasoning to the parent display).
|
# delegation to relay the child's reasoning to the parent display).
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue