merge: resolve file_tools.py conflict with origin/main

Combine read/search loop detection with main's redact_sensitive_text
and truncation hint features. Add tracker reset to TestSearchHints
to prevent cross-test state leakage.
0xbyt4 2026-03-09 13:21:46 +03:00
commit 4684aaffdc
104 changed files with 13720 additions and 2489 deletions


@@ -0,0 +1,276 @@
"""Tests for browser_console tool and browser_vision annotate param."""
import json
import os
import sys
from unittest.mock import patch, MagicMock
import pytest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
# ── browser_console ──────────────────────────────────────────────────
class TestBrowserConsole:
"""browser_console() returns console messages + JS errors in one call."""
def test_returns_console_messages_and_errors(self):
from tools.browser_tool import browser_console
console_response = {
"success": True,
"data": {
"messages": [
{"text": "hello", "type": "log", "timestamp": 1},
{"text": "oops", "type": "error", "timestamp": 2},
]
},
}
errors_response = {
"success": True,
"data": {
"errors": [
{"message": "Uncaught TypeError", "timestamp": 3},
]
},
}
with patch("tools.browser_tool._run_browser_command") as mock_cmd:
mock_cmd.side_effect = [console_response, errors_response]
result = json.loads(browser_console(task_id="test"))
assert result["success"] is True
assert result["total_messages"] == 2
assert result["total_errors"] == 1
assert result["console_messages"][0]["text"] == "hello"
assert result["console_messages"][1]["text"] == "oops"
assert result["js_errors"][0]["message"] == "Uncaught TypeError"
def test_passes_clear_flag(self):
from tools.browser_tool import browser_console
empty = {"success": True, "data": {"messages": [], "errors": []}}
with patch("tools.browser_tool._run_browser_command", return_value=empty) as mock_cmd:
browser_console(clear=True, task_id="test")
calls = mock_cmd.call_args_list
# Both console and errors should get --clear
assert calls[0][0] == ("test", "console", ["--clear"])
assert calls[1][0] == ("test", "errors", ["--clear"])
def test_no_clear_by_default(self):
from tools.browser_tool import browser_console
empty = {"success": True, "data": {"messages": [], "errors": []}}
with patch("tools.browser_tool._run_browser_command", return_value=empty) as mock_cmd:
browser_console(task_id="test")
calls = mock_cmd.call_args_list
assert calls[0][0] == ("test", "console", [])
assert calls[1][0] == ("test", "errors", [])
def test_empty_console_and_errors(self):
from tools.browser_tool import browser_console
empty = {"success": True, "data": {"messages": [], "errors": []}}
with patch("tools.browser_tool._run_browser_command", return_value=empty):
result = json.loads(browser_console(task_id="test"))
assert result["total_messages"] == 0
assert result["total_errors"] == 0
assert result["console_messages"] == []
assert result["js_errors"] == []
def test_handles_failed_commands(self):
from tools.browser_tool import browser_console
failed = {"success": False, "error": "No session"}
with patch("tools.browser_tool._run_browser_command", return_value=failed):
result = json.loads(browser_console(task_id="test"))
# Should still return success with empty data
assert result["success"] is True
assert result["total_messages"] == 0
assert result["total_errors"] == 0
# ── browser_console schema ───────────────────────────────────────────
class TestBrowserConsoleSchema:
"""browser_console is properly registered in the tool registry."""
def test_schema_in_browser_schemas(self):
from tools.browser_tool import BROWSER_TOOL_SCHEMAS
names = [s["name"] for s in BROWSER_TOOL_SCHEMAS]
assert "browser_console" in names
def test_schema_has_clear_param(self):
from tools.browser_tool import BROWSER_TOOL_SCHEMAS
schema = next(s for s in BROWSER_TOOL_SCHEMAS if s["name"] == "browser_console")
props = schema["parameters"]["properties"]
assert "clear" in props
assert props["clear"]["type"] == "boolean"
# ── browser_vision annotate ──────────────────────────────────────────
class TestBrowserVisionAnnotate:
"""browser_vision supports annotate parameter."""
def test_schema_has_annotate_param(self):
from tools.browser_tool import BROWSER_TOOL_SCHEMAS
schema = next(s for s in BROWSER_TOOL_SCHEMAS if s["name"] == "browser_vision")
props = schema["parameters"]["properties"]
assert "annotate" in props
assert props["annotate"]["type"] == "boolean"
def test_annotate_false_no_flag(self):
"""Without annotate, screenshot command has no --annotate flag."""
from tools.browser_tool import browser_vision
with (
patch("tools.browser_tool._run_browser_command") as mock_cmd,
patch("tools.browser_tool._aux_vision_client") as mock_client,
patch("tools.browser_tool._DEFAULT_VISION_MODEL", "test-model"),
patch("tools.browser_tool._get_vision_model", return_value="test-model"),
):
mock_cmd.return_value = {"success": True, "data": {}}
# Will fail at screenshot file read, but we can check the command
try:
browser_vision("test", annotate=False, task_id="test")
except Exception:
pass
if mock_cmd.called:
args = mock_cmd.call_args[0]
cmd_args = args[2] if len(args) > 2 else []
assert "--annotate" not in cmd_args
def test_annotate_true_adds_flag(self):
"""With annotate=True, screenshot command includes --annotate."""
from tools.browser_tool import browser_vision
with (
patch("tools.browser_tool._run_browser_command") as mock_cmd,
patch("tools.browser_tool._aux_vision_client") as mock_client,
patch("tools.browser_tool._DEFAULT_VISION_MODEL", "test-model"),
patch("tools.browser_tool._get_vision_model", return_value="test-model"),
):
mock_cmd.return_value = {"success": True, "data": {}}
try:
browser_vision("test", annotate=True, task_id="test")
except Exception:
pass
if mock_cmd.called:
args = mock_cmd.call_args[0]
cmd_args = args[2] if len(args) > 2 else []
assert "--annotate" in cmd_args
# ── auto-recording config ────────────────────────────────────────────
class TestRecordSessionsConfig:
"""browser.record_sessions config option."""
def test_default_config_has_record_sessions(self):
from hermes_cli.config import DEFAULT_CONFIG
browser_cfg = DEFAULT_CONFIG.get("browser", {})
assert "record_sessions" in browser_cfg
assert browser_cfg["record_sessions"] is False
def test_maybe_start_recording_disabled(self):
"""Recording doesn't start when config says record_sessions: false."""
from tools.browser_tool import _maybe_start_recording, _recording_sessions
with (
patch("tools.browser_tool._run_browser_command") as mock_cmd,
patch("builtins.open", side_effect=FileNotFoundError),
):
_maybe_start_recording("test-task")
mock_cmd.assert_not_called()
assert "test-task" not in _recording_sessions
def test_maybe_stop_recording_noop_when_not_recording(self):
"""Stopping when not recording is a no-op."""
from tools.browser_tool import _maybe_stop_recording, _recording_sessions
_recording_sessions.discard("test-task") # ensure not in set
with patch("tools.browser_tool._run_browser_command") as mock_cmd:
_maybe_stop_recording("test-task")
mock_cmd.assert_not_called()
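
A sketch of `_maybe_start_recording` consistent with these tests; `CONFIG_PATH` and the `record` subcommand are assumptions. The patched `open` raising FileNotFoundError falls through to the disabled default, so no browser command is issued:

import json

CONFIG_PATH = "config.json"  # hypothetical location
_recording_sessions: set[str] = set()

def _maybe_start_recording(task_id: str) -> None:
    """Sketch: start recording only when browser.record_sessions is true."""
    try:
        with open(CONFIG_PATH) as f:
            cfg = json.load(f)
    except FileNotFoundError:
        cfg = {}  # no config file means defaults, and recording defaults to off
    if not cfg.get("browser", {}).get("record_sessions", False):
        return
    _run_browser_command(task_id, "record", ["--start"])  # subcommand name is a guess
    _recording_sessions.add(task_id)
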
# ── dogfood skill files ──────────────────────────────────────────────
class TestDogfoodSkill:
"""Dogfood skill files exist and have correct structure."""
@pytest.fixture(autouse=True)
def _skill_dir(self):
# Use the actual repo skills dir (not temp)
self.skill_dir = os.path.join(
os.path.dirname(__file__), "..", "..", "skills", "dogfood"
)
def test_skill_md_exists(self):
assert os.path.exists(os.path.join(self.skill_dir, "SKILL.md"))
def test_taxonomy_exists(self):
assert os.path.exists(
os.path.join(self.skill_dir, "references", "issue-taxonomy.md")
)
def test_report_template_exists(self):
assert os.path.exists(
os.path.join(self.skill_dir, "templates", "dogfood-report-template.md")
)
def test_skill_md_has_frontmatter(self):
with open(os.path.join(self.skill_dir, "SKILL.md")) as f:
content = f.read()
assert content.startswith("---")
assert "name: dogfood" in content
assert "description:" in content
def test_skill_references_browser_console(self):
with open(os.path.join(self.skill_dir, "SKILL.md")) as f:
content = f.read()
assert "browser_console" in content
def test_skill_references_annotate(self):
with open(os.path.join(self.skill_dir, "SKILL.md")) as f:
content = f.read()
assert "annotate" in content
def test_taxonomy_has_severity_levels(self):
with open(
os.path.join(self.skill_dir, "references", "issue-taxonomy.md")
) as f:
content = f.read()
assert "Critical" in content
assert "High" in content
assert "Medium" in content
assert "Low" in content
def test_taxonomy_has_categories(self):
with open(
os.path.join(self.skill_dir, "references", "issue-taxonomy.md")
) as f:
content = f.read()
assert "Functional" in content
assert "Visual" in content
assert "Accessibility" in content
assert "Console" in content


@@ -550,14 +550,13 @@ class TestConvertToPng:
"""BMP file should still be reported as success if no converter available."""
dest = tmp_path / "img.png"
dest.write_bytes(FAKE_BMP) # it's a BMP but named .png
# Both Pillow and ImageMagick fail
with patch("hermes_cli.clipboard.subprocess.run", side_effect=FileNotFoundError):
# Pillow import fails
with pytest.raises(Exception):
from PIL import Image # noqa — this may or may not work
# The function should still return True if file exists and has content
# (raw BMP is better than nothing)
assert dest.exists() and dest.stat().st_size > 0
# Both Pillow and ImageMagick unavailable
with patch.dict(sys.modules, {"PIL": None, "PIL.Image": None}):
with patch("hermes_cli.clipboard.subprocess.run", side_effect=FileNotFoundError):
result = _convert_to_png(dest)
# Raw BMP is better than nothing — function should return True
assert result is True
assert dest.exists() and dest.stat().st_size > 0
# ── has_clipboard_image dispatch ─────────────────────────────────────────
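
Stepping outside the diff: the replacement test above implies a fallback chain in `_convert_to_png`, Pillow first, then ImageMagick, then keeping the raw bytes. A sketch under those assumptions (the converter order and the `magick` invocation are guesses):

import subprocess
from pathlib import Path

def _convert_to_png(dest: Path) -> bool:
    """Sketch: best-effort conversion; raw bytes still count as success."""
    try:
        from PIL import Image
        img = Image.open(dest)
        img.load()  # force decode before overwriting the same path
        img.save(dest, "PNG")
        return True
    except Exception:
        pass
    try:
        subprocess.run(["magick", str(dest), str(dest)], check=True)
        return True
    except (FileNotFoundError, subprocess.CalledProcessError):
        pass
    # No converter available: a raw BMP is better than nothing.
    return dest.exists() and dest.stat().st_size > 0
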


@@ -259,6 +259,70 @@ class TestShellFileOpsHelpers:
assert ops.cwd == "/"
class TestSearchPathValidation:
"""Test that search() returns an error for non-existent paths."""
def test_search_nonexistent_path_returns_error(self, mock_env):
"""search() should return an error when the path doesn't exist."""
def side_effect(command, **kwargs):
if "test -e" in command:
return {"output": "not_found", "returncode": 1}
if "command -v" in command:
return {"output": "yes", "returncode": 0}
return {"output": "", "returncode": 0}
mock_env.execute.side_effect = side_effect
ops = ShellFileOperations(mock_env)
result = ops.search("pattern", path="/nonexistent/path")
assert result.error is not None
assert "not found" in result.error.lower() or "Path not found" in result.error
def test_search_nonexistent_path_files_mode(self, mock_env):
"""search(target='files') should also return error for bad paths."""
def side_effect(command, **kwargs):
if "test -e" in command:
return {"output": "not_found", "returncode": 1}
if "command -v" in command:
return {"output": "yes", "returncode": 0}
return {"output": "", "returncode": 0}
mock_env.execute.side_effect = side_effect
ops = ShellFileOperations(mock_env)
result = ops.search("*.py", path="/nonexistent/path", target="files")
assert result.error is not None
assert "not found" in result.error.lower() or "Path not found" in result.error
def test_search_existing_path_proceeds(self, mock_env):
"""search() should proceed normally when the path exists."""
def side_effect(command, **kwargs):
if "test -e" in command:
return {"output": "exists", "returncode": 0}
if "command -v" in command:
return {"output": "yes", "returncode": 0}
# rg returns exit 1 (no matches) with empty output
return {"output": "", "returncode": 1}
mock_env.execute.side_effect = side_effect
ops = ShellFileOperations(mock_env)
result = ops.search("pattern", path="/existing/path")
assert result.error is None
assert result.total_count == 0 # No matches but no error
def test_search_rg_error_exit_code(self, mock_env):
"""search() should report error when rg returns exit code 2."""
def side_effect(command, **kwargs):
if "test -e" in command:
return {"output": "exists", "returncode": 0}
if "command -v" in command:
return {"output": "yes", "returncode": 0}
# rg returns exit 2 (error) with empty output
return {"output": "", "returncode": 2}
mock_env.execute.side_effect = side_effect
ops = ShellFileOperations(mock_env)
result = ops.search("pattern", path="/some/path")
assert result.error is not None
assert "search failed" in result.error.lower() or "Search error" in result.error
class TestShellFileOpsWriteDenied:
def test_write_file_denied_path(self, file_ops):
result = file_ops.write_file("~/.ssh/authorized_keys", "evil key")


@@ -38,6 +38,7 @@ class TestReadFileHandler:
def test_returns_file_content(self, mock_get):
mock_ops = MagicMock()
result_obj = MagicMock()
result_obj.content = "line1\nline2"
result_obj.to_dict.return_value = {"content": "line1\nline2", "total_lines": 2}
mock_ops.read_file.return_value = result_obj
mock_get.return_value = mock_ops
@@ -52,6 +53,7 @@ class TestReadFileHandler:
def test_custom_offset_and_limit(self, mock_get):
mock_ops = MagicMock()
result_obj = MagicMock()
result_obj.content = "line10"
result_obj.to_dict.return_value = {"content": "line10", "total_lines": 50}
mock_ops.read_file.return_value = result_obj
mock_get.return_value = mock_ops
@@ -200,3 +202,96 @@ class TestSearchHandler:
from tools.file_tools import search_tool
result = json.loads(search_tool(pattern="x"))
assert "error" in result
# ---------------------------------------------------------------------------
# Tool result hint tests (#722)
# ---------------------------------------------------------------------------
class TestPatchHints:
"""Patch tool should hint when old_string is not found."""
@patch("tools.file_tools._get_file_ops")
def test_no_match_includes_hint(self, mock_get):
mock_ops = MagicMock()
result_obj = MagicMock()
result_obj.to_dict.return_value = {
"error": "Could not find match for old_string in foo.py"
}
mock_ops.patch_replace.return_value = result_obj
mock_get.return_value = mock_ops
from tools.file_tools import patch_tool
raw = patch_tool(mode="replace", path="foo.py", old_string="x", new_string="y")
assert "[Hint:" in raw
assert "read_file" in raw
@patch("tools.file_tools._get_file_ops")
def test_success_no_hint(self, mock_get):
mock_ops = MagicMock()
result_obj = MagicMock()
result_obj.to_dict.return_value = {"success": True, "diff": "--- a\n+++ b"}
mock_ops.patch_replace.return_value = result_obj
mock_get.return_value = mock_ops
from tools.file_tools import patch_tool
raw = patch_tool(mode="replace", path="foo.py", old_string="x", new_string="y")
assert "[Hint:" not in raw
class TestSearchHints:
"""Search tool should hint when results are truncated."""
def setup_method(self):
"""Clear read/search tracker between tests to avoid cross-test state."""
from tools.file_tools import clear_read_tracker
clear_read_tracker()
@patch("tools.file_tools._get_file_ops")
def test_truncated_results_hint(self, mock_get):
mock_ops = MagicMock()
result_obj = MagicMock()
result_obj.to_dict.return_value = {
"total_count": 100,
"matches": [{"path": "a.py", "line": 1, "content": "x"}] * 50,
"truncated": True,
}
mock_ops.search.return_value = result_obj
mock_get.return_value = mock_ops
from tools.file_tools import search_tool
raw = search_tool(pattern="foo", offset=0, limit=50)
assert "[Hint:" in raw
assert "offset=50" in raw
@patch("tools.file_tools._get_file_ops")
def test_non_truncated_no_hint(self, mock_get):
mock_ops = MagicMock()
result_obj = MagicMock()
result_obj.to_dict.return_value = {
"total_count": 3,
"matches": [{"path": "a.py", "line": 1, "content": "x"}] * 3,
}
mock_ops.search.return_value = result_obj
mock_get.return_value = mock_ops
from tools.file_tools import search_tool
raw = search_tool(pattern="foo")
assert "[Hint:" not in raw
@patch("tools.file_tools._get_file_ops")
def test_truncated_hint_with_nonzero_offset(self, mock_get):
mock_ops = MagicMock()
result_obj = MagicMock()
result_obj.to_dict.return_value = {
"total_count": 150,
"matches": [{"path": "a.py", "line": 1, "content": "x"}] * 50,
"truncated": True,
}
mock_ops.search.return_value = result_obj
mock_get.return_value = mock_ops
from tools.file_tools import search_tool
raw = search_tool(pattern="foo", offset=50, limit=50)
assert "[Hint:" in raw
assert "offset=100" in raw