fix: update all test mocks for call_llm migration
Update 14 test files to use the new call_llm/async_call_llm mock patterns instead of the old get_text_auxiliary_client / get_vision_auxiliary_client tuple returns. - vision_tools tests: mock async_call_llm instead of _aux_async_client - browser tests: mock call_llm instead of _aux_vision_client - flush_memories tests: mock call_llm instead of get_text_auxiliary_client - session_search tests: mock async_call_llm with RuntimeError - mcp_tool tests: fix whitelist model config, use side_effect for multi-response tests - auxiliary_config_bridge: update for model=None (resolved in router) 3251 passed, 2 pre-existing unrelated failures.
This commit is contained in:
parent
0aa31cd3cb
commit
29ef69c703
7 changed files with 40 additions and 55 deletions
|
|
@ -137,8 +137,7 @@ class TestBrowserVisionAnnotate:
|
|||
|
||||
with (
|
||||
patch("tools.browser_tool._run_browser_command") as mock_cmd,
|
||||
patch("tools.browser_tool._aux_vision_client") as mock_client,
|
||||
patch("tools.browser_tool._DEFAULT_VISION_MODEL", "test-model"),
|
||||
patch("tools.browser_tool.call_llm") as mock_call_llm,
|
||||
patch("tools.browser_tool._get_vision_model", return_value="test-model"),
|
||||
):
|
||||
mock_cmd.return_value = {"success": True, "data": {}}
|
||||
|
|
@ -159,8 +158,7 @@ class TestBrowserVisionAnnotate:
|
|||
|
||||
with (
|
||||
patch("tools.browser_tool._run_browser_command") as mock_cmd,
|
||||
patch("tools.browser_tool._aux_vision_client") as mock_client,
|
||||
patch("tools.browser_tool._DEFAULT_VISION_MODEL", "test-model"),
|
||||
patch("tools.browser_tool.call_llm") as mock_call_llm,
|
||||
patch("tools.browser_tool._get_vision_model", return_value="test-model"),
|
||||
):
|
||||
mock_cmd.return_value = {"success": True, "data": {}}
|
||||
|
|
|
|||
|
|
@ -1956,24 +1956,26 @@ class TestToolLoopGovernance:
|
|||
def test_text_response_resets_counter(self):
|
||||
"""A text response resets the tool loop counter."""
|
||||
handler = SamplingHandler("tl2", {"max_tool_rounds": 1})
|
||||
fake_client = MagicMock()
|
||||
|
||||
# Use a list to hold the current response, so the side_effect can
|
||||
# pick up changes between calls.
|
||||
responses = [_make_llm_tool_response()]
|
||||
|
||||
with patch(
|
||||
"agent.auxiliary_client.call_llm",
|
||||
return_value=fake_client.chat.completions.create.return_value,
|
||||
side_effect=lambda **kw: responses[0],
|
||||
):
|
||||
# Tool response (round 1 of 1 allowed)
|
||||
fake_client.chat.completions.create.return_value = _make_llm_tool_response()
|
||||
r1 = asyncio.run(handler(None, _make_sampling_params()))
|
||||
assert isinstance(r1, CreateMessageResultWithTools)
|
||||
|
||||
# Text response resets counter
|
||||
fake_client.chat.completions.create.return_value = _make_llm_response()
|
||||
responses[0] = _make_llm_response()
|
||||
r2 = asyncio.run(handler(None, _make_sampling_params()))
|
||||
assert isinstance(r2, CreateMessageResult)
|
||||
|
||||
# Tool response again (should succeed since counter was reset)
|
||||
fake_client.chat.completions.create.return_value = _make_llm_tool_response()
|
||||
responses[0] = _make_llm_tool_response()
|
||||
r3 = asyncio.run(handler(None, _make_sampling_params()))
|
||||
assert isinstance(r3, CreateMessageResultWithTools)
|
||||
|
||||
|
|
@ -2122,7 +2124,7 @@ class TestModelWhitelist:
|
|||
assert isinstance(result, CreateMessageResult)
|
||||
|
||||
def test_disallowed_model_rejected(self):
|
||||
handler = SamplingHandler("wl2", {"allowed_models": ["gpt-4o"]})
|
||||
handler = SamplingHandler("wl2", {"allowed_models": ["gpt-4o"], "model": "test-model"})
|
||||
fake_client = MagicMock()
|
||||
|
||||
with patch(
|
||||
|
|
|
|||
|
|
@ -189,16 +189,14 @@ class TestSessionSearch:
|
|||
{"role": "assistant", "content": "hi there"},
|
||||
]
|
||||
|
||||
# Mock the summarizer to return a simple summary
|
||||
import tools.session_search_tool as sst
|
||||
original_client = sst._async_aux_client
|
||||
sst._async_aux_client = None # Disable summarizer → returns None
|
||||
|
||||
result = json.loads(session_search(
|
||||
query="test", db=mock_db, current_session_id=current_sid,
|
||||
))
|
||||
|
||||
sst._async_aux_client = original_client
|
||||
# Mock async_call_llm to raise RuntimeError → summarizer returns None
|
||||
from unittest.mock import AsyncMock, patch as _patch
|
||||
with _patch("tools.session_search_tool.async_call_llm",
|
||||
new_callable=AsyncMock,
|
||||
side_effect=RuntimeError("no provider")):
|
||||
result = json.loads(session_search(
|
||||
query="test", db=mock_db, current_session_id=current_sid,
|
||||
))
|
||||
|
||||
assert result["success"] is True
|
||||
# Current session should be skipped, only other_sid should appear
|
||||
|
|
|
|||
|
|
@ -202,7 +202,7 @@ class TestHandleVisionAnalyze:
|
|||
assert model == "custom/model-v1"
|
||||
|
||||
def test_falls_back_to_default_model(self):
|
||||
"""Without AUXILIARY_VISION_MODEL, should use DEFAULT_VISION_MODEL or fallback."""
|
||||
"""Without AUXILIARY_VISION_MODEL, model should be None (let call_llm resolve default)."""
|
||||
with (
|
||||
patch(
|
||||
"tools.vision_tools.vision_analyze_tool", new_callable=AsyncMock
|
||||
|
|
@ -218,9 +218,9 @@ class TestHandleVisionAnalyze:
|
|||
coro.close()
|
||||
call_args = mock_tool.call_args
|
||||
model = call_args[0][2]
|
||||
# Should be DEFAULT_VISION_MODEL or the hardcoded fallback
|
||||
assert model is not None
|
||||
assert len(model) > 0
|
||||
# With no AUXILIARY_VISION_MODEL set, model should be None
|
||||
# (the centralized call_llm router picks the default)
|
||||
assert model is None
|
||||
|
||||
def test_empty_args_graceful(self):
|
||||
"""Missing keys should default to empty strings, not raise."""
|
||||
|
|
@ -277,8 +277,6 @@ class TestErrorLoggingExcInfo:
|
|||
new_callable=AsyncMock,
|
||||
side_effect=Exception("download boom"),
|
||||
),
|
||||
patch("tools.vision_tools._aux_async_client", MagicMock()),
|
||||
patch("tools.vision_tools.DEFAULT_VISION_MODEL", "test/model"),
|
||||
caplog.at_level(logging.ERROR, logger="tools.vision_tools"),
|
||||
):
|
||||
result = await vision_analyze_tool(
|
||||
|
|
@ -311,25 +309,16 @@ class TestErrorLoggingExcInfo:
|
|||
"tools.vision_tools._image_to_base64_data_url",
|
||||
return_value="data:image/jpeg;base64,abc",
|
||||
),
|
||||
patch("agent.auxiliary_client.get_auxiliary_extra_body", return_value=None),
|
||||
patch(
|
||||
"agent.auxiliary_client.auxiliary_max_tokens_param",
|
||||
return_value={"max_tokens": 2000},
|
||||
),
|
||||
caplog.at_level(logging.WARNING, logger="tools.vision_tools"),
|
||||
):
|
||||
# Mock the vision client
|
||||
mock_client = AsyncMock()
|
||||
# Mock the async_call_llm function to return a mock response
|
||||
mock_response = MagicMock()
|
||||
mock_choice = MagicMock()
|
||||
mock_choice.message.content = "A test image description"
|
||||
mock_response.choices = [mock_choice]
|
||||
mock_client.chat.completions.create = AsyncMock(return_value=mock_response)
|
||||
|
||||
# Patch module-level _aux_async_client so the tool doesn't bail early
|
||||
with (
|
||||
patch("tools.vision_tools._aux_async_client", mock_client),
|
||||
patch("tools.vision_tools.DEFAULT_VISION_MODEL", "test/model"),
|
||||
patch("tools.vision_tools.async_call_llm", new_callable=AsyncMock, return_value=mock_response),
|
||||
):
|
||||
# Make unlink fail to trigger cleanup warning
|
||||
original_unlink = Path.unlink
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue