feat: add high-value tool result hints for patch and search_files (#722)

Add contextual [Hint: ...] suffixes to tool results where they save
real iterations:

- patch (no match): suggests read_file/search_files to verify content
  before retrying — addresses the common pattern where the agent retries
  with stale old_string instead of re-reading the file.
- search_files (truncated): provides explicit next offset and suggests
  narrowing the search — clearer than relying on total_count inference.

Other hints proposed in #722 (terminal, web_search, web_extract,
browser_snapshot, search zero-results, search content-matches) were
evaluated and found to be low-value: either already covered by existing
mechanisms (read_file pagination, similar-files, schema descriptions)
or guidance the agent already follows from its own reasoning.

Adds 5 new tests covering hint presence/absence for both tools.
This commit is contained in:
teknium1 2026-03-08 17:46:28 -07:00
parent 97b1c76b14
commit 491605cfea
2 changed files with 103 additions and 2 deletions

View file

@ -164,7 +164,13 @@ def patch_tool(mode: str = "replace", path: str = None, old_string: str = None,
else:
return json.dumps({"error": f"Unknown mode: {mode}"})
return json.dumps(result.to_dict(), ensure_ascii=False)
result_dict = result.to_dict()
result_json = json.dumps(result_dict, ensure_ascii=False)
# Hint when old_string not found — saves iterations where the agent
# retries with stale content instead of re-reading the file.
if result_dict.get("error") and "Could not find" in str(result_dict["error"]):
result_json += "\n\n[Hint: old_string not found. Use read_file to verify the current content, or search_files to locate the text.]"
return result_json
except Exception as e:
return json.dumps({"error": str(e)}, ensure_ascii=False)
@ -180,7 +186,14 @@ def search_tool(pattern: str, target: str = "content", path: str = ".",
pattern=pattern, path=path, target=target, file_glob=file_glob,
limit=limit, offset=offset, output_mode=output_mode, context=context
)
return json.dumps(result.to_dict(), ensure_ascii=False)
result_dict = result.to_dict()
result_json = json.dumps(result_dict, ensure_ascii=False)
# Hint when results were truncated — explicit next offset is clearer
# than relying on the model to infer it from total_count vs match count.
if result_dict.get("truncated"):
next_offset = offset + limit
result_json += f"\n\n[Hint: Results truncated. Use offset={next_offset} to see more, or narrow with a more specific pattern or file_glob.]"
return result_json
except Exception as e:
return json.dumps({"error": str(e)}, ensure_ascii=False)