Merge origin/main, resolve conflicts (self._base_url_lower)
This commit is contained in:
commit
e7844e9c8d
54 changed files with 2281 additions and 179 deletions
|
|
@ -173,10 +173,6 @@ def _build_child_agent(
|
|||
from run_agent import AIAgent
|
||||
import model_tools
|
||||
|
||||
# Save the parent's resolved tool names before the child agent can
|
||||
# overwrite the process-global via get_tool_definitions().
|
||||
_saved_tool_names = list(model_tools._last_resolved_tool_names)
|
||||
|
||||
# When no explicit toolsets given, inherit from parent's enabled toolsets
|
||||
# so disabled tools (e.g. web) don't leak to subagents.
|
||||
if toolsets:
|
||||
|
|
@ -268,6 +264,13 @@ def _run_single_child(
|
|||
# Get the progress callback from the child agent
|
||||
child_progress_cb = getattr(child, 'tool_progress_callback', None)
|
||||
|
||||
# Save the parent's resolved tool names before the child agent can
|
||||
# overwrite the process-global via get_tool_definitions().
|
||||
# This must be in _run_single_child (not _build_child_agent) so the
|
||||
# save/restore happens in the same scope as the try/finally.
|
||||
import model_tools
|
||||
_saved_tool_names = list(model_tools._last_resolved_tool_names)
|
||||
|
||||
try:
|
||||
result = child.run_conversation(user_message=goal)
|
||||
|
||||
|
|
|
|||
|
|
@ -1238,6 +1238,57 @@ def _convert_mcp_schema(server_name: str, mcp_tool) -> dict:
|
|||
}
|
||||
|
||||
|
||||
def _sync_mcp_toolsets(server_names: Optional[List[str]] = None) -> None:
    """Register every MCP server as its own toolset and feed hermes-* sets.

    For each server name a dedicated entry is written into TOOLSETS (e.g.
    TOOLSETS["github"] = {"tools": ["mcp_github_list_files", ...]}), which
    lets raw server names resolve in platform_toolsets overrides.

    Every discovered MCP tool is additionally merged into each hermes-*
    umbrella toolset so the default behavior keeps exposing them.

    A server whose name clashes with a pre-existing built-in toolset is
    left alone (a warning is logged instead of overwriting it).
    """
    from toolsets import TOOLSETS

    # No explicit list → sync every server from the MCP config file.
    if server_names is None:
        server_names = list(_load_mcp_config().keys())

    known_tools = _existing_tool_names()
    collected: List[str] = []  # every MCP tool seen, across all servers

    for server in server_names:
        # Tool names are registered as mcp_<sanitized-server>_<tool>.
        prefix = f"mcp_{server.replace('-', '_').replace('.', '_')}_"
        matched = sorted(name for name in known_tools if name.startswith(prefix))
        # Collected even when the alias below is skipped, so the tools
        # still reach the hermes-* umbrella sets.
        collected.extend(matched)

        # Refuse to clobber a built-in toolset that shares the server name;
        # our own prior entries are recognizable by their description.
        current = TOOLSETS.get(server)
        if current and not str(current.get("description", "")).startswith("MCP server '"):
            logger.warning(
                "Skipping MCP toolset alias '%s' — a built-in toolset already uses that name",
                server,
            )
            continue

        TOOLSETS[server] = {
            "description": f"MCP server '{server}' tools",
            "tools": matched,
            "includes": [],
        }

    # Merge everything into the hermes-* umbrella toolsets (deduplicated,
    # preserving insertion order).
    for ts_name, ts in TOOLSETS.items():
        if ts_name.startswith("hermes-"):
            bucket = ts["tools"]
            for tool in collected:
                if tool not in bucket:
                    bucket.append(tool)
|
||||
|
||||
|
||||
def _build_utility_schemas(server_name: str) -> List[dict]:
|
||||
"""Build schemas for the MCP utility tools (resources & prompts).
|
||||
|
||||
|
|
@ -1523,6 +1574,7 @@ def discover_mcp_tools() -> List[str]:
|
|||
}
|
||||
|
||||
if not new_servers:
|
||||
_sync_mcp_toolsets(list(servers.keys()))
|
||||
return _existing_tool_names()
|
||||
|
||||
# Start the background event loop for MCP connections
|
||||
|
|
@ -1562,14 +1614,7 @@ def discover_mcp_tools() -> List[str]:
|
|||
# The outer timeout is generous: 120s total for parallel discovery.
|
||||
_run_on_mcp_loop(_discover_all(), timeout=120)
|
||||
|
||||
if all_tools:
|
||||
# Dynamically inject into all hermes-* platform toolsets
|
||||
from toolsets import TOOLSETS
|
||||
for ts_name, ts in TOOLSETS.items():
|
||||
if ts_name.startswith("hermes-"):
|
||||
for tool_name in all_tools:
|
||||
if tool_name not in ts["tools"]:
|
||||
ts["tools"].append(tool_name)
|
||||
_sync_mcp_toolsets(list(servers.keys()))
|
||||
|
||||
# Print summary
|
||||
total_servers = len(new_servers)
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ Usage:
|
|||
python -m tools.neutts_synth --text "Hello" --out output.wav \
|
||||
--ref-audio samples/jo.wav --ref-text samples/jo.txt
|
||||
|
||||
Requires: pip install neutts[all]
|
||||
Requires: python -m pip install -U neutts[all]
|
||||
System: apt install espeak-ng (or brew install espeak-ng)
|
||||
"""
|
||||
|
||||
|
|
@ -75,7 +75,7 @@ def main():
|
|||
try:
|
||||
from neutts import NeuTTS
|
||||
except ImportError:
|
||||
print("Error: neutts not installed. Run: pip install neutts[all]", file=sys.stderr)
|
||||
print("Error: neutts not installed. Run: python -m pip install -U neutts[all]", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
tts = NeuTTS(
|
||||
|
|
|
|||
|
|
@ -1009,7 +1009,7 @@ async def rl_list_runs() -> str:
|
|||
TEST_MODELS = [
|
||||
{"id": "qwen/qwen3-8b", "name": "Qwen3 8B", "scale": "small"},
|
||||
{"id": "z-ai/glm-4.7-flash", "name": "GLM-4.7 Flash", "scale": "medium"},
|
||||
{"id": "minimax/minimax-m2.5", "name": "MiniMax M2.5", "scale": "large"},
|
||||
{"id": "minimax/minimax-m2.7", "name": "MiniMax M2.7", "scale": "large"},
|
||||
]
|
||||
|
||||
# Default test parameters - quick but representative
|
||||
|
|
@ -1370,7 +1370,7 @@ RL_CHECK_STATUS_SCHEMA = {"name": "rl_check_status", "description": "Get status
|
|||
RL_STOP_TRAINING_SCHEMA = {"name": "rl_stop_training", "description": "Stop a running training job. Use if metrics look bad, training is stagnant, or you want to try different settings.", "parameters": {"type": "object", "properties": {"run_id": {"type": "string", "description": "The run ID to stop"}}, "required": ["run_id"]}}
|
||||
RL_GET_RESULTS_SCHEMA = {"name": "rl_get_results", "description": "Get final results and metrics for a completed training run. Returns final metrics and path to trained weights.", "parameters": {"type": "object", "properties": {"run_id": {"type": "string", "description": "The run ID to get results for"}}, "required": ["run_id"]}}
|
||||
RL_LIST_RUNS_SCHEMA = {"name": "rl_list_runs", "description": "List all training runs (active and completed) with their status.", "parameters": {"type": "object", "properties": {}, "required": []}}
|
||||
RL_TEST_INFERENCE_SCHEMA = {"name": "rl_test_inference", "description": "Quick inference test for any environment. Runs a few steps of inference + scoring using OpenRouter. Default: 3 steps x 16 completions = 48 rollouts per model, testing 3 models = 144 total. Tests environment loading, prompt construction, inference parsing, and verifier logic. Use BEFORE training to catch issues.", "parameters": {"type": "object", "properties": {"num_steps": {"type": "integer", "description": "Number of steps to run (default: 3, recommended max for testing)", "default": 3}, "group_size": {"type": "integer", "description": "Completions per step (default: 16, like training)", "default": 16}, "models": {"type": "array", "items": {"type": "string"}, "description": "Optional list of OpenRouter model IDs. Default: qwen/qwen3-8b, z-ai/glm-4.7-flash, minimax/minimax-m2.5"}}, "required": []}}
|
||||
RL_TEST_INFERENCE_SCHEMA = {"name": "rl_test_inference", "description": "Quick inference test for any environment. Runs a few steps of inference + scoring using OpenRouter. Default: 3 steps x 16 completions = 48 rollouts per model, testing 3 models = 144 total. Tests environment loading, prompt construction, inference parsing, and verifier logic. Use BEFORE training to catch issues.", "parameters": {"type": "object", "properties": {"num_steps": {"type": "integer", "description": "Number of steps to run (default: 3, recommended max for testing)", "default": 3}, "group_size": {"type": "integer", "description": "Completions per step (default: 16, like training)", "default": 16}, "models": {"type": "array", "items": {"type": "string"}, "description": "Optional list of OpenRouter model IDs. Default: qwen/qwen3-8b, z-ai/glm-4.7-flash, minimax/minimax-m2.7"}}, "required": []}}
|
||||
|
||||
_rl_env = ["TINKER_API_KEY", "WANDB_API_KEY"]
|
||||
|
||||
|
|
|
|||
|
|
@ -920,6 +920,20 @@ def skill_view(name: str, file_path: str = None, task_id: str = None) -> str:
|
|||
ensure_ascii=False,
|
||||
)
|
||||
|
||||
# Check if the skill is disabled by the user
|
||||
resolved_name = parsed_frontmatter.get("name", skill_md.parent.name)
|
||||
if _is_skill_disabled(resolved_name):
|
||||
return json.dumps(
|
||||
{
|
||||
"success": False,
|
||||
"error": (
|
||||
f"Skill '{resolved_name}' is disabled. "
|
||||
"Enable it with `hermes skills` or inspect the files directly on disk."
|
||||
),
|
||||
},
|
||||
ensure_ascii=False,
|
||||
)
|
||||
|
||||
# If a specific file path is requested, read that instead
|
||||
if file_path and skill_dir:
|
||||
# Security: Prevent path traversal attacks
|
||||
|
|
|
|||
|
|
@ -423,8 +423,8 @@ def text_to_speech_tool(
|
|||
if not _check_neutts_available():
|
||||
return json.dumps({
|
||||
"success": False,
|
||||
"error": "NeuTTS provider selected but neutts_cli is not installed. "
|
||||
"Install the NeuTTS skill and run the bootstrap helper first."
|
||||
"error": "NeuTTS provider selected but neutts is not installed. "
|
||||
"Run hermes setup and choose NeuTTS, or install espeak-ng and run python -m pip install -U neutts[all]."
|
||||
}, ensure_ascii=False)
|
||||
logger.info("Generating speech with NeuTTS (local)...")
|
||||
_generate_neutts(text, file_str, tts_config)
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue