Merge origin/main into hermes/hermes-dd253d81
This commit is contained in:
commit
31db8c28a4
64 changed files with 3203 additions and 1055 deletions
|
|
@ -236,6 +236,43 @@ def _write_claude_code_credentials(access_token: str, refresh_token: str, expire
|
|||
logger.debug("Failed to write refreshed credentials: %s", e)
|
||||
|
||||
|
||||
def _resolve_claude_code_token_from_credentials(creds: Optional[Dict[str, Any]] = None) -> Optional[str]:
|
||||
"""Resolve a token from Claude Code credential files, refreshing if needed."""
|
||||
creds = creds or read_claude_code_credentials()
|
||||
if creds and is_claude_code_token_valid(creds):
|
||||
logger.debug("Using Claude Code credentials (auto-detected)")
|
||||
return creds["accessToken"]
|
||||
if creds:
|
||||
logger.debug("Claude Code credentials expired — attempting refresh")
|
||||
refreshed = _refresh_oauth_token(creds)
|
||||
if refreshed:
|
||||
return refreshed
|
||||
logger.debug("Token refresh failed — re-run 'claude setup-token' to reauthenticate")
|
||||
return None
|
||||
|
||||
|
||||
def _prefer_refreshable_claude_code_token(env_token: str, creds: Optional[Dict[str, Any]]) -> Optional[str]:
|
||||
"""Prefer Claude Code creds when a persisted env OAuth token would shadow refresh.
|
||||
|
||||
Hermes historically persisted setup tokens into ANTHROPIC_TOKEN. That makes
|
||||
later refresh impossible because the static env token wins before we ever
|
||||
inspect Claude Code's refreshable credential file. If we have a refreshable
|
||||
Claude Code credential record, prefer it over the static env OAuth token.
|
||||
"""
|
||||
if not env_token or not _is_oauth_token(env_token) or not isinstance(creds, dict):
|
||||
return None
|
||||
if not creds.get("refreshToken"):
|
||||
return None
|
||||
|
||||
resolved = _resolve_claude_code_token_from_credentials(creds)
|
||||
if resolved and resolved != env_token:
|
||||
logger.debug(
|
||||
"Preferring Claude Code credential file over static env OAuth token so refresh can proceed"
|
||||
)
|
||||
return resolved
|
||||
return None
|
||||
|
||||
|
||||
def resolve_anthropic_token() -> Optional[str]:
|
||||
"""Resolve an Anthropic token from all available sources.
|
||||
|
||||
|
|
@ -248,28 +285,28 @@ def resolve_anthropic_token() -> Optional[str]:
|
|||
|
||||
Returns the token string or None.
|
||||
"""
|
||||
creds = read_claude_code_credentials()
|
||||
|
||||
# 1. Hermes-managed OAuth/setup token env var
|
||||
token = os.getenv("ANTHROPIC_TOKEN", "").strip()
|
||||
if token:
|
||||
preferred = _prefer_refreshable_claude_code_token(token, creds)
|
||||
if preferred:
|
||||
return preferred
|
||||
return token
|
||||
|
||||
# 2. CLAUDE_CODE_OAUTH_TOKEN (used by Claude Code for setup-tokens)
|
||||
cc_token = os.getenv("CLAUDE_CODE_OAUTH_TOKEN", "").strip()
|
||||
if cc_token:
|
||||
preferred = _prefer_refreshable_claude_code_token(cc_token, creds)
|
||||
if preferred:
|
||||
return preferred
|
||||
return cc_token
|
||||
|
||||
# 3. Claude Code credential file
|
||||
creds = read_claude_code_credentials()
|
||||
if creds and is_claude_code_token_valid(creds):
|
||||
logger.debug("Using Claude Code credentials (auto-detected)")
|
||||
return creds["accessToken"]
|
||||
elif creds:
|
||||
# Token expired — attempt to refresh
|
||||
logger.debug("Claude Code credentials expired — attempting refresh")
|
||||
refreshed = _refresh_oauth_token(creds)
|
||||
if refreshed:
|
||||
return refreshed
|
||||
logger.debug("Token refresh failed — re-run 'claude setup-token' to reauthenticate")
|
||||
resolved_claude_token = _resolve_claude_code_token_from_credentials(creds)
|
||||
if resolved_claude_token:
|
||||
return resolved_claude_token
|
||||
|
||||
# 4. Regular API key, or a legacy OAuth token saved in ANTHROPIC_API_KEY.
|
||||
# This remains as a compatibility fallback for pre-migration Hermes configs.
|
||||
|
|
|
|||
|
|
@ -768,48 +768,107 @@ def get_async_text_auxiliary_client(task: str = ""):
|
|||
return resolve_provider_client("auto", async_mode=True)
|
||||
|
||||
|
||||
def get_vision_auxiliary_client() -> Tuple[Optional[OpenAI], Optional[str]]:
|
||||
"""Return (client, default_model_slug) for vision/multimodal auxiliary tasks.
|
||||
_VISION_AUTO_PROVIDER_ORDER = (
|
||||
"openrouter",
|
||||
"nous",
|
||||
"openai-codex",
|
||||
"custom",
|
||||
)
|
||||
|
||||
Checks AUXILIARY_VISION_PROVIDER for a forced provider, otherwise
|
||||
auto-detects. Callers may override the returned model with
|
||||
AUXILIARY_VISION_MODEL.
|
||||
|
||||
In auto mode, only providers known to support multimodal are tried:
|
||||
OpenRouter, Nous Portal, and Codex OAuth (gpt-5.3-codex supports
|
||||
vision via the Responses API). Custom endpoints and API-key
|
||||
providers are skipped — they may not handle vision input. To use
|
||||
them, set AUXILIARY_VISION_PROVIDER explicitly.
|
||||
"""
|
||||
forced = _get_auxiliary_provider("vision")
|
||||
if forced != "auto":
|
||||
return resolve_provider_client(forced)
|
||||
# Auto: try providers known to support multimodal first, then fall
|
||||
# back to the user's custom endpoint. Many local models (Qwen-VL,
|
||||
# LLaVA, Pixtral, etc.) support vision — skipping them entirely
|
||||
# caused silent failures for local-only users.
|
||||
for try_fn in (_try_openrouter, _try_nous, _try_codex,
|
||||
_try_custom_endpoint):
|
||||
client, model = try_fn()
|
||||
if client is not None:
|
||||
return client, model
|
||||
logger.debug("Auxiliary vision client: none available")
|
||||
def _normalize_vision_provider(provider: Optional[str]) -> str:
|
||||
provider = (provider or "auto").strip().lower()
|
||||
if provider == "codex":
|
||||
return "openai-codex"
|
||||
if provider == "main":
|
||||
return "custom"
|
||||
return provider
|
||||
|
||||
|
||||
def _resolve_strict_vision_backend(provider: str) -> Tuple[Optional[Any], Optional[str]]:
|
||||
provider = _normalize_vision_provider(provider)
|
||||
if provider == "openrouter":
|
||||
return _try_openrouter()
|
||||
if provider == "nous":
|
||||
return _try_nous()
|
||||
if provider == "openai-codex":
|
||||
return _try_codex()
|
||||
if provider == "custom":
|
||||
return _try_custom_endpoint()
|
||||
return None, None
|
||||
|
||||
|
||||
def get_async_vision_auxiliary_client():
|
||||
"""Return (async_client, model_slug) for async vision consumers.
|
||||
def _strict_vision_backend_available(provider: str) -> bool:
|
||||
return _resolve_strict_vision_backend(provider)[0] is not None
|
||||
|
||||
Properly handles Codex routing — unlike manually constructing
|
||||
AsyncOpenAI from a sync client, this preserves the Responses API
|
||||
adapter for Codex providers.
|
||||
|
||||
Returns (None, None) when no provider is available.
|
||||
def get_available_vision_backends() -> List[str]:
|
||||
"""Return the currently available vision backends in auto-selection order.
|
||||
|
||||
This is the single source of truth for setup, tool gating, and runtime
|
||||
auto-routing of vision tasks. Phase 1 keeps the auto list conservative:
|
||||
OpenRouter, Nous Portal, Codex OAuth, then custom OpenAI-compatible
|
||||
endpoints. Explicit provider overrides can still route elsewhere.
|
||||
"""
|
||||
sync_client, model = get_vision_auxiliary_client()
|
||||
if sync_client is None:
|
||||
return None, None
|
||||
return _to_async_client(sync_client, model)
|
||||
return [
|
||||
provider
|
||||
for provider in _VISION_AUTO_PROVIDER_ORDER
|
||||
if _strict_vision_backend_available(provider)
|
||||
]
|
||||
|
||||
|
||||
def resolve_vision_provider_client(
|
||||
provider: Optional[str] = None,
|
||||
model: Optional[str] = None,
|
||||
*,
|
||||
async_mode: bool = False,
|
||||
) -> Tuple[Optional[str], Optional[Any], Optional[str]]:
|
||||
"""Resolve the client actually used for vision tasks.
|
||||
|
||||
Explicit provider overrides still use the generic provider router for
|
||||
non-standard backends, so users can intentionally force experimental
|
||||
providers. Auto mode stays conservative and only tries vision backends
|
||||
known to work today.
|
||||
"""
|
||||
requested = _normalize_vision_provider(provider or _get_auxiliary_provider("vision"))
|
||||
|
||||
def _finalize(resolved_provider: str, sync_client: Any, default_model: Optional[str]):
|
||||
if sync_client is None:
|
||||
return resolved_provider, None, None
|
||||
final_model = model or default_model
|
||||
if async_mode:
|
||||
async_client, async_model = _to_async_client(sync_client, final_model)
|
||||
return resolved_provider, async_client, async_model
|
||||
return resolved_provider, sync_client, final_model
|
||||
|
||||
if requested == "auto":
|
||||
for candidate in get_available_vision_backends():
|
||||
sync_client, default_model = _resolve_strict_vision_backend(candidate)
|
||||
if sync_client is not None:
|
||||
return _finalize(candidate, sync_client, default_model)
|
||||
logger.debug("Auxiliary vision client: none available")
|
||||
return None, None, None
|
||||
|
||||
if requested in _VISION_AUTO_PROVIDER_ORDER:
|
||||
sync_client, default_model = _resolve_strict_vision_backend(requested)
|
||||
return _finalize(requested, sync_client, default_model)
|
||||
|
||||
client, final_model = _get_cached_client(requested, model, async_mode)
|
||||
if client is None:
|
||||
return requested, None, None
|
||||
return requested, client, final_model
|
||||
|
||||
|
||||
def get_vision_auxiliary_client() -> Tuple[Optional[OpenAI], Optional[str]]:
|
||||
"""Return (client, default_model_slug) for vision/multimodal auxiliary tasks."""
|
||||
_, client, final_model = resolve_vision_provider_client(async_mode=False)
|
||||
return client, final_model
|
||||
|
||||
|
||||
def get_async_vision_auxiliary_client():
|
||||
"""Return (async_client, model_slug) for async vision consumers."""
|
||||
_, client, final_model = resolve_vision_provider_client(async_mode=True)
|
||||
return client, final_model
|
||||
|
||||
|
||||
def get_auxiliary_extra_body() -> dict:
|
||||
|
|
@ -1010,18 +1069,41 @@ def call_llm(
|
|||
resolved_provider, resolved_model = _resolve_task_provider_model(
|
||||
task, provider, model)
|
||||
|
||||
client, final_model = _get_cached_client(resolved_provider, resolved_model)
|
||||
if client is None:
|
||||
# Fallback: try openrouter
|
||||
if resolved_provider != "openrouter":
|
||||
logger.warning("Provider %s unavailable, falling back to openrouter",
|
||||
resolved_provider)
|
||||
client, final_model = _get_cached_client(
|
||||
"openrouter", resolved_model or _OPENROUTER_MODEL)
|
||||
if client is None:
|
||||
raise RuntimeError(
|
||||
f"No LLM provider configured for task={task} provider={resolved_provider}. "
|
||||
f"Run: hermes setup")
|
||||
if task == "vision":
|
||||
effective_provider, client, final_model = resolve_vision_provider_client(
|
||||
provider=resolved_provider,
|
||||
model=resolved_model,
|
||||
async_mode=False,
|
||||
)
|
||||
if client is None and resolved_provider != "auto":
|
||||
logger.warning(
|
||||
"Vision provider %s unavailable, falling back to auto vision backends",
|
||||
resolved_provider,
|
||||
)
|
||||
effective_provider, client, final_model = resolve_vision_provider_client(
|
||||
provider="auto",
|
||||
model=resolved_model,
|
||||
async_mode=False,
|
||||
)
|
||||
if client is None:
|
||||
raise RuntimeError(
|
||||
f"No LLM provider configured for task={task} provider={resolved_provider}. "
|
||||
f"Run: hermes setup"
|
||||
)
|
||||
resolved_provider = effective_provider or resolved_provider
|
||||
else:
|
||||
client, final_model = _get_cached_client(resolved_provider, resolved_model)
|
||||
if client is None:
|
||||
# Fallback: try openrouter
|
||||
if resolved_provider != "openrouter":
|
||||
logger.warning("Provider %s unavailable, falling back to openrouter",
|
||||
resolved_provider)
|
||||
client, final_model = _get_cached_client(
|
||||
"openrouter", resolved_model or _OPENROUTER_MODEL)
|
||||
if client is None:
|
||||
raise RuntimeError(
|
||||
f"No LLM provider configured for task={task} provider={resolved_provider}. "
|
||||
f"Run: hermes setup")
|
||||
|
||||
kwargs = _build_call_kwargs(
|
||||
resolved_provider, final_model, messages,
|
||||
|
|
@ -1059,19 +1141,42 @@ async def async_call_llm(
|
|||
resolved_provider, resolved_model = _resolve_task_provider_model(
|
||||
task, provider, model)
|
||||
|
||||
client, final_model = _get_cached_client(
|
||||
resolved_provider, resolved_model, async_mode=True)
|
||||
if client is None:
|
||||
if resolved_provider != "openrouter":
|
||||
logger.warning("Provider %s unavailable, falling back to openrouter",
|
||||
resolved_provider)
|
||||
client, final_model = _get_cached_client(
|
||||
"openrouter", resolved_model or _OPENROUTER_MODEL,
|
||||
async_mode=True)
|
||||
if client is None:
|
||||
raise RuntimeError(
|
||||
f"No LLM provider configured for task={task} provider={resolved_provider}. "
|
||||
f"Run: hermes setup")
|
||||
if task == "vision":
|
||||
effective_provider, client, final_model = resolve_vision_provider_client(
|
||||
provider=resolved_provider,
|
||||
model=resolved_model,
|
||||
async_mode=True,
|
||||
)
|
||||
if client is None and resolved_provider != "auto":
|
||||
logger.warning(
|
||||
"Vision provider %s unavailable, falling back to auto vision backends",
|
||||
resolved_provider,
|
||||
)
|
||||
effective_provider, client, final_model = resolve_vision_provider_client(
|
||||
provider="auto",
|
||||
model=resolved_model,
|
||||
async_mode=True,
|
||||
)
|
||||
if client is None:
|
||||
raise RuntimeError(
|
||||
f"No LLM provider configured for task={task} provider={resolved_provider}. "
|
||||
f"Run: hermes setup"
|
||||
)
|
||||
resolved_provider = effective_provider or resolved_provider
|
||||
else:
|
||||
client, final_model = _get_cached_client(
|
||||
resolved_provider, resolved_model, async_mode=True)
|
||||
if client is None:
|
||||
if resolved_provider != "openrouter":
|
||||
logger.warning("Provider %s unavailable, falling back to openrouter",
|
||||
resolved_provider)
|
||||
client, final_model = _get_cached_client(
|
||||
"openrouter", resolved_model or _OPENROUTER_MODEL,
|
||||
async_mode=True)
|
||||
if client is None:
|
||||
raise RuntimeError(
|
||||
f"No LLM provider configured for task={task} provider={resolved_provider}. "
|
||||
f"Run: hermes setup")
|
||||
|
||||
kwargs = _build_call_kwargs(
|
||||
resolved_provider, final_model, messages,
|
||||
|
|
|
|||
|
|
@ -80,7 +80,7 @@ def build_tool_preview(tool_name: str, args: dict, max_len: int = 40) -> str | N
|
|||
"image_generate": "prompt", "text_to_speech": "text",
|
||||
"vision_analyze": "question", "mixture_of_agents": "user_prompt",
|
||||
"skill_view": "name", "skills_list": "category",
|
||||
"schedule_cronjob": "name",
|
||||
"cronjob": "action",
|
||||
"execute_code": "code", "delegate_task": "goal",
|
||||
"clarify": "question", "skill_manage": "name",
|
||||
}
|
||||
|
|
@ -513,12 +513,15 @@ def get_cute_tool_message(
|
|||
return _wrap(f"┊ 🧠 reason {_trunc(args.get('user_prompt', ''), 30)} {dur}")
|
||||
if tool_name == "send_message":
|
||||
return _wrap(f"┊ 📨 send {args.get('target', '?')}: \"{_trunc(args.get('message', ''), 25)}\" {dur}")
|
||||
if tool_name == "schedule_cronjob":
|
||||
return _wrap(f"┊ ⏰ schedule {_trunc(args.get('name', args.get('prompt', 'task')), 30)} {dur}")
|
||||
if tool_name == "list_cronjobs":
|
||||
return _wrap(f"┊ ⏰ jobs listing {dur}")
|
||||
if tool_name == "remove_cronjob":
|
||||
return _wrap(f"┊ ⏰ remove job {args.get('job_id', '?')} {dur}")
|
||||
if tool_name == "cronjob":
|
||||
action = args.get("action", "?")
|
||||
if action == "create":
|
||||
skills = args.get("skills") or ([] if not args.get("skill") else [args.get("skill")])
|
||||
label = args.get("name") or (skills[0] if skills else None) or args.get("prompt", "task")
|
||||
return _wrap(f"┊ ⏰ cron create {_trunc(label, 24)} {dur}")
|
||||
if action == "list":
|
||||
return _wrap(f"┊ ⏰ cron listing {dur}")
|
||||
return _wrap(f"┊ ⏰ cron {action} {args.get('job_id', '')} {dur}")
|
||||
if tool_name.startswith("rl_"):
|
||||
rl = {
|
||||
"rl_list_environments": "list envs", "rl_select_environment": f"select {args.get('name', '')}",
|
||||
|
|
|
|||
|
|
@ -14,6 +14,110 @@ logger = logging.getLogger(__name__)
|
|||
_skill_commands: Dict[str, Dict[str, Any]] = {}
|
||||
|
||||
|
||||
def _load_skill_payload(skill_identifier: str, task_id: str | None = None) -> tuple[dict[str, Any], Path | None, str] | None:
|
||||
"""Load a skill by name/path and return (loaded_payload, skill_dir, display_name)."""
|
||||
raw_identifier = (skill_identifier or "").strip()
|
||||
if not raw_identifier:
|
||||
return None
|
||||
|
||||
try:
|
||||
from tools.skills_tool import SKILLS_DIR, skill_view
|
||||
|
||||
identifier_path = Path(raw_identifier).expanduser()
|
||||
if identifier_path.is_absolute():
|
||||
try:
|
||||
normalized = str(identifier_path.resolve().relative_to(SKILLS_DIR.resolve()))
|
||||
except Exception:
|
||||
normalized = raw_identifier
|
||||
else:
|
||||
normalized = raw_identifier.lstrip("/")
|
||||
|
||||
loaded_skill = json.loads(skill_view(normalized, task_id=task_id))
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
if not loaded_skill.get("success"):
|
||||
return None
|
||||
|
||||
skill_name = str(loaded_skill.get("name") or normalized)
|
||||
skill_path = str(loaded_skill.get("path") or "")
|
||||
skill_dir = None
|
||||
if skill_path:
|
||||
try:
|
||||
skill_dir = SKILLS_DIR / Path(skill_path).parent
|
||||
except Exception:
|
||||
skill_dir = None
|
||||
|
||||
return loaded_skill, skill_dir, skill_name
|
||||
|
||||
|
||||
def _build_skill_message(
|
||||
loaded_skill: dict[str, Any],
|
||||
skill_dir: Path | None,
|
||||
activation_note: str,
|
||||
user_instruction: str = "",
|
||||
) -> str:
|
||||
"""Format a loaded skill into a user/system message payload."""
|
||||
from tools.skills_tool import SKILLS_DIR
|
||||
|
||||
content = str(loaded_skill.get("content") or "")
|
||||
|
||||
parts = [activation_note, "", content.strip()]
|
||||
|
||||
if loaded_skill.get("setup_skipped"):
|
||||
parts.extend(
|
||||
[
|
||||
"",
|
||||
"[Skill setup note: Required environment setup was skipped. Continue loading the skill and explain any reduced functionality if it matters.]",
|
||||
]
|
||||
)
|
||||
elif loaded_skill.get("gateway_setup_hint"):
|
||||
parts.extend(
|
||||
[
|
||||
"",
|
||||
f"[Skill setup note: {loaded_skill['gateway_setup_hint']}]",
|
||||
]
|
||||
)
|
||||
elif loaded_skill.get("setup_needed") and loaded_skill.get("setup_note"):
|
||||
parts.extend(
|
||||
[
|
||||
"",
|
||||
f"[Skill setup note: {loaded_skill['setup_note']}]",
|
||||
]
|
||||
)
|
||||
|
||||
supporting = []
|
||||
linked_files = loaded_skill.get("linked_files") or {}
|
||||
for entries in linked_files.values():
|
||||
if isinstance(entries, list):
|
||||
supporting.extend(entries)
|
||||
|
||||
if not supporting and skill_dir:
|
||||
for subdir in ("references", "templates", "scripts", "assets"):
|
||||
subdir_path = skill_dir / subdir
|
||||
if subdir_path.exists():
|
||||
for f in sorted(subdir_path.rglob("*")):
|
||||
if f.is_file():
|
||||
rel = str(f.relative_to(skill_dir))
|
||||
supporting.append(rel)
|
||||
|
||||
if supporting and skill_dir:
|
||||
skill_view_target = str(skill_dir.relative_to(SKILLS_DIR))
|
||||
parts.append("")
|
||||
parts.append("[This skill has supporting files you can load with the skill_view tool:]")
|
||||
for sf in supporting:
|
||||
parts.append(f"- {sf}")
|
||||
parts.append(
|
||||
f'\nTo view any of these, use: skill_view(name="{skill_view_target}", file_path="<path>")'
|
||||
)
|
||||
|
||||
if user_instruction:
|
||||
parts.append("")
|
||||
parts.append(f"The user has provided the following instruction alongside the skill invocation: {user_instruction}")
|
||||
|
||||
return "\n".join(parts)
|
||||
|
||||
|
||||
def scan_skill_commands() -> Dict[str, Dict[str, Any]]:
|
||||
"""Scan ~/.hermes/skills/ and return a mapping of /command -> skill info.
|
||||
|
||||
|
|
@ -83,77 +187,60 @@ def build_skill_invocation_message(
|
|||
if not skill_info:
|
||||
return None
|
||||
|
||||
skill_name = skill_info["name"]
|
||||
skill_path = skill_info["skill_dir"]
|
||||
loaded = _load_skill_payload(skill_info["skill_dir"], task_id=task_id)
|
||||
if not loaded:
|
||||
return f"[Failed to load skill: {skill_info['name']}]"
|
||||
|
||||
try:
|
||||
from tools.skills_tool import SKILLS_DIR, skill_view
|
||||
loaded_skill, skill_dir, skill_name = loaded
|
||||
activation_note = (
|
||||
f'[SYSTEM: The user has invoked the "{skill_name}" skill, indicating they want '
|
||||
"you to follow its instructions. The full skill content is loaded below.]"
|
||||
)
|
||||
return _build_skill_message(
|
||||
loaded_skill,
|
||||
skill_dir,
|
||||
activation_note,
|
||||
user_instruction=user_instruction,
|
||||
)
|
||||
|
||||
loaded_skill = json.loads(skill_view(skill_path, task_id=task_id))
|
||||
except Exception:
|
||||
return f"[Failed to load skill: {skill_name}]"
|
||||
|
||||
if not loaded_skill.get("success"):
|
||||
return f"[Failed to load skill: {skill_name}]"
|
||||
def build_preloaded_skills_prompt(
|
||||
skill_identifiers: list[str],
|
||||
task_id: str | None = None,
|
||||
) -> tuple[str, list[str], list[str]]:
|
||||
"""Load one or more skills for session-wide CLI preloading.
|
||||
|
||||
content = str(loaded_skill.get("content") or "")
|
||||
skill_dir = Path(skill_info["skill_dir"])
|
||||
Returns (prompt_text, loaded_skill_names, missing_identifiers).
|
||||
"""
|
||||
prompt_parts: list[str] = []
|
||||
loaded_names: list[str] = []
|
||||
missing: list[str] = []
|
||||
|
||||
parts = [
|
||||
f'[SYSTEM: The user has invoked the "{skill_name}" skill, indicating they want you to follow its instructions. The full skill content is loaded below.]',
|
||||
"",
|
||||
content.strip(),
|
||||
]
|
||||
seen: set[str] = set()
|
||||
for raw_identifier in skill_identifiers:
|
||||
identifier = (raw_identifier or "").strip()
|
||||
if not identifier or identifier in seen:
|
||||
continue
|
||||
seen.add(identifier)
|
||||
|
||||
if loaded_skill.get("setup_skipped"):
|
||||
parts.extend(
|
||||
[
|
||||
"",
|
||||
"[Skill setup note: Required environment setup was skipped. Continue loading the skill and explain any reduced functionality if it matters.]",
|
||||
]
|
||||
loaded = _load_skill_payload(identifier, task_id=task_id)
|
||||
if not loaded:
|
||||
missing.append(identifier)
|
||||
continue
|
||||
|
||||
loaded_skill, skill_dir, skill_name = loaded
|
||||
activation_note = (
|
||||
f'[SYSTEM: The user launched this CLI session with the "{skill_name}" skill '
|
||||
"preloaded. Treat its instructions as active guidance for the duration of this "
|
||||
"session unless the user overrides them.]"
|
||||
)
|
||||
elif loaded_skill.get("gateway_setup_hint"):
|
||||
parts.extend(
|
||||
[
|
||||
"",
|
||||
f"[Skill setup note: {loaded_skill['gateway_setup_hint']}]",
|
||||
]
|
||||
)
|
||||
elif loaded_skill.get("setup_needed") and loaded_skill.get("setup_note"):
|
||||
parts.extend(
|
||||
[
|
||||
"",
|
||||
f"[Skill setup note: {loaded_skill['setup_note']}]",
|
||||
]
|
||||
prompt_parts.append(
|
||||
_build_skill_message(
|
||||
loaded_skill,
|
||||
skill_dir,
|
||||
activation_note,
|
||||
)
|
||||
)
|
||||
loaded_names.append(skill_name)
|
||||
|
||||
supporting = []
|
||||
linked_files = loaded_skill.get("linked_files") or {}
|
||||
for entries in linked_files.values():
|
||||
if isinstance(entries, list):
|
||||
supporting.extend(entries)
|
||||
|
||||
if not supporting:
|
||||
for subdir in ("references", "templates", "scripts", "assets"):
|
||||
subdir_path = skill_dir / subdir
|
||||
if subdir_path.exists():
|
||||
for f in sorted(subdir_path.rglob("*")):
|
||||
if f.is_file():
|
||||
rel = str(f.relative_to(skill_dir))
|
||||
supporting.append(rel)
|
||||
|
||||
if supporting:
|
||||
skill_view_target = str(Path(skill_path).relative_to(SKILLS_DIR))
|
||||
parts.append("")
|
||||
parts.append("[This skill has supporting files you can load with the skill_view tool:]")
|
||||
for sf in supporting:
|
||||
parts.append(f"- {sf}")
|
||||
parts.append(
|
||||
f'\nTo view any of these, use: skill_view(name="{skill_view_target}", file_path="<path>")'
|
||||
)
|
||||
|
||||
if user_instruction:
|
||||
parts.append("")
|
||||
parts.append(f"The user has provided the following instruction alongside the skill invocation: {user_instruction}")
|
||||
|
||||
return "\n".join(parts)
|
||||
return "\n\n".join(prompt_parts), loaded_names, missing
|
||||
|
|
|
|||
|
|
@ -456,7 +456,7 @@ platform_toolsets:
|
|||
# moa - mixture_of_agents (requires OPENROUTER_API_KEY)
|
||||
# todo - todo (in-memory task planning, no deps)
|
||||
# tts - text_to_speech (Edge TTS free, or ELEVENLABS/OPENAI key)
|
||||
# cronjob - schedule_cronjob, list_cronjobs, remove_cronjob
|
||||
# cronjob - cronjob (create/list/update/pause/resume/run/remove scheduled tasks)
|
||||
# rl - rl_list_environments, rl_start_training, etc. (requires TINKER_API_KEY)
|
||||
#
|
||||
# PRESETS (curated bundles):
|
||||
|
|
|
|||
375
cli.py
375
cli.py
|
|
@ -8,6 +8,7 @@ Features ASCII art branding, interactive REPL, toolset selection, and rich forma
|
|||
Usage:
|
||||
python cli.py # Start interactive mode with all tools
|
||||
python cli.py --toolsets web,terminal # Start with specific toolsets
|
||||
python cli.py --skills hermes-agent-dev,github-auth
|
||||
python cli.py -q "your question" # Single query mode
|
||||
python cli.py --list-tools # List available tools and exit
|
||||
"""
|
||||
|
|
@ -428,8 +429,8 @@ from hermes_cli.commands import COMMANDS, SlashCommandCompleter
|
|||
from hermes_cli import callbacks as _callbacks
|
||||
from toolsets import get_all_toolsets, get_toolset_info, resolve_toolset, validate_toolset
|
||||
|
||||
# Cron job system for scheduled tasks (CRUD only — execution is handled by the gateway)
|
||||
from cron import create_job, list_jobs, remove_job, get_job
|
||||
# Cron job system for scheduled tasks (execution is handled by the gateway)
|
||||
from cron import get_job
|
||||
|
||||
# Resource cleanup imports for safe shutdown (terminal VMs, browser sessions)
|
||||
from tools.terminal_tool import cleanup_all_environments as _cleanup_all_terminals
|
||||
|
|
@ -1043,11 +1044,40 @@ def build_welcome_banner(console: Console, model: str, cwd: str, tools: List[dic
|
|||
# Skill Slash Commands — dynamic commands generated from installed skills
|
||||
# ============================================================================
|
||||
|
||||
from agent.skill_commands import scan_skill_commands, get_skill_commands, build_skill_invocation_message
|
||||
from agent.skill_commands import (
|
||||
scan_skill_commands,
|
||||
get_skill_commands,
|
||||
build_skill_invocation_message,
|
||||
build_preloaded_skills_prompt,
|
||||
)
|
||||
|
||||
_skill_commands = scan_skill_commands()
|
||||
|
||||
|
||||
def _parse_skills_argument(skills: str | list[str] | tuple[str, ...] | None) -> list[str]:
|
||||
"""Normalize a CLI skills flag into a deduplicated list of skill identifiers."""
|
||||
if not skills:
|
||||
return []
|
||||
|
||||
if isinstance(skills, str):
|
||||
raw_values = [skills]
|
||||
elif isinstance(skills, (list, tuple)):
|
||||
raw_values = [str(item) for item in skills if item is not None]
|
||||
else:
|
||||
raw_values = [str(skills)]
|
||||
|
||||
parsed: list[str] = []
|
||||
seen: set[str] = set()
|
||||
for raw in raw_values:
|
||||
for part in raw.split(","):
|
||||
normalized = part.strip()
|
||||
if not normalized or normalized in seen:
|
||||
continue
|
||||
seen.add(normalized)
|
||||
parsed.append(normalized)
|
||||
return parsed
|
||||
|
||||
|
||||
def save_config_value(key_path: str, value: any) -> bool:
|
||||
"""
|
||||
Save a value to the active config file at the specified key path.
|
||||
|
|
@ -1313,6 +1343,8 @@ class HermesCLI:
|
|||
self._command_status = ""
|
||||
self._attached_images: list[Path] = []
|
||||
self._image_counter = 0
|
||||
self.preloaded_skills: list[str] = []
|
||||
self._startup_skills_line_shown = False
|
||||
|
||||
# Voice mode state (also reinitialized inside run() for interactive TUI).
|
||||
self._voice_lock = threading.Lock()
|
||||
|
|
@ -1599,6 +1631,13 @@ class HermesCLI:
|
|||
def show_banner(self):
|
||||
"""Display the welcome banner in Claude Code style."""
|
||||
self.console.clear()
|
||||
if self.preloaded_skills and not self._startup_skills_line_shown:
|
||||
skills_label = ", ".join(self.preloaded_skills)
|
||||
self.console.print(
|
||||
f"[bold {_accent_hex()}]Activated skills:[/] {skills_label}"
|
||||
)
|
||||
self.console.print()
|
||||
self._startup_skills_line_shown = True
|
||||
|
||||
# Auto-compact for narrow terminals — the full banner with caduceus
|
||||
# + tool list needs ~80 columns minimum to render without wrapping.
|
||||
|
|
@ -2588,139 +2627,248 @@ class HermesCLI:
|
|||
|
||||
def _handle_cron_command(self, cmd: str):
|
||||
"""Handle the /cron command to manage scheduled tasks."""
|
||||
parts = cmd.split(maxsplit=2)
|
||||
|
||||
if len(parts) == 1:
|
||||
# /cron - show help and list
|
||||
import shlex
|
||||
from tools.cronjob_tools import cronjob as cronjob_tool
|
||||
|
||||
def _cron_api(**kwargs):
|
||||
return json.loads(cronjob_tool(**kwargs))
|
||||
|
||||
def _normalize_skills(values):
|
||||
normalized = []
|
||||
for value in values:
|
||||
text = str(value or "").strip()
|
||||
if text and text not in normalized:
|
||||
normalized.append(text)
|
||||
return normalized
|
||||
|
||||
def _parse_flags(tokens):
|
||||
opts = {
|
||||
"name": None,
|
||||
"deliver": None,
|
||||
"repeat": None,
|
||||
"skills": [],
|
||||
"add_skills": [],
|
||||
"remove_skills": [],
|
||||
"clear_skills": False,
|
||||
"all": False,
|
||||
"prompt": None,
|
||||
"schedule": None,
|
||||
"positionals": [],
|
||||
}
|
||||
i = 0
|
||||
while i < len(tokens):
|
||||
token = tokens[i]
|
||||
if token == "--name" and i + 1 < len(tokens):
|
||||
opts["name"] = tokens[i + 1]
|
||||
i += 2
|
||||
elif token == "--deliver" and i + 1 < len(tokens):
|
||||
opts["deliver"] = tokens[i + 1]
|
||||
i += 2
|
||||
elif token == "--repeat" and i + 1 < len(tokens):
|
||||
try:
|
||||
opts["repeat"] = int(tokens[i + 1])
|
||||
except ValueError:
|
||||
print("(._.) --repeat must be an integer")
|
||||
return None
|
||||
i += 2
|
||||
elif token == "--skill" and i + 1 < len(tokens):
|
||||
opts["skills"].append(tokens[i + 1])
|
||||
i += 2
|
||||
elif token == "--add-skill" and i + 1 < len(tokens):
|
||||
opts["add_skills"].append(tokens[i + 1])
|
||||
i += 2
|
||||
elif token == "--remove-skill" and i + 1 < len(tokens):
|
||||
opts["remove_skills"].append(tokens[i + 1])
|
||||
i += 2
|
||||
elif token == "--clear-skills":
|
||||
opts["clear_skills"] = True
|
||||
i += 1
|
||||
elif token == "--all":
|
||||
opts["all"] = True
|
||||
i += 1
|
||||
elif token == "--prompt" and i + 1 < len(tokens):
|
||||
opts["prompt"] = tokens[i + 1]
|
||||
i += 2
|
||||
elif token == "--schedule" and i + 1 < len(tokens):
|
||||
opts["schedule"] = tokens[i + 1]
|
||||
i += 2
|
||||
else:
|
||||
opts["positionals"].append(token)
|
||||
i += 1
|
||||
return opts
|
||||
|
||||
tokens = shlex.split(cmd)
|
||||
|
||||
if len(tokens) == 1:
|
||||
print()
|
||||
print("+" + "-" * 60 + "+")
|
||||
print("|" + " " * 18 + "(^_^) Scheduled Tasks" + " " * 19 + "|")
|
||||
print("+" + "-" * 60 + "+")
|
||||
print("+" + "-" * 68 + "+")
|
||||
print("|" + " " * 22 + "(^_^) Scheduled Tasks" + " " * 23 + "|")
|
||||
print("+" + "-" * 68 + "+")
|
||||
print()
|
||||
print(" Commands:")
|
||||
print(" /cron - List scheduled jobs")
|
||||
print(" /cron list - List scheduled jobs")
|
||||
print(' /cron add <schedule> <prompt> - Add a new job')
|
||||
print(" /cron remove <job_id> - Remove a job")
|
||||
print(" /cron list")
|
||||
print(' /cron add "every 2h" "Check server status" [--skill blogwatcher]')
|
||||
print(' /cron edit <job_id> --schedule "every 4h" --prompt "New task"')
|
||||
print(" /cron edit <job_id> --skill blogwatcher --skill find-nearby")
|
||||
print(" /cron edit <job_id> --remove-skill blogwatcher")
|
||||
print(" /cron edit <job_id> --clear-skills")
|
||||
print(" /cron pause <job_id>")
|
||||
print(" /cron resume <job_id>")
|
||||
print(" /cron run <job_id>")
|
||||
print(" /cron remove <job_id>")
|
||||
print()
|
||||
print(" Schedule formats:")
|
||||
print(" 30m, 2h, 1d - One-shot delay")
|
||||
print(' "every 30m", "every 2h" - Recurring interval')
|
||||
print(' "0 9 * * *" - Cron expression')
|
||||
print()
|
||||
|
||||
# Show current jobs
|
||||
jobs = list_jobs()
|
||||
result = _cron_api(action="list")
|
||||
jobs = result.get("jobs", []) if result.get("success") else []
|
||||
if jobs:
|
||||
print(" Current Jobs:")
|
||||
print(" " + "-" * 55)
|
||||
print(" " + "-" * 63)
|
||||
for job in jobs:
|
||||
# Format repeat status
|
||||
times = job["repeat"].get("times")
|
||||
completed = job["repeat"].get("completed", 0)
|
||||
if times is None:
|
||||
repeat_str = "forever"
|
||||
else:
|
||||
repeat_str = f"{completed}/{times}"
|
||||
|
||||
print(f" {job['id'][:12]:<12} | {job['schedule_display']:<15} | {repeat_str:<8}")
|
||||
prompt_preview = job['prompt'][:45] + "..." if len(job['prompt']) > 45 else job['prompt']
|
||||
print(f" {prompt_preview}")
|
||||
repeat_str = job.get("repeat", "?")
|
||||
print(f" {job['job_id'][:12]:<12} | {job['schedule']:<15} | {repeat_str:<8}")
|
||||
if job.get("skills"):
|
||||
print(f" Skills: {', '.join(job['skills'])}")
|
||||
print(f" {job.get('prompt_preview', '')}")
|
||||
if job.get("next_run_at"):
|
||||
from datetime import datetime
|
||||
next_run = datetime.fromisoformat(job["next_run_at"])
|
||||
print(f" Next: {next_run.strftime('%Y-%m-%d %H:%M')}")
|
||||
print(f" Next: {job['next_run_at']}")
|
||||
print()
|
||||
else:
|
||||
print(" No scheduled jobs. Use '/cron add' to create one.")
|
||||
print()
|
||||
return
|
||||
|
||||
subcommand = parts[1].lower()
|
||||
|
||||
|
||||
subcommand = tokens[1].lower()
|
||||
opts = _parse_flags(tokens[2:])
|
||||
if opts is None:
|
||||
return
|
||||
|
||||
if subcommand == "list":
|
||||
# /cron list - just show jobs
|
||||
jobs = list_jobs()
|
||||
result = _cron_api(action="list", include_disabled=opts["all"])
|
||||
jobs = result.get("jobs", []) if result.get("success") else []
|
||||
if not jobs:
|
||||
print("(._.) No scheduled jobs.")
|
||||
return
|
||||
|
||||
|
||||
print()
|
||||
print("Scheduled Jobs:")
|
||||
print("-" * 70)
|
||||
print("-" * 80)
|
||||
for job in jobs:
|
||||
times = job["repeat"].get("times")
|
||||
completed = job["repeat"].get("completed", 0)
|
||||
repeat_str = "forever" if times is None else f"{completed}/{times}"
|
||||
|
||||
print(f" ID: {job['id']}")
|
||||
print(f" ID: {job['job_id']}")
|
||||
print(f" Name: {job['name']}")
|
||||
print(f" Schedule: {job['schedule_display']} ({repeat_str})")
|
||||
print(f" State: {job.get('state', '?')}")
|
||||
print(f" Schedule: {job['schedule']} ({job.get('repeat', '?')})")
|
||||
print(f" Next run: {job.get('next_run_at', 'N/A')}")
|
||||
print(f" Prompt: {job['prompt'][:80]}{'...' if len(job['prompt']) > 80 else ''}")
|
||||
if job.get("skills"):
|
||||
print(f" Skills: {', '.join(job['skills'])}")
|
||||
print(f" Prompt: {job.get('prompt_preview', '')}")
|
||||
if job.get("last_run_at"):
|
||||
print(f" Last run: {job['last_run_at']} ({job.get('last_status', '?')})")
|
||||
print()
|
||||
|
||||
elif subcommand == "add":
|
||||
# /cron add <schedule> <prompt>
|
||||
if len(parts) < 3:
|
||||
return
|
||||
|
||||
if subcommand in {"add", "create"}:
|
||||
positionals = opts["positionals"]
|
||||
if not positionals:
|
||||
print("(._.) Usage: /cron add <schedule> <prompt>")
|
||||
print(" Example: /cron add 30m Remind me to take a break")
|
||||
print(' Example: /cron add "every 2h" Check server status at 192.168.1.1')
|
||||
return
|
||||
|
||||
# Parse schedule and prompt
|
||||
rest = parts[2].strip()
|
||||
|
||||
# Handle quoted schedule (e.g., "every 30m" or "0 9 * * *")
|
||||
if rest.startswith('"'):
|
||||
# Find closing quote
|
||||
close_quote = rest.find('"', 1)
|
||||
if close_quote == -1:
|
||||
print("(._.) Unmatched quote in schedule")
|
||||
return
|
||||
schedule = rest[1:close_quote]
|
||||
prompt = rest[close_quote + 1:].strip()
|
||||
schedule = opts["schedule"] or positionals[0]
|
||||
prompt = opts["prompt"] or " ".join(positionals[1:])
|
||||
skills = _normalize_skills(opts["skills"])
|
||||
if not prompt and not skills:
|
||||
print("(._.) Please provide a prompt or at least one skill")
|
||||
return
|
||||
result = _cron_api(
|
||||
action="create",
|
||||
schedule=schedule,
|
||||
prompt=prompt or None,
|
||||
name=opts["name"],
|
||||
deliver=opts["deliver"],
|
||||
repeat=opts["repeat"],
|
||||
skills=skills or None,
|
||||
)
|
||||
if result.get("success"):
|
||||
print(f"(^_^)b Created job: {result['job_id']}")
|
||||
print(f" Schedule: {result['schedule']}")
|
||||
if result.get("skills"):
|
||||
print(f" Skills: {', '.join(result['skills'])}")
|
||||
print(f" Next run: {result['next_run_at']}")
|
||||
else:
|
||||
# First word is schedule
|
||||
schedule_parts = rest.split(maxsplit=1)
|
||||
schedule = schedule_parts[0]
|
||||
prompt = schedule_parts[1] if len(schedule_parts) > 1 else ""
|
||||
|
||||
if not prompt:
|
||||
print("(._.) Please provide a prompt for the job")
|
||||
print(f"(x_x) Failed to create job: {result.get('error')}")
|
||||
return
|
||||
|
||||
if subcommand == "edit":
|
||||
positionals = opts["positionals"]
|
||||
if not positionals:
|
||||
print("(._.) Usage: /cron edit <job_id> [--schedule ...] [--prompt ...] [--skill ...]")
|
||||
return
|
||||
|
||||
try:
|
||||
job = create_job(prompt=prompt, schedule=schedule)
|
||||
print(f"(^_^)b Created job: {job['id']}")
|
||||
print(f" Schedule: {job['schedule_display']}")
|
||||
print(f" Next run: {job['next_run_at']}")
|
||||
except Exception as e:
|
||||
print(f"(x_x) Failed to create job: {e}")
|
||||
|
||||
elif subcommand == "remove" or subcommand == "rm" or subcommand == "delete":
|
||||
# /cron remove <job_id>
|
||||
if len(parts) < 3:
|
||||
print("(._.) Usage: /cron remove <job_id>")
|
||||
return
|
||||
|
||||
job_id = parts[2].strip()
|
||||
job = get_job(job_id)
|
||||
|
||||
if not job:
|
||||
job_id = positionals[0]
|
||||
existing = get_job(job_id)
|
||||
if not existing:
|
||||
print(f"(._.) Job not found: {job_id}")
|
||||
return
|
||||
|
||||
if remove_job(job_id):
|
||||
print(f"(^_^)b Removed job: {job['name']} ({job_id})")
|
||||
|
||||
final_skills = None
|
||||
replacement_skills = _normalize_skills(opts["skills"])
|
||||
add_skills = _normalize_skills(opts["add_skills"])
|
||||
remove_skills = set(_normalize_skills(opts["remove_skills"]))
|
||||
existing_skills = list(existing.get("skills") or ([] if not existing.get("skill") else [existing.get("skill")]))
|
||||
if opts["clear_skills"]:
|
||||
final_skills = []
|
||||
elif replacement_skills:
|
||||
final_skills = replacement_skills
|
||||
elif add_skills or remove_skills:
|
||||
final_skills = [skill for skill in existing_skills if skill not in remove_skills]
|
||||
for skill in add_skills:
|
||||
if skill not in final_skills:
|
||||
final_skills.append(skill)
|
||||
|
||||
result = _cron_api(
|
||||
action="update",
|
||||
job_id=job_id,
|
||||
schedule=opts["schedule"],
|
||||
prompt=opts["prompt"],
|
||||
name=opts["name"],
|
||||
deliver=opts["deliver"],
|
||||
repeat=opts["repeat"],
|
||||
skills=final_skills,
|
||||
)
|
||||
if result.get("success"):
|
||||
job = result["job"]
|
||||
print(f"(^_^)b Updated job: {job['job_id']}")
|
||||
print(f" Schedule: {job['schedule']}")
|
||||
if job.get("skills"):
|
||||
print(f" Skills: {', '.join(job['skills'])}")
|
||||
else:
|
||||
print(" Skills: none")
|
||||
else:
|
||||
print(f"(x_x) Failed to remove job: {job_id}")
|
||||
|
||||
else:
|
||||
print(f"(._.) Unknown cron command: {subcommand}")
|
||||
print(" Available: list, add, remove")
|
||||
print(f"(x_x) Failed to update job: {result.get('error')}")
|
||||
return
|
||||
|
||||
if subcommand in {"pause", "resume", "run", "remove", "rm", "delete"}:
|
||||
positionals = opts["positionals"]
|
||||
if not positionals:
|
||||
print(f"(._.) Usage: /cron {subcommand} <job_id>")
|
||||
return
|
||||
job_id = positionals[0]
|
||||
action = "remove" if subcommand in {"remove", "rm", "delete"} else subcommand
|
||||
result = _cron_api(action=action, job_id=job_id, reason="paused from /cron" if action == "pause" else None)
|
||||
if not result.get("success"):
|
||||
print(f"(x_x) Failed to {action} job: {result.get('error')}")
|
||||
return
|
||||
if action == "pause":
|
||||
print(f"(^_^)b Paused job: {result['job']['name']} ({job_id})")
|
||||
elif action == "resume":
|
||||
print(f"(^_^)b Resumed job: {result['job']['name']} ({job_id})")
|
||||
print(f" Next run: {result['job'].get('next_run_at')}")
|
||||
elif action == "run":
|
||||
print(f"(^_^)b Triggered job: {result['job']['name']} ({job_id})")
|
||||
print(" It will run on the next scheduler tick.")
|
||||
else:
|
||||
removed = result.get("removed_job", {})
|
||||
print(f"(^_^)b Removed job: {removed.get('name', job_id)} ({job_id})")
|
||||
return
|
||||
|
||||
print(f"(._.) Unknown cron command: {subcommand}")
|
||||
print(" Available: list, add, edit, pause, resume, run, remove")
|
||||
|
||||
def _handle_skills_command(self, cmd: str):
|
||||
"""Handle /skills slash command — delegates to hermes_cli.skills_hub."""
|
||||
|
|
@ -5829,6 +5977,7 @@ def main(
|
|||
query: str = None,
|
||||
q: str = None,
|
||||
toolsets: str = None,
|
||||
skills: str | list[str] | tuple[str, ...] = None,
|
||||
model: str = None,
|
||||
provider: str = None,
|
||||
api_key: str = None,
|
||||
|
|
@ -5853,6 +6002,7 @@ def main(
|
|||
query: Single query to execute (then exit). Alias: -q
|
||||
q: Shorthand for --query
|
||||
toolsets: Comma-separated list of toolsets to enable (e.g., "web,terminal")
|
||||
skills: Comma-separated or repeated list of skills to preload for the session
|
||||
model: Model to use (default: anthropic/claude-opus-4-20250514)
|
||||
provider: Inference provider ("auto", "openrouter", "nous", "openai-codex", "zai", "kimi-coding", "minimax", "minimax-cn")
|
||||
api_key: API key for authentication
|
||||
|
|
@ -5869,6 +6019,7 @@ def main(
|
|||
Examples:
|
||||
python cli.py # Start interactive mode
|
||||
python cli.py --toolsets web,terminal # Use specific toolsets
|
||||
python cli.py --skills hermes-agent-dev,github-auth
|
||||
python cli.py -q "What is Python?" # Single query mode
|
||||
python cli.py --list-tools # List tools and exit
|
||||
python cli.py --resume 20260225_143052_a1b2c3 # Resume session
|
||||
|
|
@ -5938,6 +6089,8 @@ def main(
|
|||
else:
|
||||
toolsets_list = ["hermes-cli"]
|
||||
|
||||
parsed_skills = _parse_skills_argument(skills)
|
||||
|
||||
# Create CLI instance
|
||||
cli = HermesCLI(
|
||||
model=model,
|
||||
|
|
@ -5953,6 +6106,20 @@ def main(
|
|||
pass_session_id=pass_session_id,
|
||||
)
|
||||
|
||||
if parsed_skills:
|
||||
skills_prompt, loaded_skills, missing_skills = build_preloaded_skills_prompt(
|
||||
parsed_skills,
|
||||
task_id=cli.session_id,
|
||||
)
|
||||
if missing_skills:
|
||||
missing_display = ", ".join(missing_skills)
|
||||
raise ValueError(f"Unknown skill(s): {missing_display}")
|
||||
if skills_prompt:
|
||||
cli.system_prompt = "\n\n".join(
|
||||
part for part in (cli.system_prompt, skills_prompt) if part
|
||||
).strip()
|
||||
cli.preloaded_skills = loaded_skills
|
||||
|
||||
# Inject worktree context into agent's system prompt
|
||||
if wt_info:
|
||||
wt_note = (
|
||||
|
|
|
|||
|
|
@ -20,6 +20,9 @@ from cron.jobs import (
|
|||
list_jobs,
|
||||
remove_job,
|
||||
update_job,
|
||||
pause_job,
|
||||
resume_job,
|
||||
trigger_job,
|
||||
JOBS_FILE,
|
||||
)
|
||||
from cron.scheduler import tick
|
||||
|
|
@ -30,6 +33,9 @@ __all__ = [
|
|||
"list_jobs",
|
||||
"remove_job",
|
||||
"update_job",
|
||||
"pause_job",
|
||||
"resume_job",
|
||||
"trigger_job",
|
||||
"tick",
|
||||
"JOBS_FILE",
|
||||
]
|
||||
|
|
|
|||
161
cron/jobs.py
161
cron/jobs.py
|
|
@ -32,6 +32,32 @@ JOBS_FILE = CRON_DIR / "jobs.json"
|
|||
OUTPUT_DIR = CRON_DIR / "output"
|
||||
|
||||
|
||||
def _normalize_skill_list(skill: Optional[str] = None, skills: Optional[Any] = None) -> List[str]:
|
||||
"""Normalize legacy/single-skill and multi-skill inputs into a unique ordered list."""
|
||||
if skills is None:
|
||||
raw_items = [skill] if skill else []
|
||||
elif isinstance(skills, str):
|
||||
raw_items = [skills]
|
||||
else:
|
||||
raw_items = list(skills)
|
||||
|
||||
normalized: List[str] = []
|
||||
for item in raw_items:
|
||||
text = str(item or "").strip()
|
||||
if text and text not in normalized:
|
||||
normalized.append(text)
|
||||
return normalized
|
||||
|
||||
|
||||
def _apply_skill_fields(job: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Return a job dict with canonical `skills` and legacy `skill` fields aligned."""
|
||||
normalized = dict(job)
|
||||
skills = _normalize_skill_list(normalized.get("skill"), normalized.get("skills"))
|
||||
normalized["skills"] = skills
|
||||
normalized["skill"] = skills[0] if skills else None
|
||||
return normalized
|
||||
|
||||
|
||||
def _secure_dir(path: Path):
|
||||
"""Set directory to owner-only access (0700). No-op on Windows."""
|
||||
try:
|
||||
|
|
@ -263,39 +289,47 @@ def create_job(
|
|||
name: Optional[str] = None,
|
||||
repeat: Optional[int] = None,
|
||||
deliver: Optional[str] = None,
|
||||
origin: Optional[Dict[str, Any]] = None
|
||||
origin: Optional[Dict[str, Any]] = None,
|
||||
skill: Optional[str] = None,
|
||||
skills: Optional[List[str]] = None,
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Create a new cron job.
|
||||
|
||||
|
||||
Args:
|
||||
prompt: The prompt to run (must be self-contained)
|
||||
prompt: The prompt to run (must be self-contained, or a task instruction when skill is set)
|
||||
schedule: Schedule string (see parse_schedule)
|
||||
name: Optional friendly name
|
||||
repeat: How many times to run (None = forever, 1 = once)
|
||||
deliver: Where to deliver output ("origin", "local", "telegram", etc.)
|
||||
origin: Source info where job was created (for "origin" delivery)
|
||||
|
||||
skill: Optional legacy single skill name to load before running the prompt
|
||||
skills: Optional ordered list of skills to load before running the prompt
|
||||
|
||||
Returns:
|
||||
The created job dict
|
||||
"""
|
||||
parsed_schedule = parse_schedule(schedule)
|
||||
|
||||
|
||||
# Auto-set repeat=1 for one-shot schedules if not specified
|
||||
if parsed_schedule["kind"] == "once" and repeat is None:
|
||||
repeat = 1
|
||||
|
||||
|
||||
# Default delivery to origin if available, otherwise local
|
||||
if deliver is None:
|
||||
deliver = "origin" if origin else "local"
|
||||
|
||||
|
||||
job_id = uuid.uuid4().hex[:12]
|
||||
now = _hermes_now().isoformat()
|
||||
|
||||
|
||||
normalized_skills = _normalize_skill_list(skill, skills)
|
||||
label_source = (prompt or (normalized_skills[0] if normalized_skills else None)) or "cron job"
|
||||
job = {
|
||||
"id": job_id,
|
||||
"name": name or prompt[:50].strip(),
|
||||
"name": name or label_source[:50].strip(),
|
||||
"prompt": prompt,
|
||||
"skills": normalized_skills,
|
||||
"skill": normalized_skills[0] if normalized_skills else None,
|
||||
"schedule": parsed_schedule,
|
||||
"schedule_display": parsed_schedule.get("display", schedule),
|
||||
"repeat": {
|
||||
|
|
@ -303,6 +337,9 @@ def create_job(
|
|||
"completed": 0
|
||||
},
|
||||
"enabled": True,
|
||||
"state": "scheduled",
|
||||
"paused_at": None,
|
||||
"paused_reason": None,
|
||||
"created_at": now,
|
||||
"next_run_at": compute_next_run(parsed_schedule),
|
||||
"last_run_at": None,
|
||||
|
|
@ -312,11 +349,11 @@ def create_job(
|
|||
"deliver": deliver,
|
||||
"origin": origin, # Tracks where job was created for "origin" delivery
|
||||
}
|
||||
|
||||
|
||||
jobs = load_jobs()
|
||||
jobs.append(job)
|
||||
save_jobs(jobs)
|
||||
|
||||
|
||||
return job
|
||||
|
||||
|
||||
|
|
@ -325,29 +362,100 @@ def get_job(job_id: str) -> Optional[Dict[str, Any]]:
|
|||
jobs = load_jobs()
|
||||
for job in jobs:
|
||||
if job["id"] == job_id:
|
||||
return job
|
||||
return _apply_skill_fields(job)
|
||||
return None
|
||||
|
||||
|
||||
def list_jobs(include_disabled: bool = False) -> List[Dict[str, Any]]:
|
||||
"""List all jobs, optionally including disabled ones."""
|
||||
jobs = load_jobs()
|
||||
jobs = [_apply_skill_fields(j) for j in load_jobs()]
|
||||
if not include_disabled:
|
||||
jobs = [j for j in jobs if j.get("enabled", True)]
|
||||
return jobs
|
||||
|
||||
|
||||
def update_job(job_id: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]:
|
||||
"""Update a job by ID."""
|
||||
"""Update a job by ID, refreshing derived schedule fields when needed."""
|
||||
jobs = load_jobs()
|
||||
for i, job in enumerate(jobs):
|
||||
if job["id"] == job_id:
|
||||
jobs[i] = {**job, **updates}
|
||||
save_jobs(jobs)
|
||||
return jobs[i]
|
||||
if job["id"] != job_id:
|
||||
continue
|
||||
|
||||
updated = _apply_skill_fields({**job, **updates})
|
||||
schedule_changed = "schedule" in updates
|
||||
|
||||
if "skills" in updates or "skill" in updates:
|
||||
normalized_skills = _normalize_skill_list(updated.get("skill"), updated.get("skills"))
|
||||
updated["skills"] = normalized_skills
|
||||
updated["skill"] = normalized_skills[0] if normalized_skills else None
|
||||
|
||||
if schedule_changed:
|
||||
updated_schedule = updated["schedule"]
|
||||
updated["schedule_display"] = updates.get(
|
||||
"schedule_display",
|
||||
updated_schedule.get("display", updated.get("schedule_display")),
|
||||
)
|
||||
if updated.get("state") != "paused":
|
||||
updated["next_run_at"] = compute_next_run(updated_schedule)
|
||||
|
||||
if updated.get("enabled", True) and updated.get("state") != "paused" and not updated.get("next_run_at"):
|
||||
updated["next_run_at"] = compute_next_run(updated["schedule"])
|
||||
|
||||
jobs[i] = updated
|
||||
save_jobs(jobs)
|
||||
return _apply_skill_fields(jobs[i])
|
||||
return None
|
||||
|
||||
|
||||
def pause_job(job_id: str, reason: Optional[str] = None) -> Optional[Dict[str, Any]]:
|
||||
"""Pause a job without deleting it."""
|
||||
return update_job(
|
||||
job_id,
|
||||
{
|
||||
"enabled": False,
|
||||
"state": "paused",
|
||||
"paused_at": _hermes_now().isoformat(),
|
||||
"paused_reason": reason,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def resume_job(job_id: str) -> Optional[Dict[str, Any]]:
|
||||
"""Resume a paused job and compute the next future run from now."""
|
||||
job = get_job(job_id)
|
||||
if not job:
|
||||
return None
|
||||
|
||||
next_run_at = compute_next_run(job["schedule"])
|
||||
return update_job(
|
||||
job_id,
|
||||
{
|
||||
"enabled": True,
|
||||
"state": "scheduled",
|
||||
"paused_at": None,
|
||||
"paused_reason": None,
|
||||
"next_run_at": next_run_at,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def trigger_job(job_id: str) -> Optional[Dict[str, Any]]:
|
||||
"""Schedule a job to run on the next scheduler tick."""
|
||||
job = get_job(job_id)
|
||||
if not job:
|
||||
return None
|
||||
return update_job(
|
||||
job_id,
|
||||
{
|
||||
"enabled": True,
|
||||
"state": "scheduled",
|
||||
"paused_at": None,
|
||||
"paused_reason": None,
|
||||
"next_run_at": _hermes_now().isoformat(),
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def remove_job(job_id: str) -> bool:
|
||||
"""Remove a job by ID."""
|
||||
jobs = load_jobs()
|
||||
|
|
@ -389,11 +497,14 @@ def mark_job_run(job_id: str, success: bool, error: Optional[str] = None):
|
|||
|
||||
# Compute next run
|
||||
job["next_run_at"] = compute_next_run(job["schedule"], now)
|
||||
|
||||
|
||||
# If no next run (one-shot completed), disable
|
||||
if job["next_run_at"] is None:
|
||||
job["enabled"] = False
|
||||
|
||||
job["state"] = "completed"
|
||||
elif job.get("state") != "paused":
|
||||
job["state"] = "scheduled"
|
||||
|
||||
save_jobs(jobs)
|
||||
return
|
||||
|
||||
|
|
@ -403,21 +514,21 @@ def mark_job_run(job_id: str, success: bool, error: Optional[str] = None):
|
|||
def get_due_jobs() -> List[Dict[str, Any]]:
|
||||
"""Get all jobs that are due to run now."""
|
||||
now = _hermes_now()
|
||||
jobs = load_jobs()
|
||||
jobs = [_apply_skill_fields(j) for j in load_jobs()]
|
||||
due = []
|
||||
|
||||
|
||||
for job in jobs:
|
||||
if not job.get("enabled", True):
|
||||
continue
|
||||
|
||||
|
||||
next_run = job.get("next_run_at")
|
||||
if not next_run:
|
||||
continue
|
||||
|
||||
|
||||
next_run_dt = _ensure_aware(datetime.fromisoformat(next_run))
|
||||
if next_run_dt <= now:
|
||||
due.append(job)
|
||||
|
||||
|
||||
return due
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@ runs at a time if multiple processes overlap.
|
|||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
|
|
@ -174,6 +175,43 @@ def _deliver_result(job: dict, content: str) -> None:
|
|||
logger.warning("Job '%s': mirror_to_session failed: %s", job["id"], e)
|
||||
|
||||
|
||||
def _build_job_prompt(job: dict) -> str:
|
||||
"""Build the effective prompt for a cron job, optionally loading one or more skills first."""
|
||||
prompt = job.get("prompt", "")
|
||||
skills = job.get("skills")
|
||||
if skills is None:
|
||||
legacy = job.get("skill")
|
||||
skills = [legacy] if legacy else []
|
||||
|
||||
skill_names = [str(name).strip() for name in skills if str(name).strip()]
|
||||
if not skill_names:
|
||||
return prompt
|
||||
|
||||
from tools.skills_tool import skill_view
|
||||
|
||||
parts = []
|
||||
for skill_name in skill_names:
|
||||
loaded = json.loads(skill_view(skill_name))
|
||||
if not loaded.get("success"):
|
||||
error = loaded.get("error") or f"Failed to load skill '{skill_name}'"
|
||||
raise RuntimeError(error)
|
||||
|
||||
content = str(loaded.get("content") or "").strip()
|
||||
if parts:
|
||||
parts.append("")
|
||||
parts.extend(
|
||||
[
|
||||
f'[SYSTEM: The user has invoked the "{skill_name}" skill, indicating they want you to follow its instructions. The full skill content is loaded below.]',
|
||||
"",
|
||||
content,
|
||||
]
|
||||
)
|
||||
|
||||
if prompt:
|
||||
parts.extend(["", f"The user has provided the following instruction alongside the skill invocation: {prompt}"])
|
||||
return "\n".join(parts)
|
||||
|
||||
|
||||
def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
|
||||
"""
|
||||
Execute a single cron job.
|
||||
|
|
@ -194,9 +232,9 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
|
|||
|
||||
job_id = job["id"]
|
||||
job_name = job["name"]
|
||||
prompt = job["prompt"]
|
||||
prompt = _build_job_prompt(job)
|
||||
origin = _resolve_origin(job)
|
||||
|
||||
|
||||
logger.info("Running job '%s' (ID: %s)", job_name, job_id)
|
||||
logger.info("Prompt: %s", prompt[:100])
|
||||
|
||||
|
|
@ -302,6 +340,7 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
|
|||
providers_ignored=pr.get("ignore"),
|
||||
providers_order=pr.get("order"),
|
||||
provider_sort=pr.get("sort"),
|
||||
disabled_toolsets=["cronjob"],
|
||||
quiet_mode=True,
|
||||
platform="cron",
|
||||
session_id=f"cron_{job_id}_{_hermes_now().strftime('%Y%m%d_%H%M%S')}",
|
||||
|
|
|
|||
|
|
@ -315,7 +315,7 @@ def build_delivery_context_for_tool(
|
|||
origin: Optional[SessionSource] = None
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Build context for the schedule_cronjob tool to understand delivery options.
|
||||
Build context for the unified cronjob tool to understand delivery options.
|
||||
|
||||
This is passed to the tool so it can validate and explain delivery targets.
|
||||
"""
|
||||
|
|
|
|||
|
|
@ -173,7 +173,7 @@ platform_map = {
|
|||
}
|
||||
```
|
||||
|
||||
Without this, `schedule_cronjob(deliver="your_platform")` silently fails.
|
||||
Without this, `cronjob(action="create", deliver="your_platform", ...)` silently fails.
|
||||
|
||||
---
|
||||
|
||||
|
|
|
|||
|
|
@ -3794,9 +3794,7 @@ class GatewayRunner:
|
|||
"memory": "🧠",
|
||||
"session_search": "🔍",
|
||||
"send_message": "📨",
|
||||
"schedule_cronjob": "⏰",
|
||||
"list_cronjobs": "⏰",
|
||||
"remove_cronjob": "⏰",
|
||||
"cronjob": "⏰",
|
||||
"execute_code": "🐍",
|
||||
"delegate_task": "🔀",
|
||||
"clarify": "❓",
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@ COMMANDS_BY_CATEGORY = {
|
|||
"/tools": "List available tools",
|
||||
"/toolsets": "List available toolsets",
|
||||
"/skills": "Search, install, inspect, or manage skills from online registries",
|
||||
"/cron": "Manage scheduled tasks (list, add, remove)",
|
||||
"/cron": "Manage scheduled tasks (list, add/create, edit, pause, resume, run, remove)",
|
||||
"/reload-mcp": "Reload MCP servers from config.yaml",
|
||||
},
|
||||
"Info": {
|
||||
|
|
|
|||
|
|
@ -1092,6 +1092,13 @@ def save_anthropic_oauth_token(value: str, save_fn=None):
|
|||
writer("ANTHROPIC_API_KEY", "")
|
||||
|
||||
|
||||
def use_anthropic_claude_code_credentials(save_fn=None):
|
||||
"""Use Claude Code's own credential files instead of persisting env tokens."""
|
||||
writer = save_fn or save_env_value
|
||||
writer("ANTHROPIC_TOKEN", "")
|
||||
writer("ANTHROPIC_API_KEY", "")
|
||||
|
||||
|
||||
def save_anthropic_api_key(value: str, save_fn=None):
|
||||
"""Persist an Anthropic API key and clear the OAuth/setup-token slot."""
|
||||
writer = save_fn or save_env_value
|
||||
|
|
|
|||
|
|
@ -1,15 +1,14 @@
|
|||
"""
|
||||
Cron subcommand for hermes CLI.
|
||||
|
||||
Handles: hermes cron [list|status|tick]
|
||||
|
||||
Cronjobs are executed automatically by the gateway daemon (hermes gateway).
|
||||
Install the gateway as a service for background execution:
|
||||
hermes gateway install
|
||||
Handles standalone cron management commands like list, create, edit,
|
||||
pause/resume/run/remove, status, and tick.
|
||||
"""
|
||||
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Iterable, List, Optional
|
||||
|
||||
PROJECT_ROOT = Path(__file__).parent.parent.resolve()
|
||||
sys.path.insert(0, str(PROJECT_ROOT))
|
||||
|
|
@ -17,58 +16,82 @@ sys.path.insert(0, str(PROJECT_ROOT))
|
|||
from hermes_cli.colors import Colors, color
|
||||
|
||||
|
||||
def _normalize_skills(single_skill=None, skills: Optional[Iterable[str]] = None) -> Optional[List[str]]:
|
||||
if skills is None:
|
||||
if single_skill is None:
|
||||
return None
|
||||
raw_items = [single_skill]
|
||||
else:
|
||||
raw_items = list(skills)
|
||||
|
||||
normalized: List[str] = []
|
||||
for item in raw_items:
|
||||
text = str(item or "").strip()
|
||||
if text and text not in normalized:
|
||||
normalized.append(text)
|
||||
return normalized
|
||||
|
||||
|
||||
def _cron_api(**kwargs):
|
||||
from tools.cronjob_tools import cronjob as cronjob_tool
|
||||
|
||||
return json.loads(cronjob_tool(**kwargs))
|
||||
|
||||
|
||||
def cron_list(show_all: bool = False):
|
||||
"""List all scheduled jobs."""
|
||||
from cron.jobs import list_jobs
|
||||
|
||||
|
||||
jobs = list_jobs(include_disabled=show_all)
|
||||
|
||||
|
||||
if not jobs:
|
||||
print(color("No scheduled jobs.", Colors.DIM))
|
||||
print(color("Create one with the /cron add command in chat, or via Telegram.", Colors.DIM))
|
||||
print(color("Create one with 'hermes cron create ...' or the /cron command in chat.", Colors.DIM))
|
||||
return
|
||||
|
||||
|
||||
print()
|
||||
print(color("┌─────────────────────────────────────────────────────────────────────────┐", Colors.CYAN))
|
||||
print(color("│ Scheduled Jobs │", Colors.CYAN))
|
||||
print(color("└─────────────────────────────────────────────────────────────────────────┘", Colors.CYAN))
|
||||
print()
|
||||
|
||||
|
||||
for job in jobs:
|
||||
job_id = job.get("id", "?")[:8]
|
||||
name = job.get("name", "(unnamed)")
|
||||
schedule = job.get("schedule_display", job.get("schedule", {}).get("value", "?"))
|
||||
enabled = job.get("enabled", True)
|
||||
state = job.get("state", "scheduled" if job.get("enabled", True) else "paused")
|
||||
next_run = job.get("next_run_at", "?")
|
||||
|
||||
|
||||
repeat_info = job.get("repeat", {})
|
||||
repeat_times = repeat_info.get("times")
|
||||
repeat_completed = repeat_info.get("completed", 0)
|
||||
|
||||
if repeat_times:
|
||||
repeat_str = f"{repeat_completed}/{repeat_times}"
|
||||
else:
|
||||
repeat_str = "∞"
|
||||
|
||||
repeat_str = f"{repeat_completed}/{repeat_times}" if repeat_times else "∞"
|
||||
|
||||
deliver = job.get("deliver", ["local"])
|
||||
if isinstance(deliver, str):
|
||||
deliver = [deliver]
|
||||
deliver_str = ", ".join(deliver)
|
||||
|
||||
if not enabled:
|
||||
status = color("[disabled]", Colors.RED)
|
||||
else:
|
||||
|
||||
skills = job.get("skills") or ([job["skill"]] if job.get("skill") else [])
|
||||
if state == "paused":
|
||||
status = color("[paused]", Colors.YELLOW)
|
||||
elif state == "completed":
|
||||
status = color("[completed]", Colors.BLUE)
|
||||
elif job.get("enabled", True):
|
||||
status = color("[active]", Colors.GREEN)
|
||||
|
||||
else:
|
||||
status = color("[disabled]", Colors.RED)
|
||||
|
||||
print(f" {color(job_id, Colors.YELLOW)} {status}")
|
||||
print(f" Name: {name}")
|
||||
print(f" Schedule: {schedule}")
|
||||
print(f" Repeat: {repeat_str}")
|
||||
print(f" Next run: {next_run}")
|
||||
print(f" Deliver: {deliver_str}")
|
||||
if skills:
|
||||
print(f" Skills: {', '.join(skills)}")
|
||||
print()
|
||||
|
||||
# Warn if gateway isn't running
|
||||
|
||||
from hermes_cli.gateway import find_gateway_pids
|
||||
if not find_gateway_pids():
|
||||
print(color(" ⚠ Gateway is not running — jobs won't fire automatically.", Colors.YELLOW))
|
||||
|
|
@ -86,9 +109,9 @@ def cron_status():
|
|||
"""Show cron execution status."""
|
||||
from cron.jobs import list_jobs
|
||||
from hermes_cli.gateway import find_gateway_pids
|
||||
|
||||
|
||||
print()
|
||||
|
||||
|
||||
pids = find_gateway_pids()
|
||||
if pids:
|
||||
print(color("✓ Gateway is running — cron jobs will fire automatically", Colors.GREEN))
|
||||
|
|
@ -99,9 +122,9 @@ def cron_status():
|
|||
print(" To enable automatic execution:")
|
||||
print(" hermes gateway install # Install as system service (recommended)")
|
||||
print(" hermes gateway # Or run in foreground")
|
||||
|
||||
|
||||
print()
|
||||
|
||||
|
||||
jobs = list_jobs(include_disabled=False)
|
||||
if jobs:
|
||||
next_runs = [j.get("next_run_at") for j in jobs if j.get("next_run_at")]
|
||||
|
|
@ -110,25 +133,131 @@ def cron_status():
|
|||
print(f" Next run: {min(next_runs)}")
|
||||
else:
|
||||
print(" No active jobs")
|
||||
|
||||
|
||||
print()
|
||||
|
||||
|
||||
def cron_create(args):
|
||||
result = _cron_api(
|
||||
action="create",
|
||||
schedule=args.schedule,
|
||||
prompt=args.prompt,
|
||||
name=getattr(args, "name", None),
|
||||
deliver=getattr(args, "deliver", None),
|
||||
repeat=getattr(args, "repeat", None),
|
||||
skill=getattr(args, "skill", None),
|
||||
skills=_normalize_skills(getattr(args, "skill", None), getattr(args, "skills", None)),
|
||||
)
|
||||
if not result.get("success"):
|
||||
print(color(f"Failed to create job: {result.get('error', 'unknown error')}", Colors.RED))
|
||||
return 1
|
||||
print(color(f"Created job: {result['job_id']}", Colors.GREEN))
|
||||
print(f" Name: {result['name']}")
|
||||
print(f" Schedule: {result['schedule']}")
|
||||
if result.get("skills"):
|
||||
print(f" Skills: {', '.join(result['skills'])}")
|
||||
print(f" Next run: {result['next_run_at']}")
|
||||
return 0
|
||||
|
||||
|
||||
def cron_edit(args):
|
||||
from cron.jobs import get_job
|
||||
|
||||
job = get_job(args.job_id)
|
||||
if not job:
|
||||
print(color(f"Job not found: {args.job_id}", Colors.RED))
|
||||
return 1
|
||||
|
||||
existing_skills = list(job.get("skills") or ([] if not job.get("skill") else [job.get("skill")]))
|
||||
replacement_skills = _normalize_skills(getattr(args, "skill", None), getattr(args, "skills", None))
|
||||
add_skills = _normalize_skills(None, getattr(args, "add_skills", None)) or []
|
||||
remove_skills = set(_normalize_skills(None, getattr(args, "remove_skills", None)) or [])
|
||||
|
||||
final_skills = None
|
||||
if getattr(args, "clear_skills", False):
|
||||
final_skills = []
|
||||
elif replacement_skills is not None:
|
||||
final_skills = replacement_skills
|
||||
elif add_skills or remove_skills:
|
||||
final_skills = [skill for skill in existing_skills if skill not in remove_skills]
|
||||
for skill in add_skills:
|
||||
if skill not in final_skills:
|
||||
final_skills.append(skill)
|
||||
|
||||
result = _cron_api(
|
||||
action="update",
|
||||
job_id=args.job_id,
|
||||
schedule=getattr(args, "schedule", None),
|
||||
prompt=getattr(args, "prompt", None),
|
||||
name=getattr(args, "name", None),
|
||||
deliver=getattr(args, "deliver", None),
|
||||
repeat=getattr(args, "repeat", None),
|
||||
skills=final_skills,
|
||||
)
|
||||
if not result.get("success"):
|
||||
print(color(f"Failed to update job: {result.get('error', 'unknown error')}", Colors.RED))
|
||||
return 1
|
||||
|
||||
updated = result["job"]
|
||||
print(color(f"Updated job: {updated['job_id']}", Colors.GREEN))
|
||||
print(f" Name: {updated['name']}")
|
||||
print(f" Schedule: {updated['schedule']}")
|
||||
if updated.get("skills"):
|
||||
print(f" Skills: {', '.join(updated['skills'])}")
|
||||
else:
|
||||
print(" Skills: none")
|
||||
return 0
|
||||
|
||||
|
||||
def _job_action(action: str, job_id: str, success_verb: str) -> int:
|
||||
result = _cron_api(action=action, job_id=job_id)
|
||||
if not result.get("success"):
|
||||
print(color(f"Failed to {action} job: {result.get('error', 'unknown error')}", Colors.RED))
|
||||
return 1
|
||||
job = result.get("job") or result.get("removed_job") or {}
|
||||
print(color(f"{success_verb} job: {job.get('name', job_id)} ({job_id})", Colors.GREEN))
|
||||
if action in {"resume", "run"} and result.get("job", {}).get("next_run_at"):
|
||||
print(f" Next run: {result['job']['next_run_at']}")
|
||||
if action == "run":
|
||||
print(" It will run on the next scheduler tick.")
|
||||
return 0
|
||||
|
||||
|
||||
def cron_command(args):
|
||||
"""Handle cron subcommands."""
|
||||
subcmd = getattr(args, 'cron_command', None)
|
||||
|
||||
|
||||
if subcmd is None or subcmd == "list":
|
||||
show_all = getattr(args, 'all', False)
|
||||
cron_list(show_all)
|
||||
|
||||
elif subcmd == "tick":
|
||||
cron_tick()
|
||||
|
||||
elif subcmd == "status":
|
||||
return 0
|
||||
|
||||
if subcmd == "status":
|
||||
cron_status()
|
||||
|
||||
else:
|
||||
print(f"Unknown cron command: {subcmd}")
|
||||
print("Usage: hermes cron [list|status|tick]")
|
||||
sys.exit(1)
|
||||
return 0
|
||||
|
||||
if subcmd == "tick":
|
||||
cron_tick()
|
||||
return 0
|
||||
|
||||
if subcmd in {"create", "add"}:
|
||||
return cron_create(args)
|
||||
|
||||
if subcmd == "edit":
|
||||
return cron_edit(args)
|
||||
|
||||
if subcmd == "pause":
|
||||
return _job_action("pause", args.job_id, "Paused")
|
||||
|
||||
if subcmd == "resume":
|
||||
return _job_action("resume", args.job_id, "Resumed")
|
||||
|
||||
if subcmd == "run":
|
||||
return _job_action("run", args.job_id, "Triggered")
|
||||
|
||||
if subcmd in {"remove", "rm", "delete"}:
|
||||
return _job_action("remove", args.job_id, "Removed")
|
||||
|
||||
print(f"Unknown cron command: {subcmd}")
|
||||
print("Usage: hermes cron [list|create|edit|pause|resume|run|remove|status|tick]")
|
||||
sys.exit(1)
|
||||
|
|
|
|||
|
|
@ -499,6 +499,7 @@ def cmd_chat(args):
|
|||
"model": args.model,
|
||||
"provider": getattr(args, "provider", None),
|
||||
"toolsets": args.toolsets,
|
||||
"skills": getattr(args, "skills", None),
|
||||
"verbose": args.verbose,
|
||||
"quiet": getattr(args, "quiet", False),
|
||||
"query": args.query,
|
||||
|
|
@ -510,7 +511,11 @@ def cmd_chat(args):
|
|||
# Filter out None values
|
||||
kwargs = {k: v for k, v in kwargs.items() if v is not None}
|
||||
|
||||
cli_main(**kwargs)
|
||||
try:
|
||||
cli_main(**kwargs)
|
||||
except ValueError as e:
|
||||
print(f"Error: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def cmd_gateway(args):
|
||||
|
|
@ -1368,6 +1373,12 @@ _PROVIDER_MODELS = {
|
|||
"kimi-k2-turbo-preview",
|
||||
"kimi-k2-0905-preview",
|
||||
],
|
||||
"moonshot": [
|
||||
"kimi-k2.5",
|
||||
"kimi-k2-thinking",
|
||||
"kimi-k2-turbo-preview",
|
||||
"kimi-k2-0905-preview",
|
||||
],
|
||||
"minimax": [
|
||||
"MiniMax-M2.5",
|
||||
"MiniMax-M2.5-highspeed",
|
||||
|
|
@ -1449,8 +1460,8 @@ def _model_flow_kimi(config, current_model=""):
|
|||
"kimi-k2-thinking-turbo",
|
||||
]
|
||||
else:
|
||||
# Legacy Moonshot models
|
||||
model_list = _PROVIDER_MODELS.get(provider_id, [])
|
||||
# Legacy Moonshot models (excludes Coding Plan-only models)
|
||||
model_list = _PROVIDER_MODELS.get("moonshot", [])
|
||||
|
||||
if model_list:
|
||||
selected = _prompt_model_selection(model_list, current_model=current_model)
|
||||
|
|
@ -1586,8 +1597,30 @@ def _model_flow_api_key_provider(config, provider_id, current_model=""):
|
|||
|
||||
def _run_anthropic_oauth_flow(save_env_value):
|
||||
"""Run the Claude OAuth setup-token flow. Returns True if credentials were saved."""
|
||||
from agent.anthropic_adapter import run_oauth_setup_token
|
||||
from hermes_cli.config import save_anthropic_oauth_token
|
||||
from agent.anthropic_adapter import (
|
||||
run_oauth_setup_token,
|
||||
read_claude_code_credentials,
|
||||
is_claude_code_token_valid,
|
||||
)
|
||||
from hermes_cli.config import (
|
||||
save_anthropic_oauth_token,
|
||||
use_anthropic_claude_code_credentials,
|
||||
)
|
||||
|
||||
def _activate_claude_code_credentials_if_available() -> bool:
|
||||
try:
|
||||
creds = read_claude_code_credentials()
|
||||
except Exception:
|
||||
creds = None
|
||||
if creds and (
|
||||
is_claude_code_token_valid(creds)
|
||||
or bool(creds.get("refreshToken"))
|
||||
):
|
||||
use_anthropic_claude_code_credentials(save_fn=save_env_value)
|
||||
print(" ✓ Claude Code credentials linked.")
|
||||
print(" Hermes will use Claude's credential store directly instead of copying a setup-token into ~/.hermes/.env.")
|
||||
return True
|
||||
return False
|
||||
|
||||
try:
|
||||
print()
|
||||
|
|
@ -1596,6 +1629,8 @@ def _run_anthropic_oauth_flow(save_env_value):
|
|||
print()
|
||||
token = run_oauth_setup_token()
|
||||
if token:
|
||||
if _activate_claude_code_credentials_if_available():
|
||||
return True
|
||||
save_anthropic_oauth_token(token, save_fn=save_env_value)
|
||||
print(" ✓ OAuth credentials saved.")
|
||||
return True
|
||||
|
|
@ -2276,6 +2311,7 @@ Examples:
|
|||
hermes config edit Edit config in $EDITOR
|
||||
hermes config set model gpt-4 Set a config value
|
||||
hermes gateway Run messaging gateway
|
||||
hermes -s hermes-agent-dev,github-auth
|
||||
hermes -w Start in isolated git worktree
|
||||
hermes gateway install Install as system service
|
||||
hermes sessions list List past sessions
|
||||
|
|
@ -2314,6 +2350,12 @@ For more help on a command:
|
|||
default=False,
|
||||
help="Run in an isolated git worktree (for parallel agents)"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--skills", "-s",
|
||||
action="append",
|
||||
default=None,
|
||||
help="Preload one or more skills for the session (repeat flag or comma-separate)"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--yolo",
|
||||
action="store_true",
|
||||
|
|
@ -2349,6 +2391,12 @@ For more help on a command:
|
|||
"-t", "--toolsets",
|
||||
help="Comma-separated toolsets to enable"
|
||||
)
|
||||
chat_parser.add_argument(
|
||||
"-s", "--skills",
|
||||
action="append",
|
||||
default=None,
|
||||
help="Preload one or more skills for the session (repeat flag or comma-separate)"
|
||||
)
|
||||
chat_parser.add_argument(
|
||||
"--provider",
|
||||
choices=["auto", "openrouter", "nous", "openai-codex", "anthropic", "zai", "kimi-coding", "minimax", "minimax-cn"],
|
||||
|
|
@ -2597,13 +2645,48 @@ For more help on a command:
|
|||
# cron list
|
||||
cron_list = cron_subparsers.add_parser("list", help="List scheduled jobs")
|
||||
cron_list.add_argument("--all", action="store_true", help="Include disabled jobs")
|
||||
|
||||
|
||||
# cron create/add
|
||||
cron_create = cron_subparsers.add_parser("create", aliases=["add"], help="Create a scheduled job")
|
||||
cron_create.add_argument("schedule", help="Schedule like '30m', 'every 2h', or '0 9 * * *'")
|
||||
cron_create.add_argument("prompt", nargs="?", help="Optional self-contained prompt or task instruction")
|
||||
cron_create.add_argument("--name", help="Optional human-friendly job name")
|
||||
cron_create.add_argument("--deliver", help="Delivery target: origin, local, telegram, discord, signal, or platform:chat_id")
|
||||
cron_create.add_argument("--repeat", type=int, help="Optional repeat count")
|
||||
cron_create.add_argument("--skill", dest="skills", action="append", help="Attach a skill. Repeat to add multiple skills.")
|
||||
|
||||
# cron edit
|
||||
cron_edit = cron_subparsers.add_parser("edit", help="Edit an existing scheduled job")
|
||||
cron_edit.add_argument("job_id", help="Job ID to edit")
|
||||
cron_edit.add_argument("--schedule", help="New schedule")
|
||||
cron_edit.add_argument("--prompt", help="New prompt/task instruction")
|
||||
cron_edit.add_argument("--name", help="New job name")
|
||||
cron_edit.add_argument("--deliver", help="New delivery target")
|
||||
cron_edit.add_argument("--repeat", type=int, help="New repeat count")
|
||||
cron_edit.add_argument("--skill", dest="skills", action="append", help="Replace the job's skills with this set. Repeat to attach multiple skills.")
|
||||
cron_edit.add_argument("--add-skill", dest="add_skills", action="append", help="Append a skill without replacing the existing list. Repeatable.")
|
||||
cron_edit.add_argument("--remove-skill", dest="remove_skills", action="append", help="Remove a specific attached skill. Repeatable.")
|
||||
cron_edit.add_argument("--clear-skills", action="store_true", help="Remove all attached skills from the job")
|
||||
|
||||
# lifecycle actions
|
||||
cron_pause = cron_subparsers.add_parser("pause", help="Pause a scheduled job")
|
||||
cron_pause.add_argument("job_id", help="Job ID to pause")
|
||||
|
||||
cron_resume = cron_subparsers.add_parser("resume", help="Resume a paused job")
|
||||
cron_resume.add_argument("job_id", help="Job ID to resume")
|
||||
|
||||
cron_run = cron_subparsers.add_parser("run", help="Run a job on the next scheduler tick")
|
||||
cron_run.add_argument("job_id", help="Job ID to trigger")
|
||||
|
||||
cron_remove = cron_subparsers.add_parser("remove", aliases=["rm", "delete"], help="Remove a scheduled job")
|
||||
cron_remove.add_argument("job_id", help="Job ID to remove")
|
||||
|
||||
# cron status
|
||||
cron_subparsers.add_parser("status", help="Check if cron scheduler is running")
|
||||
|
||||
|
||||
# cron tick (mostly for debugging)
|
||||
cron_subparsers.add_parser("tick", help="Run due jobs once and exit")
|
||||
|
||||
|
||||
cron_parser.set_defaults(func=cmd_cron)
|
||||
|
||||
# =========================================================================
|
||||
|
|
|
|||
|
|
@ -460,33 +460,15 @@ def _print_setup_summary(config: dict, hermes_home):
|
|||
|
||||
tool_status = []
|
||||
|
||||
# Vision — works with OpenRouter, Nous OAuth, Codex OAuth, or OpenAI endpoint
|
||||
_has_vision = False
|
||||
if get_env_value("OPENROUTER_API_KEY"):
|
||||
_has_vision = True
|
||||
else:
|
||||
try:
|
||||
_vauth_path = Path(os.environ.get("HERMES_HOME", Path.home() / ".hermes")) / "auth.json"
|
||||
if _vauth_path.is_file():
|
||||
import json as _vjson
|
||||
# Vision — use the same runtime resolver as the actual vision tools
|
||||
try:
|
||||
from agent.auxiliary_client import get_available_vision_backends
|
||||
|
||||
_vauth = _vjson.loads(_vauth_path.read_text())
|
||||
if _vauth.get("active_provider") == "nous":
|
||||
_np = _vauth.get("providers", {}).get("nous", {})
|
||||
if _np.get("agent_key") or _np.get("access_token"):
|
||||
_has_vision = True
|
||||
elif _vauth.get("active_provider") == "openai-codex":
|
||||
_cp = _vauth.get("providers", {}).get("openai-codex", {})
|
||||
if _cp.get("tokens", {}).get("access_token"):
|
||||
_has_vision = True
|
||||
except Exception:
|
||||
pass
|
||||
if not _has_vision:
|
||||
_oai_base = get_env_value("OPENAI_BASE_URL") or ""
|
||||
if get_env_value("OPENAI_API_KEY") and "api.openai.com" in _oai_base.lower():
|
||||
_has_vision = True
|
||||
_vision_backends = get_available_vision_backends()
|
||||
except Exception:
|
||||
_vision_backends = []
|
||||
|
||||
if _has_vision:
|
||||
if _vision_backends:
|
||||
tool_status.append(("Vision (image analysis)", True, None))
|
||||
else:
|
||||
tool_status.append(("Vision (image analysis)", False, "run 'hermes setup' to configure"))
|
||||
|
|
@ -1276,58 +1258,22 @@ def setup_model_provider(config: dict):
|
|||
selected_provider = "openrouter"
|
||||
|
||||
# ── Vision & Image Analysis Setup ──
|
||||
# Vision requires a multimodal-capable provider. Check whether the user's
|
||||
# chosen provider already covers it — if so, skip the prompt entirely.
|
||||
_vision_needs_setup = True
|
||||
# Keep setup aligned with the actual runtime resolver the vision tools use.
|
||||
try:
|
||||
from agent.auxiliary_client import get_available_vision_backends
|
||||
|
||||
if selected_provider == "openrouter":
|
||||
# OpenRouter → Gemini for vision, already configured
|
||||
_vision_needs_setup = False
|
||||
elif selected_provider == "nous":
|
||||
# Nous Portal OAuth → Gemini via Nous, already configured
|
||||
_vision_needs_setup = False
|
||||
elif selected_provider == "openai-codex":
|
||||
# Codex OAuth → gpt-5.3-codex supports vision
|
||||
_vision_needs_setup = False
|
||||
elif selected_provider == "custom":
|
||||
_custom_base = (get_env_value("OPENAI_BASE_URL") or "").lower()
|
||||
if "api.openai.com" in _custom_base:
|
||||
# Direct OpenAI endpoint — show vision model picker
|
||||
print()
|
||||
print_header("Vision Model")
|
||||
print_info("Your OpenAI endpoint supports vision. Pick a model for image analysis:")
|
||||
_oai_vision_models = ["gpt-4o", "gpt-4o-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano"]
|
||||
_vm_choices = _oai_vision_models + ["Keep default (gpt-4o-mini)"]
|
||||
_vm_idx = prompt_choice("Select vision model:", _vm_choices, len(_vm_choices) - 1)
|
||||
_selected_vision_model = (
|
||||
_oai_vision_models[_vm_idx]
|
||||
if _vm_idx < len(_oai_vision_models)
|
||||
else "gpt-4o-mini"
|
||||
)
|
||||
save_env_value("AUXILIARY_VISION_MODEL", _selected_vision_model)
|
||||
print_success(f"Vision model set to {_selected_vision_model}")
|
||||
_vision_needs_setup = False
|
||||
_vision_backends = set(get_available_vision_backends())
|
||||
except Exception:
|
||||
_vision_backends = set()
|
||||
|
||||
# Even for providers without native vision, check if existing credentials
|
||||
# from a previous setup already cover it (e.g. user had OpenRouter before
|
||||
# switching to z.ai)
|
||||
if _vision_needs_setup:
|
||||
if get_env_value("OPENROUTER_API_KEY"):
|
||||
_vision_needs_setup = False
|
||||
else:
|
||||
# Check for Nous Portal OAuth in auth.json
|
||||
try:
|
||||
_auth_path = Path(os.environ.get("HERMES_HOME", Path.home() / ".hermes")) / "auth.json"
|
||||
if _auth_path.is_file():
|
||||
import json as _json
|
||||
_vision_needs_setup = not bool(_vision_backends)
|
||||
|
||||
_auth_data = _json.loads(_auth_path.read_text())
|
||||
if _auth_data.get("active_provider") == "nous":
|
||||
_nous_p = _auth_data.get("providers", {}).get("nous", {})
|
||||
if _nous_p.get("agent_key") or _nous_p.get("access_token"):
|
||||
_vision_needs_setup = False
|
||||
except Exception:
|
||||
pass
|
||||
if selected_provider in {"openrouter", "nous", "openai-codex"}:
|
||||
# If the user just selected one of our known-good vision backends during
|
||||
# setup, treat vision as covered. Auth/setup failure returns earlier.
|
||||
_vision_needs_setup = False
|
||||
elif selected_provider == "custom" and "custom" in _vision_backends:
|
||||
_vision_needs_setup = False
|
||||
|
||||
if _vision_needs_setup:
|
||||
_prov_names = {
|
||||
|
|
@ -1343,44 +1289,54 @@ def setup_model_provider(config: dict):
|
|||
|
||||
print()
|
||||
print_header("Vision & Image Analysis (optional)")
|
||||
print_info(f"Vision requires a multimodal-capable provider. {_prov_display}")
|
||||
print_info("doesn't natively support it. Choose how to enable vision,")
|
||||
print_info("or skip to configure later.")
|
||||
print_info(f"Vision uses a separate multimodal backend. {_prov_display}")
|
||||
print_info("doesn't currently provide one Hermes can auto-use for vision,")
|
||||
print_info("so choose a backend now or skip and configure later.")
|
||||
print()
|
||||
|
||||
_vision_choices = [
|
||||
"OpenRouter — uses Gemini (free tier at openrouter.ai/keys)",
|
||||
"OpenAI — enter API key & choose a vision model",
|
||||
"OpenAI-compatible endpoint — base URL, API key, and vision model",
|
||||
"Skip for now",
|
||||
]
|
||||
_vision_idx = prompt_choice("Configure vision:", _vision_choices, 2)
|
||||
|
||||
if _vision_idx == 0: # OpenRouter
|
||||
_or_key = prompt(" OpenRouter API key", password=True)
|
||||
_or_key = prompt(" OpenRouter API key", password=True).strip()
|
||||
if _or_key:
|
||||
save_env_value("OPENROUTER_API_KEY", _or_key)
|
||||
print_success("OpenRouter key saved — vision will use Gemini")
|
||||
else:
|
||||
print_info("Skipped — vision won't be available")
|
||||
elif _vision_idx == 1: # OpenAI
|
||||
_oai_key = prompt(" OpenAI API key", password=True)
|
||||
elif _vision_idx == 1: # OpenAI-compatible endpoint
|
||||
_base_url = prompt(" Base URL (blank for OpenAI)").strip() or "https://api.openai.com/v1"
|
||||
_api_key_label = " API key"
|
||||
if "api.openai.com" in _base_url.lower():
|
||||
_api_key_label = " OpenAI API key"
|
||||
_oai_key = prompt(_api_key_label, password=True).strip()
|
||||
if _oai_key:
|
||||
save_env_value("OPENAI_API_KEY", _oai_key)
|
||||
save_env_value("OPENAI_BASE_URL", "https://api.openai.com/v1")
|
||||
_oai_vision_models = ["gpt-4o", "gpt-4o-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano"]
|
||||
_vm_choices = _oai_vision_models + ["Use default (gpt-4o-mini)"]
|
||||
_vm_idx = prompt_choice("Select vision model:", _vm_choices, 0)
|
||||
_selected_vision_model = (
|
||||
_oai_vision_models[_vm_idx]
|
||||
if _vm_idx < len(_oai_vision_models)
|
||||
else "gpt-4o-mini"
|
||||
)
|
||||
save_env_value("OPENAI_BASE_URL", _base_url)
|
||||
if "api.openai.com" in _base_url.lower():
|
||||
_oai_vision_models = ["gpt-4o", "gpt-4o-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano"]
|
||||
_vm_choices = _oai_vision_models + ["Use default (gpt-4o-mini)"]
|
||||
_vm_idx = prompt_choice("Select vision model:", _vm_choices, 0)
|
||||
_selected_vision_model = (
|
||||
_oai_vision_models[_vm_idx]
|
||||
if _vm_idx < len(_oai_vision_models)
|
||||
else "gpt-4o-mini"
|
||||
)
|
||||
else:
|
||||
_selected_vision_model = prompt(" Vision model (blank = use main/custom default)").strip()
|
||||
save_env_value("AUXILIARY_VISION_MODEL", _selected_vision_model)
|
||||
print_success(f"Vision configured with OpenAI ({_selected_vision_model})")
|
||||
print_success(
|
||||
f"Vision configured with {_base_url}"
|
||||
+ (f" ({_selected_vision_model})" if _selected_vision_model else "")
|
||||
)
|
||||
else:
|
||||
print_info("Skipped — vision won't be available")
|
||||
else:
|
||||
print_info("Skipped — add later with 'hermes config set OPENROUTER_API_KEY ...'")
|
||||
print_info("Skipped — add later with 'hermes setup' or configure AUXILIARY_VISION_* settings")
|
||||
|
||||
# ── Model Selection (adapts based on provider) ──
|
||||
if selected_provider != "custom": # Custom already prompted for model name
|
||||
|
|
|
|||
|
|
@ -91,7 +91,7 @@ CONFIGURABLE_TOOLSETS = [
|
|||
("session_search", "🔎 Session Search", "search past conversations"),
|
||||
("clarify", "❓ Clarifying Questions", "clarify"),
|
||||
("delegation", "👥 Task Delegation", "delegate_task"),
|
||||
("cronjob", "⏰ Cron Jobs", "schedule, list, remove"),
|
||||
("cronjob", "⏰ Cron Jobs", "create/list/update/pause/resume/run, with optional attached skills"),
|
||||
("rl", "🧪 RL Training", "Tinker-Atropos training tools"),
|
||||
("homeassistant", "🏠 Home Assistant", "smart home device control"),
|
||||
]
|
||||
|
|
@ -362,14 +362,21 @@ def _save_platform_tools(config: dict, platform: str, enabled_toolset_keys: Set[
|
|||
|
||||
def _toolset_has_keys(ts_key: str) -> bool:
|
||||
"""Check if a toolset's required API keys are configured."""
|
||||
if ts_key == "vision":
|
||||
try:
|
||||
from agent.auxiliary_client import resolve_vision_provider_client
|
||||
|
||||
_provider, client, _model = resolve_vision_provider_client()
|
||||
return client is not None
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
# Check TOOL_CATEGORIES first (provider-aware)
|
||||
cat = TOOL_CATEGORIES.get(ts_key)
|
||||
if cat:
|
||||
for provider in cat["providers"]:
|
||||
for provider in cat.get("providers", []):
|
||||
env_vars = provider.get("env_vars", [])
|
||||
if not env_vars:
|
||||
return True # Free provider (e.g., Edge TTS)
|
||||
if all(get_env_value(v["key"]) for v in env_vars):
|
||||
if env_vars and all(get_env_value(e["key"]) for e in env_vars):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
|
@ -628,6 +635,39 @@ def _configure_provider(provider: dict, config: dict):
|
|||
|
||||
def _configure_simple_requirements(ts_key: str):
|
||||
"""Simple fallback for toolsets that just need env vars (no provider selection)."""
|
||||
if ts_key == "vision":
|
||||
if _toolset_has_keys("vision"):
|
||||
return
|
||||
print()
|
||||
print(color(" Vision / Image Analysis requires a multimodal backend:", Colors.YELLOW))
|
||||
choices = [
|
||||
"OpenRouter — uses Gemini",
|
||||
"OpenAI-compatible endpoint — base URL, API key, and vision model",
|
||||
"Skip",
|
||||
]
|
||||
idx = _prompt_choice(" Configure vision backend", choices, 2)
|
||||
if idx == 0:
|
||||
_print_info(" Get key at: https://openrouter.ai/keys")
|
||||
value = _prompt(" OPENROUTER_API_KEY", password=True)
|
||||
if value and value.strip():
|
||||
save_env_value("OPENROUTER_API_KEY", value.strip())
|
||||
_print_success(" Saved")
|
||||
else:
|
||||
_print_warning(" Skipped")
|
||||
elif idx == 1:
|
||||
base_url = _prompt(" OPENAI_BASE_URL (blank for OpenAI)").strip() or "https://api.openai.com/v1"
|
||||
key_label = " OPENAI_API_KEY" if "api.openai.com" in base_url.lower() else " API key"
|
||||
api_key = _prompt(key_label, password=True)
|
||||
if api_key and api_key.strip():
|
||||
save_env_value("OPENAI_BASE_URL", base_url)
|
||||
save_env_value("OPENAI_API_KEY", api_key.strip())
|
||||
if "api.openai.com" in base_url.lower():
|
||||
save_env_value("AUXILIARY_VISION_MODEL", "gpt-4o-mini")
|
||||
_print_success(" Saved")
|
||||
else:
|
||||
_print_warning(" Skipped")
|
||||
return
|
||||
|
||||
requirements = TOOLSET_ENV_REQUIREMENTS.get(ts_key, [])
|
||||
if not requirements:
|
||||
return
|
||||
|
|
|
|||
|
|
@ -144,7 +144,7 @@ _LEGACY_TOOLSET_MAP = {
|
|||
"browser_press", "browser_close", "browser_get_images",
|
||||
"browser_vision"
|
||||
],
|
||||
"cronjob_tools": ["schedule_cronjob", "list_cronjobs", "remove_cronjob"],
|
||||
"cronjob_tools": ["cronjob"],
|
||||
"rl_tools": [
|
||||
"rl_list_environments", "rl_select_environment",
|
||||
"rl_get_current_config", "rl_edit_config",
|
||||
|
|
|
|||
55
run_agent.py
55
run_agent.py
|
|
@ -2613,6 +2613,43 @@ class AIAgent:
|
|||
|
||||
return True
|
||||
|
||||
def _try_refresh_anthropic_client_credentials(self) -> bool:
|
||||
if self.api_mode != "anthropic_messages" or not hasattr(self, "_anthropic_api_key"):
|
||||
return False
|
||||
|
||||
try:
|
||||
from agent.anthropic_adapter import resolve_anthropic_token, build_anthropic_client
|
||||
|
||||
new_token = resolve_anthropic_token()
|
||||
except Exception as exc:
|
||||
logger.debug("Anthropic credential refresh failed: %s", exc)
|
||||
return False
|
||||
|
||||
if not isinstance(new_token, str) or not new_token.strip():
|
||||
return False
|
||||
new_token = new_token.strip()
|
||||
if new_token == self._anthropic_api_key:
|
||||
return False
|
||||
|
||||
try:
|
||||
self._anthropic_client.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
self._anthropic_client = build_anthropic_client(new_token, getattr(self, "_anthropic_base_url", None))
|
||||
except Exception as exc:
|
||||
logger.warning("Failed to rebuild Anthropic client after credential refresh: %s", exc)
|
||||
return False
|
||||
|
||||
self._anthropic_api_key = new_token
|
||||
return True
|
||||
|
||||
def _anthropic_messages_create(self, api_kwargs: dict):
|
||||
if self.api_mode == "anthropic_messages":
|
||||
self._try_refresh_anthropic_client_credentials()
|
||||
return self._anthropic_client.messages.create(**api_kwargs)
|
||||
|
||||
def _interruptible_api_call(self, api_kwargs: dict):
|
||||
"""
|
||||
Run the API call in a background thread so the main conversation loop
|
||||
|
|
@ -2629,7 +2666,7 @@ class AIAgent:
|
|||
if self.api_mode == "codex_responses":
|
||||
result["response"] = self._run_codex_stream(api_kwargs)
|
||||
elif self.api_mode == "anthropic_messages":
|
||||
result["response"] = self._anthropic_client.messages.create(**api_kwargs)
|
||||
result["response"] = self._anthropic_messages_create(api_kwargs)
|
||||
else:
|
||||
result["response"] = self.client.chat.completions.create(**api_kwargs)
|
||||
except Exception as e:
|
||||
|
|
@ -3267,7 +3304,7 @@ class AIAgent:
|
|||
tools=[memory_tool_def], max_tokens=5120,
|
||||
reasoning_config=None,
|
||||
)
|
||||
response = self._anthropic_client.messages.create(**ant_kwargs)
|
||||
response = self._anthropic_messages_create(ant_kwargs)
|
||||
elif not _aux_available:
|
||||
api_kwargs = {
|
||||
"model": self.model,
|
||||
|
|
@ -3804,7 +3841,7 @@ class AIAgent:
|
|||
'image_generate': '🎨', 'text_to_speech': '🔊',
|
||||
'vision_analyze': '👁️', 'mixture_of_agents': '🧠',
|
||||
'skills_list': '📚', 'skill_view': '📚',
|
||||
'schedule_cronjob': '⏰', 'list_cronjobs': '⏰', 'remove_cronjob': '⏰',
|
||||
'cronjob': '⏰',
|
||||
'send_message': '📨', 'todo': '📋', 'memory': '🧠', 'session_search': '🔍',
|
||||
'clarify': '❓', 'execute_code': '🐍', 'delegate_task': '🔀',
|
||||
}
|
||||
|
|
@ -4018,7 +4055,7 @@ class AIAgent:
|
|||
from agent.anthropic_adapter import build_anthropic_kwargs as _bak, normalize_anthropic_response as _nar
|
||||
_ant_kw = _bak(model=self.model, messages=api_messages, tools=None,
|
||||
max_tokens=self.max_tokens, reasoning_config=self.reasoning_config)
|
||||
summary_response = self._anthropic_client.messages.create(**_ant_kw)
|
||||
summary_response = self._anthropic_messages_create(_ant_kw)
|
||||
_msg, _ = _nar(summary_response)
|
||||
final_response = (_msg.content or "").strip()
|
||||
else:
|
||||
|
|
@ -4048,7 +4085,7 @@ class AIAgent:
|
|||
from agent.anthropic_adapter import build_anthropic_kwargs as _bak2, normalize_anthropic_response as _nar2
|
||||
_ant_kw2 = _bak2(model=self.model, messages=api_messages, tools=None,
|
||||
max_tokens=self.max_tokens, reasoning_config=self.reasoning_config)
|
||||
retry_response = self._anthropic_client.messages.create(**_ant_kw2)
|
||||
retry_response = self._anthropic_messages_create(_ant_kw2)
|
||||
_retry_msg, _ = _nar2(retry_response)
|
||||
final_response = (_retry_msg.content or "").strip()
|
||||
else:
|
||||
|
|
@ -4822,12 +4859,8 @@ class AIAgent:
|
|||
and not anthropic_auth_retry_attempted
|
||||
):
|
||||
anthropic_auth_retry_attempted = True
|
||||
# Try re-reading Claude Code credentials (they may have been refreshed)
|
||||
from agent.anthropic_adapter import resolve_anthropic_token, build_anthropic_client, _is_oauth_token
|
||||
new_token = resolve_anthropic_token()
|
||||
if new_token and new_token != self._anthropic_api_key:
|
||||
self._anthropic_api_key = new_token
|
||||
self._anthropic_client = build_anthropic_client(new_token, getattr(self, "_anthropic_base_url", None))
|
||||
from agent.anthropic_adapter import _is_oauth_token
|
||||
if self._try_refresh_anthropic_client_credentials():
|
||||
print(f"{self.log_prefix}🔐 Anthropic credentials refreshed after 401. Retrying request...")
|
||||
continue
|
||||
# Credential refresh didn't help — show diagnostic info
|
||||
|
|
|
|||
|
|
@ -155,7 +155,7 @@ terminal(command="hermes chat -q 'Summarize this codebase' --model google/gemini
|
|||
|
||||
## Gateway Cron Integration
|
||||
|
||||
For scheduled autonomous tasks, use the `schedule_cronjob` tool instead of spawning processes — cron jobs handle delivery, retry, and persistence automatically.
|
||||
For scheduled autonomous tasks, use the unified `cronjob` tool instead of spawning processes — cron jobs handle delivery, retry, and persistence automatically.
|
||||
|
||||
## Key Differences Between Modes
|
||||
|
||||
|
|
|
|||
|
|
@ -4,7 +4,11 @@ import os
|
|||
from unittest.mock import patch
|
||||
|
||||
import tools.skills_tool as skills_tool_module
|
||||
from agent.skill_commands import scan_skill_commands, build_skill_invocation_message
|
||||
from agent.skill_commands import (
|
||||
scan_skill_commands,
|
||||
build_skill_invocation_message,
|
||||
build_preloaded_skills_prompt,
|
||||
)
|
||||
|
||||
|
||||
def _make_skill(
|
||||
|
|
@ -79,6 +83,33 @@ class TestScanSkillCommands:
|
|||
assert "/generic-tool" in result
|
||||
|
||||
|
||||
class TestBuildPreloadedSkillsPrompt:
|
||||
def test_builds_prompt_for_multiple_named_skills(self, tmp_path):
|
||||
with patch("tools.skills_tool.SKILLS_DIR", tmp_path):
|
||||
_make_skill(tmp_path, "first-skill")
|
||||
_make_skill(tmp_path, "second-skill")
|
||||
prompt, loaded, missing = build_preloaded_skills_prompt(
|
||||
["first-skill", "second-skill"]
|
||||
)
|
||||
|
||||
assert missing == []
|
||||
assert loaded == ["first-skill", "second-skill"]
|
||||
assert "first-skill" in prompt
|
||||
assert "second-skill" in prompt
|
||||
assert "preloaded" in prompt.lower()
|
||||
|
||||
def test_reports_missing_named_skills(self, tmp_path):
|
||||
with patch("tools.skills_tool.SKILLS_DIR", tmp_path):
|
||||
_make_skill(tmp_path, "present-skill")
|
||||
prompt, loaded, missing = build_preloaded_skills_prompt(
|
||||
["present-skill", "missing-skill"]
|
||||
)
|
||||
|
||||
assert "present-skill" in prompt
|
||||
assert loaded == ["present-skill"]
|
||||
assert missing == ["missing-skill"]
|
||||
|
||||
|
||||
class TestBuildSkillInvocationMessage:
|
||||
def test_loads_skill_by_stored_path_when_frontmatter_name_differs(self, tmp_path):
|
||||
skill_dir = tmp_path / "mlops" / "audiocraft"
|
||||
|
|
|
|||
|
|
@ -16,6 +16,8 @@ from cron.jobs import (
|
|||
get_job,
|
||||
list_jobs,
|
||||
update_job,
|
||||
pause_job,
|
||||
resume_job,
|
||||
remove_job,
|
||||
mark_job_run,
|
||||
get_due_jobs,
|
||||
|
|
@ -233,14 +235,18 @@ class TestUpdateJob:
|
|||
job = create_job(prompt="Daily report", schedule="every 1h")
|
||||
assert job["schedule"]["kind"] == "interval"
|
||||
assert job["schedule"]["minutes"] == 60
|
||||
old_next_run = job["next_run_at"]
|
||||
new_schedule = parse_schedule("every 2h")
|
||||
updated = update_job(job["id"], {"schedule": new_schedule})
|
||||
updated = update_job(job["id"], {"schedule": new_schedule, "schedule_display": new_schedule["display"]})
|
||||
assert updated is not None
|
||||
assert updated["schedule"]["kind"] == "interval"
|
||||
assert updated["schedule"]["minutes"] == 120
|
||||
assert updated["schedule_display"] == "every 120m"
|
||||
assert updated["next_run_at"] != old_next_run
|
||||
# Verify persisted to disk
|
||||
fetched = get_job(job["id"])
|
||||
assert fetched["schedule"]["minutes"] == 120
|
||||
assert fetched["schedule_display"] == "every 120m"
|
||||
|
||||
def test_update_enable_disable(self, tmp_cron_dir):
|
||||
job = create_job(prompt="Toggle me", schedule="every 1h")
|
||||
|
|
@ -255,6 +261,26 @@ class TestUpdateJob:
|
|||
assert result is None
|
||||
|
||||
|
||||
class TestPauseResumeJob:
|
||||
def test_pause_sets_state(self, tmp_cron_dir):
|
||||
job = create_job(prompt="Pause me", schedule="every 1h")
|
||||
paused = pause_job(job["id"], reason="user paused")
|
||||
assert paused is not None
|
||||
assert paused["enabled"] is False
|
||||
assert paused["state"] == "paused"
|
||||
assert paused["paused_reason"] == "user paused"
|
||||
|
||||
def test_resume_reenables_job(self, tmp_cron_dir):
|
||||
job = create_job(prompt="Resume me", schedule="every 1h")
|
||||
pause_job(job["id"], reason="user paused")
|
||||
resumed = resume_job(job["id"])
|
||||
assert resumed is not None
|
||||
assert resumed["enabled"] is True
|
||||
assert resumed["state"] == "scheduled"
|
||||
assert resumed["paused_at"] is None
|
||||
assert resumed["paused_reason"] is None
|
||||
|
||||
|
||||
class TestMarkJobRun:
|
||||
def test_increments_completed(self, tmp_cron_dir):
|
||||
job = create_job(prompt="Test", schedule="every 1h")
|
||||
|
|
|
|||
|
|
@ -307,3 +307,94 @@ class TestRunJobConfigLogging:
|
|||
|
||||
assert any("failed to parse prefill messages" in r.message for r in caplog.records), \
|
||||
f"Expected 'failed to parse prefill messages' warning in logs, got: {[r.message for r in caplog.records]}"
|
||||
|
||||
|
||||
class TestRunJobSkillBacked:
|
||||
def test_run_job_loads_skill_and_disables_recursive_cron_tools(self, tmp_path):
|
||||
job = {
|
||||
"id": "skill-job",
|
||||
"name": "skill test",
|
||||
"prompt": "Check the feeds and summarize anything new.",
|
||||
"skill": "blogwatcher",
|
||||
}
|
||||
|
||||
fake_db = MagicMock()
|
||||
|
||||
with patch("cron.scheduler._hermes_home", tmp_path), \
|
||||
patch("cron.scheduler._resolve_origin", return_value=None), \
|
||||
patch("dotenv.load_dotenv"), \
|
||||
patch("hermes_state.SessionDB", return_value=fake_db), \
|
||||
patch(
|
||||
"hermes_cli.runtime_provider.resolve_runtime_provider",
|
||||
return_value={
|
||||
"api_key": "***",
|
||||
"base_url": "https://example.invalid/v1",
|
||||
"provider": "openrouter",
|
||||
"api_mode": "chat_completions",
|
||||
},
|
||||
), \
|
||||
patch("tools.skills_tool.skill_view", return_value=json.dumps({"success": True, "content": "# Blogwatcher\nFollow this skill."})), \
|
||||
patch("run_agent.AIAgent") as mock_agent_cls:
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.run_conversation.return_value = {"final_response": "ok"}
|
||||
mock_agent_cls.return_value = mock_agent
|
||||
|
||||
success, output, final_response, error = run_job(job)
|
||||
|
||||
assert success is True
|
||||
assert error is None
|
||||
assert final_response == "ok"
|
||||
|
||||
kwargs = mock_agent_cls.call_args.kwargs
|
||||
assert "cronjob" in (kwargs["disabled_toolsets"] or [])
|
||||
|
||||
prompt_arg = mock_agent.run_conversation.call_args.args[0]
|
||||
assert "blogwatcher" in prompt_arg
|
||||
assert "Follow this skill" in prompt_arg
|
||||
assert "Check the feeds and summarize anything new." in prompt_arg
|
||||
|
||||
def test_run_job_loads_multiple_skills_in_order(self, tmp_path):
|
||||
job = {
|
||||
"id": "multi-skill-job",
|
||||
"name": "multi skill test",
|
||||
"prompt": "Combine the results.",
|
||||
"skills": ["blogwatcher", "find-nearby"],
|
||||
}
|
||||
|
||||
fake_db = MagicMock()
|
||||
|
||||
def _skill_view(name):
|
||||
return json.dumps({"success": True, "content": f"# {name}\nInstructions for {name}."})
|
||||
|
||||
with patch("cron.scheduler._hermes_home", tmp_path), \
|
||||
patch("cron.scheduler._resolve_origin", return_value=None), \
|
||||
patch("dotenv.load_dotenv"), \
|
||||
patch("hermes_state.SessionDB", return_value=fake_db), \
|
||||
patch(
|
||||
"hermes_cli.runtime_provider.resolve_runtime_provider",
|
||||
return_value={
|
||||
"api_key": "***",
|
||||
"base_url": "https://example.invalid/v1",
|
||||
"provider": "openrouter",
|
||||
"api_mode": "chat_completions",
|
||||
},
|
||||
), \
|
||||
patch("tools.skills_tool.skill_view", side_effect=_skill_view) as skill_view_mock, \
|
||||
patch("run_agent.AIAgent") as mock_agent_cls:
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.run_conversation.return_value = {"final_response": "ok"}
|
||||
mock_agent_cls.return_value = mock_agent
|
||||
|
||||
success, output, final_response, error = run_job(job)
|
||||
|
||||
assert success is True
|
||||
assert error is None
|
||||
assert final_response == "ok"
|
||||
assert skill_view_mock.call_count == 2
|
||||
assert [call.args[0] for call in skill_view_mock.call_args_list] == ["blogwatcher", "find-nearby"]
|
||||
|
||||
prompt_arg = mock_agent.run_conversation.call_args.args[0]
|
||||
assert prompt_arg.index("blogwatcher") < prompt_arg.index("find-nearby")
|
||||
assert "Instructions for blogwatcher." in prompt_arg
|
||||
assert "Instructions for find-nearby." in prompt_arg
|
||||
assert "Combine the results." in prompt_arg
|
||||
|
|
|
|||
77
tests/hermes_cli/test_chat_skills_flag.py
Normal file
77
tests/hermes_cli/test_chat_skills_flag.py
Normal file
|
|
@ -0,0 +1,77 @@
|
|||
import sys
|
||||
|
||||
|
||||
def test_top_level_skills_flag_defaults_to_chat(monkeypatch):
|
||||
import hermes_cli.main as main_mod
|
||||
|
||||
captured = {}
|
||||
|
||||
def fake_cmd_chat(args):
|
||||
captured["skills"] = args.skills
|
||||
captured["command"] = args.command
|
||||
|
||||
monkeypatch.setattr(main_mod, "cmd_chat", fake_cmd_chat)
|
||||
monkeypatch.setattr(
|
||||
sys,
|
||||
"argv",
|
||||
["hermes", "-s", "hermes-agent-dev,github-auth"],
|
||||
)
|
||||
|
||||
main_mod.main()
|
||||
|
||||
assert captured == {
|
||||
"skills": ["hermes-agent-dev,github-auth"],
|
||||
"command": None,
|
||||
}
|
||||
|
||||
|
||||
def test_chat_subcommand_accepts_skills_flag(monkeypatch):
|
||||
import hermes_cli.main as main_mod
|
||||
|
||||
captured = {}
|
||||
|
||||
def fake_cmd_chat(args):
|
||||
captured["skills"] = args.skills
|
||||
captured["query"] = args.query
|
||||
|
||||
monkeypatch.setattr(main_mod, "cmd_chat", fake_cmd_chat)
|
||||
monkeypatch.setattr(
|
||||
sys,
|
||||
"argv",
|
||||
["hermes", "chat", "-s", "github-auth", "-q", "hello"],
|
||||
)
|
||||
|
||||
main_mod.main()
|
||||
|
||||
assert captured == {
|
||||
"skills": ["github-auth"],
|
||||
"query": "hello",
|
||||
}
|
||||
|
||||
|
||||
def test_continue_worktree_and_skills_flags_work_together(monkeypatch):
|
||||
import hermes_cli.main as main_mod
|
||||
|
||||
captured = {}
|
||||
|
||||
def fake_cmd_chat(args):
|
||||
captured["continue_last"] = args.continue_last
|
||||
captured["worktree"] = args.worktree
|
||||
captured["skills"] = args.skills
|
||||
captured["command"] = args.command
|
||||
|
||||
monkeypatch.setattr(main_mod, "cmd_chat", fake_cmd_chat)
|
||||
monkeypatch.setattr(
|
||||
sys,
|
||||
"argv",
|
||||
["hermes", "-c", "-w", "-s", "hermes-agent-dev"],
|
||||
)
|
||||
|
||||
main_mod.main()
|
||||
|
||||
assert captured == {
|
||||
"continue_last": True,
|
||||
"worktree": True,
|
||||
"skills": ["hermes-agent-dev"],
|
||||
"command": "chat",
|
||||
}
|
||||
107
tests/hermes_cli/test_cron.py
Normal file
107
tests/hermes_cli/test_cron.py
Normal file
|
|
@ -0,0 +1,107 @@
|
|||
"""Tests for hermes_cli.cron command handling."""
|
||||
|
||||
from argparse import Namespace
|
||||
|
||||
import pytest
|
||||
|
||||
from cron.jobs import create_job, get_job, list_jobs
|
||||
from hermes_cli.cron import cron_command
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def tmp_cron_dir(tmp_path, monkeypatch):
|
||||
monkeypatch.setattr("cron.jobs.CRON_DIR", tmp_path / "cron")
|
||||
monkeypatch.setattr("cron.jobs.JOBS_FILE", tmp_path / "cron" / "jobs.json")
|
||||
monkeypatch.setattr("cron.jobs.OUTPUT_DIR", tmp_path / "cron" / "output")
|
||||
return tmp_path
|
||||
|
||||
|
||||
class TestCronCommandLifecycle:
|
||||
def test_pause_resume_run(self, tmp_cron_dir, capsys):
|
||||
job = create_job(prompt="Check server status", schedule="every 1h")
|
||||
|
||||
cron_command(Namespace(cron_command="pause", job_id=job["id"]))
|
||||
paused = get_job(job["id"])
|
||||
assert paused["state"] == "paused"
|
||||
|
||||
cron_command(Namespace(cron_command="resume", job_id=job["id"]))
|
||||
resumed = get_job(job["id"])
|
||||
assert resumed["state"] == "scheduled"
|
||||
|
||||
cron_command(Namespace(cron_command="run", job_id=job["id"]))
|
||||
triggered = get_job(job["id"])
|
||||
assert triggered["state"] == "scheduled"
|
||||
|
||||
out = capsys.readouterr().out
|
||||
assert "Paused job" in out
|
||||
assert "Resumed job" in out
|
||||
assert "Triggered job" in out
|
||||
|
||||
def test_edit_can_replace_and_clear_skills(self, tmp_cron_dir, capsys):
|
||||
job = create_job(
|
||||
prompt="Combine skill outputs",
|
||||
schedule="every 1h",
|
||||
skill="blogwatcher",
|
||||
)
|
||||
|
||||
cron_command(
|
||||
Namespace(
|
||||
cron_command="edit",
|
||||
job_id=job["id"],
|
||||
schedule="every 2h",
|
||||
prompt="Revised prompt",
|
||||
name="Edited Job",
|
||||
deliver=None,
|
||||
repeat=None,
|
||||
skill=None,
|
||||
skills=["find-nearby", "blogwatcher"],
|
||||
clear_skills=False,
|
||||
)
|
||||
)
|
||||
updated = get_job(job["id"])
|
||||
assert updated["skills"] == ["find-nearby", "blogwatcher"]
|
||||
assert updated["name"] == "Edited Job"
|
||||
assert updated["prompt"] == "Revised prompt"
|
||||
assert updated["schedule_display"] == "every 120m"
|
||||
|
||||
cron_command(
|
||||
Namespace(
|
||||
cron_command="edit",
|
||||
job_id=job["id"],
|
||||
schedule=None,
|
||||
prompt=None,
|
||||
name=None,
|
||||
deliver=None,
|
||||
repeat=None,
|
||||
skill=None,
|
||||
skills=None,
|
||||
clear_skills=True,
|
||||
)
|
||||
)
|
||||
cleared = get_job(job["id"])
|
||||
assert cleared["skills"] == []
|
||||
assert cleared["skill"] is None
|
||||
|
||||
out = capsys.readouterr().out
|
||||
assert "Updated job" in out
|
||||
|
||||
def test_create_with_multiple_skills(self, tmp_cron_dir, capsys):
|
||||
cron_command(
|
||||
Namespace(
|
||||
cron_command="create",
|
||||
schedule="every 1h",
|
||||
prompt="Use both skills",
|
||||
name="Skill combo",
|
||||
deliver=None,
|
||||
repeat=None,
|
||||
skill=None,
|
||||
skills=["blogwatcher", "find-nearby"],
|
||||
)
|
||||
)
|
||||
out = capsys.readouterr().out
|
||||
assert "Created job" in out
|
||||
|
||||
jobs = list_jobs()
|
||||
assert len(jobs) == 1
|
||||
assert jobs[0]["skills"] == ["blogwatcher", "find-nearby"]
|
||||
assert jobs[0]["name"] == "Skill combo"
|
||||
|
|
@ -39,6 +39,8 @@ def test_setup_keep_current_custom_from_config_does_not_fall_through(tmp_path, m
|
|||
"""Keep-current custom should not fall through to the generic model menu."""
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
_clear_provider_env(monkeypatch)
|
||||
save_env_value("OPENAI_BASE_URL", "https://example.invalid/v1")
|
||||
save_env_value("OPENAI_API_KEY", "custom-key")
|
||||
|
||||
config = load_config()
|
||||
config["model"] = {
|
||||
|
|
@ -55,10 +57,6 @@ def test_setup_keep_current_custom_from_config_does_not_fall_through(tmp_path, m
|
|||
if calls["count"] == 1:
|
||||
assert choices[-1] == "Keep current (Custom: https://example.invalid/v1)"
|
||||
return len(choices) - 1
|
||||
if calls["count"] == 2:
|
||||
assert question == "Configure vision:"
|
||||
assert choices[-1] == "Skip for now"
|
||||
return len(choices) - 1
|
||||
raise AssertionError("Model menu should not appear for keep-current custom")
|
||||
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
|
||||
|
|
@ -74,7 +72,7 @@ def test_setup_keep_current_custom_from_config_does_not_fall_through(tmp_path, m
|
|||
assert reloaded["model"]["provider"] == "custom"
|
||||
assert reloaded["model"]["default"] == "custom/model"
|
||||
assert reloaded["model"]["base_url"] == "https://example.invalid/v1"
|
||||
assert calls["count"] == 2
|
||||
assert calls["count"] == 1
|
||||
|
||||
|
||||
def test_setup_keep_current_config_provider_uses_provider_specific_model_menu(tmp_path, monkeypatch):
|
||||
|
|
@ -214,7 +212,7 @@ def test_setup_summary_marks_codex_auth_as_vision_available(tmp_path, monkeypatc
|
|||
_clear_provider_env(monkeypatch)
|
||||
|
||||
(tmp_path / "auth.json").write_text(
|
||||
'{"active_provider":"openai-codex","providers":{"openai-codex":{"tokens":{"access_token":"tok"}}}}'
|
||||
'{"active_provider":"openai-codex","providers":{"openai-codex":{"tokens":{"access_token": "***", "refresh_token": "***"}}}}'
|
||||
)
|
||||
|
||||
monkeypatch.setattr("shutil.which", lambda _name: None)
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
"""Tests for hermes_cli.tools_config platform tool persistence."""
|
||||
|
||||
from hermes_cli.tools_config import _get_platform_tools, _platform_toolset_summary
|
||||
from hermes_cli.tools_config import _get_platform_tools, _platform_toolset_summary, _toolset_has_keys
|
||||
|
||||
|
||||
def test_get_platform_tools_uses_default_when_platform_not_configured():
|
||||
|
|
@ -26,3 +26,17 @@ def test_platform_toolset_summary_uses_explicit_platform_list():
|
|||
|
||||
assert set(summary.keys()) == {"cli"}
|
||||
assert summary["cli"] == _get_platform_tools(config, "cli")
|
||||
|
||||
|
||||
def test_toolset_has_keys_for_vision_accepts_codex_auth(tmp_path, monkeypatch):
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
(tmp_path / "auth.json").write_text(
|
||||
'{"active_provider":"openai-codex","providers":{"openai-codex":{"tokens":{"access_token":"codex-access-token","refresh_token":"codex-refresh-token"}}}}'
|
||||
)
|
||||
monkeypatch.delenv("OPENROUTER_API_KEY", raising=False)
|
||||
monkeypatch.delenv("OPENAI_BASE_URL", raising=False)
|
||||
monkeypatch.delenv("OPENAI_API_KEY", raising=False)
|
||||
monkeypatch.delenv("AUXILIARY_VISION_PROVIDER", raising=False)
|
||||
monkeypatch.delenv("CONTEXT_VISION_PROVIDER", raising=False)
|
||||
|
||||
assert _toolset_has_keys("vision") is True
|
||||
|
|
|
|||
|
|
@ -181,6 +181,33 @@ class TestResolveAnthropicToken:
|
|||
monkeypatch.setattr("agent.anthropic_adapter.Path.home", lambda: tmp_path)
|
||||
assert resolve_anthropic_token() == "cc-auto-token"
|
||||
|
||||
def test_prefers_refreshable_claude_code_credentials_over_static_anthropic_token(self, monkeypatch, tmp_path):
|
||||
monkeypatch.delenv("ANTHROPIC_API_KEY", raising=False)
|
||||
monkeypatch.setenv("ANTHROPIC_TOKEN", "sk-ant-oat01-static-token")
|
||||
monkeypatch.delenv("CLAUDE_CODE_OAUTH_TOKEN", raising=False)
|
||||
cred_file = tmp_path / ".claude" / ".credentials.json"
|
||||
cred_file.parent.mkdir(parents=True)
|
||||
cred_file.write_text(json.dumps({
|
||||
"claudeAiOauth": {
|
||||
"accessToken": "cc-auto-token",
|
||||
"refreshToken": "refresh-token",
|
||||
"expiresAt": int(time.time() * 1000) + 3600_000,
|
||||
}
|
||||
}))
|
||||
monkeypatch.setattr("agent.anthropic_adapter.Path.home", lambda: tmp_path)
|
||||
|
||||
assert resolve_anthropic_token() == "cc-auto-token"
|
||||
|
||||
def test_keeps_static_anthropic_token_when_only_non_refreshable_claude_key_exists(self, monkeypatch, tmp_path):
|
||||
monkeypatch.delenv("ANTHROPIC_API_KEY", raising=False)
|
||||
monkeypatch.setenv("ANTHROPIC_TOKEN", "sk-ant-oat01-static-token")
|
||||
monkeypatch.delenv("CLAUDE_CODE_OAUTH_TOKEN", raising=False)
|
||||
claude_json = tmp_path / ".claude.json"
|
||||
claude_json.write_text(json.dumps({"primaryApiKey": "sk-ant-api03-managed-key"}))
|
||||
monkeypatch.setattr("agent.anthropic_adapter.Path.home", lambda: tmp_path)
|
||||
|
||||
assert resolve_anthropic_token() == "sk-ant-oat01-static-token"
|
||||
|
||||
|
||||
class TestRefreshOauthToken:
|
||||
def test_returns_none_without_refresh_token(self):
|
||||
|
|
@ -279,6 +306,27 @@ class TestResolveWithRefresh:
|
|||
|
||||
assert result == "refreshed-token"
|
||||
|
||||
def test_static_env_oauth_token_does_not_block_refreshable_claude_creds(self, monkeypatch, tmp_path):
|
||||
monkeypatch.delenv("ANTHROPIC_API_KEY", raising=False)
|
||||
monkeypatch.setenv("ANTHROPIC_TOKEN", "sk-ant-oat01-expired-env-token")
|
||||
monkeypatch.delenv("CLAUDE_CODE_OAUTH_TOKEN", raising=False)
|
||||
|
||||
cred_file = tmp_path / ".claude" / ".credentials.json"
|
||||
cred_file.parent.mkdir(parents=True)
|
||||
cred_file.write_text(json.dumps({
|
||||
"claudeAiOauth": {
|
||||
"accessToken": "expired-claude-creds-token",
|
||||
"refreshToken": "valid-refresh",
|
||||
"expiresAt": int(time.time() * 1000) - 3600_000,
|
||||
}
|
||||
}))
|
||||
monkeypatch.setattr("agent.anthropic_adapter.Path.home", lambda: tmp_path)
|
||||
|
||||
with patch("agent.anthropic_adapter._refresh_oauth_token", return_value="refreshed-token"):
|
||||
result = resolve_anthropic_token()
|
||||
|
||||
assert result == "refreshed-token"
|
||||
|
||||
|
||||
class TestRunOauthSetupToken:
|
||||
def test_raises_when_claude_not_installed(self, monkeypatch):
|
||||
|
|
|
|||
51
tests/test_anthropic_oauth_flow.py
Normal file
51
tests/test_anthropic_oauth_flow.py
Normal file
|
|
@ -0,0 +1,51 @@
|
|||
"""Tests for Anthropic OAuth setup flow behavior."""
|
||||
|
||||
from hermes_cli.config import load_env, save_env_value
|
||||
|
||||
|
||||
def test_run_anthropic_oauth_flow_prefers_claude_code_credentials(tmp_path, monkeypatch, capsys):
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
monkeypatch.setattr(
|
||||
"agent.anthropic_adapter.run_oauth_setup_token",
|
||||
lambda: "sk-ant-oat01-from-claude-setup",
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
"agent.anthropic_adapter.read_claude_code_credentials",
|
||||
lambda: {
|
||||
"accessToken": "cc-access-token",
|
||||
"refreshToken": "cc-refresh-token",
|
||||
"expiresAt": 9999999999999,
|
||||
},
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
"agent.anthropic_adapter.is_claude_code_token_valid",
|
||||
lambda creds: True,
|
||||
)
|
||||
|
||||
from hermes_cli.main import _run_anthropic_oauth_flow
|
||||
|
||||
save_env_value("ANTHROPIC_TOKEN", "stale-env-token")
|
||||
assert _run_anthropic_oauth_flow(save_env_value) is True
|
||||
|
||||
env_vars = load_env()
|
||||
assert env_vars["ANTHROPIC_TOKEN"] == ""
|
||||
assert env_vars["ANTHROPIC_API_KEY"] == ""
|
||||
output = capsys.readouterr().out
|
||||
assert "Claude Code credentials linked" in output
|
||||
|
||||
|
||||
def test_run_anthropic_oauth_flow_manual_token_still_persists(tmp_path, monkeypatch, capsys):
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
monkeypatch.setattr("agent.anthropic_adapter.run_oauth_setup_token", lambda: None)
|
||||
monkeypatch.setattr("agent.anthropic_adapter.read_claude_code_credentials", lambda: None)
|
||||
monkeypatch.setattr("agent.anthropic_adapter.is_claude_code_token_valid", lambda creds: False)
|
||||
monkeypatch.setattr("builtins.input", lambda _prompt="": "sk-ant-oat01-manual-token")
|
||||
|
||||
from hermes_cli.main import _run_anthropic_oauth_flow
|
||||
|
||||
assert _run_anthropic_oauth_flow(save_env_value) is True
|
||||
|
||||
env_vars = load_env()
|
||||
assert env_vars["ANTHROPIC_TOKEN"] == "sk-ant-oat01-manual-token"
|
||||
output = capsys.readouterr().out
|
||||
assert "Setup-token saved" in output
|
||||
|
|
@ -17,6 +17,21 @@ def test_save_anthropic_oauth_token_uses_token_slot_and_clears_api_key(tmp_path,
|
|||
assert env_vars["ANTHROPIC_API_KEY"] == ""
|
||||
|
||||
|
||||
def test_use_anthropic_claude_code_credentials_clears_env_slots(tmp_path, monkeypatch):
|
||||
home = tmp_path / "hermes"
|
||||
home.mkdir()
|
||||
monkeypatch.setenv("HERMES_HOME", str(home))
|
||||
|
||||
from hermes_cli.config import save_anthropic_oauth_token, use_anthropic_claude_code_credentials
|
||||
|
||||
save_anthropic_oauth_token("sk-ant-oat01-token")
|
||||
use_anthropic_claude_code_credentials()
|
||||
|
||||
env_vars = load_env()
|
||||
assert env_vars["ANTHROPIC_TOKEN"] == ""
|
||||
assert env_vars["ANTHROPIC_API_KEY"] == ""
|
||||
|
||||
|
||||
def test_save_anthropic_api_key_uses_api_key_slot_and_clears_token(tmp_path, monkeypatch):
|
||||
home = tmp_path / "hermes"
|
||||
home.mkdir()
|
||||
|
|
@ -24,8 +39,8 @@ def test_save_anthropic_api_key_uses_api_key_slot_and_clears_token(tmp_path, mon
|
|||
|
||||
from hermes_cli.config import save_anthropic_api_key
|
||||
|
||||
save_anthropic_api_key("sk-ant-api03-test-key")
|
||||
save_anthropic_api_key("sk-ant-api03-key")
|
||||
|
||||
env_vars = load_env()
|
||||
assert env_vars["ANTHROPIC_API_KEY"] == "sk-ant-api03-test-key"
|
||||
assert env_vars["ANTHROPIC_API_KEY"] == "sk-ant-api03-key"
|
||||
assert env_vars["ANTHROPIC_TOKEN"] == ""
|
||||
|
|
|
|||
|
|
@ -426,3 +426,30 @@ class TestKimiCodeCredentialAutoDetect:
|
|||
monkeypatch.setenv("GLM_API_KEY", "sk-kimi-looks-like-kimi-but-isnt")
|
||||
creds = resolve_api_key_provider_credentials("zai")
|
||||
assert creds["base_url"] == "https://api.z.ai/api/paas/v4"
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Kimi / Moonshot model list isolation tests
|
||||
# =============================================================================
|
||||
|
||||
class TestKimiMoonshotModelListIsolation:
|
||||
"""Moonshot (legacy) users must not see Coding Plan-only models."""
|
||||
|
||||
def test_moonshot_list_excludes_coding_plan_only_models(self):
|
||||
from hermes_cli.main import _PROVIDER_MODELS
|
||||
moonshot_models = _PROVIDER_MODELS["moonshot"]
|
||||
coding_plan_only = {"kimi-for-coding", "kimi-k2-thinking-turbo"}
|
||||
leaked = set(moonshot_models) & coding_plan_only
|
||||
assert not leaked, f"Moonshot list contains Coding Plan-only models: {leaked}"
|
||||
|
||||
def test_moonshot_list_contains_shared_models(self):
|
||||
from hermes_cli.main import _PROVIDER_MODELS
|
||||
moonshot_models = _PROVIDER_MODELS["moonshot"]
|
||||
assert "kimi-k2.5" in moonshot_models
|
||||
assert "kimi-k2-thinking" in moonshot_models
|
||||
|
||||
def test_coding_plan_list_contains_plan_specific_models(self):
|
||||
from hermes_cli.main import _PROVIDER_MODELS
|
||||
coding_models = _PROVIDER_MODELS["kimi-coding"]
|
||||
assert "kimi-for-coding" in coding_models
|
||||
assert "kimi-k2-thinking-turbo" in coding_models
|
||||
|
|
|
|||
130
tests/test_cli_preloaded_skills.py
Normal file
130
tests/test_cli_preloaded_skills.py
Normal file
|
|
@ -0,0 +1,130 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import importlib
|
||||
import os
|
||||
import sys
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
def _make_real_cli(**kwargs):
|
||||
clean_config = {
|
||||
"model": {
|
||||
"default": "anthropic/claude-opus-4.6",
|
||||
"base_url": "https://openrouter.ai/api/v1",
|
||||
"provider": "auto",
|
||||
},
|
||||
"display": {"compact": False, "tool_progress": "all"},
|
||||
"agent": {},
|
||||
"terminal": {"env_type": "local"},
|
||||
}
|
||||
clean_env = {"LLM_MODEL": "", "HERMES_MAX_ITERATIONS": ""}
|
||||
prompt_toolkit_stubs = {
|
||||
"prompt_toolkit": MagicMock(),
|
||||
"prompt_toolkit.history": MagicMock(),
|
||||
"prompt_toolkit.styles": MagicMock(),
|
||||
"prompt_toolkit.patch_stdout": MagicMock(),
|
||||
"prompt_toolkit.application": MagicMock(),
|
||||
"prompt_toolkit.layout": MagicMock(),
|
||||
"prompt_toolkit.layout.processors": MagicMock(),
|
||||
"prompt_toolkit.filters": MagicMock(),
|
||||
"prompt_toolkit.layout.dimension": MagicMock(),
|
||||
"prompt_toolkit.layout.menus": MagicMock(),
|
||||
"prompt_toolkit.widgets": MagicMock(),
|
||||
"prompt_toolkit.key_binding": MagicMock(),
|
||||
"prompt_toolkit.completion": MagicMock(),
|
||||
"prompt_toolkit.formatted_text": MagicMock(),
|
||||
}
|
||||
with patch.dict(sys.modules, prompt_toolkit_stubs), patch.dict(
|
||||
"os.environ", clean_env, clear=False
|
||||
):
|
||||
import cli as cli_mod
|
||||
|
||||
cli_mod = importlib.reload(cli_mod)
|
||||
with patch.object(cli_mod, "get_tool_definitions", return_value=[]), patch.dict(
|
||||
cli_mod.__dict__, {"CLI_CONFIG": clean_config}
|
||||
):
|
||||
return cli_mod.HermesCLI(**kwargs)
|
||||
|
||||
|
||||
class _DummyCLI:
|
||||
def __init__(self, **kwargs):
|
||||
self.kwargs = kwargs
|
||||
self.session_id = "session-123"
|
||||
self.system_prompt = "base prompt"
|
||||
self.preloaded_skills = []
|
||||
|
||||
def show_banner(self):
|
||||
return None
|
||||
|
||||
def show_tools(self):
|
||||
return None
|
||||
|
||||
def show_toolsets(self):
|
||||
return None
|
||||
|
||||
def run(self):
|
||||
return None
|
||||
|
||||
|
||||
def test_main_applies_preloaded_skills_to_system_prompt(monkeypatch):
|
||||
import cli as cli_mod
|
||||
|
||||
created = {}
|
||||
|
||||
def fake_cli(**kwargs):
|
||||
created["cli"] = _DummyCLI(**kwargs)
|
||||
return created["cli"]
|
||||
|
||||
monkeypatch.setattr(cli_mod, "HermesCLI", fake_cli)
|
||||
monkeypatch.setattr(
|
||||
cli_mod,
|
||||
"build_preloaded_skills_prompt",
|
||||
lambda skills, task_id=None: ("skill prompt", ["hermes-agent-dev", "github-auth"], []),
|
||||
)
|
||||
|
||||
with pytest.raises(SystemExit):
|
||||
cli_mod.main(skills="hermes-agent-dev,github-auth", list_tools=True)
|
||||
|
||||
cli_obj = created["cli"]
|
||||
assert cli_obj.system_prompt == "base prompt\n\nskill prompt"
|
||||
assert cli_obj.preloaded_skills == ["hermes-agent-dev", "github-auth"]
|
||||
|
||||
|
||||
def test_main_raises_for_unknown_preloaded_skill(monkeypatch):
|
||||
import cli as cli_mod
|
||||
|
||||
monkeypatch.setattr(cli_mod, "HermesCLI", lambda **kwargs: _DummyCLI(**kwargs))
|
||||
monkeypatch.setattr(
|
||||
cli_mod,
|
||||
"build_preloaded_skills_prompt",
|
||||
lambda skills, task_id=None: ("", [], ["missing-skill"]),
|
||||
)
|
||||
|
||||
with pytest.raises(ValueError, match=r"Unknown skill\(s\): missing-skill"):
|
||||
cli_mod.main(skills="missing-skill", list_tools=True)
|
||||
|
||||
|
||||
def test_show_banner_prints_preloaded_skills_once_before_banner():
|
||||
cli_obj = _make_real_cli(compact=False)
|
||||
cli_obj.preloaded_skills = ["hermes-agent-dev", "github-auth"]
|
||||
cli_obj.console = MagicMock()
|
||||
|
||||
with patch("cli.build_welcome_banner") as mock_banner, patch(
|
||||
"shutil.get_terminal_size", return_value=os.terminal_size((120, 40))
|
||||
):
|
||||
cli_obj.show_banner()
|
||||
cli_obj.show_banner()
|
||||
|
||||
print_calls = [
|
||||
call.args[0]
|
||||
for call in cli_obj.console.print.call_args_list
|
||||
if call.args and isinstance(call.args[0], str)
|
||||
]
|
||||
startup_lines = [line for line in print_calls if "Activated skills:" in line]
|
||||
|
||||
assert len(startup_lines) == 1
|
||||
assert "Activated skills:" in startup_lines[0]
|
||||
assert "hermes-agent-dev, github-auth" in startup_lines[0]
|
||||
assert mock_banner.call_count == 2
|
||||
|
|
@ -2085,6 +2085,92 @@ class TestAnthropicBaseUrlPassthrough:
|
|||
assert not passed_url or passed_url is None
|
||||
|
||||
|
||||
class TestAnthropicCredentialRefresh:
|
||||
def test_try_refresh_anthropic_client_credentials_rebuilds_client(self):
|
||||
with (
|
||||
patch("run_agent.get_tool_definitions", return_value=_make_tool_defs("web_search")),
|
||||
patch("run_agent.check_toolset_requirements", return_value={}),
|
||||
patch("agent.anthropic_adapter.build_anthropic_client") as mock_build,
|
||||
):
|
||||
old_client = MagicMock()
|
||||
new_client = MagicMock()
|
||||
mock_build.side_effect = [old_client, new_client]
|
||||
agent = AIAgent(
|
||||
api_key="sk-ant-oat01-stale-token",
|
||||
api_mode="anthropic_messages",
|
||||
quiet_mode=True,
|
||||
skip_context_files=True,
|
||||
skip_memory=True,
|
||||
)
|
||||
|
||||
agent._anthropic_client = old_client
|
||||
agent._anthropic_api_key = "sk-ant-oat01-stale-token"
|
||||
agent._anthropic_base_url = "https://api.anthropic.com"
|
||||
|
||||
with (
|
||||
patch("agent.anthropic_adapter.resolve_anthropic_token", return_value="sk-ant-oat01-fresh-token"),
|
||||
patch("agent.anthropic_adapter.build_anthropic_client", return_value=new_client) as rebuild,
|
||||
):
|
||||
assert agent._try_refresh_anthropic_client_credentials() is True
|
||||
|
||||
old_client.close.assert_called_once()
|
||||
rebuild.assert_called_once_with("sk-ant-oat01-fresh-token", "https://api.anthropic.com")
|
||||
assert agent._anthropic_client is new_client
|
||||
assert agent._anthropic_api_key == "sk-ant-oat01-fresh-token"
|
||||
|
||||
def test_try_refresh_anthropic_client_credentials_returns_false_when_token_unchanged(self):
|
||||
with (
|
||||
patch("run_agent.get_tool_definitions", return_value=_make_tool_defs("web_search")),
|
||||
patch("run_agent.check_toolset_requirements", return_value={}),
|
||||
patch("agent.anthropic_adapter.build_anthropic_client", return_value=MagicMock()),
|
||||
):
|
||||
agent = AIAgent(
|
||||
api_key="sk-ant-oat01-same-token",
|
||||
api_mode="anthropic_messages",
|
||||
quiet_mode=True,
|
||||
skip_context_files=True,
|
||||
skip_memory=True,
|
||||
)
|
||||
|
||||
old_client = MagicMock()
|
||||
agent._anthropic_client = old_client
|
||||
agent._anthropic_api_key = "sk-ant-oat01-same-token"
|
||||
|
||||
with (
|
||||
patch("agent.anthropic_adapter.resolve_anthropic_token", return_value="sk-ant-oat01-same-token"),
|
||||
patch("agent.anthropic_adapter.build_anthropic_client") as rebuild,
|
||||
):
|
||||
assert agent._try_refresh_anthropic_client_credentials() is False
|
||||
|
||||
old_client.close.assert_not_called()
|
||||
rebuild.assert_not_called()
|
||||
|
||||
def test_anthropic_messages_create_preflights_refresh(self):
|
||||
with (
|
||||
patch("run_agent.get_tool_definitions", return_value=_make_tool_defs("web_search")),
|
||||
patch("run_agent.check_toolset_requirements", return_value={}),
|
||||
patch("agent.anthropic_adapter.build_anthropic_client", return_value=MagicMock()),
|
||||
):
|
||||
agent = AIAgent(
|
||||
api_key="sk-ant-oat01-current-token",
|
||||
api_mode="anthropic_messages",
|
||||
quiet_mode=True,
|
||||
skip_context_files=True,
|
||||
skip_memory=True,
|
||||
)
|
||||
|
||||
response = SimpleNamespace(content=[])
|
||||
agent._anthropic_client = MagicMock()
|
||||
agent._anthropic_client.messages.create.return_value = response
|
||||
|
||||
with patch.object(agent, "_try_refresh_anthropic_client_credentials", return_value=True) as refresh:
|
||||
result = agent._anthropic_messages_create({"model": "claude-sonnet-4-20250514"})
|
||||
|
||||
refresh.assert_called_once_with()
|
||||
agent._anthropic_client.messages.create.assert_called_once_with(model="claude-sonnet-4-20250514")
|
||||
assert result is response
|
||||
|
||||
|
||||
# ===================================================================
|
||||
# _streaming_api_call tests
|
||||
# ===================================================================
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ from pathlib import Path
|
|||
|
||||
from tools.cronjob_tools import (
|
||||
_scan_cron_prompt,
|
||||
cronjob,
|
||||
schedule_cronjob,
|
||||
list_cronjobs,
|
||||
remove_cronjob,
|
||||
|
|
@ -180,3 +181,111 @@ class TestRemoveCronjob:
|
|||
result = json.loads(remove_cronjob("nonexistent_id"))
|
||||
assert result["success"] is False
|
||||
assert "not found" in result["error"].lower()
|
||||
|
||||
|
||||
class TestUnifiedCronjobTool:
|
||||
@pytest.fixture(autouse=True)
|
||||
def _setup_cron_dir(self, tmp_path, monkeypatch):
|
||||
monkeypatch.setattr("cron.jobs.CRON_DIR", tmp_path / "cron")
|
||||
monkeypatch.setattr("cron.jobs.JOBS_FILE", tmp_path / "cron" / "jobs.json")
|
||||
monkeypatch.setattr("cron.jobs.OUTPUT_DIR", tmp_path / "cron" / "output")
|
||||
|
||||
def test_create_and_list(self):
|
||||
created = json.loads(
|
||||
cronjob(
|
||||
action="create",
|
||||
prompt="Check server status",
|
||||
schedule="every 1h",
|
||||
name="Server Check",
|
||||
)
|
||||
)
|
||||
assert created["success"] is True
|
||||
|
||||
listing = json.loads(cronjob(action="list"))
|
||||
assert listing["success"] is True
|
||||
assert listing["count"] == 1
|
||||
assert listing["jobs"][0]["name"] == "Server Check"
|
||||
assert listing["jobs"][0]["state"] == "scheduled"
|
||||
|
||||
def test_pause_and_resume(self):
|
||||
created = json.loads(cronjob(action="create", prompt="Check", schedule="every 1h"))
|
||||
job_id = created["job_id"]
|
||||
|
||||
paused = json.loads(cronjob(action="pause", job_id=job_id))
|
||||
assert paused["success"] is True
|
||||
assert paused["job"]["state"] == "paused"
|
||||
|
||||
resumed = json.loads(cronjob(action="resume", job_id=job_id))
|
||||
assert resumed["success"] is True
|
||||
assert resumed["job"]["state"] == "scheduled"
|
||||
|
||||
def test_update_schedule_recomputes_display(self):
|
||||
created = json.loads(cronjob(action="create", prompt="Check", schedule="every 1h"))
|
||||
job_id = created["job_id"]
|
||||
|
||||
updated = json.loads(
|
||||
cronjob(action="update", job_id=job_id, schedule="every 2h", name="New Name")
|
||||
)
|
||||
assert updated["success"] is True
|
||||
assert updated["job"]["name"] == "New Name"
|
||||
assert updated["job"]["schedule"] == "every 120m"
|
||||
|
||||
def test_create_skill_backed_job(self):
|
||||
result = json.loads(
|
||||
cronjob(
|
||||
action="create",
|
||||
skill="blogwatcher",
|
||||
prompt="Check the configured feeds and summarize anything new.",
|
||||
schedule="every 1h",
|
||||
name="Morning feeds",
|
||||
)
|
||||
)
|
||||
assert result["success"] is True
|
||||
assert result["skill"] == "blogwatcher"
|
||||
|
||||
listing = json.loads(cronjob(action="list"))
|
||||
assert listing["jobs"][0]["skill"] == "blogwatcher"
|
||||
|
||||
def test_create_multi_skill_job(self):
|
||||
result = json.loads(
|
||||
cronjob(
|
||||
action="create",
|
||||
skills=["blogwatcher", "find-nearby"],
|
||||
prompt="Use both skills and combine the result.",
|
||||
schedule="every 1h",
|
||||
name="Combo job",
|
||||
)
|
||||
)
|
||||
assert result["success"] is True
|
||||
assert result["skills"] == ["blogwatcher", "find-nearby"]
|
||||
|
||||
listing = json.loads(cronjob(action="list"))
|
||||
assert listing["jobs"][0]["skills"] == ["blogwatcher", "find-nearby"]
|
||||
|
||||
def test_multi_skill_default_name_prefers_prompt_when_present(self):
|
||||
result = json.loads(
|
||||
cronjob(
|
||||
action="create",
|
||||
skills=["blogwatcher", "find-nearby"],
|
||||
prompt="Use both skills and combine the result.",
|
||||
schedule="every 1h",
|
||||
)
|
||||
)
|
||||
assert result["success"] is True
|
||||
assert result["name"] == "Use both skills and combine the result."
|
||||
|
||||
def test_update_can_clear_skills(self):
|
||||
created = json.loads(
|
||||
cronjob(
|
||||
action="create",
|
||||
skills=["blogwatcher", "find-nearby"],
|
||||
prompt="Use both skills and combine the result.",
|
||||
schedule="every 1h",
|
||||
)
|
||||
)
|
||||
updated = json.loads(
|
||||
cronjob(action="update", job_id=created["job_id"], skills=[])
|
||||
)
|
||||
assert updated["success"] is True
|
||||
assert updated["job"]["skills"] == []
|
||||
assert updated["job"]["skill"] is None
|
||||
|
|
|
|||
|
|
@ -351,6 +351,19 @@ class TestVisionRequirements:
|
|||
result = check_vision_requirements()
|
||||
assert isinstance(result, bool)
|
||||
|
||||
def test_check_requirements_accepts_codex_auth(self, monkeypatch, tmp_path):
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
(tmp_path / "auth.json").write_text(
|
||||
'{"active_provider":"openai-codex","providers":{"openai-codex":{"tokens":{"access_token":"codex-access-token","refresh_token":"codex-refresh-token"}}}}'
|
||||
)
|
||||
monkeypatch.delenv("OPENROUTER_API_KEY", raising=False)
|
||||
monkeypatch.delenv("OPENAI_BASE_URL", raising=False)
|
||||
monkeypatch.delenv("OPENAI_API_KEY", raising=False)
|
||||
monkeypatch.delenv("AUXILIARY_VISION_PROVIDER", raising=False)
|
||||
monkeypatch.delenv("CONTEXT_VISION_PROVIDER", raising=False)
|
||||
|
||||
assert check_vision_requirements() is True
|
||||
|
||||
def test_debug_session_info_returns_dict(self):
|
||||
info = get_debug_session_info()
|
||||
assert isinstance(info, dict)
|
||||
|
|
|
|||
|
|
@ -84,14 +84,13 @@ from .browser_tool import (
|
|||
|
||||
# Cronjob management tools (CLI-only, hermes-cli toolset)
|
||||
from .cronjob_tools import (
|
||||
cronjob,
|
||||
schedule_cronjob,
|
||||
list_cronjobs,
|
||||
remove_cronjob,
|
||||
check_cronjob_requirements,
|
||||
get_cronjob_tool_definitions,
|
||||
SCHEDULE_CRONJOB_SCHEMA,
|
||||
LIST_CRONJOBS_SCHEMA,
|
||||
REMOVE_CRONJOB_SCHEMA
|
||||
CRONJOB_SCHEMA,
|
||||
)
|
||||
|
||||
# RL Training tools (Tinker-Atropos)
|
||||
|
|
@ -211,14 +210,13 @@ __all__ = [
|
|||
'check_browser_requirements',
|
||||
'BROWSER_TOOL_SCHEMAS',
|
||||
# Cronjob management tools (CLI-only)
|
||||
'cronjob',
|
||||
'schedule_cronjob',
|
||||
'list_cronjobs',
|
||||
'remove_cronjob',
|
||||
'check_cronjob_requirements',
|
||||
'get_cronjob_tool_definitions',
|
||||
'SCHEDULE_CRONJOB_SCHEMA',
|
||||
'LIST_CRONJOBS_SCHEMA',
|
||||
'REMOVE_CRONJOB_SCHEMA',
|
||||
'CRONJOB_SCHEMA',
|
||||
# RL Training tools
|
||||
'rl_list_environments',
|
||||
'rl_select_environment',
|
||||
|
|
|
|||
|
|
@ -1,24 +1,31 @@
|
|||
"""
|
||||
Cron job management tools for Hermes Agent.
|
||||
|
||||
These tools allow the agent to schedule, list, and remove automated tasks.
|
||||
Only available when running via CLI (hermes-cli toolset).
|
||||
|
||||
IMPORTANT: Cronjobs run in isolated sessions with NO prior context.
|
||||
The prompt must contain ALL necessary information.
|
||||
Expose a single compressed action-oriented tool to avoid schema/context bloat.
|
||||
Compatibility wrappers remain for direct Python callers and legacy tests.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
# Import from cron module (will be available when properly installed)
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
# Import from cron module (will be available when properly installed)
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from cron.jobs import create_job, get_job, list_jobs, remove_job
|
||||
from cron.jobs import (
|
||||
create_job,
|
||||
get_job,
|
||||
list_jobs,
|
||||
parse_schedule,
|
||||
pause_job,
|
||||
remove_job,
|
||||
resume_job,
|
||||
trigger_job,
|
||||
update_job,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
|
|
@ -56,9 +63,207 @@ def _scan_cron_prompt(prompt: str) -> str:
|
|||
return ""
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Tool: schedule_cronjob
|
||||
# =============================================================================
|
||||
def _origin_from_env() -> Optional[Dict[str, str]]:
|
||||
origin_platform = os.getenv("HERMES_SESSION_PLATFORM")
|
||||
origin_chat_id = os.getenv("HERMES_SESSION_CHAT_ID")
|
||||
if origin_platform and origin_chat_id:
|
||||
return {
|
||||
"platform": origin_platform,
|
||||
"chat_id": origin_chat_id,
|
||||
"chat_name": os.getenv("HERMES_SESSION_CHAT_NAME"),
|
||||
}
|
||||
return None
|
||||
|
||||
|
||||
def _repeat_display(job: Dict[str, Any]) -> str:
|
||||
times = (job.get("repeat") or {}).get("times")
|
||||
completed = (job.get("repeat") or {}).get("completed", 0)
|
||||
if times is None:
|
||||
return "forever"
|
||||
if times == 1:
|
||||
return "once" if completed == 0 else "1/1"
|
||||
return f"{completed}/{times}" if completed else f"{times} times"
|
||||
|
||||
|
||||
def _canonical_skills(skill: Optional[str] = None, skills: Optional[Any] = None) -> List[str]:
|
||||
if skills is None:
|
||||
raw_items = [skill] if skill else []
|
||||
elif isinstance(skills, str):
|
||||
raw_items = [skills]
|
||||
else:
|
||||
raw_items = list(skills)
|
||||
|
||||
normalized: List[str] = []
|
||||
for item in raw_items:
|
||||
text = str(item or "").strip()
|
||||
if text and text not in normalized:
|
||||
normalized.append(text)
|
||||
return normalized
|
||||
|
||||
|
||||
|
||||
def _format_job(job: Dict[str, Any]) -> Dict[str, Any]:
|
||||
prompt = job.get("prompt", "")
|
||||
skills = _canonical_skills(job.get("skill"), job.get("skills"))
|
||||
return {
|
||||
"job_id": job["id"],
|
||||
"name": job["name"],
|
||||
"skill": skills[0] if skills else None,
|
||||
"skills": skills,
|
||||
"prompt_preview": prompt[:100] + "..." if len(prompt) > 100 else prompt,
|
||||
"schedule": job.get("schedule_display"),
|
||||
"repeat": _repeat_display(job),
|
||||
"deliver": job.get("deliver", "local"),
|
||||
"next_run_at": job.get("next_run_at"),
|
||||
"last_run_at": job.get("last_run_at"),
|
||||
"last_status": job.get("last_status"),
|
||||
"enabled": job.get("enabled", True),
|
||||
"state": job.get("state", "scheduled" if job.get("enabled", True) else "paused"),
|
||||
"paused_at": job.get("paused_at"),
|
||||
"paused_reason": job.get("paused_reason"),
|
||||
}
|
||||
|
||||
|
||||
def cronjob(
|
||||
action: str,
|
||||
job_id: Optional[str] = None,
|
||||
prompt: Optional[str] = None,
|
||||
schedule: Optional[str] = None,
|
||||
name: Optional[str] = None,
|
||||
repeat: Optional[int] = None,
|
||||
deliver: Optional[str] = None,
|
||||
include_disabled: bool = False,
|
||||
skill: Optional[str] = None,
|
||||
skills: Optional[List[str]] = None,
|
||||
reason: Optional[str] = None,
|
||||
task_id: str = None,
|
||||
) -> str:
|
||||
"""Unified cron job management tool."""
|
||||
del task_id # unused but kept for handler signature compatibility
|
||||
|
||||
try:
|
||||
normalized = (action or "").strip().lower()
|
||||
|
||||
if normalized == "create":
|
||||
if not schedule:
|
||||
return json.dumps({"success": False, "error": "schedule is required for create"}, indent=2)
|
||||
canonical_skills = _canonical_skills(skill, skills)
|
||||
if not prompt and not canonical_skills:
|
||||
return json.dumps({"success": False, "error": "create requires either prompt or at least one skill"}, indent=2)
|
||||
if prompt:
|
||||
scan_error = _scan_cron_prompt(prompt)
|
||||
if scan_error:
|
||||
return json.dumps({"success": False, "error": scan_error}, indent=2)
|
||||
|
||||
job = create_job(
|
||||
prompt=prompt or "",
|
||||
schedule=schedule,
|
||||
name=name,
|
||||
repeat=repeat,
|
||||
deliver=deliver,
|
||||
origin=_origin_from_env(),
|
||||
skills=canonical_skills,
|
||||
)
|
||||
return json.dumps(
|
||||
{
|
||||
"success": True,
|
||||
"job_id": job["id"],
|
||||
"name": job["name"],
|
||||
"skill": job.get("skill"),
|
||||
"skills": job.get("skills", []),
|
||||
"schedule": job["schedule_display"],
|
||||
"repeat": _repeat_display(job),
|
||||
"deliver": job.get("deliver", "local"),
|
||||
"next_run_at": job["next_run_at"],
|
||||
"job": _format_job(job),
|
||||
"message": f"Cron job '{job['name']}' created.",
|
||||
},
|
||||
indent=2,
|
||||
)
|
||||
|
||||
if normalized == "list":
|
||||
jobs = [_format_job(job) for job in list_jobs(include_disabled=include_disabled)]
|
||||
return json.dumps({"success": True, "count": len(jobs), "jobs": jobs}, indent=2)
|
||||
|
||||
if not job_id:
|
||||
return json.dumps({"success": False, "error": f"job_id is required for action '{normalized}'"}, indent=2)
|
||||
|
||||
job = get_job(job_id)
|
||||
if not job:
|
||||
return json.dumps(
|
||||
{"success": False, "error": f"Job with ID '{job_id}' not found. Use cronjob(action='list') to inspect jobs."},
|
||||
indent=2,
|
||||
)
|
||||
|
||||
if normalized == "remove":
|
||||
removed = remove_job(job_id)
|
||||
if not removed:
|
||||
return json.dumps({"success": False, "error": f"Failed to remove job '{job_id}'"}, indent=2)
|
||||
return json.dumps(
|
||||
{
|
||||
"success": True,
|
||||
"message": f"Cron job '{job['name']}' removed.",
|
||||
"removed_job": {
|
||||
"id": job_id,
|
||||
"name": job["name"],
|
||||
"schedule": job.get("schedule_display"),
|
||||
},
|
||||
},
|
||||
indent=2,
|
||||
)
|
||||
|
||||
if normalized == "pause":
|
||||
updated = pause_job(job_id, reason=reason)
|
||||
return json.dumps({"success": True, "job": _format_job(updated)}, indent=2)
|
||||
|
||||
if normalized == "resume":
|
||||
updated = resume_job(job_id)
|
||||
return json.dumps({"success": True, "job": _format_job(updated)}, indent=2)
|
||||
|
||||
if normalized in {"run", "run_now", "trigger"}:
|
||||
updated = trigger_job(job_id)
|
||||
return json.dumps({"success": True, "job": _format_job(updated)}, indent=2)
|
||||
|
||||
if normalized == "update":
|
||||
updates: Dict[str, Any] = {}
|
||||
if prompt is not None:
|
||||
scan_error = _scan_cron_prompt(prompt)
|
||||
if scan_error:
|
||||
return json.dumps({"success": False, "error": scan_error}, indent=2)
|
||||
updates["prompt"] = prompt
|
||||
if name is not None:
|
||||
updates["name"] = name
|
||||
if deliver is not None:
|
||||
updates["deliver"] = deliver
|
||||
if skills is not None or skill is not None:
|
||||
canonical_skills = _canonical_skills(skill, skills)
|
||||
updates["skills"] = canonical_skills
|
||||
updates["skill"] = canonical_skills[0] if canonical_skills else None
|
||||
if repeat is not None:
|
||||
repeat_state = dict(job.get("repeat") or {})
|
||||
repeat_state["times"] = repeat
|
||||
updates["repeat"] = repeat_state
|
||||
if schedule is not None:
|
||||
parsed_schedule = parse_schedule(schedule)
|
||||
updates["schedule"] = parsed_schedule
|
||||
updates["schedule_display"] = parsed_schedule.get("display", schedule)
|
||||
if job.get("state") != "paused":
|
||||
updates["state"] = "scheduled"
|
||||
updates["enabled"] = True
|
||||
if not updates:
|
||||
return json.dumps({"success": False, "error": "No updates provided."}, indent=2)
|
||||
updated = update_job(job_id, updates)
|
||||
return json.dumps({"success": True, "job": _format_job(updated)}, indent=2)
|
||||
|
||||
return json.dumps({"success": False, "error": f"Unknown cron action '{action}'"}, indent=2)
|
||||
|
||||
except Exception as e:
|
||||
return json.dumps({"success": False, "error": str(e)}, indent=2)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Compatibility wrappers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def schedule_cronjob(
|
||||
prompt: str,
|
||||
|
|
@ -66,329 +271,104 @@ def schedule_cronjob(
|
|||
name: Optional[str] = None,
|
||||
repeat: Optional[int] = None,
|
||||
deliver: Optional[str] = None,
|
||||
task_id: str = None
|
||||
task_id: str = None,
|
||||
) -> str:
|
||||
"""
|
||||
Schedule an automated task to run the agent on a schedule.
|
||||
|
||||
IMPORTANT: When the cronjob runs, it starts a COMPLETELY FRESH session.
|
||||
The agent will have NO memory of this conversation or any prior context.
|
||||
Therefore, the prompt MUST contain ALL necessary information:
|
||||
- Full context of what needs to be done
|
||||
- Specific file paths, URLs, or identifiers
|
||||
- Clear success criteria
|
||||
- Any relevant background information
|
||||
|
||||
BAD prompt: "Check on that server issue"
|
||||
GOOD prompt: "SSH into server 192.168.1.100 as user 'deploy', check if nginx
|
||||
is running with 'systemctl status nginx', and verify the site
|
||||
https://example.com returns HTTP 200. Report any issues found."
|
||||
|
||||
Args:
|
||||
prompt: Complete, self-contained instructions for the future agent.
|
||||
Must include ALL context needed - the agent won't remember anything.
|
||||
schedule: When to run. Either:
|
||||
- Duration for one-shot: "30m", "2h", "1d" (runs once)
|
||||
- Interval: "every 30m", "every 2h" (recurring)
|
||||
- Cron expression: "0 9 * * *" (daily at 9am)
|
||||
- ISO timestamp: "2026-02-03T14:00:00" (one-shot at specific time)
|
||||
name: Optional human-friendly name for the job (for listing/management)
|
||||
repeat: How many times to run. Omit for default behavior:
|
||||
- One-shot schedules default to repeat=1 (run once)
|
||||
- Intervals/cron default to forever
|
||||
- Set repeat=5 to run 5 times then auto-delete
|
||||
deliver: Where to send the output. Options:
|
||||
- "origin": Back to where this job was created (default)
|
||||
- "local": Save to local files only (~/.hermes/cron/output/)
|
||||
- "telegram": Send to Telegram home channel
|
||||
- "discord": Send to Discord home channel
|
||||
- "signal": Send to Signal home channel
|
||||
- "telegram:123456": Send to specific chat ID
|
||||
- "signal:+15551234567": Send to specific Signal number
|
||||
|
||||
Returns:
|
||||
JSON with job_id, next_run time, and confirmation
|
||||
"""
|
||||
# Scan prompt for critical threats before scheduling
|
||||
scan_error = _scan_cron_prompt(prompt)
|
||||
if scan_error:
|
||||
return json.dumps({"success": False, "error": scan_error}, indent=2)
|
||||
|
||||
# Get origin info from environment if available
|
||||
origin = None
|
||||
origin_platform = os.getenv("HERMES_SESSION_PLATFORM")
|
||||
origin_chat_id = os.getenv("HERMES_SESSION_CHAT_ID")
|
||||
if origin_platform and origin_chat_id:
|
||||
origin = {
|
||||
"platform": origin_platform,
|
||||
"chat_id": origin_chat_id,
|
||||
"chat_name": os.getenv("HERMES_SESSION_CHAT_NAME"),
|
||||
}
|
||||
|
||||
try:
|
||||
job = create_job(
|
||||
prompt=prompt,
|
||||
schedule=schedule,
|
||||
name=name,
|
||||
repeat=repeat,
|
||||
deliver=deliver,
|
||||
origin=origin
|
||||
)
|
||||
|
||||
# Format repeat info for display
|
||||
times = job["repeat"].get("times")
|
||||
if times is None:
|
||||
repeat_display = "forever"
|
||||
elif times == 1:
|
||||
repeat_display = "once"
|
||||
else:
|
||||
repeat_display = f"{times} times"
|
||||
|
||||
return json.dumps({
|
||||
"success": True,
|
||||
"job_id": job["id"],
|
||||
"name": job["name"],
|
||||
"schedule": job["schedule_display"],
|
||||
"repeat": repeat_display,
|
||||
"deliver": job.get("deliver", "local"),
|
||||
"next_run_at": job["next_run_at"],
|
||||
"message": f"Cronjob '{job['name']}' created. It will run {repeat_display}, deliver to {job.get('deliver', 'local')}, next at {job['next_run_at']}."
|
||||
}, indent=2)
|
||||
|
||||
except Exception as e:
|
||||
return json.dumps({
|
||||
"success": False,
|
||||
"error": str(e)
|
||||
}, indent=2)
|
||||
return cronjob(
|
||||
action="create",
|
||||
prompt=prompt,
|
||||
schedule=schedule,
|
||||
name=name,
|
||||
repeat=repeat,
|
||||
deliver=deliver,
|
||||
task_id=task_id,
|
||||
)
|
||||
|
||||
|
||||
SCHEDULE_CRONJOB_SCHEMA = {
|
||||
"name": "schedule_cronjob",
|
||||
"description": """Schedule an automated task to run the agent on a schedule.
|
||||
def list_cronjobs(include_disabled: bool = False, task_id: str = None) -> str:
|
||||
return cronjob(action="list", include_disabled=include_disabled, task_id=task_id)
|
||||
|
||||
⚠️ CRITICAL: The cronjob runs in a FRESH SESSION with NO CONTEXT from this conversation.
|
||||
The prompt must be COMPLETELY SELF-CONTAINED with ALL necessary information including:
|
||||
- Full context and background
|
||||
- Specific file paths, URLs, server addresses
|
||||
- Clear instructions and success criteria
|
||||
- Any credentials or configuration details
|
||||
|
||||
The future agent will NOT remember anything from the current conversation.
|
||||
def remove_cronjob(job_id: str, task_id: str = None) -> str:
|
||||
return cronjob(action="remove", job_id=job_id, task_id=task_id)
|
||||
|
||||
SCHEDULE FORMATS:
|
||||
- One-shot: "30m", "2h", "1d" (runs once after delay)
|
||||
- Interval: "every 30m", "every 2h" (recurring)
|
||||
- Cron: "0 9 * * *" (cron expression for precise scheduling)
|
||||
- Timestamp: "2026-02-03T14:00:00" (specific date/time)
|
||||
|
||||
REPEAT BEHAVIOR:
|
||||
- One-shot schedules: run once by default
|
||||
- Intervals/cron: run forever by default
|
||||
- Set repeat=N to run exactly N times then auto-delete
|
||||
CRONJOB_SCHEMA = {
|
||||
"name": "cronjob",
|
||||
"description": """Manage scheduled cron jobs with a single compressed tool.
|
||||
|
||||
DELIVERY OPTIONS (where output goes):
|
||||
- "origin": Back to current chat (default if in messaging platform)
|
||||
- "local": Save to local files only (default if in CLI)
|
||||
- "telegram": Send to Telegram home channel
|
||||
- "discord": Send to Discord home channel
|
||||
- "telegram:123456": Send to specific chat (if user provides ID)
|
||||
Use action='create' to schedule a new job from a prompt or one or more skills.
|
||||
Use action='list' to inspect jobs.
|
||||
Use action='update', 'pause', 'resume', 'remove', or 'run' to manage an existing job.
|
||||
|
||||
Jobs run in a fresh session with no current-chat context, so prompts must be self-contained.
|
||||
If skill or skills are provided on create, the future cron run loads those skills in order, then follows the prompt as the task instruction.
|
||||
On update, passing skills=[] clears attached skills.
|
||||
|
||||
NOTE: The agent's final response is auto-delivered to the target — do NOT use
|
||||
send_message in the prompt for that same destination. Same-target send_message
|
||||
calls are skipped so the cron doesn't double-message the user. Put the main
|
||||
calls are skipped to avoid duplicate cron deliveries. Put the primary
|
||||
user-facing content in the final response, and use send_message only for
|
||||
additional or different targets.
|
||||
|
||||
Use for: reminders, periodic checks, scheduled reports, automated maintenance.""",
|
||||
Important safety rule: cron-run sessions should not recursively schedule more cron jobs.""",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"action": {
|
||||
"type": "string",
|
||||
"description": "One of: create, list, update, pause, resume, remove, run"
|
||||
},
|
||||
"job_id": {
|
||||
"type": "string",
|
||||
"description": "Required for update/pause/resume/remove/run"
|
||||
},
|
||||
"prompt": {
|
||||
"type": "string",
|
||||
"description": "Complete, self-contained instructions. Must include ALL context - the future agent will have NO memory of this conversation."
|
||||
"description": "For create: the full self-contained prompt. If skill or skills are also provided, this becomes the task instruction paired with those skills."
|
||||
},
|
||||
"schedule": {
|
||||
"type": "string",
|
||||
"description": "When to run: '30m' (once in 30min), 'every 30m' (recurring), '0 9 * * *' (cron), or ISO timestamp"
|
||||
"description": "For create/update: '30m', 'every 2h', '0 9 * * *', or ISO timestamp"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Optional human-friendly name for the job"
|
||||
"description": "Optional human-friendly name"
|
||||
},
|
||||
"repeat": {
|
||||
"type": "integer",
|
||||
"description": "How many times to run. Omit for default (once for one-shot, forever for recurring). Set to N for exactly N runs."
|
||||
"description": "Optional repeat count. Omit for defaults (once for one-shot, forever for recurring)."
|
||||
},
|
||||
"deliver": {
|
||||
"type": "string",
|
||||
"description": "Where to send output: 'origin' (back to this chat), 'local' (files only), 'telegram', 'discord', 'signal', or 'platform:chat_id'"
|
||||
}
|
||||
},
|
||||
"required": ["prompt", "schedule"]
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Tool: list_cronjobs
|
||||
# =============================================================================
|
||||
|
||||
def list_cronjobs(include_disabled: bool = False, task_id: str = None) -> str:
|
||||
"""
|
||||
List all scheduled cronjobs.
|
||||
|
||||
Returns information about each job including:
|
||||
- Job ID (needed for removal)
|
||||
- Name
|
||||
- Schedule (human-readable)
|
||||
- Repeat status (completed/total or 'forever')
|
||||
- Next scheduled run time
|
||||
- Last run time and status (if any)
|
||||
|
||||
Args:
|
||||
include_disabled: Whether to include disabled/completed jobs
|
||||
|
||||
Returns:
|
||||
JSON array of all scheduled jobs
|
||||
"""
|
||||
try:
|
||||
jobs = list_jobs(include_disabled=include_disabled)
|
||||
|
||||
formatted_jobs = []
|
||||
for job in jobs:
|
||||
# Format repeat status
|
||||
times = job["repeat"].get("times")
|
||||
completed = job["repeat"].get("completed", 0)
|
||||
if times is None:
|
||||
repeat_status = "forever"
|
||||
else:
|
||||
repeat_status = f"{completed}/{times}"
|
||||
|
||||
formatted_jobs.append({
|
||||
"job_id": job["id"],
|
||||
"name": job["name"],
|
||||
"prompt_preview": job["prompt"][:100] + "..." if len(job["prompt"]) > 100 else job["prompt"],
|
||||
"schedule": job["schedule_display"],
|
||||
"repeat": repeat_status,
|
||||
"deliver": job.get("deliver", "local"),
|
||||
"next_run_at": job.get("next_run_at"),
|
||||
"last_run_at": job.get("last_run_at"),
|
||||
"last_status": job.get("last_status"),
|
||||
"enabled": job.get("enabled", True)
|
||||
})
|
||||
|
||||
return json.dumps({
|
||||
"success": True,
|
||||
"count": len(formatted_jobs),
|
||||
"jobs": formatted_jobs
|
||||
}, indent=2)
|
||||
|
||||
except Exception as e:
|
||||
return json.dumps({
|
||||
"success": False,
|
||||
"error": str(e)
|
||||
}, indent=2)
|
||||
|
||||
|
||||
LIST_CRONJOBS_SCHEMA = {
|
||||
"name": "list_cronjobs",
|
||||
"description": """List all scheduled cronjobs with their IDs, schedules, and status.
|
||||
|
||||
Use this to:
|
||||
- See what jobs are currently scheduled
|
||||
- Find job IDs for removal with remove_cronjob
|
||||
- Check job status and next run times
|
||||
|
||||
Returns job_id, name, schedule, repeat status, next/last run times.""",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"description": "Delivery target: origin, local, telegram, discord, signal, or platform:chat_id"
|
||||
},
|
||||
"include_disabled": {
|
||||
"type": "boolean",
|
||||
"description": "Include disabled/completed jobs in the list (default: false)"
|
||||
}
|
||||
},
|
||||
"required": []
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Tool: remove_cronjob
|
||||
# =============================================================================
|
||||
|
||||
def remove_cronjob(job_id: str, task_id: str = None) -> str:
|
||||
"""
|
||||
Remove a scheduled cronjob by its ID.
|
||||
|
||||
Use list_cronjobs first to find the job_id of the job you want to remove.
|
||||
|
||||
Args:
|
||||
job_id: The ID of the job to remove (from list_cronjobs output)
|
||||
|
||||
Returns:
|
||||
JSON confirmation of removal
|
||||
"""
|
||||
try:
|
||||
job = get_job(job_id)
|
||||
if not job:
|
||||
return json.dumps({
|
||||
"success": False,
|
||||
"error": f"Job with ID '{job_id}' not found. Use list_cronjobs to see available jobs."
|
||||
}, indent=2)
|
||||
|
||||
removed = remove_job(job_id)
|
||||
if removed:
|
||||
return json.dumps({
|
||||
"success": True,
|
||||
"message": f"Cronjob '{job['name']}' (ID: {job_id}) has been removed.",
|
||||
"removed_job": {
|
||||
"id": job_id,
|
||||
"name": job["name"],
|
||||
"schedule": job["schedule_display"]
|
||||
}
|
||||
}, indent=2)
|
||||
else:
|
||||
return json.dumps({
|
||||
"success": False,
|
||||
"error": f"Failed to remove job '{job_id}'"
|
||||
}, indent=2)
|
||||
|
||||
except Exception as e:
|
||||
return json.dumps({
|
||||
"success": False,
|
||||
"error": str(e)
|
||||
}, indent=2)
|
||||
|
||||
|
||||
REMOVE_CRONJOB_SCHEMA = {
|
||||
"name": "remove_cronjob",
|
||||
"description": """Remove a scheduled cronjob by its ID.
|
||||
|
||||
Use list_cronjobs first to find the job_id of the job you want to remove.
|
||||
Jobs that have completed their repeat count are auto-removed, but you can
|
||||
use this to cancel a job before it completes.""",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"job_id": {
|
||||
"description": "For list: include paused/completed jobs"
|
||||
},
|
||||
"skill": {
|
||||
"type": "string",
|
||||
"description": "The ID of the cronjob to remove (from list_cronjobs output)"
|
||||
"description": "Optional single skill name to load before executing the cron prompt"
|
||||
},
|
||||
"skills": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": "Optional ordered list of skills to load before executing the cron prompt. On update, pass an empty array to clear attached skills."
|
||||
},
|
||||
"reason": {
|
||||
"type": "string",
|
||||
"description": "Optional pause reason"
|
||||
}
|
||||
},
|
||||
"required": ["job_id"]
|
||||
"required": ["action"]
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Requirements check
|
||||
# =============================================================================
|
||||
|
||||
def check_cronjob_requirements() -> bool:
|
||||
"""
|
||||
Check if cronjob tools can be used.
|
||||
|
||||
|
||||
Available in interactive CLI mode and gateway/messaging platforms.
|
||||
Cronjobs are server-side scheduled tasks so they work from any interface.
|
||||
"""
|
||||
|
|
@ -399,66 +379,31 @@ def check_cronjob_requirements() -> bool:
|
|||
)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Exports
|
||||
# =============================================================================
|
||||
|
||||
def get_cronjob_tool_definitions():
|
||||
"""Return tool definitions for cronjob management."""
|
||||
return [
|
||||
SCHEDULE_CRONJOB_SCHEMA,
|
||||
LIST_CRONJOBS_SCHEMA,
|
||||
REMOVE_CRONJOB_SCHEMA
|
||||
]
|
||||
|
||||
|
||||
# For direct testing
|
||||
if __name__ == "__main__":
|
||||
# Test the tools
|
||||
print("Testing schedule_cronjob:")
|
||||
result = schedule_cronjob(
|
||||
prompt="Test prompt for cron job",
|
||||
schedule="5m",
|
||||
name="Test Job"
|
||||
)
|
||||
print(result)
|
||||
|
||||
print("\nTesting list_cronjobs:")
|
||||
result = list_cronjobs()
|
||||
print(result)
|
||||
return [CRONJOB_SCHEMA]
|
||||
|
||||
|
||||
# --- Registry ---
|
||||
from tools.registry import registry
|
||||
|
||||
registry.register(
|
||||
name="schedule_cronjob",
|
||||
name="cronjob",
|
||||
toolset="cronjob",
|
||||
schema=SCHEDULE_CRONJOB_SCHEMA,
|
||||
handler=lambda args, **kw: schedule_cronjob(
|
||||
prompt=args.get("prompt", ""),
|
||||
schedule=args.get("schedule", ""),
|
||||
schema=CRONJOB_SCHEMA,
|
||||
handler=lambda args, **kw: cronjob(
|
||||
action=args.get("action", ""),
|
||||
job_id=args.get("job_id"),
|
||||
prompt=args.get("prompt"),
|
||||
schedule=args.get("schedule"),
|
||||
name=args.get("name"),
|
||||
repeat=args.get("repeat"),
|
||||
deliver=args.get("deliver"),
|
||||
task_id=kw.get("task_id")),
|
||||
check_fn=check_cronjob_requirements,
|
||||
)
|
||||
registry.register(
|
||||
name="list_cronjobs",
|
||||
toolset="cronjob",
|
||||
schema=LIST_CRONJOBS_SCHEMA,
|
||||
handler=lambda args, **kw: list_cronjobs(
|
||||
include_disabled=args.get("include_disabled", False),
|
||||
task_id=kw.get("task_id")),
|
||||
check_fn=check_cronjob_requirements,
|
||||
)
|
||||
registry.register(
|
||||
name="remove_cronjob",
|
||||
toolset="cronjob",
|
||||
schema=REMOVE_CRONJOB_SCHEMA,
|
||||
handler=lambda args, **kw: remove_cronjob(
|
||||
job_id=args.get("job_id", ""),
|
||||
task_id=kw.get("task_id")),
|
||||
skill=args.get("skill"),
|
||||
skills=args.get("skills"),
|
||||
reason=args.get("reason"),
|
||||
task_id=kw.get("task_id"),
|
||||
),
|
||||
check_fn=check_cronjob_requirements,
|
||||
)
|
||||
|
|
|
|||
|
|
@ -377,16 +377,11 @@ async def vision_analyze_tool(
|
|||
|
||||
|
||||
def check_vision_requirements() -> bool:
|
||||
"""Check if an auxiliary vision model is available."""
|
||||
"""Check if the configured runtime vision path can resolve a client."""
|
||||
try:
|
||||
from agent.auxiliary_client import resolve_provider_client
|
||||
client, _ = resolve_provider_client("openrouter")
|
||||
if client is not None:
|
||||
return True
|
||||
client, _ = resolve_provider_client("nous")
|
||||
if client is not None:
|
||||
return True
|
||||
client, _ = resolve_provider_client("custom")
|
||||
from agent.auxiliary_client import resolve_vision_provider_client
|
||||
|
||||
_provider, client, _model = resolve_vision_provider_client()
|
||||
return client is not None
|
||||
except Exception:
|
||||
return False
|
||||
|
|
|
|||
|
|
@ -57,7 +57,7 @@ _HERMES_CORE_TOOLS = [
|
|||
# Code execution + delegation
|
||||
"execute_code", "delegate_task",
|
||||
# Cronjob management
|
||||
"schedule_cronjob", "list_cronjobs", "remove_cronjob",
|
||||
"cronjob",
|
||||
# Cross-platform messaging (gated on gateway running via check_fn)
|
||||
"send_message",
|
||||
# Honcho memory tools (gated on honcho being active via check_fn)
|
||||
|
|
@ -125,8 +125,8 @@ TOOLSETS = {
|
|||
},
|
||||
|
||||
"cronjob": {
|
||||
"description": "Cronjob management tools - schedule, list, and remove automated tasks",
|
||||
"tools": ["schedule_cronjob", "list_cronjobs", "remove_cronjob"],
|
||||
"description": "Cronjob management tool - create, list, update, pause, resume, remove, and trigger scheduled tasks",
|
||||
"tools": ["cronjob"],
|
||||
"includes": []
|
||||
},
|
||||
|
||||
|
|
|
|||
424
website/docs/developer-guide/adding-providers.md
Normal file
424
website/docs/developer-guide/adding-providers.md
Normal file
|
|
@ -0,0 +1,424 @@
|
|||
---
|
||||
sidebar_position: 5
|
||||
title: "Adding Providers"
|
||||
description: "How to add a new inference provider to Hermes Agent — auth, runtime resolution, CLI flows, adapters, tests, and docs"
|
||||
---
|
||||
|
||||
# Adding Providers
|
||||
|
||||
Hermes can already talk to any OpenAI-compatible endpoint through the custom provider path. Do not add a built-in provider unless you want first-class UX for that service:
|
||||
|
||||
- provider-specific auth or token refresh
|
||||
- a curated model catalog
|
||||
- setup / `hermes model` menu entries
|
||||
- provider aliases for `provider:model` syntax
|
||||
- a non-OpenAI API shape that needs an adapter
|
||||
|
||||
If the provider is just "another OpenAI-compatible base URL and API key", a named custom provider may be enough.
|
||||
|
||||
## The mental model
|
||||
|
||||
A built-in provider has to line up across a few layers:
|
||||
|
||||
1. `hermes_cli/auth.py` decides how credentials are found.
|
||||
2. `hermes_cli/runtime_provider.py` turns that into runtime data:
|
||||
- `provider`
|
||||
- `api_mode`
|
||||
- `base_url`
|
||||
- `api_key`
|
||||
- `source`
|
||||
3. `run_agent.py` uses `api_mode` to decide how requests are built and sent.
|
||||
4. `hermes_cli/models.py`, `hermes_cli/main.py`, and `hermes_cli/setup.py` make the provider show up in the CLI.
|
||||
5. `agent/auxiliary_client.py` and `agent/model_metadata.py` keep side tasks and token budgeting working.
|
||||
|
||||
The important abstraction is `api_mode`.
|
||||
|
||||
- Most providers use `chat_completions`.
|
||||
- Codex uses `codex_responses`.
|
||||
- Anthropic uses `anthropic_messages`.
|
||||
- A new non-OpenAI protocol usually means adding a new adapter and a new `api_mode` branch.
|
||||
|
||||
## Choose the implementation path first
|
||||
|
||||
### Path A — OpenAI-compatible provider
|
||||
|
||||
Use this when the provider accepts standard chat-completions style requests.
|
||||
|
||||
Typical work:
|
||||
|
||||
- add auth metadata
|
||||
- add model catalog / aliases
|
||||
- add runtime resolution
|
||||
- add CLI menu wiring
|
||||
- add aux-model defaults
|
||||
- add tests and user docs
|
||||
|
||||
You usually do not need a new adapter or a new `api_mode`.
|
||||
|
||||
### Path B — Native provider
|
||||
|
||||
Use this when the provider does not behave like OpenAI chat completions.
|
||||
|
||||
Examples in-tree today:
|
||||
|
||||
- `codex_responses`
|
||||
- `anthropic_messages`
|
||||
|
||||
This path includes everything from Path A plus:
|
||||
|
||||
- a provider adapter in `agent/`
|
||||
- `run_agent.py` branches for request building, dispatch, usage extraction, interrupt handling, and response normalization
|
||||
- adapter tests
|
||||
|
||||
## File checklist
|
||||
|
||||
### Required for every built-in provider
|
||||
|
||||
1. `hermes_cli/auth.py`
|
||||
2. `hermes_cli/models.py`
|
||||
3. `hermes_cli/runtime_provider.py`
|
||||
4. `hermes_cli/main.py`
|
||||
5. `hermes_cli/setup.py`
|
||||
6. `agent/auxiliary_client.py`
|
||||
7. `agent/model_metadata.py`
|
||||
8. tests
|
||||
9. user-facing docs under `website/docs/`
|
||||
|
||||
### Additional for native / non-OpenAI providers
|
||||
|
||||
10. `agent/<provider>_adapter.py`
|
||||
11. `run_agent.py`
|
||||
12. `pyproject.toml` if a provider SDK is required
|
||||
|
||||
## Step 1: Pick one canonical provider id
|
||||
|
||||
Choose a single provider id and use it everywhere.
|
||||
|
||||
Examples from the repo:
|
||||
|
||||
- `openai-codex`
|
||||
- `kimi-coding`
|
||||
- `minimax-cn`
|
||||
|
||||
That same id should appear in:
|
||||
|
||||
- `PROVIDER_REGISTRY` in `hermes_cli/auth.py`
|
||||
- `_PROVIDER_LABELS` in `hermes_cli/models.py`
|
||||
- `_PROVIDER_ALIASES` in both `hermes_cli/auth.py` and `hermes_cli/models.py`
|
||||
- CLI `--provider` choices in `hermes_cli/main.py`
|
||||
- setup / model selection branches
|
||||
- auxiliary-model defaults
|
||||
- tests
|
||||
|
||||
If the id differs between those files, the provider will feel half-wired: auth may work while `/model`, setup, or runtime resolution silently misses it.
|
||||
|
||||
## Step 2: Add auth metadata in `hermes_cli/auth.py`
|
||||
|
||||
For API-key providers, add a `ProviderConfig` entry to `PROVIDER_REGISTRY` with:
|
||||
|
||||
- `id`
|
||||
- `name`
|
||||
- `auth_type="api_key"`
|
||||
- `inference_base_url`
|
||||
- `api_key_env_vars`
|
||||
- optional `base_url_env_var`
|
||||
|
||||
Also add aliases to `_PROVIDER_ALIASES`.
|
||||
|
||||
Use the existing providers as templates:
|
||||
|
||||
- simple API-key path: Z.AI, MiniMax
|
||||
- API-key path with endpoint detection: Kimi, Z.AI
|
||||
- native token resolution: Anthropic
|
||||
- OAuth / auth-store path: Nous, OpenAI Codex
|
||||
|
||||
Questions to answer here:
|
||||
|
||||
- What env vars should Hermes check, and in what priority order?
|
||||
- Does the provider need base-URL overrides?
|
||||
- Does it need endpoint probing or token refresh?
|
||||
- What should the auth error say when credentials are missing?
|
||||
|
||||
If the provider needs something more than "look up an API key", add a dedicated credential resolver instead of shoving logic into unrelated branches.
|
||||
|
||||
## Step 3: Add model catalog and aliases in `hermes_cli/models.py`
|
||||
|
||||
Update the provider catalog so the provider works in menus and in `provider:model` syntax.
|
||||
|
||||
Typical edits:
|
||||
|
||||
- `_PROVIDER_MODELS`
|
||||
- `_PROVIDER_LABELS`
|
||||
- `_PROVIDER_ALIASES`
|
||||
- provider display order inside `list_available_providers()`
|
||||
- `provider_model_ids()` if the provider supports a live `/models` fetch
|
||||
|
||||
If the provider exposes a live model list, prefer that first and keep `_PROVIDER_MODELS` as the static fallback.
|
||||
|
||||
This file is also what makes inputs like these work:
|
||||
|
||||
```text
|
||||
anthropic:claude-sonnet-4-6
|
||||
kimi:model-name
|
||||
```
|
||||
|
||||
If aliases are missing here, the provider may authenticate correctly but still fail in `/model` parsing.
|
||||
|
||||
## Step 4: Resolve runtime data in `hermes_cli/runtime_provider.py`
|
||||
|
||||
`resolve_runtime_provider()` is the shared path used by CLI, gateway, cron, ACP, and helper clients.
|
||||
|
||||
Add a branch that returns a dict with at least:
|
||||
|
||||
```python
|
||||
{
|
||||
"provider": "your-provider",
|
||||
"api_mode": "chat_completions", # or your native mode
|
||||
"base_url": "https://...",
|
||||
"api_key": "...",
|
||||
"source": "env|portal|auth-store|explicit",
|
||||
"requested_provider": requested_provider,
|
||||
}
|
||||
```
|
||||
|
||||
If the provider is OpenAI-compatible, `api_mode` should usually stay `chat_completions`.
|
||||
|
||||
Be careful with API-key precedence. Hermes already contains logic to avoid leaking an OpenRouter key to unrelated endpoints. A new provider should be equally explicit about which key goes to which base URL.
|
||||
|
||||
## Step 5: Wire the CLI in `hermes_cli/main.py` and `hermes_cli/setup.py`
|
||||
|
||||
A provider is not discoverable until it shows up in the interactive flows.
|
||||
|
||||
Update:
|
||||
|
||||
### `hermes_cli/main.py`
|
||||
|
||||
- `provider_labels`
|
||||
- provider dispatch inside the `model` command
|
||||
- `--provider` argument choices
|
||||
- login/logout choices if the provider supports those flows
|
||||
- a `_model_flow_<provider>()` function, or reuse `_model_flow_api_key_provider()` if it fits
|
||||
|
||||
### `hermes_cli/setup.py`
|
||||
|
||||
- `provider_choices`
|
||||
- auth branch for the provider
|
||||
- model-selection branch
|
||||
- any provider-specific explanatory text
|
||||
- any place where a provider should be excluded from OpenRouter-only prompts or routing settings
|
||||
|
||||
If you only update one of these files, `hermes model` and `hermes setup` will drift.
|
||||
|
||||
## Step 6: Keep auxiliary calls working
|
||||
|
||||
Two files matter here:
|
||||
|
||||
### `agent/auxiliary_client.py`
|
||||
|
||||
Add a cheap / fast default aux model to `_API_KEY_PROVIDER_AUX_MODELS` if this is a direct API-key provider.
|
||||
|
||||
Auxiliary tasks include things like:
|
||||
|
||||
- vision summarization
|
||||
- web extraction summarization
|
||||
- context compression summaries
|
||||
- session-search summaries
|
||||
- memory flushes
|
||||
|
||||
If the provider has no sensible aux default, side tasks may fall back badly or use an expensive main model unexpectedly.
|
||||
|
||||
### `agent/model_metadata.py`
|
||||
|
||||
Add context lengths for the provider's models so token budgeting, compression thresholds, and limits stay sane.
|
||||
|
||||
## Step 7: If the provider is native, add an adapter and `run_agent.py` support
|
||||
|
||||
If the provider is not plain chat completions, isolate the provider-specific logic in `agent/<provider>_adapter.py`.
|
||||
|
||||
Keep `run_agent.py` focused on orchestration. It should call adapter helpers, not hand-build provider payloads inline all over the file.
|
||||
|
||||
A native provider usually needs work in these places:
|
||||
|
||||
### New adapter file
|
||||
|
||||
Typical responsibilities:
|
||||
|
||||
- build the SDK / HTTP client
|
||||
- resolve tokens
|
||||
- convert OpenAI-style conversation messages to the provider's request format
|
||||
- convert tool schemas if needed
|
||||
- normalize provider responses back into what `run_agent.py` expects
|
||||
- extract usage and finish-reason data
|
||||
|
||||
### `run_agent.py`
|
||||
|
||||
Search for `api_mode` and audit every switch point. At minimum, verify:
|
||||
|
||||
- `__init__` chooses the new `api_mode`
|
||||
- client construction works for the provider
|
||||
- `_build_api_kwargs()` knows how to format requests
|
||||
- `_api_call_with_interrupt()` dispatches to the right client call
|
||||
- interrupt / client rebuild paths work
|
||||
- response validation accepts the provider's shape
|
||||
- finish-reason extraction is correct
|
||||
- token-usage extraction is correct
|
||||
- fallback-model activation can switch into the new provider cleanly
|
||||
- summary-generation and memory-flush paths still work
|
||||
|
||||
Also search `run_agent.py` for `self.client.`. Any code path that assumes the standard OpenAI client exists can break when a native provider uses a different client object or sets `self.client` to `None`.
|
||||
|
||||
### Prompt caching and provider-specific request fields
|
||||
|
||||
Prompt caching and provider-specific knobs are easy to regress.
|
||||
|
||||
Examples already in-tree:
|
||||
|
||||
- Anthropic has a native prompt-caching path
|
||||
- OpenRouter gets provider-routing fields
|
||||
- not every provider should receive every request-side option
|
||||
|
||||
When you add a native provider, double-check that Hermes is only sending fields that the provider actually understands.
|
||||
|
||||
## Step 8: Tests
|
||||
|
||||
At minimum, touch the tests that guard provider wiring.
|
||||
|
||||
Common places:
|
||||
|
||||
- `tests/test_runtime_provider_resolution.py`
|
||||
- `tests/test_cli_provider_resolution.py`
|
||||
- `tests/test_cli_model_command.py`
|
||||
- `tests/test_setup_model_selection.py`
|
||||
- `tests/test_provider_parity.py`
|
||||
- `tests/test_run_agent.py`
|
||||
- `tests/test_<provider>_adapter.py` for a native provider
|
||||
|
||||
For docs-only examples, the exact file set may differ. The point is to cover:
|
||||
|
||||
- auth resolution
|
||||
- CLI menu / provider selection
|
||||
- runtime provider resolution
|
||||
- agent execution path
|
||||
- provider:model parsing
|
||||
- any adapter-specific message conversion
|
||||
|
||||
Run tests with xdist disabled:
|
||||
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
python -m pytest tests/test_runtime_provider_resolution.py tests/test_cli_provider_resolution.py tests/test_cli_model_command.py tests/test_setup_model_selection.py -n0 -q
|
||||
```
|
||||
|
||||
For deeper changes, run the full suite before pushing:
|
||||
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
python -m pytest tests/ -n0 -q
|
||||
```
|
||||
|
||||
## Step 9: Live verification
|
||||
|
||||
After tests, run a real smoke test.
|
||||
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
python -m hermes_cli.main chat -q "Say hello" --provider your-provider --model your-model
|
||||
```
|
||||
|
||||
Also test the interactive flows if you changed menus:
|
||||
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
python -m hermes_cli.main model
|
||||
python -m hermes_cli.main setup
|
||||
```
|
||||
|
||||
For native providers, verify at least one tool call too, not just a plain text response.
|
||||
|
||||
## Step 10: Update user-facing docs
|
||||
|
||||
If the provider is meant to ship as a first-class option, update the user docs too:
|
||||
|
||||
- `website/docs/getting-started/quickstart.md`
|
||||
- `website/docs/user-guide/configuration.md`
|
||||
- `website/docs/reference/environment-variables.md`
|
||||
|
||||
A developer can wire the provider perfectly and still leave users unable to discover the required env vars or setup flow.
|
||||
|
||||
## OpenAI-compatible provider checklist
|
||||
|
||||
Use this if the provider is standard chat completions.
|
||||
|
||||
- [ ] `ProviderConfig` added in `hermes_cli/auth.py`
|
||||
- [ ] aliases added in `hermes_cli/auth.py` and `hermes_cli/models.py`
|
||||
- [ ] model catalog added in `hermes_cli/models.py`
|
||||
- [ ] runtime branch added in `hermes_cli/runtime_provider.py`
|
||||
- [ ] CLI wiring added in `hermes_cli/main.py`
|
||||
- [ ] setup wiring added in `hermes_cli/setup.py`
|
||||
- [ ] aux model added in `agent/auxiliary_client.py`
|
||||
- [ ] context lengths added in `agent/model_metadata.py`
|
||||
- [ ] runtime / CLI tests updated
|
||||
- [ ] user docs updated
|
||||
|
||||
## Native provider checklist
|
||||
|
||||
Use this when the provider needs a new protocol path.
|
||||
|
||||
- [ ] everything in the OpenAI-compatible checklist
|
||||
- [ ] adapter added in `agent/<provider>_adapter.py`
|
||||
- [ ] new `api_mode` supported in `run_agent.py`
|
||||
- [ ] interrupt / rebuild path works
|
||||
- [ ] usage and finish-reason extraction works
|
||||
- [ ] fallback path works
|
||||
- [ ] adapter tests added
|
||||
- [ ] live smoke test passes
|
||||
|
||||
## Common pitfalls
|
||||
|
||||
### 1. Adding the provider to auth but not to model parsing
|
||||
|
||||
That makes credentials resolve correctly while `/model` and `provider:model` inputs fail.
|
||||
|
||||
### 2. Forgetting that `config["model"]` can be a string or a dict
|
||||
|
||||
A lot of provider-selection code has to normalize both forms.
|
||||
|
||||
### 3. Assuming a built-in provider is required
|
||||
|
||||
If the service is just OpenAI-compatible, a custom provider may already solve the user problem with less maintenance.
|
||||
|
||||
### 4. Forgetting auxiliary paths
|
||||
|
||||
The main chat path can work while summarization, memory flushes, or vision helpers fail because aux routing was never updated.
|
||||
|
||||
### 5. Native-provider branches hiding in `run_agent.py`
|
||||
|
||||
Search for `api_mode` and `self.client.`. Do not assume the obvious request path is the only one.
|
||||
|
||||
### 6. Sending OpenRouter-only knobs to other providers
|
||||
|
||||
Fields like provider routing belong only on the providers that support them.
|
||||
|
||||
### 7. Updating `hermes model` but not `hermes setup`
|
||||
|
||||
Both flows need to know about the provider.
|
||||
|
||||
## Good search targets while implementing
|
||||
|
||||
If you are hunting for all the places a provider touches, search these symbols:
|
||||
|
||||
- `PROVIDER_REGISTRY`
|
||||
- `_PROVIDER_ALIASES`
|
||||
- `_PROVIDER_MODELS`
|
||||
- `resolve_runtime_provider`
|
||||
- `_model_flow_`
|
||||
- `provider_choices`
|
||||
- `api_mode`
|
||||
- `_API_KEY_PROVIDER_AUX_MODELS`
|
||||
- `self.client.`
|
||||
|
||||
## Related docs
|
||||
|
||||
- [Provider Runtime Resolution](./provider-runtime.md)
|
||||
- [Architecture](./architecture.md)
|
||||
- [Contributing](./contributing.md)
|
||||
|
|
@ -41,12 +41,13 @@ If you are new to the codebase, read in this order:
|
|||
2. [Agent Loop Internals](./agent-loop.md)
|
||||
3. [Prompt Assembly](./prompt-assembly.md)
|
||||
4. [Provider Runtime Resolution](./provider-runtime.md)
|
||||
5. [Tools Runtime](./tools-runtime.md)
|
||||
6. [Session Storage](./session-storage.md)
|
||||
7. [Gateway Internals](./gateway-internals.md)
|
||||
8. [Context Compression & Prompt Caching](./context-compression-and-caching.md)
|
||||
9. [ACP Internals](./acp-internals.md)
|
||||
10. [Environments, Benchmarks & Data Generation](./environments.md)
|
||||
5. [Adding Providers](./adding-providers.md)
|
||||
6. [Tools Runtime](./tools-runtime.md)
|
||||
7. [Session Storage](./session-storage.md)
|
||||
8. [Gateway Internals](./gateway-internals.md)
|
||||
9. [Context Compression & Prompt Caching](./context-compression-and-caching.md)
|
||||
10. [ACP Internals](./acp-internals.md)
|
||||
11. [Environments, Benchmarks & Data Generation](./environments.md)
|
||||
|
||||
## Major subsystems
|
||||
|
||||
|
|
|
|||
|
|
@ -20,6 +20,12 @@ We value contributions in this order:
|
|||
6. **New tools** — rarely needed; most capabilities should be skills
|
||||
7. **Documentation** — fixes, clarifications, new examples
|
||||
|
||||
## Common contribution paths
|
||||
|
||||
- Building a new tool? Start with [Adding Tools](./adding-tools.md)
|
||||
- Building a new skill? Start with [Creating Skills](./creating-skills.md)
|
||||
- Building a new inference provider? Start with [Adding Providers](./adding-providers.md)
|
||||
|
||||
## Development Setup
|
||||
|
||||
### Prerequisites
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
sidebar_position: 11
|
||||
title: "Cron Internals"
|
||||
description: "How Hermes stores, schedules, locks, and delivers cron jobs"
|
||||
description: "How Hermes stores, schedules, edits, pauses, skill-loads, and delivers cron jobs"
|
||||
---
|
||||
|
||||
# Cron Internals
|
||||
|
|
@ -10,7 +10,9 @@ Hermes cron support is implemented primarily in:
|
|||
|
||||
- `cron/jobs.py`
|
||||
- `cron/scheduler.py`
|
||||
- `tools/cronjob_tools.py`
|
||||
- `gateway/run.py`
|
||||
- `hermes_cli/cron.py`
|
||||
|
||||
## Scheduling model
|
||||
|
||||
|
|
@ -21,9 +23,30 @@ Hermes supports:
|
|||
- cron expressions
|
||||
- explicit timestamps
|
||||
|
||||
The model-facing surface is a single `cronjob` tool with action-style operations:
|
||||
|
||||
- `create`
|
||||
- `list`
|
||||
- `update`
|
||||
- `pause`
|
||||
- `resume`
|
||||
- `run`
|
||||
- `remove`
|
||||
|
||||
## Job storage
|
||||
|
||||
Cron jobs are stored in Hermes-managed local state with atomic save/update semantics.
|
||||
Cron jobs are stored in Hermes-managed local state (`~/.hermes/cron/jobs.json`) with atomic write semantics.
|
||||
|
||||
Each job can carry:
|
||||
|
||||
- prompt
|
||||
- schedule metadata
|
||||
- repeat counters
|
||||
- delivery target
|
||||
- lifecycle state (`scheduled`, `paused`, `completed`, etc.)
|
||||
- zero, one, or multiple attached skills
|
||||
|
||||
Backward compatibility is preserved for older jobs that only stored a legacy single `skill` field or none of the newer lifecycle fields.
|
||||
|
||||
## Runtime behavior
|
||||
|
||||
|
|
@ -32,11 +55,22 @@ The scheduler:
|
|||
- loads jobs
|
||||
- computes due work
|
||||
- executes jobs in fresh agent sessions
|
||||
- optionally injects one or more skills before the prompt
|
||||
- handles repeat counters
|
||||
- updates next-run metadata
|
||||
- updates next-run metadata and state
|
||||
|
||||
In gateway mode, cron ticking is integrated into the long-running gateway loop.
|
||||
|
||||
## Skill-backed jobs
|
||||
|
||||
A cron job may attach multiple skills. At runtime, Hermes loads those skills in order and then appends the job prompt as the task instruction.
|
||||
|
||||
This gives scheduled jobs reusable guidance without requiring the user to paste full skill bodies into the cron prompt.
|
||||
|
||||
## Recursion guard
|
||||
|
||||
Cron-run sessions disable the `cronjob` toolset. This prevents a scheduled job from recursively creating or mutating more cron jobs and accidentally exploding token usage or scheduler load.
|
||||
|
||||
## Delivery model
|
||||
|
||||
Cron jobs can deliver to:
|
||||
|
|
@ -48,7 +82,7 @@ Cron jobs can deliver to:
|
|||
|
||||
## Locking
|
||||
|
||||
Hermes uses lock-based protections so concurrent cron ticks or overlapping scheduler processes do not corrupt job state.
|
||||
Hermes uses lock-based protections so overlapping scheduler ticks do not execute the same due-job batch twice.
|
||||
|
||||
## Related docs
|
||||
|
||||
|
|
|
|||
|
|
@ -20,6 +20,8 @@ Primary implementation:
|
|||
- `hermes_cli/auth.py`
|
||||
- `agent/auxiliary_client.py`
|
||||
|
||||
If you are trying to add a new first-class inference provider, read [Adding Providers](./adding-providers.md) alongside this page.
|
||||
|
||||
## Resolution precedence
|
||||
|
||||
At a high level, provider resolution uses:
|
||||
|
|
@ -84,6 +86,13 @@ When provider resolution selects `anthropic`, Hermes uses:
|
|||
- the native Anthropic Messages API
|
||||
- `agent/anthropic_adapter.py` for translation
|
||||
|
||||
Credential resolution for native Anthropic now prefers refreshable Claude Code credentials over copied env tokens when both are present. In practice that means:
|
||||
|
||||
- Claude Code credential files are treated as the preferred source when they include refreshable auth
|
||||
- manual `ANTHROPIC_TOKEN` / `CLAUDE_CODE_OAUTH_TOKEN` values still work as explicit overrides
|
||||
- Hermes preflights Anthropic credential refresh before native Messages API calls
|
||||
- Hermes still retries once on a 401 after rebuilding the Anthropic client, as a fallback path
|
||||
|
||||
## OpenAI Codex path
|
||||
|
||||
Codex uses a separate Responses API path:
|
||||
|
|
|
|||
|
|
@ -119,6 +119,7 @@ uv pip install -e "."
|
|||
| `cli` | Terminal menu UI for setup wizard | `uv pip install -e ".[cli]"` |
|
||||
| `modal` | Modal cloud execution backend | `uv pip install -e ".[modal]"` |
|
||||
| `tts-premium` | ElevenLabs premium voices | `uv pip install -e ".[tts-premium]"` |
|
||||
| `voice` | CLI microphone input + audio playback | `uv pip install -e ".[voice]"` |
|
||||
| `pty` | PTY terminal support | `uv pip install -e ".[pty]"` |
|
||||
| `honcho` | AI-native memory (Honcho integration) | `uv pip install -e ".[honcho]"` |
|
||||
| `mcp` | Model Context Protocol support | `uv pip install -e ".[mcp]"` |
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@ hermes setup # Or configure everything at once
|
|||
|----------|-----------|---------------|
|
||||
| **Nous Portal** | Subscription-based, zero-config | OAuth login via `hermes model` |
|
||||
| **OpenAI Codex** | ChatGPT OAuth, uses Codex models | Device code auth via `hermes model` |
|
||||
| **Anthropic** | Claude models directly (Pro/Max or API key) | API key or Claude Code setup-token |
|
||||
| **Anthropic** | Claude models directly (Pro/Max or API key) | `hermes model` with Claude Code auth, or an Anthropic API key |
|
||||
| **OpenRouter** | Multi-provider routing across many models | Enter your API key |
|
||||
| **Z.AI** | GLM / Zhipu-hosted models | Set `GLM_API_KEY` / `ZAI_API_KEY` |
|
||||
| **Kimi / Moonshot** | Moonshot-hosted coding and chat models | Set `KIMI_API_KEY` |
|
||||
|
|
@ -129,6 +129,25 @@ Chat with Hermes from your phone or other surfaces via Telegram, Discord, Slack,
|
|||
hermes gateway setup # Interactive platform configuration
|
||||
```
|
||||
|
||||
### Add voice mode
|
||||
|
||||
Want microphone input in the CLI or spoken replies in messaging?
|
||||
|
||||
```bash
|
||||
pip install "hermes-agent[voice]"
|
||||
|
||||
# Optional but recommended for free local speech-to-text
|
||||
pip install faster-whisper
|
||||
```
|
||||
|
||||
Then start Hermes and enable it inside the CLI:
|
||||
|
||||
```text
|
||||
/voice on
|
||||
```
|
||||
|
||||
Press `Ctrl+B` to record, or use `/voice tts` to have Hermes speak its replies. See [Voice Mode](../user-guide/features/voice-mode.md) for the full setup across CLI, Telegram, Discord, and Discord voice channels.
|
||||
|
||||
### Schedule automated tasks
|
||||
|
||||
```
|
||||
|
|
|
|||
|
|
@ -99,7 +99,7 @@ and open source LLMs. Summarize the top 3 stories in a concise briefing
|
|||
with links. Use a friendly, professional tone. Deliver to telegram.
|
||||
```
|
||||
|
||||
Hermes will create the cron job for you using the `schedule_cronjob` tool.
|
||||
Hermes will create the cron job for you using the unified `cronjob` tool.
|
||||
|
||||
### Option B: CLI Slash Command
|
||||
|
||||
|
|
@ -232,7 +232,7 @@ Or ask conversationally:
|
|||
Remove my morning briefing cron job.
|
||||
```
|
||||
|
||||
Hermes will use `list_cronjobs` to find it and `remove_cronjob` to delete it.
|
||||
Hermes will use `cronjob(action="list")` to find it and `cronjob(action="remove")` to delete it.
|
||||
|
||||
### Check Gateway Status
|
||||
|
||||
|
|
|
|||
|
|
@ -181,12 +181,18 @@ hermes status [--all] [--deep]
|
|||
## `hermes cron`
|
||||
|
||||
```bash
|
||||
hermes cron <list|status|tick>
|
||||
hermes cron <list|create|edit|pause|resume|run|remove|status|tick>
|
||||
```
|
||||
|
||||
| Subcommand | Description |
|
||||
|------------|-------------|
|
||||
| `list` | Show scheduled jobs. |
|
||||
| `create` / `add` | Create a scheduled job from a prompt, optionally attaching one or more skills via repeated `--skill`. |
|
||||
| `edit` | Update a job's schedule, prompt, name, delivery, repeat count, or attached skills. Supports `--clear-skills`, `--add-skill`, and `--remove-skill`. |
|
||||
| `pause` | Pause a job without deleting it. |
|
||||
| `resume` | Resume a paused job and compute its next future run. |
|
||||
| `run` | Trigger a job on the next scheduler tick. |
|
||||
| `remove` | Delete a scheduled job. |
|
||||
| `status` | Check whether the cron scheduler is running. |
|
||||
| `tick` | Run due jobs once and exit. |
|
||||
|
||||
|
|
|
|||
|
|
@ -26,16 +26,18 @@ All variables go in `~/.hermes/.env`. You can also set them with `hermes config
|
|||
| `MINIMAX_BASE_URL` | Override MiniMax base URL (default: `https://api.minimax.io/v1`) |
|
||||
| `MINIMAX_CN_API_KEY` | MiniMax API key — China endpoint ([minimaxi.com](https://www.minimaxi.com)) |
|
||||
| `MINIMAX_CN_BASE_URL` | Override MiniMax China base URL (default: `https://api.minimaxi.com/v1`) |
|
||||
| `ANTHROPIC_API_KEY` | Anthropic API key or setup-token ([console.anthropic.com](https://console.anthropic.com/)) |
|
||||
| `ANTHROPIC_TOKEN` | Anthropic OAuth/setup token (alternative to `ANTHROPIC_API_KEY`) |
|
||||
| `CLAUDE_CODE_OAUTH_TOKEN` | Claude Code setup-token (same as `ANTHROPIC_TOKEN`) |
|
||||
| `ANTHROPIC_API_KEY` | Anthropic Console API key ([console.anthropic.com](https://console.anthropic.com/)) |
|
||||
| `ANTHROPIC_TOKEN` | Manual or legacy Anthropic OAuth/setup-token override |
|
||||
| `CLAUDE_CODE_OAUTH_TOKEN` | Explicit Claude Code token override if you export one manually |
|
||||
| `HERMES_MODEL` | Preferred model name (checked before `LLM_MODEL`, used by gateway) |
|
||||
| `LLM_MODEL` | Default model name (fallback when not set in config.yaml) |
|
||||
| `VOICE_TOOLS_OPENAI_KEY` | OpenAI key for TTS and voice transcription (separate from custom endpoint) |
|
||||
| `VOICE_TOOLS_OPENAI_KEY` | OpenAI key for OpenAI speech-to-text and text-to-speech providers |
|
||||
| `HERMES_HOME` | Override Hermes config directory (default: `~/.hermes`) |
|
||||
|
||||
## Provider Auth (OAuth)
|
||||
|
||||
For native Anthropic auth, Hermes prefers Claude Code's own credential files when they exist because those credentials can refresh automatically. Environment variables such as `ANTHROPIC_TOKEN` remain useful as manual overrides, but they are no longer the preferred path for Claude Pro/Max login.
|
||||
|
||||
| Variable | Description |
|
||||
|----------|-------------|
|
||||
| `HERMES_INFERENCE_PROVIDER` | Override provider selection: `auto`, `openrouter`, `nous`, `openai-codex`, `anthropic`, `zai`, `kimi-coding`, `minimax`, `minimax-cn` (default: `auto`) |
|
||||
|
|
@ -57,7 +59,12 @@ All variables go in `~/.hermes/.env`. You can also set them with `hermes config
|
|||
| `BROWSERBASE_PROJECT_ID` | Browserbase project ID |
|
||||
| `BROWSER_INACTIVITY_TIMEOUT` | Browser session inactivity timeout in seconds |
|
||||
| `FAL_KEY` | Image generation ([fal.ai](https://fal.ai/)) |
|
||||
| `ELEVENLABS_API_KEY` | Premium TTS voices ([elevenlabs.io](https://elevenlabs.io/)) |
|
||||
| `GROQ_API_KEY` | Groq Whisper STT API key ([groq.com](https://groq.com/)) |
|
||||
| `ELEVENLABS_API_KEY` | ElevenLabs premium TTS voices ([elevenlabs.io](https://elevenlabs.io/)) |
|
||||
| `STT_GROQ_MODEL` | Override the Groq STT model (default: `whisper-large-v3-turbo`) |
|
||||
| `GROQ_BASE_URL` | Override the Groq OpenAI-compatible STT endpoint |
|
||||
| `STT_OPENAI_MODEL` | Override the OpenAI STT model (default: `whisper-1`) |
|
||||
| `STT_OPENAI_BASE_URL` | Override the OpenAI-compatible STT endpoint |
|
||||
| `HONCHO_API_KEY` | Cross-session user modeling ([honcho.dev](https://honcho.dev/)) |
|
||||
| `TINKER_API_KEY` | RL training ([tinker-console.thinkingmachines.ai](https://tinker-console.thinkingmachines.ai/)) |
|
||||
| `WANDB_API_KEY` | RL training metrics ([wandb.ai](https://wandb.ai/)) |
|
||||
|
|
|
|||
|
|
@ -45,6 +45,7 @@ Type `/` in the CLI to open the autocomplete menu. Built-in commands are case-in
|
|||
| `/verbose` | Cycle tool progress display: off → new → all → verbose |
|
||||
| `/reasoning` | Manage reasoning effort and display (usage: /reasoning [level\|show\|hide]) |
|
||||
| `/skin` | Show or change the display skin/theme |
|
||||
| `/voice [on\|off\|tts\|status]` | Toggle CLI voice mode and spoken playback. Recording uses `voice.record_key` (default: `Ctrl+B`). |
|
||||
|
||||
### Tools & Skills
|
||||
|
||||
|
|
@ -53,7 +54,7 @@ Type `/` in the CLI to open the autocomplete menu. Built-in commands are case-in
|
|||
| `/tools` | List available tools |
|
||||
| `/toolsets` | List available toolsets |
|
||||
| `/skills` | Search, install, inspect, or manage skills from online registries |
|
||||
| `/cron` | Manage scheduled tasks (list, add, remove) |
|
||||
| `/cron` | Manage scheduled tasks (list, add/create, edit, pause, resume, run, remove) |
|
||||
| `/reload-mcp` | Reload MCP servers from config.yaml |
|
||||
|
||||
### Info
|
||||
|
|
@ -105,6 +106,7 @@ The messaging gateway supports the following built-in commands inside Telegram,
|
|||
| `/usage` | Show token usage for the current session. |
|
||||
| `/insights [days]` | Show usage analytics. |
|
||||
| `/reasoning [level\|show\|hide]` | Change reasoning effort or toggle reasoning display. |
|
||||
| `/voice [on\|off\|tts\|join\|channel\|leave\|status]` | Control spoken replies in chat. `join`/`channel`/`leave` manage Discord voice-channel mode. |
|
||||
| `/rollback [number]` | List or restore filesystem checkpoints. |
|
||||
| `/background <prompt>` | Run a prompt in a separate background session. |
|
||||
| `/reload-mcp` | Reload MCP servers from config. |
|
||||
|
|
@ -116,4 +118,5 @@ The messaging gateway supports the following built-in commands inside Telegram,
|
|||
|
||||
- `/skin`, `/tools`, `/toolsets`, `/config`, `/prompt`, `/cron`, `/skills`, `/platforms`, `/paste`, and `/verbose` are **CLI-only** commands.
|
||||
- `/status`, `/stop`, `/sethome`, `/resume`, `/background`, and `/update` are **messaging-only** commands.
|
||||
- `/reload-mcp` and `/rollback` work in **both** the CLI and the messaging gateway.
|
||||
- `/voice`, `/reload-mcp`, and `/rollback` work in **both** the CLI and the messaging gateway.
|
||||
- `/voice join`, `/voice channel`, and `/voice leave` are only meaningful on Discord.
|
||||
|
|
|
|||
|
|
@ -40,9 +40,7 @@ This page documents the built-in Hermes tool registry as it exists in code. Avai
|
|||
|
||||
| Tool | Description | Requires environment |
|
||||
|------|-------------|----------------------|
|
||||
| `list_cronjobs` | List all scheduled cronjobs with their IDs, schedules, and status. Use this to: - See what jobs are currently scheduled - Find job IDs for removal with remove_cronjob - Check job status and next run times Returns job_id, name, schedule, re… | — |
|
||||
| `remove_cronjob` | Remove a scheduled cronjob by its ID. Use list_cronjobs first to find the job_id of the job you want to remove. Jobs that have completed their repeat count are auto-removed, but you can use this to cancel a job before it completes. | — |
|
||||
| `schedule_cronjob` | Schedule an automated task to run the agent on a schedule. ⚠️ CRITICAL: The cronjob runs in a FRESH SESSION with NO CONTEXT from this conversation. The prompt must be COMPLETELY SELF-CONTAINED with ALL necessary information including: - Fu… | — |
|
||||
| `cronjob` | Unified scheduled-task manager. Use `action="create"`, `"list"`, `"update"`, `"pause"`, `"resume"`, `"run"`, or `"remove"` to manage jobs. Supports skill-backed jobs with one or more attached skills, and `skills=[]` on update clears attached skills. Cron runs happen in fresh sessions with no current-chat context. | — |
|
||||
|
||||
## `delegation` toolset
|
||||
|
||||
|
|
|
|||
|
|
@ -13,19 +13,19 @@ Toolsets are named bundles of tools that you can enable with `hermes chat --tool
|
|||
| `browser` | core | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `web_search` |
|
||||
| `clarify` | core | `clarify` |
|
||||
| `code_execution` | core | `execute_code` |
|
||||
| `cronjob` | core | `list_cronjobs`, `remove_cronjob`, `schedule_cronjob` |
|
||||
| `cronjob` | core | `cronjob` |
|
||||
| `debugging` | composite | `patch`, `process`, `read_file`, `search_files`, `terminal`, `web_extract`, `web_search`, `write_file` |
|
||||
| `delegation` | core | `delegate_task` |
|
||||
| `file` | core | `patch`, `read_file`, `search_files`, `write_file` |
|
||||
| `hermes-cli` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `list_cronjobs`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `remove_cronjob`, `schedule_cronjob`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` |
|
||||
| `hermes-discord` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `list_cronjobs`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `remove_cronjob`, `schedule_cronjob`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` |
|
||||
| `hermes-email` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `list_cronjobs`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `remove_cronjob`, `schedule_cronjob`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` |
|
||||
| `hermes-gateway` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `list_cronjobs`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `remove_cronjob`, `schedule_cronjob`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` |
|
||||
| `hermes-homeassistant` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `list_cronjobs`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `remove_cronjob`, `schedule_cronjob`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` |
|
||||
| `hermes-signal` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `list_cronjobs`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `remove_cronjob`, `schedule_cronjob`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` |
|
||||
| `hermes-slack` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `list_cronjobs`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `remove_cronjob`, `schedule_cronjob`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` |
|
||||
| `hermes-telegram` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `list_cronjobs`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `remove_cronjob`, `schedule_cronjob`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` |
|
||||
| `hermes-whatsapp` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `list_cronjobs`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `remove_cronjob`, `schedule_cronjob`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` |
|
||||
| `hermes-cli` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `cronjob`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` |
|
||||
| `hermes-discord` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `cronjob`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` |
|
||||
| `hermes-email` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `cronjob`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` |
|
||||
| `hermes-gateway` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `cronjob`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` |
|
||||
| `hermes-homeassistant` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `cronjob`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` |
|
||||
| `hermes-signal` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `cronjob`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` |
|
||||
| `hermes-slack` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `cronjob`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` |
|
||||
| `hermes-telegram` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `cronjob`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` |
|
||||
| `hermes-whatsapp` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `cronjob`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` |
|
||||
| `homeassistant` | core | `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services` |
|
||||
| `honcho` | core | `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search` |
|
||||
| `image_gen` | core | `image_generate` |
|
||||
|
|
|
|||
|
|
@ -27,6 +27,10 @@ hermes chat --provider openrouter # Force OpenRouter
|
|||
# With specific toolsets
|
||||
hermes chat --toolsets "web,terminal,skills"
|
||||
|
||||
# Start with one or more skills preloaded
|
||||
hermes -s hermes-agent-dev,github-auth
|
||||
hermes chat -s github-pr-workflow -q "open a draft PR"
|
||||
|
||||
# Resume previous sessions
|
||||
hermes --continue # Resume the most recent CLI session (-c)
|
||||
hermes --resume <session_id> # Resume a specific session by ID (-r)
|
||||
|
|
@ -77,6 +81,7 @@ When resuming a previous session (`hermes -c` or `hermes --resume <id>`), a "Pre
|
|||
| `Alt+Enter` or `Ctrl+J` | New line (multi-line input) |
|
||||
| `Alt+V` | Paste an image from the clipboard when supported by the terminal |
|
||||
| `Ctrl+V` | Paste text and opportunistically attach clipboard images |
|
||||
| `Ctrl+B` | Start/stop voice recording when voice mode is enabled (`voice.record_key`, default: `ctrl+b`) |
|
||||
| `Ctrl+C` | Interrupt agent (double-press within 2s to force exit) |
|
||||
| `Ctrl+D` | Exit |
|
||||
| `Tab` | Autocomplete slash commands |
|
||||
|
|
@ -95,11 +100,15 @@ Common examples:
|
|||
| `/skills browse` | Browse the skills hub and official optional skills |
|
||||
| `/background <prompt>` | Run a prompt in a separate background session |
|
||||
| `/skin` | Show or switch the active CLI skin |
|
||||
| `/voice on` | Enable CLI voice mode (press `Ctrl+B` to record) |
|
||||
| `/voice tts` | Toggle spoken playback for Hermes replies |
|
||||
| `/reasoning high` | Increase reasoning effort |
|
||||
| `/title My Session` | Name the current session |
|
||||
|
||||
For the full built-in CLI and messaging lists, see [Slash Commands Reference](../reference/slash-commands.md).
|
||||
|
||||
For setup, providers, silence tuning, and messaging/Discord voice usage, see [Voice Mode](features/voice-mode.md).
|
||||
|
||||
:::tip
|
||||
Commands are case-insensitive — `/HELP` works the same as `/help`. Installed skills also become slash commands automatically.
|
||||
:::
|
||||
|
|
@ -121,6 +130,17 @@ quick_commands:
|
|||
|
||||
Then type `/status` or `/gpu` in any chat. See the [Configuration guide](/docs/user-guide/configuration#quick-commands) for more examples.
|
||||
|
||||
## Preloading Skills at Launch
|
||||
|
||||
If you already know which skills you want active for the session, pass them at launch time:
|
||||
|
||||
```bash
|
||||
hermes -s hermes-agent-dev,github-auth
|
||||
hermes chat -s github-pr-workflow -s github-auth
|
||||
```
|
||||
|
||||
Hermes loads each named skill into the session prompt before the first turn. The same flag works in interactive mode and single-query mode.
|
||||
|
||||
## Skill Slash Commands
|
||||
|
||||
Every installed skill in `~/.hermes/skills/` is automatically registered as a slash command. The skill name becomes the command:
|
||||
|
|
|
|||
|
|
@ -63,7 +63,7 @@ You need at least one way to connect to an LLM. Use `hermes model` to switch pro
|
|||
|----------|-------|
|
||||
| **Nous Portal** | `hermes model` (OAuth, subscription-based) |
|
||||
| **OpenAI Codex** | `hermes model` (ChatGPT OAuth, uses Codex models) |
|
||||
| **Anthropic** | `hermes model` (API key, setup-token, or Claude Code auto-detect) |
|
||||
| **Anthropic** | `hermes model` (Claude Pro/Max via Claude Code auth, Anthropic API key, or manual setup-token) |
|
||||
| **OpenRouter** | `OPENROUTER_API_KEY` in `~/.hermes/.env` |
|
||||
| **z.ai / GLM** | `GLM_API_KEY` in `~/.hermes/.env` (provider: `zai`) |
|
||||
| **Kimi / Moonshot** | `KIMI_API_KEY` in `~/.hermes/.env` (provider: `kimi-coding`) |
|
||||
|
|
@ -85,17 +85,23 @@ Use Claude models directly through the Anthropic API — no OpenRouter proxy nee
|
|||
|
||||
```bash
|
||||
# With an API key (pay-per-token)
|
||||
export ANTHROPIC_API_KEY=sk-ant-api03-...
|
||||
export ANTHROPIC_API_KEY=***
|
||||
hermes chat --provider anthropic --model claude-sonnet-4-6
|
||||
|
||||
# With a Claude Code setup-token (Pro/Max subscription)
|
||||
export ANTHROPIC_API_KEY=sk-ant-oat01-... # from 'claude setup-token'
|
||||
# Preferred: authenticate through `hermes model`
|
||||
# Hermes will use Claude Code's credential store directly when available
|
||||
hermes model
|
||||
|
||||
# Manual override with a setup-token (fallback / legacy)
|
||||
export ANTHROPIC_TOKEN=*** # setup-token or manual OAuth token
|
||||
hermes chat --provider anthropic
|
||||
|
||||
# Auto-detect Claude Code credentials (if you have Claude Code installed)
|
||||
hermes chat --provider anthropic # reads ~/.claude.json automatically
|
||||
# Auto-detect Claude Code credentials (if you already use Claude Code)
|
||||
hermes chat --provider anthropic # reads Claude Code credential files automatically
|
||||
```
|
||||
|
||||
When you choose Anthropic OAuth through `hermes model`, Hermes prefers Claude Code's own credential store over copying the token into `~/.hermes/.env`. That keeps refreshable Claude credentials refreshable.
|
||||
|
||||
Or set it permanently:
|
||||
```yaml
|
||||
model:
|
||||
|
|
@ -695,6 +701,8 @@ tts:
|
|||
voice: "alloy" # alloy, echo, fable, onyx, nova, shimmer
|
||||
```
|
||||
|
||||
This controls both the `text_to_speech` tool and spoken replies in voice mode (`/voice tts` in the CLI or messaging gateway).
|
||||
|
||||
## Display Settings
|
||||
|
||||
```yaml
|
||||
|
|
@ -719,10 +727,43 @@ display:
|
|||
|
||||
```yaml
|
||||
stt:
|
||||
provider: "openai" # STT provider
|
||||
provider: "local" # "local" | "groq" | "openai"
|
||||
local:
|
||||
model: "base" # tiny, base, small, medium, large-v3
|
||||
openai:
|
||||
model: "whisper-1" # whisper-1 | gpt-4o-mini-transcribe | gpt-4o-transcribe
|
||||
# model: "whisper-1" # Legacy fallback key still respected
|
||||
```
|
||||
|
||||
Requires `VOICE_TOOLS_OPENAI_KEY` in `.env` for OpenAI STT.
|
||||
Provider behavior:
|
||||
|
||||
- `local` uses `faster-whisper` running on your machine. Install it separately with `pip install faster-whisper`.
|
||||
- `groq` uses Groq's Whisper-compatible endpoint and reads `GROQ_API_KEY`.
|
||||
- `openai` uses the OpenAI speech API and reads `VOICE_TOOLS_OPENAI_KEY`.
|
||||
|
||||
If the requested provider is unavailable, Hermes falls back automatically in this order: `local` → `groq` → `openai`.
|
||||
|
||||
Groq and OpenAI model overrides are environment-driven:
|
||||
|
||||
```bash
|
||||
STT_GROQ_MODEL=whisper-large-v3-turbo
|
||||
STT_OPENAI_MODEL=whisper-1
|
||||
GROQ_BASE_URL=https://api.groq.com/openai/v1
|
||||
STT_OPENAI_BASE_URL=https://api.openai.com/v1
|
||||
```
|
||||
|
||||
## Voice Mode (CLI)
|
||||
|
||||
```yaml
|
||||
voice:
|
||||
record_key: "ctrl+b" # Push-to-talk key inside the CLI
|
||||
max_recording_seconds: 120 # Hard stop for long recordings
|
||||
auto_tts: false # Enable spoken replies automatically when /voice on
|
||||
silence_threshold: 200 # RMS threshold for speech detection
|
||||
silence_duration: 3.0 # Seconds of silence before auto-stop
|
||||
```
|
||||
|
||||
Use `/voice on` in the CLI to enable microphone mode, `record_key` to start/stop recording, and `/voice tts` to toggle spoken replies. See [Voice Mode](/docs/user-guide/features/voice-mode) for end-to-end setup and platform-specific behavior.
|
||||
|
||||
## Quick Commands
|
||||
|
||||
|
|
|
|||
|
|
@ -1,68 +1,183 @@
|
|||
---
|
||||
sidebar_position: 5
|
||||
title: "Scheduled Tasks (Cron)"
|
||||
description: "Schedule automated tasks with natural language — cron jobs, delivery options, and the gateway scheduler"
|
||||
description: "Schedule automated tasks with natural language, manage them with one cron tool, and attach one or more skills"
|
||||
---
|
||||
|
||||
# Scheduled Tasks (Cron)
|
||||
|
||||
Schedule tasks to run automatically with natural language or cron expressions. The agent can self-schedule using the `schedule_cronjob` tool from any platform.
|
||||
Schedule tasks to run automatically with natural language or cron expressions. Hermes exposes cron management through a single `cronjob` tool with action-style operations instead of separate schedule/list/remove tools.
|
||||
|
||||
## Creating Scheduled Tasks
|
||||
## What cron can do now
|
||||
|
||||
### In the CLI
|
||||
Cron jobs can:
|
||||
|
||||
Use the `/cron` slash command:
|
||||
- schedule one-shot or recurring tasks
|
||||
- pause, resume, edit, trigger, and remove jobs
|
||||
- attach zero, one, or multiple skills to a job
|
||||
- deliver results back to the origin chat, local files, or configured platform targets
|
||||
- run in fresh agent sessions with the normal static tool list
|
||||
|
||||
```
|
||||
:::warning
|
||||
Cron-run sessions cannot recursively create more cron jobs. Hermes disables cron management tools inside cron executions to prevent runaway scheduling loops.
|
||||
:::
|
||||
|
||||
## Creating scheduled tasks
|
||||
|
||||
### In chat with `/cron`
|
||||
|
||||
```bash
|
||||
/cron add 30m "Remind me to check the build"
|
||||
/cron add "every 2h" "Check server status"
|
||||
/cron add "0 9 * * *" "Morning briefing"
|
||||
/cron list
|
||||
/cron remove <job_id>
|
||||
/cron add "every 1h" "Summarize new feed items" --skill blogwatcher
|
||||
/cron add "every 1h" "Use both skills and combine the result" --skill blogwatcher --skill find-nearby
|
||||
```
|
||||
|
||||
### Through Natural Conversation
|
||||
|
||||
Simply ask the agent on any platform:
|
||||
### From the standalone CLI
|
||||
|
||||
```bash
|
||||
hermes cron create "every 2h" "Check server status"
|
||||
hermes cron create "every 1h" "Summarize new feed items" --skill blogwatcher
|
||||
hermes cron create "every 1h" "Use both skills and combine the result" \
|
||||
--skill blogwatcher \
|
||||
--skill find-nearby \
|
||||
--name "Skill combo"
|
||||
```
|
||||
|
||||
### Through natural conversation
|
||||
|
||||
Ask Hermes normally:
|
||||
|
||||
```text
|
||||
Every morning at 9am, check Hacker News for AI news and send me a summary on Telegram.
|
||||
```
|
||||
|
||||
The agent will use the `schedule_cronjob` tool to set it up.
|
||||
Hermes will use the unified `cronjob` tool internally.
|
||||
|
||||
## How It Works
|
||||
## Skill-backed cron jobs
|
||||
|
||||
**Cron execution is handled by the gateway daemon.** The gateway ticks the scheduler every 60 seconds, running any due jobs in isolated agent sessions:
|
||||
A cron job can load one or more skills before it runs the prompt.
|
||||
|
||||
### Single skill
|
||||
|
||||
```python
|
||||
cronjob(
|
||||
action="create",
|
||||
skill="blogwatcher",
|
||||
prompt="Check the configured feeds and summarize anything new.",
|
||||
schedule="0 9 * * *",
|
||||
name="Morning feeds",
|
||||
)
|
||||
```
|
||||
|
||||
### Multiple skills
|
||||
|
||||
Skills are loaded in order. The prompt becomes the task instruction layered on top of those skills.
|
||||
|
||||
```python
|
||||
cronjob(
|
||||
action="create",
|
||||
skills=["blogwatcher", "find-nearby"],
|
||||
prompt="Look for new local events and interesting nearby places, then combine them into one short brief.",
|
||||
schedule="every 6h",
|
||||
name="Local brief",
|
||||
)
|
||||
```
|
||||
|
||||
This is useful when you want a scheduled agent to inherit reusable workflows without stuffing the full skill text into the cron prompt itself.
|
||||
|
||||
## Editing jobs
|
||||
|
||||
You do not need to delete and recreate jobs just to change them.
|
||||
|
||||
### Chat
|
||||
|
||||
```bash
|
||||
/cron edit <job_id> --schedule "every 4h"
|
||||
/cron edit <job_id> --prompt "Use the revised task"
|
||||
/cron edit <job_id> --skill blogwatcher --skill find-nearby
|
||||
/cron edit <job_id> --remove-skill blogwatcher
|
||||
/cron edit <job_id> --clear-skills
|
||||
```
|
||||
|
||||
### Standalone CLI
|
||||
|
||||
```bash
|
||||
hermes cron edit <job_id> --schedule "every 4h"
|
||||
hermes cron edit <job_id> --prompt "Use the revised task"
|
||||
hermes cron edit <job_id> --skill blogwatcher --skill find-nearby
|
||||
hermes cron edit <job_id> --add-skill find-nearby
|
||||
hermes cron edit <job_id> --remove-skill blogwatcher
|
||||
hermes cron edit <job_id> --clear-skills
|
||||
```
|
||||
|
||||
Notes:
|
||||
|
||||
- repeated `--skill` replaces the job's attached skill list
|
||||
- `--add-skill` appends to the existing list without replacing it
|
||||
- `--remove-skill` removes specific attached skills
|
||||
- `--clear-skills` removes all attached skills
|
||||
|
||||
## Lifecycle actions
|
||||
|
||||
Cron jobs now have a fuller lifecycle than just create/remove.
|
||||
|
||||
### Chat
|
||||
|
||||
```bash
|
||||
/cron list
|
||||
/cron pause <job_id>
|
||||
/cron resume <job_id>
|
||||
/cron run <job_id>
|
||||
/cron remove <job_id>
|
||||
```
|
||||
|
||||
### Standalone CLI
|
||||
|
||||
```bash
|
||||
hermes cron list
|
||||
hermes cron pause <job_id>
|
||||
hermes cron resume <job_id>
|
||||
hermes cron run <job_id>
|
||||
hermes cron remove <job_id>
|
||||
hermes cron status
|
||||
hermes cron tick
|
||||
```
|
||||
|
||||
What they do:
|
||||
|
||||
- `pause` — keep the job but stop scheduling it
|
||||
- `resume` — re-enable the job and compute the next future run
|
||||
- `run` — trigger the job on the next scheduler tick
|
||||
- `remove` — delete it entirely
|
||||
|
||||
## How it works
|
||||
|
||||
**Cron execution is handled by the gateway daemon.** The gateway ticks the scheduler every 60 seconds, running any due jobs in isolated agent sessions.
|
||||
|
||||
```bash
|
||||
hermes gateway install # Install as system service (recommended)
|
||||
hermes gateway # Or run in foreground
|
||||
|
||||
hermes cron list # View scheduled jobs
|
||||
hermes cron status # Check if gateway is running
|
||||
hermes cron list
|
||||
hermes cron status
|
||||
```
|
||||
|
||||
### The Gateway Scheduler
|
||||
### Gateway scheduler behavior
|
||||
|
||||
The scheduler runs as a background thread inside the gateway process. On each tick (every 60 seconds):
|
||||
On each tick Hermes:
|
||||
|
||||
1. It loads all jobs from `~/.hermes/cron/jobs.json`
|
||||
2. Checks each enabled job's `next_run_at` against the current time
|
||||
3. For each due job, spawns a fresh `AIAgent` session with the job's prompt
|
||||
4. The agent runs to completion with full tool access
|
||||
5. The final response is delivered to the configured target
|
||||
6. The job's run count is incremented and next run time computed
|
||||
7. Jobs that hit their repeat limit are auto-removed
|
||||
1. loads jobs from `~/.hermes/cron/jobs.json`
|
||||
2. checks `next_run_at` against the current time
|
||||
3. starts a fresh `AIAgent` session for each due job
|
||||
4. optionally injects one or more attached skills into that fresh session
|
||||
5. runs the prompt to completion
|
||||
6. delivers the final response
|
||||
7. updates run metadata and the next scheduled time
|
||||
|
||||
A **file-based lock** (`~/.hermes/cron/.tick.lock`) prevents duplicate execution if multiple processes overlap (e.g., gateway + manual tick).
|
||||
A file lock at `~/.hermes/cron/.tick.lock` prevents overlapping scheduler ticks from double-running the same job batch.
|
||||
|
||||
:::info
|
||||
Even if no messaging platforms are configured, the gateway stays running for cron. A file lock prevents duplicate execution if multiple processes overlap.
|
||||
:::
|
||||
|
||||
## Delivery Options
|
||||
## Delivery options
|
||||
|
||||
When scheduling jobs, you specify where the output goes:
|
||||
|
||||
|
|
@ -70,48 +185,36 @@ When scheduling jobs, you specify where the output goes:
|
|||
|--------|-------------|---------|
|
||||
| `"origin"` | Back to where the job was created | Default on messaging platforms |
|
||||
| `"local"` | Save to local files only (`~/.hermes/cron/output/`) | Default on CLI |
|
||||
| `"telegram"` | Telegram home channel | Uses `TELEGRAM_HOME_CHANNEL` env var |
|
||||
| `"discord"` | Discord home channel | Uses `DISCORD_HOME_CHANNEL` env var |
|
||||
| `"telegram:123456"` | Specific Telegram chat by ID | For directing output to a specific chat |
|
||||
| `"discord:987654"` | Specific Discord channel by ID | For directing output to a specific channel |
|
||||
| `"telegram"` | Telegram home channel | Uses `TELEGRAM_HOME_CHANNEL` |
|
||||
| `"discord"` | Discord home channel | Uses `DISCORD_HOME_CHANNEL` |
|
||||
| `"telegram:123456"` | Specific Telegram chat by ID | Direct delivery |
|
||||
| `"discord:987654"` | Specific Discord channel by ID | Direct delivery |
|
||||
|
||||
**How `"origin"` works:** When a job is created from a messaging platform, Hermes records the source platform and chat ID. When the job runs and deliver is `"origin"`, the output is sent back to that exact platform and chat. If origin info isn't available (e.g., job created from CLI), delivery falls back to local.
|
||||
The agent's final response is automatically delivered. You do not need to call `send_message` in the cron prompt.
|
||||
|
||||
**How platform names work:** When you specify a bare platform name like `"telegram"`, Hermes first checks whether the job's origin matches that platform and, if so, reuses the origin chat ID. Otherwise, it falls back to the platform's home channel configured via environment variable (e.g., `TELEGRAM_HOME_CHANNEL`).
|
||||
## Schedule formats
|
||||
|
||||
The agent's final response is automatically delivered — you do **not** need to include `send_message` in the cron prompt for that same destination. If a cron run calls `send_message` to the exact target the scheduler will already deliver to, Hermes skips that duplicate send and tells the model to put the user-facing content in the final response instead. Use `send_message` only for additional or different targets.
|
||||
|
||||
The agent knows your connected platforms and home channels — it'll choose sensible defaults.
|
||||
### Relative delays (one-shot)
|
||||
|
||||
## Schedule Formats
|
||||
|
||||
### Relative Delays (One-Shot)
|
||||
|
||||
Run once after a delay:
|
||||
|
||||
```
|
||||
```text
|
||||
30m → Run once in 30 minutes
|
||||
2h → Run once in 2 hours
|
||||
1d → Run once in 1 day
|
||||
```
|
||||
|
||||
Supported units: `m`/`min`/`minutes`, `h`/`hr`/`hours`, `d`/`day`/`days`.
|
||||
### Intervals (recurring)
|
||||
|
||||
### Intervals (Recurring)
|
||||
|
||||
Run repeatedly at fixed intervals:
|
||||
|
||||
```
|
||||
```text
|
||||
every 30m → Every 30 minutes
|
||||
every 2h → Every 2 hours
|
||||
every 1d → Every day
|
||||
```
|
||||
|
||||
### Cron Expressions
|
||||
### Cron expressions
|
||||
|
||||
Standard 5-field cron syntax for precise scheduling:
|
||||
|
||||
```
|
||||
```text
|
||||
0 9 * * * → Daily at 9:00 AM
|
||||
0 9 * * 1-5 → Weekdays at 9:00 AM
|
||||
0 */6 * * * → Every 6 hours
|
||||
|
|
@ -119,155 +222,63 @@ Standard 5-field cron syntax for precise scheduling:
|
|||
0 0 * * 0 → Every Sunday at midnight
|
||||
```
|
||||
|
||||
#### Cron Expression Cheat Sheet
|
||||
### ISO timestamps
|
||||
|
||||
```
|
||||
┌───── minute (0-59)
|
||||
│ ┌───── hour (0-23)
|
||||
│ │ ┌───── day of month (1-31)
|
||||
│ │ │ ┌───── month (1-12)
|
||||
│ │ │ │ ┌───── day of week (0-7, 0 and 7 = Sunday)
|
||||
│ │ │ │ │
|
||||
* * * * *
|
||||
|
||||
Special characters:
|
||||
* Any value
|
||||
, List separator (1,3,5)
|
||||
- Range (1-5)
|
||||
/ Step values (*/15 = every 15)
|
||||
```
|
||||
|
||||
:::note
|
||||
Cron expressions require the `croniter` Python package. Install with `pip install croniter` if not already available.
|
||||
:::
|
||||
|
||||
### ISO Timestamps
|
||||
|
||||
Run once at a specific date/time:
|
||||
|
||||
```
|
||||
```text
|
||||
2026-03-15T09:00:00 → One-time at March 15, 2026 9:00 AM
|
||||
```
|
||||
|
||||
## Repeat Behavior
|
||||
## Repeat behavior
|
||||
|
||||
The `repeat` parameter controls how many times a job runs:
|
||||
|
||||
| Schedule Type | Default Repeat | Behavior |
|
||||
| Schedule type | Default repeat | Behavior |
|
||||
|--------------|----------------|----------|
|
||||
| One-shot (`30m`, timestamp) | 1 (run once) | Runs once, then auto-deleted |
|
||||
| Interval (`every 2h`) | Forever (`null`) | Runs indefinitely until removed |
|
||||
| Cron expression | Forever (`null`) | Runs indefinitely until removed |
|
||||
| One-shot (`30m`, timestamp) | 1 | Runs once |
|
||||
| Interval (`every 2h`) | forever | Runs until removed |
|
||||
| Cron expression | forever | Runs until removed |
|
||||
|
||||
You can override the default:
|
||||
You can override it:
|
||||
|
||||
```python
|
||||
schedule_cronjob(
|
||||
cronjob(
|
||||
action="create",
|
||||
prompt="...",
|
||||
schedule="every 2h",
|
||||
repeat=5 # Run exactly 5 times, then auto-delete
|
||||
repeat=5,
|
||||
)
|
||||
```
|
||||
|
||||
When a job hits its repeat limit, it is automatically removed from the job list.
|
||||
## Managing jobs programmatically
|
||||
|
||||
## Real-World Examples
|
||||
|
||||
### Daily Standup Report
|
||||
|
||||
```
|
||||
Schedule a daily standup report: Every weekday at 9am, check the GitHub
|
||||
repository at github.com/myorg/myproject for:
|
||||
1. Pull requests opened/merged in the last 24 hours
|
||||
2. Issues created or closed
|
||||
3. Any CI/CD failures on the main branch
|
||||
Format as a brief standup-style summary. Deliver to telegram.
|
||||
```
|
||||
|
||||
The agent creates:
|
||||
```python
|
||||
schedule_cronjob(
|
||||
prompt="Check github.com/myorg/myproject for PRs, issues, and CI status from the last 24 hours. Format as a standup report.",
|
||||
schedule="0 9 * * 1-5",
|
||||
name="Daily Standup Report",
|
||||
deliver="telegram"
|
||||
)
|
||||
```
|
||||
|
||||
### Weekly Backup Verification
|
||||
|
||||
```
|
||||
Every Sunday at 2am, verify that backups exist in /data/backups/ for
|
||||
each day of the past week. Check file sizes are > 1MB. Report any
|
||||
gaps or suspiciously small files.
|
||||
```
|
||||
|
||||
### Monitoring Alerts
|
||||
|
||||
```
|
||||
Every 15 minutes, curl https://api.myservice.com/health and verify
|
||||
it returns HTTP 200 with {"status": "ok"}. If it fails, include the
|
||||
error details and response code. Deliver to telegram:123456789.
|
||||
```
|
||||
The agent-facing API is one tool:
|
||||
|
||||
```python
|
||||
schedule_cronjob(
|
||||
prompt="Run 'curl -s -o /dev/null -w \"%{http_code}\" https://api.myservice.com/health' and verify it returns 200. Also fetch the full response with 'curl -s https://api.myservice.com/health' and check for {\"status\": \"ok\"}. Report the result.",
|
||||
schedule="every 15m",
|
||||
name="API Health Check",
|
||||
deliver="telegram:123456789"
|
||||
)
|
||||
cronjob(action="create", ...)
|
||||
cronjob(action="list")
|
||||
cronjob(action="update", job_id="...")
|
||||
cronjob(action="pause", job_id="...")
|
||||
cronjob(action="resume", job_id="...")
|
||||
cronjob(action="run", job_id="...")
|
||||
cronjob(action="remove", job_id="...")
|
||||
```
|
||||
|
||||
### Periodic Disk Usage Check
|
||||
For `update`, pass `skills=[]` to remove all attached skills.
|
||||
|
||||
```python
|
||||
schedule_cronjob(
|
||||
prompt="Check disk usage with 'df -h' and report any partitions above 80% usage. Also check Docker disk usage with 'docker system df' if Docker is installed.",
|
||||
schedule="0 8 * * *",
|
||||
name="Disk Usage Report",
|
||||
deliver="origin"
|
||||
)
|
||||
```
|
||||
## Job storage
|
||||
|
||||
## Managing Jobs
|
||||
Jobs are stored in `~/.hermes/cron/jobs.json`. Output from job runs is saved to `~/.hermes/cron/output/{job_id}/{timestamp}.md`.
|
||||
|
||||
```bash
|
||||
# CLI commands
|
||||
hermes cron list # View all scheduled jobs
|
||||
hermes cron status # Check if the scheduler is running
|
||||
The storage uses atomic file writes so interrupted writes do not leave a partially written job file behind.
|
||||
|
||||
# Slash commands (inside chat)
|
||||
/cron list
|
||||
/cron remove <job_id>
|
||||
```
|
||||
|
||||
The agent can also manage jobs conversationally:
|
||||
- `list_cronjobs` — Shows all jobs with IDs, schedules, repeat status, and next run times
|
||||
- `remove_cronjob` — Removes a job by ID (use `list_cronjobs` to find the ID)
|
||||
|
||||
## Job Storage
|
||||
|
||||
Jobs are stored as JSON in `~/.hermes/cron/jobs.json`. Output from job runs is saved to `~/.hermes/cron/output/{job_id}/{timestamp}.md`.
|
||||
|
||||
The storage uses atomic file writes (write to a temporary file, then rename it into place) to prevent corruption from interrupted or concurrent writes.
|
||||
|
||||
## Self-Contained Prompts
|
||||
## Self-contained prompts still matter
|
||||
|
||||
:::warning Important
|
||||
Cron job prompts run in a **completely fresh agent session** with zero memory of any prior conversation. The prompt must contain **everything** the agent needs:
|
||||
|
||||
- Full context and background
|
||||
- Specific file paths, URLs, server addresses
|
||||
- Clear instructions and success criteria
|
||||
- Any credentials or configuration details
|
||||
Cron jobs run in a completely fresh agent session. The prompt must contain everything the agent needs that is not already provided by attached skills.
|
||||
:::
|
||||
|
||||
**BAD:** `"Check on that server issue"`
|
||||
|
||||
**GOOD:** `"SSH into server 192.168.1.100 as user 'deploy', check if nginx is running with 'systemctl status nginx', and verify https://example.com returns HTTP 200."`
|
||||
:::
|
||||
|
||||
## Security
|
||||
|
||||
:::warning
|
||||
Scheduled task prompts are scanned for instruction-override patterns (prompt injection). Jobs matching threat patterns like credential exfiltration, SSH backdoor attempts, or prompt injection are blocked at creation time. Content with invisible Unicode characters (zero-width spaces, directional overrides) is also rejected.
|
||||
:::
|
||||
Scheduled task prompts are scanned for prompt-injection and credential-exfiltration patterns at creation and update time. Prompts containing invisible Unicode tricks, SSH backdoor attempts, or obvious secret-exfiltration payloads are blocked.
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ High-level categories:
|
|||
| **Media** | `vision_analyze`, `image_generate`, `text_to_speech` | Multimodal analysis and generation. |
|
||||
| **Agent orchestration** | `todo`, `clarify`, `execute_code`, `delegate_task` | Planning, clarification, code execution, and subagent delegation. |
|
||||
| **Memory & recall** | `memory`, `session_search`, `honcho_*` | Persistent memory, session search, and Honcho cross-session context. |
|
||||
| **Automation & delivery** | `schedule_cronjob`, `send_message` | Scheduled tasks and outbound messaging delivery. |
|
||||
| **Automation & delivery** | `cronjob`, `send_message` | Scheduled tasks with create/list/update/pause/resume/run/remove actions, plus outbound messaging delivery. |
|
||||
| **Integrations** | `ha_*`, MCP server tools, `rl_*` | Home Assistant, MCP, RL training, and other integrations. |
|
||||
|
||||
For the authoritative code-derived registry, see [Built-in Tools Reference](/docs/reference/tools-reference) and [Toolsets Reference](/docs/reference/toolsets-reference).
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ If you want a practical setup walkthrough with recommended configurations and re
|
|||
Before using voice features, make sure you have:
|
||||
|
||||
1. **Hermes Agent installed** — `pip install hermes-agent` (see [Installation](/docs/getting-started/installation))
|
||||
2. **An LLM provider configured** — set `OPENAI_API_KEY`, `OPENAI_BASE_URL`, and `LLM_MODEL` in `~/.hermes/.env`
|
||||
2. **An LLM provider configured** — run `hermes model` or set your preferred provider credentials in `~/.hermes/.env`
|
||||
3. **A working base setup** — run `hermes` to verify the agent responds to text before enabling voice
|
||||
|
||||
:::tip
|
||||
|
|
|
|||
|
|
@ -210,7 +210,7 @@ Replace the ID with the actual channel ID (right-click → Copy Channel ID with
|
|||
|
||||
Hermes Agent supports Discord voice messages:
|
||||
|
||||
- **Incoming voice messages** are automatically transcribed using Whisper (requires `GROQ_API_KEY` or `VOICE_TOOLS_OPENAI_KEY` to be set in your environment).
|
||||
- **Incoming voice messages** are automatically transcribed using the configured STT provider: local `faster-whisper` (no key), Groq Whisper (`GROQ_API_KEY`), or OpenAI Whisper (`VOICE_TOOLS_OPENAI_KEY`).
|
||||
- **Text-to-speech**: Use `/voice tts` to have the bot send spoken audio responses alongside text replies.
|
||||
- **Discord voice channels**: Hermes can also join a voice channel, listen to users speaking, and talk back in the channel.
|
||||
|
||||
|
|
|
|||
|
|
@ -13,27 +13,27 @@ For the full voice feature set — including CLI microphone mode, spoken replies
|
|||
## Architecture
|
||||
|
||||
```text
|
||||
┌───────────────────────────────────────────────────────────────────────────────┐
|
||||
│ Hermes Gateway │
|
||||
├───────────────────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ ┌──────────┐ ┌─────────┐ ┌──────────┐ ┌───────┐ ┌───────┐ ┌───────┐ ┌────┐ │
|
||||
│ │ Telegram │ │ Discord │ │ WhatsApp │ │ Slack │ │Signal │ │ Email │ │ HA │ │
|
||||
│ │ Adapter │ │ Adapter │ │ Adapter │ │Adapter│ │Adapter│ │Adapter│ │Adpt│ │
|
||||
│ └────┬─────┘ └────┬────┘ └────┬─────┘ └──┬────┘ └──┬────┘ └──┬────┘ └─┬──┘ │
|
||||
│ │ │ │ │ │ │ │ │
|
||||
│ └─────────────┴───────────┴───────────┴─────────┴─────────┴────────┘ │
|
||||
│ │ │
|
||||
│ ┌────────▼────────┐ │
|
||||
│ │ Session Store │ │
|
||||
│ │ (per-chat) │ │
|
||||
│ └────────┬────────┘ │
|
||||
│ │ │
|
||||
│ ┌────────▼────────┐ │
|
||||
│ │ AIAgent │ │
|
||||
│ │ (run_agent) │ │
|
||||
│ └─────────────────┘ │
|
||||
│ │
|
||||
┌───────────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ Hermes Gateway │
|
||||
├───────────────────────────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ ┌──────────┐ ┌─────────┐ ┌──────────┐ ┌───────┐ ┌───────┐ ┌───────┐ ┌────┐ │
|
||||
│ │ Telegram │ │ Discord │ │ WhatsApp │ │ Slack │ │Signal │ │ Email │ │ HA │ │
|
||||
│ │ Adapter │ │ Adapter │ │ Adapter │ │Adapter│ │Adapter│ │Adapter│ │Adpt│ │
|
||||
│ └────┬─────┘ └────┬────┘ └────┬─────┘ └──┬────┘ └──┬────┘ └──┬────┘ └─┬──┘ │
|
||||
│ │ │ │ │ │ │ │ │
|
||||
│ └─────────────┴───────────┴───────────┴─────────┴─────────┴────────┘ │
|
||||
│ │ │
|
||||
│ ┌────────▼────────┐ │
|
||||
│ │ Session Store │ │
|
||||
│ │ (per-chat) │ │
|
||||
│ └────────┬────────┘ │
|
||||
│ │ │
|
||||
│ ┌────────▼────────┐ │
|
||||
│ │ AIAgent │ │
|
||||
│ │ (run_agent) │ │
|
||||
│ └─────────────────┘ │
|
||||
│ │
|
||||
└───────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
|
|
|
|||
|
|
@ -224,7 +224,7 @@ Make sure the bot has been **invited to the channel** (`/invite @Hermes Agent`).
|
|||
|
||||
Hermes supports voice on Slack:
|
||||
|
||||
- **Incoming:** Voice/audio messages are automatically transcribed using Whisper (requires `VOICE_TOOLS_OPENAI_KEY`)
|
||||
- **Incoming:** Voice/audio messages are automatically transcribed using the configured STT provider: local `faster-whisper`, Groq Whisper (`GROQ_API_KEY`), or OpenAI Whisper (`VOICE_TOOLS_OPENAI_KEY`)
|
||||
- **Outgoing:** TTS responses are sent as audio file attachments
|
||||
|
||||
---
|
||||
|
|
|
|||
|
|
@ -131,7 +131,11 @@ Group chat IDs are negative numbers (e.g., `-1001234567890`). Your personal DM c
|
|||
|
||||
### Incoming Voice (Speech-to-Text)
|
||||
|
||||
Voice messages you send on Telegram are automatically transcribed using OpenAI's Whisper API and injected as text into the conversation. This requires `VOICE_TOOLS_OPENAI_KEY` in `~/.hermes/.env`.
|
||||
Voice messages you send on Telegram are automatically transcribed by Hermes's configured STT provider and injected as text into the conversation.
|
||||
|
||||
- `local` uses `faster-whisper` on the machine running Hermes — no API key required
|
||||
- `groq` uses Groq Whisper and requires `GROQ_API_KEY`
|
||||
- `openai` uses OpenAI Whisper and requires `VOICE_TOOLS_OPENAI_KEY`
|
||||
|
||||
### Outgoing Voice (Text-to-Speech)
|
||||
|
||||
|
|
@ -173,7 +177,7 @@ Hermes Agent works in Telegram group chats with a few considerations:
|
|||
| Bot not responding at all | Verify `TELEGRAM_BOT_TOKEN` is correct. Check `hermes gateway` logs for errors. |
|
||||
| Bot responds with "unauthorized" | Your user ID is not in `TELEGRAM_ALLOWED_USERS`. Double-check with @userinfobot. |
|
||||
| Bot ignores group messages | Privacy mode is likely on. Disable it (Step 3) or make the bot a group admin. **Remember to remove and re-add the bot after changing privacy.** |
|
||||
| Voice messages not transcribed | Check that `VOICE_TOOLS_OPENAI_KEY` is set and valid in `~/.hermes/.env`. |
|
||||
| Voice messages not transcribed | Verify STT is available: install `faster-whisper` for local transcription, or set `GROQ_API_KEY` / `VOICE_TOOLS_OPENAI_KEY` in `~/.hermes/.env`. |
|
||||
| Voice replies are files, not bubbles | Install `ffmpeg` (needed for Edge TTS Opus conversion). |
|
||||
| Bot token revoked/invalid | Generate a new token via `/revoke` then `/newbot` or `/token` in BotFather. Update your `.env` file. |
|
||||
|
||||
|
|
|
|||
|
|
@ -137,7 +137,7 @@ with reconnection logic.
|
|||
|
||||
Hermes supports voice on WhatsApp:
|
||||
|
||||
- **Incoming:** Voice messages (`.ogg` opus) are automatically transcribed using Whisper (requires `VOICE_TOOLS_OPENAI_KEY`)
|
||||
- **Incoming:** Voice messages (`.ogg` opus) are automatically transcribed using the configured STT provider: local `faster-whisper`, Groq Whisper (`GROQ_API_KEY`), or OpenAI Whisper (`VOICE_TOOLS_OPENAI_KEY`)
|
||||
- **Outgoing:** TTS responses are sent as MP3 audio file attachments
|
||||
- Agent responses are prefixed with "⚕ **Hermes Agent**" for easy identification
|
||||
|
||||
|
|
|
|||
|
|
@ -76,6 +76,7 @@ const sidebars: SidebarsConfig = {
|
|||
type: 'category',
|
||||
label: 'Web & Media',
|
||||
items: [
|
||||
'user-guide/features/voice-mode',
|
||||
'user-guide/features/browser',
|
||||
'user-guide/features/vision',
|
||||
'user-guide/features/image-generation',
|
||||
|
|
@ -109,6 +110,7 @@ const sidebars: SidebarsConfig = {
|
|||
'developer-guide/architecture',
|
||||
'developer-guide/agent-loop',
|
||||
'developer-guide/provider-runtime',
|
||||
'developer-guide/adding-providers',
|
||||
'developer-guide/prompt-assembly',
|
||||
'developer-guide/context-compression-and-caching',
|
||||
'developer-guide/gateway-internals',
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue