Add platform-specific formatting hints and identity for AIAgent

- Introduced a default agent identity prompt to ensure consistent behavior across platforms.
- Added platform-specific formatting hints for CLI, WhatsApp, Telegram, and Discord to guide the agent's output style.
- Updated the AIAgent initialization to accept a platform parameter, enhancing adaptability to different interfaces.
This commit is contained in:
teknium1 2026-02-12 16:11:16 -08:00
parent 3099a2f53c
commit 84718d183a
3 changed files with 78 additions and 28 deletions

1
cli.py
View file

@@ -662,6 +662,7 @@ class HermesCLI:
quiet_mode=True, # Suppress verbose output for clean CLI quiet_mode=True, # Suppress verbose output for clean CLI
ephemeral_system_prompt=self.system_prompt if self.system_prompt else None, ephemeral_system_prompt=self.system_prompt if self.system_prompt else None,
session_id=self.session_id, # Pass CLI's session ID to agent session_id=self.session_id, # Pass CLI's session ID to agent
platform="cli", # CLI interface — agent uses terminal-friendly formatting
) )
return True return True
except Exception as e: except Exception as e:

View file

@@ -529,6 +529,10 @@ class GatewayRunner:
# Read from env var or use default (same as CLI) # Read from env var or use default (same as CLI)
max_iterations = int(os.getenv("HERMES_MAX_ITERATIONS", "60")) max_iterations = int(os.getenv("HERMES_MAX_ITERATIONS", "60"))
# Map platform enum to the platform hint key the agent understands.
# Platform.LOCAL ("local") maps to "cli"; others pass through as-is.
platform_key = "cli" if source.platform == Platform.LOCAL else source.platform.value
agent = AIAgent( agent = AIAgent(
model=os.getenv("HERMES_MODEL", "anthropic/claude-opus-4.6"), model=os.getenv("HERMES_MODEL", "anthropic/claude-opus-4.6"),
max_iterations=max_iterations, max_iterations=max_iterations,
@@ -537,6 +541,7 @@ class GatewayRunner:
ephemeral_system_prompt=context_prompt, ephemeral_system_prompt=context_prompt,
session_id=session_id, session_id=session_id,
tool_progress_callback=progress_callback if tool_progress_enabled else None, tool_progress_callback=progress_callback if tool_progress_enabled else None,
platform=platform_key, # Tells the agent which interface to format for
) )
# Store agent reference for interrupt support # Store agent reference for interrupt support

View file

@@ -54,6 +54,41 @@ from tools.browser_tool import cleanup_browser
import requests import requests
# =============================================================================
# Default Agent Identity & Platform Hints
# =============================================================================
# Baseline identity prompt, always placed first in the system prompt so the
# agent presents itself consistently no matter which platform it runs on.
# Sentences are kept as separate fragments for readable diffs and joined with
# single spaces into one flat prompt string.
DEFAULT_AGENT_IDENTITY = " ".join([
    "You are Hermes Agent, an intelligent AI assistant created by Nous Research.",
    "You are helpful, knowledgeable, and direct. You assist users with a wide",
    "range of tasks including answering questions, writing and editing code,",
    "analyzing information, creative work, and executing actions via your tools.",
    "You communicate clearly, admit uncertainty when appropriate, and prioritize",
    "being genuinely useful over being verbose unless otherwise directed below.",
])
# Shared hint for plain-text messaging platforms where markdown is not
# rendered; the platform's display name is substituted in.
_NO_MARKDOWN_HINT = (
    "You are on a text messaging communication platform, {platform}. "
    "Please do not use markdown as it does not render."
)

# Per-interface formatting hints appended to the system prompt. Keys are the
# lowercase platform identifiers the agent receives; values are the hint text
# telling the model how to format output for that interface.
PLATFORM_HINTS = {
    **{
        key: _NO_MARKDOWN_HINT.format(platform=label)
        for key, label in (("whatsapp", "WhatsApp"), ("telegram", "Telegram"))
    },
    "discord": (
        "You are in a Discord server or group chat communicating with your user."
    ),
    "cli": (
        "You are a CLI AI Agent. Try not to use markdown but simple text "
        "renderable inside a terminal."
    ),
}
# ============================================================================= # =============================================================================
# Model Context Management # Model Context Management
# ============================================================================= # =============================================================================
@@ -977,6 +1012,7 @@ class AIAgent:
max_tokens: int = None, max_tokens: int = None,
reasoning_config: Dict[str, Any] = None, reasoning_config: Dict[str, Any] = None,
prefill_messages: List[Dict[str, Any]] = None, prefill_messages: List[Dict[str, Any]] = None,
platform: str = None,
): ):
""" """
Initialize the AI Agent. Initialize the AI Agent.
@@ -1007,6 +1043,8 @@ class AIAgent:
prefill_messages (List[Dict]): Messages to prepend to conversation history as prefilled context. prefill_messages (List[Dict]): Messages to prepend to conversation history as prefilled context.
Useful for injecting a few-shot example or priming the model's response style. Useful for injecting a few-shot example or priming the model's response style.
Example: [{"role": "user", "content": "Hi!"}, {"role": "assistant", "content": "Hello!"}] Example: [{"role": "user", "content": "Hi!"}, {"role": "assistant", "content": "Hello!"}]
platform (str): The interface platform the user is on (e.g. "cli", "telegram", "discord", "whatsapp").
Used to inject platform-specific formatting hints into the system prompt.
""" """
self.model = model self.model = model
self.max_iterations = max_iterations self.max_iterations = max_iterations
@@ -1015,6 +1053,7 @@ class AIAgent:
self.verbose_logging = verbose_logging self.verbose_logging = verbose_logging
self.quiet_mode = quiet_mode self.quiet_mode = quiet_mode
self.ephemeral_system_prompt = ephemeral_system_prompt self.ephemeral_system_prompt = ephemeral_system_prompt
self.platform = platform # "cli", "telegram", "discord", "whatsapp", etc.
self.log_prefix_chars = log_prefix_chars self.log_prefix_chars = log_prefix_chars
self.log_prefix = f"{log_prefix} " if log_prefix else "" self.log_prefix = f"{log_prefix} " if log_prefix else ""
# Store effective base URL for feature detection (prompt caching, reasoning, etc.) # Store effective base URL for feature detection (prompt caching, reasoning, etc.)
@@ -1955,40 +1994,45 @@ class AIAgent:
if not self.quiet_mode: if not self.quiet_mode:
print(f"💬 Starting conversation: '{user_message[:60]}{'...' if len(user_message) > 60 else ''}'") print(f"💬 Starting conversation: '{user_message[:60]}{'...' if len(user_message) > 60 else ''}'")
# Determine which system prompt to use for API calls (ephemeral) # ── Build the full system prompt ──
# Priority: explicit system_message > ephemeral_system_prompt > None # Layers (in order):
base_system_prompt = system_message if system_message is not None else self.ephemeral_system_prompt # 1. Default agent identity (always present)
# 2. User / gateway system prompt (if provided)
# Auto-include skills guidance if skills tools are available # 3. Skills guidance (if skills tools are loaded)
# Embeds a compact category:names index so the model can match skills # 4. Context files (SOUL.md, AGENTS.md, .cursorrules)
# at a glance and load with a single skill_view(name) call. # 5. Current date & time
# 6. Platform-specific formatting hint
prompt_parts = [DEFAULT_AGENT_IDENTITY]
# Layer in the caller-supplied system prompt (explicit > ephemeral).
caller_prompt = system_message if system_message is not None else self.ephemeral_system_prompt
if caller_prompt:
prompt_parts.append(caller_prompt)
# Auto-include skills guidance if skills tools are available.
has_skills_tools = any(name in self.valid_tool_names for name in ['skills_list', 'skill_view']) has_skills_tools = any(name in self.valid_tool_names for name in ['skills_list', 'skill_view'])
skills_prompt = build_skills_system_prompt() if has_skills_tools else "" skills_prompt = build_skills_system_prompt() if has_skills_tools else ""
if skills_prompt: if skills_prompt:
if base_system_prompt: prompt_parts.append(skills_prompt)
active_system_prompt = f"{base_system_prompt}\n\n{skills_prompt}"
else: # Auto-include context files (SOUL.md, AGENTS.md, .cursorrules).
active_system_prompt = skills_prompt
else:
active_system_prompt = base_system_prompt
# Auto-include context files (SOUL.md, AGENTS.md, .cursorrules)
# Discovered from cwd and injected as # Project Context sections.
context_files_prompt = build_context_files_prompt() context_files_prompt = build_context_files_prompt()
if context_files_prompt: if context_files_prompt:
if active_system_prompt: prompt_parts.append(context_files_prompt)
active_system_prompt = f"{active_system_prompt}\n\n{context_files_prompt}"
else: # Current local date and time so the model is never confused about
active_system_prompt = context_files_prompt # what day/time it is (LLM training cutoffs can otherwise mislead it).
# Append the current local date and time so the model knows what
# day/time it is (LLM training cutoffs can otherwise mislead it).
now = datetime.now() now = datetime.now()
timestamp_line = f"Current local date and time: {now.strftime('%A, %B %d, %Y %I:%M %p')}" prompt_parts.append(
if active_system_prompt: f"Current local date and time: {now.strftime('%A, %B %d, %Y %I:%M %p')}"
active_system_prompt = f"{active_system_prompt}\n\n{timestamp_line}" )
else:
active_system_prompt = timestamp_line # Platform-specific formatting hint (no markdown on WhatsApp, etc.).
platform_key = (self.platform or "").lower().strip()
if platform_key in PLATFORM_HINTS:
prompt_parts.append(PLATFORM_HINTS[platform_key])
active_system_prompt = "\n\n".join(prompt_parts)
# Main conversation loop # Main conversation loop
api_call_count = 0 api_call_count = 0