The architecture has been updated

This commit is contained in:
Skyber_2 2026-03-31 23:31:36 +03:00
parent 805f7a017e
commit a01257ead9
1119 changed files with 226 additions and 352 deletions

View file

@ -0,0 +1,389 @@
#!/usr/bin/env python3
"""Discord Voice Doctor — diagnostic tool for voice channel support.
Checks all dependencies, configuration, and bot permissions needed
for Discord voice mode to work correctly.
Usage:
python scripts/discord-voice-doctor.py
.venv/bin/python scripts/discord-voice-doctor.py
"""
import os
import sys
import shutil
from pathlib import Path
# Resolve project root
SCRIPT_DIR = Path(__file__).resolve().parent
PROJECT_ROOT = SCRIPT_DIR.parent
sys.path.insert(0, str(PROJECT_ROOT))
HERMES_HOME = Path(os.getenv("HERMES_HOME", Path.home() / ".hermes"))
ENV_FILE = HERMES_HOME / ".env"
OK = "\033[92m\u2713\033[0m"
FAIL = "\033[91m\u2717\033[0m"
WARN = "\033[93m!\033[0m"
# Track whether discord.py is available for later sections
_discord_available = False
def mask(value):
    """Return *value* with everything past the first 4 chars starred out.

    Values that are empty/None or shorter than 8 chars are fully masked
    so that very short secrets never leak a prefix.
    """
    if value and len(value) >= 8:
        return value[:4] + "*" * (len(value) - 4)
    return "****"
def check(label, ok, detail=""):
    """Print a pass/fail line for *label* and return *ok* unchanged.

    The returned flag lets callers write `ok = check(...)` inline.
    """
    parts = [f" {OK if ok else FAIL} {label}"]
    if detail:
        parts.append(f"({detail})")
    print(" ".join(parts))
    return ok
def warn(label, detail=""):
    """Print a yellow non-fatal warning line for *label*."""
    suffix = f" ({detail})" if detail else ""
    print(f" {WARN} {label}{suffix}")
def section(title):
    """Print *title* as a bold section header preceded by a blank line."""
    print("\n\033[1m" + title + "\033[0m")
def check_packages():
    """Check Python package dependencies. Returns True if all critical deps OK.

    Critical (failure flips the return value): discord.py, PyNaCl >= 1.5.0
    (probed via the Aead class), davey. Optional (warn only): faster-whisper,
    edge-tts, elevenlabs.
    Side effect: sets the module-level _discord_available flag for later
    sections (the opus check needs discord.py importable).
    """
    global _discord_available
    section("Python Packages")
    ok = True
    # discord.py — required for any Discord functionality
    try:
        import discord
        _discord_available = True
        check("discord.py", True, f"v{discord.__version__}")
    except ImportError:
        check("discord.py", False, "pip install discord.py[voice]")
        ok = False
    # PyNaCl — voice encryption; the Aead class only exists in >= 1.5.0,
    # so instantiating it with a 32-byte key doubles as a version probe.
    try:
        import nacl
        ver = getattr(nacl, "__version__", "unknown")
        try:
            import nacl.secret
            nacl.secret.Aead(bytes(32))
            check("PyNaCl", True, f"v{ver}")
        except Exception:
            # FIX: was `except (AttributeError, Exception)` — Exception already
            # covers AttributeError, so the tuple was redundant.
            check("PyNaCl (Aead)", False, f"v{ver} — need >=1.5.0")
            ok = False
    except ImportError:
        check("PyNaCl", False, "pip install PyNaCl>=1.5.0")
        ok = False
    # davey (DAVE E2EE)
    try:
        import davey
        check("davey (DAVE E2EE)", True, f"v{getattr(davey, '__version__', '?')}")
    except ImportError:
        check("davey (DAVE E2EE)", False, "pip install davey")
        ok = False
    # Optional: local STT
    try:
        import faster_whisper
        check("faster-whisper (local STT)", True)
    except ImportError:
        warn("faster-whisper (local STT)", "not installed — local STT unavailable")
    # Optional: TTS providers
    try:
        import edge_tts
        check("edge-tts", True)
    except ImportError:
        warn("edge-tts", "not installed — edge TTS unavailable")
    try:
        import elevenlabs
        check("elevenlabs SDK", True)
    except ImportError:
        warn("elevenlabs SDK", "not installed — premium TTS unavailable")
    return ok
def check_system_tools():
    """Check system-level tools (opus, ffmpeg). Returns True if all OK."""
    section("System Tools")
    all_good = True
    # Opus codec — only testable when discord.py could be imported earlier
    if not _discord_available:
        warn("Opus codec", "skipped — discord.py not installed")
    else:
        try:
            import discord
            if not discord.opus.is_loaded():
                import ctypes.util
                lib = ctypes.util.find_library("opus")
                if not lib:
                    # Well-known install locations per platform
                    fallbacks = (
                        "/opt/homebrew/lib/libopus.dylib",  # macOS Apple Silicon
                        "/usr/local/lib/libopus.dylib",  # macOS Intel
                        "/usr/lib/x86_64-linux-gnu/libopus.so.0",  # Debian/Ubuntu x86
                        "/usr/lib/aarch64-linux-gnu/libopus.so.0",  # Debian/Ubuntu ARM
                        "/usr/lib/libopus.so",  # Arch Linux
                        "/usr/lib64/libopus.so",  # RHEL/Fedora
                    )
                    lib = next((c for c in fallbacks if os.path.isfile(c)), None)
                if lib:
                    discord.opus.load_opus(lib)
            if discord.opus.is_loaded():
                check("Opus codec", True)
            else:
                check("Opus codec", False, "brew install opus / apt install libopus0")
                all_good = False
        except Exception as exc:
            check("Opus codec", False, str(exc))
            all_good = False
    # ffmpeg
    ffmpeg_bin = shutil.which("ffmpeg")
    if ffmpeg_bin:
        check("ffmpeg", True, ffmpeg_bin)
    else:
        check("ffmpeg", False, "brew install ffmpeg / apt install ffmpeg")
        all_good = False
    return all_good
def check_env_vars():
    """Check environment variables. Returns (ok, token, groq_key, eleven_key)."""
    section("Environment Variables")
    # Pull in ~/.hermes/.env when python-dotenv is installed; otherwise rely
    # on whatever is already exported in the process environment.
    try:
        from dotenv import load_dotenv
        if ENV_FILE.exists():
            load_dotenv(ENV_FILE)
    except ImportError:
        pass
    all_set = True
    token = os.getenv("DISCORD_BOT_TOKEN", "")
    if token:
        check("DISCORD_BOT_TOKEN", True, mask(token))
    else:
        check("DISCORD_BOT_TOKEN", False, "not set")
        all_set = False
    # Allowed users — resolve usernames if possible
    raw_allowed = os.getenv("DISCORD_ALLOWED_USERS", "")
    if raw_allowed:
        ids = [part.strip() for part in raw_allowed.split(",") if part.strip()]
        labels = []
        for uid in ids:
            label = mask(uid)
            if token and uid.isdigit():
                # Best-effort lookup of the human-readable username via the
                # Discord REST API; any failure falls back to the masked id.
                try:
                    import requests
                    resp = requests.get(
                        f"https://discord.com/api/v10/users/{uid}",
                        headers={"Authorization": f"Bot {token}"},
                        timeout=3,
                    )
                    if resp.status_code == 200:
                        label = f"{resp.json().get('username', '?')} ({mask(uid)})"
                except Exception:
                    pass
            labels.append(label)
        check("DISCORD_ALLOWED_USERS", True, f"{len(ids)} user(s): {', '.join(labels)}")
    else:
        warn("DISCORD_ALLOWED_USERS", "not set — all users can use voice")
    groq_key = os.getenv("GROQ_API_KEY", "")
    eleven_key = os.getenv("ELEVENLABS_API_KEY", "")
    if groq_key:
        check("GROQ_API_KEY (STT)", True, mask(groq_key))
    else:
        warn("GROQ_API_KEY", "not set — Groq STT unavailable")
    if eleven_key:
        check("ELEVENLABS_API_KEY (TTS)", True, mask(eleven_key))
    else:
        warn("ELEVENLABS_API_KEY", "not set — ElevenLabs TTS unavailable")
    return all_set, token, groq_key, eleven_key
def check_config(groq_key, eleven_key):
    """Check hermes config.yaml and saved voice-mode state (warn-only)."""
    section("Configuration")
    cfg_file = HERMES_HOME / "config.yaml"
    if not cfg_file.exists():
        warn("config.yaml", "not found — using defaults")
    else:
        try:
            import yaml
            with open(cfg_file) as fh:
                data = yaml.safe_load(fh) or {}
            stt = data.get("stt", {}).get("provider", "local")
            tts = data.get("tts", {}).get("provider", "edge")
            check("STT provider", True, stt)
            check("TTS provider", True, tts)
            # Cross-check configured providers against the API keys
            # gathered in check_env_vars.
            if stt == "groq" and not groq_key:
                warn("STT config says groq but GROQ_API_KEY is missing")
            if tts == "elevenlabs" and not eleven_key:
                warn("TTS config says elevenlabs but ELEVENLABS_API_KEY is missing")
        except Exception as exc:
            warn("config.yaml", f"parse error: {exc}")
    # Voice mode state
    state_file = HERMES_HOME / "gateway_voice_mode.json"
    if not state_file.exists():
        check("Voice mode state", True, "no saved state (fresh)")
        return
    try:
        import json
        saved = json.loads(state_file.read_text())
        off_total = sum(1 for v in saved.values() if v == "off")
        on_total = sum(1 for v in saved.values() if v == "all")
        check("Voice mode state", True, f"{on_total} on, {off_total} off, {len(saved)} total")
    except Exception:
        warn("Voice mode state", "parse error")
def check_bot_permissions(token):
    """Check bot permissions via Discord API. Returns True if all OK.

    Skips (returning True) when no token is given or `requests` is not
    installed — those cases are reported by other sections, not this one.
    Only the first 5 guilds are inspected to keep output short.
    """
    section("Bot Permissions")
    if not token:
        warn("Bot permissions", "no token — skipping")
        return True
    try:
        import requests
    except ImportError:
        warn("Bot permissions", "requests not installed — skipping")
        return True
    # Permission name -> bit position in Discord's permissions bitfield.
    VOICE_PERMS = {
        "Priority Speaker": 8,
        "Stream": 9,
        "View Channel": 10,
        "Send Messages": 11,
        "Embed Links": 14,
        "Attach Files": 15,
        "Read Message History": 16,
        "Connect": 20,
        "Speak": 21,
        "Mute Members": 22,
        "Deafen Members": 23,
        "Move Members": 24,
        "Use VAD": 25,
        "Send Voice Messages": 46,
    }
    # Only these are treated as hard requirements; the rest are informational.
    REQUIRED_PERMS = {"Connect", "Speak", "View Channel", "Send Messages"}
    ok = True
    try:
        headers = {"Authorization": f"Bot {token}"}
        # Validates the token by fetching the bot's own user object.
        r = requests.get("https://discord.com/api/v10/users/@me", headers=headers, timeout=5)
        if r.status_code == 401:
            check("Bot login", False, "invalid token (401)")
            return False
        if r.status_code != 200:
            check("Bot login", False, f"HTTP {r.status_code}")
            return False
        bot = r.json()
        bot_name = bot.get("username", "?")
        # Mask the bot name like other secrets: first 3 chars, rest starred.
        check("Bot login", True, f"{bot_name[:3]}{'*' * (len(bot_name) - 3)}")
        # Check guilds
        r2 = requests.get("https://discord.com/api/v10/users/@me/guilds", headers=headers, timeout=5)
        if r2.status_code != 200:
            warn("Guilds", f"HTTP {r2.status_code}")
            return ok
        guilds = r2.json()
        check("Guilds", True, f"{len(guilds)} guild(s)")
        for g in guilds[:5]:
            perms = int(g.get("permissions", 0))
            # Bit 3 is Administrator, which implies every other permission.
            is_admin = bool(perms & (1 << 3))
            if is_admin:
                print(f" {OK} {g['name']}: Administrator (all permissions)")
                continue
            has = []
            missing = []
            for name, bit in sorted(VOICE_PERMS.items(), key=lambda x: x[1]):
                if perms & (1 << bit):
                    has.append(name)
                elif name in REQUIRED_PERMS:
                    missing.append(name)
            if missing:
                print(f" {FAIL} {g['name']}: missing {', '.join(missing)}")
                ok = False
            else:
                print(f" {OK} {g['name']}: {', '.join(has)}")
    except requests.exceptions.Timeout:
        warn("Bot permissions", "Discord API timeout")
    except requests.exceptions.ConnectionError:
        warn("Bot permissions", "cannot reach Discord API")
    except Exception as e:
        warn("Bot permissions", f"check failed: {e}")
    return ok
def main():
    """Run every diagnostic section and print an overall verdict."""
    divider = "\033[1m" + "=" * 50 + "\033[0m"
    print()
    print(divider)
    print("\033[1m Discord Voice Doctor\033[0m")
    print(divider)
    healthy = True
    healthy &= check_packages()
    healthy &= check_system_tools()
    env_ok, token, groq_key, eleven_key = check_env_vars()
    healthy &= env_ok
    # Config problems only warn, so they do not affect the verdict.
    check_config(groq_key, eleven_key)
    healthy &= check_bot_permissions(token)
    # Summary
    print()
    print("\033[1m" + "-" * 50 + "\033[0m")
    if healthy:
        print(f" {OK} \033[92mAll checks passed — voice mode ready!\033[0m")
    else:
        print(f" {FAIL} \033[91mSome checks failed — fix issues above.\033[0m")
    print()
if __name__ == "__main__":
    main()

View file

@ -0,0 +1,416 @@
#!/usr/bin/env python3
"""
Hermes Gateway - Standalone messaging platform integration.
This is the proper entry point for running the gateway as a service.
NOT tied to the CLI - runs independently.
Usage:
# Run in foreground (for testing)
./scripts/hermes-gateway
# Install as systemd service
./scripts/hermes-gateway install
# Manage the service
./scripts/hermes-gateway start
./scripts/hermes-gateway stop
./scripts/hermes-gateway restart
./scripts/hermes-gateway status
# Uninstall
./scripts/hermes-gateway uninstall
"""
import argparse
import asyncio
import os
import subprocess
import sys
from pathlib import Path
# Add parent directory to path
SCRIPT_DIR = Path(__file__).parent.resolve()
PROJECT_DIR = SCRIPT_DIR.parent
sys.path.insert(0, str(PROJECT_DIR))
# Load .env file
from dotenv import load_dotenv
# Environment variables (bot tokens, etc.) live in <project>/.env;
# loading is skipped silently when the file is absent.
env_path = PROJECT_DIR / '.env'
if env_path.exists():
    load_dotenv(dotenv_path=env_path)
# =============================================================================
# Service Configuration
# =============================================================================
SERVICE_NAME = "hermes-gateway"
SERVICE_DESCRIPTION = "Hermes Agent Gateway - Messaging Platform Integration"
def get_systemd_unit_path() -> Path:
    """Location of the per-user systemd unit file for this service."""
    base = Path.home() / ".config" / "systemd" / "user"
    return base / f"{SERVICE_NAME}.service"
def get_launchd_plist_path() -> Path:
    """Get the path for the launchd plist file (macOS).

    Returns:
        ~/Library/LaunchAgents/ai.hermes.gateway.plist
    """
    # The label is a fixed constant; the original used an f-string with no
    # placeholders, so the pointless f-prefix is dropped.
    return Path.home() / "Library" / "LaunchAgents" / "ai.hermes.gateway.plist"
def get_python_path() -> str:
    """Interpreter to run the gateway with — project venv when present,
    otherwise the interpreter running this script."""
    candidate = PROJECT_DIR / "venv" / "bin" / "python"
    return str(candidate) if candidate.exists() else sys.executable
def get_gateway_script_path() -> str:
    """Absolute, symlink-resolved path of this script (for service files)."""
    here = Path(__file__).resolve()
    return str(here)
# =============================================================================
# Systemd Service (Linux)
# =============================================================================
def generate_systemd_unit() -> str:
    """Render the systemd user-unit text for the gateway service."""
    interp = get_python_path()
    script = get_gateway_script_path()
    workdir = str(PROJECT_DIR)
    # Restart=on-failure with a 5-per-10-min burst limit keeps a crashing
    # gateway from flapping forever.
    return f"""[Unit]
Description={SERVICE_DESCRIPTION}
After=network.target
StartLimitIntervalSec=600
StartLimitBurst=5
[Service]
Type=simple
ExecStart={interp} {script} run
WorkingDirectory={workdir}
Restart=on-failure
RestartSec=30
StandardOutput=journal
StandardError=journal
# Environment (optional - can also use .env file)
# Environment="TELEGRAM_BOT_TOKEN=your_token"
# Environment="DISCORD_BOT_TOKEN=your_token"
[Install]
WantedBy=default.target
"""
def install_systemd():
    """Install and enable the systemd user service.

    Writes the unit file, reloads systemd, enables the service for boot,
    and prints follow-up instructions. Raises CalledProcessError when a
    systemctl invocation fails (check=True).
    """
    unit_path = get_systemd_unit_path()
    unit_path.parent.mkdir(parents=True, exist_ok=True)
    print(f"Installing systemd service to: {unit_path}")
    unit_path.write_text(generate_systemd_unit())
    # Reload systemd so it picks up the new unit file
    subprocess.run(["systemctl", "--user", "daemon-reload"], check=True)
    # Enable the service (start on boot)
    subprocess.run(["systemctl", "--user", "enable", SERVICE_NAME], check=True)
    # Plain strings below: the originals were f-strings with no placeholders.
    print("✓ Service installed and enabled")
    print()
    print("To start the service:")
    print(f" systemctl --user start {SERVICE_NAME}")
    print()
    print("To view logs:")
    print(f" journalctl --user -u {SERVICE_NAME} -f")
    print()
    print("To enable lingering (keeps service running after logout):")
    print(" sudo loginctl enable-linger $USER")
def uninstall_systemd():
    """Stop, disable, and remove the systemd user service."""
    unit_path = get_systemd_unit_path()
    # Stop and disable first (check=False: the service may not be running)
    subprocess.run(["systemctl", "--user", "stop", SERVICE_NAME], check=False)
    subprocess.run(["systemctl", "--user", "disable", SERVICE_NAME], check=False)
    # Remove the unit file
    if unit_path.exists():
        unit_path.unlink()
        print(f"✓ Removed {unit_path}")
    # Reload systemd
    subprocess.run(["systemctl", "--user", "daemon-reload"], check=True)
    print("✓ Service uninstalled")  # f-prefix dropped: no placeholders
def systemd_status():
    """Show systemd service status (systemctl's exit code is ignored)."""
    cmd = ["systemctl", "--user", "status", SERVICE_NAME]
    subprocess.run(cmd)
def systemd_start():
    """Start the systemd service; raises CalledProcessError on failure."""
    subprocess.run(["systemctl", "--user", "start", SERVICE_NAME], check=True)
    print("✓ Service started")  # f-prefix dropped: no placeholders
def systemd_stop():
    """Stop the systemd service; raises CalledProcessError on failure."""
    subprocess.run(["systemctl", "--user", "stop", SERVICE_NAME], check=True)
    print("✓ Service stopped")  # f-prefix dropped: no placeholders
def systemd_restart():
    """Restart the systemd service; raises CalledProcessError on failure."""
    subprocess.run(["systemctl", "--user", "restart", SERVICE_NAME], check=True)
    print("✓ Service restarted")  # f-prefix dropped: no placeholders
# =============================================================================
# Launchd Service (macOS)
# =============================================================================
def generate_launchd_plist() -> str:
    """Generate the launchd plist file content.

    KeepAlive with SuccessfulExit=false relaunches the gateway after a
    crash but not after a clean exit; stdout/stderr are redirected to
    ~/.hermes/logs (directory created by install_launchd).
    """
    python_path = get_python_path()
    script_path = get_gateway_script_path()
    working_dir = str(PROJECT_DIR)
    log_dir = Path.home() / ".hermes" / "logs"
    return f"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>ai.hermes.gateway</string>
<key>ProgramArguments</key>
<array>
<string>{python_path}</string>
<string>{script_path}</string>
<string>run</string>
</array>
<key>WorkingDirectory</key>
<string>{working_dir}</string>
<key>RunAtLoad</key>
<true/>
<key>KeepAlive</key>
<dict>
<key>SuccessfulExit</key>
<false/>
</dict>
<key>StandardOutPath</key>
<string>{log_dir}/gateway.log</string>
<key>StandardErrorPath</key>
<string>{log_dir}/gateway.error.log</string>
<key>EnvironmentVariables</key>
<dict>
<key>PATH</key>
<string>/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin</string>
</dict>
</dict>
</plist>
"""
def install_launchd():
    """Install the launchd service (macOS).

    Writes the plist, ensures the log directory exists, and loads the job
    via launchctl (raises CalledProcessError on failure).
    """
    plist_path = get_launchd_plist_path()
    plist_path.parent.mkdir(parents=True, exist_ok=True)
    # Ensure log directory exists (the plist points stdout/stderr there)
    log_dir = Path.home() / ".hermes" / "logs"
    log_dir.mkdir(parents=True, exist_ok=True)
    print(f"Installing launchd service to: {plist_path}")
    plist_path.write_text(generate_launchd_plist())
    # Load the service
    subprocess.run(["launchctl", "load", str(plist_path)], check=True)
    # Plain strings below: the originals were f-strings with no placeholders.
    print("✓ Service installed and loaded")
    print()
    print("To view logs:")
    print(" tail -f ~/.hermes/logs/gateway.log")
    print()
    print("To manage the service:")
    print(" launchctl start ai.hermes.gateway")
    print(" launchctl stop ai.hermes.gateway")
def uninstall_launchd():
    """Uninstall the launchd service (macOS)."""
    plist_path = get_launchd_plist_path()
    # Unload first (check=False: the job may not be loaded)
    subprocess.run(["launchctl", "unload", str(plist_path)], check=False)
    # Remove the plist file
    if plist_path.exists():
        plist_path.unlink()
        print(f"✓ Removed {plist_path}")
    print("✓ Service uninstalled")  # f-prefix dropped: no placeholders
def launchd_status():
    """Show launchd job info for the gateway (exit code ignored)."""
    label = "ai.hermes.gateway"
    subprocess.run(["launchctl", "list", label])
def launchd_start():
    """Start the launchd service; raises CalledProcessError on failure."""
    subprocess.run(["launchctl", "start", "ai.hermes.gateway"], check=True)
    print("✓ Service started")  # f-prefix dropped: no placeholders
def launchd_stop():
    """Stop the launchd service; raises CalledProcessError on failure."""
    subprocess.run(["launchctl", "stop", "ai.hermes.gateway"], check=True)
    print("✓ Service stopped")  # f-prefix dropped: no placeholders
def launchd_restart():
    """Restart by stopping then starting (launchctl has no restart verb)."""
    launchd_stop()
    launchd_start()
# =============================================================================
# Platform Detection
# =============================================================================
def is_linux() -> bool:
    """True when running on any Linux platform."""
    return sys.platform.startswith("linux")
def is_macos() -> bool:
    """True when running on macOS (Darwin)."""
    return sys.platform == "darwin"
def is_windows() -> bool:
    """True when running on native Windows."""
    return sys.platform == "win32"
# =============================================================================
# Gateway Runner
# =============================================================================
def run_gateway():
    """Run the gateway in the foreground until interrupted."""
    # Imported lazily so the service-management commands work even when
    # the gateway package's own dependencies are not importable.
    from gateway.run import start_gateway
    print("Starting Hermes Gateway...\nPress Ctrl+C to stop.\n")
    asyncio.run(start_gateway())
# =============================================================================
# Main CLI
# =============================================================================
def main():
    """CLI entry point: parse the command and dispatch per platform."""
    parser = argparse.ArgumentParser(
        description="Hermes Gateway - Messaging Platform Integration",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Run in foreground (for testing)
./scripts/hermes-gateway run
# Install as system service
./scripts/hermes-gateway install
# Manage the service
./scripts/hermes-gateway start
./scripts/hermes-gateway stop
./scripts/hermes-gateway restart
./scripts/hermes-gateway status
# Uninstall
./scripts/hermes-gateway uninstall
Configuration:
Set environment variables in .env file or system environment:
- TELEGRAM_BOT_TOKEN
- DISCORD_BOT_TOKEN
- WHATSAPP_ENABLED
Or create ~/.hermes/gateway.json for advanced configuration.
"""
    )
    parser.add_argument(
        "command",
        choices=["run", "install", "uninstall", "start", "stop", "restart", "status"],
        nargs="?",
        default="run",
        help="Command to execute (default: run)"
    )
    parser.add_argument(
        "--verbose", "-v",
        action="store_true",
        help="Verbose output"
    )
    args = parser.parse_args()
    # "run" works on every platform, so it short-circuits the dispatch table.
    if args.command == "run":
        run_gateway()
        return
    # command -> (Linux handler, macOS handler)
    handlers = {
        "install": (install_systemd, install_launchd),
        "uninstall": (uninstall_systemd, uninstall_launchd),
        "start": (systemd_start, launchd_start),
        "stop": (systemd_stop, launchd_stop),
        "restart": (systemd_restart, launchd_restart),
        "status": (systemd_status, launchd_status),
    }
    linux_fn, macos_fn = handlers[args.command]
    if is_linux():
        linux_fn()
    elif is_macos():
        macos_fn()
    else:
        # Unsupported platform: message text varies by command (matches
        # the original per-branch wording), then exit non-zero.
        if args.command == "install":
            print("Service installation not supported on this platform.")
            print("Please run manually: ./scripts/hermes-gateway run")
        elif args.command == "uninstall":
            print("Service uninstallation not supported on this platform.")
        else:
            print("Not supported on this platform.")
        sys.exit(1)
if __name__ == "__main__":
    main()

View file

@ -0,0 +1,28 @@
@echo off
REM ============================================================================
REM Hermes Agent Installer for Windows (CMD wrapper)
REM ============================================================================
REM This batch file launches the PowerShell installer for users running CMD.
REM
REM Usage:
REM curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.cmd -o install.cmd && install.cmd && del install.cmd
REM
REM Or if you're already in PowerShell, use the direct command instead:
REM irm https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.ps1 | iex
REM ============================================================================
echo.
echo Hermes Agent Installer
echo Launching PowerShell installer...
echo.
REM -NoProfile keeps user profile scripts from interfering with the install.
powershell -ExecutionPolicy ByPass -NoProfile -Command "irm https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.ps1 | iex"
REM Surface a PowerShell failure to the user and propagate a non-zero exit code.
if %ERRORLEVEL% NEQ 0 (
echo.
echo Installation failed. Please try running PowerShell directly:
echo powershell -ExecutionPolicy ByPass -c "irm https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.ps1 | iex"
echo.
pause
exit /b 1
)

View file

@ -0,0 +1,919 @@
# ============================================================================
# Hermes Agent Installer for Windows
# ============================================================================
# Installation script for Windows (PowerShell).
# Uses uv for fast Python provisioning and package management.
#
# Usage:
# irm https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.ps1 | iex
#
# Or download and run with options:
# .\install.ps1 -NoVenv -SkipSetup
#
# ============================================================================
# Command-line options for the installer.
param(
    [switch]$NoVenv,      # presumably skips venv creation — confirm against later usage
    [switch]$SkipSetup,   # presumably skips the post-install setup step — confirm
    [string]$Branch = "main",                                      # git branch to install
    [string]$HermesHome = "$env:LOCALAPPDATA\hermes",              # Hermes data root
    [string]$InstallDir = "$env:LOCALAPPDATA\hermes\hermes-agent"  # repo checkout dir
)
# Abort on the first unhandled error rather than continuing half-installed.
$ErrorActionPreference = "Stop"
# ============================================================================
# Configuration
# ============================================================================
$RepoUrlSsh = "git@github.com:NousResearch/hermes-agent.git"
$RepoUrlHttps = "https://github.com/NousResearch/hermes-agent.git"
$PythonVersion = "3.11"   # provisioned via uv when missing (see Test-Python)
$NodeVersion = "22"       # Node LTS line used for browser tools (see Test-Node)
# ============================================================================
# Helper functions
# ============================================================================
function Write-Banner {
    # Print the boxed magenta installer banner.
    Write-Host ""
    Write-Host "┌─────────────────────────────────────────────────────────┐" -ForegroundColor Magenta
    Write-Host "│ ⚕ Hermes Agent Installer │" -ForegroundColor Magenta
    Write-Host "├─────────────────────────────────────────────────────────┤" -ForegroundColor Magenta
    Write-Host "│ An open source AI agent by Nous Research. │" -ForegroundColor Magenta
    Write-Host "└─────────────────────────────────────────────────────────┘" -ForegroundColor Magenta
    Write-Host ""
}
function Write-Info {
    # Cyan informational/progress line.
    param([string]$Message)
    Write-Host "$Message" -ForegroundColor Cyan
}
function Write-Success {
    # Green success line.
    param([string]$Message)
    Write-Host "$Message" -ForegroundColor Green
}
function Write-Warn {
    # Yellow non-fatal warning line.
    param([string]$Message)
    Write-Host "$Message" -ForegroundColor Yellow
}
function Write-Err {
    # Red error line (does not itself abort the script).
    param([string]$Message)
    Write-Host "$Message" -ForegroundColor Red
}
# ============================================================================
# Dependency checks
# ============================================================================
function Install-Uv {
    # Locate uv, or install it via the official astral.sh bootstrap script.
    # On success sets $script:UvCmd to the command/path to invoke and
    # returns $true; returns $false when uv cannot be found or installed.
    Write-Info "Checking for uv package manager..."
    # Check if uv is already available
    if (Get-Command uv -ErrorAction SilentlyContinue) {
        $version = uv --version
        $script:UvCmd = "uv"
        Write-Success "uv found ($version)"
        return $true
    }
    # Check common install locations (uv's default and the older cargo path)
    $uvPaths = @(
        "$env:USERPROFILE\.local\bin\uv.exe",
        "$env:USERPROFILE\.cargo\bin\uv.exe"
    )
    foreach ($uvPath in $uvPaths) {
        if (Test-Path $uvPath) {
            $script:UvCmd = $uvPath
            $version = & $uvPath --version
            Write-Success "uv found at $uvPath ($version)"
            return $true
        }
    }
    # Install uv
    Write-Info "Installing uv (fast Python package manager)..."
    try {
        powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" 2>&1 | Out-Null
        # Find the installed binary
        $uvExe = "$env:USERPROFILE\.local\bin\uv.exe"
        if (-not (Test-Path $uvExe)) {
            $uvExe = "$env:USERPROFILE\.cargo\bin\uv.exe"
        }
        if (-not (Test-Path $uvExe)) {
            # Refresh PATH and try again (the installer updates the user PATH,
            # but this process still has the old environment)
            $env:Path = [Environment]::GetEnvironmentVariable("Path", "User") + ";" + [Environment]::GetEnvironmentVariable("Path", "Machine")
            if (Get-Command uv -ErrorAction SilentlyContinue) {
                $uvExe = (Get-Command uv).Source
            }
        }
        if (Test-Path $uvExe) {
            $script:UvCmd = $uvExe
            $version = & $uvExe --version
            Write-Success "uv installed ($version)"
            return $true
        }
        Write-Err "uv installed but not found on PATH"
        Write-Info "Try restarting your terminal and re-running"
        return $false
    } catch {
        Write-Err "Failed to install uv"
        Write-Info "Install manually: https://docs.astral.sh/uv/getting-started/installation/"
        return $false
    }
}
function Test-Python {
    # Ensure a usable Python interpreter exists, preferring $PythonVersion
    # provisioned through uv (no admin rights needed). Falls back to other
    # uv-managed versions, then the system python. Returns $true on success.
    Write-Info "Checking Python $PythonVersion..."
    # Let uv find or install Python
    try {
        $pythonPath = & $UvCmd python find $PythonVersion 2>$null
        if ($pythonPath) {
            $ver = & $pythonPath --version 2>$null
            Write-Success "Python found: $ver"
            return $true
        }
    } catch { }
    # Python not found — use uv to install it (no admin needed!)
    Write-Info "Python $PythonVersion not found, installing via uv..."
    try {
        $uvOutput = & $UvCmd python install $PythonVersion 2>&1
        if ($LASTEXITCODE -eq 0) {
            $pythonPath = & $UvCmd python find $PythonVersion 2>$null
            if ($pythonPath) {
                $ver = & $pythonPath --version 2>$null
                Write-Success "Python installed: $ver"
                return $true
            }
        } else {
            Write-Warn "uv python install output:"
            Write-Host $uvOutput -ForegroundColor DarkGray
        }
    } catch {
        Write-Warn "uv python install error: $_"
    }
    # Fallback: check if ANY Python 3.10+ is already available on the system
    Write-Info "Trying to find any existing Python 3.10+..."
    foreach ($fallbackVer in @("3.12", "3.13", "3.10")) {
        try {
            $pythonPath = & $UvCmd python find $fallbackVer 2>$null
            if ($pythonPath) {
                $ver = & $pythonPath --version 2>$null
                Write-Success "Found fallback: $ver"
                # Remember which version was actually found for later steps
                $script:PythonVersion = $fallbackVer
                return $true
            }
        } catch { }
    }
    # Fallback: try system python
    if (Get-Command python -ErrorAction SilentlyContinue) {
        $sysVer = python --version 2>$null
        # Regex accepts minor versions 10-99, i.e. any CPython 3.10+
        if ($sysVer -match "3\.(1[0-9]|[1-9][0-9])") {
            Write-Success "Using system Python: $sysVer"
            return $true
        }
    }
    Write-Err "Failed to install Python $PythonVersion"
    Write-Info "Install Python 3.11 manually, then re-run this script:"
    Write-Info " https://www.python.org/downloads/"
    Write-Info " Or: winget install Python.Python.3.11"
    return $false
}
function Test-Git {
    # Verify git is on PATH; print install instructions when absent.
    Write-Info "Checking Git..."
    $gitCmd = Get-Command git -ErrorAction SilentlyContinue
    if ($gitCmd) {
        $version = git --version
        Write-Success "Git found ($version)"
        return $true
    }
    Write-Err "Git not found"
    Write-Info "Please install Git from:"
    Write-Info " https://git-scm.com/download/win"
    return $false
}
function Test-Node {
    # Ensure Node.js is available for browser tools: existing PATH install,
    # then a previous Hermes-managed copy, then winget, then a direct binary
    # download into $HermesHome\node. Sets $script:HasNode; always returns
    # $true — Node is treated as optional (install failure only warns).
    Write-Info "Checking Node.js (for browser tools)..."
    if (Get-Command node -ErrorAction SilentlyContinue) {
        $version = node --version
        Write-Success "Node.js $version found"
        $script:HasNode = $true
        return $true
    }
    # Check our own managed install from a previous run
    $managedNode = "$HermesHome\node\node.exe"
    if (Test-Path $managedNode) {
        $version = & $managedNode --version
        $env:Path = "$HermesHome\node;$env:Path"
        Write-Success "Node.js $version found (Hermes-managed)"
        $script:HasNode = $true
        return $true
    }
    Write-Info "Node.js not found — installing Node.js $NodeVersion LTS..."
    # Try winget first (cleanest on modern Windows)
    if (Get-Command winget -ErrorAction SilentlyContinue) {
        Write-Info "Installing via winget..."
        try {
            winget install OpenJS.NodeJS.LTS --silent --accept-package-agreements --accept-source-agreements 2>&1 | Out-Null
            # Refresh PATH (winget updates it for new shells, not this one)
            $env:Path = [Environment]::GetEnvironmentVariable("Path", "User") + ";" + [Environment]::GetEnvironmentVariable("Path", "Machine")
            if (Get-Command node -ErrorAction SilentlyContinue) {
                $version = node --version
                Write-Success "Node.js $version installed via winget"
                $script:HasNode = $true
                return $true
            }
        } catch { }
    }
    # Fallback: download binary zip to ~/.hermes/node/
    Write-Info "Downloading Node.js $NodeVersion binary..."
    try {
        $arch = if ([Environment]::Is64BitOperatingSystem) { "x64" } else { "x86" }
        # Scrape the dist index for the newest zip matching this major version
        $indexUrl = "https://nodejs.org/dist/latest-v${NodeVersion}.x/"
        $indexPage = Invoke-WebRequest -Uri $indexUrl -UseBasicParsing
        $zipName = ($indexPage.Content | Select-String -Pattern "node-v${NodeVersion}\.\d+\.\d+-win-${arch}\.zip" -AllMatches).Matches[0].Value
        if ($zipName) {
            $downloadUrl = "${indexUrl}${zipName}"
            $tmpZip = "$env:TEMP\$zipName"
            $tmpDir = "$env:TEMP\hermes-node-extract"
            Invoke-WebRequest -Uri $downloadUrl -OutFile $tmpZip -UseBasicParsing
            if (Test-Path $tmpDir) { Remove-Item -Recurse -Force $tmpDir }
            Expand-Archive -Path $tmpZip -DestinationPath $tmpDir -Force
            # The zip contains a single versioned top-level directory
            $extractedDir = Get-ChildItem $tmpDir -Directory | Select-Object -First 1
            if ($extractedDir) {
                if (Test-Path "$HermesHome\node") { Remove-Item -Recurse -Force "$HermesHome\node" }
                Move-Item $extractedDir.FullName "$HermesHome\node"
                $env:Path = "$HermesHome\node;$env:Path"
                $version = & "$HermesHome\node\node.exe" --version
                Write-Success "Node.js $version installed to ~/.hermes/node/"
                $script:HasNode = $true
                Remove-Item -Force $tmpZip -ErrorAction SilentlyContinue
                Remove-Item -Recurse -Force $tmpDir -ErrorAction SilentlyContinue
                return $true
            }
        }
    } catch {
        Write-Warn "Download failed: $_"
    }
    Write-Warn "Could not auto-install Node.js"
    Write-Info "Install manually: https://nodejs.org/en/download/"
    # Deliberately still returns $true: Node is optional, callers check HasNode
    $script:HasNode = $false
    return $true
}
function Install-SystemPackages {
    # Best-effort install of optional tools: ripgrep (file search) and
    # ffmpeg (TTS voice messages). Tries winget, then Chocolatey, then
    # Scoop; anything still missing just gets manual instructions.
    # Sets $script:HasRipgrep / $script:HasFfmpeg for later steps.
    $script:HasRipgrep = $false
    $script:HasFfmpeg = $false
    $needRipgrep = $false
    $needFfmpeg = $false
    Write-Info "Checking ripgrep (fast file search)..."
    if (Get-Command rg -ErrorAction SilentlyContinue) {
        $version = rg --version | Select-Object -First 1
        Write-Success "$version found"
        $script:HasRipgrep = $true
    } else {
        $needRipgrep = $true
    }
    Write-Info "Checking ffmpeg (TTS voice messages)..."
    if (Get-Command ffmpeg -ErrorAction SilentlyContinue) {
        Write-Success "ffmpeg found"
        $script:HasFfmpeg = $true
    } else {
        $needFfmpeg = $true
    }
    if (-not $needRipgrep -and -not $needFfmpeg) { return }
    # Build description and package lists for each package manager
    $descParts = @()
    $wingetPkgs = @()
    $chocoPkgs = @()
    $scoopPkgs = @()
    if ($needRipgrep) {
        $descParts += "ripgrep for faster file search"
        $wingetPkgs += "BurntSushi.ripgrep.MSVC"
        $chocoPkgs += "ripgrep"
        $scoopPkgs += "ripgrep"
    }
    if ($needFfmpeg) {
        $descParts += "ffmpeg for TTS voice messages"
        $wingetPkgs += "Gyan.FFmpeg"
        $chocoPkgs += "ffmpeg"
        $scoopPkgs += "ffmpeg"
    }
    $description = $descParts -join " and "
    $hasWinget = Get-Command winget -ErrorAction SilentlyContinue
    $hasChoco = Get-Command choco -ErrorAction SilentlyContinue
    $hasScoop = Get-Command scoop -ErrorAction SilentlyContinue
    # Try winget first (most common on modern Windows)
    if ($hasWinget) {
        Write-Info "Installing $description via winget..."
        foreach ($pkg in $wingetPkgs) {
            try {
                winget install $pkg --silent --accept-package-agreements --accept-source-agreements 2>&1 | Out-Null
            } catch { }
        }
        # Refresh PATH and recheck (this process keeps its old environment)
        $env:Path = [Environment]::GetEnvironmentVariable("Path", "User") + ";" + [Environment]::GetEnvironmentVariable("Path", "Machine")
        if ($needRipgrep -and (Get-Command rg -ErrorAction SilentlyContinue)) {
            Write-Success "ripgrep installed"
            $script:HasRipgrep = $true
            $needRipgrep = $false
        }
        if ($needFfmpeg -and (Get-Command ffmpeg -ErrorAction SilentlyContinue)) {
            Write-Success "ffmpeg installed"
            $script:HasFfmpeg = $true
            $needFfmpeg = $false
        }
        if (-not $needRipgrep -and -not $needFfmpeg) { return }
    }
    # Fallback: choco
    if ($hasChoco -and ($needRipgrep -or $needFfmpeg)) {
        Write-Info "Trying Chocolatey..."
        foreach ($pkg in $chocoPkgs) {
            try { choco install $pkg -y 2>&1 | Out-Null } catch { }
        }
        if ($needRipgrep -and (Get-Command rg -ErrorAction SilentlyContinue)) {
            Write-Success "ripgrep installed via chocolatey"
            $script:HasRipgrep = $true
            $needRipgrep = $false
        }
        if ($needFfmpeg -and (Get-Command ffmpeg -ErrorAction SilentlyContinue)) {
            Write-Success "ffmpeg installed via chocolatey"
            $script:HasFfmpeg = $true
            $needFfmpeg = $false
        }
    }
    # Fallback: scoop
    if ($hasScoop -and ($needRipgrep -or $needFfmpeg)) {
        Write-Info "Trying Scoop..."
        foreach ($pkg in $scoopPkgs) {
            try { scoop install $pkg 2>&1 | Out-Null } catch { }
        }
        if ($needRipgrep -and (Get-Command rg -ErrorAction SilentlyContinue)) {
            Write-Success "ripgrep installed via scoop"
            $script:HasRipgrep = $true
            $needRipgrep = $false
        }
        if ($needFfmpeg -and (Get-Command ffmpeg -ErrorAction SilentlyContinue)) {
            Write-Success "ffmpeg installed via scoop"
            $script:HasFfmpeg = $true
            $needFfmpeg = $false
        }
    }
    # Show manual instructions for anything still missing
    if ($needRipgrep) {
        Write-Warn "ripgrep not installed (file search will use findstr fallback)"
        Write-Info " winget install BurntSushi.ripgrep.MSVC"
    }
    if ($needFfmpeg) {
        Write-Warn "ffmpeg not installed (TTS voice messages will be limited)"
        Write-Info " winget install Gyan.FFmpeg"
    }
}
# ============================================================================
# Installation
# ============================================================================
# Clone or update the hermes-agent repository into $InstallDir.
# Strategy: update in place when a git checkout already exists; otherwise
# try SSH clone -> HTTPS clone -> ZIP download, each with Windows-specific
# git workarounds. Throws when every download method fails.
function Install-Repository {
    Write-Info "Installing to $InstallDir..."
    if (Test-Path $InstallDir) {
        if (Test-Path "$InstallDir\.git") {
            # Existing checkout: fetch, switch to the requested branch, pull.
            Write-Info "Existing installation found, updating..."
            Push-Location $InstallDir
            git -c windows.appendAtomically=false fetch origin
            git -c windows.appendAtomically=false checkout $Branch
            git -c windows.appendAtomically=false pull origin $Branch
            Pop-Location
        } else {
            # Refuse to clobber a non-git directory.
            Write-Err "Directory exists but is not a git repository: $InstallDir"
            Write-Info "Remove it or choose a different directory with -InstallDir"
            throw "Directory exists but is not a git repository: $InstallDir"
        }
    } else {
        $cloneSuccess = $false
        # Fix Windows git "copy-fd: write returned: Invalid argument" error.
        # Git for Windows can fail on atomic file operations (hook templates,
        # config lock files) due to antivirus, OneDrive, or NTFS filter drivers.
        # The -c flag injects config before any file I/O occurs.
        Write-Info "Configuring git for Windows compatibility..."
        # GIT_CONFIG_* env vars apply the setting even before global config exists.
        $env:GIT_CONFIG_COUNT = "1"
        $env:GIT_CONFIG_KEY_0 = "windows.appendAtomically"
        $env:GIT_CONFIG_VALUE_0 = "false"
        git config --global windows.appendAtomically false 2>$null
        # Try SSH first, then HTTPS, with -c flag for atomic write fix
        Write-Info "Trying SSH clone..."
        # BatchMode avoids interactive auth prompts; short timeout fails fast.
        $env:GIT_SSH_COMMAND = "ssh -o BatchMode=yes -o ConnectTimeout=5"
        try {
            git -c windows.appendAtomically=false clone --branch $Branch --recurse-submodules $RepoUrlSsh $InstallDir
            if ($LASTEXITCODE -eq 0) { $cloneSuccess = $true }
        } catch { }
        $env:GIT_SSH_COMMAND = $null
        if (-not $cloneSuccess) {
            # Remove any partial clone before retrying over HTTPS.
            if (Test-Path $InstallDir) { Remove-Item -Recurse -Force $InstallDir -ErrorAction SilentlyContinue }
            Write-Info "SSH failed, trying HTTPS..."
            try {
                git -c windows.appendAtomically=false clone --branch $Branch --recurse-submodules $RepoUrlHttps $InstallDir
                if ($LASTEXITCODE -eq 0) { $cloneSuccess = $true }
            } catch { }
        }
        # Fallback: download ZIP archive (bypasses git file I/O issues entirely)
        if (-not $cloneSuccess) {
            if (Test-Path $InstallDir) { Remove-Item -Recurse -Force $InstallDir -ErrorAction SilentlyContinue }
            Write-Warn "Git clone failed — downloading ZIP archive instead..."
            try {
                $zipUrl = "https://github.com/NousResearch/hermes-agent/archive/refs/heads/$Branch.zip"
                $zipPath = "$env:TEMP\hermes-agent-$Branch.zip"
                $extractPath = "$env:TEMP\hermes-agent-extract"
                Invoke-WebRequest -Uri $zipUrl -OutFile $zipPath -UseBasicParsing
                if (Test-Path $extractPath) { Remove-Item -Recurse -Force $extractPath }
                Expand-Archive -Path $zipPath -DestinationPath $extractPath -Force
                # GitHub ZIPs extract to repo-branch/ subdirectory
                $extractedDir = Get-ChildItem $extractPath -Directory | Select-Object -First 1
                if ($extractedDir) {
                    New-Item -ItemType Directory -Force -Path (Split-Path $InstallDir) -ErrorAction SilentlyContinue | Out-Null
                    Move-Item $extractedDir.FullName $InstallDir -Force
                    Write-Success "Downloaded and extracted"
                    # Initialize git repo so updates work later
                    Push-Location $InstallDir
                    git -c windows.appendAtomically=false init 2>$null
                    git -c windows.appendAtomically=false config windows.appendAtomically false 2>$null
                    git remote add origin $RepoUrlHttps 2>$null
                    Pop-Location
                    Write-Success "Git repo initialized for future updates"
                    $cloneSuccess = $true
                }
                # Cleanup temp files
                Remove-Item -Force $zipPath -ErrorAction SilentlyContinue
                Remove-Item -Recurse -Force $extractPath -ErrorAction SilentlyContinue
            } catch {
                Write-Err "ZIP download also failed: $_"
            }
        }
        if (-not $cloneSuccess) {
            throw "Failed to download repository (tried git clone SSH, HTTPS, and ZIP)"
        }
    }
    # Set per-repo config (harmless if it fails)
    Push-Location $InstallDir
    git -c windows.appendAtomically=false config windows.appendAtomically false 2>$null
    # Ensure submodules are initialized and updated
    Write-Info "Initializing submodules..."
    git -c windows.appendAtomically=false submodule update --init --recursive 2>$null
    if ($LASTEXITCODE -ne 0) {
        Write-Warn "Submodule init failed (terminal/RL tools may need manual setup)"
    } else {
        Write-Success "Submodules ready"
    }
    Pop-Location
    Write-Success "Repository ready"
}
# Create a fresh uv-managed virtual environment in $InstallDir\venv, unless
# the user opted out with -NoVenv. Any existing venv is removed first so the
# interpreter version always matches the requested $PythonVersion.
function Install-Venv {
    if ($NoVenv) {
        Write-Info "Skipping virtual environment (-NoVenv)"
        return
    }
    Write-Info "Creating virtual environment with Python $PythonVersion..."
    Push-Location $InstallDir
    $venvDir = "venv"
    # Recreate from scratch when a previous venv is present.
    if (Test-Path $venvDir) {
        Write-Info "Virtual environment already exists, recreating..."
        Remove-Item -Recurse -Force $venvDir
    }
    # uv creates the venv and pins the Python version in one step
    & $UvCmd venv $venvDir --python $PythonVersion
    Pop-Location
    Write-Success "Virtual environment ready (Python $PythonVersion)"
}
# Install Python dependencies into the venv (or system Python with -NoVenv).
# Tries the full extras set first, then falls back to the base package.
# NOTE: native commands like uv do NOT throw PowerShell exceptions on a
# nonzero exit code, so the original try/catch fallbacks were dead code —
# success is detected via $LASTEXITCODE instead.
function Install-Dependencies {
    Write-Info "Installing dependencies..."
    Push-Location $InstallDir
    if (-not $NoVenv) {
        # Tell uv to install into our venv (no activation needed)
        $env:VIRTUAL_ENV = "$InstallDir\venv"
    }
    # Install main package with all extras; fall back to the base package
    # when the extras set fails to resolve/build.
    & $UvCmd pip install -e ".[all]" 2>&1 | Out-Null
    if ($LASTEXITCODE -ne 0) {
        & $UvCmd pip install -e "." | Out-Null
    }
    Write-Success "Main package installed"
    # Install optional submodules
    Write-Info "Installing tinker-atropos (RL training backend)..."
    if (Test-Path "tinker-atropos\pyproject.toml") {
        & $UvCmd pip install -e ".\tinker-atropos" 2>&1 | Out-Null
        if ($LASTEXITCODE -eq 0) {
            Write-Success "tinker-atropos installed"
        } else {
            Write-Warn "tinker-atropos install failed (RL tools may not work)"
        }
    } else {
        Write-Warn "tinker-atropos not found (run: git submodule update --init)"
    }
    Pop-Location
    Write-Success "All dependencies installed"
}
# Make the `hermes` command globally available by prepending its bin directory
# to the user PATH, and persist HERMES_HOME for the Python code. Also updates
# the current session so the command works without restarting the terminal.
function Set-PathVariable {
    Write-Info "Setting up hermes command..."
    $hermesBin = if ($NoVenv) { "$InstallDir" } else { "$InstallDir\venv\Scripts" }
    # Add the venv Scripts dir to user PATH so hermes is globally available
    # On Windows, the hermes.exe in venv\Scripts\ has the venv Python baked in
    $currentPath = [Environment]::GetEnvironmentVariable("Path", "User")
    if ($currentPath -like "*$hermesBin*") {
        Write-Info "PATH already configured"
    } else {
        [Environment]::SetEnvironmentVariable("Path", "$hermesBin;$currentPath", "User")
        Write-Success "Added to user PATH: $hermesBin"
    }
    # Set HERMES_HOME so the Python code finds config/data in the right place.
    # Only needed on Windows where we install to %LOCALAPPDATA%\hermes instead
    # of the Unix default ~/.hermes
    $storedHome = [Environment]::GetEnvironmentVariable("HERMES_HOME", "User")
    if (-not $storedHome -or $storedHome -ne $HermesHome) {
        [Environment]::SetEnvironmentVariable("HERMES_HOME", $HermesHome, "User")
        Write-Success "Set HERMES_HOME=$HermesHome"
    }
    $env:HERMES_HOME = $HermesHome
    # Update current session
    $env:Path = "$hermesBin;$env:Path"
    Write-Success "hermes command ready"
}
# Create the ~/.hermes directory tree and seed config files (.env,
# config.yaml, SOUL.md) from bundled templates, never overwriting files
# the user already has. Finally syncs bundled skills into ~/.hermes/skills/
# via tools/skills_sync.py, with a plain directory copy as fallback.
function Copy-ConfigTemplates {
    Write-Info "Setting up configuration files..."
    # Create ~/.hermes directory structure
    New-Item -ItemType Directory -Force -Path "$HermesHome\cron" | Out-Null
    New-Item -ItemType Directory -Force -Path "$HermesHome\sessions" | Out-Null
    New-Item -ItemType Directory -Force -Path "$HermesHome\logs" | Out-Null
    New-Item -ItemType Directory -Force -Path "$HermesHome\pairing" | Out-Null
    New-Item -ItemType Directory -Force -Path "$HermesHome\hooks" | Out-Null
    New-Item -ItemType Directory -Force -Path "$HermesHome\image_cache" | Out-Null
    New-Item -ItemType Directory -Force -Path "$HermesHome\audio_cache" | Out-Null
    New-Item -ItemType Directory -Force -Path "$HermesHome\memories" | Out-Null
    New-Item -ItemType Directory -Force -Path "$HermesHome\skills" | Out-Null
    New-Item -ItemType Directory -Force -Path "$HermesHome\whatsapp\session" | Out-Null
    # Create .env
    $envPath = "$HermesHome\.env"
    if (-not (Test-Path $envPath)) {
        $examplePath = "$InstallDir\.env.example"
        if (Test-Path $examplePath) {
            Copy-Item $examplePath $envPath
            Write-Success "Created ~/.hermes/.env from template"
        } else {
            # No template bundled: create an empty .env the user can fill in.
            New-Item -ItemType File -Force -Path $envPath | Out-Null
            Write-Success "Created ~/.hermes/.env"
        }
    } else {
        Write-Info "~/.hermes/.env already exists, keeping it"
    }
    # Create config.yaml
    $configPath = "$HermesHome\config.yaml"
    if (-not (Test-Path $configPath)) {
        $examplePath = "$InstallDir\cli-config.yaml.example"
        if (Test-Path $examplePath) {
            Copy-Item $examplePath $configPath
            Write-Success "Created ~/.hermes/config.yaml from template"
        }
    } else {
        Write-Info "~/.hermes/config.yaml already exists, keeping it"
    }
    # Create SOUL.md if it doesn't exist (global persona file)
    $soulPath = "$HermesHome\SOUL.md"
    if (-not (Test-Path $soulPath)) {
        @"
# Hermes Agent Persona
<!--
This file defines the agent's personality and tone.
The agent will embody whatever you write here.
Edit this to customize how Hermes communicates with you.
Examples:
- "You are a warm, playful assistant who uses kaomoji occasionally."
- "You are a concise technical expert. No fluff, just facts."
- "You speak like a friendly coworker who happens to know everything."
This file is loaded fresh each message -- no restart needed.
Delete the contents (or this file) to use the default personality.
-->
"@ | Set-Content -Path $soulPath -Encoding UTF8
        Write-Success "Created ~/.hermes/SOUL.md (edit to customize personality)"
    }
    Write-Success "Configuration directory ready: ~/.hermes/"
    # Seed bundled skills into ~/.hermes/skills/ (manifest-based, one-time per skill)
    Write-Info "Syncing bundled skills to ~/.hermes/skills/ ..."
    $pythonExe = "$InstallDir\venv\Scripts\python.exe"
    if (Test-Path $pythonExe) {
        try {
            & $pythonExe "$InstallDir\tools\skills_sync.py" 2>$null
            Write-Success "Skills synced to ~/.hermes/skills/"
        } catch {
            # Fallback: simple directory copy
            # Only copies when the user skills dir is effectively empty, so
            # user-authored skills are never clobbered.
            $bundledSkills = "$InstallDir\skills"
            $userSkills = "$HermesHome\skills"
            if ((Test-Path $bundledSkills) -and -not (Get-ChildItem $userSkills -Exclude '.bundled_manifest' -ErrorAction SilentlyContinue)) {
                Copy-Item -Path "$bundledSkills\*" -Destination $userSkills -Recurse -Force -ErrorAction SilentlyContinue
                Write-Success "Skills copied to ~/.hermes/skills/"
            }
        }
    }
}
# Install Node.js dependencies for the browser tools and the WhatsApp bridge.
# Skipped entirely when Node was not found/installed earlier.
# NOTE: npm is a native command, so a nonzero exit code does not throw a
# PowerShell exception — the original try/catch warnings were unreachable.
# Failure is now detected via $LASTEXITCODE.
function Install-NodeDeps {
    if (-not $HasNode) {
        Write-Info "Skipping Node.js dependencies (Node not installed)"
        return
    }
    Push-Location $InstallDir
    if (Test-Path "package.json") {
        Write-Info "Installing Node.js dependencies (browser tools)..."
        npm install --silent 2>&1 | Out-Null
        if ($LASTEXITCODE -eq 0) {
            Write-Success "Node.js dependencies installed"
        } else {
            Write-Warn "npm install failed (browser tools may not work)"
        }
    }
    # Install WhatsApp bridge dependencies
    $bridgeDir = "$InstallDir\scripts\whatsapp-bridge"
    if (Test-Path "$bridgeDir\package.json") {
        Write-Info "Installing WhatsApp bridge dependencies..."
        Push-Location $bridgeDir
        npm install --silent 2>&1 | Out-Null
        if ($LASTEXITCODE -eq 0) {
            Write-Success "WhatsApp bridge dependencies installed"
        } else {
            Write-Warn "WhatsApp bridge npm install failed (WhatsApp may not work)"
        }
        Pop-Location
    }
    Pop-Location
}
# Launch the interactive `hermes setup` wizard unless -SkipSetup was given.
# Invokes the venv interpreter directly so no activation step is required.
function Invoke-SetupWizard {
    if ($SkipSetup) {
        Write-Info "Skipping setup wizard (-SkipSetup)"
        return
    }
    Write-Host ""
    Write-Info "Starting setup wizard..."
    Write-Host ""
    Push-Location $InstallDir
    # Run hermes setup using the venv Python directly (no activation needed)
    if ($NoVenv) {
        python -m hermes_cli.main setup
    } else {
        & ".\venv\Scripts\python.exe" -m hermes_cli.main setup
    }
    Pop-Location
}
# If any messaging platform token is configured in ~/.hermes/.env, offer to
# start the gateway. Handles the WhatsApp first-run case specially: pairing
# needs a foreground QR-code scan before the gateway can run in background.
function Start-GatewayIfConfigured {
    $envPath = "$HermesHome\.env"
    if (-not (Test-Path $envPath)) { return }
    # Look for any real (non-placeholder) messaging token in .env.
    $hasMessaging = $false
    $content = Get-Content $envPath -ErrorAction SilentlyContinue
    foreach ($var in @("TELEGRAM_BOT_TOKEN", "DISCORD_BOT_TOKEN", "SLACK_BOT_TOKEN", "SLACK_APP_TOKEN", "WHATSAPP_ENABLED")) {
        $match = $content | Where-Object { $_ -match "^${var}=.+" -and $_ -notmatch "your-token-here" }
        if ($match) { $hasMessaging = $true; break }
    }
    if (-not $hasMessaging) { return }
    # Prefer the venv-installed hermes.exe; fall back to whatever is on PATH.
    $hermesCmd = "$InstallDir\venv\Scripts\hermes.exe"
    if (-not (Test-Path $hermesCmd)) {
        $hermesCmd = "hermes"
    }
    # If WhatsApp is enabled but not yet paired, run foreground for QR scan
    $whatsappEnabled = $content | Where-Object { $_ -match "^WHATSAPP_ENABLED=true" }
    $whatsappSession = "$HermesHome\whatsapp\session\creds.json"
    if ($whatsappEnabled -and -not (Test-Path $whatsappSession)) {
        Write-Host ""
        Write-Info "WhatsApp is enabled but not yet paired."
        Write-Info "Running 'hermes whatsapp' to pair via QR code..."
        Write-Host ""
        $response = Read-Host "Pair WhatsApp now? [Y/n]"
        if ($response -eq "" -or $response -match "^[Yy]") {
            try {
                & $hermesCmd whatsapp
            } catch {
                # Expected after pairing completes
            }
        }
    }
    Write-Host ""
    Write-Info "Messaging platform token detected!"
    Write-Info "The gateway handles messaging platforms and cron job execution."
    Write-Host ""
    $response = Read-Host "Would you like to start the gateway now? [Y/n]"
    if ($response -eq "" -or $response -match "^[Yy]") {
        Write-Info "Starting gateway in background..."
        try {
            # Detached background process; stdout/stderr go to log files.
            $logFile = "$HermesHome\logs\gateway.log"
            Start-Process -FilePath $hermesCmd -ArgumentList "gateway" `
                -RedirectStandardOutput $logFile `
                -RedirectStandardError "$HermesHome\logs\gateway-error.log" `
                -WindowStyle Hidden
            Write-Success "Gateway started! Your bot is now online."
            Write-Info "Logs: $logFile"
            Write-Info "To stop: close the gateway process from Task Manager"
        } catch {
            Write-Warn "Failed to start gateway. Run manually: hermes gateway"
        }
    } else {
        Write-Info "Skipped. Start the gateway later with: hermes gateway"
    }
}
# Print the final "installation complete" summary: file locations, common
# commands, and notes about any optional tools that could not be installed.
# Pure console output — no side effects.
function Write-Completion {
    Write-Host ""
    Write-Host "┌─────────────────────────────────────────────────────────┐" -ForegroundColor Green
    Write-Host "│ ✓ Installation Complete! │" -ForegroundColor Green
    Write-Host "└─────────────────────────────────────────────────────────┘" -ForegroundColor Green
    Write-Host ""
    # Show file locations
    Write-Host "📁 Your files:" -ForegroundColor Cyan
    Write-Host ""
    Write-Host " Config: " -NoNewline -ForegroundColor Yellow
    Write-Host "$HermesHome\config.yaml"
    Write-Host " API Keys: " -NoNewline -ForegroundColor Yellow
    Write-Host "$HermesHome\.env"
    Write-Host " Data: " -NoNewline -ForegroundColor Yellow
    Write-Host "$HermesHome\cron\, sessions\, logs\"
    Write-Host " Code: " -NoNewline -ForegroundColor Yellow
    Write-Host "$HermesHome\hermes-agent\"
    Write-Host ""
    Write-Host "─────────────────────────────────────────────────────────" -ForegroundColor Cyan
    Write-Host ""
    Write-Host "🚀 Commands:" -ForegroundColor Cyan
    Write-Host ""
    Write-Host " hermes " -NoNewline -ForegroundColor Green
    Write-Host "Start chatting"
    Write-Host " hermes setup " -NoNewline -ForegroundColor Green
    Write-Host "Configure API keys & settings"
    Write-Host " hermes config " -NoNewline -ForegroundColor Green
    Write-Host "View/edit configuration"
    Write-Host " hermes config edit " -NoNewline -ForegroundColor Green
    Write-Host "Open config in editor"
    Write-Host " hermes gateway " -NoNewline -ForegroundColor Green
    Write-Host "Start messaging gateway (Telegram, Discord, etc.)"
    Write-Host " hermes update " -NoNewline -ForegroundColor Green
    Write-Host "Update to latest version"
    Write-Host ""
    Write-Host "─────────────────────────────────────────────────────────" -ForegroundColor Cyan
    Write-Host ""
    Write-Host "⚡ Restart your terminal for PATH changes to take effect" -ForegroundColor Yellow
    Write-Host ""
    # Manual-install hints for optional tools that auto-install couldn't provide.
    if (-not $HasNode) {
        Write-Host "Note: Node.js could not be installed automatically." -ForegroundColor Yellow
        Write-Host "Browser tools need Node.js. Install manually:" -ForegroundColor Yellow
        Write-Host " https://nodejs.org/en/download/" -ForegroundColor Yellow
        Write-Host ""
    }
    if (-not $HasRipgrep) {
        Write-Host "Note: ripgrep (rg) was not installed. For faster file search:" -ForegroundColor Yellow
        Write-Host " winget install BurntSushi.ripgrep.MSVC" -ForegroundColor Yellow
        Write-Host ""
    }
}
# ============================================================================
# Main
# ============================================================================
# Orchestrates the full installation: toolchain checks, repository download,
# venv creation, dependency installs, PATH/config setup, then the interactive
# setup wizard and optional gateway start.
function Main {
    Write-Banner
    # Hard requirements — abort early with a clear message.
    if (-not (Install-Uv)) { throw "uv installation failed — cannot continue" }
    if (-not (Test-Python)) { throw "Python $PythonVersion not available — cannot continue" }
    if (-not (Test-Git)) { throw "Git not found — install from https://git-scm.com/download/win" }
    # Optional tooling — best effort, installation continues without them.
    Test-Node # Auto-installs if missing
    Install-SystemPackages # ripgrep + ffmpeg in one step
    Install-Repository
    Install-Venv
    Install-Dependencies
    Install-NodeDeps
    Set-PathVariable
    Copy-ConfigTemplates
    Invoke-SetupWizard
    Start-GatewayIfConfigured
    Write-Completion
}
# Wrap in try/catch so errors don't kill the terminal when run via:
#   irm https://...install.ps1 | iex
# (exit/throw inside iex kills the entire PowerShell session)
try {
    Main
} catch {
    # Report the failure and suggest running the script from disk, which
    # gives clearer error output than the piped-to-iex one-liner.
    Write-Host ""
    Write-Err "Installation failed: $_"
    Write-Host ""
    Write-Info "If the error is unclear, try downloading and running the script directly:"
    Write-Host " Invoke-WebRequest -Uri 'https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.ps1' -OutFile install.ps1" -ForegroundColor Yellow
    Write-Host " .\install.ps1" -ForegroundColor Yellow
    Write-Host ""
}

1121
hermes_code/scripts/install.sh Executable file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,34 @@
#!/bin/bash
# Kill all running Modal apps (sandboxes, deployments, etc.)
#
# Usage:
#   bash scripts/kill_modal.sh        # Stop swe-rex (the sandbox app)
#   bash scripts/kill_modal.sh --all  # Stop ALL Modal apps
set -uo pipefail

# stop_app <app-id> — best-effort stop; never fails the script.
stop_app() {
  echo " Stopping $1"
  modal app stop "$1" 2>/dev/null || true
}

echo "Fetching Modal app list..."
APP_LIST=$(modal app list 2>/dev/null)

if [[ "${1:-}" == "--all" ]]; then
  echo "Stopping ALL Modal apps..."
  # Every unique app id in the listing.
  echo "$APP_LIST" | grep -oE 'ap-[A-Za-z0-9]+' | sort -u | while read app_id; do
    stop_app "$app_id"
  done
else
  echo "Stopping swe-rex sandboxes..."
  # Only app ids on lines mentioning swe-rex.
  APPS=$(echo "$APP_LIST" | grep 'swe-rex' | grep -oE 'ap-[A-Za-z0-9]+' || true)
  if [[ -z "$APPS" ]]; then
    echo " No swe-rex apps found."
  else
    echo "$APPS" | while read app_id; do
      stop_app "$app_id"
    done
  fi
fi

echo ""
echo "Current swe-rex status:"
modal app list 2>/dev/null | grep -E 'State|swe-rex' || echo " (none)"

540
hermes_code/scripts/release.py Executable file
View file

@ -0,0 +1,540 @@
#!/usr/bin/env python3
"""Hermes Agent Release Script
Generates changelogs and creates GitHub releases with CalVer tags.
Usage:
# Preview changelog (dry run)
python scripts/release.py
# Preview with semver bump
python scripts/release.py --bump minor
# Create the release
python scripts/release.py --bump minor --publish
# First release (no previous tag)
python scripts/release.py --bump minor --publish --first-release
# Override CalVer date (e.g. for a belated release)
python scripts/release.py --bump minor --publish --date 2026.3.15
"""
import argparse
import json
import os
import re
import subprocess
import sys
from collections import defaultdict
from datetime import datetime
from pathlib import Path
REPO_ROOT = Path(__file__).resolve().parent.parent
VERSION_FILE = REPO_ROOT / "hermes_cli" / "__init__.py"
PYPROJECT_FILE = REPO_ROOT / "pyproject.toml"
# ──────────────────────────────────────────────────────────────────────
# Git email → GitHub username mapping
# ──────────────────────────────────────────────────────────────────────
# Auto-extracted from noreply emails + manual overrides
# Used by resolve_author() to turn a commit's author email into a GitHub
# @mention for the changelog. Emails not listed here fall back to the
# noreply-pattern parsing, then to the raw git author name.
AUTHOR_MAP = {
    # teknium (multiple emails)
    "teknium1@gmail.com": "teknium1",
    "teknium@nousresearch.com": "teknium1",
    "127238744+teknium1@users.noreply.github.com": "teknium1",
    # contributors (from noreply pattern)
    "35742124+0xbyt4@users.noreply.github.com": "0xbyt4",
    "82637225+kshitijk4poor@users.noreply.github.com": "kshitijk4poor",
    "16443023+stablegenius49@users.noreply.github.com": "stablegenius49",
    "185121704+stablegenius49@users.noreply.github.com": "stablegenius49",
    "101283333+batuhankocyigit@users.noreply.github.com": "batuhankocyigit",
    "126368201+vilkasdev@users.noreply.github.com": "vilkasdev",
    "137614867+cutepawss@users.noreply.github.com": "cutepawss",
    "96793918+memosr@users.noreply.github.com": "memosr",
    "131039422+SHL0MS@users.noreply.github.com": "SHL0MS",
    "77628552+raulvidis@users.noreply.github.com": "raulvidis",
    "145567217+Aum08Desai@users.noreply.github.com": "Aum08Desai",
    "256820943+kshitij-eliza@users.noreply.github.com": "kshitij-eliza",
    "44278268+shitcoinsherpa@users.noreply.github.com": "shitcoinsherpa",
    "104278804+Sertug17@users.noreply.github.com": "Sertug17",
    "112503481+caentzminger@users.noreply.github.com": "caentzminger",
    "258577966+voidborne-d@users.noreply.github.com": "voidborne-d",
    "70424851+insecurejezza@users.noreply.github.com": "insecurejezza",
    "259807879+Bartok9@users.noreply.github.com": "Bartok9",
    # contributors (manual mapping from git names)
    "dmayhem93@gmail.com": "dmahan93",
    "samherring99@gmail.com": "samherring99",
    "desaiaum08@gmail.com": "Aum08Desai",
    "shannon.sands.1979@gmail.com": "shannonsands",
    "shannon@nousresearch.com": "shannonsands",
    "eri@plasticlabs.ai": "Erosika",
    "hjcpuro@gmail.com": "hjc-puro",
    "xaydinoktay@gmail.com": "aydnOktay",
    "abdullahfarukozden@gmail.com": "Farukest",
    "lovre.pesut@gmail.com": "rovle",
    "hakanerten02@hotmail.com": "teyrebaz33",
    "alireza78.crypto@gmail.com": "alireza78a",
    "brooklyn.bb.nicholson@gmail.com": "brooklynnicholson",
    "gpickett00@gmail.com": "gpickett00",
    "mcosma@gmail.com": "wakamex",
    "clawdia.nash@proton.me": "clawdia-nash",
    "pickett.austin@gmail.com": "austinpickett",
    "jaisehgal11299@gmail.com": "jaisup",
    "percydikec@gmail.com": "PercyDikec",
    "dean.kerr@gmail.com": "deankerr",
    "socrates1024@gmail.com": "socrates1024",
    "satelerd@gmail.com": "satelerd",
    "numman.ali@gmail.com": "nummanali",
    "0xNyk@users.noreply.github.com": "0xNyk",
    "0xnykcd@googlemail.com": "0xNyk",
    "buraysandro9@gmail.com": "buray",
    "contact@jomar.fr": "joshmartinelle",
    "camilo@tekelala.com": "tekelala",
    "vincentcharlebois@gmail.com": "vincentcharlebois",
    "aryan@synvoid.com": "aryansingh",
    "johnsonblake1@gmail.com": "blakejohnson",
    "bryan@intertwinesys.com": "bryanyoung",
    "christo.mitov@gmail.com": "christomitov",
    "hermes@nousresearch.com": "NousResearch",
    "openclaw@sparklab.ai": "openclaw",
    "semihcvlk53@gmail.com": "Himess",
    "erenkar950@gmail.com": "erenkarakus",
    "adavyasharma@gmail.com": "adavyas",
    "acaayush1111@gmail.com": "aayushchaudhary",
    "jason@outland.art": "jasonoutland",
    "mrflu1918@proton.me": "SPANISHFLU",
    "morganemoss@gmai.com": "mormio",
    "kopjop926@gmail.com": "cesareth",
    "fuleinist@gmail.com": "fuleinist",
    "jack.47@gmail.com": "JackTheGit",
    "dalvidjr2022@gmail.com": "Jr-kenny",
    "m@statecraft.systems": "mbierling",
    "balyan.sid@gmail.com": "balyansid",
}
def git(*args, cwd=None):
    """Run a git command in *cwd* (default: repo root) and return stripped stdout.

    On a nonzero exit code a diagnostic is printed to stderr and an empty
    string is returned instead of raising.
    """
    proc = subprocess.run(
        ["git", *args],
        capture_output=True,
        text=True,
        cwd=cwd or str(REPO_ROOT),
    )
    if proc.returncode == 0:
        return proc.stdout.strip()
    print(f"git {' '.join(args)} failed: {proc.stderr}", file=sys.stderr)
    return ""
def get_last_tag():
    """Return the newest CalVer tag (matching ``v20*``), or None when none exist."""
    # --sort=-v:refname puts the highest version first.
    listing = git("tag", "--list", "v20*", "--sort=-v:refname")
    return listing.split("\n")[0] if listing else None
def get_current_version():
    """Read the current semver string from hermes_cli/__init__.py.

    Falls back to "0.0.0" when no ``__version__`` assignment is found.
    """
    found = re.search(r'__version__\s*=\s*"([^"]+)"', VERSION_FILE.read_text())
    return found.group(1) if found else "0.0.0"
def bump_version(current: str, part: str) -> str:
    """Return *current* with the given semver component bumped.

    Malformed versions (not exactly three dot-separated fields) are treated
    as "0.0.0" before bumping. Raises ValueError for an unknown *part*.
    """
    fields = current.split(".")
    if len(fields) != 3:
        fields = ["0", "0", "0"]
    major, minor, patch = (int(f) for f in fields)
    if part == "major":
        return f"{major + 1}.0.0"
    if part == "minor":
        return f"{major}.{minor + 1}.0"
    if part == "patch":
        return f"{major}.{minor}.{patch + 1}"
    raise ValueError(f"Unknown bump part: {part}")
def update_version_files(semver: str, calver_date: str):
    """Rewrite the version/date strings in __init__.py and pyproject.toml in place."""
    # hermes_cli/__init__.py: __version__ and __release_date__ assignments.
    init_src = VERSION_FILE.read_text()
    init_src = re.sub(
        r'__version__\s*=\s*"[^"]+"',
        f'__version__ = "{semver}"',
        init_src,
    )
    init_src = re.sub(
        r'__release_date__\s*=\s*"[^"]+"',
        f'__release_date__ = "{calver_date}"',
        init_src,
    )
    VERSION_FILE.write_text(init_src)
    # pyproject.toml: the line-anchored `version = "..."` entry.
    toml_src = PYPROJECT_FILE.read_text()
    toml_src = re.sub(
        r'^version\s*=\s*"[^"]+"',
        f'version = "{semver}"',
        toml_src,
        flags=re.MULTILINE,
    )
    PYPROJECT_FILE.write_text(toml_src)
def resolve_author(name: str, email: str) -> str:
    """Resolve a git author to a GitHub @mention.

    Resolution order: explicit AUTHOR_MAP entry, then the two GitHub
    noreply-address patterns, then the raw git *name* as a last resort.
    """
    mapped = AUTHOR_MAP.get(email)
    if mapped:
        return f"@{mapped}"
    # Pattern: <id>+<username>@users.noreply.github.com
    hit = re.match(r"(\d+)\+(.+)@users\.noreply\.github\.com", email)
    if hit:
        return f"@{hit.group(2)}"
    # Pattern: <username>@users.noreply.github.com
    hit = re.match(r"(.+)@users\.noreply\.github\.com", email)
    if hit:
        return f"@{hit.group(1)}"
    return name
def categorize_commit(subject: str) -> str:
    """Bucket a commit subject into a changelog category.

    Conventional-commit prefixes are matched first (in priority order);
    a keyword heuristic handles unprefixed subjects; anything left over
    lands in "other".
    """
    lowered = subject.lower()
    rules = [
        ("breaking", (r"^breaking[\s:(]", r"^!:", r"BREAKING CHANGE")),
        ("features", (r"^feat[\s:(]", r"^feature[\s:(]", r"^add[\s:(]")),
        ("fixes", (r"^fix[\s:(]", r"^bugfix[\s:(]", r"^bug[\s:(]", r"^hotfix[\s:(]")),
        ("improvements", (r"^improve[\s:(]", r"^perf[\s:(]", r"^enhance[\s:(]",
                          r"^refactor[\s:(]", r"^cleanup[\s:(]", r"^clean[\s:(]",
                          r"^update[\s:(]", r"^optimize[\s:(]")),
        ("docs", (r"^doc[\s:(]", r"^docs[\s:(]")),
        ("tests", (r"^test[\s:(]", r"^tests[\s:(]")),
        ("chore", (r"^chore[\s:(]", r"^ci[\s:(]", r"^build[\s:(]",
                   r"^deps[\s:(]", r"^bump[\s:(]")),
    ]
    for category, regexes in rules:
        if any(re.match(rx, lowered) for rx in regexes):
            return category
    # Heuristic fallbacks for subjects without a conventional prefix.
    if any(w in lowered for w in ("add ", "new ", "implement", "support ")):
        return "features"
    if any(w in lowered for w in ("fix ", "fixed ", "resolve", "patch ")):
        return "fixes"
    if any(w in lowered for w in ("refactor", "cleanup", "improve", "update ")):
        return "improvements"
    return "other"
def clean_subject(subject: str) -> str:
    """Strip a conventional-commit prefix, trim whitespace, and capitalize.

    Returns an empty string unchanged.
    """
    # Drop a leading type keyword plus its separator punctuation.
    stripped = re.sub(
        r"^(feat|fix|docs|chore|refactor|test|perf|ci|build|improve|add|update|cleanup|hotfix|breaking|enhance|optimize|bugfix|bug|feature|tests|deps|bump)[\s:(!]+\s*",
        "",
        subject,
        flags=re.IGNORECASE,
    ).strip()
    # Capitalize only the first character; leave the rest untouched.
    return stripped[0].upper() + stripped[1:] if stripped else stripped
def get_commits(since_tag=None):
    """Return non-merge commits after *since_tag* (entire history when None).

    Each commit is a dict with sha, short_sha, author name/email, subject,
    changelog category, and the resolved GitHub @mention.
    """
    spec = f"{since_tag}..HEAD" if since_tag else "HEAD"
    # One line per commit: hash|author_name|author_email|subject
    raw = git("log", spec, "--format=%H|%an|%ae|%s", "--no-merges")
    commits = []
    if not raw:
        return commits
    for row in raw.split("\n"):
        if not row.strip():
            continue
        fields = row.split("|", 3)
        if len(fields) != 4:
            # Skip malformed lines (e.g. a subject that broke the format).
            continue
        sha, author, email, subject = fields
        commits.append({
            "sha": sha,
            "short_sha": sha[:8],
            "author_name": author,
            "author_email": email,
            "subject": subject,
            "category": categorize_commit(subject),
            "github_author": resolve_author(author, email),
        })
    return commits
def get_pr_number(subject: str) -> "str | None":
    """Extract a PR number like ``#123`` from a commit subject.

    Returns:
        The first run of digits following a ``#`` as a string, or None when
        no PR reference is present. (The original annotation claimed ``str``
        but the function returns None on a miss.)
    """
    match = re.search(r"#(\d+)", subject)
    return match.group(1) if match else None
def generate_changelog(commits, tag_name, semver, repo_url="https://github.com/NousResearch/hermes-agent",
                       prev_tag=None, first_release=False):
    """Generate markdown changelog from categorized commits.

    Args:
        commits: list of commit dicts as produced by get_commits().
        tag_name: CalVer tag for this release (e.g. "v2026.3.31").
        semver: semantic version string shown in the title.
        repo_url: base URL used for commit/PR/compare links.
        prev_tag: previous release tag for the compare link, or None.
        first_release: when True, adds a first-release banner.

    Returns:
        The complete changelog as a markdown string.
    """
    lines = []
    # Header
    now = datetime.now()
    date_str = now.strftime("%B %d, %Y")
    lines.append(f"# Hermes Agent v{semver} ({tag_name})")
    lines.append("")
    lines.append(f"**Release Date:** {date_str}")
    lines.append("")
    if first_release:
        lines.append("> 🎉 **First official release!** This marks the beginning of regular weekly releases")
        lines.append("> for Hermes Agent. See below for everything included in this initial release.")
        lines.append("")
    # Group commits by category
    # teknium_aliases: the repo owner is excluded from attribution/credits.
    categories = defaultdict(list)
    all_authors = set()
    teknium_aliases = {"@teknium1"}
    for commit in commits:
        categories[commit["category"]].append(commit)
        author = commit["github_author"]
        if author not in teknium_aliases:
            all_authors.add(author)
    # Category display order and emoji
    category_order = [
        ("breaking", "⚠️ Breaking Changes"),
        ("features", "✨ Features"),
        ("improvements", "🔧 Improvements"),
        ("fixes", "🐛 Bug Fixes"),
        ("docs", "📚 Documentation"),
        ("tests", "🧪 Tests"),
        ("chore", "🏗️ Infrastructure"),
        ("other", "📦 Other Changes"),
    ]
    for cat_key, cat_title in category_order:
        cat_commits = categories.get(cat_key, [])
        if not cat_commits:
            # Empty categories are omitted entirely.
            continue
        lines.append(f"## {cat_title}")
        lines.append("")
        for commit in cat_commits:
            subject = clean_subject(commit["subject"])
            pr_num = get_pr_number(commit["subject"])
            author = commit["github_author"]
            # Build the line: subject, then a PR link (preferred) or a
            # commit link, then author credit for non-owner contributors.
            parts = [f"- {subject}"]
            if pr_num:
                parts.append(f"([#{pr_num}]({repo_url}/pull/{pr_num}))")
            else:
                parts.append(f"([`{commit['short_sha']}`]({repo_url}/commit/{commit['sha']}))")
            if author not in teknium_aliases:
                parts.append(f"{author}")
            lines.append(" ".join(parts))
        lines.append("")
    # Contributors section
    if all_authors:
        # Sort contributors by commit count
        author_counts = defaultdict(int)
        for commit in commits:
            author = commit["github_author"]
            if author not in teknium_aliases:
                author_counts[author] += 1
        sorted_authors = sorted(author_counts.items(), key=lambda x: -x[1])
        lines.append("## 👥 Contributors")
        lines.append("")
        lines.append("Thank you to everyone who contributed to this release!")
        lines.append("")
        for author, count in sorted_authors:
            commit_word = "commit" if count == 1 else "commits"
            lines.append(f"- {author} ({count} {commit_word})")
        lines.append("")
    # Full changelog link
    if prev_tag:
        lines.append(f"**Full Changelog**: [{prev_tag}...{tag_name}]({repo_url}/compare/{prev_tag}...{tag_name})")
    else:
        lines.append(f"**Full Changelog**: [{tag_name}]({repo_url}/commits/{tag_name})")
    lines.append("")
    return "\n".join(lines)
def main():
    """CLI entry point: preview or publish a CalVer-tagged GitHub release."""
    parser = argparse.ArgumentParser(description="Hermes Agent Release Tool")
    parser.add_argument("--bump", choices=["major", "minor", "patch"],
                        help="Which semver component to bump")
    parser.add_argument("--publish", action="store_true",
                        help="Actually create the tag and GitHub release (otherwise dry run)")
    parser.add_argument("--date", type=str,
                        help="Override CalVer date (format: YYYY.M.D)")
    parser.add_argument("--first-release", action="store_true",
                        help="Mark as first release (no previous tag expected)")
    parser.add_argument("--output", type=str,
                        help="Write changelog to file instead of stdout")
    args = parser.parse_args()
    # Determine CalVer date
    if args.date:
        calver_date = args.date
    else:
        now = datetime.now()
        calver_date = f"{now.year}.{now.month}.{now.day}"
    tag_name = f"v{calver_date}"
    # Check for existing tag with same date
    existing = git("tag", "--list", tag_name)
    if existing and not args.publish:
        # Append a suffix for same-day releases
        suffix = 2
        while git("tag", "--list", f"{tag_name}.{suffix}"):
            suffix += 1
        tag_name = f"{tag_name}.{suffix}"
        calver_date = f"{calver_date}.{suffix}"
        print(f"Note: Tag {tag_name[:-2]} already exists, using {tag_name}")
    # Determine semver
    current_version = get_current_version()
    if args.bump:
        new_version = bump_version(current_version, args.bump)
    else:
        new_version = current_version
    # Get previous tag
    prev_tag = get_last_tag()
    if not prev_tag and not args.first_release:
        print("No previous tags found. Use --first-release for the initial release.")
        print(f"Would create tag: {tag_name}")
        print(f"Would set version: {new_version}")
    # Get commits
    commits = get_commits(since_tag=prev_tag)
    if not commits:
        print("No new commits since last tag.")
        if not args.first_release:
            return
    # Summary banner shown for both dry runs and real publishes.
    print(f"{'='*60}")
    print(f" Hermes Agent Release Preview")
    print(f"{'='*60}")
    print(f" CalVer tag: {tag_name}")
    print(f" SemVer: v{current_version} → v{new_version}")
    print(f" Previous tag: {prev_tag or '(none — first release)'}")
    print(f" Commits: {len(commits)}")
    print(f" Unique authors: {len(set(c['github_author'] for c in commits))}")
    print(f" Mode: {'PUBLISH' if args.publish else 'DRY RUN'}")
    print(f"{'='*60}")
    print()
    # Generate changelog
    changelog = generate_changelog(
        commits, tag_name, new_version,
        prev_tag=prev_tag,
        first_release=args.first_release,
    )
    if args.output:
        Path(args.output).write_text(changelog)
        print(f"Changelog written to {args.output}")
    else:
        print(changelog)
    if args.publish:
        # Publish sequence: bump files -> commit -> tag -> push -> gh release.
        print(f"\n{'='*60}")
        print(" Publishing release...")
        print(f"{'='*60}")
        # Update version files
        if args.bump:
            update_version_files(new_version, calver_date)
            print(f" ✓ Updated version files to v{new_version} ({calver_date})")
        # Commit version bump
        git("add", str(VERSION_FILE), str(PYPROJECT_FILE))
        git("commit", "-m", f"chore: bump version to v{new_version} ({calver_date})")
        print(f" ✓ Committed version bump")
        # Create annotated tag
        git("tag", "-a", tag_name, "-m",
            f"Hermes Agent v{new_version} ({calver_date})\n\nWeekly release")
        print(f" ✓ Created tag {tag_name}")
        # Push
        push_result = git("push", "origin", "HEAD", "--tags")
        print(f" ✓ Pushed to origin")
        # Create GitHub release
        # The changelog is passed to `gh` via a temp file, then removed.
        changelog_file = REPO_ROOT / ".release_notes.md"
        changelog_file.write_text(changelog)
        result = subprocess.run(
            ["gh", "release", "create", tag_name,
             "--title", f"Hermes Agent v{new_version} ({calver_date})",
             "--notes-file", str(changelog_file)],
            capture_output=True, text=True,
            cwd=str(REPO_ROOT),
        )
        changelog_file.unlink(missing_ok=True)
        if result.returncode == 0:
            print(f" ✓ GitHub release created: {result.stdout.strip()}")
        else:
            # Tag push already succeeded, so tell the user how to finish by hand.
            print(f" ✗ GitHub release failed: {result.stderr}")
            print(f" Tag was created. Create the release manually:")
            print(f" gh release create {tag_name} --title 'Hermes Agent v{new_version} ({calver_date})'")
        print(f"\n 🎉 Release v{new_version} ({tag_name}) published!")
    else:
        print(f"\n{'='*60}")
        print(f" Dry run complete. To publish, add --publish")
        print(f" Example: python scripts/release.py --bump minor --publish")
        print(f"{'='*60}")
# Script entry point.
if __name__ == "__main__":
    main()

View file

@ -0,0 +1,411 @@
#!/usr/bin/env python3
"""
Sample and Compress HuggingFace Datasets
Downloads trajectories from multiple HuggingFace datasets, randomly samples them,
and runs trajectory compression to fit within a target token budget.
Usage:
python scripts/sample_and_compress.py
# Custom sample size
python scripts/sample_and_compress.py --total_samples=5000
# Custom output name
python scripts/sample_and_compress.py --output_name=compressed_16k
"""
import json
import random
import os
from pathlib import Path
from typing import List, Dict, Any, Tuple
import fire
# Load environment variables
from dotenv import load_dotenv
load_dotenv()
# Default datasets to sample from
DEFAULT_DATASETS = [
"NousResearch/swe-terminus-agent-glm-kimi-minimax",
"NousResearch/hermes-agent-megascience-sft1",
"NousResearch/Hermes-Agent-Thinking-GLM-4.7-SFT2",
"NousResearch/Hermes-Agent-Thinking-GLM-4.7-SFT1",
"NousResearch/terminal-tasks-glm-hermes-agent"
]
def load_dataset_from_hf(dataset_name: str) -> List[Dict[str, Any]]:
    """Fetch one HuggingFace dataset and normalize its rows.

    Args:
        dataset_name: HuggingFace dataset name (e.g., "NousResearch/dataset-name")

    Returns:
        List of trajectory entries; rows carrying a "conversations" or
        "messages" field are normalized to {"conversations": [...]}, anything
        else is passed through as a plain dict. Returns [] on load failure.
    """
    from datasets import load_dataset

    print(f" Loading {dataset_name}...")
    try:
        # Try loading with default config
        ds = load_dataset(dataset_name, split="train")
    except Exception as e:
        print(f" ⚠️ Error loading {dataset_name}: {e}")
        return []

    entries = []
    for item in ds:
        # Normalize the different row formats into a single shape.
        if "conversations" in item:
            normalized = {"conversations": item["conversations"]}
        elif "messages" in item:
            # "messages" rows are treated as already conversation-shaped.
            normalized = {"conversations": item["messages"]}
        else:
            # Unknown schema: keep the whole row as-is.
            normalized = dict(item)
        entries.append(normalized)
    print(f" ✅ Loaded {len(entries):,} entries from {dataset_name}")
    return entries
# Global tokenizer for multiprocessing (set in worker init)
_TOKENIZER = None
def _init_tokenizer_worker(tokenizer_name: str):
    """Pool initializer: load the tokenizer once per worker process.

    Stashes the tokenizer in the module-level ``_TOKENIZER`` global so each
    worker pays the load cost a single time instead of once per entry.
    """
    global _TOKENIZER
    from transformers import AutoTokenizer

    _TOKENIZER = AutoTokenizer.from_pretrained(
        tokenizer_name, trust_remote_code=True
    )
def _count_tokens_for_entry(entry: Dict) -> Tuple[Dict, int]:
"""
Count tokens for a single entry (used in parallel processing).
Args:
entry: Trajectory entry with 'conversations' field
Returns:
Tuple of (entry, token_count)
"""
global _TOKENIZER
conversations = entry.get("conversations", [])
if not conversations:
return entry, 0
total = 0
for turn in conversations:
value = turn.get("value", "")
if value:
try:
total += len(_TOKENIZER.encode(value))
except Exception:
# Fallback to character estimate
total += len(value) // 4
return entry, total
def sample_from_datasets(
    datasets: List[str],
    total_samples: int,
    min_tokens: int = 16000,
    tokenizer_name: str = "moonshotai/Kimi-K2-Thinking",
    seed: int = 42,
    num_proc: int = 8
) -> List[Dict[str, Any]]:
    """
    Load all datasets, filter by token count, then randomly sample from combined pool.

    Args:
        datasets: List of HuggingFace dataset names
        total_samples: Total number of samples to collect
        min_tokens: Minimum token count to include (only sample trajectories >= this)
        tokenizer_name: HuggingFace tokenizer for counting tokens
        seed: Random seed for reproducibility
        num_proc: Number of parallel processes for tokenization

    Returns:
        List of sampled trajectory entries, shuffled, each annotated with
        "_source_dataset" and "_original_tokens" metadata keys.
    """
    # Fix: dropped the unused `from functools import partial` import.
    from multiprocessing import Pool

    random.seed(seed)
    print(f"\n📥 Loading {len(datasets)} datasets...")
    print(f" Minimum tokens: {min_tokens:,} (filtering smaller trajectories)")
    print(f" Parallel workers: {num_proc}")
    print()

    # Load ALL entries from all datasets into one pool
    all_entries = []
    for dataset_name in datasets:
        entries = load_dataset_from_hf(dataset_name)
        if not entries:
            print(f" ⚠️ Skipping {dataset_name} (no entries loaded)")
            continue
        # Add source metadata to each entry so sampling provenance is known
        for entry in entries:
            entry["_source_dataset"] = dataset_name
        all_entries.extend(entries)
    print(f"\n📊 Total entries loaded: {len(all_entries):,}")

    # Filter by token count using parallel processing
    print(f"\n🔍 Filtering trajectories with >= {min_tokens:,} tokens (using {num_proc} workers)...")
    filtered_entries = []
    token_counts = []
    # Each worker loads the tokenizer once via the initializer, then counts
    # tokens for entries streamed to it; order of results does not matter.
    with Pool(
        processes=num_proc,
        initializer=_init_tokenizer_worker,
        initargs=(tokenizer_name,)
    ) as pool:
        # Progress is reported every `chunk_size` results (carriage-return line)
        chunk_size = 1000
        processed = 0
        for result in pool.imap_unordered(_count_tokens_for_entry, all_entries, chunksize=100):
            entry, token_count = result
            processed += 1
            if processed % chunk_size == 0:
                print(f" Processed {processed:,}/{len(all_entries):,}...", end="\r")
            if token_count >= min_tokens:
                entry["_original_tokens"] = token_count
                filtered_entries.append(entry)
                token_counts.append(token_count)
    print(f"\n ✅ Found {len(filtered_entries):,} trajectories >= {min_tokens:,} tokens")
    if token_counts:
        avg_tokens = sum(token_counts) / len(token_counts)
        print(f" 📈 Token stats: min={min(token_counts):,}, max={max(token_counts):,}, avg={avg_tokens:,.0f}")

    # Random sample from the filtered pool (or take everything if too small)
    if len(filtered_entries) <= total_samples:
        print(f"\n⚠️ Only {len(filtered_entries):,} trajectories available, using all of them")
        sampled = filtered_entries
    else:
        sampled = random.sample(filtered_entries, total_samples)
        print(f"\n✅ Randomly sampled {len(sampled):,} trajectories from pool of {len(filtered_entries):,}")

    # Show source distribution
    source_counts = {}
    for entry in sampled:
        source = entry.get("_source_dataset", "unknown").split("/")[-1]
        source_counts[source] = source_counts.get(source, 0) + 1
    print(f"\n📌 Sample distribution by source:")
    for source, count in sorted(source_counts.items()):
        print(f" {source}: {count:,}")

    # Shuffle so downstream batching is not biased by load order
    random.shuffle(sampled)
    return sampled
def save_samples_for_compression(
    samples: List[Dict[str, Any]],
    output_dir: Path,
    batch_size: int = 100
):
    """Write samples as batched JSONL files ready for trajectory compression.

    Args:
        samples: List of trajectory entries
        output_dir: Directory to save JSONL files (created if missing)
        batch_size: Number of entries per file
    """
    output_dir.mkdir(parents=True, exist_ok=True)
    # Ceiling division: how many batch files we will emit
    num_batches = (len(samples) + batch_size - 1) // batch_size
    print(f"\n💾 Saving {len(samples)} samples to {output_dir}")
    print(f" Batch size: {batch_size}, Total batches: {num_batches}")
    for batch_index in range(num_batches):
        chunk = samples[batch_index * batch_size:(batch_index + 1) * batch_size]
        target = output_dir / f"batch_{batch_index}.jsonl"
        serialized = [json.dumps(entry, ensure_ascii=False) for entry in chunk]
        with open(target, 'w', encoding='utf-8') as sink:
            sink.write("\n".join(serialized) + "\n")
    print(f" ✅ Saved {num_batches} batch files")
def run_compression(input_dir: Path, output_dir: Path, config_path: str):
    """Compress every JSONL batch in *input_dir* into *output_dir*.

    Args:
        input_dir: Directory containing JSONL files to compress
        output_dir: Directory for compressed output
        config_path: Path to compression config YAML
    """
    # The compressor lives at the repo root, one level above scripts/
    import sys
    sys.path.insert(0, str(Path(__file__).parent.parent))
    from trajectory_compressor import TrajectoryCompressor, CompressionConfig

    print(f"\n🗜️ Running trajectory compression...")
    print(f" Input: {input_dir}")
    print(f" Output: {output_dir}")
    print(f" Config: {config_path}")
    compression_config = CompressionConfig.from_yaml(config_path)
    engine = TrajectoryCompressor(compression_config)
    engine.process_directory(input_dir, output_dir)
def merge_output_to_single_jsonl(input_dir: Path, output_file: Path):
    """Concatenate every JSONL file in *input_dir* into one JSONL file.

    Skips a pre-existing file with the same name as *output_file* so the
    merge never ingests its own output.

    Args:
        input_dir: Directory containing JSONL files
        output_file: Output JSONL file path

    Returns:
        The *output_file* path.
    """
    print(f"\n📦 Merging output files into {output_file.name}...")
    merged = []
    for part in sorted(input_dir.glob("*.jsonl")):
        if part.name == output_file.name:
            continue
        for raw_line in part.read_text(encoding='utf-8').splitlines():
            if raw_line.strip():
                merged.append(json.loads(raw_line.strip()))
    with open(output_file, 'w', encoding='utf-8') as sink:
        sink.writelines(
            json.dumps(entry, ensure_ascii=False) + '\n' for entry in merged
        )
    print(f" ✅ Merged {len(merged):,} entries into {output_file.name}")
    return output_file
def main(
    total_samples: int = 2500,
    output_name: str = "compressed_agentic",
    datasets: str = None,
    config: str = "configs/trajectory_compression.yaml",
    seed: int = 42,
    batch_size: int = 100,
    min_tokens: int = 16000,
    num_proc: int = 8,
    skip_download: bool = False,
):
    """
    Sample trajectories from HuggingFace datasets and run compression.

    Args:
        total_samples: Total number of samples to collect (default: 2500)
        output_name: Name for output directory/file (default: "compressed_agentic")
        datasets: Comma-separated list of dataset names (uses defaults if not provided)
        config: Path to compression config YAML
        seed: Random seed for reproducibility
        batch_size: Number of entries per JSONL file during processing
        min_tokens: Minimum token count to filter trajectories (default: 16000)
        num_proc: Number of parallel workers for tokenization (default: 8)
        skip_download: Skip download and use existing sampled data
    """
    print("=" * 70)
    print("📊 TRAJECTORY SAMPLING AND COMPRESSION")
    print("=" * 70)
    # Parse datasets
    if datasets:
        dataset_list = [d.strip() for d in datasets.split(",")]
    else:
        dataset_list = DEFAULT_DATASETS
    print(f"\n📋 Configuration:")
    print(f" Total samples: {total_samples:,}")
    print(f" Min tokens filter: {min_tokens:,}")
    print(f" Parallel workers: {num_proc}")
    print(f" Datasets: {len(dataset_list)}")
    for ds in dataset_list:
        print(f" - {ds}")
    print(f" Output name: {output_name}")
    print(f" Config: {config}")
    print(f" Seed: {seed}")
    # Setup paths
    base_dir = Path(__file__).parent.parent
    sampled_dir = base_dir / "data" / f"{output_name}_raw"
    compressed_dir = base_dir / "data" / f"{output_name}_batches"
    final_output = base_dir / "data" / f"{output_name}.jsonl"
    # Fail fast: validate the compression config BEFORE the expensive
    # download/tokenization phase (previously this was only checked after
    # sampling, wasting the whole run on a typo'd config path).
    config_path = base_dir / config
    if not config_path.exists():
        print(f"❌ Config not found: {config_path}")
        return
    if not skip_download:
        # Step 1: Download, filter by token count, and sample from combined pool
        samples = sample_from_datasets(
            dataset_list,
            total_samples,
            min_tokens=min_tokens,
            seed=seed,
            num_proc=num_proc
        )
        if not samples:
            print("❌ No samples collected. Exiting.")
            return
        # Step 2: Save to JSONL files
        save_samples_for_compression(samples, sampled_dir, batch_size)
    else:
        print(f"\n⏭️ Skipping download, using existing data in {sampled_dir}")
    # Step 3: Run compression
    run_compression(sampled_dir, compressed_dir, str(config_path))
    # Step 4: Merge into single JSONL file
    merge_output_to_single_jsonl(compressed_dir, final_output)
    print("\n" + "=" * 70)
    print("✅ COMPLETE!")
    print("=" * 70)
    print(f"\n📁 Raw samples: {sampled_dir}")
    print(f"📁 Compressed batches: {compressed_dir}")
    print(f"📁 Final output: {final_output}")
    print(f"\nTo upload to HuggingFace:")
    print(f" huggingface-cli upload NousResearch/{output_name} {final_output}")
# Expose main() as a CLI via python-fire (flags map to keyword arguments).
if __name__ == "__main__":
    fire.Fire(main)

View file

@ -0,0 +1,490 @@
#!/usr/bin/env node
/**
* Hermes Agent WhatsApp Bridge
*
* Standalone Node.js process that connects to WhatsApp via Baileys
* and exposes HTTP endpoints for the Python gateway adapter.
*
* Endpoints (matches gateway/platforms/whatsapp.py expectations):
* GET /messages - Long-poll for new incoming messages
* POST /send - Send a message { chatId, message, replyTo? }
* POST /edit - Edit a sent message { chatId, messageId, message }
* POST /send-media - Send media natively { chatId, filePath, mediaType?, caption?, fileName? }
* POST /typing - Send typing indicator { chatId }
* GET /chat/:id - Get chat info
* GET /health - Health check
*
* Usage:
* node bridge.js --port 3000 --session ~/.hermes/whatsapp/session
*/
import { makeWASocket, useMultiFileAuthState, DisconnectReason, fetchLatestBaileysVersion, downloadMediaMessage } from '@whiskeysockets/baileys';
import express from 'express';
import { Boom } from '@hapi/boom';
import pino from 'pino';
import path from 'path';
import { mkdirSync, readFileSync, writeFileSync, existsSync, readdirSync } from 'fs';
import { randomBytes } from 'crypto';
import qrcode from 'qrcode-terminal';
// Parse CLI args
const args = process.argv.slice(2);
/**
 * Look up a `--name value` pair in the CLI argument list.
 * Returns defaultVal when the flag is absent or has no following value.
 */
function getArg(name, defaultVal) {
  const flagIndex = args.indexOf(`--${name}`);
  if (flagIndex === -1) return defaultVal;
  const value = args[flagIndex + 1];
  return value ? value : defaultVal;
}
// Verbose event logging, enabled when WHATSAPP_DEBUG is a truthy string
// ("1"/"true"/"yes"/"on", case-insensitive).
const WHATSAPP_DEBUG =
  typeof process !== 'undefined' &&
  process.env &&
  typeof process.env.WHATSAPP_DEBUG === 'string' &&
  ['1', 'true', 'yes', 'on'].includes(process.env.WHATSAPP_DEBUG.toLowerCase());
// HTTP port the bridge API binds to (--port flag).
const PORT = parseInt(getArg('port', '3000'), 10);
// Directory where Baileys persists auth credentials (--session flag).
const SESSION_DIR = getArg('session', path.join(process.env.HOME || '~', '.hermes', 'whatsapp', 'session'));
// Inbound images are downloaded here so the Python gateway can read them from disk.
const IMAGE_CACHE_DIR = path.join(process.env.HOME || '~', '.hermes', 'image_cache');
// --pair-only: connect, show QR, save credentials, exit (no HTTP server).
const PAIR_ONLY = args.includes('--pair-only');
const WHATSAPP_MODE = getArg('mode', process.env.WHATSAPP_MODE || 'self-chat'); // "bot" or "self-chat"
// Optional phone-number allowlist; empty list means every sender is accepted.
const ALLOWED_USERS = (process.env.WHATSAPP_ALLOWED_USERS || '').split(',').map(s => s.trim()).filter(Boolean);
// Prefix prepended to every outgoing message; also used to recognize our own
// replies in self-chat mode. WHATSAPP_REPLY_PREFIX overrides it (an empty
// string disables the prefix entirely; literal "\n" sequences become newlines).
const DEFAULT_REPLY_PREFIX = '⚕ *Hermes Agent*\n────────────\n';
const REPLY_PREFIX = process.env.WHATSAPP_REPLY_PREFIX === undefined
  ? DEFAULT_REPLY_PREFIX
  : process.env.WHATSAPP_REPLY_PREFIX.replace(/\\n/g, '\n');
// Apply the reply prefix (if any) to an outgoing message body.
function formatOutgoingMessage(message) {
  return REPLY_PREFIX ? `${REPLY_PREFIX}${message}` : message;
}
mkdirSync(SESSION_DIR, { recursive: true });
// Build LID → phone reverse map from session files (lid-mapping-{phone}.json)
// Build LID → phone reverse map from session files (lid-mapping-{phone}.json).
// Any filesystem/parse error yields whatever was collected so far (best-effort).
function buildLidMap() {
  const reverseMap = {};
  try {
    const sessionFiles = readdirSync(SESSION_DIR);
    for (const fileName of sessionFiles) {
      const match = fileName.match(/^lid-mapping-(\d+)\.json$/);
      if (!match) continue;
      const lidValue = JSON.parse(readFileSync(path.join(SESSION_DIR, fileName), 'utf8'));
      if (lidValue) reverseMap[String(lidValue)] = match[1];
    }
  } catch {}
  return reverseMap;
}
// LID → phone reverse map; rebuilt on every credentials update.
let lidToPhone = buildLidMap();
const logger = pino({ level: 'warn' });
// Message queue for polling (drained by GET /messages)
const messageQueue = [];
const MAX_QUEUE_SIZE = 100;
// Track recently sent message IDs to prevent echo-back loops with media
const recentlySentIds = new Set();
const MAX_RECENT_IDS = 50;
// Active Baileys socket and its coarse connection state.
let sock = null;
let connectionState = 'disconnected';
/**
 * Create the Baileys socket, register all event handlers, and connect.
 * Called once at startup and again via setTimeout whenever the connection
 * closes for any reason other than an explicit logout.
 */
async function startSocket() {
  const { state, saveCreds } = await useMultiFileAuthState(SESSION_DIR);
  const { version } = await fetchLatestBaileysVersion();
  sock = makeWASocket({
    version,
    auth: state,
    logger,
    printQRInTerminal: false,
    browser: ['Hermes Agent', 'Chrome', '120.0'],
    syncFullHistory: false,
    markOnlineOnConnect: false,
    // Required for Baileys 7.x: without this, incoming messages that need
    // E2EE session re-establishment are silently dropped (msg.message === null)
    getMessage: async (key) => {
      // We don't maintain a message store, so return a placeholder.
      // This is enough for Baileys to complete the retry handshake.
      return { conversation: '' };
    },
  });
  // Persist credentials and refresh the LID→phone map (new lid-mapping-*.json
  // files may have been written alongside the creds).
  sock.ev.on('creds.update', () => { saveCreds(); lidToPhone = buildLidMap(); });
  sock.ev.on('connection.update', (update) => {
    const { connection, lastDisconnect, qr } = update;
    if (qr) {
      console.log('\n📱 Scan this QR code with WhatsApp on your phone:\n');
      qrcode.generate(qr, { small: true });
      console.log('\nWaiting for scan...\n');
    }
    if (connection === 'close') {
      const reason = new Boom(lastDisconnect?.error)?.output?.statusCode;
      connectionState = 'disconnected';
      if (reason === DisconnectReason.loggedOut) {
        console.log('❌ Logged out. Delete session and restart to re-authenticate.');
        process.exit(1);
      } else {
        // 515 = restart requested (common after pairing). Always reconnect.
        if (reason === 515) {
          console.log('↻ WhatsApp requested restart (code 515). Reconnecting...');
        } else {
          console.log(`⚠️ Connection closed (reason: ${reason}). Reconnecting in 3s...`);
        }
        setTimeout(startSocket, reason === 515 ? 1000 : 3000);
      }
    } else if (connection === 'open') {
      connectionState = 'connected';
      console.log('✅ WhatsApp connected!');
      if (PAIR_ONLY) {
        console.log('✅ Pairing complete. Credentials saved.');
        // Give Baileys a moment to flush creds, then exit cleanly
        setTimeout(() => process.exit(0), 2000);
      }
    }
  });
  sock.ev.on('messages.upsert', async ({ messages, type }) => {
    // In self-chat mode, your own messages commonly arrive as 'append' rather
    // than 'notify'. Accept both and filter agent echo-backs below.
    if (type !== 'notify' && type !== 'append') return;
    for (const msg of messages) {
      if (!msg.message) continue;
      const chatId = msg.key.remoteJid;
      if (WHATSAPP_DEBUG) {
        try {
          console.log(JSON.stringify({
            event: 'upsert', type,
            fromMe: !!msg.key.fromMe, chatId,
            senderId: msg.key.participant || chatId,
            messageKeys: Object.keys(msg.message || {}),
          }));
        } catch {}
      }
      const senderId = msg.key.participant || chatId;
      const isGroup = chatId.endsWith('@g.us');
      const senderNumber = senderId.replace(/@.*/, '');
      // Handle fromMe messages based on mode
      if (msg.key.fromMe) {
        if (isGroup || chatId.includes('status')) continue;
        if (WHATSAPP_MODE === 'bot') {
          // Bot mode: separate number. ALL fromMe are echo-backs of our own replies — skip.
          continue;
        }
        // Self-chat mode: only allow messages in the user's own self-chat
        // WhatsApp now uses LID (Linked Identity Device) format: 67427329167522@lid
        // AND classic format: 34652029134@s.whatsapp.net
        // sock.user has both: { id: "number:10@s.whatsapp.net", lid: "lid_number:10@lid" }
        const myNumber = (sock.user?.id || '').replace(/:.*@/, '@').replace(/@.*/, '');
        const myLid = (sock.user?.lid || '').replace(/:.*@/, '@').replace(/@.*/, '');
        const chatNumber = chatId.replace(/@.*/, '');
        const isSelfChat = (myNumber && chatNumber === myNumber) || (myLid && chatNumber === myLid);
        if (!isSelfChat) continue;
      }
      // Check allowlist for messages from others (resolve LID → phone if needed)
      if (!msg.key.fromMe && ALLOWED_USERS.length > 0) {
        const resolvedNumber = lidToPhone[senderNumber] || senderNumber;
        if (!ALLOWED_USERS.includes(resolvedNumber)) continue;
      }
      // Extract message body
      let body = '';
      let hasMedia = false;
      let mediaType = '';
      const mediaUrls = [];
      if (msg.message.conversation) {
        body = msg.message.conversation;
      } else if (msg.message.extendedTextMessage?.text) {
        body = msg.message.extendedTextMessage.text;
      } else if (msg.message.imageMessage) {
        body = msg.message.imageMessage.caption || '';
        hasMedia = true;
        mediaType = 'image';
        // Download the image into the local cache so the Python gateway
        // can read it from disk via mediaUrls.
        try {
          const buf = await downloadMediaMessage(msg, 'buffer', {}, { logger, reuploadRequest: sock.updateMediaMessage });
          const mime = msg.message.imageMessage.mimetype || 'image/jpeg';
          const extMap = { 'image/jpeg': '.jpg', 'image/png': '.png', 'image/webp': '.webp', 'image/gif': '.gif' };
          const ext = extMap[mime] || '.jpg';
          mkdirSync(IMAGE_CACHE_DIR, { recursive: true });
          const filePath = path.join(IMAGE_CACHE_DIR, `img_${randomBytes(6).toString('hex')}${ext}`);
          writeFileSync(filePath, buf);
          mediaUrls.push(filePath);
        } catch (err) {
          console.error('[bridge] Failed to download image:', err.message);
        }
      } else if (msg.message.videoMessage) {
        body = msg.message.videoMessage.caption || '';
        hasMedia = true;
        mediaType = 'video';
      } else if (msg.message.audioMessage || msg.message.pttMessage) {
        hasMedia = true;
        mediaType = msg.message.pttMessage ? 'ptt' : 'audio';
      } else if (msg.message.documentMessage) {
        body = msg.message.documentMessage.caption || msg.message.documentMessage.fileName || '';
        hasMedia = true;
        mediaType = 'document';
      }
      // For media without caption, use a placeholder so the API message is never empty
      if (hasMedia && !body) {
        body = `[${mediaType} received]`;
      }
      // Ignore Hermes' own reply messages in self-chat mode to avoid loops.
      if (msg.key.fromMe && ((REPLY_PREFIX && body.startsWith(REPLY_PREFIX)) || recentlySentIds.has(msg.key.id))) {
        if (WHATSAPP_DEBUG) {
          try { console.log(JSON.stringify({ event: 'ignored', reason: 'agent_echo', chatId, messageId: msg.key.id })); } catch {}
        }
        continue;
      }
      // Skip empty messages
      if (!body && !hasMedia) {
        if (WHATSAPP_DEBUG) {
          try {
            console.log(JSON.stringify({ event: 'ignored', reason: 'empty', chatId, messageKeys: Object.keys(msg.message || {}) }));
          } catch (err) {
            console.error('Failed to log empty message event:', err);
          }
        }
        continue;
      }
      // Normalized event consumed by the Python gateway via GET /messages.
      const event = {
        messageId: msg.key.id,
        chatId,
        senderId,
        senderName: msg.pushName || senderNumber,
        chatName: isGroup ? (chatId.split('@')[0]) : (msg.pushName || senderNumber),
        isGroup,
        body,
        hasMedia,
        mediaType,
        mediaUrls,
        timestamp: msg.messageTimestamp,
      };
      messageQueue.push(event);
      // Bounded queue: drop the oldest event if the gateway falls behind.
      if (messageQueue.length > MAX_QUEUE_SIZE) {
        messageQueue.shift();
      }
    }
  });
}
// HTTP server
const app = express();
app.use(express.json());
// Poll for new messages (long-poll style).
// splice() drains the whole queue atomically: each call returns AND removes
// every pending event, so the gateway never sees a message twice.
app.get('/messages', (req, res) => {
  const msgs = messageQueue.splice(0, messageQueue.length);
  res.json(msgs);
});
// Send a text message to a chat, applying the configured reply prefix.
app.post('/send', async (req, res) => {
  if (!sock || connectionState !== 'connected') {
    return res.status(503).json({ error: 'Not connected to WhatsApp' });
  }
  const { chatId, message, replyTo } = req.body;
  if (!chatId || !message) {
    return res.status(400).json({ error: 'chatId and message are required' });
  }
  try {
    const outcome = await sock.sendMessage(chatId, { text: formatOutgoingMessage(message) });
    const sentId = outcome?.key?.id;
    // Remember the sent ID so the upsert handler can skip the echo-back.
    if (sentId) {
      recentlySentIds.add(sentId);
      if (recentlySentIds.size > MAX_RECENT_IDS) {
        const oldest = recentlySentIds.values().next().value;
        recentlySentIds.delete(oldest);
      }
    }
    res.json({ success: true, messageId: sentId });
  } catch (sendError) {
    res.status(500).json({ error: sendError.message });
  }
});
// Edit a previously sent message in place (WhatsApp edit, not a new message).
app.post('/edit', async (req, res) => {
  if (!sock || connectionState !== 'connected') {
    return res.status(503).json({ error: 'Not connected to WhatsApp' });
  }
  const { chatId, messageId, message } = req.body;
  const missingField = !chatId || !messageId || !message;
  if (missingField) {
    return res.status(400).json({ error: 'chatId, messageId, and message are required' });
  }
  try {
    // Reconstruct the key of our own message so Baileys targets the edit.
    const targetKey = { id: messageId, fromMe: true, remoteJid: chatId };
    await sock.sendMessage(chatId, { text: formatOutgoingMessage(message), edit: targetKey });
    res.json({ success: true });
  } catch (editError) {
    res.status(500).json({ error: editError.message });
  }
});
// MIME type map and media type inference for /send-media.
// Keys are lowercase file extensions; call sites fall back to a sensible
// default MIME type when the extension is not listed here.
const MIME_MAP = {
  jpg: 'image/jpeg', jpeg: 'image/jpeg', png: 'image/png',
  webp: 'image/webp', gif: 'image/gif',
  mp4: 'video/mp4', mov: 'video/quicktime', avi: 'video/x-msvideo',
  mkv: 'video/x-matroska', '3gp': 'video/3gpp',
  pdf: 'application/pdf',
  doc: 'application/msword',
  docx: 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
  xlsx: 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
};
function inferMediaType(ext) {
if (['jpg', 'jpeg', 'png', 'webp', 'gif'].includes(ext)) return 'image';
if (['mp4', 'mov', 'avi', 'mkv', '3gp'].includes(ext)) return 'video';
if (['ogg', 'opus', 'mp3', 'wav', 'm4a'].includes(ext)) return 'audio';
return 'document';
}
// Send media (image, video, document) natively
app.post('/send-media', async (req, res) => {
  if (!sock || connectionState !== 'connected') {
    return res.status(503).json({ error: 'Not connected to WhatsApp' });
  }
  const { chatId, filePath, mediaType, caption, fileName } = req.body;
  if (!chatId || !filePath) {
    return res.status(400).json({ error: 'chatId and filePath are required' });
  }
  try {
    if (!existsSync(filePath)) {
      return res.status(404).json({ error: `File not found: ${filePath}` });
    }
    const buffer = readFileSync(filePath);
    const ext = filePath.toLowerCase().split('.').pop();
    // Caller may force a media type; otherwise infer from the file extension.
    const type = mediaType || inferMediaType(ext);
    let msgPayload;
    switch (type) {
      case 'image':
        msgPayload = { image: buffer, caption: caption || undefined, mimetype: MIME_MAP[ext] || 'image/jpeg' };
        break;
      case 'video':
        msgPayload = { video: buffer, caption: caption || undefined, mimetype: MIME_MAP[ext] || 'video/mp4' };
        break;
      case 'audio': {
        // ogg/opus files are sent as push-to-talk voice notes; other audio as plain files.
        const audioMime = (ext === 'ogg' || ext === 'opus') ? 'audio/ogg; codecs=opus' : 'audio/mpeg';
        msgPayload = { audio: buffer, mimetype: audioMime, ptt: ext === 'ogg' || ext === 'opus' };
        break;
      }
      case 'document':
      default:
        msgPayload = {
          document: buffer,
          fileName: fileName || path.basename(filePath),
          caption: caption || undefined,
          mimetype: MIME_MAP[ext] || 'application/octet-stream',
        };
        break;
    }
    const sent = await sock.sendMessage(chatId, msgPayload);
    // Track sent message ID to prevent echo-back loops
    if (sent?.key?.id) {
      recentlySentIds.add(sent.key.id);
      if (recentlySentIds.size > MAX_RECENT_IDS) {
        recentlySentIds.delete(recentlySentIds.values().next().value);
      }
    }
    res.json({ success: true, messageId: sent?.key?.id });
  } catch (err) {
    res.status(500).json({ error: err.message });
  }
});
// Typing indicator: best-effort, so failures report success:false (HTTP 200).
app.post('/typing', async (req, res) => {
  if (!sock || connectionState !== 'connected') {
    return res.status(503).json({ error: 'Not connected' });
  }
  const { chatId } = req.body;
  if (!chatId) return res.status(400).json({ error: 'chatId required' });
  try {
    await sock.sendPresenceUpdate('composing', chatId);
    res.json({ success: true });
  } catch (presenceError) {
    res.json({ success: false });
  }
});
// Chat info: group metadata when available, otherwise a bare fallback record.
app.get('/chat/:id', async (req, res) => {
  const chatId = req.params.id;
  const isGroup = chatId.endsWith('@g.us');
  if (isGroup && sock) {
    try {
      const groupInfo = await sock.groupMetadata(chatId);
      const memberIds = groupInfo.participants.map(p => p.id);
      return res.json({
        name: groupInfo.subject,
        isGroup: true,
        participants: memberIds,
      });
    } catch {
      // Fall through to default
    }
  }
  const fallback = {
    name: chatId.replace(/@.*/, ''),
    isGroup,
    participants: [],
  };
  res.json(fallback);
});
// Health check: connection state plus queue depth and process uptime.
app.get('/health', (req, res) => {
  const payload = {
    status: connectionState,
    queueLength: messageQueue.length,
    uptime: process.uptime(),
  };
  res.json(payload);
});
// Start
if (PAIR_ONLY) {
  // Pair-only mode: just connect, show QR, save creds, exit. No HTTP server.
  console.log('📱 WhatsApp pairing mode');
  console.log(`📁 Session: ${SESSION_DIR}`);
  console.log();
  startSocket();
} else {
  // Normal mode: bind the loopback-only HTTP API first, then connect to WhatsApp.
  app.listen(PORT, '127.0.0.1', () => {
    console.log(`🌉 WhatsApp bridge listening on port ${PORT} (mode: ${WHATSAPP_MODE})`);
    console.log(`📁 Session stored in: ${SESSION_DIR}`);
    if (ALLOWED_USERS.length > 0) {
      console.log(`🔒 Allowed users: ${ALLOWED_USERS.join(', ')}`);
    } else {
      console.log(`⚠️ No WHATSAPP_ALLOWED_USERS set — all messages will be processed`);
    }
    console.log();
    startSocket();
  });
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,16 @@
{
"name": "hermes-whatsapp-bridge",
"version": "1.0.0",
"description": "WhatsApp bridge for Hermes Agent using Baileys",
"private": true,
"type": "module",
"scripts": {
"start": "node bridge.js"
},
"dependencies": {
"@whiskeysockets/baileys": "7.0.0-rc.9",
"express": "^4.21.0",
"qrcode-terminal": "^0.12.0",
"pino": "^9.0.0"
}
}