feat: overhaul context length detection with models.dev and provider-aware resolution (#2158)

Replace the fragile hardcoded context length system with a multi-source
resolution chain that correctly identifies context windows per provider.

Key changes:

- New agent/models_dev.py: Fetches and caches the models.dev registry
  (3800+ models across 100+ providers with per-provider context windows).
  In-memory cache (1hr TTL) + disk cache for cold starts.
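
  A minimal sketch of the fetch/cache layering, assuming the registry is served as a
  single JSON document (the URL, TTL constant, and disk-cache path below are
  assumptions; the function and cache-variable names mirror the new tests):

    import json
    import time
    from pathlib import Path

    import requests

    MODELS_DEV_URL = "https://models.dev/api.json"   # assumed endpoint
    CACHE_TTL = 3600                                  # 1 hour in-memory TTL
    DISK_CACHE = Path.home() / ".cache" / "hermes" / "models_dev.json"  # assumed path

    _models_dev_cache: dict = {}
    _models_dev_cache_time: float = 0.0

    def fetch_models_dev(force_refresh: bool = False) -> dict:
        """Return the registry: memory first, then network, then disk fallback."""
        global _models_dev_cache, _models_dev_cache_time
        fresh = time.time() - _models_dev_cache_time < CACHE_TTL
        if not force_refresh and _models_dev_cache and fresh:
            return _models_dev_cache
        try:
            resp = requests.get(MODELS_DEV_URL, timeout=10)
            resp.raise_for_status()
            _models_dev_cache = resp.json()
            _models_dev_cache_time = time.time()
            _save_disk_cache(_models_dev_cache)
        except Exception:
            # Network failure: serve whatever we still have (stale memory or disk).
            return _models_dev_cache or _load_disk_cache()
        return _models_dev_cache

    def _save_disk_cache(data: dict) -> None:
        DISK_CACHE.parent.mkdir(parents=True, exist_ok=True)
        DISK_CACHE.write_text(json.dumps(data))

    def _load_disk_cache() -> dict:
        try:
            return json.loads(DISK_CACHE.read_text())
        except (OSError, ValueError):
            return {}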

- Rewritten get_model_context_length() resolution chain:
  0. Config override (model.context_length)
  1. Custom providers per-model context_length
  2. Persistent disk cache
  3. Endpoint /models (local servers)
  4. Anthropic /v1/models API (max_input_tokens, API-key only)
  5. OpenRouter live API (existing, unchanged)
  6. Nous suffix-match via OpenRouter (dot/dash normalization)
  7. models.dev registry lookup (provider-aware)
  8. Thin hardcoded defaults (broad family patterns)
  9. 128K fallback (was 2M)
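
  A rough skeleton of how the chain composes (every "lambda: None" below is a
  stand-in for a real helper in the agent package; the signature is simplified):

    def get_model_context_length(model, provider=None, config=None):
        config = config or {}

        def from_config():  # 0. explicit config override wins outright
            return (config.get("model") or {}).get("context_length")

        resolvers = [
            from_config,
            lambda: None,  # 1. custom_providers per-model context_length
            lambda: None,  # 2. persistent disk cache
            lambda: None,  # 3. endpoint /models (local servers)
            lambda: None,  # 4. Anthropic /v1/models (max_input_tokens)
            lambda: None,  # 5. OpenRouter live API
            lambda: None,  # 6. Nous suffix-match via OpenRouter
            lambda: None,  # 7. models.dev registry (provider-aware)
            lambda: None,  # 8. thin hardcoded family defaults
        ]
        for resolve in resolvers:
            ctx = resolve()
            if ctx:
                return ctx
        return 128_000     # 9. final fallback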

- Provider-aware context: same model now correctly resolves to different
  context windows per provider (e.g. claude-opus-4.6: 1M on Anthropic,
  128K on GitHub Copilot). Provider name flows through ContextCompressor.
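
  For example, with the new lookup (values as stated above and exercised in the
  new tests):

    from agent.models_dev import lookup_models_dev_context

    lookup_models_dev_context("anthropic", "claude-opus-4-6")  # 1_000_000 (Anthropic direct)
    lookup_models_dev_context("copilot", "claude-opus-4.6")    # 128_000 (GitHub Copilot)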

- DEFAULT_CONTEXT_LENGTHS shrunk from 80+ entries to ~16 broad patterns.
  models.dev replaces the per-model hardcoding.
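
  Roughly the shape of the thinned-out table; the patterns and values shown here
  are illustrative, not a copy of the real dict:

    DEFAULT_CONTEXT_LENGTHS = {
        "claude-": 200_000,
        "gpt-4o": 128_000,
        "gemini-": 1_000_000,
        "llama-3": 128_000,
        "deepseek": 128_000,
        "mistral": 32_000,
        # ... ~16 broad family patterns, matched against the model id
    }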

- CONTEXT_PROBE_TIERS changed from [2M, 1M, 512K, 200K, 128K, 64K, 32K]
  to [128K, 64K, 32K, 16K, 8K]. Unknown models no longer start at 2M.
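
  The new ladder and a sketch of the step-down helper (behavior matches the
  updated tests in this commit; the real implementation may differ in detail):

    CONTEXT_PROBE_TIERS = [128_000, 64_000, 32_000, 16_000, 8_000]

    def get_next_probe_tier(current):
        """Return the largest tier strictly below current, or None at/below the 8K floor."""
        for tier in CONTEXT_PROBE_TIERS:
            if tier < current:
                return tier
        return None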

- hermes model: prompts for context_length when configuring custom
  endpoints. Supports shorthand (32k, 128K). Saved to custom_providers
  per-model config.
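
  A hypothetical parser for the shorthand handling (the real prompt code may
  differ; only the k-suffix and plain-integer forms are described above):

    def parse_context_length(raw):
        raw = raw.strip().lower()
        if not raw:
            return None                      # blank = keep auto-detection
        if raw.endswith("k"):
            return int(float(raw[:-1]) * 1000)
        return int(raw)

    parse_context_length("32k")    # 32000
    parse_context_length("128K")   # 128000
    parse_context_length("")       # None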

- custom_providers schema extended with optional models dict for
  per-model context_length (backward compatible).
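
  Shown as the in-memory dict, an entry might look like this (the base_url key
  and provider name are placeholders; only the per-model context_length is new):

    custom_providers = {
        "my-local-endpoint": {
            "base_url": "http://localhost:8000/v1",
            "models": {                              # new, optional
                "llm": {"context_length": 32_000},
            },
        },
    }
    # Entries without a "models" dict keep working exactly as before.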

- Nous Portal: suffix-matches bare IDs (claude-opus-4-6) against
  OpenRouter's prefixed IDs (anthropic/claude-opus-4.6) with dot/dash
  normalization. Handles all 15 current Nous models.
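
  A sketch of the matching rule (helper names are illustrative):

    def _normalize(model_id):
        return model_id.lower().replace(".", "-")

    def nous_suffix_match(bare_id, openrouter_ids):
        """Match a bare Nous id against OpenRouter's provider-prefixed ids."""
        target = _normalize(bare_id)                        # "claude-opus-4-6"
        for full_id in openrouter_ids:
            if _normalize(full_id.split("/", 1)[-1]) == target:
                return full_id                              # "anthropic/claude-opus-4.6"
        return None

    nous_suffix_match("claude-opus-4-6", ["anthropic/claude-opus-4.6"])
    # -> "anthropic/claude-opus-4.6", whose OpenRouter context length is then reused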

- Anthropic direct: queries /v1/models for max_input_tokens. Only works
  with regular API keys (sk-ant-api*), not OAuth tokens. Falls through
  to models.dev for OAuth users.
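
  A hedged sketch of that step; the max_input_tokens field follows this commit's
  description, and the exact response shape is an assumption:

    import requests

    def anthropic_context_length(api_key, model):
        if not api_key.startswith("sk-ant-api"):
            return None            # OAuth token: skip, fall through to models.dev
        resp = requests.get(
            "https://api.anthropic.com/v1/models",
            headers={"x-api-key": api_key, "anthropic-version": "2023-06-01"},
            timeout=10,
        )
        resp.raise_for_status()
        for entry in resp.json().get("data", []):
            if entry.get("id") == model:
                return entry.get("max_input_tokens")
        return None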

Tests: 5574 passed (18 new tests for models_dev + updated probe tiers)
Docs: Updated configuration.md context length section, AGENTS.md

Co-authored-by: Test <test@test.com>
Teknium authored 2026-03-20 06:04:33 -07:00 (committed via GitHub)
parent b7b585656b
commit 88643a1ba9
13 changed files with 662 additions and 246 deletions


@@ -472,35 +472,35 @@ class TestContextProbeTiers:
        for i in range(len(CONTEXT_PROBE_TIERS) - 1):
            assert CONTEXT_PROBE_TIERS[i] > CONTEXT_PROBE_TIERS[i + 1]
    def test_first_tier_is_2m(self):
        assert CONTEXT_PROBE_TIERS[0] == 2_000_000
    def test_first_tier_is_128k(self):
        assert CONTEXT_PROBE_TIERS[0] == 128_000
    def test_last_tier_is_32k(self):
        assert CONTEXT_PROBE_TIERS[-1] == 32_000
    def test_last_tier_is_8k(self):
        assert CONTEXT_PROBE_TIERS[-1] == 8_000
class TestGetNextProbeTier:
    def test_from_2m(self):
        assert get_next_probe_tier(2_000_000) == 1_000_000
    def test_from_1m(self):
        assert get_next_probe_tier(1_000_000) == 512_000
    def test_from_128k(self):
        assert get_next_probe_tier(128_000) == 64_000
    def test_from_32k_returns_none(self):
        assert get_next_probe_tier(32_000) is None
    def test_from_64k(self):
        assert get_next_probe_tier(64_000) == 32_000
    def test_from_32k(self):
        assert get_next_probe_tier(32_000) == 16_000
    def test_from_8k_returns_none(self):
        assert get_next_probe_tier(8_000) is None
    def test_from_below_min_returns_none(self):
        assert get_next_probe_tier(16_000) is None
        assert get_next_probe_tier(4_000) is None
    def test_from_arbitrary_value(self):
        assert get_next_probe_tier(300_000) == 200_000
        assert get_next_probe_tier(100_000) == 64_000
    def test_above_max_tier(self):
        """Value above 2M should return 2M."""
        assert get_next_probe_tier(5_000_000) == 2_000_000
        """Value above 128K should return 128K."""
        assert get_next_probe_tier(500_000) == 128_000
    def test_zero_returns_none(self):
        assert get_next_probe_tier(0) is None


@@ -0,0 +1,197 @@
"""Tests for agent.models_dev — models.dev registry integration."""
import json
from unittest.mock import patch, MagicMock
import pytest
from agent.models_dev import (
    PROVIDER_TO_MODELS_DEV,
    _extract_context,
    fetch_models_dev,
    lookup_models_dev_context,
)
SAMPLE_REGISTRY = {
    "anthropic": {
        "id": "anthropic",
        "name": "Anthropic",
        "models": {
            "claude-opus-4-6": {
                "id": "claude-opus-4-6",
                "limit": {"context": 1000000, "output": 128000},
            },
            "claude-sonnet-4-6": {
                "id": "claude-sonnet-4-6",
                "limit": {"context": 1000000, "output": 64000},
            },
            "claude-sonnet-4-0": {
                "id": "claude-sonnet-4-0",
                "limit": {"context": 200000, "output": 64000},
            },
        },
    },
    "github-copilot": {
        "id": "github-copilot",
        "name": "GitHub Copilot",
        "models": {
            "claude-opus-4.6": {
                "id": "claude-opus-4.6",
                "limit": {"context": 128000, "output": 32000},
            },
        },
    },
    "kilo": {
        "id": "kilo",
        "name": "Kilo Gateway",
        "models": {
            "anthropic/claude-sonnet-4.6": {
                "id": "anthropic/claude-sonnet-4.6",
                "limit": {"context": 1000000, "output": 128000},
            },
        },
    },
    "deepseek": {
        "id": "deepseek",
        "name": "DeepSeek",
        "models": {
            "deepseek-chat": {
                "id": "deepseek-chat",
                "limit": {"context": 128000, "output": 8192},
            },
        },
    },
    "audio-only": {
        "id": "audio-only",
        "models": {
            "tts-model": {
                "id": "tts-model",
                "limit": {"context": 0, "output": 0},
            },
        },
    },
}
class TestProviderMapping:
    def test_all_mapped_providers_are_strings(self):
        for hermes_id, mdev_id in PROVIDER_TO_MODELS_DEV.items():
            assert isinstance(hermes_id, str)
            assert isinstance(mdev_id, str)
    def test_known_providers_mapped(self):
        assert PROVIDER_TO_MODELS_DEV["anthropic"] == "anthropic"
        assert PROVIDER_TO_MODELS_DEV["copilot"] == "github-copilot"
        assert PROVIDER_TO_MODELS_DEV["kilocode"] == "kilo"
        assert PROVIDER_TO_MODELS_DEV["ai-gateway"] == "vercel"
    def test_unmapped_provider_not_in_dict(self):
        assert "nous" not in PROVIDER_TO_MODELS_DEV
        assert "openai-codex" not in PROVIDER_TO_MODELS_DEV
class TestExtractContext:
    def test_valid_entry(self):
        assert _extract_context({"limit": {"context": 128000}}) == 128000
    def test_zero_context_returns_none(self):
        assert _extract_context({"limit": {"context": 0}}) is None
    def test_missing_limit_returns_none(self):
        assert _extract_context({"id": "test"}) is None
    def test_missing_context_returns_none(self):
        assert _extract_context({"limit": {"output": 8192}}) is None
    def test_non_dict_returns_none(self):
        assert _extract_context("not a dict") is None
    def test_float_context_coerced_to_int(self):
        assert _extract_context({"limit": {"context": 131072.0}}) == 131072
class TestLookupModelsDevContext:
    @patch("agent.models_dev.fetch_models_dev")
    def test_exact_match(self, mock_fetch):
        mock_fetch.return_value = SAMPLE_REGISTRY
        assert lookup_models_dev_context("anthropic", "claude-opus-4-6") == 1000000
    @patch("agent.models_dev.fetch_models_dev")
    def test_case_insensitive_match(self, mock_fetch):
        mock_fetch.return_value = SAMPLE_REGISTRY
        assert lookup_models_dev_context("anthropic", "Claude-Opus-4-6") == 1000000
    @patch("agent.models_dev.fetch_models_dev")
    def test_provider_not_mapped(self, mock_fetch):
        mock_fetch.return_value = SAMPLE_REGISTRY
        assert lookup_models_dev_context("nous", "some-model") is None
    @patch("agent.models_dev.fetch_models_dev")
    def test_model_not_found(self, mock_fetch):
        mock_fetch.return_value = SAMPLE_REGISTRY
        assert lookup_models_dev_context("anthropic", "nonexistent-model") is None
    @patch("agent.models_dev.fetch_models_dev")
    def test_provider_aware_context(self, mock_fetch):
        """Same model, different context per provider."""
        mock_fetch.return_value = SAMPLE_REGISTRY
        # Anthropic direct: 1M
        assert lookup_models_dev_context("anthropic", "claude-opus-4-6") == 1000000
        # GitHub Copilot: only 128K for same model
        assert lookup_models_dev_context("copilot", "claude-opus-4.6") == 128000
    @patch("agent.models_dev.fetch_models_dev")
    def test_zero_context_filtered(self, mock_fetch):
        mock_fetch.return_value = SAMPLE_REGISTRY
        # audio-only is not a mapped provider, but test the filtering directly
        data = SAMPLE_REGISTRY["audio-only"]["models"]["tts-model"]
        assert _extract_context(data) is None
    @patch("agent.models_dev.fetch_models_dev")
    def test_empty_registry(self, mock_fetch):
        mock_fetch.return_value = {}
        assert lookup_models_dev_context("anthropic", "claude-opus-4-6") is None
class TestFetchModelsDev:
    @patch("agent.models_dev.requests.get")
    def test_fetch_success(self, mock_get):
        mock_resp = MagicMock()
        mock_resp.status_code = 200
        mock_resp.json.return_value = SAMPLE_REGISTRY
        mock_resp.raise_for_status = MagicMock()
        mock_get.return_value = mock_resp
        # Clear caches
        import agent.models_dev as md
        md._models_dev_cache = {}
        md._models_dev_cache_time = 0
        with patch.object(md, "_save_disk_cache"):
            result = fetch_models_dev(force_refresh=True)
        assert "anthropic" in result
        assert len(result) == len(SAMPLE_REGISTRY)
    @patch("agent.models_dev.requests.get")
    def test_fetch_failure_returns_stale_cache(self, mock_get):
        mock_get.side_effect = Exception("network error")
        import agent.models_dev as md
        md._models_dev_cache = SAMPLE_REGISTRY
        md._models_dev_cache_time = 0  # expired
        with patch.object(md, "_load_disk_cache", return_value=SAMPLE_REGISTRY):
            result = fetch_models_dev(force_refresh=True)
        assert "anthropic" in result
    @patch("agent.models_dev.requests.get")
    def test_in_memory_cache_used(self, mock_get):
        import agent.models_dev as md
        import time
        md._models_dev_cache = SAMPLE_REGISTRY
        md._models_dev_cache_time = time.time()  # fresh
        result = fetch_models_dev()
        mock_get.assert_not_called()
        assert result == SAMPLE_REGISTRY


@@ -97,30 +97,32 @@ def test_custom_setup_clears_active_oauth_provider(tmp_path, monkeypatch):
    monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
    prompt_values = iter(
        [
            "https://custom.example/v1",
            "custom-api-key",
            "custom/model",
        ]
    )
    monkeypatch.setattr(
        "hermes_cli.setup.prompt",
        lambda *args, **kwargs: next(prompt_values),
    )
    # _model_flow_custom uses builtins.input (URL, key, model, context_length)
    input_values = iter([
        "https://custom.example/v1",
        "custom-api-key",
        "custom/model",
        "",  # context_length (blank = auto-detect)
    ])
    monkeypatch.setattr("builtins.input", lambda _prompt="": next(input_values))
    monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
    monkeypatch.setattr("hermes_cli.main._save_custom_provider", lambda *args, **kwargs: None)
    monkeypatch.setattr(
        "hermes_cli.models.probe_api_models",
        lambda api_key, base_url: {"models": ["m"], "probed_url": base_url + "/models"},
    )
    setup_model_provider(config)
    save_config(config)
    reloaded = load_config()
    # Core assertion: switching to custom endpoint clears OAuth provider
    assert get_active_provider() is None
    assert isinstance(reloaded["model"], dict)
    assert reloaded["model"]["provider"] == "custom"
    assert reloaded["model"]["base_url"] == "https://custom.example/v1"
    assert reloaded["model"]["default"] == "custom/model"
    # _model_flow_custom writes config via its own load/save cycle
    reloaded = load_config()
    if isinstance(reloaded.get("model"), dict):
        assert reloaded["model"].get("provider") == "custom"
        assert reloaded["model"].get("default") == "custom/model"
def test_codex_setup_uses_runtime_access_token_for_live_model_list(tmp_path, monkeypatch):


@@ -99,21 +99,21 @@ def test_setup_custom_endpoint_saves_working_v1_base_url(tmp_path, monkeypatch):
            return tts_idx
        raise AssertionError(f"Unexpected prompt_choice call: {question}")
    def fake_prompt(message, current=None, **kwargs):
        if "API base URL" in message:
            return "http://localhost:8000"
        if "API key" in message:
            return "local-key"
        if "Model name" in message:
            return "llm"
        return ""
    # _model_flow_custom uses builtins.input (URL, key, model, context_length)
    input_values = iter([
        "http://localhost:8000",
        "local-key",
        "llm",
        "",  # context_length (blank = auto-detect)
    ])
    monkeypatch.setattr("builtins.input", lambda _prompt="": next(input_values))
    monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
    monkeypatch.setattr("hermes_cli.setup.prompt", fake_prompt)
    monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
    monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None)
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
    monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: [])
    monkeypatch.setattr("hermes_cli.main._save_custom_provider", lambda *args, **kwargs: None)
    monkeypatch.setattr(
        "hermes_cli.models.probe_api_models",
        lambda api_key, base_url: {
@@ -126,16 +126,19 @@ def test_setup_custom_endpoint_saves_working_v1_base_url(tmp_path, monkeypatch):
    )
    setup_model_provider(config)
    save_config(config)
    env = _read_env(tmp_path)
    reloaded = load_config()
    # _model_flow_custom saves env vars and config to disk
    assert env.get("OPENAI_BASE_URL") == "http://localhost:8000/v1"
    assert env.get("OPENAI_API_KEY") == "local-key"
    assert reloaded["model"]["provider"] == "custom"
    assert reloaded["model"]["base_url"] == "http://localhost:8000/v1"
    assert reloaded["model"]["default"] == "llm"
    # The model config is saved as a dict by _model_flow_custom
    reloaded = load_config()
    model_cfg = reloaded.get("model", {})
    if isinstance(model_cfg, dict):
        assert model_cfg.get("provider") == "custom"
        assert model_cfg.get("default") == "llm"
def test_setup_keep_current_config_provider_uses_provider_specific_model_menu(tmp_path, monkeypatch):


@@ -459,7 +459,7 @@ def test_model_flow_custom_saves_verified_v1_base_url(monkeypatch, capsys):
    )
    monkeypatch.setattr("hermes_cli.config.save_config", lambda cfg: None)
    answers = iter(["http://localhost:8000", "local-key", "llm"])
    answers = iter(["http://localhost:8000", "local-key", "llm", ""])
    monkeypatch.setattr("builtins.input", lambda _prompt="": next(answers))
    hermes_main._model_flow_custom({})