fix(custom-endpoint): verify /models and suggest working /v1 base URL (#1480)
This commit is contained in:
parent
103f7b1ebc
commit
25e53f3c1a
6 changed files with 301 additions and 22 deletions
|
|
@ -308,6 +308,62 @@ def _fetch_anthropic_models(timeout: float = 5.0) -> Optional[list[str]]:
|
|||
return None
|
||||
|
||||
|
||||
def probe_api_models(
    api_key: Optional[str],
    base_url: Optional[str],
    timeout: float = 5.0,
) -> dict[str, Any]:
    """Probe an OpenAI-compatible ``/models`` endpoint with light URL heuristics.

    Tries the given base URL first; if that fails, retries with the sibling
    form (``/v1`` appended or stripped). Network/auth/parse failures are
    swallowed — this is a best-effort probe, never an error source.

    Returns a dict with keys:
        models: list of model-id strings on success, else ``None``.
        probed_url: the ``/models`` URL that succeeded (or the last one tried).
        resolved_base_url: the base URL that actually answered.
        suggested_base_url: the alternate base worth suggesting, if any.
        used_fallback: True when the sibling URL (not the given one) worked.
    """
    cleaned = (base_url or "").strip().rstrip("/")
    if not cleaned:
        # Nothing to probe — return the inert shape callers expect.
        return {
            "models": None,
            "probed_url": None,
            "resolved_base_url": "",
            "suggested_base_url": None,
            "used_fallback": False,
        }

    # Sibling form: strip a trailing "/v1" if present, otherwise append one.
    if cleaned.endswith("/v1"):
        alternate = cleaned[:-3].rstrip("/")
    else:
        alternate = cleaned + "/v1"

    bases: list[tuple[str, bool]] = [(cleaned, False)]
    if alternate and alternate != cleaned:
        bases.append((alternate, True))

    auth_headers: dict[str, str] = {}
    if api_key:
        auth_headers["Authorization"] = f"Bearer {api_key}"

    attempted: list[str] = []
    for base, fell_back in bases:
        probe_url = base.rstrip("/") + "/models"
        attempted.append(probe_url)
        request = urllib.request.Request(probe_url, headers=auth_headers)
        try:
            # Parse and build the result inside the try: a malformed payload
            # (non-dict JSON, etc.) falls through to the next candidate just
            # like a network error would.
            with urllib.request.urlopen(request, timeout=timeout) as response:
                payload = json.loads(response.read().decode())
                return {
                    "models": [entry.get("id", "") for entry in payload.get("data", [])],
                    "probed_url": probe_url,
                    "resolved_base_url": base.rstrip("/"),
                    # Suggest "the other" candidate relative to the one that worked.
                    "suggested_base_url": alternate if alternate != base else cleaned,
                    "used_fallback": fell_back,
                }
        except Exception:
            continue

    # Every candidate failed; report the last URL tried so callers can show it.
    return {
        "models": None,
        "probed_url": attempted[-1] if attempted else cleaned.rstrip("/") + "/models",
        "resolved_base_url": cleaned,
        "suggested_base_url": alternate if alternate != cleaned else None,
        "used_fallback": False,
    }
|
||||
|
||||
|
||||
def fetch_api_models(
|
||||
api_key: Optional[str],
|
||||
base_url: Optional[str],
|
||||
|
|
@ -318,22 +374,7 @@ def fetch_api_models(
|
|||
Returns a list of model ID strings, or ``None`` if the endpoint could not
|
||||
be reached (network error, timeout, auth failure, etc.).
|
||||
"""
|
||||
if not base_url:
|
||||
return None
|
||||
|
||||
url = base_url.rstrip("/") + "/models"
|
||||
headers: dict[str, str] = {}
|
||||
if api_key:
|
||||
headers["Authorization"] = f"Bearer {api_key}"
|
||||
|
||||
req = urllib.request.Request(url, headers=headers)
|
||||
try:
|
||||
with urllib.request.urlopen(req, timeout=timeout) as resp:
|
||||
data = json.loads(resp.read().decode())
|
||||
# Standard OpenAI format: {"data": [{"id": "model-name", ...}, ...]}
|
||||
return [m.get("id", "") for m in data.get("data", [])]
|
||||
except Exception:
|
||||
return None
|
||||
return probe_api_models(api_key, base_url, timeout=timeout).get("models")
|
||||
|
||||
|
||||
def validate_requested_model(
|
||||
|
|
@ -376,13 +417,53 @@ def validate_requested_model(
|
|||
"message": "Model names cannot contain spaces.",
|
||||
}
|
||||
|
||||
# Custom endpoints can serve any model — skip validation
|
||||
if normalized == "custom":
|
||||
probe = probe_api_models(api_key, base_url)
|
||||
api_models = probe.get("models")
|
||||
if api_models is not None:
|
||||
if requested in set(api_models):
|
||||
return {
|
||||
"accepted": True,
|
||||
"persist": True,
|
||||
"recognized": True,
|
||||
"message": None,
|
||||
}
|
||||
|
||||
suggestions = get_close_matches(requested, api_models, n=3, cutoff=0.5)
|
||||
suggestion_text = ""
|
||||
if suggestions:
|
||||
suggestion_text = "\n Similar models: " + ", ".join(f"`{s}`" for s in suggestions)
|
||||
|
||||
message = (
|
||||
f"Note: `{requested}` was not found in this custom endpoint's model listing "
|
||||
f"({probe.get('probed_url')}). It may still work if the server supports hidden or aliased models."
|
||||
f"{suggestion_text}"
|
||||
)
|
||||
if probe.get("used_fallback"):
|
||||
message += (
|
||||
f"\n Endpoint verification succeeded after trying `{probe.get('resolved_base_url')}`. "
|
||||
f"Consider saving that as your base URL."
|
||||
)
|
||||
|
||||
return {
|
||||
"accepted": True,
|
||||
"persist": True,
|
||||
"recognized": False,
|
||||
"message": message,
|
||||
}
|
||||
|
||||
message = (
|
||||
f"Note: could not reach this custom endpoint's model listing at `{probe.get('probed_url')}`. "
|
||||
f"Hermes will still save `{requested}`, but the endpoint should expose `/models` for verification."
|
||||
)
|
||||
if probe.get("suggested_base_url"):
|
||||
message += f"\n If this server expects `/v1`, try base URL: `{probe.get('suggested_base_url')}`"
|
||||
|
||||
return {
|
||||
"accepted": True,
|
||||
"persist": True,
|
||||
"recognized": False,
|
||||
"message": None,
|
||||
"message": message,
|
||||
}
|
||||
|
||||
# Probe the live API to check if the model actually exists
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue