fix(metadata): fuzzy context length match prefers longest key
The fuzzy match for model context lengths iterated dict insertion order. Shorter model names (e.g. 'gpt-5') could match before more specific ones (e.g. 'gpt-5.4-pro'), returning the wrong context length. Sort by key length descending so more specific model names always match first.
This commit is contained in:
parent
4433b83378
commit
9db75fcfc2
1 changed file with 4 additions and 2 deletions
|
|
@ -266,8 +266,10 @@ def get_model_context_length(model: str, base_url: str = "") -> int:
|
||||||
if model in metadata:
|
if model in metadata:
|
||||||
return metadata[model].get("context_length", 128000)
|
return metadata[model].get("context_length", 128000)
|
||||||
|
|
||||||
# 3. Hardcoded defaults (fuzzy match)
|
# 3. Hardcoded defaults (fuzzy match — longest key first for specificity)
|
||||||
for default_model, length in DEFAULT_CONTEXT_LENGTHS.items():
|
for default_model, length in sorted(
|
||||||
|
DEFAULT_CONTEXT_LENGTHS.items(), key=lambda x: len(x[0]), reverse=True
|
||||||
|
):
|
||||||
if default_model in model or model in default_model:
|
if default_model in model or model in default_model:
|
||||||
return length
|
return length
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue