fix: make vision_analyze timeout configurable via config.yaml (#2480)
Reads auxiliary.vision.timeout from config.yaml (default: 30s) and
passes it to async_call_llm. Useful for slow local vision models
that need more than 30 seconds.
Setting is in config.yaml (not .env) since it's not a secret:
auxiliary:
  vision:
    timeout: 120
Based on PR #2306.
Co-authored-by: kshitijk4poor <kshitijk4poor@users.noreply.github.com>
This commit is contained in:
parent
a2276177a3
commit
6435d69a6d
3 changed files with 14 additions and 1 deletions
|
|
@ -182,6 +182,7 @@ DEFAULT_CONFIG = {
|
|||
"model": "", # e.g. "google/gemini-2.5-flash", "gpt-4o"
|
||||
"base_url": "", # direct OpenAI-compatible endpoint (takes precedence over provider)
|
||||
"api_key": "", # API key for base_url (falls back to OPENAI_API_KEY)
|
||||
"timeout": 30, # seconds — increase for slow local vision models
|
||||
},
|
||||
"web_extract": {
|
||||
"provider": "auto",
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue