forked from open-webui/open-webui
Add workaround for gpt-4-vision-preview model that supports 4k tokens
This commit is contained in:
parent 8662437a9f
commit 60afd6ecdd
1 changed file with 2 additions and 2 deletions
@@ -99,10 +99,10 @@ async def proxy(path: str, request: Request, user=Depends(get_current_user)):
             print("Error loading request body into a dictionary:", e)
             raise HTTPException(status_code=400, detail="Invalid JSON in request body")

-        # Check if the model is "gpt-4-vision-preview" and set "max_tokens" to 10000
+        # Check if the model is "gpt-4-vision-preview" and set "max_tokens" to 4000
         # This is a workaround until OpenAI fixes the issue with this model
         if body_dict.get("model") == "gpt-4-vision-preview":
-            body_dict["max_tokens"] = 10000
+            body_dict["max_tokens"] = 4000
             print("Modified body_dict:", body_dict)

         # Try to convert the modified body back to JSON
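For context, here is a minimal sketch of the proxy logic this patch touches. It assumes a FastAPI handler shaped like the hunk above; the route decorator, the auth dependency on get_current_user, and the actual upstream forwarding are simplified or omitted and are not taken from the open-webui source. The handler parses the request body as JSON, caps max_tokens at 4000 when the model is gpt-4-vision-preview, and re-serializes the body before forwarding it.

import json

from fastapi import FastAPI, HTTPException, Request

app = FastAPI()


@app.api_route("/{path:path}", methods=["POST"])
async def proxy(path: str, request: Request):
    # Read the raw request body (bytes) from the client.
    body = await request.body()

    if body:
        try:
            body_dict = json.loads(body)
        except Exception as e:
            print("Error loading request body into a dictionary:", e)
            raise HTTPException(status_code=400, detail="Invalid JSON in request body")

        # Workaround from the patch above: cap max_tokens at 4000 for
        # gpt-4-vision-preview until OpenAI fixes the issue with this model.
        if body_dict.get("model") == "gpt-4-vision-preview":
            body_dict["max_tokens"] = 4000
            print("Modified body_dict:", body_dict)

        # Convert the (possibly modified) body back to JSON bytes.
        body = json.dumps(body_dict).encode("utf-8")

    # ... forward `body` to the OpenAI API here (omitted in this sketch) ...
    return {"forwarded_path": path}

The drop from 10000 to 4000 presumably reflects the commit title: gpt-4-vision-preview caps completions at 4,096 tokens, so a max_tokens of 10000 exceeds what the model accepts, while 4000 stays within the limit.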