forked from open-webui/open-webui
Migrate to the Python logging module with env var control.
This commit is contained in: parent d865b9fe59 · commit e6dd0bfbe0
15 changed files with 174 additions and 82 deletions
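The diff below only imports SRC_LOG_LEVELS; the mapping itself is defined in config.py, which is not shown here. A minimal sketch of the env-var-controlled shape the import implies (the GLOBAL_LOG_LEVEL fallback, the <SOURCE>_LOG_LEVEL variable names, and the source list are assumptions, not confirmed by this excerpt):

    # config.py (sketch) -- assumed implementation of per-source log levels
    import os

    LOG_LEVELS = {"CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"}

    # Hypothetical global default, overridable via e.g. GLOBAL_LOG_LEVEL=DEBUG
    GLOBAL_LOG_LEVEL = os.environ.get("GLOBAL_LOG_LEVEL", "INFO").upper()
    if GLOBAL_LOG_LEVEL not in LOG_LEVELS:
        GLOBAL_LOG_LEVEL = "INFO"

    # One entry per subsystem; "OLLAMA" is the key used in the diff below
    SRC_LOG_LEVELS = {}
    for source in ("MAIN", "OLLAMA", "OPENAI", "RAG"):  # assumed source list
        level = os.environ.get(f"{source}_LOG_LEVEL", "").upper()
        SRC_LOG_LEVELS[source] = level if level in LOG_LEVELS else GLOBAL_LOG_LEVEL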
@@ -11,14 +11,17 @@ import json
 import uuid
 import aiohttp
 import asyncio
+import logging

 from apps.web.models.users import Users
 from constants import ERROR_MESSAGES
 from utils.utils import decode_token, get_current_user, get_admin_user
-from config import OLLAMA_BASE_URLS, MODEL_FILTER_ENABLED, MODEL_FILTER_LIST
+from config import SRC_LOG_LEVELS, OLLAMA_BASE_URLS, MODEL_FILTER_ENABLED, MODEL_FILTER_LIST

 from typing import Optional, List, Union

+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["OLLAMA"])

 app = FastAPI()
 app.add_middleware(
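The two added lines follow the standard per-module logger pattern: getLogger(__name__) creates (or fetches) a logger named after the module, and setLevel applies the subsystem's configured threshold. A self-contained sketch of the resulting behavior (the logger name and OLLAMA_LOG_LEVEL env var are illustrative, not taken from this diff):

    import logging
    import os

    logging.basicConfig(format="%(levelname)s [%(name)s] %(message)s")

    # Mirrors log.setLevel(SRC_LOG_LEVELS["OLLAMA"]) from the hunk above
    log = logging.getLogger("apps.ollama.main")
    log.setLevel(os.environ.get("OLLAMA_LOG_LEVEL", "INFO").upper())

    log.info("visible at the INFO default")
    log.debug("visible only when OLLAMA_LOG_LEVEL=DEBUG is set")

Because the level is set per logger rather than on the root logger, each subsystem can be made verbose independently.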
@@ -69,7 +72,7 @@ class UrlUpdateForm(BaseModel):
 async def update_ollama_api_url(form_data: UrlUpdateForm, user=Depends(get_admin_user)):
     app.state.OLLAMA_BASE_URLS = form_data.urls

-    print(app.state.OLLAMA_BASE_URLS)
+    log.info(f"app.state.OLLAMA_BASE_URLS: {app.state.OLLAMA_BASE_URLS}")
     return {"OLLAMA_BASE_URLS": app.state.OLLAMA_BASE_URLS}

@@ -90,7 +93,7 @@ async def fetch_url(url):
                 return await response.json()
     except Exception as e:
         # Handle connection error here
-        print(f"Connection error: {e}")
+        log.error(f"Connection error: {e}")
         return None

@@ -114,7 +117,7 @@ def merge_models_lists(model_lists):


 async def get_all_models():
-    print("get_all_models")
+    log.info("get_all_models()")
     tasks = [fetch_url(f"{url}/api/tags") for url in app.state.OLLAMA_BASE_URLS]
     responses = await asyncio.gather(*tasks)

@@ -155,7 +158,7 @@ async def get_ollama_tags(

         return r.json()
     except Exception as e:
-        print(e)
+        log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
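Note the substitution here is log.exception(e), not log.error(e): when called from an except block, log.exception records the message at ERROR level and appends the full traceback, which the old print(e) discarded. A quick self-contained demonstration:

    import logging

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger(__name__)

    try:
        1 / 0
    except Exception as e:
        # print(e) would show only "division by zero";
        # log.exception(e) also records the traceback at ERROR level
        log.exception(e)

The same print(e) -> log.exception(e) substitution repeats in every error handler below.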
@@ -201,7 +204,7 @@ async def get_ollama_versions(url_idx: Optional[int] = None):

         return r.json()
     except Exception as e:
-        print(e)
+        log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -227,7 +230,7 @@ async def pull_model(
     form_data: ModelNameForm, url_idx: int = 0, user=Depends(get_admin_user)
 ):
     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.info(f"url: {url}")

     r = None

@@ -260,7 +263,7 @@ async def pull_model(
     try:
         return await run_in_threadpool(get_request)
     except Exception as e:
-        print(e)
+        log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -299,7 +302,7 @@ async def push_model(
         )

     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.debug(f"url: {url}")

     r = None

@@ -331,7 +334,7 @@ async def push_model(
     try:
         return await run_in_threadpool(get_request)
     except Exception as e:
-        print(e)
+        log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -359,9 +362,9 @@ class CreateModelForm(BaseModel):
 async def create_model(
     form_data: CreateModelForm, url_idx: int = 0, user=Depends(get_admin_user)
 ):
-    print(form_data)
+    log.debug(f"form_data: {form_data}")
     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.info(f"url: {url}")

     r = None

@@ -383,7 +386,7 @@ async def create_model(

             r.raise_for_status()

-            print(r)
+            log.debug(f"r: {r}")

             return StreamingResponse(
                 stream_content(),
@@ -396,7 +399,7 @@ async def create_model(
     try:
         return await run_in_threadpool(get_request)
     except Exception as e:
-        print(e)
+        log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -434,7 +437,7 @@ async def copy_model(
         )

     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.info(f"url: {url}")

     try:
         r = requests.request(
@@ -444,11 +447,11 @@ async def copy_model(
         )
         r.raise_for_status()

-        print(r.text)
+        log.debug(f"r.text: {r.text}")

         return True
     except Exception as e:
-        print(e)
+        log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -481,7 +484,7 @@ async def delete_model(
         )

     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.info(f"url: {url}")

     try:
         r = requests.request(
@@ -491,11 +494,11 @@ async def delete_model(
         )
         r.raise_for_status()

-        print(r.text)
+        log.debug(f"r.text: {r.text}")

         return True
     except Exception as e:
-        print(e)
+        log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -521,7 +524,7 @@ async def show_model_info(form_data: ModelNameForm, user=Depends(get_current_use

     url_idx = random.choice(app.state.MODELS[form_data.name]["urls"])
     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.info(f"url: {url}")

     try:
         r = requests.request(
@@ -533,7 +536,7 @@ async def show_model_info(form_data: ModelNameForm, user=Depends(get_current_use

         return r.json()
     except Exception as e:
-        print(e)
+        log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -573,7 +576,7 @@ async def generate_embeddings(
         )

     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.info(f"url: {url}")

     try:
         r = requests.request(
@@ -585,7 +588,7 @@ async def generate_embeddings(

         return r.json()
     except Exception as e:
-        print(e)
+        log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -633,7 +636,7 @@ async def generate_completion(
         )

     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.info(f"url: {url}")

     r = None

@@ -654,7 +657,7 @@ async def generate_completion(
                         if request_id in REQUEST_POOL:
                             yield chunk
                         else:
-                            print("User: canceled request")
+                            log.warning("User: canceled request")
                             break
                 finally:
                     if hasattr(r, "close"):
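In each streaming endpoint, a user-initiated cancel is now recorded at WARNING level instead of printed. A condensed, self-contained sketch of the pattern the diff modifies (REQUEST_POOL membership as the liveness check, as in the surrounding code; names follow the diff):

    import logging

    log = logging.getLogger(__name__)
    REQUEST_POOL = []  # shared pool of live request ids, as in the app

    def stream_content(r, request_id):
        # Stop streaming as soon as the request id is evicted from the pool
        for chunk in r.iter_content(chunk_size=8192):
            if request_id in REQUEST_POOL:
                yield chunk
            else:
                log.warning("User: canceled request")
                break

WARNING is a reasonable level here: a cancel is not an error, but it is noteworthy enough to surface at the default threshold.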
@@ -731,11 +734,11 @@ async def generate_chat_completion(
         )

     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.info(f"url: {url}")

     r = None

-    print(form_data.model_dump_json(exclude_none=True).encode())
+    log.debug("form_data.model_dump_json(exclude_none=True).encode(): {0} ".format(form_data.model_dump_json(exclude_none=True).encode()))

     def get_request():
         nonlocal form_data
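One nit on the payload line above: str.format() builds the full message string even when DEBUG is disabled. logging's %-style lazy interpolation defers that string formatting until a handler actually emits the record; a possible alternative (same output, not what the commit does):

    # The %s interpolation is deferred until the record is emitted at DEBUG;
    # note the .encode() argument itself is still evaluated eagerly either way
    log.debug(
        "form_data.model_dump_json(exclude_none=True).encode(): %s",
        form_data.model_dump_json(exclude_none=True).encode(),
    )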
@@ -754,7 +757,7 @@ async def generate_chat_completion(
                         if request_id in REQUEST_POOL:
                             yield chunk
                         else:
-                            print("User: canceled request")
+                            log.warning("User: canceled request")
                             break
                 finally:
                     if hasattr(r, "close"):
@@ -777,7 +780,7 @@ async def generate_chat_completion(
                 headers=dict(r.headers),
             )
         except Exception as e:
-            print(e)
+            log.exception(e)
             raise e

     try:
@@ -831,7 +834,7 @@ async def generate_openai_chat_completion(
         )

     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.info(f"url: {url}")

     r = None

@@ -854,7 +857,7 @@ async def generate_openai_chat_completion(
                         if request_id in REQUEST_POOL:
                             yield chunk
                         else:
-                            print("User: canceled request")
+                            log.warning("User: canceled request")
                             break
                 finally:
                     if hasattr(r, "close"):
@@ -947,7 +950,7 @@ async def deprecated_proxy(path: str, request: Request, user=Depends(get_current
                         if request_id in REQUEST_POOL:
                             yield chunk
                         else:
-                            print("User: canceled request")
+                            log.warning("User: canceled request")
                             break
                 finally:
                     if hasattr(r, "close"):