Merge pull request #907 from open-webui/dev

0.1.103
Timothy Jaeryang Baek 2024-02-25 14:01:52 -05:00 committed by GitHub
commit 02fb517bbe
27 changed files with 1301 additions and 553 deletions


@@ -5,6 +5,27 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.1.103] - 2024-02-25
### Added
- **🔗 Built-in LiteLLM Proxy**: Now includes LiteLLM proxy within Open WebUI for enhanced functionality.
  - Easily integrate existing LiteLLM configurations using the `-v /path/to/config.yaml:/app/backend/data/litellm/config.yaml` flag.
  - When running Open WebUI in a Docker container, ensure connections to localhost use `host.docker.internal`.
- **🖼️ Image Generation Enhancements**: Introducing Advanced Settings with Image Preview Feature.
  - Customize image generation by setting the number of steps; defaults to the A1111 value.
### Fixed
- Resolved issue with RAG scan halting document loading upon encountering unsupported MIME types or exceptions (Issue #866).
### Changed
- Ollama is no longer required to run Open WebUI.
- Access our comprehensive documentation at [Open WebUI Documentation](https://docs.openwebui.com/).
## [0.1.102] - 2024-02-22
### Added
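The new LiteLLM config mount from the 0.1.103 entry above can be wired up at container start. A minimal sketch of the docker run invocation, assuming the standard image and a host-side config at `/path/to/config.yaml` (adjust names, ports, and volumes to your setup):

```bash
docker run -d -p 3000:8080 \
  -v open-webui:/app/backend/data \
  -v /path/to/config.yaml:/app/backend/data/litellm/config.yaml \
  --name open-webui --restart always \
  ghcr.io/open-webui/open-webui:main
```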


@@ -103,14 +103,24 @@ Don't forget to explore our sibling project, [Open WebUI Community](https://open
- After installation, you can access Open WebUI at [http://localhost:3000](http://localhost:3000). Enjoy! 😄
-#### Troubleshooting
+#### Open WebUI: Server Connection Error
-Encountering connection issues? Our [Open WebUI Documentation](https://docs.openwebui.com/getting-started/troubleshooting/) has got you covered. For further assistance and to join our vibrant community, visit the [Open WebUI Discord](https://discord.gg/5rJgQTnV4s).
+If you're experiencing connection issues, it's often due to the WebUI docker container not being able to reach the Ollama server at 127.0.0.1:11434 (host.docker.internal:11434) inside the container. Use the `--network=host` flag in your docker command to resolve this. Note that the port changes from 3000 to 8080, resulting in the link: `http://localhost:8080`.
**Example Docker Command**:
```bash
docker run -d --network=host -v open-webui:/app/backend/data -e OLLAMA_API_BASE_URL=http://127.0.0.1:11434/api --name open-webui --restart always ghcr.io/open-webui/open-webui:main
```
### Other Installation Methods
We offer various installation alternatives, including non-Docker methods, Docker Compose, Kustomize, and Helm. Visit our [Open WebUI Documentation](https://docs.openwebui.com/getting-started/) or join our [Discord community](https://discord.gg/5rJgQTnV4s) for comprehensive guidance.
### Troubleshooting
Encountering connection issues? Our [Open WebUI Documentation](https://docs.openwebui.com/getting-started/troubleshooting/) has got you covered. For further assistance and to join our vibrant community, visit the [Open WebUI Discord](https://discord.gg/5rJgQTnV4s).
### Keeping Your Docker Installation Up-to-Date
In case you want to update your local Docker installation to the latest version, you can do it with [Watchtower](https://containrrr.dev/watchtower/):
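The diff cuts off before the command itself; the usual one-shot Watchtower invocation looks like the sketch below, assuming your container is named `open-webui`:

```bash
docker run --rm --volume /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower --run-once open-webui
```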


@@ -5,3 +5,10 @@ uploads
.ipynb_checkpoints
*.db
_test
!/data
/data/*
!/data/litellm
/data/litellm/*
!data/litellm/config.yaml
!data/config.json
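These negation rules keep the `data` directory tracked while ignoring its contents, except the LiteLLM config and `config.json`. A quick way to verify the outcome from a checkout (paths here are illustrative):

```bash
# Should print the matching /data/* rule from the .gitignore (ignored):
git check-ignore -v backend/data/webui.db

# Should print nothing and exit non-zero (not ignored, i.e. kept):
git check-ignore backend/data/litellm/config.yaml
```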

backend/.gitignore

@@ -6,6 +6,11 @@ uploads
*.db
_test
Pipfile
-data/*
+!/data
/data/*
!/data/litellm
/data/litellm/*
!data/litellm/config.yaml
!data/config.json
.webui_secret_key


@@ -35,6 +35,7 @@ app.add_middleware(
app.state.AUTOMATIC1111_BASE_URL = AUTOMATIC1111_BASE_URL
app.state.ENABLED = app.state.AUTOMATIC1111_BASE_URL != ""
app.state.IMAGE_SIZE = "512x512"
app.state.IMAGE_STEPS = 50
@app.get("/enabled", response_model=bool)
@@ -49,7 +50,7 @@ async def toggle_enabled(request: Request, user=Depends(get_admin_user)):
        app.state.ENABLED = not app.state.ENABLED
        return app.state.ENABLED
    except Exception as e:
-        raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e))
+        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))
class UrlUpdateForm(BaseModel):
@@ -102,6 +103,32 @@
    )
class ImageStepsUpdateForm(BaseModel):
    steps: int

@app.get("/steps")
async def get_image_steps(user=Depends(get_admin_user)):
    return {"IMAGE_STEPS": app.state.IMAGE_STEPS}

@app.post("/steps/update")
async def update_image_steps(
    form_data: ImageStepsUpdateForm, user=Depends(get_admin_user)
):
    if form_data.steps >= 0:
        app.state.IMAGE_STEPS = form_data.steps
        return {
            "IMAGE_STEPS": app.state.IMAGE_STEPS,
            "status": True,
        }
    else:
        raise HTTPException(
            status_code=400,
            detail=ERROR_MESSAGES.INCORRECT_FORMAT(" (e.g., 50)."),
        )
@app.get("/models") @app.get("/models")
def get_models(user=Depends(get_current_user)): def get_models(user=Depends(get_current_user)):
try: try:
@ -109,7 +136,8 @@ def get_models(user=Depends(get_current_user)):
models = r.json() models = r.json()
return models return models
except Exception as e: except Exception as e:
raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e)) app.state.ENABLED = False
raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))
@app.get("/models/default") @app.get("/models/default")
@ -120,7 +148,8 @@ async def get_default_model(user=Depends(get_admin_user)):
return {"model": options["sd_model_checkpoint"]} return {"model": options["sd_model_checkpoint"]}
except Exception as e: except Exception as e:
raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e)) app.state.ENABLED = False
raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))
class UpdateModelForm(BaseModel): class UpdateModelForm(BaseModel):
@@ -177,6 +206,9 @@ def generate_image(
            "height": height,
        }

        if app.state.IMAGE_STEPS != None:
            data["steps"] = app.state.IMAGE_STEPS

        if form_data.negative_prompt != None:
            data["negative_prompt"] = form_data.negative_prompt
@@ -190,4 +222,4 @@ def generate_image(
        return r.json()
    except Exception as e:
        print(e)
-        raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e))
+        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))
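A quick way to exercise the new steps endpoints; a hypothetical session, assuming the images app is mounted at `/images/api/v1` behind the WebUI on port 3000 and `$TOKEN` holds an admin JWT:

```bash
# Read the current step count
curl -H "Authorization: Bearer $TOKEN" http://localhost:3000/images/api/v1/steps

# Set it to 30 (values < 0 are rejected with a 400)
curl -X POST http://localhost:3000/images/api/v1/steps/update \
  -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
  -d '{"steps": 30}'
```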


@@ -6,6 +6,8 @@ from bs4 import BeautifulSoup
from pathlib import Path
import json
import yaml
import markdown
import requests
import shutil
@@ -83,8 +85,6 @@ for version in soup.find_all("h2"):
    # Find the next sibling that is a h3 tag (section title)
    current = version.find_next_sibling()
-    print(current)
    while current and current.name != "h2":
        if current.name == "h3":
            section_title = current.get_text().lower()  # e.g., "added", "fixed"
@@ -165,6 +165,40 @@ Path(CACHE_DIR).mkdir(parents=True, exist_ok=True)
DOCS_DIR = f"{DATA_DIR}/docs"
Path(DOCS_DIR).mkdir(parents=True, exist_ok=True)
####################################
# LITELLM_CONFIG
####################################

def create_config_file(file_path):
    directory = os.path.dirname(file_path)

    # Check if directory exists, if not, create it
    if not os.path.exists(directory):
        os.makedirs(directory)

    # Data to write into the YAML file
    config_data = {
        "general_settings": {},
        "litellm_settings": {},
        "model_list": [],
        "router_settings": {},
    }

    # Write data to YAML file
    with open(file_path, "w") as file:
        yaml.dump(config_data, file)

LITELLM_CONFIG_PATH = f"{DATA_DIR}/litellm/config.yaml"

if not os.path.exists(LITELLM_CONFIG_PATH):
    print("Config file doesn't exist. Creating...")
    create_config_file(LITELLM_CONFIG_PATH)
    print("Config file created successfully.")

####################################
# OLLAMA_API_BASE_URL
####################################


@@ -0,0 +1,4 @@
general_settings: {}
litellm_settings: {}
model_list: []
router_settings: {}
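The file above is the empty default that `create_config_file` writes. To pre-populate it before first launch, one option is a heredoc; a sketch assuming an Ollama-served model and the same `model_name`/`litellm_params` schema the frontend submits:

```bash
mkdir -p backend/data/litellm
cat > backend/data/litellm/config.yaml <<'EOF'
general_settings: {}
litellm_settings: {}
router_settings: {}
model_list:
  - model_name: mistral
    litellm_params:
      model: ollama/mistral
EOF
```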


@@ -2,25 +2,31 @@ from bs4 import BeautifulSoup
import json
import markdown
import time
import os
import sys

-from fastapi import FastAPI, Request
+from fastapi import FastAPI, Request, Depends
from fastapi.staticfiles import StaticFiles
from fastapi import HTTPException
from fastapi.responses import JSONResponse
from fastapi.middleware.wsgi import WSGIMiddleware
from fastapi.middleware.cors import CORSMiddleware
from starlette.exceptions import HTTPException as StarletteHTTPException

from litellm.proxy.proxy_server import ProxyConfig, initialize
from litellm.proxy.proxy_server import app as litellm_app

from apps.ollama.main import app as ollama_app
from apps.openai.main import app as openai_app
from apps.audio.main import app as audio_app
from apps.images.main import app as images_app
from apps.rag.main import app as rag_app
from apps.web.main import app as webui_app

from config import WEBUI_NAME, ENV, VERSION, CHANGELOG, FRONTEND_BUILD_DIR
from utils.utils import get_http_authorization_cred, get_current_user
class SPAStaticFiles(StaticFiles):
@@ -34,6 +40,21 @@ class SPAStaticFiles(StaticFiles):
            raise ex

proxy_config = ProxyConfig()

async def config():
    router, model_list, general_settings = await proxy_config.load_config(
        router=None, config_file_path="./data/litellm/config.yaml"
    )

    await initialize(config="./data/litellm/config.yaml", telemetry=False)

async def startup():
    await config()

app = FastAPI(docs_url="/docs" if ENV == "dev" else None, redoc_url=None)
origins = ["*"]
@@ -47,6 +68,11 @@ app.add_middleware(
)
@app.on_event("startup")
async def on_startup():
await startup()
@app.middleware("http") @app.middleware("http")
async def check_url(request: Request, call_next): async def check_url(request: Request, call_next):
start_time = int(time.time()) start_time = int(time.time())
@ -57,7 +83,23 @@ async def check_url(request: Request, call_next):
return response return response
@litellm_app.middleware("http")
async def auth_middleware(request: Request, call_next):
auth_header = request.headers.get("Authorization", "")
if ENV != "dev":
try:
user = get_current_user(get_http_authorization_cred(auth_header))
print(user)
except Exception as e:
return JSONResponse(status_code=400, content={"detail": str(e)})
response = await call_next(request)
return response
app.mount("/api/v1", webui_app) app.mount("/api/v1", webui_app)
app.mount("/litellm/api", litellm_app)
app.mount("/ollama/api", ollama_app) app.mount("/ollama/api", ollama_app)
app.mount("/openai/api", openai_app) app.mount("/openai/api", openai_app)


@@ -16,6 +16,10 @@ aiohttp
peewee
bcrypt

litellm
apscheduler
google-generativeai

langchain
langchain-community
chromadb


@@ -58,6 +58,14 @@ def extract_token_from_auth_header(auth_header: str):
    return auth_header[len("Bearer ") :]

def get_http_authorization_cred(auth_header: str):
    try:
        scheme, credentials = auth_header.split(" ")
        return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials)
    except:
        raise ValueError(ERROR_MESSAGES.INVALID_TOKEN)

def get_current_user(
    auth_token: HTTPAuthorizationCredentials = Depends(bearer_security),
):


@@ -1,6 +1,6 @@
{
	"name": "open-webui",
-	"version": "0.1.102",
+	"version": "0.1.103",
	"private": true,
	"scripts": {
		"dev": "vite dev --host",


@@ -5,6 +5,7 @@
		<link rel="icon" href="%sveltekit.assets%/favicon.png" />
		<link rel="manifest" href="%sveltekit.assets%/manifest.json" />
		<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1" />
		<meta name="robots" content="noindex,nofollow" />
		<script>
			// On page load or when changing themes, best to add inline in `head` to avoid FOUC
			if (


@@ -198,6 +198,71 @@ export const updateImageSize = async (token: string = '', size: string) => {
	return res.IMAGE_SIZE;
};
export const getImageSteps = async (token: string = '') => {
let error = null;
const res = await fetch(`${IMAGES_API_BASE_URL}/steps`, {
method: 'GET',
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
...(token && { authorization: `Bearer ${token}` })
}
})
.then(async (res) => {
if (!res.ok) throw await res.json();
return res.json();
})
.catch((err) => {
console.log(err);
if ('detail' in err) {
error = err.detail;
} else {
error = 'Server connection failed';
}
return null;
});
if (error) {
throw error;
}
return res.IMAGE_STEPS;
};
export const updateImageSteps = async (token: string = '', steps: number) => {
let error = null;
const res = await fetch(`${IMAGES_API_BASE_URL}/steps/update`, {
method: 'POST',
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
...(token && { authorization: `Bearer ${token}` })
},
body: JSON.stringify({ steps })
})
.then(async (res) => {
if (!res.ok) throw await res.json();
return res.json();
})
.catch((err) => {
console.log(err);
if ('detail' in err) {
error = err.detail;
} else {
error = 'Server connection failed';
}
return null;
});
if (error) {
throw error;
}
return res.IMAGE_STEPS;
};
export const getDiffusionModels = async (token: string = '') => {
	let error = null;


@@ -0,0 +1,148 @@
import { LITELLM_API_BASE_URL } from '$lib/constants';
export const getLiteLLMModels = async (token: string = '') => {
let error = null;
const res = await fetch(`${LITELLM_API_BASE_URL}/v1/models`, {
method: 'GET',
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
...(token && { authorization: `Bearer ${token}` })
}
})
.then(async (res) => {
if (!res.ok) throw await res.json();
return res.json();
})
.catch((err) => {
console.log(err);
error = `LiteLLM: ${err?.error?.message ?? 'Network Problem'}`;
return [];
});
if (error) {
throw error;
}
const models = Array.isArray(res) ? res : res?.data ?? null;
return models
? models
.map((model) => ({
id: model.id,
name: model.name ?? model.id,
external: true,
source: 'litellm'
}))
.sort((a, b) => {
return a.name.localeCompare(b.name);
})
: models;
};
export const getLiteLLMModelInfo = async (token: string = '') => {
let error = null;
const res = await fetch(`${LITELLM_API_BASE_URL}/model/info`, {
method: 'GET',
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
...(token && { authorization: `Bearer ${token}` })
}
})
.then(async (res) => {
if (!res.ok) throw await res.json();
return res.json();
})
.catch((err) => {
console.log(err);
error = `LiteLLM: ${err?.error?.message ?? 'Network Problem'}`;
return [];
});
if (error) {
throw error;
}
const models = Array.isArray(res) ? res : res?.data ?? null;
return models;
};
type AddLiteLLMModelForm = {
name: string;
model: string;
api_base: string;
api_key: string;
rpm: string;
};
export const addLiteLLMModel = async (token: string = '', payload: AddLiteLLMModelForm) => {
let error = null;
const res = await fetch(`${LITELLM_API_BASE_URL}/model/new`, {
method: 'POST',
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
...(token && { authorization: `Bearer ${token}` })
},
body: JSON.stringify({
model_name: payload.name,
litellm_params: {
model: payload.model,
...(payload.api_base === '' ? {} : { api_base: payload.api_base }),
...(payload.api_key === '' ? {} : { api_key: payload.api_key }),
...(isNaN(parseInt(payload.rpm)) ? {} : { rpm: parseInt(payload.rpm) })
}
})
})
.then(async (res) => {
if (!res.ok) throw await res.json();
return res.json();
})
.catch((err) => {
console.log(err);
error = `LiteLLM: ${err?.error?.message ?? 'Network Problem'}`;
return [];
});
if (error) {
throw error;
}
return res;
};
export const deleteLiteLLMModel = async (token: string = '', id: string) => {
let error = null;
const res = await fetch(`${LITELLM_API_BASE_URL}/model/delete`, {
method: 'POST',
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
...(token && { authorization: `Bearer ${token}` })
},
body: JSON.stringify({
id: id
})
})
.then(async (res) => {
if (!res.ok) throw await res.json();
return res.json();
})
.catch((err) => {
console.log(err);
error = `LiteLLM: ${err?.error?.message ?? 'Network Problem'}`;
return [];
});
if (error) {
throw error;
}
return res;
};
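The admin endpoints these helpers wrap can also be driven directly; a hypothetical session, same `$TOKEN` and port assumptions as above:

```bash
# List configured models along with their model_info ids
curl -H "Authorization: Bearer $TOKEN" http://localhost:3000/litellm/api/model/info

# Remove one by id
curl -X POST http://localhost:3000/litellm/api/model/delete \
  -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
  -d '{"id": "<model_info.id from the call above>"}'
```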


@@ -128,7 +128,9 @@ export const getOllamaModels = async (token: string = '') => {
		throw error;
	}

-	return (res?.models ?? []).sort((a, b) => {
+	return (res?.models ?? [])
+		.map((model) => ({ id: model.model, name: model.name ?? model.model, ...model }))
+		.sort((a, b) => {
			return a.name.localeCompare(b.name);
		});
};


@@ -163,7 +163,7 @@ export const getOpenAIModels = async (token: string = '') => {
	return models
		? models
-				.map((model) => ({ name: model.id, external: true }))
+				.map((model) => ({ id: model.id, name: model.name ?? model.id, external: true }))
				.sort((a, b) => {
					return a.name.localeCompare(b.name);
				})
@@ -200,17 +200,21 @@ export const getOpenAIModelsDirect = async (
	const models = Array.isArray(res) ? res : res?.data ?? null;

	return models
-		.map((model) => ({ name: model.id, external: true }))
+		.map((model) => ({ id: model.id, name: model.name ?? model.id, external: true }))
		.filter((model) => (base_url.includes('openai') ? model.name.includes('gpt') : true))
		.sort((a, b) => {
			return a.name.localeCompare(b.name);
		});
};
-export const generateOpenAIChatCompletion = async (token: string = '', body: object) => {
+export const generateOpenAIChatCompletion = async (
+	token: string = '',
+	body: object,
+	url: string = OPENAI_API_BASE_URL
+) => {
	let error = null;

-	const res = await fetch(`${OPENAI_API_BASE_URL}/chat/completions`, {
+	const res = await fetch(`${url}/chat/completions`, {
		method: 'POST',
		headers: {
			Authorization: `Bearer ${token}`,


@@ -25,7 +25,7 @@
	$: if (selectedModels.length > 0 && $models.length > 0) {
		selectedModels = selectedModels.map((model) =>
-			$models.map((m) => m.name).includes(model) ? model : ''
+			$models.map((m) => m.id).includes(model) ? model : ''
		);
	}
</script>
@@ -45,7 +45,7 @@
	{#if model.name === 'hr'}
		<hr />
	{:else}
-		<option value={model.name} class="text-gray-700 text-lg"
+		<option value={model.id} class="text-gray-700 text-lg"
			>{model.name +
				`${model.size ? ` (${(model.size / 1024 ** 3).toFixed(1)}GB)` : ''}`}</option
		>


@@ -38,6 +38,7 @@
		</div>
	</div>

	{#if ollamaVersion}
		<hr class=" dark:border-gray-700" />

		<div>
@@ -48,6 +49,7 @@
			</div>
		</div>
	</div>
	{/if}

	<hr class=" dark:border-gray-700" />


@@ -3,7 +3,7 @@
	import { createEventDispatcher, onMount } from 'svelte';
	const dispatch = createEventDispatcher();

-	import { getOllamaAPIUrl, updateOllamaAPIUrl } from '$lib/apis/ollama';
+	import { getOllamaAPIUrl, getOllamaVersion, updateOllamaAPIUrl } from '$lib/apis/ollama';
	import { getOpenAIKey, getOpenAIUrl, updateOpenAIKey, updateOpenAIUrl } from '$lib/apis/openai';
	import toast from 'svelte-french-toast';
@@ -15,6 +15,9 @@
	let OPENAI_API_KEY = '';
	let OPENAI_API_BASE_URL = '';

	let showOpenAI = false;
	let showLiteLLM = false;

	const updateOpenAIHandler = async () => {
		OPENAI_API_BASE_URL = await updateOpenAIUrl(localStorage.token, OPENAI_API_BASE_URL);
		OPENAI_API_KEY = await updateOpenAIKey(localStorage.token, OPENAI_API_KEY);
@@ -24,11 +27,15 @@
	const updateOllamaAPIUrlHandler = async () => {
		API_BASE_URL = await updateOllamaAPIUrl(localStorage.token, API_BASE_URL);
-		const _models = await getModels('ollama');
-		if (_models.length > 0) {
+		const ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => {
+			toast.error(error);
+			return null;
+		});
+
+		if (ollamaVersion) {
			toast.success('Server connection verified');
-			await models.set(_models);
+			await models.set(await getModels());
		}
	};
@@ -42,7 +49,7 @@
</script>

<form
-	class="flex flex-col h-full space-y-3 text-sm"
+	class="flex flex-col h-full justify-between text-sm"
	on:submit|preventDefault={() => {
		updateOpenAIHandler();
		dispatch('save');
@@ -53,6 +60,59 @@
		// });
	}}
>
<div class=" pr-1.5 overflow-y-scroll max-h-[20.5rem] space-y-3">
<div class=" space-y-3">
<div class="mt-2 space-y-2 pr-1.5">
<div class="flex justify-between items-center text-sm">
<div class=" font-medium">OpenAI API</div>
<button
class=" text-xs font-medium text-gray-500"
type="button"
on:click={() => {
showOpenAI = !showOpenAI;
}}>{showOpenAI ? 'Hide' : 'Show'}</button
>
</div>
{#if showOpenAI}
<div>
<div class=" mb-2.5 text-sm font-medium">API Key</div>
<div class="flex w-full">
<div class="flex-1">
<input
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
placeholder="Enter OpenAI API Key"
bind:value={OPENAI_API_KEY}
autocomplete="off"
/>
</div>
</div>
</div>
<div>
<div class=" mb-2.5 text-sm font-medium">API Base URL</div>
<div class="flex w-full">
<div class="flex-1">
<input
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
placeholder="Enter OpenAI API Base URL"
bind:value={OPENAI_API_BASE_URL}
autocomplete="off"
/>
</div>
</div>
<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
WebUI will make requests to <span class=" text-gray-200"
>'{OPENAI_API_BASE_URL}/chat'</span
>
</div>
</div>
{/if}
</div>
</div>
<hr class=" dark:border-gray-700" />
		<div>
			<div class=" mb-2.5 text-sm font-medium">Ollama API URL</div>
			<div class="flex w-full">
@@ -96,40 +156,6 @@
				</a>
			</div>
		</div>
-		<hr class=" dark:border-gray-700" />
-		<div class=" space-y-3">
-			<div>
-				<div class=" mb-2.5 text-sm font-medium">OpenAI API Key</div>
-				<div class="flex w-full">
-					<div class="flex-1">
-						<input
-							class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
-							placeholder="Enter OpenAI API Key"
-							bind:value={OPENAI_API_KEY}
-							autocomplete="off"
-						/>
-					</div>
-				</div>
-			</div>
-			<div>
-				<div class=" mb-2.5 text-sm font-medium">OpenAI API Base URL</div>
-				<div class="flex w-full">
-					<div class="flex-1">
-						<input
-							class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
-							placeholder="Enter OpenAI API Base URL"
-							bind:value={OPENAI_API_BASE_URL}
-							autocomplete="off"
-						/>
-					</div>
-				</div>
-				<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
-					WebUI will make requests to <span class=" text-gray-200">'{OPENAI_API_BASE_URL}/chat'</span>
-				</div>
-			</div>
-		</div>
	</div>

	<div class="flex justify-end pt-3 text-sm font-medium">


@@ -84,7 +84,7 @@
</script>

<div class="flex flex-col h-full justify-between text-sm">
-	<div class=" pr-1.5 overflow-y-scroll max-h-[21rem]">
+	<div class=" pr-1.5 overflow-y-scroll max-h-[20.5rem]">
		<div class="">
			<div class=" mb-1 text-sm font-medium">WebUI Settings</div>

@@ -12,7 +12,9 @@
		toggleImageGenerationEnabledStatus,
		updateAUTOMATIC1111Url,
		updateDefaultDiffusionModel,
-		updateImageSize
+		updateImageSize,
+		getImageSteps,
+		updateImageSteps
	} from '$lib/apis/images';
	import { getBackendConfig } from '$lib/apis';
	const dispatch = createEventDispatcher();
@@ -21,20 +23,23 @@
	let loading = false;

-	let enableImageGeneration = true;
+	let enableImageGeneration = false;
	let AUTOMATIC1111_BASE_URL = '';

	let selectedModel = '';
-	let models = [];
+	let models = null;

	let imageSize = '';
	let steps = 50;

	const getModels = async () => {
		models = await getDiffusionModels(localStorage.token).catch((error) => {
			toast.error(error);
			return null;
		});
-		selectedModel = await getDefaultDiffusionModel(localStorage.token);
+		selectedModel = await getDefaultDiffusionModel(localStorage.token).catch((error) => {
+			return '';
+		});
	};

	const updateAUTOMATIC1111UrlHandler = async () => {
@@ -83,6 +88,7 @@
		if (enableImageGeneration && AUTOMATIC1111_BASE_URL) {
			imageSize = await getImageSize(localStorage.token);
			steps = await getImageSteps(localStorage.token);
			getModels();
		}
	}
@@ -98,12 +104,16 @@
			toast.error(error);
			return null;
		});
		await updateImageSteps(localStorage.token, steps).catch((error) => {
			toast.error(error);
			return null;
		});

		dispatch('save');
		loading = false;
	}}
>
-	<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-[21rem]">
+	<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-[20.5rem]">
		<div>
			<div class=" mb-1 text-sm font-medium">Image Settings</div>
@@ -188,7 +198,7 @@
	{#if !selectedModel}
		<option value="" disabled selected>Select a model</option>
	{/if}
-	{#each models as model}
+	{#each models ?? [] as model}
		<option value={model.title} class="bg-gray-100 dark:bg-gray-700"
			>{model.model_name}</option
		>
@@ -210,6 +220,19 @@
			</div>
		</div>
	</div>
<div>
<div class=" mb-2.5 text-sm font-medium">Set Steps</div>
<div class="flex w-full">
<div class="flex-1 mr-2">
<input
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
placeholder="Enter Number of Steps (e.g. 50)"
bind:value={steps}
/>
</div>
</div>
</div>
	{/if}
</div>


@@ -2,14 +2,33 @@
	import queue from 'async/queue';
	import toast from 'svelte-french-toast';

-	import { createModel, deleteModel, pullModel } from '$lib/apis/ollama';
+	import { createModel, deleteModel, getOllamaVersion, pullModel } from '$lib/apis/ollama';
	import { WEBUI_API_BASE_URL, WEBUI_BASE_URL } from '$lib/constants';
	import { WEBUI_NAME, models, user } from '$lib/stores';
	import { splitStream } from '$lib/utils';
import { onMount } from 'svelte';
import { addLiteLLMModel, deleteLiteLLMModel, getLiteLLMModelInfo } from '$lib/apis/litellm';
	export let getModels: Function;
let showLiteLLM = false;
let showLiteLLMParams = false;
let liteLLMModelInfo = [];
let liteLLMModel = '';
let liteLLMModelName = '';
let liteLLMAPIBase = '';
let liteLLMAPIKey = '';
let liteLLMRPM = '';
let deleteLiteLLMModelId = '';
$: liteLLMModelName = liteLLMModel;
	// Models
let showExperimentalOllama = false;
let ollamaVersion = '';
	const MAX_PARALLEL_DOWNLOADS = 3;
	const modelDownloadQueue = queue(
		(task: { modelName: string }, cb) =>
@@ -286,22 +305,82 @@
			opts.callback({ success: true, modelName: opts.modelName });
		}
	};
const addLiteLLMModelHandler = async () => {
if (!liteLLMModelInfo.find((info) => info.model_name === liteLLMModelName)) {
const res = await addLiteLLMModel(localStorage.token, {
name: liteLLMModelName,
model: liteLLMModel,
api_base: liteLLMAPIBase,
api_key: liteLLMAPIKey,
rpm: liteLLMRPM
}).catch((error) => {
toast.error(error);
return null;
});
if (res) {
if (res.message) {
toast.success(res.message);
}
}
} else {
toast.error(`Model ${liteLLMModelName} already exists.`);
}
liteLLMModelName = '';
liteLLMModel = '';
liteLLMAPIBase = '';
liteLLMAPIKey = '';
liteLLMRPM = '';
liteLLMModelInfo = await getLiteLLMModelInfo(localStorage.token);
models.set(await getModels());
};
const deleteLiteLLMModelHandler = async () => {
const res = await deleteLiteLLMModel(localStorage.token, deleteLiteLLMModelId).catch(
(error) => {
toast.error(error);
return null;
}
);
if (res) {
if (res.message) {
toast.success(res.message);
}
}
deleteLiteLLMModelId = '';
liteLLMModelInfo = await getLiteLLMModelInfo(localStorage.token);
models.set(await getModels());
};
onMount(async () => {
ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => false);
liteLLMModelInfo = await getLiteLLMModelInfo(localStorage.token);
});
</script>

<div class="flex flex-col h-full justify-between text-sm">
-	<div class=" space-y-3 pr-1.5 overflow-y-scroll h-80">
+	<div class=" space-y-3 pr-1.5 overflow-y-scroll h-[23rem]">
	{#if ollamaVersion}
		<div class="space-y-2 pr-1.5">
			<div>
-				<div class=" mb-2.5 text-sm font-medium">Pull a model from Ollama.com</div>
+				<div class=" mb-2 text-sm font-medium">Manage Ollama Models</div>
+				<div class=" mb-2 text-sm font-medium">Pull a model from Ollama.com</div>
				<div class="flex w-full">
					<div class="flex-1 mr-2">
						<input
-							class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
+							class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
							placeholder="Enter model tag (e.g. mistral:7b)"
							bind:value={modelTag}
						/>
					</div>
					<button
-						class="px-3 text-gray-100 bg-emerald-600 hover:bg-emerald-700 disabled:bg-gray-700 disabled:cursor-not-allowed rounded transition"
+						class="px-3 bg-gray-100 hover:bg-gray-200 text-gray-800 dark:bg-gray-850 dark:hover:bg-gray-800 dark:text-gray-100 rounded transition"
						on:click={() => {
							pullModelHandler();
						}}
@@ -379,14 +458,12 @@
				{/if}
			</div>

-			<hr class=" dark:border-gray-700" />
			<div>
-				<div class=" mb-2.5 text-sm font-medium">Delete a model</div>
+				<div class=" mb-2 text-sm font-medium">Delete a model</div>
				<div class="flex w-full">
					<div class="flex-1 mr-2">
						<select
-							class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
+							class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
							bind:value={deleteModelTag}
							placeholder="Select a model"
						>
@@ -401,7 +478,7 @@
						</select>
					</div>
					<button
-						class="px-3 bg-red-700 hover:bg-red-800 text-gray-100 rounded transition"
+						class="px-3 bg-gray-100 hover:bg-gray-200 text-gray-800 dark:bg-gray-850 dark:hover:bg-gray-800 dark:text-gray-100 rounded transition"
						on:click={() => {
							deleteModelHandler();
						}}
@@ -422,21 +499,27 @@
			</div>
		</div>

-		<hr class=" dark:border-gray-700" />
+		<div>
<div class="flex justify-between items-center text-xs">
<div class=" text-sm font-medium">Experimental</div>
<button
class=" text-xs font-medium text-gray-500"
type="button"
on:click={() => {
showExperimentalOllama = !showExperimentalOllama;
}}>{showExperimentalOllama ? 'Hide' : 'Show'}</button
>
</div>
</div>
{#if showExperimentalOllama}
		<form
			on:submit|preventDefault={() => {
				uploadModelHandler();
			}}
		>
			<div class=" mb-2 flex w-full justify-between">
-				<div class=" text-sm font-medium">
-					Upload a GGUF model
-					<a
-						class=" text-xs font-medium text-gray-500 underline"
-						href="https://github.com/jmorganca/ollama/blob/main/README.md#import-from-gguf"
-						target="_blank">(Experimental)</a
-					>
-				</div>
+				<div class=" text-sm font-medium">Upload a GGUF model</div>
			<button
class="p-1 px-3 text-xs flex rounded transition" class="p-1 px-3 text-xs flex rounded transition"
@ -475,7 +558,7 @@
<button <button
type="button" type="button"
class="w-full rounded text-left py-2 px-4 dark:text-gray-300 dark:bg-gray-800" class="w-full rounded text-left py-2 px-4 dark:text-gray-300 dark:bg-gray-850"
on:click={() => { on:click={() => {
document.getElementById('model-upload-input').click(); document.getElementById('model-upload-input').click();
}} }}
@@ -490,7 +573,7 @@
			{:else}
				<div class="flex-1 {modelFileUrl !== '' ? 'mr-2' : ''}">
					<input
-						class="w-full rounded text-left py-2 px-4 dark:text-gray-300 dark:bg-gray-800 outline-none {modelFileUrl !==
+						class="w-full rounded text-left py-2 px-4 dark:text-gray-300 dark:bg-gray-850 outline-none {modelFileUrl !==
						''
							? 'mr-2'
							: ''}"
@@ -592,5 +675,253 @@
				</div>
			{/if}
		</form>
{/if}
</div>
<hr class=" dark:border-gray-700 my-2" />
{/if}
<div class=" space-y-3">
<div class="mt-2 space-y-3 pr-1.5">
<div>
<div class=" mb-2 text-sm font-medium">Manage LiteLLM Models</div>
<div>
<div class="flex justify-between items-center text-xs">
<div class=" text-sm font-medium">Add a model</div>
<button
class=" text-xs font-medium text-gray-500"
type="button"
on:click={() => {
showLiteLLMParams = !showLiteLLMParams;
}}>{showLiteLLMParams ? 'Advanced' : 'Default'}</button
>
</div>
</div>
<div class="my-2 space-y-2">
<div class="flex w-full mb-1.5">
<div class="flex-1 mr-2">
<input
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
placeholder="Enter LiteLLM Model (litellm_params.model)"
bind:value={liteLLMModel}
autocomplete="off"
/>
</div>
<button
class="px-3 bg-gray-100 hover:bg-gray-200 text-gray-800 dark:bg-gray-850 dark:hover:bg-gray-800 dark:text-gray-100 rounded transition"
on:click={() => {
addLiteLLMModelHandler();
}}
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-4 h-4"
>
<path
d="M8.75 3.75a.75.75 0 0 0-1.5 0v3.5h-3.5a.75.75 0 0 0 0 1.5h3.5v3.5a.75.75 0 0 0 1.5 0v-3.5h3.5a.75.75 0 0 0 0-1.5h-3.5v-3.5Z"
/>
</svg>
</button>
</div>
{#if showLiteLLMParams}
<div>
<div class=" mb-1.5 text-sm font-medium">Model Name</div>
<div class="flex w-full">
<div class="flex-1">
<input
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
placeholder="Enter Model Name (model_name)"
bind:value={liteLLMModelName}
autocomplete="off"
/>
</div>
</div>
</div>
<div>
<div class=" mb-1.5 text-sm font-medium">API Base URL</div>
<div class="flex w-full">
<div class="flex-1">
<input
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
placeholder="Enter LiteLLM API Base URL (litellm_params.api_base)"
bind:value={liteLLMAPIBase}
autocomplete="off"
/>
</div>
</div>
</div>
<div>
<div class=" mb-1.5 text-sm font-medium">API Key</div>
<div class="flex w-full">
<div class="flex-1">
<input
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
placeholder="Enter LiteLLM API Key (litellm_params.api_key)"
bind:value={liteLLMAPIKey}
autocomplete="off"
/>
</div>
</div>
</div>
<div>
<div class="mb-1.5 text-sm font-medium">API RPM</div>
<div class="flex w-full">
<div class="flex-1">
<input
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
placeholder="Enter LiteLLM API RPM (litellm_params.rpm)"
bind:value={liteLLMRPM}
autocomplete="off"
/>
</div>
</div>
</div>
{/if}
</div>
<div class="mb-2 text-xs text-gray-400 dark:text-gray-500">
Not sure what to add?
<a
class=" text-gray-300 font-medium"
href="https://litellm.vercel.app/docs/proxy/configs#quick-start"
target="_blank"
>
Click here for help.
</a>
</div>
<div>
<div class=" mb-2.5 text-sm font-medium">Delete a model</div>
<div class="flex w-full">
<div class="flex-1 mr-2">
<select
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
bind:value={deleteLiteLLMModelId}
placeholder="Select a model"
>
{#if !deleteLiteLLMModelId}
<option value="" disabled selected>Select a model</option>
{/if}
{#each liteLLMModelInfo as model}
<option value={model.model_info.id} class="bg-gray-100 dark:bg-gray-700"
>{model.model_name}</option
>
{/each}
</select>
</div>
<button
class="px-3 bg-gray-100 hover:bg-gray-200 text-gray-800 dark:bg-gray-850 dark:hover:bg-gray-800 dark:text-gray-100 rounded transition"
on:click={() => {
deleteLiteLLMModelHandler();
}}
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-4 h-4"
>
<path
fill-rule="evenodd"
d="M5 3.25V4H2.75a.75.75 0 0 0 0 1.5h.3l.815 8.15A1.5 1.5 0 0 0 5.357 15h5.285a1.5 1.5 0 0 0 1.493-1.35l.815-8.15h.3a.75.75 0 0 0 0-1.5H11v-.75A2.25 2.25 0 0 0 8.75 1h-1.5A2.25 2.25 0 0 0 5 3.25Zm2.25-.75a.75.75 0 0 0-.75.75V4h3v-.75a.75.75 0 0 0-.75-.75h-1.5ZM6.05 6a.75.75 0 0 1 .787.713l.275 5.5a.75.75 0 0 1-1.498.075l-.275-5.5A.75.75 0 0 1 6.05 6Zm3.9 0a.75.75 0 0 1 .712.787l-.275 5.5a.75.75 0 0 1-1.498-.075l.275-5.5a.75.75 0 0 1 .786-.711Z"
clip-rule="evenodd"
/>
</svg>
</button>
</div>
</div>
</div>
</div>
<!-- <div class="mt-2 space-y-3 pr-1.5">
<div>
<div class=" mb-2.5 text-sm font-medium">Add LiteLLM Model</div>
<div class="flex w-full mb-2">
<div class="flex-1">
<input
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
placeholder="Enter LiteLLM Model (e.g. ollama/mistral)"
bind:value={liteLLMModel}
autocomplete="off"
/>
</div>
</div>
<div class="flex justify-between items-center text-sm">
<div class=" font-medium">Advanced Model Params</div>
<button
class=" text-xs font-medium text-gray-500"
type="button"
on:click={() => {
showLiteLLMParams = !showLiteLLMParams;
}}>{showLiteLLMParams ? 'Hide' : 'Show'}</button
>
</div>
{#if showLiteLLMParams}
<div>
<div class=" mb-2.5 text-sm font-medium">LiteLLM API Key</div>
<div class="flex w-full">
<div class="flex-1">
<input
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
placeholder="Enter LiteLLM API Key (e.g. os.environ/AZURE_API_KEY_CA)"
bind:value={liteLLMAPIKey}
autocomplete="off"
/>
</div>
</div>
</div>
<div>
<div class=" mb-2.5 text-sm font-medium">LiteLLM API Base URL</div>
<div class="flex w-full">
<div class="flex-1">
<input
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
placeholder="Enter LiteLLM API Base URL"
bind:value={liteLLMAPIBase}
autocomplete="off"
/>
</div>
</div>
</div>
<div>
<div class=" mb-2.5 text-sm font-medium">LiteLLM API RPM</div>
<div class="flex w-full">
<div class="flex-1">
<input
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
placeholder="Enter LiteLLM API RPM"
bind:value={liteLLMRPM}
autocomplete="off"
/>
</div>
</div>
</div>
{/if}
<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
Not sure what to add?
<a
class=" text-gray-300 font-medium"
href="https://litellm.vercel.app/docs/proxy/configs#quick-start"
target="_blank"
>
Click here for help.
</a>
</div>
</div>
</div> -->
</div>
	</div>
</div>


@@ -4,6 +4,7 @@
	import { getOllamaModels } from '$lib/apis/ollama';
	import { getOpenAIModels } from '$lib/apis/openai';
	import { getLiteLLMModels } from '$lib/apis/litellm';

	import Modal from '../common/Modal.svelte';
	import Account from './Settings/Account.svelte';
@@ -27,23 +28,29 @@
	let selectedTab = 'general';
-	const getModels = async (type = 'all') => {
-		const models = [];
-		models.push(
-			...(await getOllamaModels(localStorage.token).catch((error) => {
-				toast.error(error);
-				return [];
-			}))
-		);
-		if (type === 'all') {
-			const openAIModels = await getOpenAIModels(localStorage.token).catch((error) => {
-				console.log(error);
-				return null;
-			});
-			models.push(...(openAIModels ? [{ name: 'hr' }, ...openAIModels] : []));
-		}
+	const getModels = async () => {
+		let models = await Promise.all([
+			await getOllamaModels(localStorage.token).catch((error) => {
+				console.log(error);
+				return null;
+			}),
+			await getOpenAIModels(localStorage.token).catch((error) => {
+				console.log(error);
+				return null;
+			}),
+			await getLiteLLMModels(localStorage.token).catch((error) => {
+				console.log(error);
+				return null;
+			})
+		]);
+		models = models
+			.filter((models) => models)
+			.reduce((a, e, i, arr) => a.concat(e, ...(i < arr.length - 1 ? [{ name: 'hr' }] : [])), []);
+		// models.push(...(ollamaModels ? [{ name: 'hr' }, ...ollamaModels] : []));
+		// models.push(...(openAIModels ? [{ name: 'hr' }, ...openAIModels] : []));
+		// models.push(...(liteLLMModels ? [{ name: 'hr' }, ...liteLLMModels] : []));
		return models;
	};
</script>


@@ -5,6 +5,8 @@ export const APP_NAME = 'Open WebUI';
export const WEBUI_BASE_URL = dev ? `http://${location.hostname}:8080` : ``;
export const WEBUI_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1`;

export const LITELLM_API_BASE_URL = `${WEBUI_BASE_URL}/litellm/api`;
export const OLLAMA_API_BASE_URL = `${WEBUI_BASE_URL}/ollama/api`;
export const OPENAI_API_BASE_URL = `${WEBUI_BASE_URL}/openai/api`;
export const AUDIO_API_BASE_URL = `${WEBUI_BASE_URL}/audio/api/v1`;


@@ -11,6 +11,7 @@
	import { getModelfiles } from '$lib/apis/modelfiles';
	import { getPrompts } from '$lib/apis/prompts';
	import { getOpenAIModels } from '$lib/apis/openai';
	import { getLiteLLMModels } from '$lib/apis/litellm';

	import { getDocs } from '$lib/apis/documents';
	import { getAllChatTags } from '$lib/apis/chats';
@@ -43,24 +44,28 @@
	let showShortcuts = false;
	const getModels = async () => {
-		let models = [];
-		models.push(
-			...(await getOllamaModels(localStorage.token).catch((error) => {
-				toast.error(error);
-				return [];
-			}))
-		);
-		// $settings.OPENAI_API_BASE_URL ?? 'https://api.openai.com/v1',
-		// $settings.OPENAI_API_KEY
-		const openAIModels = await getOpenAIModels(localStorage.token).catch((error) => {
-			console.log(error);
-			return null;
-		});
-		models.push(...(openAIModels ? [{ name: 'hr' }, ...openAIModels] : []));
+		let models = await Promise.all([
+			await getOllamaModels(localStorage.token).catch((error) => {
+				console.log(error);
+				return null;
+			}),
+			await getOpenAIModels(localStorage.token).catch((error) => {
+				console.log(error);
+				return null;
+			}),
+			await getLiteLLMModels(localStorage.token).catch((error) => {
+				console.log(error);
+				return null;
+			})
+		]);
+		models = models
+			.filter((models) => models)
+			.reduce((a, e, i, arr) => a.concat(e, ...(i < arr.length - 1 ? [{ name: 'hr' }] : [])), []);
+		// models.push(...(ollamaModels ? [{ name: 'hr' }, ...ollamaModels] : []));
+		// models.push(...(openAIModels ? [{ name: 'hr' }, ...openAIModels] : []));
+		// models.push(...(liteLLMModels ? [{ name: 'hr' }, ...liteLLMModels] : []));
		return models;
	};
@@ -117,8 +122,6 @@
		await models.set(await getModels());
	});

-	await setOllamaVersion();

	document.addEventListener('keydown', function (event) {
		const isCtrlPressed = event.ctrlKey || event.metaKey; // metaKey is for Cmd key on Mac
		// Check if the Shift key is pressed
@@ -250,60 +253,6 @@
			</div>
		</div>
	</div>
-{:else if checkVersion(REQUIRED_OLLAMA_VERSION, ollamaVersion ?? '0')}
-	<div class="fixed w-full h-full flex z-50">
-		<div
-			class="absolute w-full h-full backdrop-blur-md bg-white/20 dark:bg-gray-900/50 flex justify-center"
-		>
-			<div class="m-auto pb-44 flex flex-col justify-center">
-				<div class="max-w-md">
-					<div class="text-center dark:text-white text-2xl font-medium z-50">
-						Connection Issue or Update Needed
-					</div>
-					<div class=" mt-4 text-center text-sm dark:text-gray-200 w-full">
-						Oops! It seems like your Ollama needs a little attention. <br
-							class=" hidden sm:flex"
-						/>We've detected either a connection hiccup or observed that you're using an older
-						version. Ensure you're on the latest Ollama version
-						<br class=" hidden sm:flex" />(version
-						<span class=" dark:text-white font-medium">{REQUIRED_OLLAMA_VERSION} or higher</span
-						>) or check your connection.
-						<div class="mt-1 text-sm">
-							Trouble accessing Ollama?
-							<a
-								class=" text-black dark:text-white font-semibold underline"
-								href="https://github.com/open-webui/open-webui#troubleshooting"
-								target="_blank"
-							>
-								Click here for help.
-							</a>
-						</div>
-					</div>
-					<div class=" mt-6 mx-auto relative group w-fit">
-						<button
-							class="relative z-20 flex px-5 py-2 rounded-full bg-white border border-gray-100 dark:border-none hover:bg-gray-100 transition font-medium text-sm"
-							on:click={async () => {
-								location.href = '/';
-								// await setOllamaVersion();
-							}}
-						>
-							Check Again
-						</button>
-						<button
-							class="text-xs text-center w-full mt-2 text-gray-400 underline"
-							on:click={async () => {
-								await setOllamaVersion(REQUIRED_OLLAMA_VERSION);
-							}}>Close</button
-						>
-					</div>
-				</div>
-			</div>
-		</div>
-	</div>
{:else if localDBChats.length > 0}
	<div class="fixed w-full h-full flex z-50">
		<div


@@ -36,6 +36,7 @@
	import ModelSelector from '$lib/components/chat/ModelSelector.svelte';
	import Navbar from '$lib/components/layout/Navbar.svelte';
	import { RAGTemplate } from '$lib/utils/rag';
	import { LITELLM_API_BASE_URL, OPENAI_API_BASE_URL } from '$lib/constants';
	import { WEBUI_BASE_URL } from '$lib/constants';

	let stopResponseFlag = false;
@@ -132,6 +133,10 @@
			selectedModels = [''];
		}
selectedModels = selectedModels.map((modelId) =>
$models.map((m) => m.id).includes(modelId) ? modelId : ''
);
		let _settings = JSON.parse(localStorage.getItem('settings') ?? '{}');
		settings.set({
			..._settings
@@ -150,6 +155,10 @@
	const submitPrompt = async (userPrompt, _user = null) => {
		console.log('submitPrompt', $chatId);
selectedModels = selectedModels.map((modelId) =>
$models.map((m) => m.id).includes(modelId) ? modelId : ''
);
		if (selectedModels.includes('')) {
			toast.error('Model not selected');
		} else if (messages.length != 0 && messages.at(-1).done != true) {
@@ -278,10 +287,10 @@
		}

		await Promise.all(
-			selectedModels.map(async (model) => {
-				console.log(model);
-				const modelTag = $models.filter((m) => m.name === model).at(0);
+			selectedModels.map(async (modelId) => {
+				const model = $models.filter((m) => m.id === modelId).at(0);
+
+				if (model) {
					// Create response message
					let responseMessageId = uuidv4();
					let responseMessage = {
@@ -290,7 +299,7 @@
						childrenIds: [],
						role: 'assistant',
						content: '',
-						model: model,
+						model: model.id,
						timestamp: Math.floor(Date.now() / 1000) // Unix epoch
					};
@@ -306,12 +315,13 @@
					];
				}

-				if (modelTag?.external) {
+					if (model?.external) {
					await sendPromptOpenAI(model, prompt, responseMessageId, _chatId);
-				} else if (modelTag) {
+					} else if (model) {
					await sendPromptOllama(model, prompt, responseMessageId, _chatId);
+					}
				} else {
-					toast.error(`Model ${model} not found`);
+					toast.error(`Model ${modelId} not found`);
				}
			})
		);
@@ -320,6 +330,7 @@
	};

	const sendPromptOllama = async (model, userPrompt, responseMessageId, _chatId) => {
		model = model.id;
		const responseMessage = history.messages[responseMessageId];

		// Wait until history/message have been updated
@@ -531,8 +542,10 @@
		const responseMessage = history.messages[responseMessageId];
		scrollToBottom();

-		const res = await generateOpenAIChatCompletion(localStorage.token, {
-			model: model,
+		const res = await generateOpenAIChatCompletion(
+			localStorage.token,
+			{
+				model: model.id,
				stream: true,
				messages: [
					$settings.system
@@ -578,7 +591,9 @@
				num_ctx: $settings?.options?.num_ctx ?? undefined,
				frequency_penalty: $settings?.options?.repeat_penalty ?? undefined,
				max_tokens: $settings?.options?.num_predict ?? undefined
-		});
+			},
+			model.source === 'litellm' ? `${LITELLM_API_BASE_URL}/v1` : `${OPENAI_API_BASE_URL}`
+		);

		if (res && res.ok) {
			const reader = res.body
test.json

@@ -0,0 +1,6 @@
{
"model_name": "string",
"litellm_params": {
"model": "ollama/mistral"
}
}
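This stray fixture matches the `/model/new` payload shape, so it doubles as a ready-made request body; for example, under the same proxy-mount and token assumptions as above:

```bash
curl -X POST http://localhost:3000/litellm/api/model/new \
  -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
  -d @test.json
```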