Merge branch 'ollama-webui:main' into main

Author: Marclass, 2024-01-17 16:20:52 -07:00 (committed by GitHub)
Commit: 8dacc86ab0
Signature: no known key found for this signature in database (GPG key ID: B5690EEEBB952194)
8 changed files with 332 additions and 27 deletions


@@ -17,7 +17,8 @@ from apps.web.models.chats import (
 )
 from utils.utils import (
-    bearer_scheme, )
+    bearer_scheme,
+)
 from constants import ERROR_MESSAGES
 router = APIRouter()
@@ -29,7 +30,8 @@ router = APIRouter()
 @router.get("/", response_model=List[ChatTitleIdResponse])
 async def get_user_chats(
-    user=Depends(get_current_user), skip: int = 0, limit: int = 50):
+    user=Depends(get_current_user), skip: int = 0, limit: int = 50
+):
     return Chats.get_chat_lists_by_user_id(user.id, skip, limit)
@@ -41,9 +43,8 @@ async def get_user_chats(
 @router.get("/all", response_model=List[ChatResponse])
 async def get_all_user_chats(user=Depends(get_current_user)):
     return [
-        ChatResponse(**{
-            **chat.model_dump(), "chat": json.loads(chat.chat)
-        }) for chat in Chats.get_all_chats_by_user_id(user.id)
+        ChatResponse(**{**chat.model_dump(), "chat": json.loads(chat.chat)})
+        for chat in Chats.get_all_chats_by_user_id(user.id)
     ]
@@ -54,8 +55,14 @@ async def get_all_user_chats(user=Depends(get_current_user)):
 @router.post("/new", response_model=Optional[ChatResponse])
 async def create_new_chat(form_data: ChatForm, user=Depends(get_current_user)):
-    chat = Chats.insert_new_chat(user.id, form_data)
-    return ChatResponse(**{**chat.model_dump(), "chat": json.loads(chat.chat)})
+    try:
+        chat = Chats.insert_new_chat(user.id, form_data)
+        return ChatResponse(**{**chat.model_dump(), "chat": json.loads(chat.chat)})
+    except Exception as e:
+        print(e)
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST, detail=ERROR_MESSAGES.DEFAULT()
+        )
 ############################
@@ -68,12 +75,11 @@ async def get_chat_by_id(id: str, user=Depends(get_current_user)):
     chat = Chats.get_chat_by_id_and_user_id(id, user.id)
     if chat:
-        return ChatResponse(**{
-            **chat.model_dump(), "chat": json.loads(chat.chat)
-        })
+        return ChatResponse(**{**chat.model_dump(), "chat": json.loads(chat.chat)})
     else:
-        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED,
-                            detail=ERROR_MESSAGES.NOT_FOUND)
+        raise HTTPException(
+            status_code=status.HTTP_401_UNAUTHORIZED, detail=ERROR_MESSAGES.NOT_FOUND
+        )
 ############################
@@ -82,17 +88,15 @@ async def get_chat_by_id(id: str, user=Depends(get_current_user)):
 @router.post("/{id}", response_model=Optional[ChatResponse])
-async def update_chat_by_id(id: str,
-                            form_data: ChatForm,
-                            user=Depends(get_current_user)):
+async def update_chat_by_id(
+    id: str, form_data: ChatForm, user=Depends(get_current_user)
+):
     chat = Chats.get_chat_by_id_and_user_id(id, user.id)
     if chat:
         updated_chat = {**json.loads(chat.chat), **form_data.chat}
         chat = Chats.update_chat_by_id(id, updated_chat)
-        return ChatResponse(**{
-            **chat.model_dump(), "chat": json.loads(chat.chat)
-        })
+        return ChatResponse(**{**chat.model_dump(), "chat": json.loads(chat.chat)})
     else:
         raise HTTPException(
             status_code=status.HTTP_401_UNAUTHORIZED,


@@ -1,4 +1,7 @@
 #!/usr/bin/env bash
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+cd "$SCRIPT_DIR" || exit
+
 PORT="${PORT:-8080}"
-uvicorn main:app --host 0.0.0.0 --port $PORT --forwarded-allow-ips '*'
+uvicorn main:app --host 0.0.0.0 --port "$PORT" --forwarded-allow-ips '*'
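
For reference, a quick usage sketch of this start script (the `start.sh` file name and working directory are assumptions; the `PORT` default of 8080 comes from the line above):

```
# Run the backend on the default port 8080
./start.sh

# Override the port via the environment variable the script reads
PORT=3000 ./start.sh
```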

docs/apache.md (new file, 204 lines)

@@ -0,0 +1,204 @@
# Hosting UI and Models separately

Sometimes it's beneficial to host Ollama separately from the UI, while retaining the RAG and RBAC features shared across users:
# Ollama WebUI Configuration
## UI Configuration
For the UI configuration, you can set up the Apache VirtualHost as follows:
```
# Assuming you have a website hosting this UI at "server.com"
<VirtualHost 192.168.1.100:80>
ServerName server.com
DocumentRoot /home/server/public_html
ProxyPass / http://server.com:3000/ nocanon
ProxyPassReverse / http://server.com:3000/
</VirtualHost>
```
Enable the site first before you can request SSL:
`a2ensite server.com.conf` # this will enable the site. a2ensite is short for "Apache 2 Enable Site"
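
The ProxyPass/ProxyPassReverse directives above need the proxy modules; if they aren't enabled yet, a minimal sketch (assuming a Debian/Ubuntu Apache layout):

```
sudo a2enmod proxy proxy_http ssl rewrite
sudo apachectl configtest
sudo systemctl reload apache2
```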
```
# For SSL
<VirtualHost 192.168.1.100:443>
ServerName server.com
DocumentRoot /home/server/public_html
ProxyPass / http://server.com:3000/ nocanon
ProxyPassReverse / http://server.com:3000/
SSLEngine on
SSLCertificateFile /etc/ssl/virtualmin/170514456861234/ssl.cert
SSLCertificateKeyFile /etc/ssl/virtualmin/170514456861234/ssl.key
SSLProtocol all -SSLv2 -SSLv3 -TLSv1 -TLSv1.1
SSLProxyEngine on
SSLCACertificateFile /etc/ssl/virtualmin/170514456865864/ssl.ca
</VirtualHost>
```
I'm using Virtualmin here for my SSL clusters, but you can also use Certbot directly or your preferred SSL method. To set up SSL:

### Prerequisites
Run the following commands:
`sudo snap install certbot --classic`

`sudo apt install python3-certbot-apache` (this will install the Apache plugin).
Navigate to the apache sites-available directory:
`cd /etc/apache2/sites-available/`
Create `server.com.conf` if it does not already exist, containing the `<VirtualHost>` configuration above (adjust it to match your setup). Use the variant without SSL:

Once it's created, run `certbot --apache -d server.com`. This will request and install the SSL certificate for you and create `server.com.le-ssl.conf`.
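
To sanity-check the result, a short sketch using standard Certbot and Apache commands (adjust the domain to yours):

```
# Verify the Apache configuration and the issued certificate
sudo apachectl configtest
sudo certbot certificates

# Dry-run the renewal to confirm auto-renewal will work
sudo certbot renew --dry-run
```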
# Configuring Ollama Server
On your latest installation of Ollama, make sure that you have set up your API server following the official Ollama reference:
[Ollama FAQ](https://github.com/jmorganca/ollama/blob/main/docs/faq.md)
### TL;DR
The guide doesn't quite match the current service file on Linux, so we address it here:

Unless you're compiling Ollama from source, installing with the standard script `curl https://ollama.ai/install.sh | sh` creates a file called `ollama.service` in `/etc/systemd/system`. You can use nano to edit the file:
```
sudo nano /etc/systemd/system/ollama.service
```
Add the following line to the `[Service]` section:
```
# This line is mandatory. You can also bind a specific address, e.g. 192.168.254.109:DIFFERENT_PORT
Environment="OLLAMA_HOST=0.0.0.0:11434"
```
For instance:
```
[Unit]
Description=Ollama Service
After=network-online.target

[Service]
ExecStart=/usr/local/bin/ollama serve
# This line is mandatory. You can also bind a specific address, e.g. 192.168.254.109:DIFFERENT_PORT
Environment="OLLAMA_HOST=0.0.0.0:11434"
# This line is optional
Environment="OLLAMA_ORIGINS=http://192.168.254.106:11434,https://models.server.city"
User=ollama
Group=ollama
Restart=always
RestartSec=3
Environment="PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/s>

[Install]
WantedBy=default.target
```
Save the file (CTRL+O, then Enter, or CTRL+S in newer nano versions), then exit with CTRL+X.
Once the Ollama service restarts (or after a reboot), it will be listening on the IP:PORT you specified: here 0.0.0.0:11434, which makes it reachable at your machine's local address, e.g. 192.168.254.106:11434 (whatever your local IP is). If you want it reachable from outside your network, make sure your router forwards port 11434 to that machine's local IP.
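
You don't need to reboot; a minimal sketch of applying the unit change and confirming Ollama is reachable (the address below is the example value used above):

```
sudo systemctl daemon-reload
sudo systemctl restart ollama

# Confirm the listener is bound to 0.0.0.0:11434
sudo ss -tlnp | grep 11434

# Ollama answers with a short status message on its root endpoint
curl http://192.168.254.106:11434
```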
# Ollama Model Configuration
## For the Ollama model configuration, use the following Apache VirtualHost setup:
Navigate to the apache sites-available directory:
`cd /etc/apache2/sites-available/`
`nano models.server.city.conf` # match this with your ollama server domain
Add the following VirtualHost block, modeled on this example (modify as needed):
```
# Assuming you have a website hosting this UI at "models.server.city"
<IfModule mod_ssl.c>
<VirtualHost 192.168.254.109:443>
DocumentRoot "/var/www/html/"
ServerName models.server.city
<Directory "/var/www/html/">
Options None
Require all granted
</Directory>
ProxyRequests Off
ProxyPreserveHost On
ProxyAddHeaders On
SSLProxyEngine on
# Change port 1000 to 11434 if your Ollama server uses the default port
ProxyPass / http://server.city:1000/ nocanon
ProxyPassReverse / http://server.city:1000/
SSLCertificateFile /etc/letsencrypt/live/models.server.city/fullchain.pem
SSLCertificateKeyFile /etc/letsencrypt/live/models.server.city/privkey.pem
Include /etc/letsencrypt/options-ssl-apache.conf
</VirtualHost>
</IfModule>
```
You may need to enable the site first (if you haven't done so yet) before you can request SSL:
`a2ensite models.server.city.conf`
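
Then a quick configuration check and reload (a sketch, same Debian/Ubuntu layout as before):

```
sudo apachectl configtest
sudo systemctl reload apache2
```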
#### For the SSL part of the Ollama server
Run the following commands:
Navigate to the apache sites-available directory:
`cd /etc/apache2/sites-available/`
`certbot --apache -d models.server.city` # match this with your Ollama server domain
```
<VirtualHost 192.168.254.109:80>
DocumentRoot "/var/www/html/"
ServerName models.server.city
<Directory "/var/www/html/">
Options None
Require all granted
</Directory>
ProxyRequests Off
ProxyPreserveHost On
ProxyAddHeaders On
SSLProxyEngine on
# Change port 1000 to 11434 if your Ollama server uses the default port
ProxyPass / http://server.city:1000/ nocanon
ProxyPassReverse / http://server.city:1000/
RewriteEngine on
RewriteCond %{SERVER_NAME} =models.server.city
RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [END,NE,R=permanent]
</VirtualHost>
```
Don't forget to restart/reload Apache with `systemctl reload apache2`
Open your site at https://server.com!
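
A quick end-to-end check from any machine, using the example domains above; `/api/tags` is the Ollama endpoint that lists pulled models:

```
# The UI should answer over HTTPS
curl -I https://server.com

# The proxied Ollama API should answer as well
curl https://models.server.city/api/tags
```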
**Congratulations**, your _**OpenAI-like, ChatGPT-style UI**_ is now serving AI with RAG, RBAC and multimodal features! Download Ollama models if you haven't done so yet!
If you encounter any misconfiguration or errors, please file an issue or join the discussion. There are a lot of friendly developers here to assist you.
Let's make this UI much more user friendly for everyone!
Thanks for making ollama-webui your UI of choice for AI!
This doc is made by **Bob Reyes**, your **Ollama-Web-UI** fan from the Philippines.


@@ -301,7 +301,10 @@
 			const file = inputFiles[0];
 			if (['image/gif', 'image/jpeg', 'image/png'].includes(file['type'])) {
 				reader.readAsDataURL(file);
-			} else if (SUPPORTED_FILE_TYPE.includes(file['type'])) {
+			} else if (
+				SUPPORTED_FILE_TYPE.includes(file['type']) ||
+				['md'].includes(file.name.split('.').at(-1))
+			) {
 				uploadDoc(file);
 				filesInputElement.value = '';
 			} else {
@@ -461,8 +464,8 @@
 			placeholder={chatInputPlaceholder !== ''
 				? chatInputPlaceholder
 				: speechRecognitionListening
-				? 'Listening...'
-				: 'Send a message'}
+					? 'Listening...'
+					: 'Send a message'}
 			bind:value={prompt}
 			on:keypress={(e) => {
 				if (e.keyCode == 13 && !e.shiftKey) {


@@ -21,7 +21,7 @@
 	import { WEB_UI_VERSION, WEBUI_API_BASE_URL } from '$lib/constants';
 	import { config, models, settings, user, chats } from '$lib/stores';
-	import { splitStream, getGravatarURL } from '$lib/utils';
+	import { splitStream, getGravatarURL, getImportOrigin, convertOpenAIChats } from '$lib/utils';
 	import Advanced from './Settings/Advanced.svelte';
 	import Modal from '../common/Modal.svelte';
@@ -132,6 +132,13 @@
 			reader.onload = (event) => {
 				let chats = JSON.parse(event.target.result);
 				console.log(chats);
+				if (getImportOrigin(chats) == 'openai') {
+					try {
+						chats = convertOpenAIChats(chats);
+					} catch (error) {
+						console.log('Unable to import chats:', error);
+					}
+				}
 				importChats(chats);
 			};


@@ -192,3 +192,74 @@ export const calculateSHA256 = async (file) => {
		throw error;
	}
};
export const getImportOrigin = (_chats) => {
// Check what external service chat imports are from
if ('mapping' in _chats[0]) {
return 'openai';
}
return 'webui';
};
const convertOpenAIMessages = (convo) => {
// Parse OpenAI chat messages and create chat dictionary for creating new chats
const mapping = convo['mapping'];
const messages = [];
let currentId = '';
for (let message_id in mapping) {
const message = mapping[message_id];
currentId = message_id;
if (message['message'] == null || message['message']['content']['parts'][0] == '') {
// Skip chat messages with no content
continue;
} else {
const new_chat = {
id: message_id,
parentId: messages.length > 0 && message['parent'] in mapping ? message['parent'] : null,
childrenIds: message['children'] || [],
role: message['message']?.['author']?.['role'] !== 'user' ? 'assistant' : 'user',
content: message['message']?.['content']?.['parts']?.[0] || '',
model: 'gpt-3.5-turbo',
done: true,
context: null
};
messages.push(new_chat);
}
}
let history = {};
messages.forEach((obj) => (history[obj.id] = obj));
const chat = {
history: {
currentId: currentId,
messages: history // Need to convert this to not a list and instead a json object
},
models: ['gpt-3.5-turbo'],
messages: messages,
options: {},
timestamp: convo['create_time'],
title: convo['title'] ?? 'New Chat'
};
return chat;
};
export const convertOpenAIChats = (_chats) => {
// Create a list of dictionaries with each conversation from import
const chats = [];
for (let convo of _chats) {
const chat = convertOpenAIMessages(convo);
if (Object.keys(chat.history.messages).length > 0) {
chats.push({
id: convo['id'],
user_id: '',
title: convo['title'],
chat: chat,
timestamp: convo['timestamp']
});
}
}
return chats;
};


@@ -200,6 +200,13 @@
 			await chatId.set('local');
 		}
 		await tick();
+	} else if (chat.chat["models"] != selectedModels) {
+		// If model is not saved in DB, then save selectedmodel when message is sent
+		chat = await updateChatById(localStorage.token, $chatId, {
+			models: selectedModels
+		});
+		await chats.set(await getChatList(localStorage.token));
 	}

 	// Reset chat input textarea
// Reset chat input textarea // Reset chat input textarea
@@ -696,7 +703,7 @@
 <div class="min-h-screen w-full flex justify-center">
 	<div class=" py-2.5 flex flex-col justify-between w-full">
 		<div class="max-w-2xl mx-auto w-full px-3 md:px-0 mt-10">
-			<ModelSelector bind:selectedModels disabled={messages.length > 0} />
+			<ModelSelector bind:selectedModels disabled={messages.length > 0 && !selectedModels.includes('')} />
 		</div>
 		<div class=" h-full mt-10 mb-32 w-full flex flex-col">


@@ -67,7 +67,10 @@
 		if (inputFiles && inputFiles.length > 0) {
 			const file = inputFiles[0];
-			if (SUPPORTED_FILE_TYPE.includes(file['type'])) {
+			if (
+				SUPPORTED_FILE_TYPE.includes(file['type']) ||
+				['md'].includes(file.name.split('.').at(-1))
+			) {
 				uploadDoc(file);
 			} else {
 				toast.error(`Unsupported File Type '${file['type']}'.`);
@@ -144,7 +147,10 @@
 		on:change={async (e) => {
 			if (inputFiles && inputFiles.length > 0) {
 				const file = inputFiles[0];
-				if (SUPPORTED_FILE_TYPE.includes(file['type'])) {
+				if (
+					SUPPORTED_FILE_TYPE.includes(file['type']) ||
+					['md'].includes(file.name.split('.').at(-1))
+				) {
 					uploadDoc(file);
 				} else {
 					toast.error(`Unsupported File Type '${file['type']}'.`);