Merge pull request #442 from ollama-webui/many-models

feat: collaborative chat
Timothy Jaeryang Baek 2024-01-09 23:11:10 -08:00 committed by GitHub
commit 2d9830b2c2
7 changed files with 269 additions and 17 deletions

View file

@@ -33,9 +33,9 @@ Also check our sibling project, [OllamaHub](https://ollamahub.com/), where you c
- ✒️🔢 **Full Markdown and LaTeX Support**: Elevate your LLM experience with comprehensive Markdown and LaTeX capabilities for enriched interaction.
- 📚 **Local RAG Integration**: Dive into the future of chat interactions with the groundbreaking Retrieval Augmented Generation (RAG) support. This feature seamlessly integrates document interactions into your chat experience. You can load documents directly into the chat or add files to your document library, effortlessly accessing them using the '#' command in the prompt. In its alpha phase, occasional issues may arise as we actively refine and enhance this feature to ensure optimal performance and reliability.
- 📚 **Local RAG Integration**: Dive into the future of chat interactions with the groundbreaking Retrieval Augmented Generation (RAG) support. This feature seamlessly integrates document interactions into your chat experience. You can load documents directly into the chat or add files to your document library, effortlessly accessing them using the **'#'** command in the prompt. In its alpha phase, occasional issues may arise as we actively refine and enhance this feature to ensure optimal performance and reliability.
- 📜 **Prompt Preset Support**: Instantly access preset prompts using the '/' command in the chat input. Load predefined conversation starters effortlessly and expedite your interactions. Effortlessly import prompts through [OllamaHub](https://ollamahub.com/) integration.
- 📜 **Prompt Preset Support**: Instantly access preset prompts using the **'/'** command in the chat input. Load predefined conversation starters effortlessly and expedite your interactions. Effortlessly import prompts through [OllamaHub](https://ollamahub.com/) integration.
- 👍👎 **RLHF Annotation**: Empower your messages by rating them with thumbs up and thumbs down, facilitating the creation of datasets for Reinforcement Learning from Human Feedback (RLHF). Utilize your messages to train or fine-tune models, all while ensuring the confidentiality of locally saved data.
@@ -51,6 +51,8 @@ Also check our sibling project, [OllamaHub](https://ollamahub.com/), where you c
- ⚙️ **Many Models Conversations**: Effortlessly engage with various models simultaneously, harnessing their unique strengths for optimal responses. Enhance your experience by leveraging a diverse set of models in parallel.
- 💬 **Collaborative Chat**: Harness the collective intelligence of multiple models by seamlessly orchestrating group conversations. Use the **'@'** command to specify the model, enabling dynamic and diverse dialogues within your chat interface. Immerse yourself in the collective intelligence woven into your chat environment.
- 🤝 **OpenAI API Integration**: Effortlessly integrate OpenAI-compatible API for versatile conversations alongside Ollama models. Customize the API Base URL to link with **LMStudio, Mistral, OpenRouter, and more**.
- 🔄 **Regeneration History Access**: Easily revisit and explore your entire regeneration history.
@@ -92,6 +94,7 @@ Don't forget to explore our sibling project, [OllamaHub](https://ollamahub.com/)
1. **Installing Docker:**
- **For Windows and Mac Users:**
- Download Docker Desktop from [Docker's official website](https://www.docker.com/products/docker-desktop).
- Follow the installation instructions provided on the website. After installation, open Docker Desktop to ensure it's running properly.
@@ -112,6 +115,7 @@ Don't forget to explore our sibling project, [OllamaHub](https://ollamahub.com/)
This command downloads a test image and runs it in a container, which prints an informational message.
2. **Ensure You Have the Latest Version of Ollama:**
- Download the latest version from [https://ollama.ai/](https://ollama.ai/).
3. **Verify Ollama Installation:**
@@ -264,7 +268,6 @@ See [TROUBLESHOOTING.md](/TROUBLESHOOTING.md) for information on how to troubles
Here are some exciting tasks on our roadmap:
- 💬 **Group Conversations**: Witness the dynamic synergy of multiple Language Models engaging in collaborative discussions. In this unique feature, you can orchestrate interactions between various models, opening up avenues for diverse and intriguing dialogues. Experience the power of collective intelligence within your chat environment.
- 🌐 **Web Browsing Capability**: Experience the convenience of seamlessly integrating web content directly into your chat. Easily browse and share information without leaving the conversation.
- 🔄 **Function Calling**: Empower your interactions by running code directly within the chat. Execute functions and commands effortlessly, enhancing the functionality of your conversations.
- ⚙️ **Custom Python Backend Actions**: Empower your Ollama Web UI by creating or downloading custom Python backend actions. Unleash the full potential of your web interface with tailored actions that suit your specific needs, enhancing functionality and versatility.

View file

@@ -167,6 +167,44 @@ export const generateTitle = async (token: string = '', model: string, prompt: s
return res?.response ?? 'New Chat';
};
// Ask the model to author the next USER turn for the given conversation transcript.
export const generatePrompt = async (token: string = '', model: string, conversation: string) => {
	let error = null;

	if (conversation === '') {
		conversation = '[no existing conversation]';
	}

	const res = await fetch(`${OLLAMA_API_BASE_URL}/generate`, {
		method: 'POST',
		headers: {
			'Content-Type': 'text/event-stream',
			Authorization: `Bearer ${token}`
		},
		body: JSON.stringify({
			model: model,
			prompt: `Conversation:
${conversation}
As USER in the conversation above, your task is to continue the conversation. Remember, your responses should be crafted as if you're a human conversing in a natural, realistic manner, keeping in mind the context and flow of the dialogue. Please generate a fitting response to the last message in the conversation, or if there is no existing conversation, initiate one as a normal person would.
Response:
`
		})
	}).catch((err) => {
		console.log(err);
		if ('detail' in err) {
			error = err.detail;
		}
		return null;
	});

	if (error) {
		throw error;
	}

	return res;
};
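Aside: `generatePrompt` resolves to the raw `fetch` Response, so callers are expected to consume the body as a stream. Ollama's `/generate` endpoint emits newline-delimited JSON chunks, each carrying a `response` fragment and a `done` flag. A minimal, illustrative consumer is sketched below (the `collectResponse` helper is not part of this diff; `Models.svelte` further down does the same thing incrementally):

// Illustrative sketch: drain the newline-delimited JSON stream into one string.
const collectResponse = async (res: Response): Promise<string> => {
	const reader = res.body!.pipeThrough(new TextDecoderStream()).getReader();
	let buffer = '';
	let text = '';
	while (true) {
		const { value, done } = await reader.read();
		if (done) break;
		buffer += value;
		const lines = buffer.split('\n');
		buffer = lines.pop() ?? ''; // keep a partial trailing line for the next chunk
		for (const line of lines) {
			if (line === '') continue;
			const data = JSON.parse(line);
			if ('detail' in data) throw data; // backend error payload
			if (!data.done) text += data.response;
		}
	}
	return text;
};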
export const generateChatCompletion = async (token: string = '', body: object) => {
let error = null;

View file

@@ -10,6 +10,7 @@
import AddFilesPlaceholder from '../AddFilesPlaceholder.svelte';
import { SUPPORTED_FILE_TYPE } from '$lib/constants';
import Documents from './MessageInput/Documents.svelte';
import Models from './MessageInput/Models.svelte';
export let submitPrompt: Function;
export let stopResponse: Function;
@@ -18,12 +19,17 @@
export let autoScroll = true;
let filesInputElement;
let promptsElement;
let documentsElement;
let modelsElement;
let inputFiles;
let dragged = false;
let user = null;
let chatInputPlaceholder = '';
export let files = [];
export let fileUploadEnabled = true;
@@ -35,6 +41,15 @@
let speechRecognition;
// Auto-grow the chat textarea (up to 200px) whenever the prompt changes.
$: if (prompt) {
	const chatInput = document.getElementById('chat-textarea');
	if (chatInput) {
		chatInput.style.height = '';
		chatInput.style.height = Math.min(chatInput.scrollHeight, 200) + 'px';
	}
}
const speechRecognitionHandler = () => {
// Check if SpeechRecognition is supported
@@ -79,7 +94,7 @@
console.log('recognition ended');
speechRecognitionListening = false;
if (prompt !== '' && $settings?.speechAutoSend === true) {
submitPrompt(prompt);
submitPrompt(prompt, user);
}
};
@@ -246,6 +261,14 @@
];
}}
/>
{:else if prompt.charAt(0) === '@'}
	<Models
		bind:this={modelsElement}
		bind:prompt
		bind:user
		bind:chatInputPlaceholder
		{messages}
	/>
{:else if messages.length == 0 && suggestionPrompts.length !== 0}
<Suggestions {suggestionPrompts} {submitPrompt} />
{/if}
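In short, the template dispatches on the prompt's first character to decide which command palette to render. A minimal standalone sketch of that dispatch (illustrative only; `paletteFor` is not a function in this PR):

// '/' opens preset prompts, '#' the document picker, and '@' (new here) the model picker.
type Palette = 'prompts' | 'documents' | 'models' | null;

const paletteFor = (prompt: string): Palette => {
	switch (prompt.charAt(0)) {
		case '/':
			return 'prompts';
		case '#':
			return 'documents';
		case '@':
			return 'models'; // collaborative chat: pick which model speaks next
		default:
			return null;
	}
};

The keydown handlers below reuse the same trigger set, routing ArrowUp/ArrowDown/Enter/Tab to whichever palette component is mounted via `promptsElement || documentsElement || modelsElement`.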
@@ -293,7 +316,7 @@
<form
class=" flex flex-col relative w-full rounded-xl border dark:border-gray-600 bg-white dark:bg-gray-800 dark:text-gray-100"
on:submit|preventDefault={() => {
submitPrompt(prompt);
submitPrompt(prompt, user);
}}
>
{#if files.length > 0}
@@ -435,14 +458,18 @@
class=" dark:bg-gray-800 dark:text-gray-100 outline-none w-full py-3 px-2 {fileUploadEnabled
? ''
: ' pl-4'} rounded-xl resize-none h-[48px]"
placeholder={speechRecognitionListening ? 'Listening...' : 'Send a message'}
placeholder={chatInputPlaceholder !== ''
? chatInputPlaceholder
: speechRecognitionListening
? 'Listening...'
: 'Send a message'}
bind:value={prompt}
on:keypress={(e) => {
if (e.keyCode == 13 && !e.shiftKey) {
e.preventDefault();
}
if (prompt !== '' && e.keyCode == 13 && !e.shiftKey) {
submitPrompt(prompt);
submitPrompt(prompt, user);
}
}}
on:keydown={async (e) => {
@@ -477,10 +504,10 @@
editButton?.click();
}
if (['/', '#'].includes(prompt.charAt(0)) && e.key === 'ArrowUp') {
if (['/', '#', '@'].includes(prompt.charAt(0)) && e.key === 'ArrowUp') {
e.preventDefault();
(promptsElement || documentsElement).selectUp();
(promptsElement || documentsElement || modelsElement).selectUp();
const commandOptionButton = [
...document.getElementsByClassName('selected-command-option-button')
@@ -488,10 +515,10 @@
commandOptionButton.scrollIntoView({ block: 'center' });
}
if (['/', '#'].includes(prompt.charAt(0)) && e.key === 'ArrowDown') {
if (['/', '#', '@'].includes(prompt.charAt(0)) && e.key === 'ArrowDown') {
e.preventDefault();
(promptsElement || documentsElement).selectDown();
(promptsElement || documentsElement || modelsElement).selectDown();
const commandOptionButton = [
...document.getElementsByClassName('selected-command-option-button')
@@ -499,7 +526,7 @@
commandOptionButton.scrollIntoView({ block: 'center' });
}
if (['/', '#'].includes(prompt.charAt(0)) && e.key === 'Enter') {
if (['/', '#', '@'].includes(prompt.charAt(0)) && e.key === 'Enter') {
e.preventDefault();
const commandOptionButton = [
@@ -509,7 +536,7 @@
commandOptionButton?.click();
}
if (['/', '#'].includes(prompt.charAt(0)) && e.key === 'Tab') {
if (['/', '#', '@'].includes(prompt.charAt(0)) && e.key === 'Tab') {
e.preventDefault();
const commandOptionButton = [
@@ -540,6 +567,7 @@
on:input={(e) => {
e.target.style.height = '';
e.target.style.height = Math.min(e.target.scrollHeight, 200) + 'px';
user = null;
}}
on:paste={(e) => {
const clipboardData = e.clipboardData || window.clipboardData;

View file

@@ -0,0 +1,158 @@
<script lang="ts">
	import { generatePrompt } from '$lib/apis/ollama';
	import { models } from '$lib/stores';
	import { splitStream } from '$lib/utils';
	import { tick } from 'svelte';
	import toast from 'svelte-french-toast';

	export let prompt = '';
	export let user = null;
	export let chatInputPlaceholder = '';
	export let messages = [];

	let selectedIdx = 0;
	let filteredModels = [];

	// Match local (non-external) models against the text typed after '@'.
	$: filteredModels = $models
		.filter(
			(p) =>
				p.name !== 'hr' &&
				!p.external &&
				p.name.includes(prompt.split(' ')?.at(0)?.substring(1) ?? '')
		)
		.sort((a, b) => a.name.localeCompare(b.name));

	$: if (prompt) {
		selectedIdx = 0;
	}

	export const selectUp = () => {
		selectedIdx = Math.max(0, selectedIdx - 1);
	};

	export const selectDown = () => {
		selectedIdx = Math.min(selectedIdx + 1, filteredModels.length - 1);
	};

	const confirmSelect = async (model) => {
		// dispatch('select', model);
		prompt = '';
		// Tag the in-progress message with the authoring model's name.
		user = JSON.parse(JSON.stringify(model.name));
		await tick();

		chatInputPlaceholder = `'${model.name}' is thinking...`;

		const chatInputElement = document.getElementById('chat-textarea');

		await tick();
		chatInputElement?.focus();
		await tick();

		// Flatten the message history into a plain-text transcript for the model.
		const convoText = messages.reduce((a, message, i, arr) => {
			return `${a}### ${message.role.toUpperCase()}\n${message.content}\n\n`;
		}, '');

		const res = await generatePrompt(localStorage.token, model.name, convoText);

		if (res && res.ok) {
			const reader = res.body
				.pipeThrough(new TextDecoderStream())
				.pipeThrough(splitStream('\n'))
				.getReader();

			// Stream the generated text into the chat textarea token by token.
			while (true) {
				const { value, done } = await reader.read();
				if (done) {
					break;
				}

				try {
					let lines = value.split('\n');

					for (const line of lines) {
						if (line !== '') {
							console.log(line);
							let data = JSON.parse(line);

							if ('detail' in data) {
								throw data;
							}

							if (data.done == false) {
								// Skip a leading newline so the prompt doesn't start blank.
								if (prompt == '' && data.response == '\n') {
									continue;
								} else {
									prompt += data.response;
									console.log(data.response);
									chatInputElement.scrollTop = chatInputElement.scrollHeight;
									await tick();
								}
							}
						}
					}
				} catch (error) {
					console.log(error);
					if ('detail' in error) {
						toast.error(error.detail);
					}
					break;
				}
			}
		} else {
			if (res !== null) {
				const error = await res.json();
				console.log(error);
				if ('detail' in error) {
					toast.error(error.detail);
				} else {
					toast.error(error.error);
				}
			} else {
				toast.error(`Uh-oh! There was an issue connecting to Ollama.`);
			}
		}

		chatInputPlaceholder = '';
		console.log(user);
	};
</script>
{#if filteredModels.length > 0}
	<div class="md:px-2 mb-3 text-left w-full">
		<div class="flex w-full rounded-lg border border-gray-100 dark:border-gray-700">
			<div class=" bg-gray-100 dark:bg-gray-700 w-10 rounded-l-lg text-center">
				<div class=" text-lg font-semibold mt-2">@</div>
			</div>

			<div class="max-h-60 flex flex-col w-full rounded-r-lg">
				<div class=" overflow-y-auto bg-white p-2 rounded-tr-lg space-y-0.5">
					{#each filteredModels as model, modelIdx}
						<button
							class=" px-3 py-1.5 rounded-lg w-full text-left {modelIdx === selectedIdx
								? ' bg-gray-100 selected-command-option-button'
								: ''}"
							type="button"
							on:click={() => {
								confirmSelect(model);
							}}
							on:mousemove={() => {
								selectedIdx = modelIdx;
							}}
							on:focus={() => {}}
						>
							<div class=" font-medium text-black line-clamp-1">
								{model.name}
							</div>

							<!-- <div class=" text-xs text-gray-600 line-clamp-1">
								{doc.title}
							</div> -->
						</button>
					{/each}
				</div>
			</div>
		</div>
	</div>
{/if}
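Note that `splitStream` (imported from `$lib/utils` above) is not shown in this diff. A typical implementation of such a helper, sketched here under that assumption, is a `TransformStream` that buffers partial chunks and re-emits complete delimiter-separated segments, so each `reader.read()` in `confirmSelect` yields whole JSON lines:

// Sketch of a splitStream-style helper: re-chunk a text stream on a delimiter.
export const splitStream = (splitOn: string): TransformStream<string, string> => {
	let buffer = '';
	return new TransformStream<string, string>({
		transform(chunk, controller) {
			buffer += chunk;
			const parts = buffer.split(splitOn);
			parts.slice(0, -1).forEach((part) => controller.enqueue(part)); // emit complete segments
			buffer = parts[parts.length - 1]; // retain the partial tail
		},
		flush(controller) {
			if (buffer) controller.enqueue(buffer); // emit any remainder at end of stream
		}
	});
};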

View file

@@ -2,6 +2,7 @@
import { tick } from 'svelte';
import Name from './Name.svelte';
import ProfileImage from './ProfileImage.svelte';
import { modelfiles } from '$lib/stores';
export let user;
export let message;
@@ -42,11 +43,25 @@
</script>
<div class=" flex w-full">
<ProfileImage src={user?.profile_image_url ?? '/user.png'} />
<ProfileImage
	src={message.user
		? $modelfiles.find((modelfile) => modelfile.tagName === message.user)?.imageUrl ?? '/user.png'
		: user?.profile_image_url ?? '/user.png'}
/>
<div class="w-full overflow-hidden">
<div class="user-message">
<Name>You</Name>
<Name>
	{#if message.user}
		{#if $modelfiles.map((modelfile) => modelfile.tagName).includes(message.user)}
			{$modelfiles.find((modelfile) => modelfile.tagName === message.user)?.title}
		{:else}
			You <span class=" text-gray-500 text-sm font-medium">{message?.user ?? ''}</span>
		{/if}
	{:else}
		You
	{/if}
</Name>
</div>
<div

View file

@@ -116,7 +116,7 @@
// Ollama functions
//////////////////////////
const submitPrompt = async (userPrompt) => {
const submitPrompt = async (userPrompt, _user = null) => {
console.log('submitPrompt', $chatId);
if (selectedModels.includes('')) {
@@ -143,6 +143,7 @@
parentId: messages.length !== 0 ? messages.at(-1).id : null,
childrenIds: [],
role: 'user',
user: _user ?? undefined,
content: userPrompt,
files: files.length > 0 ? files : undefined
};
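Tying the pieces together: when a model is chosen via '@', Models.svelte sets `user` to the model's name, MessageInput passes it through `submitPrompt`, and it lands on the message object as `user`, which UserMessage.svelte then resolves to a modelfile's title and avatar. An illustrative shape of the resulting message (field values are examples, not taken from the diff):

// Example user-turn message authored by a model via the '@' command:
const exampleMessage = {
	parentId: null,
	childrenIds: [],
	role: 'user',
	user: 'llama2:latest', // the model tag that wrote this "user" turn
	content: 'So, how has your week been?',
	files: undefined
};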

View file

@@ -135,7 +135,8 @@
// Ollama functions
//////////////////////////
const submitPrompt = async (userPrompt) => {
const submitPrompt = async (userPrompt, user) => {
console.log(userPrompt, user);
console.log('submitPrompt', $chatId);
if (selectedModels.includes('')) {
@@ -143,6 +144,14 @@
} else if (messages.length != 0 && messages.at(-1).done != true) {
// Response not done
console.log('wait');
} else if (
	files.length > 0 &&
	files.filter((file) => file.upload_status === false).length > 0
) {
	// Upload not done
	toast.error(
		`Oops! Hold tight! Your files are still in the processing oven. We're cooking them up to perfection. Please be patient and we'll let you know once they're ready.`
	);
} else {
// Reset chat message textarea height
document.getElementById('chat-textarea').style.height = '';
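The new guard above simply asks whether any attached file is still mid-upload before allowing submission; an equivalent, slightly tighter predicate (behaviorally identical, shown for clarity):

// true while at least one attached file has not finished uploading
const uploadsPending = files.some((file) => file.upload_status === false);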