fix: continue generation

Timothy J. Baek 2024-02-25 15:46:12 -08:00
parent 6245146661
commit 1ff0c9a95d
2 changed files with 117 additions and 112 deletions
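
In essence, the commit switches model resolution from name to id and hands the resolved model record, rather than a bare name string, to the prompt senders. A condensed sketch of that dispatch, assembled from the hunks below (dispatchPrompt is a hypothetical wrapper name; sendPromptOpenAI, sendPromptOllama, toast, and the model list belong to the component shown in the diff):

// Condensed sketch (not a verbatim excerpt from the commit): look the model up
// by id, then route the prompt to the OpenAI-compatible or the Ollama sender.
const dispatchPrompt = async (models, modelId, prompt, responseMessageId, _chatId) => {
	const model = models.find((m) => m.id === modelId);

	if (model) {
		if (model.external) {
			await sendPromptOpenAI(model, prompt, responseMessageId, _chatId);
		} else {
			await sendPromptOllama(model, prompt, responseMessageId, _chatId);
		}
	} else {
		toast.error(`Model ${modelId} not found`);
	}
};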

View file

@@ -732,25 +732,26 @@
 			responseMessage.done = false;
 			await tick();
 
-			const modelTag = $models.filter((m) => m.name === responseMessage.model).at(0);
+			const model = $models.filter((m) => m.id === responseMessage.model).at(0);
 
-			if (modelTag?.external) {
-				await sendPromptOpenAI(
-					responseMessage.model,
-					history.messages[responseMessage.parentId].content,
-					responseMessage.id,
-					_chatId
-				);
-			} else if (modelTag) {
-				await sendPromptOllama(
-					responseMessage.model,
-					history.messages[responseMessage.parentId].content,
-					responseMessage.id,
-					_chatId
-				);
-			} else {
-				toast.error(`Model ${model} not found`);
+			if (model) {
+				if (model?.external) {
+					await sendPromptOpenAI(
+						model,
+						history.messages[responseMessage.parentId].content,
+						responseMessage.id,
+						_chatId
+					);
+				} else
+					await sendPromptOllama(
+						model,
+						history.messages[responseMessage.parentId].content,
+						responseMessage.id,
+						_chatId
+					);
+			} else {
+				toast.error(`Model ${modelId} not found`);
 			}
 		}
 	};

View file

@@ -238,7 +238,6 @@
 			await sendPrompt(userPrompt, userMessageId);
 		}
 	};
 
-
 	const sendPrompt = async (prompt, parentId) => {
 		const _chatId = JSON.parse(JSON.stringify($chatId));
@@ -292,10 +291,10 @@
 		}
 
 		await Promise.all(
-			selectedModels.map(async (model) => {
-				console.log(model);
-				const modelTag = $models.filter((m) => m.name === model).at(0);
+			selectedModels.map(async (modelId) => {
+				const model = $models.filter((m) => m.id === modelId).at(0);
+				if (model) {
 
 				// Create response message
 				let responseMessageId = uuidv4();
 				let responseMessage = {
@@ -304,7 +303,7 @@
 					childrenIds: [],
 					role: 'assistant',
 					content: '',
-					model: model,
+					model: model.id,
 					timestamp: Math.floor(Date.now() / 1000) // Unix epoch
 				};
 
@@ -320,12 +319,13 @@
 					];
 				}
 
-				if (modelTag?.external) {
+				if (model?.external) {
 					await sendPromptOpenAI(model, prompt, responseMessageId, _chatId);
-				} else if (modelTag) {
+				} else if (model) {
 					await sendPromptOllama(model, prompt, responseMessageId, _chatId);
+				}
 				} else {
-					toast.error(`Model ${model} not found`);
+					toast.error(`Model ${modelId} not found`);
 				}
 			})
 		);
@@ -334,6 +334,7 @@
 	};
 
 	const sendPromptOllama = async (model, userPrompt, responseMessageId, _chatId) => {
+		model = model.id;
 		const responseMessage = history.messages[responseMessageId];
 
 		// Wait until history/message have been updated
@@ -543,11 +544,12 @@
 	const sendPromptOpenAI = async (model, userPrompt, responseMessageId, _chatId) => {
 		const responseMessage = history.messages[responseMessageId];
 
 		scrollToBottom();
 
-		const res = await generateOpenAIChatCompletion(localStorage.token, {
-			model: model,
+		const res = await generateOpenAIChatCompletion(
+			localStorage.token,
+			{
+				model: model.id,
 			stream: true,
 			messages: [
 				$settings.system
@@ -593,7 +595,9 @@
 			num_ctx: $settings?.options?.num_ctx ?? undefined,
 			frequency_penalty: $settings?.options?.repeat_penalty ?? undefined,
 			max_tokens: $settings?.options?.num_predict ?? undefined
-		});
+			},
+			model.source === 'litellm' ? `${LITELLM_API_BASE_URL}/v1` : `${OPENAI_API_BASE_URL}`
+		);
 
 		if (res && res.ok) {
 			const reader = res.body
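
The call above now passes a third argument that picks the LiteLLM proxy or the plain OpenAI base URL per model. The helper itself is not part of this diff; a minimal sketch, assuming it simply threads that URL into the completions request (the default value and the fetch shape here are assumptions, not taken from the commit):

// Assumed shape of the updated helper: the caller-supplied base URL replaces a
// hard-coded one, and the raw Response is returned for the caller to stream.
export const generateOpenAIChatCompletion = async (token = '', body, url = OPENAI_API_BASE_URL) => {
	const res = await fetch(`${url}/chat/completions`, {
		method: 'POST',
		headers: {
			Authorization: `Bearer ${token}`,
			'Content-Type': 'application/json'
		},
		body: JSON.stringify(body)
	});

	return res;
};
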
@@ -704,7 +708,6 @@
 			await setChatTitle(_chatId, userPrompt);
 		}
 	};
-
 	const stopResponse = () => {
 		stopResponseFlag = true;
 		console.log('stopResponse');
@@ -719,25 +722,26 @@
 			responseMessage.done = false;
 			await tick();
 
-			const modelTag = $models.filter((m) => m.name === responseMessage.model).at(0);
+			const model = $models.filter((m) => m.id === responseMessage.model).at(0);
 
-			if (modelTag?.external) {
-				await sendPromptOpenAI(
-					responseMessage.model,
-					history.messages[responseMessage.parentId].content,
-					responseMessage.id,
-					_chatId
-				);
-			} else if (modelTag) {
-				await sendPromptOllama(
-					responseMessage.model,
-					history.messages[responseMessage.parentId].content,
-					responseMessage.id,
-					_chatId
-				);
-			} else {
-				toast.error(`Model ${model} not found`);
+			if (model) {
+				if (model?.external) {
+					await sendPromptOpenAI(
+						model,
+						history.messages[responseMessage.parentId].content,
+						responseMessage.id,
+						_chatId
+					);
+				} else
+					await sendPromptOllama(
+						model,
+						history.messages[responseMessage.parentId].content,
+						responseMessage.id,
+						_chatId
+					);
+			} else {
+				toast.error(`Model ${modelId} not found`);
 			}
 		}
 	};