fix: continue generation

Timothy J. Baek 2024-02-25 15:46:12 -08:00
parent 6245146661
commit 1ff0c9a95d
2 changed files with 117 additions and 112 deletions

View file

@@ -732,25 +732,26 @@
     responseMessage.done = false;
     await tick();
 
-    const modelTag = $models.filter((m) => m.name === responseMessage.model).at(0);
+    const model = $models.filter((m) => m.id === responseMessage.model).at(0);
 
-    if (modelTag?.external) {
-      await sendPromptOpenAI(
-        responseMessage.model,
-        history.messages[responseMessage.parentId].content,
-        responseMessage.id,
-        _chatId
-      );
-    } else if (modelTag) {
-      await sendPromptOllama(
-        responseMessage.model,
-        history.messages[responseMessage.parentId].content,
-        responseMessage.id,
-        _chatId
-      );
-    } else {
-      toast.error(`Model ${model} not found`);
-    }
+    if (model) {
+      if (model?.external) {
+        await sendPromptOpenAI(
+          model,
+          history.messages[responseMessage.parentId].content,
+          responseMessage.id,
+          _chatId
+        );
+      } else
+        await sendPromptOllama(
+          model,
+          history.messages[responseMessage.parentId].content,
+          responseMessage.id,
+          _chatId
+        );
+    } else {
+      toast.error(`Model ${modelId} not found`);
+    }
   };
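
In this first file, `continueGeneration` now resolves the model by `id` instead of `name` and hands the resolved model object (rather than the raw model name) to the prompt handlers. A minimal sketch of the post-change flow, assuming the component's `$models` store, `history`, `toast`, and the two `sendPrompt*` helpers from the diff; note the committed code interpolates `modelId` in the error toast even though no such variable is visible in this scope, so the sketch falls back to `responseMessage.model`:

    // Resolve the stored model id to a full model entry from the $models store.
    const model = $models.filter((m) => m.id === responseMessage.model).at(0);

    if (model) {
      if (model?.external) {
        // External (OpenAI-compatible) models receive the model object itself.
        await sendPromptOpenAI(
          model,
          history.messages[responseMessage.parentId].content,
          responseMessage.id,
          _chatId
        );
      } else {
        await sendPromptOllama(
          model,
          history.messages[responseMessage.parentId].content,
          responseMessage.id,
          _chatId
        );
      }
    } else {
      // The commit writes `${modelId}` here; `responseMessage.model` is assumed instead.
      toast.error(`Model ${responseMessage.model} not found`);
    }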

View file

@@ -238,7 +238,6 @@
       await sendPrompt(userPrompt, userMessageId);
     }
   };
-
   const sendPrompt = async (prompt, parentId) => {
     const _chatId = JSON.parse(JSON.stringify($chatId));
 
@@ -292,10 +291,10 @@
     }
 
     await Promise.all(
-      selectedModels.map(async (model) => {
-        console.log(model);
-        const modelTag = $models.filter((m) => m.name === model).at(0);
+      selectedModels.map(async (modelId) => {
+        const model = $models.filter((m) => m.id === modelId).at(0);
 
+        if (model) {
         // Create response message
         let responseMessageId = uuidv4();
         let responseMessage = {
@@ -304,7 +303,7 @@
           childrenIds: [],
           role: 'assistant',
           content: '',
-          model: model,
+          model: model.id,
           timestamp: Math.floor(Date.now() / 1000) // Unix epoch
         };
 
@@ -320,12 +319,13 @@
           ];
         }
 
-        if (modelTag?.external) {
+        if (model?.external) {
           await sendPromptOpenAI(model, prompt, responseMessageId, _chatId);
-        } else if (modelTag) {
+        } else if (model) {
           await sendPromptOllama(model, prompt, responseMessageId, _chatId);
+        }
         } else {
-          toast.error(`Model ${model} not found`);
+          toast.error(`Model ${modelId} not found`);
         }
       })
     );
@@ -334,6 +334,7 @@
   };
 
   const sendPromptOllama = async (model, userPrompt, responseMessageId, _chatId) => {
+    model = model.id;
     const responseMessage = history.messages[responseMessageId];
 
     // Wait until history/message have been updated
@@ -543,11 +544,12 @@
   const sendPromptOpenAI = async (model, userPrompt, responseMessageId, _chatId) => {
     const responseMessage = history.messages[responseMessageId];
 
     scrollToBottom();
 
-    const res = await generateOpenAIChatCompletion(localStorage.token, {
-      model: model,
+    const res = await generateOpenAIChatCompletion(
+      localStorage.token,
+      {
+        model: model.id,
         stream: true,
         messages: [
           $settings.system
@@ -593,7 +595,9 @@
         num_ctx: $settings?.options?.num_ctx ?? undefined,
         frequency_penalty: $settings?.options?.repeat_penalty ?? undefined,
         max_tokens: $settings?.options?.num_predict ?? undefined
-    });
+      },
+      model.source === 'litellm' ? `${LITELLM_API_BASE_URL}/v1` : `${OPENAI_API_BASE_URL}`
+    );
 
     if (res && res.ok) {
       const reader = res.body
@@ -704,7 +708,6 @@
       await setChatTitle(_chatId, userPrompt);
     }
   };
-
   const stopResponse = () => {
     stopResponseFlag = true;
     console.log('stopResponse');
@@ -719,25 +722,26 @@
     responseMessage.done = false;
     await tick();
 
-    const modelTag = $models.filter((m) => m.name === responseMessage.model).at(0);
+    const model = $models.filter((m) => m.id === responseMessage.model).at(0);
 
-    if (modelTag?.external) {
-      await sendPromptOpenAI(
-        responseMessage.model,
-        history.messages[responseMessage.parentId].content,
-        responseMessage.id,
-        _chatId
-      );
-    } else if (modelTag) {
-      await sendPromptOllama(
-        responseMessage.model,
-        history.messages[responseMessage.parentId].content,
-        responseMessage.id,
-        _chatId
-      );
-    } else {
-      toast.error(`Model ${model} not found`);
-    }
+    if (model) {
+      if (model?.external) {
+        await sendPromptOpenAI(
+          model,
+          history.messages[responseMessage.parentId].content,
+          responseMessage.id,
+          _chatId
+        );
+      } else
+        await sendPromptOllama(
+          model,
+          history.messages[responseMessage.parentId].content,
+          responseMessage.id,
+          _chatId
+        );
+    } else {
+      toast.error(`Model ${modelId} not found`);
+    }
   };
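
Across this second file, the hunks show where the resolved model object ends up: `sendPrompt` maps over `selectedModels` as ids and stamps `model.id` on each response message, `sendPromptOllama` collapses the object back to its id string (`model = model.id;`) for the Ollama request, and `sendPromptOpenAI` keeps `model.id` in the request body while using `model.source` to choose the endpoint. A rough sketch of the new completion call, assuming `generateOpenAIChatCompletion`, `LITELLM_API_BASE_URL`, and `OPENAI_API_BASE_URL` are the imports this component already uses:

    // Sketch only; the real call site also forwards the generation options
    // ($settings.options) listed in the diff.
    const res = await generateOpenAIChatCompletion(
      localStorage.token,
      {
        model: model.id, // the request body still carries the plain id string
        stream: true,
        messages: [] // system prompt plus chat history, built as in the component
      },
      // LiteLLM-proxied models are routed through the LiteLLM API; anything
      // else goes to the OpenAI-compatible base URL.
      model.source === 'litellm' ? `${LITELLM_API_BASE_URL}/v1` : `${OPENAI_API_BASE_URL}`
    );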