forked from open-webui/open-webui
Merge pull request #1798 from cheahjs/feat/abort-openai-responses-on-stop
feat: abort openai text completion when stopping responses
Commit 2f8164d75f — 4 changed files with 20 additions and 8 deletions.
|
@@ -211,10 +211,12 @@ export const generateOpenAIChatCompletion = async (
 	token: string = '',
 	body: object,
 	url: string = OPENAI_API_BASE_URL
-) => {
+): Promise<[Response | null, AbortController]> => {
+	const controller = new AbortController();
 	let error = null;
 
	const res = await fetch(`${url}/chat/completions`, {
+		signal: controller.signal,
 		method: 'POST',
 		headers: {
			Authorization: `Bearer ${token}`,
|
@@ -231,7 +233,7 @@ export const generateOpenAIChatCompletion = async (
 		throw error;
 	}
 
-	return res;
+	return [res, controller];
 };
 
 export const synthesizeOpenAISpeech = async (
|
@@ -532,7 +532,7 @@
 
 	console.log(model);
 
-	const res = await generateOpenAIChatCompletion(
+	const [res, controller] = await generateOpenAIChatCompletion(
 		localStorage.token,
 		{
 			model: model.id,
|
@@ -608,6 +608,11 @@
 	if (done || stopResponseFlag || _chatId !== $chatId) {
 		responseMessage.done = true;
 		messages = messages;
 
+		if (stopResponseFlag) {
+			controller.abort('User: Stop Response');
+		}
+
 		break;
 	}
|
|
@@ -544,7 +544,7 @@
 
 	console.log(docs);
 
-	const res = await generateOpenAIChatCompletion(
+	const [res, controller] = await generateOpenAIChatCompletion(
 		localStorage.token,
 		{
 			model: model.id,
|
@@ -620,6 +620,11 @@
 	if (done || stopResponseFlag || _chatId !== $chatId) {
 		responseMessage.done = true;
 		messages = messages;
 
+		if (stopResponseFlag) {
+			controller.abort('User: Stop Response');
+		}
+
 		break;
 	}
|
|
@@ -67,7 +67,7 @@
 	const textCompletionHandler = async () => {
 		const model = $models.find((model) => model.id === selectedModelId);
 
-		const res = await generateOpenAIChatCompletion(
+		const [res, controller] = await generateOpenAIChatCompletion(
 			localStorage.token,
 			{
 				model: model.id,
|
@@ -96,7 +96,7 @@
 			const { value, done } = await reader.read();
 			if (done || stopResponseFlag) {
 				if (stopResponseFlag) {
-					await cancelOllamaRequest(localStorage.token, currentRequestId);
+					controller.abort('User: Stop Response');
 				}
 
 				currentRequestId = null;
|
@@ -135,7 +135,7 @@
 	const chatCompletionHandler = async () => {
 		const model = $models.find((model) => model.id === selectedModelId);
 
-		const res = await generateOpenAIChatCompletion(
+		const [res, controller] = await generateOpenAIChatCompletion(
 			localStorage.token,
 			{
 				model: model.id,
|
@@ -182,7 +182,7 @@
 			const { value, done } = await reader.read();
 			if (done || stopResponseFlag) {
 				if (stopResponseFlag) {
-					await cancelOllamaRequest(localStorage.token, currentRequestId);
+					controller.abort('User: Stop Response');
 				}
 
 				currentRequestId = null;
|
Loading…
Reference in a new issue