Merge pull request 'dev' (!1) from dev into main
Some checks failed
Release / release (push) Failing after 9s
Create and publish a Docker image / build-and-push-image (push) Successful in 30m44s
Python CI / Format Backend (latest, 3.12.2) (push) Failing after 8s
Bun CI / Format & Build Frontend (push) Successful in 51s

Reviewed-on: #1
Commit f7ec002fe6 by Tibo De Peuter, 2024-03-19 12:35:03 +01:00
8 changed files with 52 additions and 26 deletions

View file

@@ -12,13 +12,16 @@ on:
# Defines two custom environment variables for the workflow. These are used for the Container registry domain, and a name for the Docker image that this workflow builds.
env:
REGISTRY: ghcr.io
REGISTRY: git.depeuter.dev
IMAGE_NAME: ${{ github.repository }}
RUNNER_TOOL_CACHE: /toolcache
# There is a single job in this workflow. It's configured to run on the latest available version of Ubuntu.
jobs:
build-and-push-image:
runs-on: ubuntu-latest
container:
image: catthehacker/ubuntu:act-latest
# Sets the permissions granted to the `GITHUB_TOKEN` for the actions in this job.
permissions:
contents: read
@@ -39,7 +42,7 @@ jobs:
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
password: ${{ secrets.CI_TOKEN }}
- name: Extract metadata for Docker images
id: meta

View file

@@ -8,15 +8,19 @@ jobs:
name: 'Format Backend'
env:
PUBLIC_API_BASE_URL: ''
RUNNER_TOOL_CACHE: /toolcache
runs-on: ubuntu-latest
strategy:
matrix:
node-version:
- latest
python-version: [ '3.12.2' ]
steps:
- uses: actions/checkout@v4
- name: Use Python
uses: actions/setup-python@v4
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Use Bun
uses: oven-sh/setup-bun@v1
- name: Install dependencies

View file

@@ -293,6 +293,7 @@ def generate_image(
"size": form_data.size if form_data.size else app.state.IMAGE_SIZE,
"response_format": "b64_json",
}
r = requests.post(
url=f"https://api.openai.com/v1/images/generations",
json=data,
@@ -300,7 +301,6 @@ def generate_image(
)
r.raise_for_status()
res = r.json()
images = []
@@ -356,7 +356,10 @@ def generate_image(
return images
except Exception as e:
print(e)
if r:
print(r.json())
raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))
error = e
if r != None:
data = r.json()
if "error" in data:
error = data["error"]["message"]
raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(error))
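The reworked except-branch prefers the upstream error message when the OpenAI response body is available, instead of only printing it. A minimal standalone sketch of that pattern, assuming an OpenAI-style {"error": {"message": ...}} body; the helper name and the plain detail string are illustrative, not part of the PR:

import requests
from fastapi import HTTPException

def raise_with_upstream_detail(r: requests.Response | None, exc: Exception) -> None:
    # Default to the local exception text, as the old code did.
    error: object = exc
    if r is not None:
        try:
            data = r.json()
            # Prefer the structured error message returned by the API.
            if "error" in data:
                error = data["error"]["message"]
        except ValueError:
            pass  # body was not JSON; keep the original exception
    raise HTTPException(status_code=400, detail=f"Something went wrong: {error}")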

View file

@@ -123,6 +123,7 @@ async def get_all_models():
map(lambda response: response["models"], responses)
)
}
app.state.MODELS = {model["model"]: model for model in models["models"]}
return models
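The added assignment builds an in-memory index of the aggregated models on app.state, keyed by model name. A rough sketch of the same indexing plus a lookup against it; the module-level dict, the helpers, and the sample data are stand-ins for illustration, not the actual app.state usage:

# Index models by their "model" field, mirroring the added line.
MODELS: dict[str, dict] = {}

def cache_models(models: dict) -> None:
    MODELS.update({m["model"]: m for m in models["models"]})

def get_model(name: str) -> dict:
    # Later handlers can validate a requested model without re-querying Ollama.
    if name not in MODELS:
        raise KeyError(f"Model '{name}' was not found")
    return MODELS[name]

cache_models({"models": [{"model": "llama2:latest", "size": 3826793677}]})
print(get_model("llama2:latest")["size"])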
@@ -181,11 +182,17 @@ async def get_ollama_versions(url_idx: Optional[int] = None):
responses = await asyncio.gather(*tasks)
responses = list(filter(lambda x: x is not None, responses))
lowest_version = min(
responses, key=lambda x: tuple(map(int, x["version"].split(".")))
)
if len(responses) > 0:
lowest_version = min(
responses, key=lambda x: tuple(map(int, x["version"].split(".")))
)
return {"version": lowest_version["version"]}
return {"version": lowest_version["version"]}
else:
raise HTTPException(
status_code=500,
detail=ERROR_MESSAGES.OLLAMA_NOT_FOUND,
)
else:
url = app.state.OLLAMA_BASE_URLS[url_idx]
try:
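The new guard only computes the lowest version when at least one Ollama instance responded, and raises OLLAMA_NOT_FOUND otherwise. A self-contained sketch of that comparison, with invented sample responses:

# Pick the lowest semantic version among the instances that answered.
responses = [{"version": "0.1.29"}, {"version": "0.1.27"}]

if len(responses) > 0:
    lowest_version = min(
        responses, key=lambda x: tuple(map(int, x["version"].split(".")))
    )
    print(lowest_version["version"])  # 0.1.27
else:
    # Mirrors the HTTPException raised above when no instance is reachable.
    raise RuntimeError("WebUI could not connect to Ollama")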

View file

@@ -52,3 +52,4 @@ class ERROR_MESSAGES(str, Enum):
MODEL_NOT_FOUND = lambda name="": f"Model '{name}' was not found"
OPENAI_NOT_FOUND = lambda name="": f"OpenAI API was not found"
OLLAMA_NOT_FOUND = "WebUI could not connect to Ollama"
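The constant added here is the detail string used by the versions endpoint above; unlike its lambda-valued neighbours it is a plain string member. A tiny usage sketch, with the constant inlined rather than imported from the real ERROR_MESSAGES enum:

from fastapi import HTTPException

OLLAMA_NOT_FOUND = "WebUI could not connect to Ollama"

# How the versions endpoint reports a total connection failure.
raise HTTPException(status_code=500, detail=OLLAMA_NOT_FOUND)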

View file

@@ -140,7 +140,9 @@
};
const scrollToBottom = () => {
messagesContainerElement.scrollTop = messagesContainerElement.scrollHeight;
if (messagesContainerElement) {
messagesContainerElement.scrollTop = messagesContainerElement.scrollHeight;
}
};
//////////////////////////
@@ -308,7 +310,7 @@
.map((file) => file.url.slice(file.url.indexOf(',') + 1));
// Add images array only if it contains elements
if (imageUrls && imageUrls.length > 0) {
if (imageUrls && imageUrls.length > 0 && message.role === 'user') {
baseMessage.images = imageUrls;
}
@@ -532,7 +534,8 @@
.filter((message) => message)
.map((message, idx, arr) => ({
role: message.role,
...(message.files?.filter((file) => file.type === 'image').length > 0 ?? false
...((message.files?.filter((file) => file.type === 'image').length > 0 ?? false) &&
message.role === 'user'
? {
content: [
{

View file

@@ -160,7 +160,9 @@
};
const scrollToBottom = () => {
messagesContainerElement.scrollTop = messagesContainerElement.scrollHeight;
if (messagesContainerElement) {
messagesContainerElement.scrollTop = messagesContainerElement.scrollHeight;
}
};
//////////////////////////
@@ -321,7 +323,7 @@
.map((file) => file.url.slice(file.url.indexOf(',') + 1));
// Add images array only if it contains elements
if (imageUrls && imageUrls.length > 0) {
if (imageUrls && imageUrls.length > 0 && message.role === 'user') {
baseMessage.images = imageUrls;
}
@@ -545,7 +547,8 @@
.filter((message) => message)
.map((message, idx, arr) => ({
role: message.role,
...(message.files?.filter((file) => file.type === 'image').length > 0 ?? false
...((message.files?.filter((file) => file.type === 'image').length > 0 ?? false) &&
message.role === 'user'
? {
content: [
{
@@ -688,7 +691,12 @@
if (messages.length == 2) {
window.history.replaceState(history.state, '', `/c/${_chatId}`);
await setChatTitle(_chatId, userPrompt);
if ($settings?.titleAutoGenerateModel) {
await generateChatTitle(_chatId, userPrompt);
} else {
await setChatTitle(_chatId, userPrompt);
}
}
};

View file

@@ -3,16 +3,13 @@
@tailwind utilities;
@layer base {
html {
html, pre {
font-family: -apple-system, 'Arimo', ui-sans-serif, system-ui, 'Segoe UI', Roboto, Ubuntu,
Cantarell, 'Noto Sans', sans-serif, 'Helvetica Neue', Arial, 'Apple Color Emoji',
'Segoe UI Emoji', 'Segoe UI Symbol', 'Noto Color Emoji';
}
pre {
font-family: -apple-system, 'Arimo', ui-sans-serif, system-ui, 'Segoe UI', Roboto, Ubuntu,
Cantarell, 'Noto Sans', sans-serif, 'Helvetica Neue', Arial, 'Apple Color Emoji',
'Segoe UI Emoji', 'Segoe UI Symbol', 'Noto Color Emoji';
white-space: pre-wrap;
}
pre {
white-space: pre-wrap;
}
}