forked from open-webui/open-webui
feat: backend reverse proxy
This commit is contained in:
parent 611b10a79d
commit 6a9bef755b
13 changed files with 179 additions and 13 deletions
28 Dockerfile
@@ -1,20 +1,36 @@
 # syntax=docker/dockerfile:1
-FROM node:alpine
-WORKDIR /app
+FROM node:alpine as build

-ARG OLLAMA_API_BASE_URL=''
+ARG OLLAMA_API_BASE_URL='/ollama/api'
 RUN echo $OLLAMA_API_BASE_URL

 ENV ENV prod

 ENV PUBLIC_API_BASE_URL $OLLAMA_API_BASE_URL
 RUN echo $PUBLIC_API_BASE_URL

+WORKDIR /app
+
 COPY package.json package-lock.json ./
 RUN npm ci

 COPY . .
 RUN npm run build

-CMD [ "npm", "run", "start"]
+FROM python:3.11-slim-buster as base
+
+ARG OLLAMA_API_BASE_URL='/ollama/api'
+
+ENV ENV=prod
+ENV OLLAMA_API_BASE_URL $OLLAMA_API_BASE_URL
+
+WORKDIR /app
+COPY --from=build /app/build /app/build
+
+WORKDIR /app/backend
+
+COPY ./backend/requirements.txt ./requirements.txt
+RUN pip3 install -r requirements.txt
+
+COPY ./backend .
+
+CMD [ "sh", "start.sh"]
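Because OLLAMA_API_BASE_URL is a build argument, it can be overridden when the image is built. The second command below is only an illustration of baking a direct Ollama URL into the frontend instead of the default '/ollama/api' proxy path; the host and port are placeholders, not part of this commit.

# Default build: browser requests go through the bundled backend proxy
docker build -t ollama-webui .
# Alternative sketch: point the web UI straight at an Ollama host (example value, adjust to your setup)
docker build --build-arg OLLAMA_API_BASE_URL='http://example-ollama-host:11434/api' -t ollama-webui .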
2 backend/.gitignore (vendored, Normal file)
@@ -0,0 +1,2 @@
__pycache__
.env
55 backend/apps/ollama/main.py (Normal file)
@@ -0,0 +1,55 @@
from flask import Flask, request, Response
from flask_cors import CORS


import requests
import json


from config import OLLAMA_API_BASE_URL

app = Flask(__name__)
CORS(
    app
)  # Enable Cross-Origin Resource Sharing (CORS) to allow requests from different domains

# Define the target server URL
TARGET_SERVER_URL = OLLAMA_API_BASE_URL


@app.route("/", defaults={"path": ""}, methods=["GET", "POST", "PUT", "DELETE"])
@app.route("/<path:path>", methods=["GET", "POST", "PUT", "DELETE"])
def proxy(path):
    # Combine the base URL of the target server with the requested path
    target_url = f"{TARGET_SERVER_URL}/{path}"
    print(target_url)

    # Get data from the original request
    data = request.get_data()
    headers = dict(request.headers)

    # Make a request to the target server
    target_response = requests.request(
        method=request.method,
        url=target_url,
        data=data,
        headers=headers,
        stream=True,  # Enable streaming for server-sent events
    )

    # Proxy the target server's response to the client
    def generate():
        for chunk in target_response.iter_content(chunk_size=8192):
            yield chunk

    response = Response(generate(), status=target_response.status_code)

    # Copy headers from the target server's response to the client's response
    for key, value in target_response.headers.items():
        response.headers[key] = value

    return response


if __name__ == "__main__":
    app.run(debug=True)
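Once the backend is running (for example via dev.sh or start.sh on port 8080), the Flask app above forwards anything under /ollama/api to the configured Ollama server and streams the response back. A minimal smoke test sketch, assuming a local Ollama instance and its /tags model-listing endpoint (both assumptions, not part of this diff):

# List local models through the reverse proxy; headers and body are relayed unchanged
curl http://localhost:8080/ollama/api/tags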
15 backend/config.py (Normal file)
@@ -0,0 +1,15 @@
import sys
import os
from dotenv import load_dotenv, find_dotenv

load_dotenv(find_dotenv())

ENV = os.environ.get("ENV", "dev")

OLLAMA_API_BASE_URL = os.environ.get(
    "OLLAMA_API_BASE_URL", "http://localhost:11434/api"
)

if ENV == "prod":
    if OLLAMA_API_BASE_URL == "/ollama/api":
        OLLAMA_API_BASE_URL = "http://host.docker.internal:11434/api"
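config.py reads OLLAMA_API_BASE_URL from the environment (or a .env file via python-dotenv) before falling back to localhost. A sketch of pointing the backend at a remote Ollama instance; the address is a placeholder:

# Placeholder host/port; any reachable Ollama API URL works
export OLLAMA_API_BASE_URL='http://192.168.1.50:11434/api'
sh dev.sh   # run from the backend/ directory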
1 backend/dev.sh (Normal file)
@@ -0,0 +1 @@
uvicorn main:app --port 8080 --reload
51 backend/main.py (Normal file)
@@ -0,0 +1,51 @@
import time
import sys

from fastapi import FastAPI, Request
from fastapi.staticfiles import StaticFiles

from fastapi import HTTPException
from starlette.exceptions import HTTPException as StarletteHTTPException

from fastapi.middleware.wsgi import WSGIMiddleware
from fastapi.middleware.cors import CORSMiddleware

from apps.ollama.main import app as ollama_app


class SPAStaticFiles(StaticFiles):
    async def get_response(self, path: str, scope):
        try:
            return await super().get_response(path, scope)
        except (HTTPException, StarletteHTTPException) as ex:
            if ex.status_code == 404:
                return await super().get_response("index.html", scope)
            else:
                raise ex


app = FastAPI()

origins = ["*"]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.middleware("http")
async def check_url(request: Request, call_next):
    start_time = int(time.time())
    response = await call_next(request)
    process_time = int(time.time()) - start_time
    response.headers["X-Process-Time"] = str(process_time)

    return response


app.mount("/ollama/api", WSGIMiddleware(ollama_app))
app.mount("/", SPAStaticFiles(directory="../build", html=True), name="spa-static-files")
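The mounts above route /ollama/api/* to the Flask proxy and serve every other path from the built SPA, falling back to index.html on 404 so client-side routes can be deep-linked; responses also gain the X-Process-Time header added by the middleware. A quick way to observe both, assuming the server is running locally on port 8080 with a built frontend in ../build:

# A non-API path falls back to the SPA's index.html; -i exposes the X-Process-Time header
curl -i http://localhost:8080/some/client/route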
19 backend/requirements.txt (Normal file)
@@ -0,0 +1,19 @@
fastapi
uvicorn[standard]
pydantic
python-multipart

flask
flask_cors

python-socketio
python-jose
passlib[bcrypt]
uuid

requests
pymongo
bcrypt

PyJWT
pyjwt[crypto]
1 backend/start.sh (Normal file)
@@ -0,0 +1 @@
uvicorn main:app --host 0.0.0.0 --port 8080
package.json
@@ -3,7 +3,7 @@
 	"version": "0.0.1",
 	"private": true,
 	"scripts": {
-		"start": "http-server ./build",
+		"start": "http-server ./build -P http://localhost:8080?",
 		"dev": "vite dev --host",
 		"build": "vite build",
 		"preview": "vite preview",
@@ -50,4 +50,4 @@
 		"svelte-french-toast": "^1.2.0",
 		"uuid": "^9.0.1"
 	}
-}
+}
2 run.sh
@@ -1,5 +1,5 @@
 docker stop ollama-webui || true
 docker rm ollama-webui || true
 docker build -t ollama-webui .
-docker run -d -p 3000:8080 --name ollama-webui --restart always ollama-webui
+docker run -d -p 3000:8080 --add-host=host.docker.internal:host-gateway --name ollama-webui --restart always ollama-webui
 docker image prune -f
src/routes/+error.svelte
@@ -2,4 +2,10 @@
 	import { page } from '$app/stores';
 </script>

-<div>{$page.status}: {$page.error.message}</div>
+<div class=" bg-white dark:bg-gray-800 min-h-screen">
+	<div class=" flex h-full">
+		<div class="m-auto my-10 dark:text-gray-300 text-3xl font-semibold">
+			{$page.status}: {$page.error.message}
+		</div>
+	</div>
+</div>
@@ -1,13 +1,13 @@
 // if you want to generate a static html file
 // for your page.
 // Documentation: https://kit.svelte.dev/docs/page-options#prerender
-export const prerender = true;
+// export const prerender = true;

 // if you want to Generate a SPA
 // you have to set ssr to false.
 // This is not the case (so set as true or comment the line)
 // Documentation: https://kit.svelte.dev/docs/page-options#ssr
-// export const ssr = false;
+export const ssr = false;

 // How to manage the trailing slashes in the URLs
 // the URL for about page witll be /about with 'ignore' (default)
svelte.config.js
@@ -14,7 +14,7 @@ const config = {
 		adapter: adapter({
 			pages: 'build',
 			assets: 'build',
-			fallback: null
+			fallback: 'index.html'
 		})
 	}
 };