diff --git a/Dockerfile b/Dockerfile
index fa145b08..8104c727 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,20 +1,36 @@
 # syntax=docker/dockerfile:1
-FROM node:alpine
-WORKDIR /app
+FROM node:alpine as build
 
-ARG OLLAMA_API_BASE_URL=''
+ARG OLLAMA_API_BASE_URL='/ollama/api'
 RUN echo $OLLAMA_API_BASE_URL
 
-ENV ENV prod
-
 ENV PUBLIC_API_BASE_URL $OLLAMA_API_BASE_URL
 RUN echo $PUBLIC_API_BASE_URL
 
+WORKDIR /app
+
 COPY package.json package-lock.json ./
 RUN npm ci
 
 COPY . .
 RUN npm run build
 
-CMD [ "npm", "run", "start"]
+FROM python:3.11-slim-buster as base
+
+ARG OLLAMA_API_BASE_URL='/ollama/api'
+
+ENV ENV=prod
+ENV OLLAMA_API_BASE_URL $OLLAMA_API_BASE_URL
+
+WORKDIR /app
+COPY --from=build /app/build /app/build
+
+WORKDIR /app/backend
+
+COPY ./backend/requirements.txt ./requirements.txt
+RUN pip3 install -r requirements.txt
+
+COPY ./backend .
+
+CMD [ "sh", "start.sh"]
\ No newline at end of file
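The multi-stage Dockerfile above replaces the old single-stage Node image: the SvelteKit frontend is compiled in the `node:alpine` stage, and only the resulting `build/` output is copied into the final Python image next to the backend. A minimal sketch of how you might inspect the two stages; the image tags are illustrative, not part of the repo:

```bash
# Build just the frontend stage, useful when debugging the npm build
docker build --target build -t ollama-webui:frontend .

# Build the full image (frontend assets + Python backend)
docker build -t ollama-webui:full .

# The final image should contain both the compiled frontend and the backend
docker run --rm ollama-webui:full ls /app/build /app/backend
```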
diff --git a/README.md b/README.md
index 05d31667..fced28e2 100644
--- a/README.md
+++ b/README.md
@@ -47,21 +47,19 @@ ChatGPT-Style Web Interface for Ollama 🦙
 
 - ⚙️ **Fine-Tuned Control with Advanced Parameters**: Gain a deeper level of control by adjusting parameters such as temperature and defining your system prompts to tailor the conversation to your specific preferences and needs.
 
+- 🔐 **Auth Header Support**: Effortlessly enhance security by adding Authorization headers to Ollama requests directly from the web UI settings, ensuring access to secured Ollama servers.
+
 - 🔗 **External Ollama Server Connection**: Seamlessly link to an external Ollama server hosted on a different address by configuring the environment variable during the Docker build phase. Additionally, you can also set the external server connection URL from the web UI post-build.
 
-- 🔑 **Auth Header Support**: Securely access Ollama servers with added Authorization headers for enhanced authentication.
+- 🔒 **Backend Reverse Proxy Support**: Strengthen security by enabling direct communication between the Ollama Web UI backend and Ollama, eliminating the need to expose Ollama over LAN.
 
 - 🌟 **Continuous Updates**: We are committed to improving Ollama Web UI with regular updates and new features.
 
 ## How to Install 🚀
 
-### Prerequisites
+### Installing Both Ollama and Ollama Web UI Using Docker Compose
 
-Make sure you have the latest version of Ollama installed before proceeding with the installation. You can find the latest version of Ollama at [https://ollama.ai/](https://ollama.ai/).
-
-#### Installing Both Ollama and Ollama Web UI Using Docker Compose
-
-If you don't have Ollama installed, you can also use the provided Docker Compose file for a hassle-free installation. Simply run the following command:
+If you don't have Ollama installed yet, you can use the provided Docker Compose file for a hassle-free installation. Simply run the following command:
 
 ```bash
 docker compose up --build
@@ -69,86 +67,84 @@ docker compose up --build
 ```
 
 This command will install both Ollama and Ollama Web UI on your system. Ensure to modify the `compose.yaml` file for GPU support if needed.
 
-#### Checking Ollama
+### Installing Ollama Web UI Only
 
-After installing, verify that Ollama is running by accessing the following link in your web browser: [http://127.0.0.1:11434/](http://127.0.0.1:11434/). Note that the port number may differ based on your system configuration.
+#### Prerequisites
 
-#### Accessing Ollama Web Interface over LAN
+Make sure you have the latest version of Ollama installed before proceeding with the installation. You can find the latest version of Ollama at [https://ollama.ai/](https://ollama.ai/).
 
-If you want to access the Ollama web interface over LAN, for example, from your phone, run Ollama using the following command:
+##### Checking Ollama
+
+After installing Ollama, verify that Ollama is running by accessing the following link in your web browser: [http://127.0.0.1:11434/](http://127.0.0.1:11434/). Note that the port number may differ based on your system configuration.
+
+#### Using Docker 🐳
+
+If Ollama is hosted on your local machine and accessible at [http://127.0.0.1:11434/](http://127.0.0.1:11434/), run the following command:
 
 ```bash
-OLLAMA_HOST=0.0.0.0 OLLAMA_ORIGINS=* ollama serve
-```
-
-In case you encounter any issues running the command and encounter errors, ensure to turn off any existing Ollama service that might be running in the background before retrying.
-
-If you're running Ollama via Docker:
-
-```bash
-docker run -d -v ollama:/root/.ollama -p 11434:11434 -e OLLAMA_ORIGINS="*" --name ollama ollama/ollama
-```
-
-### Using Docker 🐳
-
-If Ollama is hosted on your local machine, run the following command:
-
-```bash
-docker run -d -p 3000:8080 --name ollama-webui --restart always ghcr.io/ollama-webui/ollama-webui:main
+docker run -d -p 3000:8080 --add-host=host.docker.internal:host-gateway --name ollama-webui --restart always ghcr.io/ollama-webui/ollama-webui:main
 ```
 
 Alternatively, if you prefer to build the container yourself, use the following command:
 
 ```bash
-docker build --build-arg OLLAMA_API_BASE_URL='' -t ollama-webui .
-docker run -d -p 3000:8080 --name ollama-webui --restart always ollama-webui
+docker build -t ollama-webui .
+docker run -d -p 3000:8080 --add-host=host.docker.internal:host-gateway --name ollama-webui --restart always ollama-webui
 ```
 
-Your Ollama Web UI should now be hosted at [http://localhost:3000](http://localhost:3000). Enjoy! 😄
+Your Ollama Web UI should now be hosted at [http://localhost:3000](http://localhost:3000) and accessible over your local network (LAN). Enjoy! 😄
 
-#### Connecting to Ollama on a Different Server
+#### Accessing External Ollama on a Different Server
 
-If Ollama is hosted on a server other than your local machine, change `OLLAMA_API_BASE_URL` to match:
+Change the `OLLAMA_API_BASE_URL` environment variable to match the external Ollama server URL:
 
 ```bash
-docker build --build-arg OLLAMA_API_BASE_URL='https://example.com/api' -t ollama-webui .
-docker run -d -p 3000:8080 --name ollama-webui --restart always ollama-webui
+docker run -d -p 3000:8080 -e OLLAMA_API_BASE_URL=https://example.com/api --name ollama-webui --restart always ghcr.io/ollama-webui/ollama-webui:main
+```
+
+Alternatively, if you prefer to build the container yourself, use the following command:
+
+```bash
+docker build -t ollama-webui .
+docker run -d -p 3000:8080 -e OLLAMA_API_BASE_URL=https://example.com/api --name ollama-webui --restart always ollama-webui
 ```
 
 ## How to Build for Static Deployment
 
-1. Install `node`
+1. Clone & Enter the project
 
-   ```sh
-   # Mac, Linux
-   curl https://webi.sh/node@lts | sh
-   source ~/.config/envman/PATH.env
-   ```
-
-   ```pwsh
-   # Windows
-   curl.exe https://webi.ms/node@lts | powershell
-   ```
-
-2. Clone & Enter the project
    ```sh
   git clone https://github.com/ollama-webui/ollama-webui.git
   pushd ./ollama-webui/
   ```
-3. Create and edit `.env`
+
+2. Create and edit `.env`
+
   ```sh
  cp -RPp example.env .env
  ```
+
+3. Install node dependencies
+
+  ```sh
+  npm i
+  ```
+
 4. Run in dev mode, or build the site for deployment
+
   - Test in Dev mode:
+
   ```sh
  npm run dev
  ```
-  - Build for Deploy: \
-    (`PUBLIC_API_BASE_URL` will overwrite the value in `.env`)
+
+  - Build for Deploy:
+
   ```sh
+  # `PUBLIC_API_BASE_URL` will overwrite the value in `.env`
  PUBLIC_API_BASE_URL='https://example.com/api' npm run build
  ```
+
 5. Test the build with `caddy` (or the server of your choice)
 
   ```sh
@@ -168,9 +164,9 @@ See [TROUBLESHOOTING.md](/TROUBLESHOOTING.md) for information on how to troubles
 
 Here are some exciting tasks on our to-do list:
 
+- 🔐 **Access Control**: Securely manage requests to Ollama by utilizing the backend as a reverse proxy gateway, ensuring only authenticated users can send specific requests.
 - 🧪 **Research-Centric Features**: Empower researchers in the fields of LLM and HCI with a comprehensive web UI for conducting user studies. Stay tuned for ongoing feature enhancements (e.g., surveys, analytics, and participant tracking) to facilitate their research.
 - 📈 **User Study Tools**: Providing specialized tools, like heat maps and behavior tracking modules, to empower researchers in capturing and analyzing user behavior patterns with precision and accuracy.
-- 🌐 **Web Browser Extension**: Seamlessly integrate our services into your browsing experience with our convenient browser extension.
 - 📚 **Enhanced Documentation**: Elevate your setup and customization experience with improved, comprehensive documentation.
 
 Feel free to contribute and help us make Ollama Web UI even better! 🙌
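As the README changes above describe, the web UI container now fronts Ollama through its own backend, so only port 3000 has to be exposed. Assuming the container from the commands above is running and can reach Ollama, a request like the following should return Ollama's model list (its `/api/tags` endpoint) through the new proxy route, without Ollama itself being reachable on the LAN:

```bash
# Hits the backend at /ollama/api, which forwards to Ollama's /api
curl http://localhost:3000/ollama/api/tags
```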
diff --git a/backend/.gitignore b/backend/.gitignore
new file mode 100644
index 00000000..6d178708
--- /dev/null
+++ b/backend/.gitignore
@@ -0,0 +1,2 @@
+__pycache__
+.env
\ No newline at end of file
diff --git a/backend/apps/ollama/main.py b/backend/apps/ollama/main.py
new file mode 100644
index 00000000..bb211cfe
--- /dev/null
+++ b/backend/apps/ollama/main.py
@@ -0,0 +1,55 @@
+from flask import Flask, request, Response
+from flask_cors import CORS
+
+
+import requests
+import json
+
+
+from config import OLLAMA_API_BASE_URL
+
+app = Flask(__name__)
+CORS(
+    app
+)  # Enable Cross-Origin Resource Sharing (CORS) to allow requests from different domains
+
+# Define the target server URL
+TARGET_SERVER_URL = OLLAMA_API_BASE_URL
+
+
+@app.route("/", defaults={"path": ""}, methods=["GET", "POST", "PUT", "DELETE"])
+@app.route("/<path:path>", methods=["GET", "POST", "PUT", "DELETE"])
+def proxy(path):
+    # Combine the base URL of the target server with the requested path
+    target_url = f"{TARGET_SERVER_URL}/{path}"
+    print(target_url)
+
+    # Get data from the original request
+    data = request.get_data()
+    headers = dict(request.headers)
+
+    # Make a request to the target server
+    target_response = requests.request(
+        method=request.method,
+        url=target_url,
+        data=data,
+        headers=headers,
+        stream=True,  # Enable streaming for server-sent events
+    )
+
+    # Proxy the target server's response to the client
+    def generate():
+        for chunk in target_response.iter_content(chunk_size=8192):
+            yield chunk
+
+    response = Response(generate(), status=target_response.status_code)
+
+    # Copy headers from the target server's response to the client's response
+    for key, value in target_response.headers.items():
+        response.headers[key] = value
+
+    return response
+
+
+if __name__ == "__main__":
+    app.run(debug=True)
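The Flask app above is a catch-all streaming proxy: both routes funnel every method into `proxy()`, the incoming body and headers are replayed against `OLLAMA_API_BASE_URL`, and the response is re-emitted in 8 KB chunks so Ollama's incremental generation output is not buffered. A rough sketch of exercising it standalone, assuming a local Ollama on port 11434 and Flask's default port 5000; in normal operation it is mounted under FastAPI instead (see `backend/main.py` below):

```bash
cd backend
pip install flask flask_cors requests python-dotenv
PYTHONPATH=. python apps/ollama/main.py &

# Forwarded by the proxy to http://localhost:11434/api/tags
curl http://localhost:5000/tags

# Streaming POSTs pass through chunk by chunk ("mistral" is just an example model)
curl http://localhost:5000/generate -d '{"model": "mistral", "prompt": "Hi"}'
```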
diff --git a/backend/config.py b/backend/config.py
new file mode 100644
index 00000000..2a33818d
--- /dev/null
+++ b/backend/config.py
@@ -0,0 +1,15 @@
+import sys
+import os
+from dotenv import load_dotenv, find_dotenv
+
+load_dotenv(find_dotenv())
+
+ENV = os.environ.get("ENV", "dev")
+
+OLLAMA_API_BASE_URL = os.environ.get(
+    "OLLAMA_API_BASE_URL", "http://localhost:11434/api"
+)
+
+if ENV == "prod":
+    if OLLAMA_API_BASE_URL == "/ollama/api":
+        OLLAMA_API_BASE_URL = "http://host.docker.internal:11434/api"
diff --git a/backend/dev.sh b/backend/dev.sh
new file mode 100644
index 00000000..27793999
--- /dev/null
+++ b/backend/dev.sh
@@ -0,0 +1 @@
+uvicorn main:app --port 8080 --reload
\ No newline at end of file
diff --git a/backend/main.py b/backend/main.py
new file mode 100644
index 00000000..2851df20
--- /dev/null
+++ b/backend/main.py
@@ -0,0 +1,51 @@
+import time
+import sys
+
+from fastapi import FastAPI, Request
+from fastapi.staticfiles import StaticFiles
+
+from fastapi import HTTPException
+from starlette.exceptions import HTTPException as StarletteHTTPException
+
+from fastapi.middleware.wsgi import WSGIMiddleware
+from fastapi.middleware.cors import CORSMiddleware
+
+from apps.ollama.main import app as ollama_app
+
+
+class SPAStaticFiles(StaticFiles):
+    async def get_response(self, path: str, scope):
+        try:
+            return await super().get_response(path, scope)
+        except (HTTPException, StarletteHTTPException) as ex:
+            if ex.status_code == 404:
+                return await super().get_response("index.html", scope)
+            else:
+                raise ex
+
+
+app = FastAPI()
+
+origins = ["*"]
+
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=origins,
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+
+@app.middleware("http")
+async def check_url(request: Request, call_next):
+    start_time = int(time.time())
+    response = await call_next(request)
+    process_time = int(time.time()) - start_time
+    response.headers["X-Process-Time"] = str(process_time)
+
+    return response
+
+
+app.mount("/ollama/api", WSGIMiddleware(ollama_app))
+app.mount("/", SPAStaticFiles(directory="../build", html=True), name="spa-static-files")
diff --git a/backend/requirements.txt b/backend/requirements.txt
new file mode 100644
index 00000000..1c69fffc
--- /dev/null
+++ b/backend/requirements.txt
@@ -0,0 +1,19 @@
+fastapi
+uvicorn[standard]
+pydantic
+python-multipart
+
+flask
+flask_cors
+
+python-socketio
+python-jose
+passlib[bcrypt]
+uuid
+
+requests
+pymongo
+bcrypt
+
+PyJWT
+pyjwt[crypto]
\ No newline at end of file
diff --git a/backend/start.sh b/backend/start.sh
new file mode 100644
index 00000000..2d070299
--- /dev/null
+++ b/backend/start.sh
@@ -0,0 +1 @@
+uvicorn main:app --host 0.0.0.0 --port 8080
\ No newline at end of file
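Taken together, the new backend serves the compiled SvelteKit app and the Ollama proxy from one process: `SPAStaticFiles` falls back to `index.html` on 404s so client-side routes deep-link correctly, and the Flask proxy is mounted under `/ollama/api` via `WSGIMiddleware`. A sketch of the local workflow, assuming Ollama is already listening on `localhost:11434` and the frontend is built first:

```bash
# Build the frontend into ./build, then start the backend from ./backend
cp -RPp example.env .env && npm ci && npm run build
cd backend
pip install -r requirements.txt
sh dev.sh   # runs: uvicorn main:app --port 8080 --reload

# The SPA is served at / (unknown paths fall back to index.html) ...
curl -s http://localhost:8080/ | head

# ... while /ollama/api/* is proxied through Flask to Ollama
curl http://localhost:8080/ollama/api/tags
```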
diff --git a/compose.yaml b/compose.yaml
index e9ca7102..0a77d64f 100644
--- a/compose.yaml
+++ b/compose.yaml
@@ -15,8 +15,6 @@ services:
       - ollama:/root/.ollama
     ports:
       - 11434:11434
-    environment:
-      - 'OLLAMA_ORIGINS=*'
     container_name: ollama
     pull_policy: always
     tty: true
@@ -24,16 +22,18 @@ services:
     image: ollama/ollama:latest
 
   ollama-webui:
-    restart: unless-stopped
     build:
       context: .
       args:
-        OLLAMA_API_BASE_URL: ''
+        OLLAMA_API_BASE_URL: '/ollama/api'
       dockerfile: Dockerfile
     image: ollama-webui:latest
     container_name: ollama-webui
     ports:
       - 3000:8080
+    extra_hosts:
+      - host.docker.internal:host-gateway
+    restart: unless-stopped
 
 volumes:
   ollama: {}
diff --git a/demo.gif b/demo.gif
index 5fd564ca..510e041a 100644
Binary files a/demo.gif and b/demo.gif differ
diff --git a/package.json b/package.json
index 399b2912..5b1a89ec 100644
--- a/package.json
+++ b/package.json
@@ -3,7 +3,7 @@
 	"version": "0.0.1",
 	"private": true,
 	"scripts": {
-		"start": "http-server ./build",
+		"start": "http-server ./build -P http://localhost:8080?",
 		"dev": "vite dev --host",
 		"build": "vite build",
 		"preview": "vite preview",
@@ -50,4 +50,4 @@
 		"svelte-french-toast": "^1.2.0",
 		"uuid": "^9.0.1"
 	}
-}
+}
\ No newline at end of file
diff --git a/run.sh b/run.sh
index 2c520052..584c7f64 100644
--- a/run.sh
+++ b/run.sh
@@ -1,5 +1,5 @@
 docker stop ollama-webui || true
 docker rm ollama-webui || true
 docker build -t ollama-webui .
-docker run -d -p 3000:8080 --name ollama-webui --restart always ollama-webui
+docker run -d -p 3000:8080 --add-host=host.docker.internal:host-gateway --name ollama-webui --restart always ollama-webui
 docker image prune -f
\ No newline at end of file
diff --git a/src/lib/constants.ts b/src/lib/constants.ts
index 54894fac..57f7cfe6 100644
--- a/src/lib/constants.ts
+++ b/src/lib/constants.ts
@@ -8,7 +8,7 @@ export const API_BASE_URL =
 		: `http://localhost:11434/api`
 	: PUBLIC_API_BASE_URL;
 
-export const WEB_UI_VERSION = 'v1.0.0-alpha.5';
+export const WEB_UI_VERSION = 'v1.0.0-alpha.6';
 
 // Source: https://kit.svelte.dev/docs/modules#$env-static-public
 // This feature, akin to $env/static/private, exclusively incorporates environment variables
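The `extra_hosts`/`--add-host` entries above use Docker's special `host-gateway` value to map `host.docker.internal` to the host's gateway IP, which is how the backend's prod default of `http://host.docker.internal:11434/api` (see `backend/config.py`) reaches an Ollama instance running on the host, including on Linux. A quick check from inside a running container, relying on the `requests` package that `backend/requirements.txt` installs:

```bash
# Ollama answers its root path with a plain "Ollama is running"
docker exec ollama-webui python3 -c \
  "import requests; print(requests.get('http://host.docker.internal:11434/').text)"
```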
diff --git a/src/routes/+error.svelte b/src/routes/+error.svelte
index 7bf6432c..76748a1d 100644
--- a/src/routes/+error.svelte
+++ b/src/routes/+error.svelte
@@ -2,4 +2,10 @@
 	import { page } from '$app/stores';
 </script>
 
-<div class="…">{$page.status}: {$page.error.message}</div>
+<div class="…">
+	<div class="…">
+		<div class="…">
+			{$page.status}: {$page.error.message}
+		</div>
+	</div>
+</div>
diff --git a/src/routes/+layout.js b/src/routes/+layout.js
index 9220eb55..b49c5280 100644
--- a/src/routes/+layout.js
+++ b/src/routes/+layout.js
@@ -1,13 +1,13 @@
 // if you want to generate a static html file
 // for your page.
 // Documentation: https://kit.svelte.dev/docs/page-options#prerender
-export const prerender = true;
+// export const prerender = true;
 
 // if you want to Generate a SPA
 // you have to set ssr to false.
 // This is not the case (so set as true or comment the line)
 // Documentation: https://kit.svelte.dev/docs/page-options#ssr
-// export const ssr = false;
+export const ssr = false;
 
 // How to manage the trailing slashes in the URLs
 // the URL for about page will be /about with 'ignore' (default)
diff --git a/src/routes/+page.svelte b/src/routes/+page.svelte
index 53e3282f..de09e7af 100644
--- a/src/routes/+page.svelte
+++ b/src/routes/+page.svelte
@@ -1184,7 +1184,7 @@
 {:else}
 <div