diff --git a/.gitignore b/.gitignore index 6635cf55..1250aef9 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,294 @@ node_modules !.env.example vite.config.js.timestamp-* vite.config.ts.timestamp-* +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/
+
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+lerna-debug.log*
+.pnpm-debug.log*
+
+# Diagnostic reports (https://nodejs.org/api/report.html)
+report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
+
+# Runtime data
+pids
+*.pid
+*.seed
+*.pid.lock
+
+# Directory for instrumented libs generated by jscoverage/JSCover
+lib-cov
+
+# Coverage directory used by tools like istanbul
+coverage
+*.lcov
+
+# nyc test coverage
+.nyc_output
+
+# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
+.grunt
+
+# Bower dependency directory (https://bower.io/)
+bower_components
+
+# node-waf configuration
+.lock-wscript
+
+# Compiled binary addons (https://nodejs.org/api/addons.html)
+build/Release
+
+# Dependency directories
+node_modules/
+jspm_packages/
+
+# Snowpack dependency directory (https://snowpack.dev/)
+web_modules/
+
+# TypeScript cache
+*.tsbuildinfo
+
+# Optional npm cache directory
+.npm
+
+# Optional eslint cache
+.eslintcache
+
+# Optional stylelint cache
+.stylelintcache
+
+# Microbundle cache
+.rpt2_cache/
+.rts2_cache_cjs/
+.rts2_cache_es/
+.rts2_cache_umd/
+
+# Optional REPL history
+.node_repl_history
+
+# Output of 'npm pack'
+*.tgz
+
+# Yarn Integrity file
+.yarn-integrity
+
+# dotenv environment variable files
+.env
+.env.development.local
+.env.test.local
+.env.production.local
+.env.local
+
+# parcel-bundler cache (https://parceljs.org/)
+.cache
+.parcel-cache
+
+# Next.js build output
+.next
+out
+
+# Nuxt.js build / generate output
+.nuxt
+dist
+
+# Gatsby files
+.cache/
+# Comment in the public line if your project uses Gatsby and not Next.js
+# https://nextjs.org/blog/next-9-1#public-directory-support
+# public
+
+# vuepress build output
+.vuepress/dist
+
+# vuepress v2.x temp and cache directory
+.temp
+.cache
+
+# Docusaurus cache and generated files
+.docusaurus
+
+# Serverless directories
+.serverless/
+
+# FuseBox cache
+.fusebox/
+
+# DynamoDB Local files
+.dynamodb/
+
+# TernJS port file
+.tern-port
+
+# Stores VSCode versions used for testing VSCode extensions
+.vscode-test
+
+# yarn v2
+.yarn/cache
+.yarn/unplugged
+.yarn/build-state.yml
+.yarn/install-state.gz
+.pnp.*
\ No newline at end of file
diff --git a/.prettierignore b/.prettierignore
index 38972655..b722147c 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -11,3 +11,6 @@ node_modules
 pnpm-lock.yaml
 package-lock.json
 yarn.lock
+
+# Ignore kubernetes files
+kubernetes
\ No newline at end of file
diff --git a/INSTALLATION.md b/INSTALLATION.md
new file mode 100644
index 00000000..4b802c69
--- /dev/null
+++ b/INSTALLATION.md
@@ -0,0 +1,35 @@
+### Installing Both Ollama and Ollama Web UI Using Kustomize
+
+For a CPU-only pod:
+
+```bash
+kubectl apply -f ./kubernetes/manifest/base
+```
+
+For a GPU-enabled pod:
+
+```bash
+kubectl apply -k ./kubernetes/manifest
+```
+
+### Installing Both Ollama and Ollama Web UI Using Helm
+
+Package the Helm chart first:
+
+```bash
+helm package ./kubernetes/helm/
+```
+
+For a CPU-only pod:
+
+```bash
+helm install ollama-webui ./ollama-webui-*.tgz
+```
+
+For a GPU-enabled pod (note that the dot in the `nvidia.com/gpu` resource name must be escaped so `--set` does not treat it as a nested key):
+
+```bash
+helm install ollama-webui ./ollama-webui-*.tgz --set ollama.resources.limits."nvidia\.com/gpu"="1"
+```
+
+Check the `kubernetes/helm/values.yaml` file to see which parameters are available for customization.
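+
+For example, to override chart values at install time (a minimal sketch; the parameter names below come from `kubernetes/helm/values.yaml`, and the host and volume size are placeholders to adjust for your environment):
+
+```bash
+# Install with a custom Ingress host and a larger Ollama model volume
+helm install ollama-webui ./ollama-webui-*.tgz \
+  --set webui.ingress.host="ollama.example.com" \
+  --set ollama.volumeSize="10Gi"
+```
+
+If you are testing on minikube with the ingress addon enabled, remember that the configured host (by default `ollama.minikube.local`) must also resolve locally, for example via an `/etc/hosts` entry pointing at the output of `minikube ip`.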
diff --git a/README.md b/README.md
index e99704ab..cd655838 100644
--- a/README.md
+++ b/README.md
@@ -27,7 +27,7 @@ Also check our sibling project, [OllamaHub](https://ollamahub.com/), where you c
 
 - ⚡ **Swift Responsiveness**: Enjoy fast and responsive performance.
 
-- 🚀 **Effortless Setup**: Install seamlessly using Docker for a hassle-free experience.
+- 🚀 **Effortless Setup**: Install seamlessly using Docker or Kubernetes (kubectl, kustomize or helm) for a hassle-free experience.
 
 - 💻 **Code Syntax Highlighting**: Enjoy enhanced code readability with our syntax highlighting feature.
 
@@ -79,32 +79,6 @@ Don't forget to explore our sibling project, [OllamaHub](https://ollamahub.com/)
 
 - **Privacy and Data Security:** We prioritize your privacy and data security above all. Please be reassured that all data entered into the Ollama Web UI is stored locally on your device. Our system is designed to be privacy-first, ensuring that no external requests are made, and your data does not leave your local environment. We are committed to maintaining the highest standards of data privacy and security, ensuring that your information remains confidential and under your control.
 
-### Installing Both Ollama and Ollama Web UI Using Docker Compose
-
-If you don't have Ollama installed yet, you can use the provided Docker Compose file for a hassle-free installation. Simply run the following command:
-
-```bash
-docker compose up -d --build
-```
-
-This command will install both Ollama and Ollama Web UI on your system.
-
-#### Enable GPU
-
-Use the additional Docker Compose file designed to enable GPU support by running the following command:
-
-```bash
-docker compose -f docker-compose.yml -f docker-compose.gpu.yml up -d --build
-```
-
-#### Expose Ollama API outside the container stack
-
-Deploy the service with an additional Docker Compose file designed for API exposure:
-
-```bash
-docker compose -f docker-compose.yml -f docker-compose.api.yml up -d --build
-```
-
 ### Installing Ollama Web UI Only
 
 #### Prerequisites
@@ -149,6 +123,69 @@ docker build -t ollama-webui .
 docker run -d -p 3000:8080 -e OLLAMA_API_BASE_URL=https://example.com/api -v ollama-webui:/app/backend/data --name ollama-webui --restart always ollama-webui
 ```
 
+### Installing Both Ollama and Ollama Web UI
+
+#### Using Docker Compose
+
+If you don't have Ollama installed yet, you can use the provided Docker Compose file for a hassle-free installation. Simply run the following command:
+
+```bash
+docker compose up -d --build
+```
+
+This command will install both Ollama and Ollama Web UI on your system.
+
+##### Enable GPU
+
+Use the additional Docker Compose file designed to enable GPU support by running the following command:
+
+```bash
+docker compose -f docker-compose.yaml -f docker-compose.gpu.yaml up -d --build
+```
+
+##### Expose Ollama API outside the container stack
+
+Deploy the service with an additional Docker Compose file designed for API exposure:
+
+```bash
+docker compose -f docker-compose.yaml -f docker-compose.api.yaml up -d --build
+```
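+
+If you just want to verify that the exposed API is reachable, one quick check (assuming the default `11434` port mapping above) is to list the locally available models:
+
+```bash
+curl http://localhost:11434/api/tags
+```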
+
+#### Using the Provided `run-compose.sh` Script (Linux)
+
+Also available on Windows under any Docker-enabled WSL2 Linux distro (you have to enable WSL integration from Docker Desktop).
+
+First, run the following command to grant the script execute permission:
+
+```bash
+chmod +x run-compose.sh
+```
+
+##### For a CPU-only container
+
+```bash
+./run-compose.sh
+```
+
+##### Enable GPU
+
+For a GPU-enabled container, your GPU driver must be set up for Docker. This mostly works with NVIDIA GPUs, so see the official install guide: [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html).
+Warning! A GPU-enabled installation has only been tested on Linux with NVIDIA GPUs; full functionality is not guaranteed on Windows, macOS, or with a different GPU.
+
+```bash
+./run-compose.sh --enable-gpu
+```
+
+Note that both of the above commands use the latest production Docker image from the repository. To build the latest local version instead, append the `--build` parameter, for example:
+
+```bash
+./run-compose.sh --enable-gpu --build
+```
+
+#### Using Alternative Methods (Kustomize or Helm)
+
+See [INSTALLATION.md](/INSTALLATION.md) for installation instructions, and/or join our [Ollama Web UI Discord community](https://discord.gg/5rJgQTnV4s).
+
 ## How to Install Without Docker
 
 While we strongly recommend using our convenient Docker container installation for optimal support, we understand that some situations may require a non-Docker setup, especially for development purposes. Please note that non-Docker installations are not officially supported, and you might need to troubleshoot on your own.
diff --git a/docker-compose.api.yml b/docker-compose.api.yaml
similarity index 59%
rename from docker-compose.api.yml
rename to docker-compose.api.yaml
index c36cf11e..f19974e7 100644
--- a/docker-compose.api.yml
+++ b/docker-compose.api.yaml
@@ -1,7 +1,7 @@
-version: '3.6'
+version: '3.8'
 
 services:
   ollama:
     # Expose Ollama API outside the container stack
     ports:
-      - 11434:11434
\ No newline at end of file
+      - ${OLLAMA_WEBAPI_PORT-11434}:11434
diff --git a/docker-compose.data.yaml b/docker-compose.data.yaml
new file mode 100644
index 00000000..57d6fc37
--- /dev/null
+++ b/docker-compose.data.yaml
@@ -0,0 +1,6 @@
+version: '3.8'
+
+services:
+  ollama:
+    volumes:
+      - ${OLLAMA_DATA_DIR-./ollama-data}:/root/.ollama
\ No newline at end of file
diff --git a/docker-compose.gpu.yml b/docker-compose.gpu.yaml
similarity index 59%
rename from docker-compose.gpu.yml
rename to docker-compose.gpu.yaml
index db47ae13..424f485a 100644
--- a/docker-compose.gpu.yml
+++ b/docker-compose.gpu.yaml
@@ -1,4 +1,4 @@
-version: '3.6'
+version: '3.8'
 
 services:
   ollama:
@@ -7,7 +7,7 @@ services:
       resources:
         reservations:
           devices:
-            - driver: nvidia
-              count: 1
+            - driver: ${OLLAMA_GPU_DRIVER-nvidia}
+              count: ${OLLAMA_GPU_COUNT-1}
               capabilities:
                 - gpu
diff --git a/docker-compose.yml b/docker-compose.yaml
similarity index 86%
rename from docker-compose.yml
rename to docker-compose.yaml
index a7357740..47263b0a 100644
--- a/docker-compose.yml
+++ b/docker-compose.yaml
@@ -1,4 +1,4 @@
-version: '3.6'
+version: '3.8'
 
 services:
   ollama:
@@ -16,14 +16,14 @@ services:
       args:
         OLLAMA_API_BASE_URL: '/ollama/api'
       dockerfile: Dockerfile
-    image: ollama-webui:latest
+    image: ghcr.io/ollama-webui/ollama-webui:main
     container_name: ollama-webui
     volumes:
       - ollama-webui:/app/backend/data
     depends_on:
       - ollama
     ports:
-      - 3000:8080
+      - ${OLLAMA_WEBUI_PORT-3000}:8080
     environment:
       - "OLLAMA_API_BASE_URL=http://ollama:11434/api"
     extra_hosts:
diff --git a/kubernetes/helm/.helmignore b/kubernetes/helm/.helmignore
new file mode 100644
index 00000000..e69de29b
diff --git a/kubernetes/helm/Chart.yaml b/kubernetes/helm/Chart.yaml
new file mode 100644
index 00000000..52683b65
--- /dev/null
+++ b/kubernetes/helm/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v2
+name: ollama-webui
+description: "Ollama Web UI: A User-Friendly Web Interface for Chat Interactions 👋"
+version: 1.0.0
+icon: https://raw.githubusercontent.com/ollama-webui/ollama-webui/main/static/favicon.png
diff --git a/kubernetes/helm/templates/ollama-namespace.yaml
b/kubernetes/helm/templates/ollama-namespace.yaml new file mode 100644 index 00000000..59f79447 --- /dev/null +++ b/kubernetes/helm/templates/ollama-namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: {{ .Values.namespace }} \ No newline at end of file diff --git a/kubernetes/helm/templates/ollama-service.yaml b/kubernetes/helm/templates/ollama-service.yaml new file mode 100644 index 00000000..54558473 --- /dev/null +++ b/kubernetes/helm/templates/ollama-service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: ollama-service + namespace: {{ .Values.namespace }} +spec: + type: {{ .Values.ollama.service.type }} + selector: + app: ollama + ports: + - protocol: TCP + port: {{ .Values.ollama.servicePort }} + targetPort: {{ .Values.ollama.servicePort }} \ No newline at end of file diff --git a/kubernetes/helm/templates/ollama-statefulset.yaml b/kubernetes/helm/templates/ollama-statefulset.yaml new file mode 100644 index 00000000..83cb6883 --- /dev/null +++ b/kubernetes/helm/templates/ollama-statefulset.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: ollama + namespace: {{ .Values.namespace }} +spec: + serviceName: "ollama" + replicas: {{ .Values.ollama.replicaCount }} + selector: + matchLabels: + app: ollama + template: + metadata: + labels: + app: ollama + spec: + containers: + - name: ollama + image: {{ .Values.ollama.image }} + ports: + - containerPort: {{ .Values.ollama.servicePort }} + env: + {{- if .Values.ollama.gpu.enabled }} + - name: PATH + value: /usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + - name: LD_LIBRARY_PATH + value: /usr/local/nvidia/lib:/usr/local/nvidia/lib64 + - name: NVIDIA_DRIVER_CAPABILITIES + value: compute,utility + {{- end}} + {{- if .Values.ollama.resources }} + resources: {{- toYaml .Values.ollama.resources | nindent 10 }} + {{- end }} + volumeMounts: + - name: ollama-volume + mountPath: /root/.ollama + tty: true + {{- with .Values.ollama.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + tolerations: + {{- if .Values.ollama.gpu.enabled }} + - key: nvidia.com/gpu + operator: Exists + effect: NoSchedule + {{- end }} + volumeClaimTemplates: + - metadata: + name: ollama-volume + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.ollama.volumeSize }} \ No newline at end of file diff --git a/kubernetes/helm/templates/webui-deployment.yaml b/kubernetes/helm/templates/webui-deployment.yaml new file mode 100644 index 00000000..d9721ee0 --- /dev/null +++ b/kubernetes/helm/templates/webui-deployment.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ollama-webui-deployment + namespace: {{ .Values.namespace }} +spec: + replicas: 1 + selector: + matchLabels: + app: ollama-webui + template: + metadata: + labels: + app: ollama-webui + spec: + containers: + - name: ollama-webui + image: {{ .Values.webui.image }} + ports: + - containerPort: 8080 + {{- if .Values.webui.resources }} + resources: {{- toYaml .Values.webui.resources | nindent 10 }} + {{- end }} + volumeMounts: + - name: webui-volume + mountPath: /app/backend/data + env: + - name: OLLAMA_API_BASE_URL + value: "http://ollama-service.{{ .Values.namespace }}.svc.cluster.local:{{ .Values.ollama.servicePort }}/api" + tty: true + {{- with .Values.webui.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 8 }} + {{- end }} + volumes: + - name: webui-volume + persistentVolumeClaim: + claimName: ollama-webui-pvc \ No newline at end of file diff --git a/kubernetes/helm/templates/webui-ingress.yaml b/kubernetes/helm/templates/webui-ingress.yaml new file mode 100644 index 00000000..84f819f3 --- /dev/null +++ b/kubernetes/helm/templates/webui-ingress.yaml @@ -0,0 +1,23 @@ +{{- if .Values.webui.ingress.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ollama-webui-ingress + namespace: {{ .Values.namespace }} +{{- if .Values.webui.ingress.annotations }} + annotations: +{{ toYaml .Values.webui.ingress.annotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + rules: + - host: {{ .Values.webui.ingress.host }} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: ollama-webui-service + port: + number: {{ .Values.webui.servicePort }} +{{- end }} diff --git a/kubernetes/helm/templates/webui-pvc.yaml b/kubernetes/helm/templates/webui-pvc.yaml new file mode 100644 index 00000000..e9961aa8 --- /dev/null +++ b/kubernetes/helm/templates/webui-pvc.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + app: ollama-webui + name: ollama-webui-pvc + namespace: {{ .Values.namespace }} +spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.webui.volumeSize }} \ No newline at end of file diff --git a/kubernetes/helm/templates/webui-service.yaml b/kubernetes/helm/templates/webui-service.yaml new file mode 100644 index 00000000..7fefa4fd --- /dev/null +++ b/kubernetes/helm/templates/webui-service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: ollama-webui-service + namespace: {{ .Values.namespace }} +spec: + type: {{ .Values.webui.service.type }} # Default: NodePort # Use LoadBalancer if you're on a cloud that supports it + selector: + app: ollama-webui + ports: + - protocol: TCP + port: {{ .Values.webui.servicePort }} + targetPort: {{ .Values.webui.servicePort }} + # If using NodePort, you can optionally specify the nodePort: + # nodePort: 30000 \ No newline at end of file diff --git a/kubernetes/helm/values.yaml b/kubernetes/helm/values.yaml new file mode 100644 index 00000000..648b4050 --- /dev/null +++ b/kubernetes/helm/values.yaml @@ -0,0 +1,38 @@ +namespace: ollama-namespace + +ollama: + replicaCount: 1 + image: ollama/ollama:latest + servicePort: 11434 + resources: + limits: + cpu: "2000m" + memory: "2Gi" + nvidia.com/gpu: "0" + volumeSize: 1Gi + nodeSelector: {} + tolerations: [] + service: + type: ClusterIP + gpu: + enabled: false + +webui: + replicaCount: 1 + image: ghcr.io/ollama-webui/ollama-webui:main + servicePort: 8080 + resources: + limits: + cpu: "500m" + memory: "500Mi" + ingress: + enabled: true + annotations: + # Use appropriate annotations for your Ingress controller, e.g., for NGINX: + # nginx.ingress.kubernetes.io/rewrite-target: / + host: ollama.minikube.local + volumeSize: 1Gi + nodeSelector: {} + tolerations: [] + service: + type: NodePort \ No newline at end of file diff --git a/kubernetes/manifest/base/ollama-namespace.yaml b/kubernetes/manifest/base/ollama-namespace.yaml new file mode 100644 index 00000000..f296eb20 --- /dev/null +++ b/kubernetes/manifest/base/ollama-namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: ollama-namespace \ No newline at end of file diff --git a/kubernetes/manifest/base/ollama-service.yaml b/kubernetes/manifest/base/ollama-service.yaml new file mode 100644 index 
00000000..a9467fc4 --- /dev/null +++ b/kubernetes/manifest/base/ollama-service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: ollama-service + namespace: ollama-namespace +spec: + selector: + app: ollama + ports: + - protocol: TCP + port: 11434 + targetPort: 11434 \ No newline at end of file diff --git a/kubernetes/manifest/base/ollama-statefulset.yaml b/kubernetes/manifest/base/ollama-statefulset.yaml new file mode 100644 index 00000000..ee63faa9 --- /dev/null +++ b/kubernetes/manifest/base/ollama-statefulset.yaml @@ -0,0 +1,37 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: ollama + namespace: ollama-namespace +spec: + serviceName: "ollama" + replicas: 1 + selector: + matchLabels: + app: ollama + template: + metadata: + labels: + app: ollama + spec: + containers: + - name: ollama + image: ollama/ollama:latest + ports: + - containerPort: 11434 + resources: + limits: + cpu: "2000m" + memory: "2Gi" + volumeMounts: + - name: ollama-volume + mountPath: /root/.ollama + tty: true + volumeClaimTemplates: + - metadata: + name: ollama-volume + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/kubernetes/manifest/base/webui-deployment.yaml b/kubernetes/manifest/base/webui-deployment.yaml new file mode 100644 index 00000000..58de0368 --- /dev/null +++ b/kubernetes/manifest/base/webui-deployment.yaml @@ -0,0 +1,28 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ollama-webui-deployment + namespace: ollama-namespace +spec: + replicas: 1 + selector: + matchLabels: + app: ollama-webui + template: + metadata: + labels: + app: ollama-webui + spec: + containers: + - name: ollama-webui + image: ghcr.io/ollama-webui/ollama-webui:main + ports: + - containerPort: 8080 + resources: + limits: + cpu: "500m" + memory: "500Mi" + env: + - name: OLLAMA_API_BASE_URL + value: "http://ollama-service.ollama-namespace.svc.cluster.local:11434/api" + tty: true \ No newline at end of file diff --git a/kubernetes/manifest/base/webui-ingress.yaml b/kubernetes/manifest/base/webui-ingress.yaml new file mode 100644 index 00000000..0038807c --- /dev/null +++ b/kubernetes/manifest/base/webui-ingress.yaml @@ -0,0 +1,20 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ollama-webui-ingress + namespace: ollama-namespace + #annotations: + # Use appropriate annotations for your Ingress controller, e.g., for NGINX: + # nginx.ingress.kubernetes.io/rewrite-target: / +spec: + rules: + - host: ollama.minikube.local + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: ollama-webui-service + port: + number: 8080 diff --git a/kubernetes/manifest/base/webui-service.yaml b/kubernetes/manifest/base/webui-service.yaml new file mode 100644 index 00000000..b41daeaf --- /dev/null +++ b/kubernetes/manifest/base/webui-service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: ollama-webui-service + namespace: ollama-namespace +spec: + type: NodePort # Use LoadBalancer if you're on a cloud that supports it + selector: + app: ollama-webui + ports: + - protocol: TCP + port: 8080 + targetPort: 8080 + # If using NodePort, you can optionally specify the nodePort: + # nodePort: 30000 \ No newline at end of file diff --git a/kubernetes/manifest/kustomization.yaml b/kubernetes/manifest/kustomization.yaml new file mode 100644 index 00000000..a4b03d96 --- /dev/null +++ b/kubernetes/manifest/kustomization.yaml @@ -0,0 +1,12 @@ +resources: +- base/ollama-namespace.yaml +- 
base/ollama-service.yaml +- base/ollama-statefulset.yaml +- base/webui-deployment.yaml +- base/webui-service.yaml +- base/webui-ingress.yaml + +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +patches: +- path: patches/ollama-statefulset-gpu.yaml diff --git a/kubernetes/manifest/patches/ollama-statefulset-gpu.yaml b/kubernetes/manifest/patches/ollama-statefulset-gpu.yaml new file mode 100644 index 00000000..54e5aba6 --- /dev/null +++ b/kubernetes/manifest/patches/ollama-statefulset-gpu.yaml @@ -0,0 +1,17 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: ollama + namespace: ollama-namespace +spec: + selector: + matchLabels: + app: ollama + serviceName: "ollama" + template: + spec: + containers: + - name: ollama + resources: + limits: + nvidia.com/gpu: "1" diff --git a/run-compose.sh b/run-compose.sh new file mode 100755 index 00000000..0557bce9 --- /dev/null +++ b/run-compose.sh @@ -0,0 +1,237 @@ +#!/bin/bash + +# Define color and formatting codes +BOLD='\033[1m' +GREEN='\033[1;32m' +WHITE='\033[1;37m' +RED='\033[0;31m' +NC='\033[0m' # No Color +# Unicode character for tick mark +TICK='\u2713' + +# Detect GPU driver +get_gpu_driver() { + # Detect NVIDIA GPUs + if lspci | grep -i nvidia >/dev/null; then + echo "nvidia" + return + fi + + # Detect AMD GPUs (including GCN architecture check for amdgpu vs radeon) + if lspci | grep -i amd >/dev/null; then + # List of known GCN and later architecture cards + # This is a simplified list, and in a real-world scenario, you'd want a more comprehensive one + local gcn_and_later=("Radeon HD 7000" "Radeon HD 8000" "Radeon R5" "Radeon R7" "Radeon R9" "Radeon RX") + + # Get GPU information + local gpu_info=$(lspci | grep -i 'vga.*amd') + + for model in "${gcn_and_later[@]}"; do + if echo "$gpu_info" | grep -iq "$model"; then + echo "amdgpu" + return + fi + done + + # Default to radeon if no GCN or later architecture is detected + echo "radeon" + return + fi + + # Detect Intel GPUs + if lspci | grep -i intel >/dev/null; then + echo "i915" + return + fi + + # If no known GPU is detected + echo "Unknown or unsupported GPU driver" + exit 1 +} + +# Function for rolling animation +show_loading() { + local spin='-\|/' + local i=0 + + printf " " + + while kill -0 $1 2>/dev/null; do + i=$(( (i+1) %4 )) + printf "\b${spin:$i:1}" + sleep .1 + done + + # Replace the spinner with a tick + printf "\b${GREEN}${TICK}${NC}" +} + +# Usage information +usage() { + echo "Usage: $0 [OPTIONS]" + echo "Options:" + echo " --enable-gpu[count=COUNT] Enable GPU support with the specified count." + echo " --enable-api[port=PORT] Enable API and expose it on the specified port." + echo " --webui[port=PORT] Set the port for the web user interface." + echo " --data[folder=PATH] Bind mount for ollama data folder (by default will create the 'ollama' volume)." + echo " --build Build the docker image before running the compose project." + echo " --drop Drop the compose project." + echo " -q, --quiet Run script in headless mode." + echo " -h, --help Show this help message." 
+  echo ""
+  echo "Examples:"
+  echo "  $0 --drop"
+  echo "  $0 --enable-gpu[count=1]"
+  echo "  $0 --enable-api[port=11435]"
+  echo "  $0 --enable-gpu[count=1] --enable-api[port=12345] --webui[port=3000]"
+  echo "  $0 --enable-gpu[count=1] --enable-api[port=12345] --webui[port=3000] --data[folder=./ollama-data]"
+  echo "  $0 --enable-gpu[count=1] --enable-api[port=12345] --webui[port=3000] --data[folder=./ollama-data] --build"
+  echo ""
+  echo "This script configures and runs a docker-compose setup with optional GPU support, API exposure, and web UI configuration."
+  echo "The GPU driver to use is detected automatically with the \"lspci\" command."
+  echo "The GPU driver detected on this machine is: $(get_gpu_driver)"
+}
+
+# Default values
+gpu_count=1
+api_port=11435
+webui_port=3000
+headless=false
+build_image=false
+kill_compose=false
+
+# Function to extract value from the parameter
+extract_value() {
+  echo "$1" | sed -E 's/.*\[.*=(.*)\].*/\1/; t; s/.*//'
+}
+
+# Parse arguments
+while [[ $# -gt 0 ]]; do
+  key="$1"
+
+  case $key in
+    --enable-gpu*)
+      enable_gpu=true
+      value=$(extract_value "$key")
+      gpu_count=${value:-1}
+      ;;
+    --enable-api*)
+      enable_api=true
+      value=$(extract_value "$key")
+      api_port=${value:-11435}
+      ;;
+    --webui*)
+      value=$(extract_value "$key")
+      webui_port=${value:-3000}
+      ;;
+    --data*)
+      value=$(extract_value "$key")
+      data_dir=${value:-"./ollama-data"}
+      ;;
+    --drop)
+      kill_compose=true
+      ;;
+    --build)
+      build_image=true
+      ;;
+    -q|--quiet)
+      headless=true
+      ;;
+    -h|--help)
+      usage
+      exit
+      ;;
+    *)
+      # Unknown option
+      echo "Unknown option: $key"
+      usage
+      exit 1
+      ;;
+  esac
+  shift # past argument or value
+done
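+
+# Example: "--enable-gpu[count=2]" matches the --enable-gpu* pattern above, and
+# extract_value pulls "2" out of the brackets; a bare "--enable-gpu" produces an
+# empty value, so the ${value:-1} default is used instead.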
${NC}" + choice="y" +else + # Ask for user acceptance + echo -ne "${WHITE}${BOLD}Do you want to proceed with current setup? (Y/n): ${NC}" + read -n1 -s choice +fi + +echo + +if [[ $choice == "" || $choice == "y" ]]; then + # Execute the command with the current user + eval "$DEFAULT_COMPOSE_COMMAND" & + + # Capture the background process PID + PID=$! + + # Display the loading animation + #show_loading $PID + + # Wait for the command to finish + wait $PID + + echo + # Check exit status + if [ $? -eq 0 ]; then + echo -e "${GREEN}${BOLD}Compose project started successfully.${NC}" + else + echo -e "${RED}${BOLD}There was an error starting the compose project.${NC}" + fi +else + echo "Aborted." +fi + +echo