From 9bbae0e25ac08aa8a8e1db1e6c5456e4cafb0c4e Mon Sep 17 00:00:00 2001
From: icervero
Date: Tue, 19 Dec 2023 17:34:50 +0100
Subject: [PATCH 1/4] Refactor docker-compose configuration for modularity

Split the original docker-compose.yml into three separate files for
enhanced modularity and ease of use. Created docker-compose.api.yml for
API exposure configuration and docker-compose.gpu.yml for GPU support.
This change simplifies the management of different deployment
environments and configurations, making it easier to enable or disable
specific features such as GPU support and API access without modifying
the main docker-compose file.
---
 README.md              |  9 ++++++++-
 docker-compose.api.yml |  7 +++++++
 docker-compose.gpu.yml | 13 +++++++++++++
 docker-compose.yml     | 12 ------------
 4 files changed, 28 insertions(+), 13 deletions(-)
 create mode 100644 docker-compose.api.yml
 create mode 100644 docker-compose.gpu.yml

diff --git a/README.md b/README.md
index c5919688..549bce65 100644
--- a/README.md
+++ b/README.md
@@ -75,7 +75,14 @@ If you don't have Ollama installed yet, you can use the provided Docker Compose
 docker compose up -d --build
 ```
 
-This command will install both Ollama and Ollama Web UI on your system. Ensure to modify the `compose.yaml` file for GPU support and Exposing Ollama API outside the container stack if needed.
+This command will install both Ollama and Ollama Web UI on your system.
+Enable GPU support or expose the Ollama API outside the container stack with the following command:
+```bash
+docker compose -f docker-compose.yml \
+    -f docker-compose.gpu.yml \
+    -f docker-compose.api.yml \
+    up -d --build
+```
 
 ### Installing Ollama Web UI Only
 
diff --git a/docker-compose.api.yml b/docker-compose.api.yml
new file mode 100644
index 00000000..c36cf11e
--- /dev/null
+++ b/docker-compose.api.yml
@@ -0,0 +1,7 @@
+version: '3.6'
+
+services:
+  ollama:
+    # Expose Ollama API outside the container stack
+    ports:
+      - 11434:11434
\ No newline at end of file
diff --git a/docker-compose.gpu.yml b/docker-compose.gpu.yml
new file mode 100644
index 00000000..db47ae13
--- /dev/null
+++ b/docker-compose.gpu.yml
@@ -0,0 +1,13 @@
+version: '3.6'
+
+services:
+  ollama:
+    # GPU support
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities:
+                - gpu
diff --git a/docker-compose.yml b/docker-compose.yml
index b5036354..427f8580 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -2,20 +2,8 @@ version: '3.6'
 
 services:
   ollama:
-    # Uncomment below for GPU support
-    # deploy:
-    #   resources:
-    #     reservations:
-    #       devices:
-    #         - driver: nvidia
-    #           count: 1
-    #           capabilities:
-    #             - gpu
     volumes:
       - ollama:/root/.ollama
-    # Uncomment below to expose Ollama API outside the container stack
-    # ports:
-    #   - 11434:11434
     container_name: ollama
     pull_policy: always
     tty: true

From 7e0f5ff20e90f5e571cace1ea76940ce6ff87502 Mon Sep 17 00:00:00 2001
From: Timothy Jaeryang Baek
Date: Sun, 24 Dec 2023 04:35:40 -0500
Subject: [PATCH 2/4] doc: feature update

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index de950ef8..83650903 100644
--- a/README.md
+++ b/README.md
@@ -45,7 +45,7 @@ Also check our sibling project, [OllamaHub](https://ollamahub.com/), where you c
 
 - ⚙️ **Many Models Conversations**: Effortlessly engage with various models simultaneously, harnessing their unique strengths for optimal responses. Enhance your experience by leveraging a diverse set of models in parallel.
 
-- 🤝 **OpenAI Model Integration**: Seamlessly utilize OpenAI models alongside Ollama models for a versatile conversational experience.
+- 🤝 **OpenAI API Integration**: Effortlessly integrate OpenAI-compatible APIs for versatile conversations with Ollama models. Customize the API Base URL to link with **LMStudio, Mistral, OpenRouter, and more**.
 
 - 🔄 **Regeneration History Access**: Easily revisit and explore your entire regeneration history.
 

From bc7a6bae8605dcaf9f9bcbbb05874091d33446ec Mon Sep 17 00:00:00 2001
From: Timothy Jaeryang Baek
Date: Sun, 24 Dec 2023 04:36:40 -0500
Subject: [PATCH 3/4] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 83650903..5af1a6c4 100644
--- a/README.md
+++ b/README.md
@@ -45,7 +45,7 @@ Also check our sibling project, [OllamaHub](https://ollamahub.com/), where you c
 
 - ⚙️ **Many Models Conversations**: Effortlessly engage with various models simultaneously, harnessing their unique strengths for optimal responses. Enhance your experience by leveraging a diverse set of models in parallel.
 
-- 🤝 **OpenAI API Integration**: Effortlessly integrate OpenAI-compatible APIs for versatile conversations with Ollama models. Customize the API Base URL to link with **LMStudio, Mistral, OpenRouter, and more**.
+- 🤝 **OpenAI API Integration**: Effortlessly integrate OpenAI-compatible APIs for versatile conversations alongside Ollama models. Customize the API Base URL to link with **LMStudio, Mistral, OpenRouter, and more**.
 
 - 🔄 **Regeneration History Access**: Easily revisit and explore your entire regeneration history.
 

From 9395b4a1dceb1d4322865549c6aef5d82625d472 Mon Sep 17 00:00:00 2001
From: Ignasi Cervero
Date: Sun, 24 Dec 2023 12:23:16 +0100
Subject: [PATCH 4/4] Refactor Docker installation instructions in README for enhanced clarity

- Separate GPU support and API exposure instructions into distinct sections
- Improve readability and user guidance for Docker Compose setup
---
 README.md | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index 52fd3354..b3e407f0 100644
--- a/README.md
+++ b/README.md
@@ -79,13 +79,18 @@ If you don't have Ollama installed yet, you can use the provided Docker Compose
 docker compose up -d --build
 ```
 
-This command will install both Ollama and Ollama Web UI on your system.
-Enable GPU support or expose the Ollama API outside the container stack with the following command:
+This command will install both Ollama and Ollama Web UI on your system.
+
+#### Enable GPU
+Use the additional Docker Compose file designed to enable GPU support by running the following command:
 ```bash
-docker compose -f docker-compose.yml \
-    -f docker-compose.gpu.yml \
-    -f docker-compose.api.yml \
-    up -d --build
+docker compose -f docker-compose.yml -f docker-compose.gpu.yml up -d --build
+```
+
+#### Expose Ollama API outside the container stack
+Deploy the service with an additional Docker Compose file designed for API exposure:
+```bash
+docker compose -f docker-compose.yml -f docker-compose.api.yml up -d --build
 ```
 
 ### Installing Ollama Web UI Only
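
Note that the two override files are not mutually exclusive: Docker Compose merges every file passed with `-f` in order, so GPU support and API exposure can be enabled together. A minimal usage sketch, assuming the `docker-compose.gpu.yml` and `docker-compose.api.yml` files introduced in PATCH 1/4 sit next to the base `docker-compose.yml`:

```bash
# Merge the base stack with both override files: the GPU device
# reservation and the published port 11434 are applied together.
docker compose -f docker-compose.yml \
    -f docker-compose.gpu.yml \
    -f docker-compose.api.yml \
    up -d --build
```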