diff --git a/run-ollama-docker.sh b/run-ollama-docker.sh
index 5942fd3c..c8ce166a 100644
--- a/run-ollama-docker.sh
+++ b/run-ollama-docker.sh
@@ -1,7 +1,19 @@
+#!/bin/bash
+
+host_port=11434
+container_port=11434
+
+read -r -p "Do you want ollama in Docker with GPU support? (y/n): " use_gpu
+
 docker rm -f ollama || true
-docker pull ollama/ollama
-# CPU Only
-docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
-# GPU Support
-# docker run -d --gpus=all -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
-docker image prune -f
\ No newline at end of file
+docker pull ollama/ollama:latest
+
+docker_args=(-d -v ollama:/root/.ollama -p "$host_port:$container_port" --name ollama)
+
+if [ "$use_gpu" == "y" ]; then
+  docker_args+=(--gpus=all)
+fi
+
+docker run "${docker_args[@]}" ollama/ollama
+
+docker image prune -f
diff --git a/run.sh b/run.sh
index 6e2dc611..c8ac77cc 100644
--- a/run.sh
+++ b/run.sh
@@ -1,5 +1,19 @@
-docker build -t ollama-webui .
-docker stop ollama-webui || true
-docker rm ollama-webui || true
-docker run -d -p 3000:8080 --add-host=host.docker.internal:host-gateway -v ollama-webui:/app/backend/data --name ollama-webui --restart always ollama-webui
-docker image prune -f
\ No newline at end of file
+#!/bin/bash
+
+image_name="ollama-webui"
+container_name="ollama-webui"
+host_port=3000
+container_port=8080
+
+docker build -t "$image_name" .
+docker stop "$container_name" &>/dev/null || true
+docker rm "$container_name" &>/dev/null || true
+
+docker run -d -p "$host_port":"$container_port" \
+  --add-host=host.docker.internal:host-gateway \
+  -v "${image_name}:/app/backend/data" \
+  --name "$container_name" \
+  --restart always \
+  "$image_name"
+
+docker image prune -f