#!/bin/bash
# Run Ollama in a Docker container, optionally with GPU support.

# Host and container ports for the Ollama API (11434 is Ollama's default).
host_port=11434
container_port=11434

# Ask whether to expose the host GPU(s) to the container.
read -r -p "Do you want ollama in Docker with GPU support? (y/n): " use_gpu

# Remove any existing ollama container; ignore the error if none exists.
docker rm -f ollama || true

# Fetch the latest image before (re)creating the container.
docker pull ollama/ollama:latest

# Base arguments as an array (safer than a flat string under word splitting):
# detached, persistent volume for downloaded models, API port mapping.
docker_args=(-d -v ollama:/root/.ollama -p "$host_port:$container_port" --name ollama ollama/ollama)

if [ "$use_gpu" = "y" ]; then
|
|
|
|
docker_args="--gpus=all $docker_args"
fi

# Quoted array expansion passes each argument through intact.
docker run "${docker_args[@]}"

# Clean up dangling images left behind by previous pulls.
docker image prune -f
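
# Optional sanity checks (a sketch, not part of the original script: the port
# assumes Ollama's default HTTP API, and "llama3" is just an example model):
#   curl "http://localhost:$host_port/api/tags"   # list models via the API
#   docker exec -it ollama ollama run llama3      # pull and chat with a model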