Merge commit 'c8f81a126117485ebfdbe31a74f0973e46208791' into dev

Daniele Viti 2023-12-24 13:37:46 +01:00
commit 3568be4da8
6 changed files with 319 additions and 15 deletions

.gitignore (291 additions)

@@ -8,3 +8,294 @@ node_modules
!.env.example
vite.config.js.timestamp-*
vite.config.ts.timestamp-*
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# Snowpack dependency directory (https://snowpack.dev/)
web_modules/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Optional stylelint cache
.stylelintcache
# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local
# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache
# Next.js build output
.next
out
# Nuxt.js build / generate output
.nuxt
dist
# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public
# vuepress build output
.vuepress/dist
# vuepress v2.x temp and cache directory
.temp
.cache
# Docusaurus cache and generated files
.docusaurus
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# TernJS port file
.tern-port
# Stores VSCode versions used for testing VSCode extensions
.vscode-test
# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*
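A quick way to confirm which of the new rules matches a given path is `git check-ignore -v`, which prints the source file, line number, and winning pattern for each path. The paths below are only illustrative examples:

    # Ask git which .gitignore rule (file:line) ignores each example path
    git check-ignore -v __pycache__/app.cpython-311.pyc .venv/bin/python node_modules/.package-lock.json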

README.md

@@ -45,7 +45,7 @@ Also check our sibling project, [OllamaHub](https://ollamahub.com/), where you c
- ⚙️ **Many Models Conversations**: Effortlessly engage with various models simultaneously, harnessing their unique strengths for optimal responses. Enhance your experience by leveraging a diverse set of models in parallel.
-- 🤝 **OpenAI Model Integration**: Seamlessly utilize OpenAI models alongside Ollama models for a versatile conversational experience.
+- 🤝 **OpenAI API Integration**: Effortlessly integrate OpenAI-compatible API for versatile conversations alongside Ollama models. Customize the API Base URL to link with **LMStudio, Mistral, OpenRouter, and more**.
- 🔄 **Regeneration History Access**: Easily revisit and explore your entire regeneration history.
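The customizable API Base URL mentioned above means the UI can talk to any OpenAI-compatible server, for example LMStudio's local server on its default port 1234. A rough sketch of wiring that up at container start; the environment variable names and image tag here are assumptions for illustration, not taken from this commit:

    # Illustrative only: point the UI at an OpenAI-compatible endpoint (LMStudio default port shown)
    docker run -d -p 3000:8080 \
      -e OPENAI_API_BASE_URL=http://host.docker.internal:1234/v1 \
      -e OPENAI_API_KEY=dummy-key \
      ghcr.io/ollama-webui/ollama-webui:main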

docker-compose.api.yaml (new file, 6 additions)

@@ -0,0 +1,6 @@
version: '3.8'
services:
  ollama:
    ports:
      - ${OLLAMA_WEBAPI_PORT-11434}:11434
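This override simply publishes the Ollama API port on the host; the `${OLLAMA_WEBAPI_PORT-11434}` expansion falls back to 11434 whenever the variable is unset. A minimal usage sketch stacking it onto the base compose file (the 11500 host port is just an example value):

    # Publish the Ollama API on host port 11500 in addition to the normal stack
    OLLAMA_WEBAPI_PORT=11500 docker compose -f docker-compose.yaml -f docker-compose.api.yaml up -d
    # Sanity check from the host: ask Ollama for its model list
    curl http://localhost:11500/api/tags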

docker-compose.gpu.yaml (new file, 12 additions)

@@ -0,0 +1,12 @@
version: '3.8'
services:
  ollama:
    deploy:
      resources:
        reservations:
          devices:
            - driver: ${OLLAMA_GPU_DRIVER-nvidia}
              count: ${OLLAMA_GPU_COUNT-1}
              capabilities:
                - gpu
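Here the GPU reservation is driven entirely by environment variables, with nvidia and a single GPU as the defaults when nothing is set. A hedged usage sketch; the driver and count values are examples:

    # Attach two NVIDIA GPUs to the ollama service via the override file
    OLLAMA_GPU_DRIVER=nvidia OLLAMA_GPU_COUNT=2 \
      docker compose -f docker-compose.yaml -f docker-compose.gpu.yaml up -d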

docker-compose.yaml

@@ -1,21 +1,9 @@
-version: '3.6'
+version: '3.8'
services:
  ollama:
-    # Uncomment below for GPU support
-    # deploy:
-    #   resources:
-    #     reservations:
-    #       devices:
-    #         - driver: nvidia
-    #           count: 1
-    #           capabilities:
-    #             - gpu
    volumes:
      - ollama:/root/.ollama
-    # Uncomment below to expose Ollama API outside the container stack
-    # ports:
-    #   - 11434:11434
    container_name: ollama
    pull_policy: always
    tty: true
@@ -33,7 +21,7 @@ services:
    depends_on:
      - ollama
    ports:
-      - 3000:8080
+      - ${OLLAMA_WEBUI_PORT-3000}:8080
    environment:
      - "OLLAMA_API_BASE_URL=http://ollama:11434/api"
    extra_hosts:
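With the host port parameterized, the web UI can be moved off port 3000 without editing the compose file; a small sketch (8090 is an arbitrary example):

    # Serve the web UI on host port 8090 instead of the default 3000
    OLLAMA_WEBUI_PORT=8090 docker compose up -d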

run-compose.sh

@@ -147,6 +147,7 @@ if [[ $enable_gpu == true ]]; then
        echo "Invalid GPU count: $gpu_count"
        exit 1
    fi
    echo "Enabling GPU with $gpu_count GPUs"
    # Add your GPU allocation logic here
    export OLLAMA_GPU_DRIVER=$(get_gpu_driver)
    export OLLAMA_GPU_COUNT=$gpu_count # Set OLLAMA_GPU_COUNT environment variable
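The two exported variables are exactly the ones the new docker-compose.gpu.yaml consumes through its `${OLLAMA_GPU_DRIVER-nvidia}` and `${OLLAMA_GPU_COUNT-1}` defaults. One way to check what Compose will actually substitute after the exports, purely as an illustration:

    # Render the merged compose config and show the resolved GPU device block
    docker compose -f docker-compose.yaml -f docker-compose.gpu.yaml config | grep -A 4 devices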
@@ -183,6 +184,12 @@ else
    echo -ne "${WHITE}${BOLD}Do you want to proceed with current setup? (Y/n): ${NC}"
    read -n1 -s choice
fi
echo -e " ${GREEN}${BOLD}WebUI Port:${NC} $webui_port"
echo
# Ask for user acceptance
echo -ne "${WHITE}${BOLD}Do you want to proceed with current setup? (Y/n): ${NC}"
read -n1 -s choice
if [[ $choice == "" || $choice == "y" ]]; then
    # Execute the command with the current user