forked from open-webui/open-webui
Merge pull request #725 from jnkstr/no-internet-whisper
fix: no internet connection for whisper if you use docker
Commit 1def55cf09

3 changed files with 13 additions and 5 deletions
Dockerfile:

@@ -30,6 +30,10 @@ ENV WEBUI_SECRET_KEY ""
 ENV SCARF_NO_ANALYTICS true
 ENV DO_NOT_TRACK true
 
+#Whisper TTS Settings
+ENV WHISPER_MODEL="base"
+ENV WHISPER_MODEL_DIR="/app/backend/data/cache/whisper/models"
+
 WORKDIR /app/backend
 
 # install python dependencies
@@ -45,6 +49,8 @@ RUN apt-get update \
     && rm -rf /var/lib/apt/lists/*
 
 # RUN python -c "from sentence_transformers import SentenceTransformer; model = SentenceTransformer('all-MiniLM-L6-v2')"
+RUN python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"
+
 
 # copy embedding weight from build
 RUN mkdir -p /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2
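For readability, here is the Dockerfile's one-line RUN step expanded into a plain Python sketch (same calls and environment variables as above, nothing added): downloading the model once at build time is what lets the container transcribe later without any internet access.

# Expanded equivalent of the Dockerfile's one-line RUN step (sketch only).
import os

from faster_whisper import WhisperModel

# Both variables are set via ENV in the Dockerfile hunk above.
model_name = os.environ["WHISPER_MODEL"]      # "base" by default
model_dir = os.environ["WHISPER_MODEL_DIR"]   # /app/backend/data/cache/whisper/models

# Instantiating the model downloads the weights into download_root, so they
# end up inside the image layer and no network access is needed at runtime.
WhisperModel(model_name, device="cpu", compute_type="int8", download_root=model_dir)

Because this runs during docker build, the weights become part of the image under WHISPER_MODEL_DIR, which is exactly where the runtime code below now looks.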
Audio app (FastAPI transcription endpoint):

@@ -1,3 +1,4 @@
+import os
 from fastapi import (
     FastAPI,
     Request,
@@ -20,7 +21,7 @@ from utils.utils import (
 )
 from utils.misc import calculate_sha256
 
-from config import CACHE_DIR, UPLOAD_DIR, WHISPER_MODEL_NAME
+from config import CACHE_DIR, UPLOAD_DIR, WHISPER_MODEL, WHISPER_MODEL_DIR
 
 app = FastAPI()
 app.add_middleware(
@@ -53,12 +54,11 @@ def transcribe(
         f.write(contents)
         f.close()
 
-        model_name = WHISPER_MODEL_NAME
         model = WhisperModel(
-            model_name,
+            WHISPER_MODEL,
             device="cpu",
             compute_type="int8",
-            download_root=f"{CACHE_DIR}/whisper/models",
+            download_root=WHISPER_MODEL_DIR,
         )
 
         segments, info = model.transcribe(file_path, beam_size=5)
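With the import switched to WHISPER_MODEL and WHISPER_MODEL_DIR, the endpoint loads the model from the directory the Docker build already populated, so nothing is fetched at request time. A minimal standalone sketch of that load path, assuming faster-whisper is installed and the script runs next to the backend's config module; the audio file name is a placeholder:

from faster_whisper import WhisperModel

from config import WHISPER_MODEL, WHISPER_MODEL_DIR

# Same arguments as in the endpoint above: because download_root already holds
# the weights fetched during the Docker build, this load is purely local.
model = WhisperModel(
    WHISPER_MODEL,
    device="cpu",
    compute_type="int8",
    download_root=WHISPER_MODEL_DIR,
)

# "example.wav" is a placeholder file name for illustration.
segments, info = model.transcribe("example.wav", beam_size=5)

print(info.language, info.language_probability)
print(" ".join(segment.text.strip() for segment in segments))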
config.py:

@@ -139,4 +139,6 @@ CHUNK_OVERLAP = 100
 ####################################
 # Transcribe
 ####################################
-WHISPER_MODEL_NAME = "base"
+
+WHISPER_MODEL = os.getenv("WHISPER_MODEL", "base")
+WHISPER_MODEL_DIR = os.getenv("WHISPER_MODEL_DIR", f"{CACHE_DIR}/whisper/models")
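Since both settings are now read with os.getenv, the defaults match the Dockerfile's ENV values and can still be overridden per container. Not part of this PR, but a small hypothetical pre-flight check built on the same two settings can make a missing cache obvious at startup instead of failing mid-request (the directory default mirrors the Dockerfile ENV):

import os
from pathlib import Path

# Hypothetical startup check (not in the PR): confirm that the cache directory
# baked into the image, or overridden via WHISPER_MODEL_DIR, actually holds files.
model_dir = Path(os.getenv("WHISPER_MODEL_DIR", "/app/backend/data/cache/whisper/models"))

if not model_dir.is_dir() or not any(model_dir.iterdir()):
    raise SystemExit(
        f"No cached Whisper model found in {model_dir}; "
        "rebuild the image or mount a directory containing the model files."
    )

print(f"Using cached Whisper model '{os.getenv('WHISPER_MODEL', 'base')}' from {model_dir}")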