From a5b9bbf10b89a53cf8f4ed13c14eeae674a75237 Mon Sep 17 00:00:00 2001 From: "Timothy J. Baek" Date: Sun, 11 Feb 2024 00:17:50 -0800 Subject: [PATCH 1/6] feat: whisper support --- backend/apps/audio/main.py | 80 +++++++++++++++++++++ backend/config.py | 5 ++ backend/main.py | 4 ++ backend/requirements.txt | 2 + src/lib/apis/audio/index.ts | 31 ++++++++ src/lib/components/chat/MessageInput.svelte | 4 ++ src/lib/constants.ts | 1 + 7 files changed, 127 insertions(+) create mode 100644 backend/apps/audio/main.py create mode 100644 src/lib/apis/audio/index.ts diff --git a/backend/apps/audio/main.py b/backend/apps/audio/main.py new file mode 100644 index 00000000..f80e3ac8 --- /dev/null +++ b/backend/apps/audio/main.py @@ -0,0 +1,80 @@ +from fastapi import ( + FastAPI, + Request, + Depends, + HTTPException, + status, + UploadFile, + File, + Form, +) +from fastapi.middleware.cors import CORSMiddleware +from faster_whisper import WhisperModel + +from constants import ERROR_MESSAGES +from utils.utils import ( + decode_token, + get_current_user, + get_verified_user, + get_admin_user, +) +from utils.misc import calculate_sha256 + +from config import CACHE_DIR, UPLOAD_DIR, WHISPER_MODEL_NAME + +app = FastAPI() +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + + +@app.post("/transcribe") +def transcribe( + file: UploadFile = File(...), + user=Depends(get_current_user), +): + print(file.content_type) + + if file.content_type not in ["audio/mpeg", "audio/wav"]: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=ERROR_MESSAGES.FILE_NOT_SUPPORTED, + ) + + try: + filename = file.filename + file_path = f"{UPLOAD_DIR}/{filename}" + contents = file.file.read() + with open(file_path, "wb") as f: + f.write(contents) + f.close() + + model_name = WHISPER_MODEL_NAME + model = WhisperModel( + model_name, + device="cpu", + compute_type="int8", + download_root=f"{CACHE_DIR}/whisper/models", + ) + + segments, info = model.transcribe(file_path, beam_size=5) + print( + "Detected language '%s' with probability %f" + % (info.language, info.language_probability) + ) + + transcript = "".join([segment.text for segment in list(segments)]) + + return {"text": transcript} + + except Exception as e: + print(e) + + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=ERROR_MESSAGES.DEFAULT(e), + ) diff --git a/backend/config.py b/backend/config.py index 65ee2298..cf6e8139 100644 --- a/backend/config.py +++ b/backend/config.py @@ -132,3 +132,8 @@ CHROMA_CLIENT = chromadb.PersistentClient( ) CHUNK_SIZE = 1500 CHUNK_OVERLAP = 100 + +#################################### +# Transcribe +#################################### +WHISPER_MODEL_NAME = "tiny" diff --git a/backend/main.py b/backend/main.py index f7a82b66..3a28670e 100644 --- a/backend/main.py +++ b/backend/main.py @@ -10,6 +10,8 @@ from starlette.exceptions import HTTPException as StarletteHTTPException from apps.ollama.main import app as ollama_app from apps.openai.main import app as openai_app +from apps.audio.main import app as audio_app + from apps.web.main import app as webui_app from apps.rag.main import app as rag_app @@ -55,6 +57,8 @@ app.mount("/api/v1", webui_app) app.mount("/ollama/api", ollama_app) app.mount("/openai/api", openai_app) + +app.mount("/audio/api/v1", audio_app) app.mount("/rag/api/v1", rag_app) diff --git a/backend/requirements.txt b/backend/requirements.txt index 68cba254..56e1d36e 100644 --- a/backend/requirements.txt 
+++ b/backend/requirements.txt @@ -30,6 +30,8 @@ openpyxl pyxlsb xlrd +faster-whisper + PyJWT pyjwt[crypto] diff --git a/src/lib/apis/audio/index.ts b/src/lib/apis/audio/index.ts new file mode 100644 index 00000000..d2848339 --- /dev/null +++ b/src/lib/apis/audio/index.ts @@ -0,0 +1,31 @@ +import { AUDIO_API_BASE_URL } from '$lib/constants'; + +export const transcribeAudio = async (token: string, file: File) => { + const data = new FormData(); + data.append('file', file); + + let error = null; + const res = await fetch(`${AUDIO_API_BASE_URL}/transcribe`, { + method: 'POST', + headers: { + Accept: 'application/json', + authorization: `Bearer ${token}` + }, + body: data + }) + .then(async (res) => { + if (!res.ok) throw await res.json(); + return res.json(); + }) + .catch((err) => { + error = err.detail; + console.log(err); + return null; + }); + + if (error) { + throw error; + } + + return res; +}; diff --git a/src/lib/components/chat/MessageInput.svelte b/src/lib/components/chat/MessageInput.svelte index 0844c489..5ad78119 100644 --- a/src/lib/components/chat/MessageInput.svelte +++ b/src/lib/components/chat/MessageInput.svelte @@ -11,6 +11,7 @@ import { SUPPORTED_FILE_TYPE, SUPPORTED_FILE_EXTENSIONS } from '$lib/constants'; import Documents from './MessageInput/Documents.svelte'; import Models from './MessageInput/Models.svelte'; + import { transcribeAudio } from '$lib/apis/audio'; export let submitPrompt: Function; export let stopResponse: Function; @@ -201,6 +202,9 @@ console.log(file, file.name.split('.').at(-1)); if (['image/gif', 'image/jpeg', 'image/png'].includes(file['type'])) { reader.readAsDataURL(file); + } else if (['audio/mpeg', 'audio/wav'].includes(file['type'])) { + const res = await transcribeAudio(localStorage.token, file); + console.log(res); } else if ( SUPPORTED_FILE_TYPE.includes(file['type']) || SUPPORTED_FILE_EXTENSIONS.includes(file.name.split('.').at(-1)) diff --git a/src/lib/constants.ts b/src/lib/constants.ts index b373eb11..ce25a314 100644 --- a/src/lib/constants.ts +++ b/src/lib/constants.ts @@ -7,6 +7,7 @@ export const WEBUI_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1`; export const OLLAMA_API_BASE_URL = `${WEBUI_BASE_URL}/ollama/api`; export const OPENAI_API_BASE_URL = `${WEBUI_BASE_URL}/openai/api`; export const RAG_API_BASE_URL = `${WEBUI_BASE_URL}/rag/api/v1`; +export const AUDIO_API_BASE_URL = `${WEBUI_BASE_URL}/audio/api/v1`; export const WEB_UI_VERSION = 'v1.0.0-alpha-static'; From c6c69924d1ea99f83a2465d98e4f53d4d58e2947 Mon Sep 17 00:00:00 2001 From: "Timothy J. 
Baek" Date: Sun, 11 Feb 2024 01:06:25 -0800 Subject: [PATCH 2/6] feat: audio rag support --- src/lib/components/chat/MessageInput.svelte | 19 +++++++++++++++---- src/lib/constants.ts | 4 +++- src/lib/utils/index.ts | 6 ++++++ 3 files changed, 24 insertions(+), 5 deletions(-) diff --git a/src/lib/components/chat/MessageInput.svelte b/src/lib/components/chat/MessageInput.svelte index 5ad78119..aae99992 100644 --- a/src/lib/components/chat/MessageInput.svelte +++ b/src/lib/components/chat/MessageInput.svelte @@ -2,7 +2,7 @@ import toast from 'svelte-french-toast'; import { onMount, tick } from 'svelte'; import { settings } from '$lib/stores'; - import { calculateSHA256, findWordIndices } from '$lib/utils'; + import { blobToFile, calculateSHA256, findWordIndices } from '$lib/utils'; import Prompts from './MessageInput/PromptCommands.svelte'; import Suggestions from './MessageInput/Suggestions.svelte'; @@ -124,6 +124,20 @@ try { files = [...files, doc]; + + if (['audio/mpeg', 'audio/wav'].includes(file['type'])) { + const res = await transcribeAudio(localStorage.token, file).catch((error) => { + toast.error(error); + return null; + }); + + if (res) { + console.log(res); + const blob = new Blob([res.text], { type: 'text/plain' }); + file = blobToFile(blob, `${file.name}.txt`); + } + } + const res = await uploadDocToVectorDB(localStorage.token, '', file); if (res) { @@ -202,9 +216,6 @@ console.log(file, file.name.split('.').at(-1)); if (['image/gif', 'image/jpeg', 'image/png'].includes(file['type'])) { reader.readAsDataURL(file); - } else if (['audio/mpeg', 'audio/wav'].includes(file['type'])) { - const res = await transcribeAudio(localStorage.token, file); - console.log(res); } else if ( SUPPORTED_FILE_TYPE.includes(file['type']) || SUPPORTED_FILE_EXTENSIONS.includes(file.name.split('.').at(-1)) diff --git a/src/lib/constants.ts b/src/lib/constants.ts index ce25a314..3200ad44 100644 --- a/src/lib/constants.ts +++ b/src/lib/constants.ts @@ -24,7 +24,9 @@ export const SUPPORTED_FILE_TYPE = [ 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'application/octet-stream', 'application/x-javascript', - 'text/markdown' + 'text/markdown', + 'audio/mpeg', + 'audio/wav' ]; export const SUPPORTED_FILE_EXTENSIONS = [ diff --git a/src/lib/utils/index.ts b/src/lib/utils/index.ts index fa0963ea..79e53d9a 100644 --- a/src/lib/utils/index.ts +++ b/src/lib/utils/index.ts @@ -341,3 +341,9 @@ export const extractSentences = (text) => { .map((sentence) => removeEmojis(sentence.trim())) .filter((sentence) => sentence !== ''); }; + +export const blobToFile = (blob, fileName) => { + // Create a new File object from the Blob + const file = new File([blob], fileName, { type: blob.type }); + return file; +}; From f245c6269227a0febd72ebc9d2c067173183ed62 Mon Sep 17 00:00:00 2001 From: "Timothy J. 
Baek" Date: Sun, 11 Feb 2024 02:12:49 -0800 Subject: [PATCH 3/6] feat: whisper voice input --- src/lib/components/chat/MessageInput.svelte | 217 +++++++++++++----- .../chat/Messages/ResponseMessage.svelte | 6 +- src/lib/components/chat/Settings/Voice.svelte | 72 ++++-- 3 files changed, 219 insertions(+), 76 deletions(-) diff --git a/src/lib/components/chat/MessageInput.svelte b/src/lib/components/chat/MessageInput.svelte index aae99992..29354610 100644 --- a/src/lib/components/chat/MessageInput.svelte +++ b/src/lib/components/chat/MessageInput.svelte @@ -35,7 +35,6 @@ export let fileUploadEnabled = true; export let speechRecognitionEnabled = true; - export let speechRecognitionListening = false; export let prompt = ''; export let messages = []; @@ -51,62 +50,170 @@ } } + let mediaRecorder; + let audioChunks = []; + let isRecording = false; + const MIN_DECIBELS = -45; + + const startRecording = async () => { + const stream = await navigator.mediaDevices.getUserMedia({ audio: true }); + mediaRecorder = new MediaRecorder(stream); + mediaRecorder.onstart = () => { + isRecording = true; + console.log('Recording started'); + }; + mediaRecorder.ondataavailable = (event) => audioChunks.push(event.data); + mediaRecorder.onstop = async () => { + isRecording = false; + console.log('Recording stopped'); + + // Create a blob from the audio chunks + const audioBlob = new Blob(audioChunks, { type: 'audio/wav' }); + + const file = blobToFile(audioBlob, 'recording.wav'); + + const res = await transcribeAudio(localStorage.token, file).catch((error) => { + toast.error(error); + return null; + }); + + if (res) { + prompt = res.text; + await tick(); + + const inputElement = document.getElementById('chat-textarea'); + inputElement?.focus(); + + if (prompt !== '' && $settings?.speechAutoSend === true) { + submitPrompt(prompt, user); + } + } + + // saveRecording(audioBlob); + audioChunks = []; + }; + + // Start recording + mediaRecorder.start(); + + // Monitor silence + monitorSilence(stream); + }; + + const monitorSilence = (stream) => { + const audioContext = new AudioContext(); + const audioStreamSource = audioContext.createMediaStreamSource(stream); + const analyser = audioContext.createAnalyser(); + analyser.minDecibels = MIN_DECIBELS; + audioStreamSource.connect(analyser); + + const bufferLength = analyser.frequencyBinCount; + const domainData = new Uint8Array(bufferLength); + + let lastSoundTime = Date.now(); + + const detectSound = () => { + analyser.getByteFrequencyData(domainData); + + if (domainData.some((value) => value > 0)) { + lastSoundTime = Date.now(); + } + + if (isRecording && Date.now() - lastSoundTime > 3000) { + mediaRecorder.stop(); + audioContext.close(); + return; + } + + window.requestAnimationFrame(detectSound); + }; + + window.requestAnimationFrame(detectSound); + }; + + const saveRecording = (blob) => { + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + document.body.appendChild(a); + a.style = 'display: none'; + a.href = url; + a.download = 'recording.wav'; + a.click(); + window.URL.revokeObjectURL(url); + }; + const speechRecognitionHandler = () => { // Check if SpeechRecognition is supported - if (speechRecognitionListening) { - speechRecognition.stop(); + if (isRecording) { + if (speechRecognition) { + speechRecognition.stop(); + } + + if (mediaRecorder) { + mediaRecorder.stop(); + } } else { - if ('SpeechRecognition' in window || 'webkitSpeechRecognition' in window) { - // Create a SpeechRecognition object - speechRecognition = new 
(window.SpeechRecognition || window.webkitSpeechRecognition)(); + isRecording = true; - // Set continuous to true for continuous recognition - speechRecognition.continuous = true; - - // Set the timeout for turning off the recognition after inactivity (in milliseconds) - const inactivityTimeout = 3000; // 3 seconds - - let timeoutId; - // Start recognition - speechRecognition.start(); - speechRecognitionListening = true; - - // Event triggered when speech is recognized - speechRecognition.onresult = function (event) { - // Clear the inactivity timeout - clearTimeout(timeoutId); - - // Handle recognized speech - console.log(event); - const transcript = event.results[Object.keys(event.results).length - 1][0].transcript; - prompt = `${prompt}${transcript}`; - - // Restart the inactivity timeout - timeoutId = setTimeout(() => { - console.log('Speech recognition turned off due to inactivity.'); - speechRecognition.stop(); - }, inactivityTimeout); - }; - - // Event triggered when recognition is ended - speechRecognition.onend = function () { - // Restart recognition after it ends - console.log('recognition ended'); - speechRecognitionListening = false; - if (prompt !== '' && $settings?.speechAutoSend === true) { - submitPrompt(prompt, user); - } - }; - - // Event triggered when an error occurs - speechRecognition.onerror = function (event) { - console.log(event); - toast.error(`Speech recognition error: ${event.error}`); - speechRecognitionListening = false; - }; + if ($settings?.voice?.STTEngine ?? '' !== '') { + startRecording(); } else { - toast.error('SpeechRecognition API is not supported in this browser.'); + if ('SpeechRecognition' in window || 'webkitSpeechRecognition' in window) { + // Create a SpeechRecognition object + speechRecognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)(); + + // Set continuous to true for continuous recognition + speechRecognition.continuous = true; + + // Set the timeout for turning off the recognition after inactivity (in milliseconds) + const inactivityTimeout = 3000; // 3 seconds + + let timeoutId; + // Start recognition + speechRecognition.start(); + + // Event triggered when speech is recognized + speechRecognition.onresult = async (event) => { + // Clear the inactivity timeout + clearTimeout(timeoutId); + + // Handle recognized speech + console.log(event); + const transcript = event.results[Object.keys(event.results).length - 1][0].transcript; + + prompt = `${prompt}${transcript}`; + + await tick(); + const inputElement = document.getElementById('chat-textarea'); + inputElement?.focus(); + + // Restart the inactivity timeout + timeoutId = setTimeout(() => { + console.log('Speech recognition turned off due to inactivity.'); + speechRecognition.stop(); + }, inactivityTimeout); + }; + + // Event triggered when recognition is ended + speechRecognition.onend = function () { + // Restart recognition after it ends + console.log('recognition ended'); + isRecording = false; + if (prompt !== '' && $settings?.speechAutoSend === true) { + submitPrompt(prompt, user); + } + }; + + // Event triggered when an error occurs + speechRecognition.onerror = function (event) { + console.log(event); + toast.error(`Speech recognition error: ${event.error}`); + isRecording = false; + }; + } else { + toast.error('SpeechRecognition API is not supported in this browser.'); + } } } }; @@ -550,7 +657,7 @@ : ' pl-4'} rounded-xl resize-none h-[48px]" placeholder={chatInputPlaceholder !== '' ? 
chatInputPlaceholder - : speechRecognitionListening + : isRecording ? 'Listening...' : 'Send a message'} bind:value={prompt} @@ -659,6 +766,10 @@ e.target.style.height = Math.min(e.target.scrollHeight, 200) + 'px'; user = null; }} + on:focus={(e) => { + e.target.style.height = ''; + e.target.style.height = Math.min(e.target.scrollHeight, 200) + 'px'; + }} on:paste={(e) => { const clipboardData = e.clipboardData || window.clipboardData; @@ -696,7 +807,7 @@ speechRecognitionHandler(); }} > - {#if speechRecognitionListening} + {#if isRecording} { @@ -179,7 +179,7 @@ for (const [idx, sentence] of sentences.entries()) { const res = await synthesizeOpenAISpeech( localStorage.token, - $settings?.speech?.speaker, + $settings?.voice?.speaker, sentence ).catch((error) => { toast.error(error); @@ -204,7 +204,7 @@ clearInterval(getVoicesLoop); const voice = - voices?.filter((v) => v.name === $settings?.speech?.speaker)?.at(0) ?? undefined; + voices?.filter((v) => v.name === $settings?.voice?.speaker)?.at(0) ?? undefined; const speak = new SpeechSynthesisUtterance(message.content); diff --git a/src/lib/components/chat/Settings/Voice.svelte b/src/lib/components/chat/Settings/Voice.svelte index 0dc7f0c5..5867b917 100644 --- a/src/lib/components/chat/Settings/Voice.svelte +++ b/src/lib/components/chat/Settings/Voice.svelte @@ -1,17 +1,21 @@
-	TTS Settings
+	STT Settings
-	Speech Engine
+	Speech-to-Text Engine
@@ -155,6 +160,33 @@ {/if}
+	TTS Settings
+	Text-to-Speech Engine
+	Auto-playback response
@@ -177,7 +209,7 @@
-	{#if engine === ''}
+	{#if TTSEngine === ''}
 	Set Voice
@@ -196,7 +228,7 @@
-	{:else if engine === 'openai'}
+	{:else if TTSEngine === 'openai'}
 	Set Voice
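
Taken together, PATCH 1 and PATCH 3 route recorded audio through the backend's faster-whisper model. The sketch below isolates that transcription step from FastAPI so it can be tried on its own; the audio file name and cache directory are illustrative assumptions rather than values fixed by the patches, and the model name comes from WHISPER_MODEL_NAME in config.py ("tiny" at this point, bumped to "base" in the next patch).

# Minimal sketch of the transcription call behind /audio/api/v1/transcribe.
# Assumes faster-whisper is installed (backend/requirements.txt adds it);
# "sample.wav" and the download_root path are placeholders, not patch values.
from faster_whisper import WhisperModel

model = WhisperModel(
    "tiny",  # WHISPER_MODEL_NAME default here; PATCH 4 changes it to "base"
    device="cpu",
    compute_type="int8",
    download_root="./data/cache/whisper/models",  # stands in for f"{CACHE_DIR}/whisper/models"
)

segments, info = model.transcribe("sample.wav", beam_size=5)
print(
    "Detected language '%s' with probability %f"
    % (info.language, info.language_probability)
)

transcript = "".join(segment.text for segment in segments).strip()
print(transcript)

The int8 CPU compute type mirrors the endpoint's own defaults, which keeps transcription usable on machines without a GPU.
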
From 98496a70559e9cf612890319ee916ab5a150e58f Mon Sep 17 00:00:00 2001 From: "Timothy J. Baek" Date: Sun, 11 Feb 2024 02:15:54 -0800 Subject: [PATCH 4/6] feat: update default whisper model --- backend/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/config.py b/backend/config.py index cf6e8139..81b90084 100644 --- a/backend/config.py +++ b/backend/config.py @@ -136,4 +136,4 @@ CHUNK_OVERLAP = 100 #################################### # Transcribe #################################### -WHISPER_MODEL_NAME = "tiny" +WHISPER_MODEL_NAME = "base" From 2ef4fc121587ac8ca9aebec26dcaedc31d741af1 Mon Sep 17 00:00:00 2001 From: "Timothy J. Baek" Date: Sun, 11 Feb 2024 02:17:24 -0800 Subject: [PATCH 5/6] fix: strip transcription text --- backend/apps/audio/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/apps/audio/main.py b/backend/apps/audio/main.py index f80e3ac8..c03aaf37 100644 --- a/backend/apps/audio/main.py +++ b/backend/apps/audio/main.py @@ -69,7 +69,7 @@ def transcribe( transcript = "".join([segment.text for segment in list(segments)]) - return {"text": transcript} + return {"text": transcript.strip()} except Exception as e: print(e) From 3ce8e8a2ae4b9926fc4be511df6656704dfc7022 Mon Sep 17 00:00:00 2001 From: "Timothy J. Baek" Date: Sun, 11 Feb 2024 02:21:06 -0800 Subject: [PATCH 6/6] refac: rename voice to audio --- .../components/chat/Messages/ResponseMessage.svelte | 6 +++--- .../chat/Settings/{Voice.svelte => Audio.svelte} | 10 +++++----- src/lib/components/chat/SettingsModal.svelte | 12 ++++++------ 3 files changed, 14 insertions(+), 14 deletions(-) rename src/lib/components/chat/Settings/{Voice.svelte => Audio.svelte} (97%) diff --git a/src/lib/components/chat/Messages/ResponseMessage.svelte b/src/lib/components/chat/Messages/ResponseMessage.svelte index 10f1582b..1d8a4fa5 100644 --- a/src/lib/components/chat/Messages/ResponseMessage.svelte +++ b/src/lib/components/chat/Messages/ResponseMessage.svelte @@ -148,7 +148,7 @@ } else { speaking = true; - if ($settings?.voice?.TTSEngine === 'openai') { + if ($settings?.audio?.TTSEngine === 'openai') { loadingSpeech = true; const sentences = extractSentences(message.content).reduce((mergedTexts, currentText) => { @@ -179,7 +179,7 @@ for (const [idx, sentence] of sentences.entries()) { const res = await synthesizeOpenAISpeech( localStorage.token, - $settings?.voice?.speaker, + $settings?.audio?.speaker, sentence ).catch((error) => { toast.error(error); @@ -204,7 +204,7 @@ clearInterval(getVoicesLoop); const voice = - voices?.filter((v) => v.name === $settings?.voice?.speaker)?.at(0) ?? undefined; + voices?.filter((v) => v.name === $settings?.audio?.speaker)?.at(0) ?? undefined; const speak = new SpeechSynthesisUtterance(message.content); diff --git a/src/lib/components/chat/Settings/Voice.svelte b/src/lib/components/chat/Settings/Audio.svelte similarity index 97% rename from src/lib/components/chat/Settings/Voice.svelte rename to src/lib/components/chat/Settings/Audio.svelte index 5867b917..289ec0bb 100644 --- a/src/lib/components/chat/Settings/Voice.svelte +++ b/src/lib/components/chat/Settings/Audio.svelte @@ -5,7 +5,7 @@ export let saveSettings: Function; - // Voice + // Audio let STTEngines = ['', 'openai']; let STTEngine = ''; @@ -74,9 +74,9 @@ speechAutoSend = settings.speechAutoSend ?? false; responseAutoPlayback = settings.responseAutoPlayback ?? false; - STTEngine = settings?.voice?.STTEngine ?? ''; - TTSEngine = settings?.voice?.TTSEngine ?? 
''; - speaker = settings?.voice?.speaker ?? ''; + STTEngine = settings?.audio?.STTEngine ?? ''; + TTSEngine = settings?.audio?.TTSEngine ?? ''; + speaker = settings?.audio?.speaker ?? ''; if (TTSEngine === 'openai') { getOpenAIVoices(); @@ -90,7 +90,7 @@ class="flex flex-col h-full justify-between space-y-3 text-sm" on:submit|preventDefault={() => { saveSettings({ - voice: { + audio: { STTEngine: STTEngine !== '' ? STTEngine : undefined, TTSEngine: TTSEngine !== '' ? TTSEngine : undefined, speaker: speaker !== '' ? speaker : undefined diff --git a/src/lib/components/chat/SettingsModal.svelte b/src/lib/components/chat/SettingsModal.svelte index 83b2e307..3e5cf066 100644 --- a/src/lib/components/chat/SettingsModal.svelte +++ b/src/lib/components/chat/SettingsModal.svelte @@ -13,7 +13,7 @@ import General from './Settings/General.svelte'; import External from './Settings/External.svelte'; import Interface from './Settings/Interface.svelte'; - import Voice from './Settings/Voice.svelte'; + import Audio from './Settings/Audio.svelte'; import Chats from './Settings/Chats.svelte'; export let show = false; @@ -206,11 +206,11 @@
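
With the audio app mounted at /audio/api/v1 in backend/main.py and AUDIO_API_BASE_URL pointing at it, an authenticated client can call the transcription endpoint directly. A rough Python sketch follows; the base URL, token, and file name are assumptions about a local deployment rather than values from the patches, and the endpoint only accepts audio/mpeg and audio/wav uploads.

# Client-side sketch of the endpoint added in PATCH 1.
# Assumes the requests library is installed; BASE_URL and TOKEN are placeholders.
import requests

BASE_URL = "http://localhost:8080"  # assumption: backend reachable locally
TOKEN = "your-session-token"        # assumption: a valid bearer token for the app

with open("recording.wav", "rb") as f:
    res = requests.post(
        f"{BASE_URL}/audio/api/v1/transcribe",
        headers={"Authorization": f"Bearer {TOKEN}"},
        # The content type must be audio/mpeg or audio/wav, or the endpoint returns 400.
        files={"file": ("recording.wav", f, "audio/wav")},
    )

res.raise_for_status()
print(res.json()["text"])
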