open-webui/backend/apps/rag/main.py

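"""FastAPI sub-application for Retrieval Augmented Generation (RAG) in Open WebUI:
ingests uploaded documents, scanned files, and web pages into Chroma collections,
serves similarity-search queries over them, and exposes admin endpoints for the
chunking parameters and the RAG prompt template."""
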
from fastapi import (
    FastAPI,
    Request,
    Depends,
    HTTPException,
    status,
    UploadFile,
    File,
    Form,
)
from fastapi.middleware.cors import CORSMiddleware

import os, shutil

from pathlib import Path
from typing import List

# from chromadb.utils import embedding_functions

from langchain_community.document_loaders import (
    WebBaseLoader,
    TextLoader,
    PyPDFLoader,
    CSVLoader,
    Docx2txtLoader,
    UnstructuredEPubLoader,
    UnstructuredWordDocumentLoader,
    UnstructuredMarkdownLoader,
    UnstructuredXMLLoader,
    UnstructuredRSTLoader,
    UnstructuredExcelLoader,
)

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import Chroma

from pydantic import BaseModel
from typing import Optional

import mimetypes
import uuid
import json
import time

from apps.web.models.documents import (
    Documents,
    DocumentForm,
    DocumentResponse,
)

from utils.misc import (
    calculate_sha256,
    calculate_sha256_string,
    sanitize_filename,
    extract_folders_after_data_docs,
)
from utils.utils import get_current_user, get_admin_user

from config import (
    UPLOAD_DIR,
    DOCS_DIR,
    EMBED_MODEL,
    CHROMA_CLIENT,
    CHUNK_SIZE,
    CHUNK_OVERLAP,
    RAG_TEMPLATE,
)

from constants import ERROR_MESSAGES

# EMBEDDING_FUNC = embedding_functions.SentenceTransformerEmbeddingFunction(
#     model_name=EMBED_MODEL
# )

app = FastAPI()

app.state.CHUNK_SIZE = CHUNK_SIZE
app.state.CHUNK_OVERLAP = CHUNK_OVERLAP
app.state.RAG_TEMPLATE = RAG_TEMPLATE

origins = ["*"]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class CollectionNameForm(BaseModel):
    collection_name: Optional[str] = "test"


class StoreWebForm(CollectionNameForm):
    url: str
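

# store_data_in_vector_db splits the already-loaded documents using the chunk size
# and overlap configured on app.state, then writes the chunks, their metadata, and
# random UUID ids into a Chroma collection of the given name. It returns True when
# the data is stored or when the collection already exists (UniqueConstraintError).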
def store_data_in_vector_db(data, collection_name) -> bool:
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=app.state.CHUNK_SIZE, chunk_overlap=app.state.CHUNK_OVERLAP
    )
    docs = text_splitter.split_documents(data)

    texts = [doc.page_content for doc in docs]
    metadatas = [doc.metadata for doc in docs]

    try:
        collection = CHROMA_CLIENT.create_collection(name=collection_name)

        collection.add(
            documents=texts, metadatas=metadatas, ids=[str(uuid.uuid1()) for _ in texts]
        )
        return True
    except Exception as e:
        print(e)
        if e.__class__.__name__ == "UniqueConstraintError":
            return True

        return False
@app.get("/")
async def get_status():
2024-02-18 07:29:52 +01:00
return {
"status": True,
"chunk_size": app.state.CHUNK_SIZE,
"chunk_overlap": app.state.CHUNK_OVERLAP,
}
@app.get("/chunk")
async def get_chunk_params(user=Depends(get_admin_user)):
return {
"status": True,
"chunk_size": app.state.CHUNK_SIZE,
"chunk_overlap": app.state.CHUNK_OVERLAP,
}


class ChunkParamUpdateForm(BaseModel):
    chunk_size: int
    chunk_overlap: int


@app.post("/chunk/update")
async def update_chunk_params(
    form_data: ChunkParamUpdateForm, user=Depends(get_admin_user)
):
    app.state.CHUNK_SIZE = form_data.chunk_size
    app.state.CHUNK_OVERLAP = form_data.chunk_overlap

    return {
        "status": True,
        "chunk_size": app.state.CHUNK_SIZE,
        "chunk_overlap": app.state.CHUNK_OVERLAP,
    }
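

# Both the chunking parameters above and the RAG prompt template below can be
# changed at runtime by an admin; e.g. POST /chunk/update with a body such as
# {"chunk_size": 1500, "chunk_overlap": 100} (hypothetical values) affects all
# subsequent ingestions, while already-indexed collections keep their old chunks.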
@app.get("/template")
async def get_rag_template(user=Depends(get_current_user)):
return {
"status": True,
"template": app.state.RAG_TEMPLATE,
}
class RAGTemplateForm(BaseModel):
template: str
@app.post("/template/update")
async def update_rag_template(form_data: RAGTemplateForm, user=Depends(get_admin_user)):
# TODO: check template requirements
app.state.RAG_TEMPLATE = form_data.template
return {"status": True, "template": app.state.RAG_TEMPLATE}
2024-02-04 00:57:06 +01:00


class QueryDocForm(BaseModel):
    collection_name: str
    query: str
    k: Optional[int] = 4
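

# /query/doc runs a similarity search against a single collection and returns the
# raw Chroma query result (ids, distances, metadatas, documents). Example body
# (hypothetical values): {"collection_name": "<sha256>", "query": "refund policy", "k": 4}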
@app.post("/query/doc")
def query_doc(
form_data: QueryDocForm,
2024-01-07 11:46:12 +01:00
user=Depends(get_current_user),
):
2024-01-07 10:59:00 +01:00
try:
collection = CHROMA_CLIENT.get_collection(
2024-02-01 22:35:41 +01:00
name=form_data.collection_name,
2024-01-07 10:59:00 +01:00
)
2024-02-01 22:35:41 +01:00
result = collection.query(query_texts=[form_data.query], n_results=form_data.k)
2024-01-07 10:59:00 +01:00
return result
except Exception as e:
print(e)
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=ERROR_MESSAGES.DEFAULT(e),
)
2024-01-07 07:59:22 +01:00
2024-02-01 22:35:41 +01:00


class QueryCollectionsForm(BaseModel):
    collection_names: List[str]
    query: str
    k: Optional[int] = 4
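

# merge_and_sort_query_results combines several single-collection Chroma results
# (each shaped like {"ids": [[...]], "distances": [[...]], ...}), sorts every chunk
# by ascending distance, and keeps the global top k so that /query/collection
# behaves like one query across all requested collections. For example, merging
# results with distances [[0.2, 0.9]] and [[0.4]] at k=2 keeps the 0.2 and 0.4 chunks.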
def merge_and_sort_query_results(query_results, k):
    # Initialize lists to store combined data
    combined_ids = []
    combined_distances = []
    combined_metadatas = []
    combined_documents = []

    # Combine data from each dictionary
    for data in query_results:
        combined_ids.extend(data["ids"][0])
        combined_distances.extend(data["distances"][0])
        combined_metadatas.extend(data["metadatas"][0])
        combined_documents.extend(data["documents"][0])

    # Create a list of tuples (distance, id, metadata, document)
    combined = list(
        zip(combined_distances, combined_ids, combined_metadatas, combined_documents)
    )

    # Sort the list based on distances
    combined.sort(key=lambda x: x[0])

    # Unzip the sorted list
    sorted_distances, sorted_ids, sorted_metadatas, sorted_documents = zip(*combined)

    # Slicing the lists to include only k elements
    sorted_distances = list(sorted_distances)[:k]
    sorted_ids = list(sorted_ids)[:k]
    sorted_metadatas = list(sorted_metadatas)[:k]
    sorted_documents = list(sorted_documents)[:k]

    # Create the output dictionary
    merged_query_results = {
        "ids": [sorted_ids],
        "distances": [sorted_distances],
        "metadatas": [sorted_metadatas],
        "documents": [sorted_documents],
        "embeddings": None,
        "uris": None,
        "data": None,
    }

    return merged_query_results
@app.post("/query/collection")
def query_collection(
2024-02-01 22:35:41 +01:00
form_data: QueryCollectionsForm,
user=Depends(get_current_user),
):
results = []
for collection_name in form_data.collection_names:
try:
collection = CHROMA_CLIENT.get_collection(
name=collection_name,
)
result = collection.query(
query_texts=[form_data.query], n_results=form_data.k
)
results.append(result)
except:
pass
2024-02-03 23:44:49 +01:00
return merge_and_sort_query_results(results, form_data.k)
2024-02-01 22:35:41 +01:00
2024-01-07 07:59:22 +01:00
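

# /web indexes a remote page: the URL is fetched with WebBaseLoader and, when no
# collection name is supplied, the SHA-256 of the URL (truncated to 63 characters,
# the maximum length of a Chroma collection name) is used as the collection name.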
@app.post("/web")
2024-01-07 11:46:12 +01:00
def store_web(form_data: StoreWebForm, user=Depends(get_current_user)):
2024-01-07 07:59:22 +01:00
# "https://www.gutenberg.org/files/1727/1727-h/1727-h.htm"
try:
loader = WebBaseLoader(form_data.url)
data = loader.load()
2024-01-27 07:17:28 +01:00
collection_name = form_data.collection_name
if collection_name == "":
collection_name = calculate_sha256_string(form_data.url)[:63]
store_data_in_vector_db(data, collection_name)
2024-01-08 10:26:15 +01:00
return {
"status": True,
2024-01-27 07:17:28 +01:00
"collection_name": collection_name,
2024-01-08 10:26:15 +01:00
"filename": form_data.url,
}
2024-01-07 07:59:22 +01:00
except Exception as e:
print(e)
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=ERROR_MESSAGES.DEFAULT(e),
)
2024-02-18 06:06:08 +01:00
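

# get_loader picks a LangChain document loader from the file extension and/or MIME
# type; known source-code extensions are read with TextLoader, and anything
# unrecognized still falls back to TextLoader but is reported with known_type=False.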
def get_loader(filename: str, file_content_type: str, file_path: str):
    file_ext = filename.split(".")[-1].lower()
    known_type = True

    known_source_ext = [
        "go",
        "py",
        "java",
        "sh",
        "bat",
        "ps1",
        "cmd",
        "js",
        "ts",
        "css",
        "cpp",
        "hpp",
        "h",
        "c",
        "cs",
        "sql",
        "log",
        "ini",
        "pl",
        "pm",
        "r",
        "dart",
        "dockerfile",
        "env",
        "php",
        "hs",
        "hsc",
        "lua",
        "nginxconf",
        "conf",
        "m",
        "mm",
        "plsql",
        "perl",
        "rb",
        "rs",
        "db2",
        "scala",
        "bash",
        "swift",
        "vue",
        "svelte",
    ]
if file_ext == "pdf":
loader = PyPDFLoader(file_path)
elif file_ext == "csv":
loader = CSVLoader(file_path)
elif file_ext == "rst":
loader = UnstructuredRSTLoader(file_path, mode="elements")
elif file_ext == "xml":
loader = UnstructuredXMLLoader(file_path)
elif file_ext == "md":
loader = UnstructuredMarkdownLoader(file_path)
2024-02-18 06:06:08 +01:00
elif file_content_type == "application/epub+zip":
2024-01-25 09:24:49 +01:00
loader = UnstructuredEPubLoader(file_path)
elif (
2024-02-18 06:06:08 +01:00
file_content_type
2024-01-25 09:24:49 +01:00
== "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
or file_ext in ["doc", "docx"]
):
loader = Docx2txtLoader(file_path)
2024-02-18 06:06:08 +01:00
elif file_content_type in [
2024-01-25 09:24:49 +01:00
"application/vnd.ms-excel",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
] or file_ext in ["xls", "xlsx"]:
loader = UnstructuredExcelLoader(file_path)
2024-02-18 06:06:08 +01:00
elif file_ext in known_source_ext or file_content_type.find("text/") >= 0:
2024-01-25 09:24:49 +01:00
loader = TextLoader(file_path)
else:
loader = TextLoader(file_path)
known_type = False
return loader, known_type
2024-01-07 07:59:22 +01:00
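

# /doc accepts a multipart upload (fields: "file", optional "collection_name"),
# saves it under UPLOAD_DIR, derives the collection name from the file's SHA-256
# when none is given, then loads and indexes the content via get_loader.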
@app.post("/doc")
2024-01-07 11:46:12 +01:00
def store_doc(
2024-01-07 18:00:30 +01:00
collection_name: Optional[str] = Form(None),
2024-01-07 11:46:12 +01:00
file: UploadFile = File(...),
user=Depends(get_current_user),
):
2024-01-07 07:59:22 +01:00
# "https://www.gutenberg.org/files/1727/1727-h/1727-h.htm"
2024-01-07 08:40:51 +01:00
2024-01-10 00:24:53 +01:00
print(file.content_type)
2024-01-07 07:59:22 +01:00
try:
2024-01-07 08:40:51 +01:00
filename = file.filename
2024-01-07 10:40:36 +01:00
file_path = f"{UPLOAD_DIR}/{filename}"
2024-01-07 07:59:22 +01:00
contents = file.file.read()
2024-01-07 08:40:51 +01:00
with open(file_path, "wb") as f:
2024-01-07 07:59:22 +01:00
f.write(contents)
f.close()
2024-01-07 18:00:30 +01:00
f = open(file_path, "rb")
if collection_name == None:
collection_name = calculate_sha256(f)[:63]
f.close()
2024-02-18 06:06:08 +01:00
loader, known_type = get_loader(file.filename, file.content_type, file_path)
2024-01-07 08:40:51 +01:00
data = loader.load()
2024-01-07 10:40:36 +01:00
result = store_data_in_vector_db(data, collection_name)
if result:
2024-01-08 10:26:15 +01:00
return {
"status": True,
"collection_name": collection_name,
"filename": filename,
2024-01-25 09:24:49 +01:00
"known_type": known_type,
2024-01-08 10:26:15 +01:00
}
2024-01-07 10:40:36 +01:00
else:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=ERROR_MESSAGES.DEFAULT(),
)
2024-01-07 07:59:22 +01:00
except Exception as e:
print(e)
2024-01-13 14:46:56 +01:00
if "No pandoc was found" in str(e):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=ERROR_MESSAGES.PANDOC_NOT_INSTALLED,
)
else:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=ERROR_MESSAGES.DEFAULT(e),
)
2024-01-07 07:59:22 +01:00
2024-02-18 06:06:08 +01:00
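

# /scan (admin only) walks DOCS_DIR, indexes every non-hidden file, and registers a
# Documents record for new files, tagging it with the folder names extracted by
# extract_folders_after_data_docs.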
@app.get("/scan")
def scan_docs_dir(user=Depends(get_admin_user)):
try:
for path in Path(DOCS_DIR).rglob("./**/*"):
if path.is_file() and not path.name.startswith("."):
tags = extract_folders_after_data_docs(path)
filename = path.name
file_content_type = mimetypes.guess_type(path)
f = open(path, "rb")
collection_name = calculate_sha256(f)[:63]
f.close()
2024-02-18 06:31:46 +01:00
loader, known_type = get_loader(
filename, file_content_type[0], str(path)
)
2024-02-18 06:06:08 +01:00
data = loader.load()
result = store_data_in_vector_db(data, collection_name)
if result:
sanitized_filename = sanitize_filename(filename)
doc = Documents.get_doc_by_name(sanitized_filename)
if doc == None:
doc = Documents.insert_new_doc(
user.id,
DocumentForm(
**{
"name": sanitized_filename,
"title": filename,
"collection_name": collection_name,
"filename": filename,
"content": (
json.dumps(
{
"tags": list(
map(
lambda name: {"name": name},
tags,
)
)
}
)
if len(tags)
else "{}"
),
}
),
)
except Exception as e:
print(e)
return True
2024-01-07 10:40:36 +01:00
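

# Admin-only maintenance endpoints: /reset/db clears the vector database, while
# /reset also empties UPLOAD_DIR before resetting Chroma.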
@app.get("/reset/db")
def reset_vector_db(user=Depends(get_admin_user)):
CHROMA_CLIENT.reset()
2024-01-07 10:40:36 +01:00
@app.get("/reset")
def reset(user=Depends(get_admin_user)) -> bool:
folder = f"{UPLOAD_DIR}"
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
2024-01-07 10:40:36 +01:00
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
2024-01-07 10:40:36 +01:00
except Exception as e:
print("Failed to delete %s. Reason: %s" % (file_path, e))
2024-01-07 10:40:36 +01:00
try:
CHROMA_CLIENT.reset()
except Exception as e:
print(e)
return True