titanhacker committed (verified)
Commit 7c4e3b0 · 1 Parent(s): 2bdbdb6

Upload 10 files

utils/__pycache__/chatbot.cpython-39.pyc ADDED
Binary file (5.05 kB).

utils/__pycache__/load_config.cpython-39.pyc ADDED
Binary file (3.17 kB).

utils/__pycache__/prepare_vectordb.cpython-39.pyc ADDED
Binary file (4.12 kB).

utils/__pycache__/ui_settings.cpython-39.pyc ADDED
Binary file (1.4 kB).

utils/__pycache__/upload_file.cpython-39.pyc ADDED
Binary file (1.75 kB).

utils/chatbot.py ADDED
@@ -0,0 +1,171 @@
+ import gradio as gr
+ import time
+ import os
+ from langchain.vectorstores import Chroma
+ from typing import List, Tuple
+ import re
+ import ast
+ import html
+ from utils.load_config import LoadConfig
+ from langchain.embeddings import HuggingFaceEmbeddings
+ import requests
+ import torch
+
+ FLASK_APP_ENDPOINT = "http://127.0.0.1:8888/generate_text"
+
+ APPCFG = LoadConfig()
+ URL = ""
+ hyperlink = f"[RAG]({URL})"
+
+
+ class ChatBot:
+     """
+     Class representing a chatbot with document retrieval and response generation capabilities.
+
+     This class provides static methods for responding to user queries, handling feedback, and
+     cleaning references from retrieved documents.
+     """
+     @staticmethod
+     def respond(chatbot: List,
+                 message: str,
+                 data_type: str = "Preprocessed doc",
+                 temperature: float = 0.1,
+                 top_k: int = 10,
+                 top_p: float = 0.1) -> Tuple:
+         """
+         Generate a response to a user query using document retrieval and language model completion.
+
+         Parameters:
+             chatbot (List): List representing the chatbot's conversation history.
+             message (str): The user's query.
+             data_type (str): Type of data used for document retrieval ("Preprocessed doc" or "Upload doc: Process for RAG").
+             temperature (float): Temperature parameter for language model sampling.
+             top_k (int): Top-k parameter for language model sampling.
+             top_p (float): Top-p (nucleus) parameter for language model sampling.
+
+         Returns:
+             Tuple: A tuple containing an empty string, the updated chat history, and references from retrieved documents.
+         """
+         # Retrieve embedding function from code env resources
+         # emb_model = "sentence-transformers/all-MiniLM-L6-v2"
+         embedding_function = HuggingFaceEmbeddings(
+             model_name="NeuML/pubmedbert-base-embeddings",
+             # cache_folder=os.getenv('SENTENCE_TRANSFORMERS_HOME')
+         )
+         if data_type == "Preprocessed doc":
+             if os.path.exists(APPCFG.persist_directory):
+                 vectordb = Chroma(persist_directory=APPCFG.persist_directory,
+                                   embedding_function=embedding_function)
+             else:
+                 chatbot.append(
+                     (message, f"VectorDB does not exist. Please first execute the 'upload_data_manually.py' module. For further information please visit {hyperlink}."))
+                 return "", chatbot, None
+
+         elif data_type == "Upload doc: Process for RAG":
+             if os.path.exists(APPCFG.custom_persist_directory):
+                 vectordb = Chroma(persist_directory=APPCFG.custom_persist_directory,
+                                   embedding_function=embedding_function)
+             else:
+                 chatbot.append(
+                     (message, "No file was uploaded. Please first upload your files using the 'upload' button."))
+                 return "", chatbot, None
+
+         docs = vectordb.similarity_search(message, k=APPCFG.k)
+         question = "# Prompt that you have to answer:\n" + message
+         retrieved_content, markdown_documents = ChatBot.clean_references(docs)
+         # Memory: include the last `number_of_q_a_pairs` exchanges in the prompt
+         chat_history = f"Chat history:\n {str(chatbot[-APPCFG.number_of_q_a_pairs:])}\n\n"
+         if APPCFG.add_history:
+             prompt_wrapper = f"{APPCFG.llm_system_role_with_history}\n\n{chat_history}\n\n{retrieved_content}{question}"
+         else:
+             prompt_wrapper = f"{APPCFG.llm_system_role_without_history}\n\n{question}\n\n{retrieved_content}"
+
+         print("========================")
+         print(prompt_wrapper)
+         print("========================")
+         messages = [
+             {"role": "user", "content": prompt_wrapper},
+         ]
+         data = {
+             "prompt": messages,
+             "max_new_tokens": APPCFG.max_new_tokens,
+             "do_sample": APPCFG.do_sample,
+             "temperature": temperature,
+             "top_k": top_k,
+             "top_p": top_p
+         }
+         response = requests.post(FLASK_APP_ENDPOINT, json=data)
+         # print(response.text)
+         response_json = response.json()
+
+         chatbot.append(
+             (message, response_json["response"]))
+         # Clean up GPU memory
+         del vectordb
+         del docs
+         torch.cuda.empty_cache()
+         return "", chatbot, markdown_documents
+
+     @staticmethod
+     def clean_references(documents: List) -> Tuple[str, str]:
+         """
+         Clean and format references from retrieved documents.
+
+         Parameters:
+             documents (List): List of retrieved documents.
+
+         Returns:
+             Tuple[str, str]: The cleaned content injected into the LLM prompt, and a
+             markdown-formatted reference string (source, page number, and PDF link)
+             for display in the UI.
+         """
+         server_url = "http://localhost:8000"
+         documents = [str(x) + "\n\n" for x in documents]
+         markdown_documents = ""
+         retrieved_content = ""
+         counter = 1
+         for doc in documents:
+             # Extract content and metadata (re.DOTALL lets '.' span newlines in the content)
+             content, metadata = re.match(
+                 r"page_content=(.*?)( metadata=\{.*\})", doc, flags=re.DOTALL).groups()
+             metadata = metadata.split('=', 1)[1]
+             metadata_dict = ast.literal_eval(metadata)
+
+             # Decode newlines and other escape sequences
+             content = bytes(content, "utf-8").decode("unicode_escape")
+
+             # Replace escaped newlines with actual newlines, strip model tokens,
+             # and collapse runs of whitespace
+             content = re.sub(r'\\n', '\n', content)
+             content = re.sub(r'\s*<EOS>\s*<pad>\s*', ' ', content)
+             content = re.sub(r'\s+', ' ', content).strip()
+
+             # Decode HTML entities
+             content = html.unescape(content)
+
+             # Drop any characters that cannot be round-tripped through UTF-8
+             content = content.encode('utf-8', 'ignore').decode('utf-8', 'ignore')
+
+             # Remove or replace special characters and typographic ligatures.
+             # This step may need to be customized based on the specific symbols in your documents.
+             content = re.sub(r'–', '-', content)   # en dash -> hyphen
+             content = re.sub(r'ﬁ', 'fi', content)  # fi ligature
+             content = re.sub(r'ﬂ', 'fl', content)  # fl ligature
+
+             pdf_url = f"{server_url}/{os.path.basename(metadata_dict['source'])}"
+             retrieved_content += f"# Content {counter}:\n" + content + "\n\n"
+
+             # Append cleaned content to the markdown string with two newlines between documents
+             markdown_documents += f"# Retrieved content {counter}:\n" + content + "\n\n" + \
+                 f"Source: {os.path.basename(metadata_dict['source'])}" + " | " + \
+                 f"Page number: {str(metadata_dict['page'])}" + " | " + \
+                 f"[View PDF]({pdf_url})" + "\n\n"
+             counter += 1
+
+         return retrieved_content, markdown_documents
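
The `respond` method delegates generation to a separate Flask service; that service is not included in this commit. The sketch below is a hypothetical minimal `/generate_text` endpoint inferred from the JSON payload built in `respond` and the `response_json["response"]` access; the generation step is a placeholder for whatever model backend actually serves the app.

```python
# Hypothetical minimal server for FLASK_APP_ENDPOINT, inferred from the
# request/response contract in ChatBot.respond. Not part of this commit.
from flask import Flask, request, jsonify

app = Flask(__name__)


@app.route("/generate_text", methods=["POST"])
def generate_text():
    data = request.get_json()
    messages = data["prompt"]  # list of {"role": ..., "content": ...} dicts
    # Placeholder: a real server would run the loaded LLM with
    # data["max_new_tokens"], data["do_sample"], data["temperature"],
    # data["top_k"], and data["top_p"].
    answer = f"(placeholder) prompt received: {messages[-1]['content'][:80]}"
    return jsonify({"response": answer})


if __name__ == "__main__":
    app.run(host="127.0.0.1", port=8888)
```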
utils/load_config.py ADDED
@@ -0,0 +1,89 @@
+ import os
+ from dotenv import load_dotenv
+ import yaml
+ from pyprojroot import here
+ import shutil
+
+ load_dotenv()
+
+
+ class LoadConfig:
+     """
+     A class for loading configuration settings and managing directories.
+
+     This class loads various configuration settings from the 'configs/app_config.yml' file,
+     including language model (LLM) configurations, retrieval configurations, splitter
+     configurations, and memory configurations. It also loads the Gemma token from the
+     environment and performs directory-related operations such as creating and removing
+     directories.
+     """
+
+     def __init__(self) -> None:
+         with open(here("configs/app_config.yml")) as cfg:
+             app_config = yaml.load(cfg, Loader=yaml.FullLoader)
+
+         # LLM configs
+         self.llm_engine = app_config["llm_config"]["engine"]
+         self.llm_system_role_with_history = app_config["llm_config"]["llm_system_role_with_history"]
+         self.llm_system_role_without_history = app_config[
+             "llm_config"]["llm_system_role_without_history"]
+         # Needs to be a string for path concatenation in the chromadb backend:
+         # self._settings.require("persist_directory") + "/chroma.sqlite3"
+         self.persist_directory = str(here(
+             app_config["directories"]["persist_directory"]))
+         self.custom_persist_directory = str(here(
+             app_config["directories"]["custom_persist_directory"]))
+         self.gemma_token = os.getenv("GEMMA_TOKEN")
+         self.device = app_config["llm_config"]["device"]
+
+         # Retrieval configs
+         self.data_directory = app_config["directories"]["data_directory"]
+         self.k = app_config["retrieval_config"]["k"]
+         self.chunk_size = int(app_config["splitter_config"]["chunk_size"])
+         self.chunk_overlap = int(
+             app_config["splitter_config"]["chunk_overlap"])
+         self.temperature = float(app_config["llm_config"]["temperature"])
+         self.add_history = bool(app_config["llm_config"]["add_history"])
+         self.top_k = int(app_config["llm_config"]["top_k"])
+         self.top_p = float(app_config["llm_config"]["top_p"])
+         self.max_new_tokens = int(app_config["llm_config"]["max_new_tokens"])
+         self.do_sample = bool(app_config["llm_config"]["do_sample"])
+         self.embedding_model = app_config["llm_config"]["embedding_model"]
+
+         # Memory
+         self.number_of_q_a_pairs = int(
+             app_config["memory"]["number_of_q_a_pairs"])
+
+         # Create the main vectordb directory; clean up the upload-doc vectordb if it exists
+         self.create_directory(self.persist_directory)
+         self.remove_directory(self.custom_persist_directory)
+
+     def create_directory(self, directory_path: str):
+         """
+         Create a directory if it does not exist.
+
+         Parameters:
+             directory_path (str): The path of the directory to be created.
+         """
+         if not os.path.exists(directory_path):
+             os.makedirs(directory_path)
+
+     def remove_directory(self, directory_path: str):
+         """
+         Remove the specified directory.
+
+         Parameters:
+             directory_path (str): The path of the directory to be removed.
+
+         Raises:
+             OSError: If an error occurs during the directory removal process.
+
+         Returns:
+             None
+         """
+         if os.path.exists(directory_path):
+             try:
+                 shutil.rmtree(directory_path)
+                 print(
+                     f"The directory '{directory_path}' has been successfully removed.")
+             except OSError as e:
+                 print(f"Error: {e}")
+         else:
+             print(f"The directory '{directory_path}' does not exist.")
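
`LoadConfig` assumes a `configs/app_config.yml` file with a specific shape. The snippet below reconstructs a minimal example of that file from the keys read in `__init__`; the keys come from the code above, but every value is an illustrative placeholder rather than the project's real configuration.

```python
# Minimal sketch of configs/app_config.yml, reconstructed from the keys
# LoadConfig reads; all values are illustrative placeholders.
import yaml

SAMPLE_APP_CONFIG = """
directories:
  data_directory: data/docs
  persist_directory: data/vectordb/processed/chroma/
  custom_persist_directory: data/vectordb/uploaded/chroma/
llm_config:
  engine: gemma
  device: cuda
  llm_system_role_with_history: Answer using the chat history and the retrieved content.
  llm_system_role_without_history: Answer using the retrieved content.
  temperature: 0.1
  add_history: true
  top_k: 10
  top_p: 0.1
  max_new_tokens: 512
  do_sample: true
  embedding_model: NeuML/pubmedbert-base-embeddings
retrieval_config:
  k: 3
splitter_config:
  chunk_size: 1500
  chunk_overlap: 150
memory:
  number_of_q_a_pairs: 2
"""

# Sanity-check that every top-level section LoadConfig expects is present.
cfg = yaml.safe_load(SAMPLE_APP_CONFIG)
assert {"directories", "llm_config", "retrieval_config",
        "splitter_config", "memory"} <= cfg.keys()
```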
utils/prepare_vectordb.py ADDED
@@ -0,0 +1,117 @@
+ from langchain.vectorstores import Chroma
+ from langchain.document_loaders import PyPDFLoader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ import os
+ from typing import List, Union
+ from langchain.embeddings import HuggingFaceEmbeddings
+
+
+ class PrepareVectorDB:
+     """
+     A class for preparing and saving a VectorDB using HuggingFace embeddings.
+
+     This class facilitates the process of loading documents, chunking them, and creating a VectorDB
+     with HuggingFace embeddings. It provides methods to prepare and save the VectorDB.
+
+     Parameters:
+         data_directory (str or List[str]): The directory or list of document paths to load.
+         persist_directory (str): The directory to save the VectorDB.
+         chunk_size (int): The size of the chunks for document processing.
+         chunk_overlap (int): The overlap between chunks.
+     """
+
+     def __init__(
+             self,
+             data_directory: Union[str, List[str]],
+             persist_directory: str,
+             chunk_size: int,
+             chunk_overlap: int
+     ) -> None:
+         """
+         Initialize the PrepareVectorDB instance.
+
+         Parameters:
+             data_directory (str or List[str]): The directory or list of document paths to load.
+             persist_directory (str): The directory to save the VectorDB.
+             chunk_size (int): The size of the chunks for document processing.
+             chunk_overlap (int): The overlap between chunks.
+         """
+         # Other options: CharacterTextSplitter, TokenTextSplitter, etc.
+         self.text_splitter = RecursiveCharacterTextSplitter(
+             chunk_size=chunk_size,
+             chunk_overlap=chunk_overlap,
+             separators=["\n\n", "\n", " ", ""]
+         )
+         self.data_directory = data_directory
+         self.persist_directory = persist_directory
+         self.embedding_function = HuggingFaceEmbeddings(
+             model_name="NeuML/pubmedbert-base-embeddings",
+             # cache_folder=os.getenv('SENTENCE_TRANSFORMERS_HOME')
+         )
+
+     def __load_all_documents(self) -> List:
+         """
+         Load all documents from the specified directory or list of file paths.
+
+         Returns:
+             List: A list of loaded documents.
+         """
+         doc_counter = 0
+         if isinstance(self.data_directory, list):
+             print("Loading the uploaded documents...")
+             docs = []
+             for doc_dir in self.data_directory:
+                 docs.extend(PyPDFLoader(doc_dir).load())
+                 doc_counter += 1
+             print("Number of loaded documents:", doc_counter)
+             print("Number of pages:", len(docs), "\n\n")
+         else:
+             print("Loading documents manually...")
+             document_list = os.listdir(self.data_directory)
+             docs = []
+             for doc_name in document_list:
+                 docs.extend(PyPDFLoader(os.path.join(
+                     self.data_directory, doc_name)).load())
+                 doc_counter += 1
+             print("Number of loaded documents:", doc_counter)
+             print("Number of pages:", len(docs), "\n\n")
+
+         return docs
+
+     def __chunk_documents(self, docs: List) -> List:
+         """
+         Chunk the loaded documents using the specified text splitter.
+
+         Parameters:
+             docs (List): The list of loaded documents.
+
+         Returns:
+             List: A list of chunked documents.
+         """
+         print("Chunking documents...")
+         chunked_documents = self.text_splitter.split_documents(docs)
+         print("Number of chunks:", len(chunked_documents), "\n\n")
+         return chunked_documents
+
+     def prepare_and_save_vectordb(self):
+         """
+         Load, chunk, and create a VectorDB with HuggingFace embeddings, and save it.
+
+         Returns:
+             Chroma: The created VectorDB.
+         """
+         docs = self.__load_all_documents()
+         chunked_documents = self.__chunk_documents(docs)
+         print("Preparing vectordb...")
+         vectordb = Chroma.from_documents(
+             documents=chunked_documents,
+             embedding=self.embedding_function,
+             persist_directory=self.persist_directory
+         )
+         print("VectorDB is created and saved.")
+         print("Number of vectors in vectordb:",
+               vectordb._collection.count(), "\n\n")
+         return vectordb
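
For reference, a short usage sketch. This mirrors what the 'upload_data_manually.py' module referenced in chatbot.py presumably does; that module is not part of this commit, so treat the wiring as an assumption.

```python
# Hypothetical manual indexing script using PrepareVectorDB with the values
# loaded by LoadConfig; mirrors the role of 'upload_data_manually.py'.
from utils.load_config import LoadConfig
from utils.prepare_vectordb import PrepareVectorDB

APPCFG = LoadConfig()
prepare_vectordb_instance = PrepareVectorDB(
    data_directory=APPCFG.data_directory,        # folder of PDFs to index
    persist_directory=APPCFG.persist_directory,  # where Chroma persists its files
    chunk_size=APPCFG.chunk_size,
    chunk_overlap=APPCFG.chunk_overlap,
)
prepare_vectordb_instance.prepare_and_save_vectordb()
```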
utils/ui_settings.py ADDED
@@ -0,0 +1,35 @@
+ import gradio as gr
+
+
+ class UISettings:
+     """
+     Utility class for managing UI settings.
+
+     This class provides static methods for toggling UI components, such as a sidebar.
+     """
+     @staticmethod
+     def toggle_sidebar(state):
+         """
+         Toggle the visibility state of a UI component.
+
+         Parameters:
+             state: The current state of the UI component.
+
+         Returns:
+             Tuple: A tuple containing the updated UI component state and the new state.
+         """
+         state = not state
+         return gr.update(visible=state), state
+
+     @staticmethod
+     def feedback(data: gr.LikeData):
+         """
+         Process user feedback on the generated response.
+
+         Parameters:
+             data (gr.LikeData): Gradio LikeData object containing user feedback.
+         """
+         if data.liked:
+             print("You upvoted this response: " + data.value)
+         else:
+             print("You downvoted this response: " + data.value)
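
How these callbacks plug into a Gradio app is not shown in this commit; below is a hypothetical wiring sketch. The component names (`sidebar`, `toggle_btn`, and so on) are illustrative, not taken from the app's actual layout.

```python
# Hypothetical Gradio wiring for UISettings; component names are illustrative.
import gradio as gr
from utils.ui_settings import UISettings

with gr.Blocks() as demo:
    with gr.Column(visible=True) as sidebar:
        gr.Markdown("Retrieved references appear here.")
    sidebar_state = gr.State(True)
    toggle_btn = gr.Button("Toggle sidebar")
    chatbot = gr.Chatbot()

    # toggle_sidebar returns (gr.update(visible=...), new_state)
    toggle_btn.click(UISettings.toggle_sidebar,
                     inputs=[sidebar_state],
                     outputs=[sidebar, sidebar_state])
    # The chatbot's like/dislike buttons pass a gr.LikeData to feedback
    chatbot.like(UISettings.feedback, None, None)

demo.launch()
```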
utils/upload_file.py ADDED
@@ -0,0 +1,39 @@
+ from utils.prepare_vectordb import PrepareVectorDB
+ from typing import List, Tuple
+ from utils.load_config import LoadConfig
+
+ APPCFG = LoadConfig()
+
+
+ class UploadFile:
+     """
+     Utility class for handling file uploads and processing.
+
+     This class provides static methods for checking directories and processing uploaded files
+     to prepare a VectorDB.
+     """
+
+     @staticmethod
+     def process_uploaded_files(files_dir: List, chatbot: List, rag_with_dropdown: str) -> Tuple:
+         """
+         Process uploaded files to prepare a VectorDB.
+
+         Parameters:
+             files_dir (List): List of paths to the uploaded files.
+             chatbot (List): List representing the chatbot's conversation history.
+             rag_with_dropdown (str): The action selected in the 'rag_with' dropdown.
+
+         Returns:
+             Tuple: A tuple containing an empty string and the updated chat history.
+         """
+         if rag_with_dropdown == "Upload doc: Process for RAG":
+             prepare_vectordb_instance = PrepareVectorDB(data_directory=files_dir,
+                                                         persist_directory=APPCFG.custom_persist_directory,
+                                                         chunk_size=APPCFG.chunk_size,
+                                                         chunk_overlap=APPCFG.chunk_overlap)
+             prepare_vectordb_instance.prepare_and_save_vectordb()
+             chatbot.append(
+                 (" ", "Uploaded files are ready. Please ask your question."))
+         else:
+             chatbot.append(
+                 (" ", "If you would like to upload a PDF, please select your desired action in the 'rag_with' dropdown."))
+         return "", chatbot
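
A matching wiring sketch for the upload flow, again with hypothetical component names: `process_uploaded_files` takes the uploaded file paths, the chat history, and the dropdown value, and returns `("", chatbot)` so the textbox is cleared and the status message lands in the chat.

```python
# Hypothetical wiring of UploadFile.process_uploaded_files to a Gradio
# UploadButton; component names are illustrative.
import gradio as gr
from utils.upload_file import UploadFile

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    input_txt = gr.Textbox(placeholder="Ask your question...")
    rag_with_dropdown = gr.Dropdown(
        label="rag_with",
        choices=["Preprocessed doc", "Upload doc: Process for RAG"],
        value="Preprocessed doc")
    upload_btn = gr.UploadButton(
        "Upload PDF", file_types=[".pdf"], file_count="multiple")

    # Build the custom VectorDB from the uploaded files, then post a status
    # message into the chat history.
    upload_btn.upload(UploadFile.process_uploaded_files,
                      inputs=[upload_btn, chatbot, rag_with_dropdown],
                      outputs=[input_txt, chatbot])

demo.launch()
```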