Atreyu4EVR committed: made corrections to the bugs
app.py
CHANGED
@@ -1,28 +1,28 @@
 import streamlit as st
 from openai import OpenAI
-from sentence_transformers import SentenceTransformer
 import os
+import json
 from dotenv import load_dotenv
-import numpy as np
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.schema import Document
 from langchain_community.llms import HuggingFaceHub
-from langchain.chains import RetrievalQA
-from langchain.prompts import PromptTemplate
+from langchain.chains import RetrievalQA
 from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain_community.vectorstores import Chroma
-from
+from tqdm import tqdm
+import random
 
+# Load environment variables
 load_dotenv()
 
-#
+# Constants
+CHUNK_SIZE = 8192
+CHUNK_OVERLAP = 200
+BATCH_SIZE = 100
+RETRIEVER_K = 4
+VECTORSTORE_PATH = "./vectorstore"
 
-#
+# Model information
 model_links = {
     "Meta-Llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
     "Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
@@ -30,128 +30,119 @@ model_links = {
 
 model_info = {
     "Meta-Llama-3.1-8B": {
+        "description": """The Llama (3.1) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.
 \nIt was created by the [**Meta's AI**](https://llama.meta.com/) team and has over **8 billion parameters.**\n""",
+        "logo": "llama_logo.gif",
     },
     "Mistral-7B-Instruct-v0.3": {
+        "description": """The Mistral-7B-Instruct-v0.3 Large Language Model (LLM) is an instruct fine-tuned version of the Mistral-7B-v0.3.
 \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team as has over **7 billion parameters.**\n""",
+        "logo": "https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp",
     },
 }
 
 # Random dog images for error message
+random_dogs = ["randomdog.jpg", "randomdog2.jpg", "randomdog3.jpg"] # Add more as needed
 
-#
+# Set up embeddings
+embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
 
-# Create the sidebar with the dropdown for model selection
-selected_model = st.sidebar.selectbox("Select Model", models)
-
-# Function to load and process documents
 def load_and_process_documents(file_path):
-    if not documents:
-        raise ValueError("No valid documents found in JSON file.")
-
-    # Create Document objects
-    doc_objects = [
-        Document(
-            page_content=doc["content"],
-            metadata={"title": doc["title"], "id": doc["id"]}
-        ) for doc in documents
-    ]
-
-    text_splitter = RecursiveCharacterTextSplitter(chunk_size=CHUNK_SIZE, chunk_overlap=CHUNK_OVERLAP)
-    splits = text_splitter.split_documents(doc_objects)
-
-    return splits
-
-def get_vectorstore(file_path):
-    # Check if vectorstore already exists
+    """Load and process documents from a JSON file."""
+    try:
+        with open(file_path, "r") as file:
+            data = json.load(file)
+
+        documents = data.get("documents", [])
+
+        if not documents:
+            raise ValueError("No valid documents found in JSON file.")
+
+        doc_objects = [
+            Document(
+                page_content=doc["content"],
+                metadata={"title": doc["title"], "id": doc["id"]},
+            )
+            for doc in documents
+        ]
+
+        text_splitter = RecursiveCharacterTextSplitter(
+            chunk_size=CHUNK_SIZE, chunk_overlap=CHUNK_OVERLAP
+        )
+        splits = text_splitter.split_documents(doc_objects)
+
+        return splits
+    except Exception as e:
+        st.error(f"Error loading documents: {str(e)}")
+        return []
+
+def get_vectorstore(file_path):
+    """Get or create a vectorstore."""
+    try:
         if os.path.exists(VECTORSTORE_PATH):
             print("Loading existing vectorstore...")
-            return Chroma(
+            return Chroma(
+                persist_directory=VECTORSTORE_PATH, embedding_function=embeddings
+            )
+
         print("Creating new vectorstore...")
-        splits =
-
-        # Process in batches
+        splits = load_and_process_documents(file_path)
+
         vectorstore = None
         for i in tqdm(range(0, len(splits), BATCH_SIZE), desc="Processing batches"):
-            batch = splits[i:i+BATCH_SIZE]
+            batch = splits[i : i + BATCH_SIZE]
             if vectorstore is None:
-                vectorstore = Chroma.from_documents(
+                vectorstore = Chroma.from_documents(
+                    documents=batch,
+                    embedding=embeddings,
+                    persist_directory=VECTORSTORE_PATH,
+                )
             else:
                 vectorstore.add_documents(documents=batch)
 
        vectorstore.persist()
        return vectorstore
+    except Exception as e:
+        st.error(f"Error creating vectorstore: {str(e)}")
+        return None
+
+@st.cache_resource(hash_funcs={"builtins.tuple": lambda _: None})
+def setup_rag_pipeline(file_path, model_name, temperature):
+    """Set up the RAG pipeline."""
+    try:
        vectorstore = get_vectorstore(file_path)
+        if vectorstore is None:
+            raise ValueError("Failed to create or load vectorstore.")
+
+        llm = HuggingFaceHub(
+            repo_id=model_links[model_name],
+            model_kwargs={"temperature": temperature, "max_length": 4000},
+        )
+
        return RetrievalQA.from_chain_type(
            llm=llm,
            chain_type="stuff",
            retriever=vectorstore.as_retriever(search_kwargs={"k": RETRIEVER_K}),
-            return_source_documents=True
+            return_source_documents=True,
        )
-        llm=llm,
-        chain_type="stuff",
-        retriever=compression_retriever,
-        return_source_documents=True
-    )
-
-    return qa_chain
+    except Exception as e:
+        st.error(f"Error setting up RAG pipeline: {str(e)}")
+        return None
 
 # Streamlit app
-st.header(
+st.header("Liahona.AI")
 
 # Sidebar for model selection
-st.
+selected_model = st.sidebar.selectbox("Select Model", list(model_links.keys()))
+st.markdown(f"_powered_ by ***:violet[{selected_model}]***")
 
 # Temperature slider
+temperature = st.sidebar.slider("Select a temperature value", 0.0, 1.0, 0.5)
 
 # Display model info
 st.sidebar.write(f"You're now chatting with **{selected_model}**")
-st.sidebar.markdown(model_info[selected_model][
-st.sidebar.image(model_info[selected_model][
+st.sidebar.markdown(model_info[selected_model]["description"])
+st.sidebar.image(model_info[selected_model]["logo"])
 st.sidebar.markdown("*Generated content may be inaccurate or false.*")
 
 # Initialize chat history
@@ -164,7 +155,7 @@ for message in st.session_state.messages:
         st.markdown(message["content"])
 
 # Set up advanced RAG pipeline
-qa_chain =
+qa_chain = setup_rag_pipeline("index_training.json", selected_model, temperature)
 
 # Chat input
 if prompt := st.chat_input("Type message here..."):
@@ -176,16 +167,19 @@ if prompt := st.chat_input("Type message here..."):
     # Generate and display assistant response
     with st.chat_message("assistant"):
         try:
+            if qa_chain is None:
+                raise ValueError("RAG pipeline is not properly set up.")
+
             result = qa_chain({"query": prompt})
             response = result["result"]
             st.write(response)
 
         except Exception as e:
             response = """😵💫 Looks like someone unplugged something!
 \n Either the model space is being updated or something is down.
 \n"""
             st.write(response)
-            random_dog_pick =
+            random_dog_pick = random.choice(random_dogs)
             st.image(random_dog_pick)
             st.write("This was the error message:")
             st.write(str(e))
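
For reference, the loader added in this commit expects index_training.json to hold a top-level "documents" list whose entries carry "id", "title", and "content" fields; that is all load_and_process_documents reads. The snippet below is a minimal, hypothetical sketch of a file in that shape. The file name matches the call in the app, but the values are invented placeholders, not the Space's real training data:

import json

# Hypothetical sample; only the "documents" / "id" / "title" / "content" layout
# is taken from load_and_process_documents, the values are placeholders.
sample = {
    "documents": [
        {
            "id": "doc-001",
            "title": "Example source document",
            "content": "Text that will be chunked and embedded into the Chroma vectorstore.",
        }
    ]
}

# Write the sketch to the path the app passes to setup_rag_pipeline.
with open("index_training.json", "w") as f:
    json.dump(sample, f, indent=2)

If ./vectorstore does not exist yet, get_vectorstore would then chunk this file with RecursiveCharacterTextSplitter and embed it into a new Chroma index on the next run. Note also that the app only calls load_dotenv() and passes no token to HuggingFaceHub; langchain_community's HuggingFaceHub reads HUGGINGFACEHUB_API_TOKEN from the environment in that case, so the Space presumably supplies it via its secrets or a .env file.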