Atreyu4EVR committed · verified · commit ada655b · 1 Parent(s): 2a6e577

Update app.py

Files changed (1): app.py (+56 -27)

app.py CHANGED
@@ -1,27 +1,33 @@
- import streamlit as st
  import os
- import torch
  from openai import OpenAI
- import numpy as np
- import sys
  from dotenv import load_dotenv
- import random
- from huggingface_hub import InferenceClient

  # Load environment variables
  load_dotenv()

  # Constants
  MAX_TOKENS = 4000
  DEFAULT_TEMPERATURE = 0.5

- # initialize the client
-
  client = OpenAI(
-     base_url="https://api-inference.huggingface.co/v1",
-     api_key=os.environ.get('API_KEY') # Replace with your token
  )
-
  # Create supported models
  model_links = {
      "Meta-Llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
@@ -30,40 +36,50 @@ model_links = {
      "Falcon-7b-Instruct": "tiiuae/falcon-7b-instruct",
  }

- # Random dog images for error message
- random_dog_images = ["broken_llama3.jpeg"]

  def reset_conversation():
-     '''
-     Resets Conversation
-     '''
      st.session_state.conversation = []
      st.session_state.messages = []
-     return None

- st.sidebar.button('Reset Chat', on_click=reset_conversation) #Reset button
-
  def main():
-     st.header('Multi-Models')
-

      # Sidebar for model selection and temperature
      selected_model = st.sidebar.selectbox("Select Model", list(model_links.keys()))
      temperature = st.sidebar.slider('Select a temperature value', 0.0, 1.0, DEFAULT_TEMPERATURE)

      if "prev_option" not in st.session_state:
          st.session_state.prev_option = selected_model

      if st.session_state.prev_option != selected_model:
          st.session_state.messages = []
-         # st.write(f"Changed to {selected_model}")
          st.session_state.prev_option = selected_model
          reset_conversation()

      st.markdown(f'_powered_ by ***:violet[{selected_model}]***')

-     # Display model info and logo
      st.sidebar.write(f"You're now chatting with **{selected_model}**")
      st.sidebar.markdown("*Generated content may be inaccurate or false.*")
@@ -76,15 +92,28 @@ def main():
          with st.chat_message(message["role"]):
              st.markdown(message["content"])

      # Chat input and response
      if prompt := st.chat_input("Type message here..."):
-         process_user_input(client, prompt, selected_model, temperature)

- def process_user_input(client, prompt, selected_model, temperature):
      # Display user message
      with st.chat_message("user"):
          st.markdown(prompt)
-     st.session_state.messages.append({"role": "user", "content": prompt})

      # Generate and display assistant response
      with st.chat_message("assistant"):
@@ -110,7 +139,7 @@ def handle_error(error):
      response = """😵‍💫 Looks like someone unplugged something!
      \n Either the model space is being updated or something is down."""
      st.write(response)
-     random_dog_pick = random.choice(random_dog_images)
      st.image(random_dog_pick)
      st.write("This was the error message:")
      st.write(str(error))

app.py after this commit (added lines marked +):

  import os
+ import random
  from openai import OpenAI
+ import streamlit as st
  from dotenv import load_dotenv
+ from huggingface_hub import get_token
+ from langchain_huggingface import HuggingFaceEndpoint
+ from langchain.indexes import VectorstoreIndexCreator
+ from langchain_community.document_loaders.hugging_face_dataset import HuggingFaceDatasetLoader
+ from langchain_huggingface.embeddings.huggingface_endpoint import HuggingFaceEndpointEmbeddings
+ from langchain.chains import RetrievalQA
+ from langchain_community.vectorstores import FAISS

  # Load environment variables
  load_dotenv()

+ api_key=os.environ.get('API_KEY')
+
+ get_token()
+
  # Constants
  MAX_TOKENS = 4000
  DEFAULT_TEMPERATURE = 0.5

+ # Initialize the OpenAI client
  client = OpenAI(
+     base_url="https://api-inference.huggingface.co/v1",
+     api_key=api_key
  )
+
  # Create supported models
  model_links = {
      "Meta-Llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",

      "Falcon-7b-Instruct": "tiiuae/falcon-7b-instruct",
  }
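
The generation call itself is cut off in this diff (both versions truncate just after `with st.chat_message("assistant"):`), but with this client configuration the request would go through the OpenAI-compatible Hugging Face inference endpoint. A minimal standalone sketch, assuming the standard `chat.completions` streaming interface and one of the models listed above:

```python
import os
from openai import OpenAI

# Sketch only: stream a reply from one of the models in model_links
# via the OpenAI-compatible Hugging Face inference endpoint.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get("API_KEY"),
)

stream = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3.1-8B-Instruct",  # value from model_links
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=100,        # the app itself caps output at MAX_TOKENS = 4000
    temperature=0.5,       # DEFAULT_TEMPERATURE in the app
    stream=True,
)
for chunk in stream:
    print(chunk.choices[0].delta.content or "", end="")
```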

+ # Load documents and set up RAG pipeline
+ @st.cache_resource
+ def setup_rag_pipeline():
+     loader = HuggingFaceDatasetLoader(
+         path='Atreyu4EVR/General-BYUI-Data',
+         page_content_column='content'
+     )
+     documents = loader.load()
+
+     hf_embeddings = HuggingFaceEndpointEmbeddings(
+         model="sentence-transformers/all-MiniLM-L12-v2",
+         task="feature-extraction",
+         huggingfacehub_api_token=api_key
+     )
+
+     vector_store = FAISS.from_documents(documents, hf_embeddings)
+     retriever = vector_store.as_retriever()
+
+     return retriever
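
`@st.cache_resource` matters here: Streamlit reruns the whole script on every interaction, so without it the dataset download, embedding calls, and FAISS index build would repeat on every message; cached, they run once per server process and all sessions share the same retriever. A hypothetical smoke test for the function (dataset path and column come from the diff; the query is invented):

```python
# Hypothetical check of the RAG pipeline outside the app (query invented).
retriever = setup_rag_pipeline()
docs = retriever.get_relevant_documents("What housing is available at BYU-Idaho?")
for doc in docs:  # as_retriever() defaults to the top 4 similar documents
    print(doc.page_content[:200])
```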
 
  def reset_conversation():
      st.session_state.conversation = []
      st.session_state.messages = []
 

  def main():
+     st.header('Multi-Models with RAG')

      # Sidebar for model selection and temperature
      selected_model = st.sidebar.selectbox("Select Model", list(model_links.keys()))
      temperature = st.sidebar.slider('Select a temperature value', 0.0, 1.0, DEFAULT_TEMPERATURE)

+     st.sidebar.button('Reset Chat', on_click=reset_conversation)
+
      if "prev_option" not in st.session_state:
          st.session_state.prev_option = selected_model

      if st.session_state.prev_option != selected_model:
          st.session_state.messages = []
          st.session_state.prev_option = selected_model
          reset_conversation()

      st.markdown(f'_powered_ by ***:violet[{selected_model}]***')

+     # Display model info
      st.sidebar.write(f"You're now chatting with **{selected_model}**")
      st.sidebar.markdown("*Generated content may be inaccurate or false.*")
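
The `prev_option` guard above is a standard Streamlit idiom: because the script reruns top to bottom on every widget interaction, the previous selection has to be stashed in `st.session_state` so a model switch can be detected and the stale chat history cleared. A self-contained sketch of the pattern (labels and options invented):

```python
import streamlit as st

# Invented two-option demo of the model-switch guard used in main().
choice = st.selectbox("Select Model", ["model-a", "model-b"])

if "prev_option" not in st.session_state:
    st.session_state.prev_option = choice

if st.session_state.prev_option != choice:
    st.session_state.messages = []      # drop history from the old model
    st.session_state.prev_option = choice

st.write(f"Now chatting with {choice}")
```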
 
 
          with st.chat_message(message["role"]):
              st.markdown(message["content"])

+     # Set up RAG pipeline
+     retriever = setup_rag_pipeline()
+
      # Chat input and response
      if prompt := st.chat_input("Type message here..."):
+         process_user_input(client, prompt, selected_model, temperature, retriever)
 
+ def process_user_input(client, prompt, selected_model, temperature, retriever):
      # Display user message
      with st.chat_message("user"):
          st.markdown(prompt)
+
+     # Retrieve relevant documents
+     relevant_docs = retriever.get_relevant_documents(prompt)
+     context = "\n".join([doc.page_content for doc in relevant_docs])
+
+     # Prepare messages with context
+     messages = [
+         {"role": "system", "content": f"You are an AI assistant. Use the following context to answer the user's question: {context}"},
+         {"role": "user", "content": prompt}
+     ]
+     st.session_state.messages.extend(messages)

      # Generate and display assistant response
      with st.chat_message("assistant"):
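
`get_relevant_documents` still works but has been deprecated in recent LangChain releases (since 0.1.46) in favor of the Runnable interface, so if the Space later upgrades LangChain, the retrieval step above would become:

```python
# Equivalent retrieval on newer LangChain versions, where retrievers
# are Runnables; drop-in replacement for the two lines above.
relevant_docs = retriever.invoke(prompt)
context = "\n".join(doc.page_content for doc in relevant_docs)
```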
 
      response = """😵‍💫 Looks like someone unplugged something!
      \n Either the model space is being updated or something is down."""
      st.write(response)
+     random_dog_pick = random.choice(["broken_llama3.jpeg"])
      st.image(random_dog_pick)
      st.write("This was the error message:")
      st.write(str(error))