Kibo1 committed on
Commit
934d19e
·
verified ·
1 Parent(s): 268d8be

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +98 -92
app.py CHANGED
@@ -1,105 +1,111 @@
"""Mental-health FAQ RAG chatbot: Groq LLM + Chroma retrieval + Gradio UI.

Pre-refactor flat-script version. Fixes applied: removed the duplicate
`import pandas as pd`; removed the SentenceTransformer encoding step whose
output (`context_embeddings`) was never used (the Chroma store computes its
own embeddings via HuggingFaceEmbeddings); added a fail-fast check for the
Groq API key; replaced the `range(len(df))`/`iloc` loop with a zip-based
comprehension; fixed the "symptompts" typo in the example prompt.
"""
import os

import pandas as pd
from langchain_groq import ChatGroq
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_chroma import Chroma
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
import gradio as gr

# Load the FAQ dataset (columns: 'Questions', 'Answers').
df = pd.read_csv('./Mental_Health_FAQ.csv')

# One "Question: ... Answer: ..." context string per row.
context_data = [
    f"Question: {question} Answer: {answer}"
    for question, answer in zip(df['Questions'], df['Answers'])
]

# Get the secret key from the environment; fail fast if it is missing so
# the error surfaces here rather than as an opaque auth failure later.
groq_key = os.environ.get('new_chatAPI_key')
if not groq_key:
    raise ValueError("Groq API key not found in environment variables.")

## LLM used for RAG
llm = ChatGroq(model="llama-3.3-70b-versatile", api_key=groq_key)

## Embedding model (Chroma calls this to embed both documents and queries)
embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-v1")

# Create vector store and index the FAQ contexts.
vectorstore = Chroma(
    collection_name="medical_dataset_store",
    embedding_function=embed_model,
)
vectorstore.add_texts(context_data)

retriever = vectorstore.as_retriever()

template = ("""You are a mental health professional.
Use the provided context to answer the question.
If you don't know the answer, say so. Explain your answer in detail.
Do not discuss the context in your response; just provide the answer directly.

Context: {context}

Question: {question}

Answer:""")

rag_prompt = PromptTemplate.from_template(template)

# Retrieval feeds {context}; the raw user message passes through as {question}.
rag_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | rag_prompt
    | llm
    | StrOutputParser()
)


def rag_memory_stream(message, history):
    """Yield the growing answer string as the chain streams tokens.

    `history` is supplied by gr.ChatInterface but unused: each turn is
    answered independently from retrieval alone.
    """
    partial_text = ""
    for new_text in rag_chain.stream(message):
        partial_text += new_text
        yield partial_text


examples = [
    "I am not in a good mood",
    "what are the possible symptoms of depression?",
]

description = "Real-time AI App with Groq API and LangChain to Answer medical questions"

title = "ThriveTalk Expert :) Try me!"
demo = gr.ChatInterface(
    fn=rag_memory_stream,
    type="messages",
    title=title,
    description=description,
    fill_height=True,
    examples=examples,
    theme="glass",
)

if __name__ == "__main__":
    demo.launch()
 
 
"""Mental-health FAQ RAG chatbot: Groq LLM + Chroma retrieval + Gradio UI.

All initialization runs inside try/except so that a failure (missing CSV,
missing API key, model download error) still produces a launchable Gradio
app that reports the error instead of crashing on import.

Fixes applied to this version: removed the SentenceTransformer encoding
step whose output (`context_embeddings`) was never used — the Chroma store
embeds with HuggingFaceEmbeddings; switched to a module-level logger and
`logger.exception` so the traceback is preserved; replaced the
`range(len(df))`/`iloc` loop with a zip-based comprehension; fixed the
"symptompts" typo in the example prompt.
"""
import os
import logging

import pandas as pd
from langchain_groq import ChatGroq
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_chroma import Chroma
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
import gradio as gr

# Basic logging setup plus a module-level logger (avoids the root logger).
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

try:
    # Load the FAQ dataset (columns: 'Questions', 'Answers');
    # raises FileNotFoundError if the CSV is absent.
    df = pd.read_csv('./Mental_Health_FAQ.csv')

    # One "Question: ... Answer: ..." context string per row.
    context_data = [
        f"Question: {question} Answer: {answer}"
        for question, answer in zip(df['Questions'], df['Answers'])
    ]

    # Get the API key; fail fast so the error surfaces here rather than as
    # an opaque auth failure on the first chat request.
    groq_key = os.environ.get('new_chatAPI_key')
    if not groq_key:
        raise ValueError("Groq API key not found in environment variables.")

    # LLM used for RAG
    llm = ChatGroq(model="llama-3.3-70b-versatile", api_key=groq_key)

    # Embedding model (Chroma calls this for both documents and queries)
    embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-v1")

    # Create the vector store and index the FAQ contexts.
    vectorstore = Chroma(
        collection_name="medical_dataset_store",
        embedding_function=embed_model,
    )
    vectorstore.add_texts(context_data)

    retriever = vectorstore.as_retriever()

    # Prompt template: retrieved context + the user's question.
    template = ("""You are a mental health professional.
Use the provided context to answer the question.
If you don't know the answer, say so. Explain your answer in detail.
Do not discuss the context in your response; just provide the answer directly.
Context: {context}
Question: {question}
Answer:""")

    rag_prompt = PromptTemplate.from_template(template)
    rag_chain = (
        {"context": retriever, "question": RunnablePassthrough()}
        | rag_prompt
        | llm
        | StrOutputParser()
    )

    def rag_memory_stream(message, history):
        """Yield the growing answer string as the chain streams tokens.

        `history` is supplied by gr.ChatInterface but unused: each turn
        is answered independently from retrieval alone.
        """
        partial_text = ""
        for new_text in rag_chain.stream(message):
            partial_text += new_text
            yield partial_text

    examples = [
        "I am not in a good mood",
        "what are the possible symptoms of depression?",
    ]

    description = "Real-time AI App with Groq API and LangChain to Answer medical questions"
    title = "ThriveTalk Expert :) Try me!"
    demo = gr.ChatInterface(
        fn=rag_memory_stream,
        type="messages",
        title=title,
        description=description,
        fill_height=True,
        examples=examples,
        theme="glass",
    )

except Exception:
    # logger.exception records the full traceback, which logging.error(f"{e}")
    # did not; then fall back to a UI that tells the user to check the logs.
    logger.exception("An error occurred during initialization")

    def error_function(message, history):
        """Fallback chat handler shown when initialization failed."""
        yield "An error has occurred. Please check the logs"

    demo = gr.ChatInterface(
        fn=error_function,
        type="messages",
        title="ERROR :(",
        description="Please check the logs",
        fill_height=True,
        theme="glass",
    )


if __name__ == "__main__":
    demo.launch()