Pratik Dwivedi committed on
Commit
9f98769
1 Parent(s): 727d805

main functionality

Browse files
Files changed (3) hide show
  1. app.py +59 -54
  2. app2.py +63 -0
  3. requirements.txt +2 -1
app.py CHANGED
@@ -1,63 +1,68 @@
1
  import streamlit as st
2
- from langchain.memory import ConversationBufferMemory
3
- from langchain.chains import LLMChain
4
- from langchain_community.llms import HuggingFaceHub
5
- from langchain.prompts import PromptTemplate
6
- from dotenv import load_dotenv
7
- import time
8
-
9
def get_response(model, query):
    """Run `query` through an LLMChain wrapped around `model` and return the answer text.

    Parameters:
        model: a LangChain-compatible LLM (here a HuggingFaceHub instance).
        query: the user's question (str).

    Returns:
        The generated answer text (str), with any echoed prompt stripped.

    NOTE(review): a fresh ConversationBufferMemory is built on every call, so the
    chain never actually carries context across turns — hoist the memory into
    st.session_state if cross-turn memory is wanted. Removed the leftover
    `print(memory)` debug statement.
    """
    prompt_template = PromptTemplate(
        template="I have a question about my health. {user_question}",
        input_variables=["user_question"],
    )
    memory = ConversationBufferMemory(memory_key="messages", return_messages=True)
    conversation_chain = LLMChain(
        llm=model,
        prompt=prompt_template,
        # retriever=vectorstore.as_retriever(),
        memory=memory,
    )
    response = conversation_chain.invoke(query)
    answer = response["text"]
    # Hub models often echo the prompt before the completion; drop everything
    # up to the first blank line so only the generated answer remains.
    if "\n\n" in answer:
        answer = answer.split("\n\n", 1)[1]
    return answer
27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
def main():
    """Streamlit entry point: chat UI backed by a HuggingFace-hosted medical LLM."""
    st.title("Health Chatbot")
    # load the environment variables (HF API token etc.)
    load_dotenv()
    print("Loading LLM from HuggingFace")
    with st.spinner('Loading LLM from HuggingFace...'):
        # llm = HuggingFaceHub(repo_id="HuggingFaceH4/zephyr-7b-beta", model_kwargs={"temperature":0.7, "max_new_tokens":1028, "top_p":0.95})
        llm = HuggingFaceHub(repo_id="epfl-llm/meditron-70b", model_kwargs={"temperature":0.7, "max_new_tokens":1028, "top_p":0.95})

    if "messages" not in st.session_state:
        st.session_state.messages = []

    if st.button("Clear Chat"):
        st.session_state.messages = []

    # Replay the stored transcript, picking the avatar from the stored role.
    for entry in st.session_state.messages:
        speaker = "user" if entry["role"] == "user" else "bot"
        st.chat_message(speaker).markdown(entry["content"])

    user_prompt = st.chat_input("ask a question", key="user")
    if user_prompt:
        st.chat_message("user").markdown(user_prompt)
        st.session_state.messages.append({"role": "user", "content": user_prompt})
        with st.spinner('Thinking...'):
            t0 = time.time()
            response = get_response(llm, user_prompt)
            st.write("Response Time: ", time.time() - t0)
            st.chat_message("bot").markdown(response)
            st.session_state.messages.append({"role": "bot", "content": response})

if __name__ == "__main__":
    main()
 
1
  import streamlit as st
2
+ import openai
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
# Function to interact with the GPT-3.5 chat model, personalized with body stats
def ask_gpt3_personalized(prompt, height_cm, weight_kg, age):
    """Ask gpt-3.5-turbo a fitness question, injecting the user's body stats.

    Parameters:
        prompt: the user's question (str).
        height_cm, weight_kg, age: numeric personal stats from the sidebar.

    Returns:
        The assistant's reply text (str).

    BUG FIX: the original concatenated the system string with
    st.session_state.messages (a list), which raises TypeError on every call;
    the history is stringified first now.
    """
    history = str(st.session_state.messages)
    # openai==0.28-style API (per requirements.txt pin)
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "You are a chatbot that will answer queries related to fitness and nutrition. The chatbot should understand questions about workout routines, dietary advice, and general fitness tips. Chatbot will offer personalized workout and diet plans based on user inputs such as body type, fitness goals, and dietary restrictions. this is the chat history use this as a reference to answer the queries" + history},
            {"role": "user", "content": "my height is " + str(height_cm) + " cm, my weight is " + str(weight_kg) + " kg, and I am " + str(age) + " years old." + prompt},
        ]
    )
    return response['choices'][0]['message']['content']
14
+
15
def ask_gpt3(prompt):
    """Ask gpt-3.5-turbo a fitness question without personal stats.

    Parameters:
        prompt: the user's question (str).

    Returns:
        The assistant's reply text (str).

    BUG FIX: the original concatenated the system string with
    st.session_state.messages (a list), which raises TypeError on every call;
    the history is stringified first now.
    """
    history = str(st.session_state.messages)
    # openai==0.28-style API (per requirements.txt pin)
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "You are a chatbot that will answer queries related to fitness and nutrition. The chatbot should understand questions about workout routines, dietary advice, and general fitness tips. Chatbot will offer personalized workout and diet plans based on user inputs such as body type, fitness goals, and dietary restrictions. this is the chat history use this as a reference to answer the queries" + history},
            {"role": "user", "content": prompt},
        ]
    )
    return response['choices'][0]['message']['content']
24
+
25
# Main function to run the Streamlit app
def main():
    """Streamlit entry point: fitness chat UI backed by OpenAI gpt-3.5-turbo."""
    st.title("Fitness Chatbot")

    # Sidebar: API key plus personal stats used to personalize answers
    st.sidebar.title("Personal Information")
    # SECURITY FIX: the original shipped a real OpenAI API key as the widget's
    # default value. Never hard-code secrets — ask the user and mask the input.
    # (The leaked key must also be revoked on the OpenAI dashboard.)
    openai.api_key = st.sidebar.text_input("Your OpenAI API Key here", type="password")
    height_cm = st.sidebar.number_input("Height (cm)", 0, 300)
    weight_kg = st.sidebar.number_input("Weight (kg)", 0, 300)
    age = st.sidebar.number_input("Age", 0, 100)

    # Initialize conversation history
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # User input
    user_input = st.chat_input("Ask something")

    # If user input is not empty
    if user_input:
        # Add user input to conversation history
        st.session_state.messages.append({"role": "user", "content": user_input})

        # Personalize only when all three stats were provided (non-zero)
        if height_cm and weight_kg and age:
            chatbot_response = ask_gpt3_personalized(user_input, height_cm, weight_kg, age)
        else:
            chatbot_response = ask_gpt3(user_input)

        # Add chatbot response to conversation history
        st.session_state.messages.append({"role": "assistant", "content": chatbot_response})

    # Display conversation history
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # NOTE: removed the original's dead `st.session_state["chat_input"] = ""` —
    # st.chat_input clears itself after submit and no widget uses that key.

# Run the main function
if __name__ == "__main__":
    main()
app2.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from langchain.memory import ConversationBufferMemory
3
+ from langchain.chains import LLMChain
4
+ from langchain_community.llms import HuggingFaceHub
5
+ from langchain.prompts import PromptTemplate
6
+ from dotenv import load_dotenv
7
+ import time
8
+
9
def get_response(model, query):
    """Run `query` through an LLMChain wrapped around `model` and return the answer text.

    Parameters:
        model: a LangChain-compatible LLM (here a HuggingFaceHub instance).
        query: the user's question (str).

    Returns:
        The generated answer text (str), with any echoed prompt stripped.

    NOTE(review): a fresh ConversationBufferMemory is built on every call, so the
    chain never actually carries context across turns — hoist the memory into
    st.session_state if cross-turn memory is wanted. Removed the leftover
    `print(memory)` debug statement.
    """
    prompt_template = PromptTemplate(
        template="I have a question about my health. {user_question}",
        input_variables=["user_question"],
    )
    memory = ConversationBufferMemory(memory_key="messages", return_messages=True)
    conversation_chain = LLMChain(
        llm=model,
        prompt=prompt_template,
        # retriever=vectorstore.as_retriever(),
        memory=memory,
    )
    response = conversation_chain.invoke(query)
    answer = response["text"]
    # Hub models often echo the prompt before the completion; drop everything
    # up to the first blank line so only the generated answer remains.
    if "\n\n" in answer:
        answer = answer.split("\n\n", 1)[1]
    return answer
27
+
28
def main():
    """Streamlit entry point: chat UI backed by a HuggingFace-hosted LLM."""
    st.title("Health Chatbot")
    # load the environment variables (HF API token etc.)
    load_dotenv()
    print("Loading LLM from HuggingFace")
    with st.spinner('Loading LLM from HuggingFace...'):
        llm = HuggingFaceHub(repo_id="HuggingFaceH4/zephyr-7b-beta", model_kwargs={"temperature":0.7, "max_new_tokens":1028, "top_p":0.95})

    # llm = HuggingFaceHub(repo_id="ajdev/falcon_medical", model_kwargs={"temperature":0.7, "max_new_tokens":250, "top_p":0.95})

    if "messages" not in st.session_state:
        st.session_state.messages = []

    if st.button("Clear Chat"):
        st.session_state.messages = []

    # Replay the stored transcript, picking the avatar from the stored role.
    for entry in st.session_state.messages:
        speaker = "user" if entry["role"] == "user" else "bot"
        st.chat_message(speaker).markdown(entry["content"])

    user_prompt = st.chat_input("ask a question", key="user")
    if user_prompt:
        st.chat_message("user").markdown(user_prompt)
        st.session_state.messages.append({"role": "user", "content": user_prompt})
        with st.spinner('Thinking...'):
            t0 = time.time()
            response = get_response(llm, user_prompt)
            st.write("Response Time: ", time.time() - t0)
            st.chat_message("bot").markdown(response)
            st.session_state.messages.append({"role": "bot", "content": response})

if __name__ == "__main__":
    main()
requirements.txt CHANGED
@@ -1,4 +1,5 @@
1
  streamlit
2
  langchain
3
  langchain-community
4
- InstructorEmbedding
 
 
1
  streamlit
2
  langchain
3
  langchain-community
4
+ InstructorEmbedding
5
+ openai==0.28