ogegadavis254 committed fadd816 (parent: 3529132)

Update app.py

Files changed (1):
  1. app.py +98 -72
app.py CHANGED
@@ -1,89 +1,115 @@
- """ Simple Chatbot
- @author: Nigel Gebodh
- @email: nigel.gebodh@gmail.com
- """
-
  import streamlit as st
  import os
  import requests
- from dotenv import load_dotenv
  import json

- load_dotenv()

- def reset_conversation():
-     '''
-     Resets Conversation
-     '''
-     st.session_state.conversation = []
-     st.session_state.messages = []
-     return None
-
- # Add reset button to clear conversation
- st.sidebar.button('Reset Chat', on_click=reset_conversation)
-
- # Model info for Mistral
- model_info = {
-     "Mistral": {
-         'description': """The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-         \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team as has over **7 billion parameters.** \n""",
-         'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'
-     },
-     "BibleLearnerAI": {
-         'description': """You're now chatting with **BibleLearnerAI**. This AI is focused on religion, specifically Christianity, and provides relevant Bible verses. When greeted, it responds with a religious greeting and introduces itself. It knows the Bible more than anything else.""",
-         'logo': 'https://your-bible-teacher.com/wp-content/uploads/2019/03/teacher-300x300.png'
      }
- }

- # Create model description for selected model
- selected_model = st.sidebar.selectbox("Select Model", ["Mistral", "BibleLearnerAI"])

- # Create model description for selected model
- st.sidebar.write(f"You're now chatting with **{selected_model}**")
- st.sidebar.markdown(model_info[selected_model]['description'])
- st.sidebar.image(model_info[selected_model]['logo'])
- st.sidebar.markdown("*Generated content may be inaccurate or false.*")
- st.sidebar.markdown("\nLearn how to build this chatbot [here](https://ngebodh.github.io/projects/2024-03-05/).")
- st.sidebar.markdown("\nRun into issues? Try the [back-up](https://huggingface.co/spaces/ngebodh/SimpleChatbot-Backup).")

- # Initialize chat history
  if "messages" not in st.session_state:
      st.session_state.messages = []

- # Display chat messages from history on app rerun
- for message in st.session_state.messages:
-     with st.chat_message(message["role"]):
-         st.markdown(message["content"])
-
- # Pre-instructions for addiction recovery
- pre_instructions = "Welcome to the Addiction Recovery AI. I'm here to help you recover from your worst addictions. Feel free to ask me anything related to your recovery journey."

- # Pre-instructions for BibleLearnerAI
- pre_instructions_bible = "Welcome to BibleLearnerAI. I'm here to assist you in learning more about the Bible and Christianity. Feel free to ask me anything related to religion and spirituality."

- # Add pre-instructions to chat history based on the selected model
- if selected_model == "Mistral":
-     st.session_state.messages.append({"role": "assistant", "content": pre_instructions})
- elif selected_model == "BibleLearnerAI":
-     st.session_state.messages.append({"role": "assistant", "content": pre_instructions_bible})

  # Accept user input
- if prompt := st.chat_input(f"Hi, I'm {selected_model}, ask me a question"):
-     # Display user message in chat message container
-     with st.chat_message("user"):
-         st.markdown(prompt)
-     # Add user message to chat history
-     st.session_state.messages.append({"role": "user", "content": prompt})
-
-     # Display assistant response in chat message container
-     with st.chat_message("assistant"):
-         # Call the appropriate model based on the selected_model
-         if selected_model == "Mistral":
-             # Code to call Mistral model
-             pass
-         elif selected_model == "BibleLearnerAI":
-             stream = get_streamed_response(prompt, [(prompt, pre_instructions_bible)])
-             for response in stream:
-                 st.write(response)
-
-     st.session_state.messages.append({"role": "assistant", "content": response})
  import streamlit as st
  import os
  import requests
  import json

+ entire_assistant_response = ""

+ def get_streamed_response(message, history, model):
+     all_message = []
+
+     for human, assistant in history:
+         all_message.append({"role": "user", "content": human})
+         all_message.append({"role": "assistant", "content": assistant})
+
+     global entire_assistant_response
+     entire_assistant_response = ""  # Reset the entire assistant response
+
+     all_message.append({"role": "user", "content": message})
+
+     url = "https://api.together.xyz/v1/chat/completions"
+     payload = {
+         "model": model,
+         "temperature": 1.05,
+         "top_p": 0.9,
+         "top_k": 50,
+         "repetition_penalty": 1,
+         "n": 1,
+         "messages": all_message,
+         "stream_tokens": True,
      }

+     TOGETHER_API_KEY = os.getenv('TOGETHER_API_KEY')
+     headers = {
+         "accept": "application/json",
+         "content-type": "application/json",
+         "Authorization": f"Bearer {TOGETHER_API_KEY}",
+     }
+
+     response = requests.post(url, json=payload, headers=headers, stream=True)
+     response.raise_for_status()  # Ensure HTTP request was successful
+
+     for line in response.iter_lines():
+         if line:
+             decoded_line = line.decode('utf-8')
+
+             # Check for the completion signal
+             if decoded_line == "data: [DONE]":
+                 yield entire_assistant_response  # Yield the entire response at the end
+                 break
+
+             try:
+                 # Decode and strip any SSE format specific prefix ("data: ")
+                 if decoded_line.startswith("data: "):
+                     decoded_line = decoded_line.replace("data: ", "")
+                 chunk_data = json.loads(decoded_line)
+                 content = chunk_data['choices'][0]['delta']['content']
+                 entire_assistant_response += content  # Aggregate content
+                 yield entire_assistant_response
+             except json.JSONDecodeError:
+                 print(f"Invalid JSON received: {decoded_line}")
+                 continue
+             except KeyError as e:
+                 print(f"KeyError encountered: {e}")
+                 continue

+     print(entire_assistant_response)
+     all_message.append({"role": "assistant", "content": entire_assistant_response})
+
+
+ # Initialize Streamlit app
+ st.title("AI Chatbot")
+
+ # Initialize session state if not present
  if "messages" not in st.session_state:
      st.session_state.messages = []

+ # Define available models
+ models = {
+     "Addiction Recovery": "model_addiction_recovery",
+     "Mental Health": "model_mental_health",
+     "Wellness": "model_wellness"
+ }

+ # Allow user to select a model
+ selected_model = st.selectbox("Select Model", list(models.keys()))

+ # Define models
+ model_addiction_recovery = "model_addiction_recovery"
+ model_mental_health = "model_mental_health"
+ model_wellness = "model_wellness"

  # Accept user input
+ if prompt := st.text_input("You:", key="user_input"):
+     # Show a spinner while the selected model generates a reply
+     with st.spinner("AI is typing..."):
+         # Build (user, assistant) pairs from the prior conversation for the model
+         past = st.session_state.messages
+         history = [
+             (past[i]["content"], past[i + 1]["content"])
+             for i in range(0, len(past) - 1, 2)
+         ]
+         st.session_state.messages.append({"role": "user", "content": prompt})
+
+         # Call the selected model to get a streamed response
+         if selected_model == "Addiction Recovery":
+             response_stream = get_streamed_response(prompt, history, model_addiction_recovery)
+         elif selected_model == "Mental Health":
+             response_stream = get_streamed_response(prompt, history, model_mental_health)
+         elif selected_model == "Wellness":
+             response_stream = get_streamed_response(prompt, history, model_wellness)
+
+         # Keep only the final aggregated response in the chat history
+         final_response = ""
+         for response in response_stream:
+             final_response = response
+         st.session_state.messages.append({"role": "assistant", "content": final_response})
+
+ # Display chat history (unique keys avoid Streamlit widget-ID collisions)
+ for i, message in enumerate(st.session_state.messages):
+     if message["role"] == "user":
+         st.text_input("You:", value=message["content"], disabled=True, key=f"history_user_{i}")
+     else:
+         st.text_input("AI:", value=message["content"], disabled=True, key=f"history_ai_{i}")