ogegadavis254 committed
Commit 877a721 · 1 Parent(s): c0d899e
Update app.py
app.py
CHANGED
@@ -1,21 +1,15 @@
-""" Chatbot
-    @author:
-    @email:
+""" Simple Chatbot
+    @author: Nigel Gebodh
+    @email: nigel.gebodh@gmail.com
 """
 
 import streamlit as st
-from openai import OpenAI
 import os
-
+import requests
+import json
 
 load_dotenv()
 
-# Initialize the client
-client = OpenAI(
-    base_url="https://api-inference.huggingface.co/v1",
-    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')
-)
-
 def reset_conversation():
     '''
     Resets Conversation
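Note that both sides of this hunk call load_dotenv() while neither version imports it anywhere in the diff; if the import is in fact missing from the file, the Space fails with a NameError at startup. A minimal sketch of the presumably intended import, assuming the standard python-dotenv package:

    # Assumed fix, not part of this commit: load_dotenv lives in the
    # python-dotenv package and must be imported before it is called.
    from dotenv import load_dotenv

    load_dotenv()  # reads HUGGINGFACEHUB_API_TOKEN (and friends) from a local .env file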
@@ -33,20 +27,24 @@ model_info = {
         'description': """The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
         \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team as has over **7 billion parameters.** \n""",
         'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'
+    },
+    "BibleLearnerAI": {
+        'description': """You're now chatting with **BibleLearnerAI**. This AI is focused on religion, specifically Christianity, and provides relevant Bible verses. When greeted, it responds with a religious greeting and introduces itself. It knows the Bible more than anything else.""",
+        'logo': 'https://your-bible-teacher.com/wp-content/uploads/2019/03/teacher-300x300.png'
     }
 }
 
-# Create model description for
-st.sidebar.
-
-
+# Create model description for selected model
+selected_model = st.sidebar.selectbox("Select Model", ["Mistral", "BibleLearnerAI"])
+
+# Create model description for selected model
+st.sidebar.write(f"You're now chatting with **{selected_model}**")
+st.sidebar.markdown(model_info[selected_model]['description'])
+st.sidebar.image(model_info[selected_model]['logo'])
 st.sidebar.markdown("*Generated content may be inaccurate or false.*")
 st.sidebar.markdown("\nLearn how to build this chatbot [here](https://ngebodh.github.io/projects/2024-03-05/).")
 st.sidebar.markdown("\nRun into issues? Try the [back-up](https://huggingface.co/spaces/ngebodh/SimpleChatbot-Backup).")
 
-# If model is not already selected, set it to Mistral
-selected_model = "Mistral"
-
 # Initialize chat history
 if "messages" not in st.session_state:
     st.session_state.messages = []
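The file defines reset_conversation() near the top, but the new selectbox never calls it, so switching models keeps the previous model's chat history on screen. A hedged alternative, assuming reset_conversation() clears st.session_state.messages as its docstring suggests:

    # Sketch, not in the commit: clear the conversation whenever the
    # user switches models, via Streamlit's on_change callback.
    selected_model = st.sidebar.selectbox(
        "Select Model",
        ["Mistral", "BibleLearnerAI"],
        on_change=reset_conversation,
    )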
@@ -56,6 +54,18 @@ for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])
 
+# Pre-instructions for addiction recovery
+pre_instructions = "Welcome to the Addiction Recovery AI. I'm here to help you recover from your worst addictions. Feel free to ask me anything related to your recovery journey."
+
+# Pre-instructions for BibleLearnerAI
+pre_instructions_bible = "Welcome to BibleLearnerAI. I'm here to assist you in learning more about the Bible and Christianity. Feel free to ask me anything related to religion and spirituality."
+
+# Add pre-instructions to chat history based on the selected model
+if selected_model == "Mistral":
+    st.session_state.messages.append({"role": "assistant", "content": pre_instructions})
+elif selected_model == "BibleLearnerAI":
+    st.session_state.messages.append({"role": "assistant", "content": pre_instructions_bible})
+
 # Accept user input
 if prompt := st.chat_input(f"Hi, I'm {selected_model}, ask me a question"):
     # Display user message in chat message container
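Streamlit re-executes the whole script on every interaction, so this hunk appends a fresh greeting to st.session_state.messages on every rerun and the history fills with duplicates. (The Mistral branch also greets with the Addiction Recovery text, which reads like a copy-over from another Space.) A guarded sketch, not in the commit, that seeds the greeting only once:

    # Sketch: seed the greeting only when the chat history is empty,
    # so reruns don't append a duplicate after every user message.
    if not st.session_state.messages:
        greeting = pre_instructions if selected_model == "Mistral" else pre_instructions_bible
        st.session_state.messages.append({"role": "assistant", "content": greeting})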
@@ -66,16 +76,13 @@ if prompt := st.chat_input(f"Hi, I'm {selected_model}, ask me a question"):
 
     # Display assistant response in chat message container
     with st.chat_message("assistant"):
-
-
-
-
-
-            ]
-
-
-            max_tokens=1000,
-        )
+        # Call the appropriate model based on the selected_model
+        if selected_model == "Mistral":
+            # Code to call Mistral model
+            pass
+        elif selected_model == "BibleLearnerAI":
+            stream = get_streamed_response(prompt, [(prompt, pre_instructions_bible)])
+            for response in stream:
+                st.write(response)
 
-        response = st.write_stream(stream)
     st.session_state.messages.append({"role": "assistant", "content": response})
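As committed, this hunk cannot run: the Mistral branch is a bare pass, so the closing append references an undefined response, and get_streamed_response is called but defined nowhere in the diff, while the OpenAI client it replaces was deleted in the first hunk. Below is one hypothetical shape for that helper, built on the requests import this commit adds and the Hugging Face Inference API; the model id, payload fields, and history format are assumptions, not code from the Space:

    import os
    import requests

    # Assumed model id; the commit never names the checkpoint it targets.
    API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
    HEADERS = {"Authorization": f"Bearer {os.environ.get('HUGGINGFACEHUB_API_TOKEN')}"}

    def get_streamed_response(prompt, history):
        """Hypothetical stand-in for the undefined helper: folds the
        (message, pre-instruction) pairs into one input string and
        yields the generated text chunk by chunk."""
        context = "\n".join(f"{instr}\n{msg}" for msg, instr in history)
        payload = {"inputs": f"{context}\n{prompt}",
                   "parameters": {"max_new_tokens": 1000}}
        resp = requests.post(API_URL, headers=HEADERS, json=payload)
        resp.raise_for_status()
        # The Inference API returns a list like [{"generated_text": "..."}]
        for item in resp.json():
            yield item.get("generated_text", "")

Even with such a helper in place, the Mistral branch still needs its own call before the final st.session_state.messages.append can use response.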