Update app.py
app.py
CHANGED
@@ -1,4 +1,11 @@
 
+
+# # initialize the client
+# client = OpenAI(
+#     base_url="https://api-inference.huggingface.co/v1",
+#     api_key=os.environ.get('')#"hf_xxx" # Replace with your token
+# )
+
 import numpy as np
 import streamlit as st
 from openai import OpenAI
@@ -14,59 +21,42 @@ load_dotenv()
 # initialize the client
 client = OpenAI(
     base_url="https://api-inference.huggingface.co/v1",
-    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')#
-)
-    … (the previous model_links / model_info entries, with keys such as "Meta-Llama-3-…", "Mistral-…", "Zephyr-7B"; most of these removed lines are truncated in this view)
-    [Zephyr 7B Gemma](https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1)\
-    is the third model in the series, and is a fine-tuned version of google/gemma-7b \
-    that was trained on on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
-    'logo':'https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1/resolve/main/thumbnail.png'},
-    "Zephyr-7B-β":
-    {'description':"""The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-    \nFrom Huggingface: \n\
-    Zephyr is a series of language models that are trained to act as helpful assistants. \
-    [Zephyr-7B-β](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta)\
-    is the second model in the series, and is a fine-tuned version of mistralai/Mistral-7B-v0.1 \
-    that was trained on on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
-    'logo':'https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha/resolve/main/thumbnail.png'},
-    "Meta-Llama-3-8B":
-    {'description':"""The Llama (3) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-    \nIt was created by the [**Meta's AI**](https://llama.meta.com/) team and has over **8 billion parameters.** \n""",
-    'logo':'Llama_logo.png'},
+    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN') # Replace with your token
+)
+
+# Create supported models
+model_links = {
+    "Meta-Llama-3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct",
+    "Meta-Llama-3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+    "Meta-Llama-3.1-405B-Instruct-FP8": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
+    "Meta-Llama-3.1-405B-Instruct": "meta-llama/Meta-Llama-3.1-405B-Instruct",
+    "Mistral-Nemo-Instruct-2407": "mistralai/Mistral-Nemo-Instruct-2407",
+    "Meta-Llama-3-70B-Instruct": "meta-llama/Meta-Llama-3-70B-Instruct",
+    "Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
+    "C4ai-command-r-plus": "CohereForAI/c4ai-command-r-plus",
+    "Aya-23-35B": "CohereForAI/aya-23-35B",
+    "Zephyr-orpo-141b-A35b-v0.1": "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
+    "Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+    "Codestral-22B-v0.1": "mistralai/Codestral-22B-v0.1",
+    "Nous-Hermes-2-Mixtral-8x7B-DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+    "Yi-1.5-34B-Chat": "01-ai/Yi-1.5-34B-Chat",
+    "Gemma-2-27b-it": "google/gemma-2-27b-it",
+    "Meta-Llama-2-70B-Chat-HF": "meta-llama/Llama-2-70b-chat-hf",
+    "Meta-Llama-2-7B-Chat-HF": "meta-llama/Llama-2-7b-chat-hf",
+    "Meta-Llama-2-13B-Chat-HF": "meta-llama/Llama-2-13b-chat-hf",
+    "Mistral-7B-Instruct-v0.1": "mistralai/Mistral-7B-Instruct-v0.1",
+    "Mistral-7B-Instruct-v0.2": "mistralai/Mistral-7B-Instruct-v0.2",
+    "Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
+    "Falcon-7b-Instruct": "tiiuae/falcon-7b-instruct",
+    "Starchat2-15b-v0.1": "HuggingFaceH4/starchat2-15b-v0.1",
+    "Gemma-1.1-7b-it": "google/gemma-1.1-7b-it",
+    "Gemma-1.1-2b-it": "google/gemma-1.1-2b-it",
+    "Zephyr-7B-Beta": "HuggingFaceH4/zephyr-7b-beta",
+    "Zephyr-7B-Alpha": "HuggingFaceH4/zephyr-7b-alpha",
+    "Phi-3-mini-128k-instruct": "microsoft/Phi-3-mini-128k-instruct",
+    "Phi-3-mini-4k-instruct": "microsoft/Phi-3-mini-4k-instruct",
 }
 
-
 #Random dog images for error message
 random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
               "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
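Each value in the new model_links table is a Hugging Face Hub repo id that gets passed as the model argument of the OpenAI-compatible client created above. A minimal standalone sketch of that call path outside the Streamlit app (the prompt and variable names here are illustrative, not part of this commit):

import os
from openai import OpenAI

# Same endpoint and token variable as in the diff above.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
)

# Any repo id from model_links can be used as the model.
stream = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3.1-8B-Instruct",
    messages=[{"role": "user", "content": "Hello!"}],  # illustrative prompt
    stream=True,
)

# Print the reply incrementally as chunks arrive; some chunks carry no text.
for chunk in stream:
    print(chunk.choices[0].delta.content or "", end="")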
@@ -101,7 +91,7 @@ models =[key for key in model_links.keys()]
 # Create the sidebar with the dropdown for model selection
 selected_model = st.sidebar.selectbox("Select Model", models)
 
-#Create a temperature slider
+# Create a temperature slider
 temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
 
 
@@ -111,10 +101,8 @@ st.sidebar.button('Reset Chat', on_click=reset_conversation) #Reset button
 
 # Create model description
 st.sidebar.write(f"You're now chatting with **{selected_model}**")
-st.sidebar.markdown(model_info[selected_model]['description'])
-st.sidebar.image(model_info[selected_model]['logo'])
 st.sidebar.markdown("*Generated content may be inaccurate or false.*")
-
+# st.sidebar.markdown("\n[TypeGPT](https://typegpt.net).")
 
 
 
@@ -134,8 +122,8 @@ if st.session_state.prev_option != selected_model:
 repo_id = model_links[selected_model]
 
 
-st.subheader(f'…
-# st.title(f'ChatBot Using {selected_model}')
+st.subheader(f'{selected_model}')
+# # st.title(f'ChatBot Using {selected_model}')
 
 # Set a default model
 if selected_model not in st.session_state:
@@ -155,17 +143,15 @@ for message in st.session_state.messages:
 
 # Accept user input
 if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
-
     # Display user message in chat message container
     with st.chat_message("user"):
         st.markdown(prompt)
     # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
-
+
 
     # Display assistant response in chat message container
     with st.chat_message("assistant"):
-
         try:
             stream = client.chat.completions.create(
                 model=model_links[selected_model],
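The hunk above is truncated right after the streaming call begins. A hedged sketch of how that block is typically completed, assuming the stream is rendered with st.write_stream (which returns the full generated text) and a plain fallback message on failure; the message-history and temperature arguments are assumptions not visible in this diff. The next hunk then appends response to the chat history.

with st.chat_message("assistant"):
    try:
        stream = client.chat.completions.create(
            model=model_links[selected_model],
            # assumed: the conversation so far is sent as context
            messages=[{"role": m["role"], "content": m["content"]}
                      for m in st.session_state.messages],
            temperature=temp_values,  # assumed: sidebar slider value
            stream=True,
        )
        # Renders tokens as they arrive and returns the concatenated text.
        response = st.write_stream(stream)
    except Exception:
        # assumed fallback; the app also keeps random_dog images for error messages
        response = "Something went wrong, please try again."
        st.write(response)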
@@ -196,5 +182,7 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
|
|
196 |
|
197 |
|
198 |
|
199 |
-
|
|
|
|
|
200 |
st.session_state.messages.append({"role": "assistant", "content": response})
|