Ron Vallejo committed on
Fixed np issue. Added Google Gemma model
app.py
CHANGED
@@ -3,23 +3,24 @@ from openai import OpenAI
 import os
 import sys
 from dotenv import load_dotenv, dotenv_values
+import numpy as np
 
 load_dotenv()
 
-#
+# Initialize the client
 client = OpenAI(
-
-
-)
+    base_url="https://api-inference.huggingface.co/v1",
+    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')  # Replace with your token
+)
 
-#Create supported models
-model_links ={
+# Create supported models
+model_links = {
     "Meta-Llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
     "Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
     "Gemma-7b-it": "google/gemma-7b-it",
 }
 
-#Pull info about the model to display
+# Pull info about the model to display
 model_info = {
     "Meta-Llama-3.1-8B": {
         'description': """The Llama (3.1) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.
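As a quick sanity check on the client setup added in this hunk, here is a minimal sketch that exercises it outside Streamlit. It assumes HUGGINGFACEHUB_API_TOKEN is set (e.g. in the .env file that load_dotenv() reads) and uses one of the model_links entries; the prompt is just an illustration:

import os
from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()  # pulls HUGGINGFACEHUB_API_TOKEN from a local .env file

client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
)

# One-off, non-streaming request against one of the model_links entries
completion = client.chat.completions.create(
    model="google/gemma-7b-it",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=64,
)
print(completion.choices[0].message.content)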
@@ -38,18 +39,17 @@ model_info = {
     },
 }
 
-
-#Random dog images for error message
+# Random dog images for error message
 random_dog = ["BlueLogoBox.jpg"]
 
 # Define the available models
-models =[key for key in model_links.keys()]
+models = [key for key in model_links.keys()]
 
 # Create the sidebar with the dropdown for model selection
 selected_model = st.sidebar.selectbox("Select Model", models)
 
-#Create a temperature slider
-temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0,
+# Create a temperature slider
+temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
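A note on the slider change above: st.slider's positional arguments after the label are min_value, max_value, and the default value, so the new line simply supplies 0.5 as the starting temperature. A tiny sketch:

import streamlit as st

# label, min_value, max_value, default value
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
st.sidebar.write(f"temperature = {temp_values}")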
@@ -65,29 +65,27 @@ if st.session_state.prev_option != selected_model:
     st.write(f"Changed to {selected_model}")
     st.session_state.prev_option = selected_model
 
-#Pull in the model we want to use
+# Pull in the model we want to use
 repo_id = model_links[selected_model]
 
-st.header(
+st.header('Liahona.AI')
 st.markdown(f'_powered_ by ***:violet[{selected_model}]***')
-# st.title(f'ChatBot Using {selected_model}')
 
 # Set a default model
 if selected_model not in st.session_state:
-    st.session_state[selected_model] = model_links[selected_model]
+    st.session_state[selected_model] = model_links[selected_model]
 
 # Initialize chat history
 if "messages" not in st.session_state:
     st.session_state.messages = []
 
-
 # Display chat messages from history on app rerun
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])
 
 # Accept user input
-if prompt := st.chat_input(f"Type message here..."):
+if prompt := st.chat_input("Type message here..."):
 
     # Display user message in chat message container
     with st.chat_message("user"):
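For context on the chat plumbing this hunk touches: Streamlit reruns the whole script on every interaction, so the history has to live in st.session_state and be replayed on each rerun. A minimal sketch of that pattern, independent of any model backend:

import streamlit as st

# History must survive reruns, so it lives in session state
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay prior turns on every rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# chat_input returns None until the user submits something
if prompt := st.chat_input("Type message here..."):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)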
@@ -95,36 +93,34 @@ if prompt := st.chat_input(f"Type message here..."):
     # Add user message to chat history
     st.session_state.messages.append({"role": "user", "content": prompt})
 
-
     # Display assistant response in chat message container
     with st.chat_message("assistant"):
 
         try:
             stream = client.chat.completions.create(
-                model=
+                model=repo_id,
                 messages=[
                     {"role": m["role"], "content": m["content"]}
                     for m in st.session_state.messages
                 ],
-                temperature=temp_values
+                temperature=temp_values,
                 stream=True,
                 max_tokens=4000,
             )
-
+
             response = st.write_stream(stream)
 
         except Exception as e:
-
-
-
-
-
-
-            \n Here's a random pic of a 🐶:"
+            response = """😵‍💫 Looks like someone unplugged something!
+            \n Either the model space is being updated or something is down.
+            \n
+            \n Try again later.
+            \n
+            \n Here's a random pic of a 🐶:"""
             st.write(response)
-            random_dog_pick = 'https://random.dog/'+ random_dog[np.random.randint(len(random_dog))]
+            random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
             st.image(random_dog_pick)
             st.write("This was the error message:")
             st.write(e)
-
+
             st.session_state.messages.append({"role": "assistant", "content": response})
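On the "np issue" from the commit title: the error fallback indexes random_dog with np.random.randint, but numpy was presumably never imported, so any API failure would have raised a NameError inside the except block instead of showing the fallback message; the new "import numpy as np" fixes that. A small sketch of the fixed picker (note that with the current one-element list it always resolves to the same image):

import numpy as np

random_dog = ["BlueLogoBox.jpg"]

# np.random.randint(n) draws from [0, n); with len(random_dog) == 1 this is always 0
random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
print(random_dog_pick)  # https://random.dog/BlueLogoBox.jpg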
|