Spaces:
Sleeping
Sleeping
Ron Vallejo
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -16,6 +16,7 @@ client = OpenAI(
|
|
16 |
# Maps the display name shown in the UI to its Hugging Face repository id.
model_links = {
    "Meta-Llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
}
|
20 |
|
21 |
#Pull info about the model to display
|
@@ -28,24 +29,15 @@ model_info ={
|
|
28 |
{'description':"""The Mistral-7B-Instruct-v0.3 Large Language Model (LLM) is an instruct fine-tuned version of the Mistral-7B-v0.3.\n \
|
29 |
\nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team and has over **7 billion parameters.** \n""",
|
30 |
'logo':'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
|
|
|
|
|
|
|
31 |
}
|
32 |
|
33 |
|
34 |
#Random dog images for error message
|
35 |
random_dog = ["BlueLogoBox.jpg"]
|
36 |
|
37 |
-
|
38 |
-
def reset_conversation():
    """Clear the chat history held in Streamlit session state.

    Invoked as the ``on_click`` callback of the sidebar's reset button;
    always returns ``None``.
    """
    # Re-initialise both history keys to empty lists so the next rerun
    # renders a fresh, message-free chat area.
    st.session_state.messages = []
    st.session_state.conversation = []
    return None
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
# Define the available models (the sidebar selector shows the dict keys).
# Iterating a dict yields its keys directly, so `list(model_links)` replaces
# the redundant `[key for key in model_links.keys()]` comprehension while
# preserving insertion order.
models = list(model_links)
|
51 |
|
@@ -55,10 +47,6 @@ selected_model = st.sidebar.selectbox("Select Model", models)
|
|
55 |
#Create a temperature slider
|
56 |
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
|
57 |
|
58 |
-
|
59 |
-
#Add reset button to clear conversation
|
60 |
-
st.sidebar.button('Reset Chat', on_click=reset_conversation) #Reset button
|
61 |
-
|
62 |
# Create model description
|
63 |
st.sidebar.write(f"You're now chatting with **{selected_model}**")
|
64 |
st.sidebar.markdown(model_info[selected_model]['description'])
|
@@ -117,7 +105,7 @@ if prompt := st.chat_input(f"Type message here..."):
|
|
117 |
],
|
118 |
temperature=temp_values,#0.5,
|
119 |
stream=True,
|
120 |
-
max_tokens=
|
121 |
)
|
122 |
|
123 |
response = st.write_stream(stream)
|
|
|
16 |
# Maps the display name shown in the UI to its Hugging Face repository id.
model_links = {
    "Meta-Llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
    "Gemma-7b": "google/gemma-7b",
}
|
21 |
|
22 |
#Pull info about the model to display
|
|
|
29 |
{'description':"""The Mistral-7B-Instruct-v0.3 Large Language Model (LLM) is an instruct fine-tuned version of the Mistral-7B-v0.3.\n \
|
30 |
\nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team and has over **7 billion parameters.** \n""",
|
31 |
'logo':'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
|
32 |
+
"Gemma-7b":
|
33 |
+
{'description':"""Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models.\n \
|
34 |
+
\nThey are text-to-text, decoder-only large language models, available in English, with open weights, pre-trained variants, and instruction-tuned variants. \n"""},
|
35 |
}
|
36 |
|
37 |
|
38 |
#Random dog images for error message
|
39 |
random_dog = ["BlueLogoBox.jpg"]
|
40 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
41 |
# Define the available models (the sidebar selector shows the dict keys).
# Iterating a dict yields its keys directly, so `list(model_links)` replaces
# the redundant `[key for key in model_links.keys()]` comprehension while
# preserving insertion order.
models = list(model_links)
|
43 |
|
|
|
47 |
#Create a temperature slider
|
48 |
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
|
49 |
|
|
|
|
|
|
|
|
|
50 |
# Create model description
|
51 |
st.sidebar.write(f"You're now chatting with **{selected_model}**")
|
52 |
st.sidebar.markdown(model_info[selected_model]['description'])
|
|
|
105 |
],
|
106 |
temperature=temp_values,#0.5,
|
107 |
stream=True,
|
108 |
+
max_tokens=4000,
|
109 |
)
|
110 |
|
111 |
response = st.write_stream(stream)
|