Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,7 +1,6 @@
 import streamlit as st
 from openai import OpenAI
 import os
-import numpy as np
 from dotenv import load_dotenv
 import random
 
@@ -30,21 +29,21 @@ model_links = {
     "Gemma-7b-it": "google/gemma-7b-it",
 }
 
-#
+# Model information including logos
 model_info = {
     "Meta-Llama-3.1-8B": {
         'description': """The Llama (3.1) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.
-            \nIt was created by the [**Meta's AI**](https://llama.meta.com/) team and has over **8 billion parameters.**\n"""
+            \nIt was created by the [**Meta's AI**](https://llama.meta.com/) team and has over **8 billion parameters.**\n""",
         "logo": "llama_logo.gif",
     },
     "Mistral-7B-Instruct-v0.3": {
         'description': """The Mistral-7B-Instruct-v0.3 is an instruct-tuned version of Mistral-7B.
-            \nIt was created by [**Mistral AI**](https://mistral.ai/) and has **7 billion parameters.**\n"""
+            \nIt was created by [**Mistral AI**](https://mistral.ai/) and has **7 billion parameters.**\n""",
         "logo": "mistrail.jpeg",
     },
     "Gemma-7b-it": {
         'description': """Gemma is a family of lightweight, state-of-the-art open models from Google.
-            \nThe 7B-it variant is instruction-tuned and has **7 billion parameters.**\n"""
+            \nThe 7B-it variant is instruction-tuned and has **7 billion parameters.**\n""",
         "logo": "gemma.jpeg",
     }
 }
@@ -61,9 +60,10 @@ def main():
 
     st.markdown(f'_powered_ by ***:violet[{selected_model}]***')
 
-    # Display model info
+    # Display model info and logo
     st.sidebar.write(f"You're now chatting with **{selected_model}**")
     st.sidebar.markdown(model_info[selected_model]['description'])
+    st.sidebar.image(model_info[selected_model]['logo'], use_column_width=True)
     st.sidebar.markdown("*Generated content may be inaccurate or false.*")
 
     # Initialize chat history