Gokulnath2003 committed on
Update app.py
app.py
CHANGED
@@ -2,11 +2,10 @@ import numpy as np
 import streamlit as st
 from openai import OpenAI
 import os
-import
-from dotenv import load_dotenv, dotenv_values
+from dotenv import load_dotenv
 load_dotenv()
 
-#
+# Initialize the OpenAI client
 client = OpenAI(
     base_url="https://api-inference.huggingface.co/v1",
     api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')  # Replace with your token
@@ -20,42 +19,33 @@ model_links = {
 # Pull info about the model to display
 model_info = {
     "Meta-Llama-3-8B": {
-        'description': """The Llama (3) model is a **Large Language Model (LLM)**
-        \
-
+        'description': """The Llama (3) model is a **Large Language Model (LLM)** designed to assist with question and answer interactions.\n
+        \nThis model was created by Meta's AI team and has over 8 billion parameters.\n
+        **Training**: The model was fine-tuned on science textbooks from the NCERT curriculum using Docker AutoTrain to ensure it can provide relevant and accurate responses in the education domain.\n
+        **Purpose**: This version of Llama has been trained specifically for educational purposes, focusing on answering science-related queries in a clear and simple manner to help students and teachers alike.\n"""
     }
 }
 
-#
-random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg", "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg"]
-
+# Reset the conversation
 def reset_conversation():
-    '''Resets Conversation'''
     st.session_state.conversation = []
     st.session_state.messages = []
     return None
 
-#
-
-
-# Create the sidebar with the dropdown for model selection
-selected_model = st.sidebar.selectbox("Select Model", models)
-
-# Custom description for SciMom
-st.sidebar.write("Built for my mom, with love. This model is pretrained with textbooks of Science NCERT.")
-st.sidebar.write("Model used: Meta Llama, trained using: Docker AutoTrain.")
+# App title and description
+st.title("Sci-Mom 👩‍🏫")
+st.subheader("AI chatbot for solving your doubts :)")
 
-#
-
+# Custom description for SciMom in the sidebar
+st.sidebar.write("Built for my mom, with love ❤️. This model is pretrained with textbooks of Science NCERT.")
+st.sidebar.write("Base-Model used: Meta Llama, trained using: Docker AutoTrain.")
 
-# Add
-st.sidebar.
+# Add technical details in the sidebar
+st.sidebar.markdown(model_info["Meta-Llama-3-8B"]['description'])
+st.sidebar.markdown("*By Gokulnath*")
 
-#
-
-st.sidebar.markdown(model_info[selected_model]['description'])
-st.sidebar.image(model_info[selected_model]['logo'])
-st.sidebar.markdown("*Generated content may be inaccurate or false.*")
+# If model selection was needed (now removed)
+selected_model = "Meta-Llama-3-8B"  # Only one model remains
 
 if "prev_option" not in st.session_state:
     st.session_state.prev_option = selected_model
@@ -68,12 +58,6 @@ if st.session_state.prev_option != selected_model:
 # Pull in the model we want to use
 repo_id = model_links[selected_model]
 
-st.subheader(f'AI - {selected_model}')
-
-# Set a default model
-if selected_model not in st.session_state:
-    st.session_state[selected_model] = model_links[selected_model]
-
 # Initialize chat history
 if "messages" not in st.session_state:
     st.session_state.messages = []
@@ -84,7 +68,7 @@ for message in st.session_state.messages:
         st.markdown(message["content"])
 
 # Accept user input
-if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
+if prompt := st.chat_input("Ask Scimom!"):
     # Display user message in chat message container
     with st.chat_message("user"):
         st.markdown(prompt)
@@ -99,17 +83,15 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
                 {"role": m["role"], "content": m["content"]}
                 for m in st.session_state.messages
             ],
-            temperature=
+            temperature=0.5,  # Default temperature setting
            stream=True,
             max_tokens=3000,
         )
        response = st.write_stream(stream)
 
     except Exception as e:
-        response = "😵‍💫
+        response = "😵‍💫 Something went wrong. Please try again later."
         st.write(response)
-        random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
-        st.image(random_dog_pick)
         st.write("This was the error message:")
         st.write(e)
 
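
A note for anyone reproducing this Space locally: the commit keeps load_dotenv() together with os.environ.get('HUGGINGFACEHUB_API_TOKEN'), so the token must come either from the environment (Space secrets) or from a .env file next to app.py. A minimal sketch of that setup; the fail-fast check and its error message are illustrative additions, not part of the commit:

    import os

    from dotenv import load_dotenv

    # Reads a .env file in the working directory into os.environ,
    # e.g. a line: HUGGINGFACEHUB_API_TOKEN=hf_xxxxxxxx
    load_dotenv()

    # Fail fast with a clear message instead of letting the OpenAI
    # client raise an opaque authentication error later.
    token = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
    if not token:
        raise RuntimeError(
            "HUGGINGFACEHUB_API_TOKEN is not set; add it to .env or the Space secrets."
        )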
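
The streaming code around the temperature=0.5 change is hard to read as a diff fragment, so here is a self-contained sketch of the pattern the app relies on: an OpenAI-compatible client pointed at the Hugging Face inference endpoint, consumed chunk by chunk. The model id below is an assumed example; the real value comes from model_links, which this diff does not show.

    import os

    from openai import OpenAI

    client = OpenAI(
        base_url="https://api-inference.huggingface.co/v1",
        api_key=os.environ["HUGGINGFACEHUB_API_TOKEN"],
    )

    # stream=True yields incremental chunks instead of one final message;
    # st.write_stream in the app consumes the same iterator this loop prints.
    stream = client.chat.completions.create(
        model="meta-llama/Meta-Llama-3-8B-Instruct",  # assumed example id
        messages=[{"role": "user", "content": "Why is the sky blue?"}],
        temperature=0.5,
        max_tokens=3000,
        stream=True,
    )

    for chunk in stream:
        delta = chunk.choices[0].delta.content  # None for some control chunks
        if delta:
            print(delta, end="", flush=True)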