import os
import streamlit as st
from langchain_huggingface import HuggingFaceEndpoint
# Set the environment variable "HUGGINGFACEHUB_API_TOKEN" to the value of sec_key
sec_key = ""
os.environ["HUGGINGFACEHUB_API_TOKEN"] = sec_key
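# An access token can be created on huggingface.co under Settings -> Access Tokens.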
# Specify the repository IDs of the Hugging Face models you want to use
repo_id_mistral = "mistralai/Mistral-7B-Instruct-v0.3"
repo_id_llama3 = "meta-llama/Meta-Llama-3-8B" # Replace with the actual repo ID for Llama3
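# Note: meta-llama repositories are gated on the Hugging Face Hub, so access must
# be requested for your account first. Meta-Llama-3-8B is the base model; an
# instruct-tuned variant such as Meta-Llama-3-8B-Instruct may follow
# conversational prompts more reliably.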
# Streamlit app layout
st.title("🤖 AI Query Wizard 🧙")
# Custom background and styling (the CSS itself was left empty in this listing)
st.markdown(
    """
    """,
    unsafe_allow_html=True,
)
# Input text area for user query with enhanced instructions
user_query = st.text_area(
    "✨ Enter your magical query:",
    height=100,
    help="""
    **Enhanced Prompting Instructions:**
    - Be clear and specific about what you want to know.
    - Use natural language to describe your query.
    - If asking a question, make sure it is well-formed and unambiguous.
    - For best results, provide relevant context or background information.
    """,
)
# Slider for adjusting the temperature
temperature = st.slider(
    "Temperature",
    min_value=0.1,
    max_value=1.0,
    value=0.7,
    step=0.1,
    help="""
    **Temperature:**
    - Lower values (e.g., 0.1) make the output more deterministic and focused.
    - Higher values (e.g., 1.0) make the output more diverse and creative.
    """,
)
# Slider for adjusting the max length
max_length = st.slider(
    "Max Length",
    min_value=32,
    max_value=256,
    value=128,
    step=32,
    help="""
    **Max Length:**
    - Controls the maximum number of tokens in the generated response.
    - Adjust based on the desired length of the response.
    """,
)
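# Note: the endpoints below are re-created on every click of the button. A
# minimal sketch of one way to reuse them across Streamlit reruns, using
# st.cache_resource (the helper name is illustrative, not part of the app):
#
#     @st.cache_resource
#     def get_endpoint(repo_id: str, max_new_tokens: int, temperature: float):
#         return HuggingFaceEndpoint(
#             repo_id=repo_id,
#             max_new_tokens=max_new_tokens,
#             temperature=temperature,
#             huggingfacehub_api_token=sec_key,
#         )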
# Button to trigger the query
if st.button("🪄 Cast Spell"):
if user_query:
# Initialize the HuggingFaceEndpoint for Mistral
llm_mistral = HuggingFaceEndpoint(
repo_id=repo_id_mistral,
max_length=max_length,
temperature=temperature,
token=sec_key
)
# Initialize the HuggingFaceEndpoint for Llama3
llm_llama3 = HuggingFaceEndpoint(
repo_id=repo_id_llama3,
max_length=max_length,
temperature=temperature,
token=sec_key
)
# Invoke both models with the user's query
response_mistral = llm_mistral.invoke(user_query)
response_llama3 = llm_llama3.invoke(user_query)
# Display the responses side by side
col1, col2 = st.columns(2)
with col1:
st.markdown("🔮 Response from Mistral-7B-Instruct-v0.3:", unsafe_allow_html=True)
st.markdown(f"{response_mistral}", unsafe_allow_html=True)
with col2:
st.markdown("🔮 Response from Llama3:", unsafe_allow_html=True)
st.markdown(f"{response_llama3}", unsafe_allow_html=True)
# Save query and responses to session state
if 'history' not in st.session_state:
st.session_state.history = []
st.session_state.history.append((user_query, response_mistral, response_llama3))
else:
st.write("🚨 Please enter a query to cast your spell.")
# Button to clear history
if st.button("🗑️ Clear History"):
if 'history' in st.session_state:
st.session_state.history = []
st.success("History cleared!")
# Display history of queries and responses
if "history" in st.session_state and st.session_state.history:
    st.subheader("📜 Scroll of Spells Cast")
    for query, response_mistral, response_llama3 in st.session_state.history:
        st.write(f"**Query:** {query}")
        col1, col2 = st.columns(2)
        with col1:
            st.markdown(f"**Response from Mistral-7B-Instruct-v0.3:** {response_mistral}")
        with col2:
            st.markdown(f"**Response from Llama3:** {response_llama3}")
        st.write("---")