import os
import random

import streamlit as st
from dotenv import load_dotenv
from openai import OpenAI
# Load environment variables
load_dotenv()
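# A minimal .env sketch (assumed layout; API_KEY is the variable read below,
# the token value shown is a placeholder):
#   API_KEY=hf_xxxxxxxxxxxxxxxxxxxx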
# Constants
MAX_TOKENS = 4000
DEFAULT_TEMPERATURE = 0.5
# Initialize an OpenAI-compatible client against the Hugging Face Inference API
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get('API_KEY'),  # Hugging Face token loaded from .env
)
# Supported models: display name -> Hugging Face repo id
model_links = {
    "Meta-Llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "Falcon-7b-Instruct": "tiiuae/falcon-7b-instruct",
}
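# To expose another model, add a "Display Name": "org/repo-id" entry above;
# the repo must be served by the Hugging Face Inference API for the call below to work.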
# Fallback image(s) shown alongside the error message
random_dog_images = ["broken_llama3.jpeg"]
def reset_conversation():
    '''
    Resets Conversation
    '''
    st.session_state.conversation = []
    st.session_state.messages = []
    return None
st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button
def main():
    st.header('Multi-Models')

    # Sidebar for model selection and temperature
    selected_model = st.sidebar.selectbox("Select Model", list(model_links.keys()))
    temperature = st.sidebar.slider('Select a temperature value', 0.0, 1.0, DEFAULT_TEMPERATURE)

    # Reset the chat whenever the user switches models
    if "prev_option" not in st.session_state:
        st.session_state.prev_option = selected_model
    if st.session_state.prev_option != selected_model:
        st.session_state.prev_option = selected_model
        reset_conversation()  # also clears st.session_state.messages

    st.markdown(f'_powered_ by ***:violet[{selected_model}]***')

    # Display model info and disclaimer
    st.sidebar.write(f"You're now chatting with **{selected_model}**")
    st.sidebar.markdown("*Generated content may be inaccurate or false.*")

    # Initialize and replay the chat history so prior turns persist across reruns
    if "messages" not in st.session_state:
        st.session_state.messages = []
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # Chat input and response
    if prompt := st.chat_input("Type message here..."):
        process_user_input(client, prompt, selected_model, temperature)
def process_user_input(client, prompt, selected_model, temperature):
    # Display and record the user message
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Generate and display the assistant response
    with st.chat_message("assistant"):
        try:
            # Minimal streaming sketch: a standard OpenAI chat-completions call
            # against the Hugging Face endpoint configured above.
            stream = client.chat.completions.create(
                model=model_links[selected_model],
                messages=st.session_state.messages,
                temperature=temperature,
                max_tokens=MAX_TOKENS,
                stream=True,
            )
            response = st.write_stream(stream)
        except Exception as error:
            response = """😵‍💫 Looks like someone unplugged something!
            \n Either the model space is being updated or something is down."""
            st.write(response)
            random_dog_pick = random.choice(random_dog_images)
            st.image(random_dog_pick)
            st.write("This was the error message:")
            st.write(str(error))
    st.session_state.messages.append({"role": "assistant", "content": response})
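
# Entry point: run the app when Streamlit executes this script
if __name__ == "__main__":
    main()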