"""Streamlit chat demo.

Lets the user pick one of four tasks in the sidebar and chat with it:
sentiment analysis (Hugging Face default pipeline), medical summarization
(Falconsai/medical_summarization), plain ChatGPT, or ChatGPT augmented with
Google search results obtained through a LangChain agent.

Requires the OPENAI_API_KEY and SERPAPI_API_KEY environment variables.
"""

import os

import openai
import streamlit as st
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI as l_OpenAI
from transformers import pipeline

# Provides call_chatgpt and call_langchain (and possibly more) — wildcard
# import kept for backward compatibility with the rest of the project.
from helpers.foundation_models import *

# Fail fast with a clear KeyError naming the missing variable.
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
SERPAPI_API_KEY = os.environ["SERPAPI_API_KEY"]

openai_client = openai.OpenAI(api_key=OPENAI_API_KEY)


@st.cache_resource(show_spinner=False)
def _load_sentiment_pipeline():
    """Load the default sentiment-analysis pipeline once per server process.

    Without caching, Streamlit re-instantiates (and potentially re-downloads)
    the model on every rerun, i.e. on every chat message.
    """
    return pipeline("sentiment-analysis")


@st.cache_resource(show_spinner=False)
def _load_summarization_pipeline():
    """Load the medical summarization model once per server process."""
    return pipeline("summarization", model="Falconsai/medical_summarization")


# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Collapsible instructions panel in the sidebar. (Previously the expander was
# created in the main page while the markdown went to the sidebar, leaving an
# empty expander behind.)
with st.sidebar.expander("Instructions"):
    st.markdown(
        r"""
        # 🌟 Streamlit + Hugging Face Demo 🤖

        ## Introduction 📖

        This demo showcases how to interact with Large Language Models (LLMs)
        on Hugging Face using Streamlit.
        """
    )

option = st.sidebar.selectbox(
    "Which task do you want to do?",
    ("Sentiment Analysis", "Medical Summarization", "ChatGPT", "ChatGPT (with Google)"),
)

clear_button = st.sidebar.button("Clear Conversation", key="clear")

# Reset everything
if clear_button:
    st.session_state.messages = []

# React to user input; the walrus guard guarantees prompt is non-empty below.
if prompt := st.chat_input("What is up?"):
    # Display user message in chat message container
    st.chat_message("user").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.spinner("Wait for it..."):
        doc = ""  # default so every branch leaves doc bound
        if option == "Sentiment Analysis":
            out = _load_sentiment_pipeline()(prompt)
            doc = f"""
                Prompt: {prompt}
                Sentiment: {out[0]["label"]}
                Score: {out[0]["score"]}
            """
        elif option == "Medical Summarization":
            out = _load_summarization_pipeline()(prompt)
            doc = out[0]["summary_text"]
        elif option == "ChatGPT":
            doc = call_chatgpt(query=prompt)
        elif option == "ChatGPT (with Google)":
            # Ground the answer in live search results from the LangChain agent.
            ans_langchain = call_langchain(prompt)
            prompt = f"""
                Based on the internet search results: {ans_langchain};
                Answer the user question: {prompt}
            """
            doc = call_chatgpt(query=prompt)

        response = doc

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        st.markdown(response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})