import os

import openai
import streamlit as st
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI as l_OpenAI
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

from helpers.foundation_models import *

OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
SERPAPI_API_KEY = os.environ["SERPAPI_API_KEY"]
openai_client = openai.OpenAI(api_key=OPENAI_API_KEY)

# tokenizer = AutoTokenizer.from_pretrained("eagle0504/llama-2-7b-miniguanaco")
# model = AutoModelForCausalLM.from_pretrained("eagle0504/llama-2-7b-miniguanaco")


# def generate_response_from_llama2(query):
#     # Tokenize the input text
#     input_ids = tokenizer.encode(query, return_tensors="pt")
#
#     # Generate a response
#     # Adjust parameters like max_length according to your needs
#     output = model.generate(
#         input_ids, max_length=50, num_return_sequences=1, temperature=0.7
#     )
#
#     # Decode the output to human-readable text
#     generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
#
#     return generated_text


# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Sidebar: instructions, task selector, clear button, and author links
with st.sidebar.expander("Instructions"):
    st.markdown(
        r"""
# 🌟 Streamlit + Hugging Face Demo 🤖

## Introduction 📖

This demo showcases how to interact with Large Language Models (LLMs) on Hugging Face using Streamlit.
"""
    )

option = st.sidebar.selectbox(
    "Which task do you want to do?",
    ("Sentiment Analysis", "Medical Summarization", "ChatGPT", "ChatGPT (with Google)"),
)
clear_button = st.sidebar.button("Clear Conversation", key="clear")
st.sidebar.write("---")
st.sidebar.markdown(
    "Yiqiao Yin: [Site](https://www.y-yin.io/) | [LinkedIn](https://www.linkedin.com/in/yiqiaoyin/)"
)

# Reset the conversation when the clear button is clicked
if clear_button:
    st.session_state.messages = []

# React to user input (the walrus operator guarantees `prompt` is non-empty here)
if prompt := st.chat_input("What is up?"):
    # Display user message in chat message container
    st.chat_message("user").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.spinner("Wait for it..."):
        if option == "Sentiment Analysis":
            pipe_sentiment_analysis = pipeline("sentiment-analysis")
            out = pipe_sentiment_analysis(prompt)
            doc = (
                f"Prompt: {prompt}\n\n"
                f"Sentiment: {out[0]['label']}\n\n"
                f"Score: {out[0]['score']}"
            )
        elif option == "Medical Summarization":
            pipe_summarization = pipeline(
                "summarization", model="Falconsai/medical_summarization"
            )
            out = pipe_summarization(prompt)
            doc = out[0]["summary_text"]
        # elif option == "Llama2":
        #     doc = generate_response_from_llama2(query=prompt)
        elif option == "ChatGPT":
            doc = call_chatgpt(query=prompt)
        elif option == "ChatGPT (with Google)":
            ans_langchain = call_langchain(prompt)
            prompt = f"""
            Based on the internet search results: {ans_langchain},
            answer the user's question: {prompt}
            """
            doc = call_chatgpt(query=prompt)
        else:
            doc = ""

    response = doc

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        st.markdown(response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
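
# ---------------------------------------------------------------------------
# For reference: `call_chatgpt` and `call_langchain` are pulled in by the
# wildcard import from `helpers.foundation_models` above. The commented-out
# sketch below shows one plausible shape for those helpers, assuming the
# OpenAI chat completions API and a LangChain agent with SerpAPI search; it
# is an illustrative assumption, not the actual helper implementation.
#
# def call_chatgpt(query: str, model: str = "gpt-3.5-turbo") -> str:
#     """Send `query` to the chat completions endpoint and return the reply text."""
#     response = openai_client.chat.completions.create(
#         model=model,
#         messages=[{"role": "user", "content": query}],
#     )
#     return response.choices[0].message.content
#
#
# def call_langchain(prompt: str) -> str:
#     """Answer `prompt` with a LangChain agent that can search the web via SerpAPI."""
#     llm = l_OpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)
#     tools = load_tools(["serpapi"], llm=llm, serpapi_api_key=SERPAPI_API_KEY)
#     agent = initialize_agent(
#         tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=False
#     )
#     return agent.run(prompt)
# ---------------------------------------------------------------------------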