import os

import openai
import requests
import streamlit as st
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI as l_OpenAI
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

from helpers.foundation_models import *
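# The star import above is assumed to provide the helpers used below
# (signatures inferred from their call sites in this file, not from the
# module source):
#   call_chatgpt(query: str) -> str     -- wraps an OpenAI chat completion
#   call_langchain(prompt: str) -> str  -- LangChain agent with a web-search tool
#   llama2_7b_ysa(prompt: str) -> str   -- queries a hosted Llama 2 endpoint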
# API Keys
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
SERPAPI_API_KEY = os.environ["SERPAPI_API_KEY"]
openai_client = openai.OpenAI(api_key=OPENAI_API_KEY)
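# Minimal sketch (an assumption, not the helpers' actual source) of how a
# wrapper like call_chatgpt could be built on top of openai_client:
#
#     def call_chatgpt(query: str) -> str:
#         response = openai_client.chat.completions.create(
#             model="gpt-3.5-turbo",
#             messages=[{"role": "user", "content": query}],
#         )
#         return response.choices[0].message.content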
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
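# st.session_state persists across Streamlit reruns, so the conversation
# survives every widget interaction until "Clear Conversation" empties it.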
# Sidebar
with st.sidebar.expander("Instructions"):
    st.markdown(
        r"""
# Streamlit + Hugging Face Demo 🤗

## Introduction

This demo showcases how to interact with Large Language Models (LLMs) on Hugging Face using Streamlit.
"""
    )
option = st.sidebar.selectbox(
    "Which task do you want to do?",
    (
        "Sentiment Analysis",
        "Medical Summarization",
        "Llama2 on YSA",
        "ChatGPT",
        "ChatGPT (with Google)",
    ),
)
clear_button = st.sidebar.button("Clear Conversation", key="clear")
st.sidebar.write("---")
st.sidebar.markdown("Yiqiao Yin: [Site](https://www.y-yin.io/) | [LinkedIn](https://www.linkedin.com/in/yiqiaoyin/)")
# Reset everything
if clear_button:
    st.session_state.messages = []
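    # Note: this only resets the message history; the task selected in the
    # sidebar and other widget state are left untouched.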
# React to user input
if prompt := st.chat_input("What is up?"):
    # Display user message in chat message container
    st.chat_message("user").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
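    # st.chat_input returns None until the user submits text, so the walrus
    # assignment above gates the whole handler on a non-empty prompt.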
    # Execute options
    with st.spinner("Wait for it..."):
        if option == "Sentiment Analysis":
            pipe_sentiment_analysis = pipeline("sentiment-analysis")
            if prompt:
                out = pipe_sentiment_analysis(prompt)
                final_response = f"""
Prompt: {prompt}

Sentiment: {out[0]["label"]}

Score: {out[0]["score"]}
"""
elif option == "Medical Summarization":
pipe_summarization = pipeline(
"summarization", model="Falconsai/medical_summarization"
)
if prompt:
out = pipe_summarization(prompt)
final_response = out[0]["summary_text"]
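            # Summarization pipelines likewise return a list of dicts with a
            # "summary_text" key. Both pipelines above are rebuilt on every
            # prompt; wrapping their construction in @st.cache_resource would
            # avoid reloading model weights on each rerun.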
elif option == "Llama2 on YSA":
if prompt:
try:
out = llama2_7b_ysa(prompt)
engineered_prompt = f"""
The user asked the question: {prompt}
We have found relevant content: {out}
Answer the user question based on the above content in paragraphs.
"""
final_response = call_chatgpt(query=engineered_prompt)
except:
final_response = "Sorry, the inference endpoint is temporarily down. π"
elif option == "ChatGPT":
if prompt:
out = call_chatgpt(query=prompt)
final_response = out
elif option == "ChatGPT (with Google)":
if prompt:
ans_langchain = call_langchain(prompt)
prompt = f"""
Based on the internet search results: {ans_langchain};
Answer the user question: {prompt}
"""
out = call_chatgpt(query=prompt)
final_response = out
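            # Minimal sketch (an assumption based on the imports at the top of
            # this file) of what call_langchain likely wires together:
            #
            #     def call_langchain(prompt: str) -> str:
            #         llm = l_OpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)
            #         tools = load_tools(
            #             ["serpapi"], llm=llm, serpapi_api_key=SERPAPI_API_KEY
            #         )
            #         agent = initialize_agent(
            #             tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION
            #         )
            #         return agent.run(prompt)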
        else:
            final_response = ""
    response = final_response
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        st.markdown(response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})