import os

import openai
import streamlit as st
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI as l_OpenAI
from transformers import pipeline
from transformers import AutoTokenizer, AutoModelForCausalLM

from helpers.foundation_models import *
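# call_chatgpt and call_langchain used below are assumed to come from helpers.foundation_models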

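# API keys are read from the environment; os.environ[...] raises KeyError at startup if a key is missing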
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
SERPAPI_API_KEY = os.environ["SERPAPI_API_KEY"]
openai_client = openai.OpenAI(api_key=OPENAI_API_KEY)


# tokenizer = AutoTokenizer.from_pretrained("eagle0504/llama-2-7b-miniguanaco")
# model = AutoModelForCausalLM.from_pretrained("eagle0504/llama-2-7b-miniguanaco")


# def generate_response_from_llama2(query):

#     # Tokenize the input text
#     input_ids = tokenizer.encode(query, return_tensors="pt")

#     # Generate a response
#     # Adjust the parameters like max_length according to your needs
#     output = model.generate(input_ids, max_length=50, num_return_sequences=1, temperature=0.7)

#     # Decode the output to human-readable text
#     generated_text = tokenizer.decode(output[0], skip_special_tokens=True)

#     # output
#     return generated_text


# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []


# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])


with st.sidebar.expander("Instructions"):
    st.markdown(
        r"""
        # 🌟 Streamlit + Hugging Face Demo 🤖

        ## Introduction 📖

        This demo showcases how to interact with Large Language Models (LLMs) on Hugging Face using Streamlit.
        """
    )


option = st.sidebar.selectbox(
    "Which task do you want to do?",
    ("Sentiment Analysis", "Medical Summarization", "ChatGPT", "ChatGPT (with Google)"),
)


clear_button = st.sidebar.button("Clear Conversation", key="clear")

st.sidebar.write("---")

st.sidebar.markdown("Yiqiao Yin: [Site](https://www.y-yin.io/) | [LinkedIn](https://www.linkedin.com/in/yiqiaoyin/)")


# Reset everything
if clear_button:
    st.session_state.messages = []


# React to user input
if prompt := st.chat_input("What is up?"):
    # Display user message in chat message container
    st.chat_message("user").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.spinner("Wait for it..."):
        if option == "Sentiment Analysis":
            pipe_sentiment_analysis = pipeline("sentiment-analysis")
            if prompt:
                out = pipe_sentiment_analysis(prompt)
                doc = f"""
                    Prompt: {prompt}
                    Sentiment: {out[0]["label"]}
                    Score: {out[0]["score"]}
                """
        elif option == "Medical Summarization":
            pipe_summarization = pipeline(
                "summarization", model="Falconsai/medical_summarization"
            )
            if prompt:
                out = pipe_summarization(prompt)
                doc = out[0]["summary_text"]
        # elif option == "Llama2":
        #     if prompt:
        #         out = generate_response_from_llama2(query=prompt)
        #         doc = out
        elif option == "ChatGPT":
            if prompt:
                out = call_chatgpt(query=prompt)
                doc = out
        elif option == "ChatGPT (with Google)":
            if prompt:
                ans_langchain = call_langchain(prompt)
                prompt = f"""
                    Based on the internet search results: {ans_langchain};

                    Answer the user question: {prompt}
                """
                out = call_chatgpt(query=prompt)
                doc = out
        else:
            doc = ""

    response = f"{doc}"
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        st.markdown(response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})