Spaces:
Sleeping
Sleeping
first commit
Browse files- app.py +121 -0
- helpers/foundation_models.py +38 -0
- requirements.txt +3 -0
app.py
ADDED
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os

import openai
import streamlit as st
from transformers import pipeline

from helpers.foundation_models import *

# OpenAI client for the "ChatGPT" task; requires OPENAI_API_KEY in the environment.
openai_client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"])


st.title("🚀 Streamlit + Hugging Face Demo 🤗")


# Initialize chat history (persisted across Streamlit reruns).
if "messages" not in st.session_state:
    st.session_state.messages = []


# Display chat messages from history on app rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])


# NOTE(review): the expander context is not actually used by the call below —
# st.sidebar.markdown writes to the sidebar regardless. Kept as-is to preserve the UI.
with st.expander("Instructions"):
    st.sidebar.markdown(
        r"""
# 🚀 Streamlit + Hugging Face Demo 🤗

## Introduction 📖

This demo showcases how to interact with Large Language Models (LLMs) on Hugging Face using Streamlit.

## Setup 🛠️

1. Install Requirements:

- Streamlit: `pip install streamlit`
- Hugging Face Transformers: `pip install transformers`

## Running the Demo 🚀

1. Clone the repository: `git clone <repo-url>`
2. Navigate to the project directory: `cd <project-directory>`
3. Run Streamlit: `streamlit run app.py`

## Features 🌟

- **Text Input** 📝: Enter your query in the text box.
- **Model Selection** 🤖: Choose an LLM from a dropdown menu.
- **Submit Button** ✅: Click to submit your query to the model.
- **Responses** 💬: View the model's responses in real-time.

## Contributing 🤝

Feel free to fork the repository, make changes, and submit pull requests!

## License 📄

This project is licensed under the MIT License.

## Contact 💬

For any queries, contact us at `email@example.com`.

## Happy Coding! 🎉
"""
    )


option = st.sidebar.selectbox(
    "Which task do you want to do?",
    ("Sentiment Analysis", "Medical Summarization", "ChatGPT"),
)


clear_button = st.sidebar.button("Clear Conversation", key="clear")


# Reset everything
if clear_button:
    st.session_state.messages = []


# React to user input.
if prompt := st.chat_input("What is up?"):
    # Display user message in chat message container.
    st.chat_message("user").markdown(prompt)
    # Add user message to chat history.
    st.session_state.messages.append({"role": "user", "content": prompt})

    # BUGFIX: initialize `doc` so `response` below is always defined, even if
    # no branch runs (the original `else: None` left `doc` unbound -> NameError).
    doc = ""

    # NOTE(review): pipelines are re-created on every message; consider caching
    # with st.cache_resource if model load time becomes a problem.
    if option == "Sentiment Analysis":
        pipe_sentiment_analysis = pipeline("sentiment-analysis")
        out = pipe_sentiment_analysis(prompt)
        doc = f"""
            Prompt: {prompt}
            Sentiment: {out[0]["label"]}
            Score: {out[0]["score"]}
        """
    elif option == "Medical Summarization":
        pipe_summarization = pipeline(
            "summarization", model="Falconsai/medical_summarization"
        )
        out = pipe_summarization(prompt)
        doc = out[0]["summary_text"]
    elif option == "ChatGPT":
        doc = call_chatgpt(query=prompt)

    response = f"{doc}"
    # Display assistant response in chat message container.
    with st.chat_message("assistant"):
        st.markdown(response)
    # Add assistant response to chat history.
    st.session_state.messages.append({"role": "assistant", "content": response})
|
helpers/foundation_models.py
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from typing import List, Tuple
|
3 |
+
|
4 |
+
import openai
|
5 |
+
import streamlit as st
|
6 |
+
|
7 |
+
openai_client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"])
|
8 |
+
|
9 |
+
|
10 |
+
def call_chatgpt(query: str, model: str = "gpt-3.5-turbo") -> str:
    """
    Ask an OpenAI chat model to answer *query* and return the reply text.

    Args:
        query (str): The user's query that needs to be processed.
        model (str, optional): The language model to be used. Defaults to "gpt-3.5-turbo".

    Returns:
        str: The generated response to the query.
    """
    # Conversation context: a fixed system prompt followed by the user's question.
    conversation = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": f"Question: {query}."},
    ]

    # Ask the OpenAI client for a completion and return the first choice's text.
    completion = openai_client.chat.completions.create(
        model=model,
        messages=conversation,
    )
    return completion.choices[0].message.content
|
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
torch
openai
transformers
streamlit
|