Update app.py
app.py
CHANGED
@@ -8,31 +8,35 @@ from transformers import pipeline
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
 from helpers.foundation_models import *
+import requests
+
 
 OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
 SERPAPI_API_KEY = os.environ["SERPAPI_API_KEY"]
 openai_client = openai.OpenAI(api_key=OPENAI_API_KEY)
 
 
-
-
-
+API_URL = "https://sks7h7h5qkhoxwxo.us-east-1.aws.endpoints.huggingface.cloud"
+headers = {
+    "Accept" : "application/json",
+    "Content-Type": "application/json"
+}
 
-# def generate_response_from_llama2(query):
 
-
-
+def query(payload):
+    response = requests.post(API_URL, headers=headers, json=payload)
+    return response.json()
 
-# # Generate a response
-# # Adjust the parameters like max_length according to your needs
-# output = model.generate(input_ids, max_length=50, num_return_sequences=1, temperature=0.7)
 
-
-
+def llama2_7b_ysa(prompt: str) -> str:
+    output = query({
+        "inputs": prompt,
+        "parameters": {}
+    })
 
-
-# return generated_text
+    response = output[0]['generated_text']
 
+    return response
 
 # Initialize chat history
 if "messages" not in st.session_state:
@@ -59,7 +63,7 @@ with st.expander("Instructions"):
 
 option = st.sidebar.selectbox(
     "Which task do you want to do?",
-    ("Sentiment Analysis", "Medical Summarization", "ChatGPT", "ChatGPT (with Google)"),
+    ("Sentiment Analysis", "Medical Summarization", "Llama2 on YSA", "ChatGPT", "ChatGPT (with Google)"),
 )
 
 
@@ -87,7 +91,7 @@ if prompt := st.chat_input("What is up?"):
         pipe_sentiment_analysis = pipeline("sentiment-analysis")
         if prompt:
             out = pipe_sentiment_analysis(prompt)
-            response = f"""
+            final_response = f"""
             Prompt: {prompt}
             Sentiment: {out[0]["label"]}
             Score: {out[0]["score"]}
@@ -98,15 +102,22 @@ if prompt := st.chat_input("What is up?"):
         )
         if prompt:
             out = pipe_summarization(prompt)
-            response = out[0]["summary_text"]
-
-
-
-
+            final_response = out[0]["summary_text"]
+    elif option == "Llama2 on YSA":
+        if prompt:
+            out = llama2_7b_ysa(prompt=prompt)
+            engineered_prompt = f"""
+                The user asked the question: {prompt}
+
+                We have found relevant content: {out}
+
+                Answer the user question based on the above content in paragraphs.
+            """
+            final_response = call_chatgpt(query=engineered_prompt)
     elif option == "ChatGPT":
         if prompt:
             out = call_chatgpt(query=prompt)
-            response = out
+            final_response = out
     elif option == "ChatGPT (with Google)":
         if prompt:
             ans_langchain = call_langchain(prompt)
@@ -116,11 +127,11 @@ if prompt := st.chat_input("What is up?"):
             Answer the user question: {prompt}
             """
             out = call_chatgpt(query=prompt)
-            response = out
+            final_response = out
     else:
-        response = ""
+        final_response = ""
 
-    response = f"{
+    response = f"{final_response}"
     # Display assistant response in chat message container
     with st.chat_message("assistant"):
         st.markdown(response)
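For context on the new helper pair (query and llama2_7b_ysa): the commit posts to a dedicated Hugging Face Inference Endpoint with no Authorization header and no status check, so it assumes a public, already-warm endpoint. A hardened variant might look like the sketch below; the HF_API_TOKEN variable, the retry budget, and the query_with_retry name are assumptions for illustration, not part of the commit.

import os
import time

import requests

API_URL = "https://sks7h7h5qkhoxwxo.us-east-1.aws.endpoints.huggingface.cloud"

# Assumption: token read from HF_API_TOKEN; the commit itself sends no token.
HF_API_TOKEN = os.environ.get("HF_API_TOKEN", "")


def query_with_retry(payload: dict, retries: int = 3, backoff_s: float = 5.0) -> dict:
    """POST to the endpoint, retrying while it scales up from zero."""
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
    }
    if HF_API_TOKEN:
        headers["Authorization"] = f"Bearer {HF_API_TOKEN}"

    for attempt in range(retries):
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
        if response.status_code == 503 and attempt < retries - 1:
            # Scaled-to-zero endpoints return 503 while the model loads.
            time.sleep(backoff_s)
            continue
        response.raise_for_status()
        return response.json()
    raise RuntimeError("unreachable: loop always returns or raises")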
|