# llm_3 / app.py — PitchPal: a Streamlit sales-copy assistant
# Author: lorentz — commit f7b57fb ("Update app.py"), 3.78 kB
# (Header reconstructed from Hugging Face raw-page metadata that was
# scraped into the file; kept as a comment so the module stays importable.)
import streamlit as st
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain import FewShotPromptTemplate
from langchain.prompts.example_selector import LengthBasedExampleSelector
from dotenv import load_dotenv
load_dotenv()  # read environment variables (e.g. OPENAI_API_KEY) from a local .env file
def getLLMResponse(query, age_option, tasktype_option, max_words=40):
    """Generate one piece of marketing copy from the LLM.

    Args:
        query: Product or service name entered by the user.
        age_option: Target customer age group; interpolated into the prompt
            prefix as the persona the model should write as.
        tasktype_option: The kind of copy to draft (tweet, sales copy, ...).
        max_words: Length budget for the few-shot example selector.
            Defaults to 40, the value the UI uses. (Previously this read the
            module-level global ``numberOfWords``, which is defined *after*
            this function — importing/calling the function on its own raised
            NameError.)

    Returns:
        The raw completion text returned by the model.
    """
    examples = []  # no few-shot examples yet, but the selector requires a list
    llm = OpenAI(temperature=.9, model="gpt-3.5-turbo-instruct")

    # Template each (query, answer) example would be rendered with.
    example_template = """
Question: {query}
Response: {answer}
"""
    example_prompt = PromptTemplate(
        input_variables=["query", "answer"],
        template=example_template,
    )

    # Persona + task instructions; {template_*} slots are filled at format time.
    prefix = """You are a {template_ageoption}, and you are going to {template_tasktype_option} ,
you give one answer for each query. it is strictly limited to 1 answer only, and the answer MUST be LESS THAN 200 words.
For a tweet, you SHOULD NOT give more than 280 characters. If it is not to write for a tweet, DO NOT give a tweet suggestion in your answer.
"""
    suffix = """
Question: {template_userInput}
Response: """

    # Selects as many examples as fit under max_words (currently none exist).
    example_selector = LengthBasedExampleSelector(
        examples=examples,
        example_prompt=example_prompt,
        max_length=max_words,  # was the late-defined global `numberOfWords`
    )

    new_prompt_template = FewShotPromptTemplate(
        example_selector=example_selector,  # use example_selector instead of examples
        example_prompt=example_prompt,
        prefix=prefix,
        suffix=suffix,
        input_variables=["template_userInput", "template_ageoption", "template_tasktype_option"],
        example_separator="\n",
    )

    # Render the prompt once and reuse it (it was formatted twice before).
    prompt = new_prompt_template.format(
        template_userInput=query,
        template_ageoption=age_option,
        template_tasktype_option=tasktype_option,
    )
    print(prompt)
    response = llm(prompt)
    print(response)
    return response
# --- UI starts here ---
# Page chrome must be configured before any other st.* call.
_PAGE_CONFIG = dict(
    page_title="PitchPal: Your Friendly Copy Assistant",
    page_icon='💻',
    layout='centered',
    initial_sidebar_state='collapsed',
)
st.set_page_config(**_PAGE_CONFIG)
# Inject page-wide CSS helpers (.big-font, .title-font, container spacing).
_CUSTOM_CSS = """
<style>
.big-font {
font-size:20px !important;
}
.title-font {
font-size:30px !important;
font-weight: bold;
}
.streamlit-container {
margin-top: 2rem;
}
</style>
"""
st.markdown(_CUSTOM_CSS, unsafe_allow_html=True)
# Page header: title, subtitle, author credit.
st.markdown("<h1 style='text-align: center'>PitchPal</h1>", unsafe_allow_html=True)
# Fixed mismatched closing tag (was <h3 ...></h2>).
st.markdown("<h3 style='text-align: center'>Your Efficient Sales Copy Assistant</h3>", unsafe_allow_html=True)
st.markdown("<p style='text-align: right'>By <a href='https://entzyeung.github.io/portfolio/index.html'>Lorentz Yeung</a></p>", unsafe_allow_html=True)
# Two-column input layout: product text on the left, copy options on the right.
col1, col2 = st.columns(2)

with col1:
    form_input = st.text_area('Enter the product or service:', 'PlayStation 6', height=100)

with col2:
    # index picks the default option by position (0-based).
    tasktype_option = st.selectbox(
        'Marketing copy type:',
        ('Draft a Twitter post', 'Draft a sales copy', 'Draft a product description'),
        index=0,
    )
    age_option = st.selectbox(
        'Target customers age group:',
        ('below age 18', 'age 18-45', 'age 46-65', 'age > 65'),
        index=1,
    )

# numberOfWords = st.slider('Words limit', 1, 200, 25)
numberOfWords = 40  # the new model doesn't support this.
submit = st.button("Generate Your Sales Copy")
if submit:
    # Bug fix: the signature is getLLMResponse(query, age_option, tasktype_option),
    # but the call passed (form_input, tasktype_option, age_option), swapping the
    # age group and the task type in the prompt. Keyword args make it unambiguous.
    st.write(getLLMResponse(form_input, age_option=age_option, tasktype_option=tasktype_option))