import gradio as gr
from src.utils import LLMHandler, initialize_newsletter, integrate_personalized_text, build_context, build_prompt
from src.utils_api import get_recommendations
import yaml
import logging
import argparse
import os
import tempfile
# NOTE: file-based logging is currently disabled. If re-enabling it, use
# level=logging.INFO (the constant) — `logging.info` is the function and
# would not set the level correctly:
# logging.basicConfig(filename='logs/app.log', encoding='utf-8', level=logging.INFO)
logging.basicConfig(level=logging.INFO)
def main():
    """Launch the Gradio app that generates personalized fashion newsletters.

    Reads the YAML configuration given via ``--config-file``, initializes the
    LLM handler (the OpenAI API key is taken from the ``OPENAI_KEY``
    environment variable, never from the config file), builds the Gradio UI
    and starts the server.

    Raises:
        KeyError: if the ``OPENAI_KEY`` environment variable is not set.
        FileNotFoundError: if the configuration file does not exist.
    """
    parser = argparse.ArgumentParser(description='Newsletter Generator')
    parser.add_argument(
        '--config-file',
        type=str,
        default='./config/config.yaml',
        help='Path to the configuration file.')
    args = parser.parse_args()

    logging.info("Starting the Newsletter Generator app...")
    logging.info("Loading configuration from config.yaml...")
    with open(args.config_file, "r") as file:
        config = yaml.safe_load(file)

    # Secrets come from the environment so they are never committed alongside
    # the YAML config. Fail fast (KeyError) if the key is missing.
    config['llm']['api_key'] = os.environ["OPENAI_KEY"]
    llm_settings = config['llm']
    newsletter_meta_info = config['newsletter']
    logging.debug(f"Configuration loaded: {config}")

    # Initialize the LLM handler from the config's `llm` section.
    llm_handler = LLMHandler(**llm_settings)
    logging.info(f"LLM handler initialized with the following settings: {llm_settings}")

    def generate_newsletter(
            customer_id,
            model_name,
            temperature,
            max_tokens,
            system_message,
            textual_preferences,
            few_shot=None,
            custom_template=None,
            progress=gr.Progress()):
        """Generate a personalized HTML newsletter for one customer.

        Pipeline: fetch recommendations/transactions for the customer, fill
        the HTML template with product content, build an LLM prompt from the
        customer context, generate the personalized text sections, and merge
        them into the template.

        Args:
            customer_id: customer identifier passed to the recommender API.
            model_name: LLM model to use for generation.
            temperature: sampling temperature.
            max_tokens: generation token limit.
            system_message: system prompt for the LLM.
            textual_preferences: free-text newsletter preferences from the UI.
            few_shot: optional few-shot examples appended to the prompt.
            custom_template: optional path to an uploaded HTML template that
                overrides the default one for this call only.
            progress: Gradio progress tracker (injected by Gradio).

        Returns:
            Tuple of (newsletter HTML string, path to a temp .html file for
            download).
        """
        # Fetch the customer's history and product recommendations.
        progress(0.1, "Fetching Client History...")
        logging.info("Getting recommendations...")
        customer_info, recommendations, transactions = get_recommendations(
            customer_id,
            max_recs=newsletter_meta_info['max_recommendations'],
            max_transactions=newsletter_meta_info['max_recents_items'])
        logging.debug(f"Recommendations: {recommendations}")
        logging.debug(f"Transactions: {transactions}")
        logging.debug(f"Customer info: {customer_info}")

        # Load the HTML template and replace the image placeholders with the
        # actual product content.
        logging.info("Initializing newsletter template...")
        progress(0.5, "Initializing personalized content...")
        # Work on a per-call copy: mutating the shared config dict would make
        # one request's uploaded template leak into all subsequent requests.
        meta_info = dict(newsletter_meta_info)
        if custom_template:
            meta_info['newsletter_example_path'] = custom_template
        newsletter_text = initialize_newsletter(meta_info, transactions, recommendations)

        # Build the LLM context from preferences, recommendations and history.
        context = build_context(
            recommendations,
            transactions,
            textual_preferences,
            customer_info)
        logging.info(f"Context: {context}")

        # Build the prompt and generate the personalized sections.
        progress(0.7, "Generating personalized content...")
        prompt = build_prompt(context, few_shot)
        logging.info(f"Prompt: {prompt}")
        sections = llm_handler.generate(
            prompt,
            model_name,
            temperature,
            max_tokens,
            system_message)
        logging.info(f"Sections: {sections}")

        # Integrate the generated text into the HTML template.
        logging.info("Integrating personalized text...")
        newsletter_text = integrate_personalized_text(newsletter_text, customer_info, sections)

        # Save the HTML to a temporary file so the UI can offer a download.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".html") as temp_file:
            temp_file.write(newsletter_text.encode("utf-8"))
            temp_file_path = temp_file.name

        progress(1.0)
        return newsletter_text, temp_file_path

    logging.info("Creating interface...")
    with gr.Blocks() as demo:
        # Header Section
        gr.Markdown("## AI-Powered Newsletter for Fashion Brands", elem_id="header")

        # Input Section
        with gr.Row():
            customer_id = gr.Dropdown(
                label="Customer ID",
                interactive=True,
                choices=[
                    ("User Story 1", "04a183a27a6877e560e1025216d0a3b40d88668c68366da17edfb18ed89c574c"),
                    ("User Story 2", "1abaca5cd299000720538c70ba2ed246db6731bce924b5b4ca81770a47842656"),
                    ("User Story 3", "1741b0d1b2c29994084b7312001c1b11ab8b112b3fd05ac765f4d232afdc4eaf")
                ]
            )
        with gr.Row():
            textual_preferences = gr.Textbox(
                label="Newsletter Preferences",
                placeholder="Enter rich newsletter preferences."
            )

        # Advanced Settings
        with gr.Accordion("⚙️ Advanced Settings", open=False):
            with gr.Row():
                model_name = gr.Dropdown(
                    label="LLM Model",
                    choices=["gpt-3.5-turbo", "gpt-4o"],
                    value=llm_handler.model_name
                )
                temperature = gr.Slider(
                    label="Temperature",
                    minimum=0.0,
                    maximum=1.0,
                    step=0.05,
                    value=llm_handler.default_temperature
                )
            with gr.Row():
                max_tokens = gr.Number(
                    label="Max Tokens",
                    value=llm_handler.default_max_tokens,
                    scale=1,
                    precision=0
                )
                custom_template = gr.File(
                    label="Custom Template",
                    scale=1,
                    visible=True)
            with gr.Row():
                system_message = gr.Textbox(
                    label="System Message",
                    placeholder="Enter a custom system message (optional).",
                    value=llm_handler.default_system_message,
                    visible=False
                )
                few_shot = gr.Textbox(
                    label="Few-Shot Examples",
                    placeholder=config.get("default_few_shot", ""),
                    value=config.get("default_few_shot", ""),
                    visible=True,
                    lines=20,
                    max_lines=100
                )

        # User Context (hidden placeholder for future integration)
        with gr.Accordion("🧑💻 User Context", open=False, visible=False):
            pass

        # Output Section
        with gr.Row():
            generate_button = gr.Button("Generate Personalized Newsletter", variant="primary")
            download = gr.DownloadButton("Download")
        newsletter_output = gr.HTML(
            label="Generated Newsletter",
            value="",
            min_height=500,
            render=True
        )

        # Event Binding
        generate_button.click(
            fn=generate_newsletter,
            inputs=[
                customer_id,
                model_name,
                temperature,
                max_tokens,
                system_message,
                textual_preferences,
                few_shot,
                custom_template
            ],
            outputs=[newsletter_output, download]
        )

    # Launch App
    demo.queue().launch(
        share=config['app']['share'],
        server_port=config['app']['server_port']
    )
if __name__ == "__main__":
main()