alessiodevoto committed on
Commit
4eeb29b
•
1 Parent(s): 9cbaaef
Files changed (3)
  1. app.py +26 -47
  2. config/config.yaml +2 -1
  3. src/utils.py +10 -2
app.py CHANGED
@@ -34,7 +34,6 @@ def main():
     #except:
     #    pass
 
-
     llm_settings = config['llm']
     config['llm']['api_key'] = os.environ["OPENAI_KEY"]
     newsletter_meta_info = config['newsletter']
@@ -54,6 +53,8 @@ def main():
         max_tokens,
         system_message,
         textual_preferences,
+        few_shot=None,
+        custom_template=None,
         progress=gr.Progress()
     ):
 
@@ -67,12 +68,15 @@ def main():
                                               max_transactions=newsletter_meta_info['max_recents_items'])
         logging.debug(f"Recommendations: {recommendations}")
         logging.debug(f"Transactions: {transactions}")
-        print("cusomter info", customer_info)
+        print("customer info", customer_info)
 
 
         # Load the html template and replace the placeholders for images with the actual content
         logging.info("Initializing newsletter template...")
         progress(0.5, "Initializing personalized content...")
+        # override the default template if a custom one is provided
+        if custom_template:
+            newsletter_meta_info['newsletter_example_path'] = custom_template
         newsletter_text = initialize_newsletter(newsletter_meta_info, transactions, recommendations)
 
         # Build context from the user preferences, the recommendations and the transactions
@@ -85,7 +89,7 @@ def main():
 
         # Build the prompt for the LLM
         progress(0.7, "Generating personalized content...")
-        prompt = build_prompt(context)
+        prompt = build_prompt(context, few_shot)
         logging.info(f"Prompt: {prompt}")
 
 
@@ -156,12 +160,28 @@ def main():
                     value=llm_handler.default_max_tokens,
                     precision=0
                 )
+
+                custom_template = gr.File(
+                    label="Custom Template",
+                    file_types="html",
+                    visible=True)
+
+                with gr.Row():
                 system_message = gr.Textbox(
                     label="System Message",
                     placeholder="Enter a custom system message (optional).",
                     value=llm_handler.default_system_message,
                     visible=False
                 )
+
+                few_shot = gr.Textbox(
+                    label="Few Shot",
+                    placeholder="Enter a custom few shot (optional).",
+                    value=None,
+                    visible=True)
+
+
+
 
             # User Context (Hidden by Default)
             with gr.Accordion("🧑‍💻 User Context", open=False, visible=False):
@@ -188,7 +208,9 @@ def main():
                 temperature,
                 max_tokens,
                 system_message,
-                textual_preferences
+                textual_preferences,
+                few_shot,
+                custom_template
             ],
             outputs=[newsletter_output, download]
         )
@@ -200,48 +222,5 @@ def main():
     )
 
 
-    # Gradio interface for the app
-    """ logging.info("Creating interface...")
-    with gr.Blocks() as demo:
-        gr.Markdown("### Newsletter Generator")
-
-        customer_id = gr.Textbox(label="Client ID", value="04a183a27a6877e560e1025216d0a3b40d88668c68366da17edfb18ed89c574c")
-        textual_preferences = gr.Textbox(label="Newsletter preferences", placeholder="The newsletter should be catchy.")
-
-        # llm_preferences = gr.Textbox(label="LLM Preferences", placeholder="Enter LLM preferences.", visible=False)
-        # create an openable block for the llm preferences
-        with gr.Accordion("LLM Preferences", open=False):
-            model_name = gr.Dropdown(label="Model Name", choices=["gpt-3.5-turbo", "gpt-4o"], value=llm_handler.model_name)
-            temperature = gr.Slider(label="Temperature", minimum=0.0, maximum=1.0, step=0.05, value=llm_handler.default_temperature)
-            max_tokens = gr.Number(label="Max Tokens", value=llm_handler.default_max_tokens)
-            system_message = gr.Textbox(label="System Message", placeholder="Enter the system message or Leave Blank.", value=llm_handler.default_system_message)
-
-        with gr.Accordion("User Context", open=False, visible=False):
-            # get profiled user context
-            pass
-
-        generate_button = gr.Button("Generate Newsletter")
-
-        # create a button to open the newsletter in a new tab
-        download = gr.DownloadButton(label="Download Newsletter")
-
-        newsletter_output = gr.HTML(label="Generated Newsletter", min_height="500", value="<br><br><br><br><br>")
-
-        generate_button.click(
-            fn=generate_newsletter,
-            inputs=[
-                customer_id,
-                # llm preferences
-                model_name,
-                temperature,
-                max_tokens,
-                system_message,
-                # newsletter preferences
-                textual_preferences],
-            outputs=[newsletter_output, download]
-        )
-
-        demo.queue().launch(share=config['app']['share'], server_port=config['app']['server_port'])"""
-
 if __name__ == "__main__":
     main()
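
The new controls follow the same wiring pattern as the existing ones: each component is listed in inputs=[...] and Gradio passes its current value to generate_newsletter positionally, so the order of the list has to match the order of the new keyword parameters. Below is a minimal, self-contained sketch of that pattern; the handler name and labels are hypothetical, and it assumes a recent Gradio release in which gr.File hands the handler a filepath string, which is what lets the upload be assigned directly to newsletter_meta_info['newsletter_example_path'].

import gradio as gr

def preview_inputs(few_shot, custom_template):
    # custom_template is None until a file is uploaded; afterwards it is a path
    # that could be used to override the default newsletter template.
    template = custom_template if custom_template else "built-in template"
    examples = few_shot or "(no few-shot examples)"
    return f"template: {template}\nfew-shot: {examples}"

with gr.Blocks() as demo:
    few_shot = gr.Textbox(label="Few Shot", placeholder="Optional few-shot examples.")
    custom_template = gr.File(label="Custom Template", file_types=[".html"])
    output = gr.Textbox(label="Resolved inputs")
    gr.Button("Preview").click(
        fn=preview_inputs,
        inputs=[few_shot, custom_template],
        outputs=output,
    )

# demo.launch()  # uncomment to run the sketch locally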
config/config.yaml CHANGED
@@ -19,6 +19,7 @@ llm:
     "recommendations": "your recommendations text here",
     "closing": "your closing text here"
   }
+  default_few_shot: null
 
 
 recommender_api:
@@ -34,7 +35,7 @@ app:
 newsletter:
   newsletter_example_path: "./newsletter_examples/1.html"
   brand_logo: "https://seeklogo.com/images/L/luisa-spagnoli-logo-EF482BEE89-seeklogo.com.png"
-  brand_name: "AI x Fashion"
+  brand_name: "Luisa Spagnoli"
   max_recommendations: 2
   max_recents_items: 2
 
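
For reference, YAML null is loaded as Python None, so code reading the new default_few_shot key can fall back cleanly when no default is configured. The sketch below shows how the values land on the Python side; it assumes the file is read with PyYAML's safe_load (an assumption about the app's loader), while the key names are the ones visible in this diff.

import yaml

with open("config/config.yaml") as f:
    config = yaml.safe_load(f)

# "default_few_shot: null" becomes None, so the "or" fallback yields an empty string.
default_few_shot = config["llm"].get("default_few_shot") or ""

# The renamed brand now reads "Luisa Spagnoli".
brand_name = config["newsletter"]["brand_name"]
print(repr(default_few_shot), brand_name)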
src/utils.py CHANGED
@@ -52,7 +52,9 @@ def format_personalization(personalization):
 
 
 def build_prompt(
-    context: Dict[str, Any]) -> str:
+    context: Dict[str, Any],
+    few_shot: str = None,
+    ) -> str:
     """
     Create a detailed prompt incorporating all personalization factors
 
@@ -62,6 +64,11 @@ def build_prompt(
     :return: Formatted prompt string
     """
 
+    if few_shot is not None and len(few_shot) > 0:
+        few_shot = f"Few-shot examples:\n{few_shot}\n"
+    else:
+        few_shot = ""
+
     # Create a detailed user prompt with all context and personalization
     user_prompt = f"""
     Generate personalized newsletter content based on the following information:
@@ -79,13 +86,14 @@ def build_prompt(
     3. recommendations: Present recommended items naturally, explaining why they match the customer's preferences and previous purchases.
     4. closing: Wrap up maintaining the established tone.
 
-    General reequirements:
+    General requirements:
     - Write in first person.
     - Don't be too formal, maintain a friendly, warm and engaging tone, avoid stuff like "I hope you are doing well".
     - The reader should feel like they have a special dedicated fashion assistant, who is also a friend.
     - When mentioning items, use the item name and avoid mentioning colors that are not present in the image, or using adjectives like "vibrant".
     - The goal should be to make the reader feel special and excited about the items they are being recommended.
 
+    {few_shot}
 
     Please generate all sections in a single, coherent response that maintains consistency in tone and style throughout. Begin each section with the section name in all caps.
     """