File size: 17,486 Bytes
448716e
 
 
 
 
 
 
531606c
 
448716e
b4eab22
 
 
 
 
 
 
 
 
d867642
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b4eab22
 
 
 
 
 
 
 
 
 
 
 
69cb436
 
b4eab22
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d4b3687
 
 
69cb436
7c962de
d4b3687
 
 
 
 
7c962de
 
 
 
d4b3687
 
 
 
 
7c962de
 
 
d4b3687
 
7c962de
 
 
 
 
 
d4b3687
 
7c962de
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d4b3687
7c962de
d4b3687
7c962de
 
d4b3687
7c962de
 
b4eab22
7c962de
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d4b3687
7c962de
 
 
d4b3687
 
 
7c962de
 
 
 
 
d4b3687
7c962de
d4b3687
 
 
b4eab22
d4b3687
 
 
 
 
6c9634b
383989e
b4eab22
d867642
6c9634b
383989e
69cb436
d4b3687
d867642
69cb436
 
383989e
 
 
6c9634b
d4b3687
 
d867642
d4b3687
 
 
b4eab22
 
 
d4b3687
d867642
383989e
b4eab22
448716e
531606c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d867642
531606c
 
 
d867642
531606c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d867642
 
531606c
d867642
531606c
d867642
 
 
 
 
 
 
 
 
 
531606c
 
 
 
 
 
 
 
 
 
448716e
a8e55be
531606c
7ae0911
a8e55be
 
 
 
e8a9ce9
 
 
 
 
 
 
a8e55be
448716e
a4cf769
 
9fb1b7d
e8a9ce9
 
b4eab22
8b31c18
 
 
 
 
e8a9ce9
b4eab22
 
 
 
e8a9ce9
8b31c18
b4eab22
a8e55be
e8a9ce9
 
 
 
 
 
 
 
 
 
 
 
8b31c18
e8a9ce9
 
a4cf769
 
 
9fb1b7d
 
 
 
e8a9ce9
 
 
4d843b5
 
 
 
 
a8e55be
a4cf769
 
 
 
 
 
 
 
 
 
 
b4eab22
 
a4cf769
b4eab22
 
 
4d843b5
a4cf769
b4eab22
 
a4cf769
 
 
 
 
a8e55be
b4eab22
448716e
 
 
b4eab22
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import nltk
from datetime import datetime, timedelta
import requests
from bs4 import BeautifulSoup
import time
import emoji

# Download required NLTK data
# 'punkt' is NLTK's sentence/word tokenizer model. Probe the local data
# directory first so the (slow, network-bound) download only happens on
# the very first run of the app.
try:
    nltk.data.find('tokenizers/punkt')
except LookupError:
    nltk.download('punkt')

# Global variables to cache models
# Populated lazily by load_models(); repeated calls then reuse the same
# tokenizer/model/pipeline objects instead of re-downloading them.
CACHED_MODELS = {}

def analyze_detailed_sentiment(text):
    """Heuristic fine-grained sentiment labeller.

    Scans *text* (case-insensitively) for tone-specific keywords and
    returns the label ('excited', 'proud', ...) whose keyword list has
    the most substring hits.  Ties go to the label listed first below;
    when nothing matches at all, falls back to the generic 'positive'.
    """
    keyword_map = {
        'excited': ['amazing', 'exciting', 'incredible', 'transform', 'revolutionary'],
        'happy': ['happy', 'joy', 'enjoy', 'perfect', 'wonderful'],
        'cheerful': ['bright', 'fun', 'delightful', 'cheerful', 'pleasant'],
        'proud': ['proud', 'achievement', 'excellence', 'premium', 'superior'],
        'elated': ['extraordinary', 'exceptional', 'outstanding', 'remarkable'],
        'inspired': ['innovative', 'creative', 'inspiring', 'groundbreaking'],
        'confident': ['guaranteed', 'proven', 'trusted', 'reliable', 'assured'],
        'loving': ['love', 'care', 'cherish', 'adore', 'treasure'],
        'enthusiastic': ['fantastic', 'awesome', 'brilliant', 'excellent'],
        'delighted': ['pleased', 'satisfied', 'gratified', 'overjoyed']
    }

    lowered = text.lower()

    # Single pass, keeping the first label that reaches the best hit count
    # (strict '>' preserves first-wins tie-breaking in dict order).
    best_label = 'positive'
    best_hits = 0
    for label, words in keyword_map.items():
        hits = sum(1 for word in words if word in lowered)
        if hits > best_hits:
            best_label = label
            best_hits = hits

    return best_label

def load_models():
    """Load (or fetch from cache) all Hugging Face models used by the app.

    Returns a 4-tuple ``(generator_tokenizer, generator,
    sentiment_analyzer, content_checker)``.  Results are memoised in the
    module-level CACHED_MODELS dict so the expensive downloads only
    happen on the first call.

    Raises whatever the underlying model loading raised, after printing
    a short diagnostic.
    """
    global CACHED_MODELS

    # Fast path: everything was loaded by an earlier call.
    if CACHED_MODELS:
        cache = CACHED_MODELS
        return (
            cache['generator_tokenizer'],
            cache['generator'],
            cache['sentiment_analyzer'],
            cache['content_checker']
        )

    try:
        # Text generator: plain GPT-2 (kept small for free-tier hosting).
        gen_name = "gpt2"
        tokenizer = AutoTokenizer.from_pretrained(gen_name)
        lm = AutoModelForCausalLM.from_pretrained(gen_name)

        # Three-way (neg/neu/pos) tweet sentiment classifier.
        sentiment = pipeline(
            "sentiment-analysis",
            model="finiteautomata/bertweet-base-sentiment-analysis"
        )

        # Hate-speech classifier used as a content-safety gate.
        safety = pipeline(
            "text-classification",
            model="facebook/roberta-hate-speech-dynabench-r4-target"
        )

        # Memoise everything for subsequent calls.
        CACHED_MODELS.update(
            generator_tokenizer=tokenizer,
            generator=lm,
            sentiment_analyzer=sentiment,
            content_checker=safety,
        )

        return tokenizer, lm, sentiment, safety
    except Exception as e:
        print(f"Error loading models: {str(e)}")
        raise

def generate_content(
    product_name,
    product_description,
    target_audience,
    key_features,
    unique_benefits,
    platform,
    tone,
    generator_tokenizer,
    generator,
    sentiment_analyzer,
    content_checker
):
    """Generate up to two marketing posts by assembling randomized templates.

    Parameters
    ----------
    product_name, product_description, target_audience : str
        Free-text product details supplied by the user.
    key_features, unique_benefits : str
        Comma-separated lists; items are stripped and sampled at random.
    platform : str
        "Twitter" uses a 280-char budget and short CTAs; any other value
        uses a 500-char budget with Instagram-style hashtag CTAs.
    tone : str
        'professional', 'casual' or 'friendly' (case-insensitive);
        anything else falls back to 'professional'.
    generator_tokenizer, generator, sentiment_analyzer
        Currently unused — kept so the call signature stays compatible
        with callers; generation is template-based and sentiment comes
        from analyze_detailed_sentiment().
    content_checker : callable
        HF text-classification pipeline; its first result's 'score'
        becomes the reported safety score.

    Returns
    -------
    list[dict]
        Dicts with 'text', 'sentiment' and 'safety_score' keys.  On
        analysis failure a single default post is returned; on total
        failure a static fallback post is returned.
    """
    import random  # hoisted here (was buried mid-function, after the template data)

    # Platform-specific character budget.
    char_limit = 280 if platform == "Twitter" else 500

    # Split features and benefits into lists
    features = [f.strip() for f in key_features.split(',')]
    benefits = [b.strip() for b in unique_benefits.split(',')]

    # Expanded intro phrases for each tone
    intro_phrases = {
        'professional': [
            f"Introducing {product_name}:",
            f"Discover {product_name}:",
            f"Meet {product_name}:",
            f"Presenting {product_name}:",
            f"Announcing {product_name}:",
            f"Experience {product_name}:",
            f"Elevate your life with {product_name}:",
        ],
        'casual': [
            f"Check out {product_name}!",
            f"Say hello to {product_name}!",
            f"Get ready for {product_name}!",
            f"Looking for something special? Try {product_name}!",
            f"Meet your new favorite: {product_name}!",
            f"Game-changer alert: {product_name} is here!",
        ],
        'friendly': [
            f"We're excited to share {product_name} with you!",
            f"You'll love what {product_name} can do!",
            f"Let {product_name} transform your day!",
            f"Ready to discover {product_name}?",
            f"Here's why you'll love {product_name}:",
            f"Make every day better with {product_name}!",
        ]
    }

    # Description connectors
    description_connectors = [
        f" - {product_description}",
        f": {product_description}",
        f"! {product_description}",
        f", {product_description}",
        f". {product_description}",
    ]

    # Feature introduction phrases
    feature_intros = [
        "Featuring",
        "With",
        "Including",
        "Equipped with",
        "Powered by",
        "Designed with",
        "Built with",
        "Offering",
    ]

    # Benefit connectors
    benefit_connectors = [
        "Experience",
        "Enjoy",
        "Benefit from",
        "Take advantage of",
        "Discover",
        "Appreciate",
        "Make the most of",
    ]

    # Target audience phrases
    audience_phrases = [
        f"Perfect for {target_audience}",
        f"Ideal for {target_audience}",
        f"Designed for {target_audience}",
        f"Made specially for {target_audience}",
        f"Tailored for {target_audience}",
        f"Created with {target_audience} in mind",
    ]

    # Call-to-action phrases
    cta_phrases = {
        'Twitter': [
            "Learn more today!",
            "Discover more β†’",
            "Get yours now!",
            "Visit our website!",
            "Join us today!",
            "Transform your life today!",
        ],
        'Instagram': [
            f"\n\n#{product_name.replace(' ', '')}",
            f"\n\nLearn more - Link in bio! #{product_name.replace(' ', '')}",
            f"\n\nDiscover more ↗️ #{product_name.replace(' ', '')}",
            f"\n\nTap link to learn more! #{product_name.replace(' ', '')}",
        ]
    }

    def create_post():
        """Assemble one post from a randomly chosen structure template."""
        # Select tone (fall back to 'professional' for unrecognised input)
        selected_tone = tone.lower() if tone.lower() in intro_phrases else 'professional'

        # Random structure selection (1-4)
        structure = random.randint(1, 4)

        if structure == 1:
            # Standard structure: intro + description + feature + benefit
            post = random.choice(intro_phrases[selected_tone])
            post += random.choice(description_connectors)
            feature = random.choice(features)
            benefit = random.choice(benefits)

            # Only append the extras if they fit within the budget
            if len(post) + len(feature) + len(benefit) + 20 < char_limit:
                post += f" {random.choice(feature_intros)} {feature}."
                post += f" {random.choice(benefit_connectors)} {benefit}."

        elif structure == 2:
            # Benefit-first structure
            benefit = random.choice(benefits)
            post = f"Ready to {benefit.lower()}? "
            post += random.choice(intro_phrases[selected_tone])
            post += random.choice(description_connectors)

            if len(post) + 30 < char_limit:
                feature = random.choice(features)
                post += f" {random.choice(feature_intros)} {feature}."

        elif structure == 3:
            # Question-based structure
            post = f"Looking for {product_description.lower()}? "
            post += random.choice(intro_phrases[selected_tone]).replace(':', '!')
            feature = random.choice(features)
            benefit = random.choice(benefits)

            if len(post) + len(feature) + len(benefit) + 20 < char_limit:
                post += f" {random.choice(feature_intros)} {feature}."
                post += f" {benefit}."

        else:
            # Feature-focused structure
            feature = random.choice(features)
            post = f"From {feature} to {random.choice(benefits).lower()}, "
            post += f"{product_name} has it all! "
            post += product_description

        # Add target audience if space allows
        if len(post) + 50 < char_limit:
            post += f" {random.choice(audience_phrases)}."

        # Add call to action (Twitter CTAs are shorter than Instagram ones)
        if platform == "Twitter":
            if len(post) + 30 < char_limit:
                post += f" {random.choice(cta_phrases['Twitter'])}"
        else:
            if len(post) + 50 < char_limit:
                post += random.choice(cta_phrases['Instagram'])

        return post.strip()

    try:
        # Generate multiple versions
        posts = [create_post() for _ in range(2)]
        filtered_content = []

        for post in posts:
            # Hard-truncate anything that still exceeds the budget
            if len(post) > char_limit:
                post = post[:char_limit-3] + "..."

            # Check sentiment and safety; skip a post whose analysis fails
            try:
                sentiment = analyze_detailed_sentiment(post)  # custom keyword-based sentiment
                safety_check = content_checker(post)[0]

                filtered_content.append({
                    'text': post,
                    'sentiment': sentiment.title(),  # Capitalize the sentiment
                    'safety_score': f"{float(safety_check.get('score', 0)):.2f}"
                })
            except Exception as e:
                print(f"Error in content analysis: {str(e)}")
                continue

        # If every post failed analysis, still return one usable default
        return filtered_content if filtered_content else [{
            'text': create_post(),
            'sentiment': 'Positive',
            'safety_score': '1.00'
        }]

    except Exception as e:
        print(f"Error in content generation: {str(e)}")
        # Static last-resort fallback so the UI always has something to show
        return [{
            'text': f"Introducing {product_name}: {product_description[:100]}... Learn more!",
            'sentiment': 'Neutral',
            'safety_score': '1.00'
        }]

def process_input_with_loading(
    product_name,
    product_description,
    target_audience,
    key_features,
    unique_benefits,
    platform,
    tone,
    progress=gr.Progress()
):
    """Gradio generator callback: stream progress text, then the result.

    Yields a static capability blurb with step-by-step status lines
    (purely cosmetic pacing via sleep), then the formatted output of
    generate_content().  Relies on the module-level model globals that
    create_interface() sets up.
    """
    # Static blurb shown while the models run.
    features_list = """⚑ While I generate your content, here's what I can do:
    
πŸ“ Generate multiple unique marketing messages
🎯 Adapt content for different platforms
πŸ” Ensure ethical content generation
πŸ“Š Analyze sentiment and safety
✨ Create platform-specific formatting
    
Processing your request..."""

    yield f"{features_list}\n\n⏳ Starting generation..."
    time.sleep(1)

    # Cosmetic progress steps; the real work happens after the loop.
    steps = [
        "Loading language models...",
        "Analyzing product information...",
        "Generating content variations...",
        "Checking content safety...",
        "Performing final adjustments..."
    ]

    total = len(steps)
    for idx, message in enumerate(steps, start=1):
        progress(idx / total * 0.99)  # cap just under 100% until done
        yield f"{features_list}\n\n⏳ {message}"
        time.sleep(1)

    try:
        results = generate_content(
            product_name,
            product_description,
            target_audience,
            key_features,
            unique_benefits,
            platform,
            tone,
            generator_tokenizer,
            generator,
            sentiment_analyzer,
            content_checker
        )

        # Avoid showing the same sentiment label on two versions.
        seen = set()
        parts = ["🎯 Generated Marketing Content:\n\n"]

        for version, item in enumerate(results, start=1):
            if item['sentiment'].lower() in seen:
                replacement = next(
                    (alt for alt in ['Confident', 'Enthusiastic', 'Inspired', 'Proud', 'Happy']
                     if alt.lower() not in seen),
                    None
                )
                # Keep the duplicate label only if every alternative is used up.
                if replacement is not None:
                    item['sentiment'] = replacement

            seen.add(item['sentiment'].lower())

            parts.append(f"Version {version}:\n")
            parts.append(f"πŸ“ Content: {item['text']}\n")
            parts.append(f"😊 Sentiment: {item['sentiment']}\n")
            parts.append(f"βœ… Safety Score: {item['safety_score']}\n")
            parts.append("-" * 50 + "\n")

        yield "".join(parts)
    except Exception as e:
        yield f"An error occurred: {str(e)}"

def create_interface():
    """Build and return the Gradio Blocks UI.

    Loads all models eagerly (publishing them as module-level globals so
    the generation callback can reach them), then wires up the form,
    sample-data button, clear button and output pane.

    Returns
    -------
    gr.Blocks
        The assembled demo, ready for .launch().
    """
    print("Loading models...")
    global generator_tokenizer, generator, sentiment_analyzer, content_checker
    generator_tokenizer, generator, sentiment_analyzer, content_checker = load_models()
    print("Models loaded successfully!")

    def fill_sample_data():
        # One value per input component, in input_components order.
        return [
            "EcoBottle",
            "Sustainable water bottle made from recycled ocean plastic",
            "Environmentally conscious young professionals",
            "100% recycled materials, Insulated design, Leak-proof",
            "Helps clean oceans, Keeps drinks cold for 24 hours",
            "Twitter",
            "professional"
        ]

    def clear_form():
        # NOTE(review): "" is not a valid Radio choice for `platform`;
        # consider resetting it to "Twitter" instead — confirm desired UX.
        return [""] * 7  # Returns empty strings for all 7 input fields

    # Create the interface with blocks for custom layout
    with gr.Blocks(theme=gr.themes.Default()) as demo:
        gr.Markdown("# Ethimar - AI Marketing Content Generator")
        gr.Markdown("Generate ethical marketing content with AI-powered insights.\n⏳ Note: First generation might take 1-3 minutes due to model loading. Subsequent generations will be faster!")
        gr.Markdown("---------------------")
        gr.Markdown("Current limitations: 1. Uses a simplified language GPT model to generate content due to using the free version of the Hugging Face Spaces, so the results might not always be perfect. 2. Only supports English language & Twitter(X) and Instagram at the moment. 3. Uses a template based sentiment analysis since Bertweet model only supports 3 sentiments (negative/neutral/positive)")
        gr.Markdown("Next Steps: 1. Add more platforms in addition to Twitter(X) and Instagram. 2. Create image & videos in addition to generating text, using DALL-E 3 & Meta's Segment Anything Model.")

        # Sample data button with custom styling
        with gr.Row():
            fill_button = gr.Button(
                "Fill the form with sample data",
                variant="primary",
                size="sm",
                scale=1
            )

        # Main content area with two columns
        with gr.Row():
            # Left column - Input form
            with gr.Column(scale=1):
                product_name = gr.Textbox(label="Product Name", placeholder="Enter product name")
                product_description = gr.Textbox(label="Product Description", lines=3, placeholder="Brief description of your product")
                target_audience = gr.Textbox(label="Target Audience", placeholder="Who is this product for?")
                key_features = gr.Textbox(label="Key Features", lines=2, placeholder="Main features of your product")
                unique_benefits = gr.Textbox(label="Unique Benefits", lines=2, placeholder="What makes your product special?")
                platform = gr.Radio(
                    choices=["Twitter", "Instagram"],
                    label="Platform",
                    # BUGFIX: was "Twitter(X)", which is not one of the
                    # choices — the radio started invalid and downstream
                    # `platform == "Twitter"` checks silently failed.
                    value="Twitter"
                )
                tone = gr.Textbox(label="Tone", placeholder="e.g., professional, casual, friendly")

                # Buttons row at the bottom of the form
                with gr.Row():
                    with gr.Column(scale=2):
                        submit_button = gr.Button("Generate Content", variant="primary", scale=1)
                    with gr.Column(scale=1):
                        clear_button = gr.Button("Clear Form", variant="secondary", scale=1)

            # Right column - Output
            with gr.Column(scale=1):
                output = gr.Textbox(
                    label="Generated Content", 
                    lines=12,
                    value="✨ Enter your product details and click 'Generate Content' to start!"
                )

        # Connect all buttons to functions
        input_components = [
            product_name,
            product_description,
            target_audience,
            key_features,
            unique_benefits,
            platform,
            tone
        ]

        fill_button.click(
            fn=fill_sample_data,
            outputs=input_components
        )

        submit_button.click(
            fn=process_input_with_loading,
            inputs=input_components,
            outputs=output
        )

        clear_button.click(
            fn=clear_form,
            outputs=input_components
        )

    return demo

# Entry point: build the Gradio UI and start serving it.
if __name__ == "__main__":
    app = create_interface()
    app.launch()