|
import gradio as gr |
|
import torch |
|
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline |
|
import nltk |
|
from datetime import datetime, timedelta |
|
import requests |
|
from bs4 import BeautifulSoup |
|
import time |
|
import emoji |
|
|
|
|
|
# Ensure the NLTK 'punkt' tokenizer data is present locally; download it
# once on first run so later tokenization (if any) doesn't fail.
try:
    nltk.data.find('tokenizers/punkt')
except LookupError:
    nltk.download('punkt')


# Module-level cache so the heavy Hugging Face models are loaded only once
# per process. Populated by load_models().
CACHED_MODELS = {}
|
|
|
def analyze_detailed_sentiment(text):
    """Map *text* to a fine-grained positive sentiment label.

    Each candidate label has a list of indicator keywords; the label whose
    keywords appear most often (substring match, case-insensitive) wins.
    Ties resolve to the label listed first. Returns the generic 'positive'
    when no keyword matches at all.
    """
    indicators = {
        'excited': ['amazing', 'exciting', 'incredible', 'transform', 'revolutionary'],
        'happy': ['happy', 'joy', 'enjoy', 'perfect', 'wonderful'],
        'cheerful': ['bright', 'fun', 'delightful', 'cheerful', 'pleasant'],
        'proud': ['proud', 'achievement', 'excellence', 'premium', 'superior'],
        'elated': ['extraordinary', 'exceptional', 'outstanding', 'remarkable'],
        'inspired': ['innovative', 'creative', 'inspiring', 'groundbreaking'],
        'confident': ['guaranteed', 'proven', 'trusted', 'reliable', 'assured'],
        'loving': ['love', 'care', 'cherish', 'adore', 'treasure'],
        'enthusiastic': ['fantastic', 'awesome', 'brilliant', 'excellent'],
        'delighted': ['pleased', 'satisfied', 'gratified', 'overjoyed']
    }

    lowered = text.lower()

    # Keep only labels with at least one keyword hit, preserving dict order
    # so ties below resolve to the earliest-declared label.
    hits = {
        label: matched
        for label, words in indicators.items()
        if (matched := sum(word in lowered for word in words))
    }

    if not hits:
        return 'positive'

    # max() returns the first key with the highest hit count.
    return max(hits, key=hits.get)
|
|
|
def load_models():
    """Return (generator_tokenizer, generator, sentiment_analyzer, content_checker).

    On the first call this downloads/initialises the Hugging Face models and
    stores them in the module-level CACHED_MODELS dict; later calls are
    served straight from that cache. Re-raises on any load failure after
    logging it.
    """
    global CACHED_MODELS

    # Fast path: everything was loaded by an earlier call.
    if CACHED_MODELS:
        cache = CACHED_MODELS
        return (
            cache['generator_tokenizer'],
            cache['generator'],
            cache['sentiment_analyzer'],
            cache['content_checker']
        )

    try:
        # Small causal LM + matching tokenizer for text generation.
        base_model = "gpt2"
        tokenizer = AutoTokenizer.from_pretrained(base_model)
        language_model = AutoModelForCausalLM.from_pretrained(base_model)

        # Tweet-tuned sentiment classifier.
        sentiment = pipeline(
            "sentiment-analysis",
            model="finiteautomata/bertweet-base-sentiment-analysis"
        )

        # Hate-speech detector used as the content-safety check.
        checker = pipeline(
            "text-classification",
            model="facebook/roberta-hate-speech-dynabench-r4-target"
        )

        # Populate the cache so subsequent calls skip the downloads.
        CACHED_MODELS['generator_tokenizer'] = tokenizer
        CACHED_MODELS['generator'] = language_model
        CACHED_MODELS['sentiment_analyzer'] = sentiment
        CACHED_MODELS['content_checker'] = checker

        return tokenizer, language_model, sentiment, checker
    except Exception as e:
        # Log and propagate: the app cannot run without its models.
        print(f"Error loading models: {str(e)}")
        raise
|
|
|
def generate_content(
    product_name,
    product_description,
    target_audience,
    key_features,
    unique_benefits,
    platform,
    tone,
    generator_tokenizer,
    generator,
    sentiment_analyzer,
    content_checker
):
    """Generate two randomized marketing-post variants for the product.

    NOTE(review): despite the model arguments, generation here is purely
    template-based — generator_tokenizer, generator and sentiment_analyzer
    are accepted but never used in this function; only content_checker is
    called (for the safety score). Sentiment comes from the keyword-based
    analyze_detailed_sentiment() helper.

    Returns a list of dicts with keys 'text', 'sentiment' (title-cased
    label) and 'safety_score' (two-decimal string). On any top-level error
    a single hard-coded fallback post is returned instead.
    """
    # Platform length budget: classic 280 for Twitter, 500 otherwise.
    char_limit = 280 if platform == "Twitter" else 500

    # Comma-separated form fields become lists of trimmed items.
    features = [f.strip() for f in key_features.split(',')]
    benefits = [b.strip() for b in unique_benefits.split(',')]

    # Opening lines keyed by tone; unknown tones fall back to
    # 'professional' inside create_post().
    intro_phrases = {
        'professional': [
            f"Introducing {product_name}:",
            f"Discover {product_name}:",
            f"Meet {product_name}:",
            f"Presenting {product_name}:",
            f"Announcing {product_name}:",
            f"Experience {product_name}:",
            f"Elevate your life with {product_name}:",
        ],
        'casual': [
            f"Check out {product_name}!",
            f"Say hello to {product_name}!",
            f"Get ready for {product_name}!",
            f"Looking for something special? Try {product_name}!",
            f"Meet your new favorite: {product_name}!",
            f"Game-changer alert: {product_name} is here!",
        ],
        'friendly': [
            f"We're excited to share {product_name} with you!",
            f"You'll love what {product_name} can do!",
            f"Let {product_name} transform your day!",
            f"Ready to discover {product_name}?",
            f"Here's why you'll love {product_name}:",
            f"Make every day better with {product_name}!",
        ]
    }

    # Punctuation/connector variants used to splice the description in
    # right after the intro line.
    description_connectors = [
        f" - {product_description}",
        f": {product_description}",
        f"! {product_description}",
        f", {product_description}",
        f". {product_description}",
    ]

    # Lead-in words for a feature sentence.
    feature_intros = [
        "Featuring",
        "With",
        "Including",
        "Equipped with",
        "Powered by",
        "Designed with",
        "Built with",
        "Offering",
    ]

    # Lead-in words for a benefit sentence.
    benefit_connectors = [
        "Experience",
        "Enjoy",
        "Benefit from",
        "Take advantage of",
        "Discover",
        "Appreciate",
        "Make the most of",
    ]

    # Optional audience tagline appended when space allows.
    audience_phrases = [
        f"Perfect for {target_audience}",
        f"Ideal for {target_audience}",
        f"Designed for {target_audience}",
        f"Made specially for {target_audience}",
        f"Tailored for {target_audience}",
        f"Created with {target_audience} in mind",
    ]

    # Platform-specific calls to action; Instagram variants double as
    # hashtags built from the product name.
    cta_phrases = {
        'Twitter': [
            "Learn more today!",
            "Discover more →",
            "Get yours now!",
            "Visit our website!",
            "Join us today!",
            "Transform your life today!",
        ],
        'Instagram': [
            f"\n\n#{product_name.replace(' ', '')}",
            f"\n\nLearn more - Link in bio! #{product_name.replace(' ', '')}",
            f"\n\nDiscover more ↗️ #{product_name.replace(' ', '')}",
            f"\n\nTap link to learn more! #{product_name.replace(' ', '')}",
        ]
    }

    # Local import keeps the template machinery self-contained.
    import random

    def create_post():
        """Assemble one post from a randomly chosen sentence structure."""
        # Unknown/empty tone falls back to the professional templates.
        selected_tone = tone.lower() if tone.lower() in intro_phrases else 'professional'

        # Pick one of four post layouts at random.
        structure = random.randint(1, 4)

        if structure == 1:
            # Layout 1: intro → description → feature → benefit.
            post = random.choice(intro_phrases[selected_tone])
            post += random.choice(description_connectors)
            feature = random.choice(features)
            benefit = random.choice(benefits)

            # The "+ 20" is headroom for the connector words and punctuation.
            if len(post) + len(feature) + len(benefit) + 20 < char_limit:
                post += f" {random.choice(feature_intros)} {feature}."
                post += f" {random.choice(benefit_connectors)} {benefit}."

        elif structure == 2:
            # Layout 2: benefit-question hook → intro → description → feature.
            benefit = random.choice(benefits)
            post = f"Ready to {benefit.lower()}? "
            post += random.choice(intro_phrases[selected_tone])
            post += random.choice(description_connectors)

            if len(post) + 30 < char_limit:
                feature = random.choice(features)
                post += f" {random.choice(feature_intros)} {feature}."

        elif structure == 3:
            # Layout 3: "Looking for ...?" hook → intro (':' softened to '!')
            # → feature → bare benefit sentence.
            post = f"Looking for {product_description.lower()}? "
            post += random.choice(intro_phrases[selected_tone]).replace(':', '!')
            feature = random.choice(features)
            benefit = random.choice(benefits)

            if len(post) + len(feature) + len(benefit) + 20 < char_limit:
                post += f" {random.choice(feature_intros)} {feature}."
                post += f" {benefit}."

        else:
            # Layout 4: "From X to Y" sweep → description.
            feature = random.choice(features)
            post = f"From {feature} to {random.choice(benefits).lower()}, "
            post += f"{product_name} has it all! "
            post += product_description

        # Append the audience tagline only when there is generous headroom.
        if len(post) + 50 < char_limit:
            post += f" {random.choice(audience_phrases)}."

        # Platform-specific CTA, again gated on remaining space.
        if platform == "Twitter":
            if len(post) + 30 < char_limit:
                post += f" {random.choice(cta_phrases['Twitter'])}"
        else:
            if len(post) + 50 < char_limit:
                post += random.choice(cta_phrases['Instagram'])

        return post.strip()

    try:
        # Produce two independent variants.
        posts = [create_post() for _ in range(2)]
        filtered_content = []

        for post in posts:
            # Hard-truncate anything that still overflows the platform limit.
            if len(post) > char_limit:
                post = post[:char_limit-3] + "..."

            try:
                sentiment = analyze_detailed_sentiment(post)
                # NOTE(review): 'score' is the confidence of whatever label
                # the checker predicts — not necessarily a "safe" score.
                # Confirm the label should be inspected too.
                safety_check = content_checker(post)[0]

                filtered_content.append({
                    'text': post,
                    'sentiment': sentiment.title(),
                    'safety_score': f"{float(safety_check.get('score', 0)):.2f}"
                })
            except Exception as e:
                # Analysis failure drops this variant but keeps the others.
                print(f"Error in content analysis: {str(e)}")
                continue

        # If every variant failed analysis, return one unchecked post with
        # optimistic defaults rather than nothing.
        return filtered_content if filtered_content else [{
            'text': create_post(),
            'sentiment': 'Positive',
            'safety_score': '1.00'
        }]

    except Exception as e:
        # Last-resort fallback: a minimal hard-coded post.
        print(f"Error in content generation: {str(e)}")
        return [{
            'text': f"Introducing {product_name}: {product_description[:100]}... Learn more!",
            'sentiment': 'Neutral',
            'safety_score': '1.00'
        }]
|
|
|
def process_input_with_loading(
    product_name,
    product_description,
    target_audience,
    key_features,
    unique_benefits,
    platform,
    tone,
    progress=gr.Progress()
):
    """Gradio streaming handler for the Generate button.

    Yields a sequence of progress messages (with deliberate one-second
    pauses for UX pacing), then the formatted generated content. Relies on
    the module-level model globals set up by create_interface().
    """
    features_list = """⚡ While I generate your content, here's what I can do:

📝 Generate multiple unique marketing messages
🎯 Adapt content for different platforms
🔍 Ensure ethical content generation
📊 Analyze sentiment and safety
✨ Create platform-specific formatting

Processing your request..."""

    yield features_list + "\n\n⏳ Starting generation..."
    time.sleep(1)

    steps = [
        "Loading language models...",
        "Analyzing product information...",
        "Generating content variations...",
        "Checking content safety...",
        "Performing final adjustments..."
    ]

    # Walk the progress bar up to 99% while streaming status lines.
    total = len(steps)
    for step_number, message in enumerate(steps, 1):
        progress((step_number / total) * 0.99)
        yield features_list + f"\n\n⏳ {message}"
        time.sleep(1)

    try:
        results = generate_content(
            product_name,
            product_description,
            target_audience,
            key_features,
            unique_benefits,
            platform,
            tone,
            generator_tokenizer,
            generator,
            sentiment_analyzer,
            content_checker
        )

        used_sentiments = set()
        output = "🎯 Generated Marketing Content:\n\n"

        for version, entry in enumerate(results, 1):
            # Keep sentiment labels distinct across versions: if this label
            # was already shown, swap in the first unused alternative.
            if entry['sentiment'].lower() in used_sentiments:
                alternatives = ['Confident', 'Enthusiastic', 'Inspired', 'Proud', 'Happy']
                replacement = next(
                    (s for s in alternatives if s.lower() not in used_sentiments),
                    None
                )
                if replacement is not None:
                    entry['sentiment'] = replacement

            used_sentiments.add(entry['sentiment'].lower())

            output += f"Version {version}:\n"
            output += f"📝 Content: {entry['text']}\n"
            output += f"😊 Sentiment: {entry['sentiment']}\n"
            output += f"✅ Safety Score: {entry['safety_score']}\n"
            output += "-" * 50 + "\n"

        yield output
    except Exception as e:
        yield f"An error occurred: {str(e)}"
|
|
|
def create_interface():
    """Build and return the Gradio Blocks UI.

    Eagerly loads all models and stores them as module-level globals so the
    streaming handler (process_input_with_loading) can reach them, then
    wires up the input form, sample-data filler, clear button and output box.
    """
    print("Loading models...")
    global generator_tokenizer, generator, sentiment_analyzer, content_checker
    generator_tokenizer, generator, sentiment_analyzer, content_checker = load_models()
    print("Models loaded successfully!")

    def fill_sample_data():
        # Demo values, one per entry of input_components (same order).
        return [
            "EcoBottle",
            "Sustainable water bottle made from recycled ocean plastic",
            "Environmentally conscious young professionals",
            "100% recycled materials, Insulated design, Leak-proof",
            "Helps clean oceans, Keeps drinks cold for 24 hours",
            "Twitter",
            "professional"
        ]

    def clear_form():
        # One empty string per input component.
        return [""] * 7

    with gr.Blocks(theme=gr.themes.Default()) as demo:
        gr.Markdown("# Ethimar - AI Marketing Content Generator")
        gr.Markdown("Generate ethical marketing content with AI-powered insights.\n⏳ Note: First generation might take 1-3 minutes due to model loading. Subsequent generations will be faster!")
        gr.Markdown("---------------------")
        gr.Markdown("Current limitations: 1. Uses a simplified language GPT model to generate content due to using the free version of the Hugging Face Spaces, so the results might not always be perfect. 2. Only supports English language & Twitter(X) and Instagram at the moment. 3. Uses a template based sentiment analysis since Bertweet model only supports 3 sentiments (negative/neutral/positive)")
        gr.Markdown("Next Steps: 1. Add more platforms in addition to Twitter(X) and Instagram. 2. Create image & videos in addition to generating text, using DALL-E 3 & Meta's Segment Anything Model.")

        with gr.Row():
            fill_button = gr.Button(
                "Fill the form with sample data",
                variant="primary",
                size="sm",
                scale=1
            )

        with gr.Row():
            with gr.Column(scale=1):
                product_name = gr.Textbox(label="Product Name", placeholder="Enter product name")
                product_description = gr.Textbox(label="Product Description", lines=3, placeholder="Brief description of your product")
                target_audience = gr.Textbox(label="Target Audience", placeholder="Who is this product for?")
                key_features = gr.Textbox(label="Key Features", lines=2, placeholder="Main features of your product")
                unique_benefits = gr.Textbox(label="Unique Benefits", lines=2, placeholder="What makes your product special?")
                platform = gr.Radio(
                    choices=["Twitter", "Instagram"],
                    label="Platform",
                    # BUG FIX: the default used to be "Twitter(X)", which is
                    # not one of the declared choices, so the radio started
                    # with no valid selection and downstream checks against
                    # "Twitter" (char limit, CTA picking) silently fell into
                    # the Instagram branch. Default to a real choice.
                    value="Twitter"
                )
                tone = gr.Textbox(label="Tone", placeholder="e.g., professional, casual, friendly")

                with gr.Row():
                    with gr.Column(scale=2):
                        submit_button = gr.Button("Generate Content", variant="primary", scale=1)
                    with gr.Column(scale=1):
                        clear_button = gr.Button("Clear Form", variant="secondary", scale=1)

            with gr.Column(scale=1):
                output = gr.Textbox(
                    label="Generated Content",
                    lines=12,
                    value="✨ Enter your product details and click 'Generate Content' to start!"
                )

        # Shared ordering contract for fill/clear/submit handlers.
        input_components = [
            product_name,
            product_description,
            target_audience,
            key_features,
            unique_benefits,
            platform,
            tone
        ]

        fill_button.click(
            fn=fill_sample_data,
            outputs=input_components
        )

        submit_button.click(
            fn=process_input_with_loading,
            inputs=input_components,
            outputs=output
        )

        clear_button.click(
            fn=clear_form,
            outputs=input_components
        )

    return demo
|
|
|
|
|
if __name__ == "__main__":
    # Build the UI (this eagerly loads all models) and start the local
    # Gradio server with default settings.
    demo = create_interface()
    demo.launch()