Ozgur Unlu committed
Commit 448716e
Parent(s): 0311f19

first version

- app.py +211 -0
- requirements.txt +7 -0

app.py ADDED
@@ -0,0 +1,211 @@
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from newspaper import Article
import nltk
from datetime import datetime, timedelta
import requests
from bs4 import BeautifulSoup
import re

# Download required NLTK data
try:
    nltk.data.find('tokenizers/punkt')
except LookupError:
    nltk.download('punkt')

# Initialize models and tokenizers
def load_models():
    # Text generation model
    generator_model = "facebook/opt-350m"
    generator_tokenizer = AutoTokenizer.from_pretrained(generator_model)
    generator = AutoModelForCausalLM.from_pretrained(generator_model)

    # Sentiment analysis
    sentiment_analyzer = pipeline(
        "sentiment-analysis",
        model="finiteautomata/bertweet-base-sentiment-analysis"
    )

    # Bias detection
    bias_detector = pipeline(
        "text-classification",
        model="unitary/unbiased-bertscore"
    )

    return generator_tokenizer, generator, sentiment_analyzer, bias_detector

# Function to fetch recent news
def fetch_recent_news(query, num_articles=3):
    base_url = "https://news.google.com/rss/search"
    params = {
        'q': query,
        'hl': 'en-US',
        'gl': 'US',
        'ceid': 'US:en'
    }

    try:
        response = requests.get(base_url, params=params)
        soup = BeautifulSoup(response.content, 'xml')
        items = soup.find_all('item')[:num_articles]

        news_data = []
        for item in items:
            try:
                article = Article(item.link.text)
                article.download()
                article.parse()
                article.nlp()
                news_data.append({
                    'title': article.title,
                    'summary': article.summary
                })
            except Exception:
                # Skip articles that fail to download or parse
                continue

        return news_data
    except Exception as e:
        return [{'title': 'Error fetching news', 'summary': str(e)}]

# Generate content with ethical oversight
def generate_content(
    product_name,
    product_description,
    target_audience,
    key_features,
    unique_benefits,
    platform,
    tone,
    generator_tokenizer,
    generator,
    sentiment_analyzer,
    bias_detector
):
    # Format prompt based on platform
    char_limit = 280 if platform == "Twitter" else 500

    # Get recent news for context
    news_data = fetch_recent_news(f"{product_name} {target_audience}")
    news_context = "\n".join([f"Recent news: {item['title']}" for item in news_data])

    # Create prompt
    prompt = f"""
    Product: {product_name}
    Description: {product_description}
    Target Audience: {target_audience}
    Key Features: {key_features}
    Unique Benefits: {unique_benefits}
    Tone: {tone}
    Platform: {platform}
    Character Limit: {char_limit}

    {news_context}

    Create a {platform} post that highlights the product's benefits while maintaining a {tone} tone:
    """

    # Generate initial content
    inputs = generator_tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
    outputs = generator.generate(
        inputs["input_ids"],
        max_length=char_limit,  # max_length is counted in tokens, not characters
        num_return_sequences=3,
        temperature=0.7,
        top_p=0.9,
        do_sample=True,
    )

    generated_texts = [generator_tokenizer.decode(output, skip_special_tokens=True) for output in outputs]

    # Filter and analyze content
    filtered_content = []
    for text in generated_texts:
        # Clean up text
        text = text.replace(prompt, "").strip()

        # Check sentiment
        sentiment = sentiment_analyzer(text)[0]

        # Check bias
        bias = bias_detector(text)[0]

        # Filter based on ethical considerations
        if (
            sentiment['label'] not in ('negative', 'NEG') and  # bertweet model labels are POS/NEU/NEG
            float(bias['score']) < 0.7 and  # Adjust threshold as needed
            len(text) <= char_limit
        ):
            filtered_content.append({
                'text': text,
                'sentiment': sentiment['label'],
                'bias_score': f"{float(bias['score']):.2f}"
            })

    return filtered_content

# Gradio interface
def create_interface():
    generator_tokenizer, generator, sentiment_analyzer, bias_detector = load_models()

    def process_input(
        product_name,
        product_description,
        target_audience,
        key_features,
        unique_benefits,
        platform,
        tone
    ):
        results = generate_content(
            product_name,
            product_description,
            target_audience,
            key_features,
            unique_benefits,
            platform,
            tone,
            generator_tokenizer,
            generator,
            sentiment_analyzer,
            bias_detector
        )

        output = ""
        for i, content in enumerate(results, 1):
            output += f"\nVersion {i}:\n"
            output += f"Content: {content['text']}\n"
            output += f"Sentiment: {content['sentiment']}\n"
            output += f"Bias Score: {content['bias_score']}\n"
            output += "-" * 50 + "\n"

        return output

    # Create the interface
    iface = gr.Interface(
        fn=process_input,
        inputs=[
            gr.Textbox(label="Product Name"),
            gr.Textbox(label="Product Description", lines=3),
            gr.Textbox(label="Target Audience"),
            gr.Textbox(label="Key Features", lines=2),
            gr.Textbox(label="Unique Benefits", lines=2),
            gr.Radio(
                choices=["Twitter", "Instagram"],
                label="Platform",
                value="Twitter"
            ),
            gr.Textbox(label="Tone (e.g., professional, casual, friendly)"),
        ],
        outputs=gr.Textbox(label="Generated Content", lines=10),
        title="Ethimar - AI Marketing Content Generator",
        description="Generate ethical marketing content with AI-powered insights",
        theme="default"
    )

    return iface

# Launch the app
if __name__ == "__main__":
    iface = create_interface()
    iface.launch()
requirements.txt ADDED
@@ -0,0 +1,7 @@
gradio
torch
transformers
newspaper3k
nltk
requests
beautifulsoup4