Ozgur Unlu
committed on
Commit
•
76c0903
1
Parent(s):
448716e
initial setup and fixes changed news api
Browse files- app.py +22 -19
- requirements.txt +3 -2
app.py
CHANGED
@@ -1,12 +1,11 @@
|
|
1 |
import gradio as gr
|
2 |
import torch
|
3 |
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
4 |
-
from newspaper import Article
|
5 |
import nltk
|
6 |
from datetime import datetime, timedelta
|
7 |
import requests
|
8 |
from bs4 import BeautifulSoup
|
9 |
-
import
|
10 |
|
11 |
# Download required NLTK data
|
12 |
try:
|
@@ -35,8 +34,9 @@ def load_models():
|
|
35 |
|
36 |
return generator_tokenizer, generator, sentiment_analyzer, bias_detector
|
37 |
|
38 |
-
#
|
39 |
def fetch_recent_news(query, num_articles=3):
|
|
|
40 |
base_url = "https://news.google.com/rss/search"
|
41 |
params = {
|
42 |
'q': query,
|
@@ -46,27 +46,23 @@ def fetch_recent_news(query, num_articles=3):
|
|
46 |
}
|
47 |
|
48 |
try:
|
49 |
-
response = requests.get(base_url, params=params)
|
50 |
soup = BeautifulSoup(response.content, 'xml')
|
51 |
-
items = soup.find_all('item')
|
52 |
|
53 |
news_data = []
|
54 |
for item in items:
|
55 |
try:
|
56 |
-
article = Article(item.link.text)
|
57 |
-
article.download()
|
58 |
-
article.parse()
|
59 |
-
article.nlp()
|
60 |
news_data.append({
|
61 |
-
'title':
|
62 |
-
'
|
63 |
})
|
64 |
except:
|
65 |
continue
|
66 |
|
67 |
return news_data
|
68 |
except Exception as e:
|
69 |
-
return [{'title': '
|
70 |
|
71 |
# Generate content with ethical oversight
|
72 |
def generate_content(
|
@@ -87,7 +83,7 @@ def generate_content(
|
|
87 |
|
88 |
# Get recent news for context
|
89 |
news_data = fetch_recent_news(f"{product_name} {target_audience}")
|
90 |
-
news_context = "\n".join([f"Recent
|
91 |
|
92 |
# Create prompt
|
93 |
prompt = f"""
|
@@ -124,6 +120,10 @@ def generate_content(
|
|
124 |
# Clean up text
|
125 |
text = text.replace(prompt, "").strip()
|
126 |
|
|
|
|
|
|
|
|
|
127 |
# Check sentiment
|
128 |
sentiment = sentiment_analyzer(text)[0]
|
129 |
|
@@ -171,6 +171,9 @@ def create_interface():
|
|
171 |
bias_detector
|
172 |
)
|
173 |
|
|
|
|
|
|
|
174 |
output = ""
|
175 |
for i, content in enumerate(results, 1):
|
176 |
output += f"\nVersion {i}:\n"
|
@@ -185,17 +188,17 @@ def create_interface():
|
|
185 |
iface = gr.Interface(
|
186 |
fn=process_input,
|
187 |
inputs=[
|
188 |
-
gr.Textbox(label="Product Name"),
|
189 |
-
gr.Textbox(label="Product Description", lines=3),
|
190 |
-
gr.Textbox(label="Target Audience"),
|
191 |
-
gr.Textbox(label="Key Features", lines=2),
|
192 |
-
gr.Textbox(label="Unique Benefits", lines=2),
|
193 |
gr.Radio(
|
194 |
choices=["Twitter", "Instagram"],
|
195 |
label="Platform",
|
196 |
value="Twitter"
|
197 |
),
|
198 |
-
gr.Textbox(label="Tone
|
199 |
],
|
200 |
outputs=gr.Textbox(label="Generated Content", lines=10),
|
201 |
title="Ethimar - AI Marketing Content Generator",
|
|
|
1 |
import gradio as gr
|
2 |
import torch
|
3 |
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
|
|
4 |
import nltk
|
5 |
from datetime import datetime, timedelta
|
6 |
import requests
|
7 |
from bs4 import BeautifulSoup
|
8 |
+
import json
|
9 |
|
10 |
# Download required NLTK data
|
11 |
try:
|
|
|
34 |
|
35 |
return generator_tokenizer, generator, sentiment_analyzer, bias_detector
|
36 |
|
37 |
+
# Simplified news fetching function
|
38 |
def fetch_recent_news(query, num_articles=3):
|
39 |
+
# Using Google News RSS feed
|
40 |
base_url = "https://news.google.com/rss/search"
|
41 |
params = {
|
42 |
'q': query,
|
|
|
46 |
}
|
47 |
|
48 |
try:
|
49 |
+
response = requests.get(base_url, params=params, timeout=5)
|
50 |
soup = BeautifulSoup(response.content, 'xml')
|
51 |
+
items = soup.find_all('item', limit=num_articles)
|
52 |
|
53 |
news_data = []
|
54 |
for item in items:
|
55 |
try:
|
|
|
|
|
|
|
|
|
56 |
news_data.append({
|
57 |
+
'title': item.title.text,
|
58 |
+
'description': item.description.text if item.description else ""
|
59 |
})
|
60 |
except:
|
61 |
continue
|
62 |
|
63 |
return news_data
|
64 |
except Exception as e:
|
65 |
+
return [{'title': f'Using default context due to error: {str(e)}', 'description': ''}]
|
66 |
|
67 |
# Generate content with ethical oversight
|
68 |
def generate_content(
|
|
|
83 |
|
84 |
# Get recent news for context
|
85 |
news_data = fetch_recent_news(f"{product_name} {target_audience}")
|
86 |
+
news_context = "\n".join([f"Recent context: {item['title']}" for item in news_data])
|
87 |
|
88 |
# Create prompt
|
89 |
prompt = f"""
|
|
|
120 |
# Clean up text
|
121 |
text = text.replace(prompt, "").strip()
|
122 |
|
123 |
+
# Skip if text is too short
|
124 |
+
if len(text) < 10:
|
125 |
+
continue
|
126 |
+
|
127 |
# Check sentiment
|
128 |
sentiment = sentiment_analyzer(text)[0]
|
129 |
|
|
|
171 |
bias_detector
|
172 |
)
|
173 |
|
174 |
+
if not results:
|
175 |
+
return "No suitable content generated. Please try again with different parameters."
|
176 |
+
|
177 |
output = ""
|
178 |
for i, content in enumerate(results, 1):
|
179 |
output += f"\nVersion {i}:\n"
|
|
|
188 |
iface = gr.Interface(
|
189 |
fn=process_input,
|
190 |
inputs=[
|
191 |
+
gr.Textbox(label="Product Name", placeholder="Enter product name"),
|
192 |
+
gr.Textbox(label="Product Description", lines=3, placeholder="Brief description of your product"),
|
193 |
+
gr.Textbox(label="Target Audience", placeholder="Who is this product for?"),
|
194 |
+
gr.Textbox(label="Key Features", lines=2, placeholder="Main features of your product"),
|
195 |
+
gr.Textbox(label="Unique Benefits", lines=2, placeholder="What makes your product special?"),
|
196 |
gr.Radio(
|
197 |
choices=["Twitter", "Instagram"],
|
198 |
label="Platform",
|
199 |
value="Twitter"
|
200 |
),
|
201 |
+
gr.Textbox(label="Tone", placeholder="e.g., professional, casual, friendly"),
|
202 |
],
|
203 |
outputs=gr.Textbox(label="Generated Content", lines=10),
|
204 |
title="Ethimar - AI Marketing Content Generator",
|
requirements.txt
CHANGED
@@ -1,7 +1,8 @@
|
|
1 |
gradio
|
2 |
torch
|
3 |
transformers
|
4 |
-
|
5 |
nltk
|
6 |
requests
|
7 |
-
beautifulsoup4
|
|
|
|
1 |
gradio
|
2 |
torch
|
3 |
transformers
|
4 |
+
newsapi-python
|
5 |
nltk
|
6 |
requests
|
7 |
+
beautifulsoup4
|
8 |
+
lxml
|