import gradio as gr
from transformers import pipeline
import feedparser
from datetime import datetime, timedelta
import pytz
from bs4 import BeautifulSoup
import hashlib
import threading
import pandas as pd
# Global settings
SUMMARIZER_MODELS = {
    "Default (facebook/bart-large-cnn)": "facebook/bart-large-cnn",
    "Free Model (distilbart-cnn-6-6)": "sshleifer/distilbart-cnn-6-6"
}
CACHE_SIZE = 500
RSS_FETCH_INTERVAL = timedelta(hours=8)
ARTICLE_LIMIT = 5
NEWS_SOURCES = {
    "Movilizaciones Sindicales": {
        "Pagina12": "https://www.pagina12.com.ar/rss/edicion-impresa",
    }
}
class NewsCache:
    """Thread-safe, fixed-size cache with FIFO eviction."""

    def __init__(self, size):
        self.cache = {}
        self.size = size
        self.lock = threading.Lock()

    def get(self, key):
        with self.lock:
            return self.cache.get(key)

    def set(self, key, value):
        with self.lock:
            if len(self.cache) >= self.size:
                # Evict the oldest entry (dicts preserve insertion order).
                oldest_key = next(iter(self.cache))
                del self.cache[oldest_key]
            self.cache[key] = value
cache = NewsCache(CACHE_SIZE)
def fetch_rss_news(categories):
    articles = []
    cutoff_time = datetime.now(pytz.UTC) - RSS_FETCH_INTERVAL
    for category in categories:
        for source, url in NEWS_SOURCES.get(category, {}).items():
            try:
                feed = feedparser.parse(url)
                for entry in feed.entries:
                    # Some feeds omit the publication date; skip those entries
                    # instead of letting one of them abort the whole source.
                    if not getattr(entry, "published_parsed", None):
                        continue
                    published = datetime(*entry.published_parsed[:6], tzinfo=pytz.UTC)
                    if published > cutoff_time:
                        articles.append({
                            "title": entry.title,
                            "description": BeautifulSoup(entry.description, "html.parser").get_text(),
                            "link": entry.link,
                            "category": category,
                            "source": source,
                            "published": published
                        })
            except Exception:
                # A failing feed should not break the remaining sources.
                continue
    # Newest first, capped at ARTICLE_LIMIT.
    articles = sorted(articles, key=lambda x: x["published"], reverse=True)[:ARTICLE_LIMIT]
    return articles
def summarize_text(text, model_name):
    # Check the cache before loading the model; hashing is cheap, the model is not.
    content_hash = hashlib.md5(text.encode()).hexdigest()
    cached_summary = cache.get(content_hash)
    if cached_summary:
        return cached_summary
    try:
        summarizer = pipeline("summarization", model=model_name, device=-1)
        result = summarizer(text, max_length=120, min_length=40, truncation=True)
        summary = result[0]['summary_text']
        cache.set(content_hash, summary)
        return summary
    except Exception:
        return "Summary unavailable."
def summarize_articles(articles, model_name):
    summaries = []
    for article in articles:
        content = article["description"]
        summary = summarize_text(content, model_name)
        summaries.append(f"""
📰 {article['title']}
- 📁 Category: {article['category']}
- 💡 Source: {article['source']}
- 🔗 Read More: {article['link']}
📃 Summary: {summary}
""")
    return "\n".join(summaries)
def generate_summary(selected_categories, model_name):
    if not selected_categories:
        return "Please select at least one category."
    articles = fetch_rss_news(selected_categories)
    if not articles:
        return "No recent news found in the selected categories."
    return summarize_articles(articles, model_name)
def fetch_union_mobilizations():
    articles = []
    cutoff_time = datetime.now(pytz.UTC) - timedelta(days=1)
    for source, url in NEWS_SOURCES["Movilizaciones Sindicales"].items():
        try:
            feed = feedparser.parse(url)
            for entry in feed.entries:
                # Skip entries without a publication date.
                if not getattr(entry, "published_parsed", None):
                    continue
                published = datetime(*entry.published_parsed[:6], tzinfo=pytz.UTC)
                if published > cutoff_time:
                    # Keep only union-mobilization items (Spanish title keywords).
                    if "movilización" in entry.title.lower() or "sindical" in entry.title.lower():
                        articles.append({
                            "title": entry.title,
                            "description": BeautifulSoup(entry.description, "html.parser").get_text(),
                            "link": entry.link,
                            "source": source,
                            "published": published
                        })
        except Exception:
            continue
    return articles
def create_mobilization_table():
    articles = fetch_union_mobilizations()
    if not articles:
        return "No recent union mobilizations found."
    # Render the articles as a plain-text table with pandas.
    df = pd.DataFrame(articles)
    return df.to_string(index=False)
# Gradio Interface
demo = gr.Blocks()
with demo:
    gr.Markdown("# 📰 AI News Summarizer")
    with gr.Row():
        categories = gr.CheckboxGroup(
            choices=list(NEWS_SOURCES.keys()),
            label="Select News Categories"
        )
        model_selector = gr.Radio(
            choices=list(SUMMARIZER_MODELS.keys()),
            label="Choose Summarization Model",
            value="Default (facebook/bart-large-cnn)"
        )
    summarize_button = gr.Button("Get News Summary")
    summary_output = gr.Textbox(label="News Summary", lines=20)

    def get_summary(selected_categories, selected_model):
        # Map the display name to the actual model id.
        model_name = SUMMARIZER_MODELS[selected_model]
        return generate_summary(selected_categories, model_name)

    summarize_button.click(get_summary, inputs=[categories, model_selector], outputs=summary_output)
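    # fetch_union_mobilizations/create_mobilization_table are otherwise unused.
    # A minimal sketch of exposing them in the UI (assumes the plain-text
    # pandas table reads acceptably inside a Textbox):
    mobilization_button = gr.Button("Get Union Mobilizations")
    mobilization_output = gr.Textbox(label="Union Mobilizations (last 24h)", lines=10)
    mobilization_button.click(create_mobilization_table, inputs=[], outputs=mobilization_output)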
if __name__ == "__main__":
demo.launch()