# HuggingFace Trending Board — Gradio app showing trending Spaces/Models/Datasets
# with AI ratings, LLM-based analysis, and news-style video generation.
import os | |
import random | |
import base64 | |
import requests | |
from selenium import webdriver | |
from selenium.webdriver.support.ui import WebDriverWait | |
from selenium.webdriver.support import expected_conditions as EC | |
from selenium.webdriver.common.by import By | |
from selenium.common.exceptions import WebDriverException, TimeoutException | |
from PIL import Image | |
from io import BytesIO | |
from datetime import datetime | |
import gradio as gr | |
from typing import List, Tuple # List μΆκ° | |
import time | |
from pathlib import Path | |
from datetime import datetime, timedelta | |
from huggingface_hub import InferenceClient | |
from dotenv import load_dotenv | |
from bs4 import BeautifulSoup | |
from urllib.parse import urljoin | |
from gtts import gTTS | |
from moviepy.editor import VideoFileClip, AudioFileClip, ImageClip, concatenate_videoclips | |
import tempfile | |
import shutil | |
import numpy as np | |
# Load environment variables (HF_TOKEN, etc.) from a local .env file.
load_dotenv()

# HuggingFace inference client used for all LLM analysis calls below.
hf_client = InferenceClient(
    "CohereForAI/c4ai-command-r-plus-08-2024",
    token=os.getenv("HF_TOKEN")
)

# Directory where website screenshots are cached as JPEG files.
CACHE_DIR = Path("screenshot_cache")
CACHE_DIR.mkdir(exist_ok=True)

# Global in-memory screenshot cache.
# NOTE(review): appears unused in this file — the disk cache above is what
# get_cached_screenshot()/take_screenshot() actually use; confirm before removing.
SCREENSHOT_CACHE = {}
def get_cached_screenshot(url: str) -> str:
    """Return a base64-encoded JPEG screenshot of *url*, using the disk cache.

    On a cache hit the cached file is re-encoded and returned; on a miss or a
    corrupt cache entry a fresh screenshot is taken via take_screenshot().
    Returns an empty string on any unrecoverable error (never raises).
    """
    try:
        # Derive a filesystem-safe cache filename from the URL.
        # NOTE(review): truncating to 200 chars could collide for very long
        # URLs sharing a prefix — acceptable for this app's usage.
        safe_filename = base64.urlsafe_b64encode(url.encode()).decode()
        cache_file = CACHE_DIR / f"{safe_filename[:200]}.jpg"  # JPG instead of PNG

        if cache_file.exists():
            try:
                with Image.open(cache_file) as img:
                    buffered = BytesIO()
                    img.save(buffered, format="JPEG", quality=85, optimize=True)
                    return base64.b64encode(buffered.getvalue()).decode()
            except Exception as e:
                # Corrupt cache entry: log, delete it, fall through to retake.
                print(f"Cache read error for {url}: {e}")
                if cache_file.exists():
                    cache_file.unlink()

        return take_screenshot(url)
    except Exception as e:
        print(f"Screenshot cache error for {url}: {e}")
        return ""
def take_screenshot(url: str) -> str:
    """Capture a screenshot of *url* with headless Chrome.

    The image is downscaled, flattened to RGB, saved into the disk cache as
    JPEG, and returned base64-encoded. Returns an empty string on failure.
    """
    if not url.startswith('http'):
        url = f"https://{url}"

    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    options.add_argument('--no-sandbox')            # required in container environments
    options.add_argument('--disable-dev-shm-usage')
    options.add_argument('--window-size=1080,720')

    driver = None
    try:
        driver = webdriver.Chrome(options=options)
        driver.get(url)

        # Wait until the page body exists (basic "page loaded" signal).
        WebDriverWait(driver, 15).until(
            EC.presence_of_element_located((By.TAG_NAME, "body"))
        )

        # Extra settle time for JS-rendered content.
        time.sleep(3)

        # Capture and optimize the screenshot.
        screenshot = driver.get_screenshot_as_png()
        img = Image.open(BytesIO(screenshot))

        # Downscale to keep cache files and inline payloads small.
        max_size = (800, 600)
        img.thumbnail(max_size, Image.Resampling.LANCZOS)

        # Flatten transparency onto white so the image can be saved as JPEG.
        if img.mode in ('RGBA', 'LA'):
            background = Image.new('RGB', img.size, (255, 255, 255))
            background.paste(img, mask=img.split()[-1])
            img = background

        # Persist to the disk cache (same naming scheme as get_cached_screenshot).
        safe_filename = base64.urlsafe_b64encode(url.encode()).decode()
        cache_file = CACHE_DIR / f"{safe_filename[:200]}.jpg"
        img.save(cache_file, format="JPEG", quality=85, optimize=True)

        # Encode the in-memory copy for the caller.
        buffered = BytesIO()
        img.save(buffered, format="JPEG", quality=85, optimize=True)
        return base64.b64encode(buffered.getvalue()).decode()
    except Exception as e:
        print(f"Screenshot error for {url}: {e}")
        return ""
    finally:
        if driver:
            driver.quit()
def cleanup_cache():
    """Delete screenshot cache files that are older than 24 hours or empty."""
    try:
        now = time.time()
        for cached in CACHE_DIR.glob("*.jpg"):
            try:
                # One stat() call covers both the age and the size check.
                info = cached.stat()
                if (now - info.st_mtime > 86400) or info.st_size == 0:
                    cached.unlink()
            except Exception as e:
                print(f"Error cleaning cache file {cached}: {e}")
    except Exception as e:
        print(f"Cache cleanup error: {e}")
# Purge stale screenshot cache entries once at app startup.
cleanup_cache()
def calculate_rising_rate(created_date: str, rank: int) -> int:
    """Compute the AI Rising Rate as a star count from 1 to 5.

    Combines a recency score (newer creation date scores higher, up to 300)
    with a rank score (better rank scores higher, up to 600), then maps the
    combined total onto star buckets.
    """
    # Recency component: decays one point per day since creation.
    creation_day = datetime.strptime(created_date.split('T')[0], '%Y-%m-%d')
    age_days = (datetime.now() - creation_day).days
    recency_score = max(0, 300 - age_days)

    # Rank component: rank 0 scores the full 600 points.
    position_score = max(0, 600 - rank)

    combined = recency_score + position_score

    # Bucket the combined score into stars; anything above 800 earns 5.
    for ceiling, stars in ((200, 1), (400, 2), (600, 3), (800, 4)):
        if combined <= ceiling:
            return stars
    return 5
def get_popularity_grade(likes: int, stars: int) -> tuple:
    """Map likes and a star count onto an 18-level letter grade.

    Returns (grade, total_score): likes contribute directly (capped at
    10,000) and each star adds 1,000 points; the grade runs from "AAA+"
    down to "B-".
    """
    capped_likes = min(likes, 10000)        # likes contribution, capped
    total_score = capped_likes + stars * 1000

    # Descending threshold table: the first one the score clears wins.
    thresholds = (
        (14500, "AAA+"), (14000, "AAA"), (13500, "AAA-"),
        (13000, "AA+"), (12500, "AA"), (12000, "AA-"),
        (11500, "A+"), (11000, "A"), (10000, "A-"),
        (9000, "BBB+"), (8000, "BBB"), (7000, "BBB-"),
        (6000, "BB+"), (5000, "BB"), (4000, "BB-"),
        (3000, "B+"), (2000, "B"), (1000, "B-"),
    )
    grade = next((g for limit, g in thresholds if total_score >= limit), "B-")
    return grade, total_score
# get_card ν¨μ λ΄μ hardware_info λΆλΆμ λ€μμΌλ‘ κ΅μ²΄: | |
def get_rating_info(item: dict, index: int) -> str:
    """Build the HTML fragment showing the AI Rising Rate stars and the
    AI Popularity grade for one gallery item.

    *index* is the zero-based list position (rank = index + 1).
    """
    created = item.get('createdAt', '').split('T')[0]
    likes = int(str(item.get('likes', '0')).replace(',', ''))

    # AI Rising Rate: 1-5 stars based on creation date and list position.
    stars = calculate_rising_rate(created, index + 1)
    star_html = "β " * stars + "β" * (5 - stars)  # filled + empty stars

    # AI Popularity Score: letter grade plus the numeric total.
    grade, score = get_popularity_grade(likes, stars)

    # Color per grade family; the +/- suffix shares the base family color.
    grade_colors = {
        'AAA': '#FFD700', 'AA': '#FFA500', 'A': '#FF4500',
        'BBB': '#4169E1', 'BB': '#1E90FF', 'B': '#00BFFF'
    }
    grade_base = grade.rstrip('+-')
    grade_color = grade_colors.get(grade_base, '#666666')

    return f"""
    <div style='
        margin-top: 15px;
        padding: 15px;
        background: rgba(255,255,255,0.4);
        border-radius: 10px;
        font-size: 0.9em;
        box-shadow: 0 2px 10px rgba(0,0,0,0.1);'>
        <div style='
            display: grid;
            grid-template-columns: repeat(2, 1fr);
            gap: 15px;'>
            <div style='
                color: #333;
                display: flex;
                flex-direction: column;
                gap: 5px;'>
                <span style='font-weight: bold;'>AI Rising Rate:</span>
                <span style='
                    color: #FF8C00;
                    font-size: 1.4em;
                    letter-spacing: 2px;
                    text-shadow: 1px 1px 2px rgba(0,0,0,0.1);'>{star_html}</span>
            </div>
            <div style='
                color: #333;
                display: flex;
                flex-direction: column;
                gap: 5px;'>
                <span style='font-weight: bold;'>AI Popularity Score:</span>
                <span style='
                    font-size: 1.2em;
                    font-weight: bold;
                    color: {grade_color};
                    text-shadow: 1px 1px 2px rgba(0,0,0,0.1);'>{grade} ({score:,})</span>
            </div>
        </div>
    </div>
    """
def get_hardware_info(item: dict) -> tuple:
    """Extract (cpu_info, gpu_info, sdk) strings from a space's metadata.

    GPU info becomes e.g. "T4 (16GB)" when the runtime reports a GPU,
    "GPU Enabled" when only the @spaces.GPU decorator is detected, and
    "None" otherwise. Falls back to ('Standard', 'None', 'N/A') on any
    parsing failure.
    """
    try:
        runtime = item.get('runtime', {})

        # CPU: the API reports a descriptive string; default to 'Standard'.
        cpu_info = runtime.get('cpu', 'Standard')

        # GPU: only reported when the accelerator is a GPU; append memory
        # size when available.
        gpu_info = "None"
        if runtime.get('accelerator') == "gpu":
            gpu_type = runtime.get('gpu', {}).get('name', '')
            gpu_memory = runtime.get('gpu', {}).get('memory', '')
            if gpu_type:
                gpu_info = f"{gpu_type}"
                if gpu_memory:
                    gpu_info += f" ({gpu_memory}GB)"

        # Spaces using the @spaces.GPU decorator run on GPU even when the
        # runtime block does not report one.
        if '@spaces.GPU' in str(item.get('sdk_version', '')):
            if gpu_info == "None":
                gpu_info = "GPU Enabled"

        sdk = item.get('sdk', 'N/A')

        # FIX: removed leftover debug print() statements that dumped runtime
        # metadata to stdout on every card render.
        return cpu_info, gpu_info, sdk
    except Exception as e:
        print(f"Error parsing hardware info: {str(e)}")
        return 'Standard', 'None', 'N/A'
def get_card(item: dict, index: int, card_type: str = "space") -> str:
    """Render one gallery card (space / model / dataset) as an HTML string.

    The card links to the item's HuggingFace page and shows its rank badge,
    type badge, author, likes, creation date, and the AI rating block from
    get_rating_info().
    """
    item_id = item.get('id', '')
    # FIX: ids without a namespace (e.g. "gpt2") used to crash the unpack of
    # split('/', 1); fall back to an empty author in that case.
    if '/' in item_id:
        author, title = item_id.split('/', 1)
    else:
        author, title = '', item_id
    likes = format(item.get('likes', 0), ',')
    created = item.get('createdAt', '').split('T')[0]

    # Optional one-line description (only shown on space cards).
    short_description = item.get('cardData', {}).get('short_description', '')

    # Target URL per item type.
    if card_type == "space":
        url = f"https://huggingface.co/spaces/{item_id}"
    elif card_type == "model":
        url = f"https://huggingface.co/{item_id}"
    else:  # dataset
        url = f"https://huggingface.co/datasets/{item_id}"

    # Tags are shown on model/dataset cards only.
    tags = item.get('tags', [])

    # AI rating block HTML.
    rating_info = get_rating_info(item, index)

    # Per-type gradient and header background.
    if card_type == "space":
        gradient_colors = """
            rgba(255, 182, 193, 0.7),            /* νμ€ν
 νν¬ */
            rgba(173, 216, 230, 0.7),            /* νμ€ν
 λΈλ£¨ */
            rgba(255, 218, 185, 0.7)             /* νμ€ν
 νΌμΉ */
        """
        # FIX: the expensive screenshot was fetched TWICE per card
        # (`f(url) if f(url) else ''`); fetch it exactly once. The helper
        # already returns "" on failure, which renders the same as before.
        screenshot = get_cached_screenshot(url)
        bg_content = f"""
            background-image: url(data:image/png;base64,{screenshot});
            background-size: cover;
            background-position: center;
        """
        type_icon = "π―"
        type_label = "SPACE"
    elif card_type == "model":
        gradient_colors = """
            rgba(110, 142, 251, 0.7),            /* λͺ¨λΈ λΈλ£¨ */
            rgba(130, 158, 251, 0.7),
            rgba(150, 174, 251, 0.7)
        """
        bg_content = f"""
            background: linear-gradient(135deg, #6e8efb, #4a6cf7);
            padding: 15px;
        """
        type_icon = "π€"
        type_label = "MODEL"
    else:  # dataset
        gradient_colors = """
            rgba(255, 107, 107, 0.7),            /* λ°μ΄ν°μ
 λ λ */
            rgba(255, 127, 127, 0.7),
            rgba(255, 147, 147, 0.7)
        """
        bg_content = f"""
            background: linear-gradient(135deg, #ff6b6b, #ff8787);
            padding: 15px;
        """
        type_icon = "π"
        type_label = "DATASET"

    content_bg = f"""
        background: linear-gradient(135deg, {gradient_colors});
        backdrop-filter: blur(10px);
    """

    # Tag chips overlaying the header (models and datasets only).
    tags_html = ""
    if card_type != "space":
        tags_html = f"""
        <div style='
            position: absolute;
            top: 50%;
            left: 50%;
            transform: translate(-50%, -50%);
            display: flex;
            flex-wrap: wrap;
            gap: 5px;
            justify-content: center;
            width: 90%;'>
            {' '.join([f'''
                <span style='
                    background: rgba(255,255,255,0.2);
                    padding: 5px 10px;
                    border-radius: 15px;
                    color: white;
                    font-size: 0.8em;'>
                    #{tag}
                </span>
            ''' for tag in tags[:5]])}
        </div>
        """

    # Assemble the full card.
    return f"""
    <div class="card" style='
        position: relative;
        border: none;
        padding: 0;
        margin: 10px;
        border-radius: 20px;
        box-shadow: 0 10px 20px rgba(0,0,0,0.1);
        background: white;
        transition: all 0.3s ease;
        overflow: hidden;
        min-height: 400px;
        cursor: pointer;
        transform-origin: center;'
        onmouseover="this.style.transform='scale(0.98) translateY(5px)'; this.style.boxShadow='0 5px 15px rgba(0,0,0,0.2)';"
        onmouseout="this.style.transform='scale(1) translateY(0)'; this.style.boxShadow='0 10px 20px rgba(0,0,0,0.1)';"
        onclick="window.open('{url}', '_blank')">
        <!-- μλ¨ μμ -->
        <div style='
            width: 100%;
            height: 200px;
            {bg_content}
            position: relative;'>
            <!-- μμ λ±μ§ -->
            <div style='
                position: absolute;
                top: 10px;
                left: 10px;
                background: rgba(0,0,0,0.7);
                color: white;
                padding: 5px 15px;
                border-radius: 20px;
                font-weight: bold;
                font-size: 0.9em;
                backdrop-filter: blur(5px);'>
                #{index + 1}
            </div>
            <!-- νμ
 λ±μ§ -->
            <div style='
                position: absolute;
                top: 10px;
                right: 10px;
                background: rgba(255,255,255,0.9);
                padding: 5px 15px;
                border-radius: 20px;
                font-weight: bold;
                font-size: 0.8em;'>
                {type_icon} {type_label}
            </div>
            {tags_html}
        </div>
        <!-- μ½ν
μΈ  μμ -->
        <div style='
            padding: 20px;
            {content_bg}
            border-radius: 0 0 20px 20px;
            border-top: 1px solid rgba(255,255,255,0.5);'>
            <h3 style='
                margin: 0 0 15px 0;
                color: #333;
                font-size: 1.3em;
                line-height: 1.4;
                display: -webkit-box;
                -webkit-line-clamp: 2;
                -webkit-box-orient: vertical;
                overflow: hidden;
                text-overflow: ellipsis;
                text-shadow: 1px 1px 1px rgba(255,255,255,0.8);'>
                {title}
            </h3>
            {f'''
            <!-- Short Description (Space μΉ΄λμλ§ νμ) -->
            <div style='
                margin: 0 0 15px 0;
                color: #444;
                font-size: 0.9em;
                line-height: 1.5;
                display: -webkit-box;
                -webkit-line-clamp: 3;
                -webkit-box-orient: vertical;
                overflow: hidden;
                text-overflow: ellipsis;
                background: rgba(255,255,255,0.4);
                padding: 10px;
                border-radius: 8px;'>
                {short_description}
            </div>
            ''' if card_type == "space" and short_description else ''}
            <div style='
                display: grid;
                grid-template-columns: repeat(2, 1fr);
                gap: 10px;
                font-size: 0.9em;
                background: rgba(255,255,255,0.3);
                padding: 10px;
                border-radius: 10px;'>
                <div style='color: #444;'>
                    <span style='margin-right: 5px;'>π€</span> {author}
                </div>
                <div style='color: #444;'>
                    <span style='margin-right: 5px;'>β€οΈ</span> {likes}
                </div>
                <div style='color: #444; grid-column: span 2;'>
                    <span style='margin-right: 5px;'>π
</span> {created}
                </div>
            </div>
            {rating_info}
        </div>
    </div>
    """
def get_trending_spaces(search_query="", sort_by="rank", progress=gr.Progress()) -> Tuple[str, str]:
    """Fetch trending spaces from the HF API and render an HTML card gallery.

    Returns (gallery_html, status_message); on failure both carry the error.
    """
    url = "https://huggingface.co/api/spaces"
    try:
        progress(0, desc="Fetching spaces data...")
        params = {
            'full': 'true',
            'limit': 24
        }
        response = requests.get(url, params=params)
        response.raise_for_status()
        spaces = response.json()

        # Case-insensitive keyword filter over id + title.
        if search_query:
            spaces = [space for space in spaces if search_query.lower() in
                      (space.get('id', '') + ' ' + space.get('title', '')).lower()]

        # Optional re-sorting; "rank" keeps the API's trending order.
        sort_by = sort_by.lower()
        if sort_by == "rising_rate":
            spaces.sort(key=lambda x: calculate_rising_rate(x.get('createdAt', ''), 0), reverse=True)
        elif sort_by == "popularity":
            spaces.sort(key=lambda x: get_popularity_grade(
                int(str(x.get('likes', '0')).replace(',', '')),
                calculate_rising_rate(x.get('createdAt', ''), 0))[1],
                reverse=True)

        progress(0.1, desc="Creating gallery...")
        html_content = """
        <div style='padding: 20px; background: #f5f5f5;'>
            <div style='display: grid; grid-template-columns: repeat(auto-fill, minmax(300px, 1fr)); gap: 20px;'>
        """
        for idx, space in enumerate(spaces):
            html_content += get_card(space, idx, "space")
            progress((0.1 + 0.9 * idx/len(spaces)), desc=f"Loading space {idx+1}/{len(spaces)}...")
        html_content += "</div></div>"
        progress(1.0, desc="Complete!")
        return html_content, f"Found {len(spaces)} spaces"
    except Exception as e:
        error_html = f'<div style="color: red; padding: 20px;">Error: {str(e)}</div>'
        return error_html, f"Error: {str(e)}"
def get_models(search_query="", sort_by="rank", progress=gr.Progress()) -> Tuple[str, str]:
    """Fetch popular models from the HF API and render an HTML card gallery.

    Same structure as get_trending_spaces(), but against /api/models with a
    limit of 300. Returns (gallery_html, status_message).
    """
    url = "https://huggingface.co/api/models"
    try:
        progress(0, desc="Fetching models data...")
        params = {
            'full': 'true',
            'limit': 300
        }
        response = requests.get(url, params=params)
        response.raise_for_status()
        models = response.json()

        # Case-insensitive keyword filter over id + title.
        if search_query:
            models = [model for model in models if search_query.lower() in
                      (model.get('id', '') + ' ' + model.get('title', '')).lower()]

        # Optional re-sorting; "rank" keeps the API order.
        sort_by = sort_by.lower()
        if sort_by == "rising_rate":
            models.sort(key=lambda x: calculate_rising_rate(x.get('createdAt', ''), 0), reverse=True)
        elif sort_by == "popularity":
            models.sort(key=lambda x: get_popularity_grade(
                int(str(x.get('likes', '0')).replace(',', '')),
                calculate_rising_rate(x.get('createdAt', ''), 0))[1],
                reverse=True)

        progress(0.1, desc="Creating gallery...")
        html_content = """
        <div style='padding: 20px; background: #f5f5f5;'>
            <div style='display: grid; grid-template-columns: repeat(auto-fill, minmax(300px, 1fr)); gap: 20px;'>
        """
        for idx, model in enumerate(models):
            html_content += get_card(model, idx, "model")
            progress((0.1 + 0.9 * idx/len(models)), desc=f"Loading model {idx+1}/{len(models)}...")
        html_content += "</div></div>"
        progress(1.0, desc="Complete!")
        return html_content, f"Found {len(models)} models"
    except Exception as e:
        error_html = f'<div style="color: red; padding: 20px;">Error: {str(e)}</div>'
        return error_html, f"Error: {str(e)}"
def get_datasets(search_query="", sort_by="rank", progress=gr.Progress()) -> Tuple[str, str]:
    """Fetch popular datasets from the HF API and render an HTML card gallery.

    Same structure as get_trending_spaces(), but against /api/datasets with
    a limit of 300. Returns (gallery_html, status_message).
    """
    url = "https://huggingface.co/api/datasets"
    try:
        progress(0, desc="Fetching datasets data...")
        params = {
            'full': 'true',
            'limit': 300
        }
        response = requests.get(url, params=params)
        response.raise_for_status()
        datasets = response.json()

        # Case-insensitive keyword filter over id + title.
        if search_query:
            datasets = [dataset for dataset in datasets if search_query.lower() in
                        (dataset.get('id', '') + ' ' + dataset.get('title', '')).lower()]

        # Optional re-sorting; "rank" keeps the API order.
        sort_by = sort_by.lower()
        if sort_by == "rising_rate":
            datasets.sort(key=lambda x: calculate_rising_rate(x.get('createdAt', ''), 0), reverse=True)
        elif sort_by == "popularity":
            datasets.sort(key=lambda x: get_popularity_grade(
                int(str(x.get('likes', '0')).replace(',', '')),
                calculate_rising_rate(x.get('createdAt', ''), 0))[1],
                reverse=True)

        progress(0.1, desc="Creating gallery...")
        html_content = """
        <div style='padding: 20px; background: #f5f5f5;'>
            <div style='display: grid; grid-template-columns: repeat(auto-fill, minmax(300px, 1fr)); gap: 20px;'>
        """
        for idx, dataset in enumerate(datasets):
            html_content += get_card(dataset, idx, "dataset")
            progress((0.1 + 0.9 * idx/len(datasets)), desc=f"Loading dataset {idx+1}/{len(datasets)}...")
        html_content += "</div></div>"
        progress(1.0, desc="Complete!")
        return html_content, f"Found {len(datasets)} datasets"
    except Exception as e:
        error_html = f'<div style="color: red; padding: 20px;">Error: {str(e)}</div>'
        return error_html, f"Error: {str(e)}"
def sort_items(items, sort_by):
    """Return *items* ordered by the requested criterion.

    "rank" (and any unrecognized value) keeps the incoming API order;
    "rising_rate" and "popularity" sort descending by the matching score.
    """
    if sort_by == "rising_rate":
        key_fn = lambda entry: calculate_rising_rate(entry.get('createdAt', ''), 0)
    elif sort_by == "popularity":
        key_fn = lambda entry: get_popularity_grade(
            int(str(entry.get('likes', '0')).replace(',', '')),
            calculate_rising_rate(entry.get('createdAt', ''), 0))[1]
    else:
        # "rank" / unknown: the API already returned items in rank order.
        return items
    return sorted(items, key=key_fn, reverse=True)
# Generic API fetch helper (spaces/models/datasets share one code path).
def fetch_items(item_type, search_query="", sort_by="rank", limit=1000):
    """Fetch items of *item_type* ("spaces"/"models"/"datasets") from the HF
    API, filter by keyword, sort, and return at most the top 300.

    Returns an empty list on any error (never raises).
    """
    base_url = f"https://huggingface.co/api/{item_type}"
    params = {
        'full': 'true',
        'limit': limit,
        'search': search_query
    }
    try:
        response = requests.get(base_url, params=params)
        response.raise_for_status()
        items = response.json()

        # Client-side keyword filter over id + title, in addition to the
        # server-side 'search' param.
        if search_query:
            items = [item for item in items if search_query.lower() in
                     (item.get('id', '') + item.get('title', '')).lower()]

        # Apply the requested ordering.
        items = sort_items(items, sort_by)
        return items[:300]  # cap at the top 300
    except Exception as e:
        print(f"Error fetching items: {e}")
        return []
def get_space_source(space_id: str) -> dict:
    """Fetch a space's source files from HuggingFace.

    Tries both app.py (Gradio/Streamlit spaces) and index.html (static
    spaces). Missing/inaccessible files come back as empty strings; the
    function never raises.
    """
    try:
        # Authenticated raw-file fetch via the HF token.
        headers = {"Authorization": f"Bearer {os.getenv('HF_TOKEN')}"}

        # Try app.py.
        app_url = f"https://huggingface.co/spaces/{space_id}/raw/main/app.py"
        app_response = requests.get(app_url, headers=headers)

        # Try index.html.
        index_url = f"https://huggingface.co/spaces/{space_id}/raw/main/index.html"
        index_response = requests.get(index_url, headers=headers)

        source = {
            "app.py": app_response.text if app_response.status_code == 200 else "",
            "index.html": index_response.text if index_response.status_code == 200 else ""
        }
        return source
    except Exception as e:
        print(f"Error fetching source for {space_id}: {str(e)}")
        return {"app.py": "", "index.html": ""}
def analyze_space(space_info: dict) -> str:
    """Analyze one space's source code with the LLM and return report HTML.

    Fetches the space's app.py/index.html, asks the inference client for a
    six-point summary, and wraps the result (plus a source preview) in HTML.
    Returns an error <div> on failure.
    """
    try:
        space_id = space_info.get('id', '')
        url = f"https://huggingface.co/spaces/{space_id}"

        # Fetch source; prefer app.py, fall back to index.html.
        source = get_space_source(space_id)
        source_code = source["app.py"] or source["index.html"]

        if not source_code:
            # No source available: return a stub card instead of failing.
            return f"""
            <div style='
                padding: 20px;
                color: #333 !important;
                background: white !important;
            '>
                <h3 style='color: #333 !important; margin-bottom: 10px;'>
                    #{space_info.get('rank', '0')} {space_id}
                </h3>
                <p style='color: red;'>μμ€μ½λλ₯Ό κ°μ Έμ¬ μ μμ΅λλ€.</p>
            </div>
            """

        # LLM analysis prompt (truncate source to stay within context).
        prompt = f"""
        λ€μμ HuggingFace μ€νμ΄μ€({url})μ μ½λ λλ μ£Όμμ
λλ€:
        ```
        {source_code[:4000]}
        ```
        μ΄ λ΄μ©μ κΈ°λ°μΌλ‘ λ€μ νλͺ©μ λΆμν΄μ£ΌμΈμ:
        1. κ°μ: (ν μ€λ‘)
        2. μμ½: (ν μ€λ‘)
        3. νΉμ§ λ° μ₯μ : (ν μ€λ‘)
        4. μ¬μ© λμ: (ν μ€λ‘)
        5. μ¬μ© λ°©λ²: (ν μ€λ‘)
        6. μ μ¬ μλΉμ€μμ μ°¨λ³μ : (ν μ€λ‘)
        κ° νλͺ©μ μ€μ  νμΈλ λ΄μ©λ§ ν¬ν¨νμ¬ ν μ€λ‘ μμ±νμΈμ.
        μ½λκ° λ³΄μμ²λ¦¬λ κ²½μ° μ£Όμμ κΈ°λ°μΌλ‘ λΆμνμΈμ.
        """

        # Run the LLM analysis.
        messages = [
            {"role": "system", "content": "μμ€μ½λ λΆμ μ λ¬Έκ°λ‘μ μ€μ  μ½λ λ΄μ©λ§ κΈ°λ°μΌλ‘ λΆμνμΈμ."},
            {"role": "user", "content": prompt}
        ]
        response = hf_client.chat_completion(
            messages,
            max_tokens=3800,
            temperature=0.3
        )
        analysis = response.choices[0].message.content

        # Wrap analysis + collapsible source preview in the report card.
        return f"""
        <div style='
            padding: 20px;
            color: #333 !important;
            background: white !important;
        '>
            <h3 style='color: #333 !important; margin-bottom: 15px;'>
                #{space_info.get('rank', '0')} {space_id}
            </h3>
            <div style='color: #444 !important; font-size: 0.95em;'>
                {analysis}
            </div>
            <div style='margin-top: 15px;'>
                <details>
                    <summary style='cursor: pointer; color: #666 !important;'>
                        μμ€μ½λ 미리보기
                    </summary>
                    <pre style='
                        background: #f5f5f5 !important;
                        padding: 10px;
                        border-radius: 5px;
                        font-size: 0.8em;
                        margin-top: 10px;
                        white-space: pre-wrap;
                        word-break: break-all;
                        color: #333 !important;
                    '>{source_code[:500]}...</pre>
                </details>
            </div>
        </div>
        """
    except Exception as e:
        return f"<div style='color: red !important; padding: 20px;'>λΆμ μ€λ₯: {str(e)}</div>"
def analyze_top_spaces(progress=gr.Progress()) -> Tuple[str, str]:
    """Analyze the top-24 trending spaces and build a news-script HTML page.

    For each space the LLM writes a short news-report style blurb; the page
    starts with an editable intro textarea. Returns (html, status_message).
    """
    try:
        progress(0, desc="μ€νμ΄μ€ λ°μ΄ν° κ°μ Έμ€λ μ€...")
        url = "https://huggingface.co/api/spaces"
        response = requests.get(url, params={'full': 'true', 'limit': 24})
        response.raise_for_status()
        spaces = response.json()[:24]

        # Page shell: editable intro textarea + card styles.
        html_content = """
        <div style='padding: 20px; background: #ffffff;'>
            <div style='margin-bottom: 30px;'>
                <textarea id='intro_text' rows='4' style='
                    width: 100%;
                    padding: 15px;
                    border: 1px solid #ddd;
                    border-radius: 10px;
                    font-size: 1.1em;
                    line-height: 1.5;
                    resize: vertical;
                    background: #f8f9fa;
                '>μλ
νμΈμ. λ§€μΌ κΈλ‘λ² μ΅μ  AI μΈκΈ° νΈλ λ μλΉμ€λ₯Ό μμ보λ 'λ°μΌλ¦¬ AI νΈλ λ©' λ΄μ€μ
λλ€. μ€λμ νκΉ
νμ΄μ€ μΈκΈ° μμ 1μλΆν° 24μκΉμ§, λΆμκ³Ό ν΅μ¬ λ΄μ©μ μ΄ν΄λ³΄κ² μ΅λλ€.</textarea>
            </div>
            <style>
                .script-card {
                    background: white !important;
                    border-radius: 10px;
                    padding: 20px;
                    margin-bottom: 20px;
                    box-shadow: 0 2px 4px rgba(0,0,0,0.1);
                    border: 1px solid #e0e0e0;
                }
                .script-content {
                    color: #444 !important;
                    font-size: 1.1em;
                    line-height: 1.6;
                    white-space: pre-line;
                }
            </style>
        """

        for idx, space in enumerate(spaces):
            progress((idx + 1) / 24, desc=f"λΆμ μ€... {idx+1}/24")
            try:
                source = get_space_source(space['id'])
                source_code = source["app.py"] or source["index.html"]

                # Use only the project name (drop the author prefix).
                project_name = space['id'].split('/')[-1]

                prompt = f"""
                λ€μ HuggingFace μ€νμ΄μ€λ₯Ό μ νλΈ λ΄μ€ 리ν¬νΈ νμμΌλ‘ μ€λͺ
ν΄μ£ΌμΈμ.
                μμμ λ°λμ "μ€λμ μΈκΈ°μμ {idx + 1}μμΈ {project_name}μ
λλ€."λ‘ μμνκ³ ,
                μ΄μ΄μ μ£Όμ κΈ°λ₯, νΉμ§, νμ©λ°©μμ 2-3λ¬Έμ₯μΌλ‘ μμ°μ€λ½κ² μ€λͺ
ν΄μ£ΌμΈμ.
                μ 체 κΈΈμ΄λ 3-4λ¬Έμ₯μΌλ‘ μ ννκ³ , μ€λͺ
μ λ΄μ€ 리ν¬ν°μ²λΌ λͺ
ννκ³  μ λ¬Έμ μΌλ‘ ν΄μ£ΌμΈμ.
                μμ€μ½λ:
                ```
                {source_code[:1500]}
                ```
                """
                messages = [
                    {"role": "system", "content": "AI κΈ°μ  μ λ¬Έ λ΄μ€ 리ν¬ν°μ
λλ€."},
                    {"role": "user", "content": prompt}
                ]
                response = hf_client.chat_completion(
                    messages,
                    max_tokens=200,
                    temperature=0.7
                )
                script = response.choices[0].message.content.strip()

                html_content += f"""
                <div class='script-card'>
                    <div class='script-content'>{script}</div>
                </div>
                """
            except Exception as e:
                # One failed space must not abort the whole report.
                print(f"Error analyzing space {space['id']}: {e}")
                html_content += f"""
                <div class='script-card'>
                    <div class='script-content' style='color: red !important;'>
                        μμ {idx + 1}μ λΆμ μ€ μ€λ₯κ° λ°μνμ΅λλ€.
                    </div>
                </div>
                """

        html_content += "</div>"
        return html_content, f"24κ° μ€νμ΄μ€ λΆμ μλ£"
    except Exception as e:
        error_msg = f"Error: {str(e)}"
        return f"<div style='color: red; padding: 20px;'>{error_msg}</div>", error_msg
def analyze_single_space(space: dict, source_code: str) -> str:
    """Ask the LLM for a six-point summary of one space's source code.

    Returns the raw LLM text, or a Korean error message on failure; the
    *space* dict itself is currently unused beyond being part of the API.
    """
    try:
        if not source_code:
            return "μμ€μ½λλ₯Ό κ°μ Έμ¬ μ μμ΅λλ€."

        # Truncate the source to stay within the model's context window.
        prompt = f"""
        λ€μ μ€νμ΄μ€μ μμ€μ½λλ₯Ό λΆμν΄μ£ΌμΈμ:
        ```
        {source_code[:4000]}
        ```
        λ€μ νλͺ©μ κ°κ° ν μ€λ‘ μμ½ν΄μ£ΌμΈμ:
        1. κ°μ:
        2. μμ½:
        3. νΉμ§ λ° μ₯μ :
        4. μ¬μ© λμ:
        5. μ¬μ© λ°©λ²:
        6. μ μ¬ μλΉμ€μμ μ°¨λ³μ :
        """
        messages = [
            {"role": "system", "content": "μμ€μ½λ λΆμ μ λ¬Έκ°μ
λλ€."},
            {"role": "user", "content": prompt}
        ]
        response = hf_client.chat_completion(
            messages,
            max_tokens=3800,
            temperature=0.3
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"λΆμ μ€ μ€λ₯ λ°μ: {str(e)}"
def create_editable_space_analysis(progress=gr.Progress()) -> List[str]: | |
"""24κ° μ€νμ΄μ€ λΆμ ν μ€νΈ μμ±""" | |
try: | |
progress(0, desc="μ€νμ΄μ€ λ°μ΄ν° κ°μ Έμ€λ μ€...") | |
url = "https://huggingface.co/api/spaces" | |
response = requests.get(url, params={'full': 'true', 'limit': 24}) | |
response.raise_for_status() | |
spaces = response.json()[:24] | |
analysis_texts = [] | |
for idx, space in enumerate(spaces): | |
progress((idx + 1) / 24, desc=f"λΆμ μ€... {idx+1}/24") | |
try: | |
source = get_space_source(space['id']) | |
source_code = source["app.py"] or source["index.html"] | |
# νλ‘μ νΈλͺ λ§ μΆμΆ | |
project_name = space['id'].split('/')[-1] | |
prompt = f""" | |
λ€μ HuggingFace μ€νμ΄μ€λ₯Ό λΆμνμ¬ λ΄μ€ 리ν¬νΈ νμμΌλ‘ μ€λͺ ν΄μ£ΌμΈμ: | |
μμμ λ°λμ "μ€λμ μΈκΈ°μμ {idx + 1}μμΈ {project_name}μ λλ€."λ‘ μμνκ³ , | |
μ΄μ΄μ μ£Όμ κΈ°λ₯, νΉμ§, νμ©λ°©μμ μμ°μ€λ½κ² μ€λͺ ν΄μ£ΌμΈμ. | |
μμ€μ½λ: | |
``` | |
{source_code[:1500]} | |
``` | |
""" | |
messages = [ | |
{"role": "system", "content": "AI κΈ°μ μ λ¬Έ λ΄μ€ 리ν¬ν°μ λλ€."}, | |
{"role": "user", "content": prompt} | |
] | |
response = hf_client.chat_completion( | |
messages, | |
max_tokens=200, | |
temperature=0.7 | |
) | |
analysis_texts.append(response.choices[0].message.content.strip()) | |
except Exception as e: | |
analysis_texts.append(f"μ€λμ μΈκΈ°μμ {idx + 1}μμΈ {project_name}μ λλ€.") | |
return analysis_texts | |
except Exception as e: | |
return [f"μμ {i+1}μ λΆμμ μ€λΉμ€μ λλ€." for i in range(24)] | |
def generate_video(texts: List[str], progress=gr.Progress(), spaces: List[dict] = None) -> str:
    """Render a narrated slideshow video from the given script texts.

    texts[0] narrates an intro image; each following text narrates the
    screenshot of the corresponding space. Returns the output MP4 path, or
    an empty string on failure.

    FIX: the original body read an undefined global ``spaces`` (NameError at
    runtime); it is now an optional trailing parameter, fetched from the HF
    API when omitted, keeping existing calls backward-compatible.
    """
    temp_dir = None  # so the except-path cleanup never hits an unbound name
    try:
        if spaces is None:
            # Fetch the same top-24 list the analysis functions use.
            resp = requests.get("https://huggingface.co/api/spaces",
                                params={'full': 'true', 'limit': 24})
            resp.raise_for_status()
            spaces = resp.json()[:24]

        temp_dir = tempfile.mkdtemp()
        clips = []

        # Intro clip: static image + Korean TTS narration of the first text.
        intro_image = Image.open('intro.png')
        intro_audio = gTTS(text=texts[0], lang='ko', slow=False)
        intro_audio.save(f"{temp_dir}/intro.mp3")
        intro_clip = ImageClip(np.array(intro_image)).set_duration(5)  # 5s per slide
        intro_audio_clip = AudioFileClip(f"{temp_dir}/intro.mp3")
        intro_clip = intro_clip.set_audio(intro_audio_clip)
        clips.append(intro_clip)

        # One clip per space: cached screenshot + TTS narration.
        for idx, text in enumerate(texts[1:], 1):
            progress((idx / 24), desc=f"μμ μμ± μ€... {idx}/24")

            space_id = spaces[idx-1]['id']
            url = f"https://huggingface.co/spaces/{space_id}"
            screenshot = get_cached_screenshot(url)

            image = Image.open(BytesIO(base64.b64decode(screenshot)))
            image_clip = ImageClip(np.array(image)).set_duration(5)  # 5s per slide

            tts = gTTS(text=text, lang='ko', slow=False)
            tts.save(f"{temp_dir}/audio_{idx}.mp3")
            audio_clip = AudioFileClip(f"{temp_dir}/audio_{idx}.mp3")

            clips.append(image_clip.set_audio(audio_clip))

        # Stitch all clips and encode to MP4.
        final_clip = concatenate_videoclips(clips)
        output_path = "output_video.mp4"
        final_clip.write_videofile(output_path, fps=24, codec='libx264')

        shutil.rmtree(temp_dir)  # clean up intermediate audio files
        return output_path
    except Exception as e:
        print(f"Error generating video: {e}")
        if temp_dir:
            shutil.rmtree(temp_dir)
        return ""
def create_interface():
    """Build and return the Gradio Blocks app.

    Layout: three trending tabs (Spaces / Models / Datasets) — each with a
    search box, a sort-mode radio and a refresh button — plus an analysis
    tab that fills 24 editable textboxes with per-space blurbs and renders
    them into a narrated video. Event wiring and the initial data load are
    attached at the bottom of the context.
    """
    with gr.Blocks(title="HuggingFace Trending Board", css="""
        .search-sort-container {
            background: linear-gradient(135deg, rgba(255,255,255,0.95), rgba(240,240,255,0.95));
            border-radius: 15px;
            padding: 20px;
            margin: 10px 0;
            box-shadow: 0 4px 6px rgba(0,0,0,0.1);
            overflow: visible;
        }
        .search-box {
            border: 2px solid #e1e1e1;
            border-radius: 10px;
            padding: 12px;
            transition: all 0.3s ease;
            background: linear-gradient(135deg, #ffffff, #f8f9ff);
            width: 100%;
        }
        .search-box:focus {
            border-color: #7b61ff;
            box-shadow: 0 0 0 2px rgba(123,97,255,0.2);
            background: linear-gradient(135deg, #ffffff, #f0f3ff);
        }
        .refresh-btn {
            background: linear-gradient(135deg, #7b61ff, #6366f1);
            color: white;
            border: none;
            padding: 10px 20px;
            border-radius: 10px;
            cursor: pointer;
            transition: all 0.3s ease;
            width: 120px;
            height: 80px !important;
            display: flex;
            align-items: center;
            justify-content: center;
            margin-left: auto;
            font-size: 1.2em !important;
            box-shadow: 0 4px 6px rgba(0,0,0,0.1);
        }
        .refresh-btn:hover {
            transform: translateY(-2px);
            box-shadow: 0 6px 12px rgba(0,0,0,0.2);
            background: linear-gradient(135deg, #8b71ff, #7376f1);
        }
    """) as interface:
        gr.Markdown("""
        # π€ HuggingFace Trending Board
        <div style='margin-bottom: 20px; padding: 10px; background: linear-gradient(135deg, rgba(123,97,255,0.1), rgba(99,102,241,0.1)); border-radius: 10px;'>
        Explore, search, and sort through the Shows Top 300 Trending spaces with AI Ratings
        </div>
        """)
        with gr.Tabs() as tabs:
            # Spaces tab
            with gr.Tab("π― Trending Spaces"):
                with gr.Row(elem_classes="search-sort-container"):
                    with gr.Column(scale=2):
                        spaces_search = gr.Textbox(
                            label="π Search Spaces",
                            placeholder="Enter keywords to search...",
                            elem_classes="search-box"
                        )
                    with gr.Column(scale=2):
                        spaces_sort = gr.Radio(
                            choices=["rank", "rising_rate", "popularity"],
                            value="rank",
                            label="Sort by",
                            interactive=True
                        )
                    with gr.Column(scale=1):
                        spaces_refresh_btn = gr.Button(
                            "π Refresh",
                            variant="primary",
                            elem_classes="refresh-btn"
                        )
                spaces_gallery = gr.HTML()
                spaces_status = gr.Markdown("Loading...")
            # Models tab
            with gr.Tab("π€ Trending Models"):
                with gr.Row(elem_classes="search-sort-container"):
                    with gr.Column(scale=2):
                        models_search = gr.Textbox(
                            label="π Search Models",
                            placeholder="Enter keywords to search...",
                            elem_classes="search-box"
                        )
                    with gr.Column(scale=2):
                        models_sort = gr.Radio(
                            choices=["rank", "rising_rate", "popularity"],
                            value="rank",
                            label="Sort by",
                            interactive=True
                        )
                    with gr.Column(scale=1):
                        models_refresh_btn = gr.Button(
                            "π Refresh",
                            variant="primary",
                            elem_classes="refresh-btn"
                        )
                models_gallery = gr.HTML()
                models_status = gr.Markdown("Loading...")
            # Datasets tab
            with gr.Tab("π Trending Datasets"):
                with gr.Row(elem_classes="search-sort-container"):
                    with gr.Column(scale=2):
                        datasets_search = gr.Textbox(
                            label="π Search Datasets",
                            placeholder="Enter keywords to search...",
                            elem_classes="search-box"
                        )
                    with gr.Column(scale=2):
                        datasets_sort = gr.Radio(
                            choices=["rank", "rising_rate", "popularity"],
                            value="rank",
                            label="Sort by",
                            interactive=True
                        )
                    with gr.Column(scale=1):
                        datasets_refresh_btn = gr.Button(
                            "π Refresh",
                            variant="primary",
                            elem_classes="refresh-btn"
                        )
                datasets_gallery = gr.HTML()
                datasets_status = gr.Markdown("Loading...")
            # Analysis tab
            with gr.Tab("π Top 24 Spaces Analysis"):
                with gr.Row(elem_classes="search-sort-container"):
                    analysis_refresh_btn = gr.Button(
                        "π Analyze All 24 Spaces",
                        variant="primary",
                        elem_classes="refresh-btn"
                    )
                # Editable intro narration text (texts[0] of the video)
                intro_text = gr.Textbox(
                    value="μλ νμΈμ. λ§€μΌ κΈλ‘λ² μ΅μ AI μΈκΈ° νΈλ λ μλΉμ€λ₯Ό μμ보λ 'λ°μΌλ¦¬ AI νΈλ λ©' λ΄μ€μ λλ€. μ€λμ νκΉ νμ΄μ€ μΈκΈ° μμ 1μλΆν° 24μκΉμ§, λΆμκ³Ό ν΅μ¬ λ΄μ©μ μ΄ν΄λ³΄κ² μ΅λλ€.",
                    label="μΈνΈλ‘ ν μ€νΈ",
                    lines=4
                )
                # Container holding the 24 editable per-space analysis boxes
                with gr.Column(elem_id="analysis-container"):
                    analysis_boxes = [gr.Textbox(label=f"Space #{i+1}", lines=3) for i in range(24)]
                analysis_status = gr.Markdown()
                # Video generation section
                with gr.Row():
                    generate_btn = gr.Button(
                        "π¬ μμ μμ±",
                        variant="primary",
                        size="lg"
                    )
                    video_output = gr.Video(label="μμ±λ μμ")
        # Event handlers: refresh buttons re-fetch each tab's data
        spaces_refresh_btn.click(
            fn=get_trending_spaces,
            inputs=[spaces_search, spaces_sort],
            outputs=[spaces_gallery, spaces_status]
        )
        models_refresh_btn.click(
            fn=get_models,
            inputs=[models_search, models_sort],
            outputs=[models_gallery, models_status]
        )
        datasets_refresh_btn.click(
            fn=get_datasets,
            inputs=[datasets_search, datasets_sort],
            outputs=[datasets_gallery, datasets_status]
        )
        # Auto-refresh when the search term changes
        spaces_search.change(
            fn=get_trending_spaces,
            inputs=[spaces_search, spaces_sort],
            outputs=[spaces_gallery, spaces_status]
        )
        models_search.change(
            fn=get_models,
            inputs=[models_search, models_sort],
            outputs=[models_gallery, models_status]
        )
        datasets_search.change(
            fn=get_datasets,
            inputs=[datasets_search, datasets_sort],
            outputs=[datasets_gallery, datasets_status]
        )
        # Auto-refresh when the sort mode changes
        spaces_sort.change(
            fn=get_trending_spaces,
            inputs=[spaces_search, spaces_sort],
            outputs=[spaces_gallery, spaces_status]
        )
        models_sort.change(
            fn=get_models,
            inputs=[models_search, models_sort],
            outputs=[models_gallery, models_status]
        )
        datasets_sort.change(
            fn=get_datasets,
            inputs=[datasets_search, datasets_sort],
            outputs=[datasets_gallery, datasets_status]
        )
        # Analysis tab event handlers
        analysis_refresh_btn.click(
            fn=on_analyze,
            outputs=analysis_boxes
        )
        generate_btn.click(
            fn=on_generate_video,
            inputs=[intro_text] + analysis_boxes,
            outputs=video_output
        )
        # Initial data load for all tabs on page open
        interface.load(
            fn=get_trending_spaces,
            inputs=[spaces_search, spaces_sort],
            outputs=[spaces_gallery, spaces_status]
        )
        interface.load(
            fn=get_models,
            inputs=[models_search, models_sort],
            outputs=[models_gallery, models_status]
        )
        interface.load(
            fn=get_datasets,
            inputs=[datasets_search, datasets_sort],
            outputs=[datasets_gallery, datasets_status]
        )
        interface.load(
            fn=on_analyze,
            outputs=analysis_boxes
        )
    return interface
# λΆμ λ° λΉλμ€ μμ± ν¨μλ€ | |
def on_analyze(progress=gr.Progress()):
    """Fetch the top-24 trending spaces and write one news-style blurb per space.

    For each space, the source code is fetched and summarized by the HF
    inference client; any per-space failure degrades to a one-line fallback.

    Returns:
        Exactly 24 strings (padded with placeholder text on shortfall),
        one per analysis textbox in the UI.
    """
    try:
        url = "https://huggingface.co/api/spaces"
        response = requests.get(url, params={'full': 'true', 'limit': 24})
        response.raise_for_status()
        spaces = response.json()[:24]

        analysis_texts = []
        for idx, space in enumerate(spaces):
            progress((idx + 1) / 24, desc=f"λΆμ μ€... {idx+1}/24")
            # Derive the display name *before* any fallible call, so the
            # except branch below can never raise a NameError on it
            # (previously it was assigned inside the try, after calls that
            # could fail first).
            project_name = space['id'].split('/')[-1]
            try:
                source = get_space_source(space['id'])
                source_code = source["app.py"] or source["index.html"]
                prompt = f"""
λ€μ HuggingFace μ€νμ΄μ€λ₯Ό λΆμνμ¬ λ΄μ€ 리ν¬νΈ νμμΌλ‘ μ€λͺ ν΄μ£ΌμΈμ:
μμμ λ°λμ "μ€λμ μΈκΈ°μμ {idx + 1}μμΈ {project_name}μ λλ€."λ‘ μμνκ³ ,
μ΄μ΄μ μ£Όμ κΈ°λ₯, νΉμ§, νμ©λ°©μμ μμ°μ€λ½κ² μ€λͺ ν΄μ£ΌμΈμ.
μμ€μ½λ:
```
{source_code[:1500]}
```
"""
                messages = [
                    {"role": "system", "content": "AI κΈ°μ μ λ¬Έ λ΄μ€ 리ν¬ν°μ λλ€."},
                    {"role": "user", "content": prompt}
                ]
                response = hf_client.chat_completion(
                    messages,
                    max_tokens=200,
                    temperature=0.7
                )
                analysis_texts.append(response.choices[0].message.content.strip())
            except Exception:
                # Best-effort: a minimal one-liner keeps the slot filled.
                analysis_texts.append(f"μ€λμ μΈκΈ°μμ {idx + 1}μμΈ {project_name}μ λλ€.")

        # Guarantee exactly 24 entries for the 24 output textboxes.
        if len(analysis_texts) < 24:
            analysis_texts.extend([f"μμ {i+1}μ λΆμμ μ€λΉμ€μ λλ€." for i in range(len(analysis_texts), 24)])
        return analysis_texts[:24]
    except Exception:
        # Total failure (e.g. the listing request itself): placeholder rows.
        return [f"μμ {i+1}μ λΆμμ μ€λΉμ€μ λλ€." for i in range(24)]
def on_generate_video(intro, *texts, progress=gr.Progress()):
    """Prepend the intro narration to the per-space texts and render the video."""
    narration = [intro, *texts]
    return generate_video(narration, progress)
if __name__ == "__main__":
    try:
        # Ensure the screenshot cache directory exists and prune stale
        # entries before the UI starts serving.
        CACHE_DIR.mkdir(exist_ok=True)
        cleanup_cache()
        demo = create_interface()
        demo.launch(
            share=True,
            inbrowser=True,
            show_api=False,
            max_threads=4
        )
    except Exception as e:
        # Top-level boundary: log and exit rather than crash with a traceback.
        print(f"Application error: {e}")