import gradio as gr
import requests
from bs4 import BeautifulSoup
# Function to scrape a specific website (https://chatgpt.com/)
def scrape_website(url):
    try:
        # Send a request to the website
        response = requests.get(url, allow_redirects=True)  # This will handle redirects automatically
        # Check if the response was successful
        if response.status_code == 200:
            # Parse the page content
            soup = BeautifulSoup(response.content, 'html.parser')
            # Extract the title of the webpage
            title = soup.find('title').get_text()
            # Extract the meta description if available
            meta_description = soup.find('meta', attrs={'name': 'description'})
            if meta_description:
                meta_description = meta_description.get('content')
            else:
                meta_description = "No meta description available"
            return f"Title: {title}\nMeta Description: {meta_description}"
        else:
            return f"Failed to access {url} (status code: {response.status_code})"
    except Exception as e:
        return f"An error occurred: {str(e)}"
# Gradio interface to input URL and display scraped content
with gr.Blocks() as demo:
    url_input = gr.Textbox(value="https://chatgpt.com", label="URL", placeholder="Enter URL")
    output = gr.Textbox(label="Scraped Data")
    submit_btn = gr.Button("Scrape Website")
    # Set the button action
    submit_btn.click(scrape_website, url_input, output)
demo.launch()
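
# scrape_website can also be checked on its own, without launching the Gradio UI.
# A minimal sketch, assuming the function above is in scope and the network is
# reachable; https://example.com is an illustrative placeholder, not part of the app.
result = scrape_website("https://example.com")
print(result)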