from io import BytesIO

import pandas as pd
import requests
import streamlit as st
from bs4 import BeautifulSoup


def fetch_data():
    """Scrape the Ireland.ie New Delhi visa page for the latest
    visa-decisions spreadsheet and return it as a cleaned DataFrame,
    or None if any step fails."""
    url = "https://www.ireland.ie/en/india/newdelhi/services/visas/processing-times-and-decisions/"
    headers = {
        # A browser-like User-Agent, since some sites reject default client strings
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
            "(KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
        )
    }
    response = requests.get(url, headers=headers, timeout=30)  # fail fast rather than hang
    if response.status_code == 200:
        soup = BeautifulSoup(response.content, "html.parser")
        # Find the link to download the decisions spreadsheet
        file_url = None
        for link in soup.find_all('a'):
            if "Visa decisions made from 1 January 2025 to" in link.get_text(strip=True):
                file_url = link.get('href')
                break
        if file_url:
            # Make the link absolute if it's relative
            if not file_url.startswith('http'):
                file_url = requests.compat.urljoin(url, file_url)
            file_response = requests.get(file_url, headers=headers, timeout=60)
            if file_response.status_code == 200:
                file_data = BytesIO(file_response.content)
                # engine='odf' reads OpenDocument spreadsheets (.ods) and
                # requires the odfpy package to be installed
                df = pd.read_excel(file_data, engine='odf')
                # Clean up and process the DataFrame
                df.drop(columns=["Unnamed: 0", "Unnamed: 1"], inplace=True, errors='ignore')
                df.dropna(how='all', inplace=True)
                df.reset_index(drop=True, inplace=True)
                # Assign the expected headers (assumes exactly two columns remain)
                df.columns = ['Application Number', 'Decision']
                # Debugging: display the first few rows of the DataFrame
                # st.write("First few rows of the data:")
                # st.write(df.head())
                return df
            else:
                st.error("Failed to download the file.")
        else:
            st.error("The file link was not found on the webpage.")
    else:
        st.error("Failed to retrieve the webpage.")
    return None
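
# Usage sketch (not part of the original file): a minimal Streamlit page that
# calls fetch_data() and lets a visitor look up a single application number.
# The st.cache_data TTL, widget labels, and the substring lookup below are
# illustrative assumptions, not the original app's UI.

@st.cache_data(ttl=3600)  # assumption: re-download at most once per hour
def load_data():
    return fetch_data()

st.title("Ireland Visa Decisions Lookup")
df = load_data()
if df is not None:
    query = st.text_input("Enter your application number:").strip()
    if query:
        # Plain substring match (regex disabled) against the cleaned column
        matches = df[df['Application Number'].astype(str).str.contains(query, regex=False, na=False)]
        if matches.empty:
            st.info("No decision published for that application number yet.")
        else:
            st.dataframe(matches)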