|
import requests |
|
import pandas as pd |
|
import streamlit as st |
|
from io import BytesIO |
|
from bs4 import BeautifulSoup |
|
from fpdf import FPDF |
|
|
|
|
|
@st.cache_data(ttl=3600)
def fetch_data():
    """Download and parse the Irish visa-decisions spreadsheet.

    Scrapes the New Delhi embassy processing-times page for the link whose
    text contains "Visa decisions made from 1 January 2025", downloads the
    linked ODS file, and normalises it into a two-column DataFrame.

    Returns:
        tuple: (DataFrame with columns ["Application Number", "Decision"],
        link text used as a display name), or (None, None) on any failure
        (an ``st.error`` message is shown in that case).
    """
    url = "https://www.ireland.ie/en/india/newdelhi/services/visas/processing-times-and-decisions/"
    headers = {
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
            "(KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
        )
    }

    # Timeout keeps the Streamlit app from hanging forever on a dead host.
    response = requests.get(url, headers=headers, timeout=30)
    if response.status_code != 200:
        st.error("Failed to fetch the webpage. Please try again later.")
        return None, None

    soup = BeautifulSoup(response.content, "html.parser")
    file_url = None
    file_name = None
    for link in soup.find_all("a"):
        if "Visa decisions made from 1 January 2025" not in link.get_text():
            continue
        href = link.get("href")
        if not href:
            # Matching anchor without a target — keep scanning. (The
            # original crashed here on `.startswith(None)`.)
            continue
        # Resolve relative links against the page URL.
        file_url = href if href.startswith("http") else requests.compat.urljoin(url, href)
        file_name = link.get_text().strip()
        break

    if not file_url or not file_name:
        st.error("Could not find the visa decisions file link on the website.")
        return None, None

    ods_response = requests.get(file_url, headers=headers, timeout=60)
    if ods_response.status_code != 200:
        st.error("Failed to download the visa decisions file.")
        return None, None

    # The published file is an OpenDocument Spreadsheet; pandas reads it
    # via the "odf" engine (odfpy).
    df = pd.read_excel(BytesIO(ods_response.content), engine="odf")

    # Drop fully-empty rows left over from the sheet's layout.
    df.dropna(how="all", inplace=True)
    df.reset_index(drop=True, inplace=True)

    # Keep only the first two columns (application number / decision).
    if len(df.columns) > 2:
        df = df.iloc[:, :2]

    if len(df.columns) == 2:
        df.columns = ["Application Number", "Decision"]
    else:
        st.error("Insufficient data columns detected.")
        return None, None

    # Application numbers must be strings so FPDF cells and lookups behave.
    df["Application Number"] = df["Application Number"].astype(str)

    return df, file_name
|
|
|
|
|
# Fetch once at script run; st.cache_data(ttl=3600) reuses the result for
# an hour, so both download buttons below share the same DataFrame.
precomputed_df, file_name = fetch_data()
|
|
|
|
|
def generate_pdf(dataframe, filename):
    """Render the decisions DataFrame as a simple two-column PDF table.

    Args:
        dataframe: DataFrame with "Application Number" and "Decision" columns.
        filename: title text printed at the top of the document.

    Returns:
        BytesIO: in-memory PDF, positioned at offset 0, ready for
        ``st.download_button``.
    """
    pdf = FPDF()
    pdf.set_auto_page_break(auto=True, margin=10)
    pdf.add_page()
    pdf.set_font("Arial", size=12)

    # Centred title line.
    pdf.cell(200, 10, txt=filename, ln=True, align="C")
    pdf.ln(10)

    # Header row in bold.
    pdf.set_font("Arial", "B", 10)
    pdf.cell(50, 10, "Application Number", border=1)
    pdf.cell(50, 10, "Decision", border=1)
    pdf.ln()

    # One bordered row per record.
    pdf.set_font("Arial", size=10)
    for _, row in dataframe.iterrows():
        # str() guards against non-string cells (NaN, numeric codes).
        pdf.cell(50, 10, str(row["Application Number"]), border=1)
        pdf.cell(50, 10, str(row["Decision"]), border=1)
        pdf.ln()

    # BUG FIX: the original called pdf.output(pdf_output, "F"); with
    # dest="F" PyFPDF treats the first argument as a file *path* and
    # fails on a BytesIO. Serialise in memory instead: dest="S" returns
    # a str on fpdf 1.x and bytes/bytearray on fpdf2 — handle both.
    raw = pdf.output(dest="S")
    if isinstance(raw, str):
        raw = raw.encode("latin-1")
    return BytesIO(bytes(raw))
|
|
|
|
|
# Render the download UI only when the fetch above succeeded.
if precomputed_df is not None and file_name:
    st.success("Data fetched successfully!")

    # Serialise the DataFrame to an in-memory .xlsx workbook and offer it.
    xlsx_stream = BytesIO()
    precomputed_df.to_excel(xlsx_stream, index=False, engine="openpyxl")
    xlsx_stream.seek(0)
    st.download_button(
        label="Download Excel File",
        data=xlsx_stream,
        file_name=f"{file_name}.xlsx",
        mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    )

    # The same data again, rendered as a PDF table.
    pdf_stream = generate_pdf(precomputed_df, file_name)
    st.download_button(
        label="Download PDF File",
        data=pdf_stream,
        file_name=f"{file_name}.pdf",
        mime="application/pdf",
    )
|
|