#!/usr/bin/env python
# coding: utf-8
import requests
import pandas as pd
from io import BytesIO
from bs4 import BeautifulSoup
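
# Third-party dependencies (a sketch of the likely install step; the package
# names are inferred from the imports and the engine='odf' call below):
#   pip install requests pandas beautifulsoup4 odfpy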

# URL of the website to scrape
url = "https://www.ireland.ie/en/india/newdelhi/services/visas/processing-times-and-decisions/"

# Headers to mimic a browser request
headers = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
    )
}
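
# Note: some servers refuse requests that present the default
# "python-requests" User-Agent, so a desktop-browser string is sent instead.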

# Send an HTTP GET request to the website
response = requests.get(url, headers=headers)

# Check if the request was successful
if response.status_code == 200:
    # Parse the HTML content of the page
    soup = BeautifulSoup(response.content, 'html.parser')

    # Find all anchor tags
    links = soup.find_all('a')

    # Search for the link that contains the specific text
    file_url = None
    for link in links:
        link_text = link.get_text(strip=True)
        if "Visa decisions made from 1 January 2024 to" in link_text:
            file_url = link.get('href')
            break
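
    # An equivalent lookup can use BeautifulSoup's built-in string matching
    # (a sketch; it assumes the target text is the direct string of the <a> tag):
    # link = soup.find('a', string=lambda t: t and "Visa decisions made from 1 January 2024 to" in t)
    # file_url = link.get('href') if link else None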

    # Check if the link was found
    if file_url:
        # Make the link absolute if it is relative
        if not file_url.startswith('http'):
            file_url = requests.compat.urljoin(url, file_url)
        print(f"Found link: {file_url}")

        # Download the file into memory
        file_response = requests.get(file_url, headers=headers)
        if file_response.status_code == 200:
            # Load the file into a BytesIO object
            ods_file = BytesIO(file_response.content)
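
            # Keeping the download in a BytesIO buffer avoids writing a
            # temporary file to disk; pandas reads file-like objects directly.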

            # Read the .ods file using pandas
            try:
                # Load the .ods file into a DataFrame
                df = pd.read_excel(ods_file, engine='odf')

                # Step 1: Clean the DataFrame
                # Drop the first two columns
                df.drop(columns=["Unnamed: 0", "Unnamed: 1"], inplace=True)
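
                # Assumption: the sheet has two empty leading columns, which
                # pandas auto-names "Unnamed: 0" and "Unnamed: 1"; the data
                # lives in "Unnamed: 2" and "Unnamed: 3" until renamed below.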

                # Step 2: Drop rows that are entirely NaN
                df.dropna(how='all', inplace=True)

                # Step 3: Reset the index so row positions line up for the header search
                df.reset_index(drop=True, inplace=True)

                # Step 4: Find the row holding the real header ("Application Number" / "Decision")
                for idx, row in df.iterrows():
                    if row['Unnamed: 2'] == 'Application Number' and row['Unnamed: 3'] == 'Decision':
                        df.columns = ['Application Number', 'Decision']
                        df = df.iloc[idx + 1:]  # Skip the header row itself
                        break

                # Step 5: Reset the index after cleaning
                df.reset_index(drop=True, inplace=True)

                # Convert "Application Number" to string for consistency
                df['Application Number'] = df['Application Number'].astype(str)
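
                # input() returns a string, so casting the column to str
                # allows a direct equality comparison in Step 7.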

                # Display the cleaned DataFrame
                #print("Cleaned Data:")
                #print(df.to_string(index=False))

                # Step 6: Ask the user for their application number
                user_application_number = input("\nEnter your Application Number: ").strip()

                # Step 7: Check if the application number exists in the DataFrame
                result = df[df['Application Number'] == user_application_number]
                if not result.empty:
                    print(f"\nCongratulations! Your visa application ({user_application_number}) has been {result.iloc[0]['Decision']}.")
                else:
                    print(f"\nSorry, no record found for Application Number: {user_application_number}.")
            except Exception as e:
                print("Error reading the .ods file:", e)
        else:
            print("Failed to download the file. Status code:", file_response.status_code)
    else:
        print("The specified link was not found.")
else:
    print(f"Failed to retrieve the webpage. Status code: {response.status_code}")