MaksG committed
Commit 1a99a9c
1 Parent(s): 529f969

Update scrape_3gpp.py

Files changed (1)
  1. scrape_3gpp.py +23 -18
scrape_3gpp.py CHANGED
@@ -62,12 +62,18 @@ def extract_statuses(url):
     return []
 
 
+import os
+import requests
+from bs4 import BeautifulSoup
+import pandas as pd
+import gradio as gr
+
 def scrape(url, excel_file, folder_name, status_list, progress=gr.Progress()):
     filenames = []
     status_filenames = []
     df = pd.DataFrame() # Initialize df to ensure it's always defined
 
-    # Only proceed if excel_file is not None and it exists
+    # Try to process the Excel file if provided and valid
     if excel_file and os.path.exists(excel_file):
         try:
             df = pd.read_excel(excel_file)
@@ -76,25 +82,27 @@ def scrape(url, excel_file, folder_name, status_list, progress=gr.Progress()):
             if 'TDoc Status' in df.columns and status_list:
                 df = df[df['TDoc Status'].isin(status_list)]
                 print(f"Filtered DataFrame size: {len(df)}")
-            else:
-                print("No filtering applied based on TDoc Status")
 
             if not df.empty:
                 if 'TDoc' in df.columns and not df['TDoc'].isnull().all():
                     status_filenames = [f"{url}{row['TDoc']}.zip" for index, row in df.iterrows()]
                 elif 'URL' in df.columns and not df['URL'].isnull().all():
                     status_filenames = df['URL'].tolist()
-                else:
-                    print("No valid 'TDoc' or 'URL' entries found.")
-
-                print(f"Filenames: {status_filenames}")
-            else:
-                print("DataFrame is empty after filtering.")
 
+            print(f"Filenames from Excel: {status_filenames}")
         except Exception as e:
             print(f"Error reading Excel file: {e}")
-    else:
-        print("No valid excel_file path provided.")
+
+    # If no valid Excel file is given or no status_filenames are found, download zip files directly from the URL
+    if not excel_file or not status_filenames:
+        print("Downloading zip files directly from the URL...")
+        response = requests.get(url)
+        soup = BeautifulSoup(response.content, 'html.parser')
+        zip_links = [a['href'] for a in soup.find_all('a', href=True) if a['href'].endswith('.zip')]
+
+        # Construct absolute URLs for zip files
+        status_filenames = [url + link if not link.startswith('http') else link for link in zip_links]
+        print(f"Filenames from URL: {status_filenames}")
 
     download_directory = folder_name
     if not os.path.exists(download_directory):
@@ -102,16 +110,12 @@ def scrape(url, excel_file, folder_name, status_list, progress=gr.Progress()):
 
     pourcentss = 0.05
 
-    if not status_filenames:
-        print("No Excel file provided, or no valid URLs found in the file.")
-        return False, 0
-
-    # Proceed with downloading files using the filenames list
+    # Proceed with downloading files
     for file_url in status_filenames:
         filename = os.path.basename(file_url)
         save_path = os.path.join(download_directory, filename)
         progress(pourcentss, desc='Downloading')
-        pourcentss += 0.4 / len(status_filenames) if status_filenames else 1 # Adjust to prevent division by zero
+        pourcentss += 0.4 / max(len(status_filenames), 1) # Ensure non-zero division
         try:
             with requests.get(file_url, stream=True) as r:
                 r.raise_for_status()
@@ -119,7 +123,7 @@ def scrape(url, excel_file, folder_name, status_list, progress=gr.Progress()):
                     for chunk in r.iter_content(chunk_size=8192):
                         f.write(chunk)
         except requests.exceptions.HTTPError as e:
-            print(f"HTTP error occurred: {file_url}: {e}")
+            print(f"HTTP error occurred while downloading {file_url}: {e}")
 
     return True, len(status_filenames)
 
@@ -127,6 +131,7 @@ def scrape(url, excel_file, folder_name, status_list, progress=gr.Progress()):
 
 
 
+
 def extractZip(url):
     # Directory where the zip files have already been downloaded
     nom_extract = url.split("/")[-3] + "_extraction"
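
For context, a minimal sketch of how the updated scrape() could be exercised after this change. It assumes the module's dependencies (requests, beautifulsoup4, pandas, gradio) are installed; the 3GPP listing URL, folder name, Excel path, and status values below are illustrative placeholders, not values from this commit. A plain callable stands in for progress, since the gr.Progress() default is designed to run inside a Gradio event handler.

from scrape_3gpp import scrape

# Hypothetical 3GPP document listing exposing .zip TDoc archives; the trailing
# slash matters because scrape() concatenates TDoc names directly onto the URL.
url = "https://www.3gpp.org/ftp/tsg_ran/WG1_RL1/TSGR1_116/Docs/"

# No-op progress callback so the sketch runs outside a Gradio app.
progress = lambda value, desc=None: None

# Path 1: no Excel file. With this commit, scrape() now falls back to parsing
# the page itself and downloading every .zip link it finds there.
ok, count = scrape(url, None, "downloads", [], progress=progress)
print(ok, count)

# Path 2: a (hypothetical) Excel sheet with 'TDoc' and 'TDoc Status' columns
# still drives a status-filtered download list, as before.
ok, count = scrape(url, "TDoc_List.xlsx", "downloads", ["agreed", "approved"], progress=progress)
print(ok, count)

One behavioral consequence visible in the diff: the early "return False, 0" for an empty status_filenames is gone, so scrape() now returns True even when neither the Excel file nor the page yields any links; callers that relied on a False result to detect that case will need to check the returned count instead.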
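
A smaller review note on the new fallback: absolute URLs are built by string concatenation (url + link), which assumes every href not starting with 'http' is relative to the listing page itself. Below is a sketch of an alternative using urllib.parse.urljoin from the standard library, which also resolves root-relative and ../-style hrefs correctly; this is a suggestion, not part of the commit, and example.org is a placeholder domain.

from urllib.parse import urljoin

def absolutize(base_url, hrefs):
    # Resolve scraped hrefs against the page URL; absolute URLs pass through unchanged.
    return [urljoin(base_url, href) for href in hrefs]

print(absolutize("https://example.org/meeting/Docs/", [
    "R1-2400001.zip",                      # relative      -> https://example.org/meeting/Docs/R1-2400001.zip
    "/meeting/Docs/R1-2400002.zip",        # root-relative -> https://example.org/meeting/Docs/R1-2400002.zip
    "https://example.org/R1-2400003.zip",  # absolute      -> unchanged
]))

Inside scrape(), the comprehension that builds status_filenames could then be replaced by urljoin(url, link) for each scraped link.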