supunTE committed on
Commit 6005655 · 0 Parent(s)

print output

Files changed (2)
  1. .gitignore +1 -0
  2. scrape-content.ipynb +413 -0
.gitignore ADDED
@@ -0,0 +1 @@
+ .idea
scrape-content.ipynb ADDED
@@ -0,0 +1,413 @@
+ {
+  "cells": [
+   {
+    "metadata": {
+     "ExecuteTime": {
+      "end_time": "2024-10-25T10:32:47.963356Z",
+      "start_time": "2024-10-25T10:32:47.950533Z"
+     }
+    },
+    "cell_type": "code",
+    "source": [
+     "from fake_headers import Headers\n",
+     "\n",
+     "headers = Headers(headers=True).generate()\n",
+     "headers"
+    ],
+    "id": "c60b4d771c2e0a21",
+    "outputs": [
+     {
+      "data": {
+       "text/plain": [
+        "{'Accept': '*/*',\n",
+        " 'Connection': 'keep-alive',\n",
+        " 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6; rv:61.0) Gecko/20100101 Firefox/61.0',\n",
+        " 'Cache-Control': 'max-age=0',\n",
+        " 'Upgrade-Insecure-Requests': '1',\n",
+        " 'Referer': 'https://google.com'}"
+       ]
+      },
+      "execution_count": 9,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "execution_count": 9
+   },
+   {
+    "metadata": {
+     "ExecuteTime": {
+      "end_time": "2024-10-25T10:32:49.821005Z",
+      "start_time": "2024-10-25T10:32:49.798988Z"
+     }
+    },
+    "cell_type": "code",
+    "source": [
+     "from selenium.webdriver.chrome.options import Options\n",
+     "from selenium import webdriver\n",
+     "from selenium.webdriver.support.ui import WebDriverWait\n",
+     "from selenium.webdriver.support import expected_conditions as EC\n",
+     "from selenium.webdriver.common.by import By\n",
+     "from bs4 import BeautifulSoup\n",
+     "import time\n",
+     "\n",
+     "\n",
+     "def scroll_and_wait(driver, scroll_pause_time=2):\n",
+     "    \"\"\"\n",
+     "    Scroll the page gradually and wait for images to load\n",
+     "    \"\"\"\n",
+     "    # Get scroll height\n",
+     "    last_height = driver.execute_script(\"return document.body.scrollHeight\")\n",
+     "\n",
+     "    while True:\n",
+     "        # Scroll down gradually\n",
+     "        for i in range(10):\n",
+     "            driver.execute_script(f\"window.scrollTo(0, {(i + 1) * (last_height / 10)});\")\n",
+     "            time.sleep(0.5) # Short pause between each scroll step\n",
+     "\n",
+     "        # Wait for new images to load\n",
+     "        time.sleep(scroll_pause_time)\n",
+     "\n",
+     "        # Calculate new scroll height and compare with last scroll height\n",
+     "        new_height = driver.execute_script(\"return document.body.scrollHeight\")\n",
+     "        if new_height == last_height:\n",
+     "            break\n",
+     "        last_height = new_height\n",
+     "\n",
+     "\n",
+     "def wait_for_images(driver, timeout=10):\n",
+     "    \"\"\"\n",
+     "    Wait for images to load and become visible\n",
+     "    \"\"\"\n",
+     "    try:\n",
+     "        # Wait for all image elements to be present\n",
+     "        WebDriverWait(driver, timeout).until(\n",
+     "            EC.presence_of_all_elements_located((By.TAG_NAME, \"img\"))\n",
+     "        )\n",
+     "\n",
+     "        # Get all image elements\n",
+     "        images = driver.find_elements(By.TAG_NAME, \"img\")\n",
+     "\n",
+     "        # Wait for images to load\n",
+     "        for img in images:\n",
+     "            try:\n",
+     "                WebDriverWait(driver, 2).until(\n",
+     "                    lambda d: img.get_attribute('complete') == 'true' and\n",
+     "                              img.get_attribute('naturalHeight') != '0'\n",
+     "                )\n",
+     "            except:\n",
+     "                continue # Skip images that don't load within timeout\n",
+     "\n",
+     "    except Exception as e:\n",
+     "        print(f\"Warning: Not all images could be loaded: {e}\")"
+    ],
+    "id": "11933d956e20b6b8",
+    "outputs": [],
+    "execution_count": 10
+   },
+   {
+    "metadata": {
+     "ExecuteTime": {
+      "end_time": "2024-10-25T10:33:23.469518Z",
+      "start_time": "2024-10-25T10:32:53.382666Z"
+     }
+    },
+    "cell_type": "code",
+    "source": [
+     "chrome_options = Options()\n",
+     "chrome_options.add_argument(\"--headless\")\n",
+     "chrome_options.add_argument(\"--disable-gpu\")\n",
+     "chrome_options.add_argument(\"--no-sandbox\")\n",
+     "chrome_options.add_argument(\"--disable-dev-shm-usage\")\n",
+     "\n",
+     "# Add fake headers\n",
+     "for key, value in headers.items():\n",
+     "    chrome_options.add_argument(f'--{key.lower()}={value}')\n",
+     "\n",
+     "# Additional configurations to appear more human-like\n",
+     "chrome_options.add_argument(\"--disable-blink-features=AutomationControlled\")\n",
+     "chrome_options.add_argument(\"--window-size=1920,1080\")\n",
+     "\n",
+     "# Enable images in headless mode\n",
+     "chrome_options.add_argument(\"--force-device-scale-factor=1\")\n",
+     "chrome_options.add_argument(\"--high-dpi-support=1\")\n",
+     "\n",
+     "# Privacy and fingerprinting prevention\n",
+     "chrome_options.add_argument(\"--disable-blink-features\")\n",
+     "chrome_options.add_argument(\"--disable-infobars\")\n",
+     "chrome_options.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\n",
+     "chrome_options.add_experimental_option(\"useAutomationExtension\", False)\n",
+     "\n",
+     "# Enable JavaScript\n",
+     "chrome_options.add_argument(\"--enable-javascript\")\n",
+     "\n",
+     "driver = webdriver.Chrome(options=chrome_options)\n",
+     "\n",
+     "driver.execute_cdp_cmd(\"Page.addScriptToEvaluateOnNewDocument\", {\n",
+     "    \"source\": \"\"\"\n",
+     "    Object.defineProperty(navigator, 'webdriver', {\n",
+     "        get: () => undefined\n",
+     "    })\n",
+     "    \"\"\"\n",
+     "})\n",
+     "\n",
+     "products_url = \"https://www.target.com/s?searchTerm=Peach&tref=typeahead%7Cterm%7CPeach%7C%7C%7Chistory\"\n",
+     "driver.get(products_url)\n",
+     "\n",
+     "time.sleep(3)\n",
+     "\n",
+     "# Scroll and wait for content\n",
+     "scroll_and_wait(driver)\n",
+     "\n",
+     "# Wait for images to load\n",
+     "wait_for_images(driver)\n",
+     "\n",
+     "time.sleep(2)\n",
+     "\n",
+     "soup = BeautifulSoup(driver.page_source, \"html.parser\")\n",
+     "driver.quit()"
+    ],
+    "id": "ac14cff825f0887f",
+    "outputs": [],
+    "execution_count": 11
+   },
+   {
+    "metadata": {
+     "ExecuteTime": {
+      "end_time": "2024-10-25T10:52:34.470225Z",
+      "start_time": "2024-10-25T10:52:34.458243Z"
+     }
+    },
+    "cell_type": "code",
+    "source": [
+     "from urllib.parse import urljoin\n",
+     "import json\n",
+     "\n",
+     "\n",
+     "def convert_relative_urls(soup, base_url):\n",
+     "    \"\"\"\n",
+     "    Convert all relative URLs in the soup object to absolute URLs.\n",
+     "    Handles href, src, and data-src attributes.\n",
+     "    \"\"\"\n",
+     "    # Convert href attributes (links)\n",
+     "    for tag in soup.find_all(href=True):\n",
+     "        tag['href'] = urljoin(base_url, tag['href'])\n",
+     "\n",
+     "    # Convert src attributes (images, scripts, etc.)\n",
+     "    for tag in soup.find_all(src=True):\n",
+     "        tag['src'] = urljoin(base_url, tag['src'])\n",
+     "\n",
+     "    # Convert data-src attributes (lazy loaded images)\n",
+     "    for tag in soup.find_all(attrs={'data-src': True}):\n",
+     "        tag['data-src'] = urljoin(base_url, tag['data-src'])\n",
+     "\n",
+     "    return soup\n",
+     "\n",
+     "\n",
+     "def count_images_in_element(element):\n",
+     "    \"\"\"\n",
+     "    Count all images within an element, including nested ones.\n",
+     "    \"\"\"\n",
+     "    return len(element.find_all('img', recursive=True))\n",
+     "\n",
+     "\n",
+     "def get_element_identifier(element):\n",
+     "    \"\"\"\n",
+     "    Create a unique identifier for an element including tag and classes.\n",
+     "    \"\"\"\n",
+     "    identifier = element.name\n",
+     "    if element.get('class'):\n",
+     "        identifier += f\" .{' .'.join(element['class'])}\"\n",
+     "    if element.get('id'):\n",
+     "        identifier += f\" #{element['id']}\"\n",
+     "    return identifier\n",
+     "\n",
+     "\n",
+     "def has_child_with_same_count(element, image_count, all_elements_with_counts):\n",
+     "    \"\"\"\n",
+     "    Check if the element has any child with the same image count.\n",
+     "    \"\"\"\n",
+     "    for other_element, other_count in all_elements_with_counts:\n",
+     "        if other_count == image_count and other_element != element:\n",
+     "            if any(parent == element for parent in other_element.parents):\n",
+     "                return True\n",
+     "    return False\n",
+     "\n",
+     "\n",
+     "def print_results_with_content(element_list):\n",
+     "    \"\"\"\n",
+     "    Print formatted results including the inner content of elements.\n",
+     "    \"\"\"\n",
+     "    print(\"\\nElements Containing Most Images (Lowest Level for Each Count):\")\n",
+     "    print(\"=\" * 100)\n",
+     "\n",
+     "    for rank, (tag_info, count, element) in enumerate(element_list, 1):\n",
+     "        print(f\"\\nRank {rank}:\")\n",
+     "        print(\"-\" * 100)\n",
+     "        print(f\"Element: {tag_info}\")\n",
+     "        print(f\"Image Count: {count}\")\n",
+     "        print(\"\\nContent Preview:\")\n",
+     "        print(\"-\" * 100)\n",
+     "\n",
+     "        # Get all immediate img tags\n",
+     "        immediate_images = element.find_all('img', recursive=False)\n",
+     "        nested_images = element.find_all('img', recursive=True)\n",
+     "\n",
+     "        print(f\"Direct images: {len(immediate_images)}\")\n",
+     "        print(f\"Total images (including nested): {len(nested_images)}\")\n",
+     "        print(\"\\nImage sources:\")\n",
+     "\n",
+     "        # Print image sources and alt text\n",
+     "        for img in nested_images:\n",
+     "            src = img.get('src', 'No source')\n",
+     "            alt = img.get('alt', 'No alt text')\n",
+     "            print(f\"- Source: {src}\")\n",
+     "            print(f\" Alt text: {alt}\")\n",
+     "\n",
+     "        print(\"\\nFull HTML structure:\")\n",
+     "        print(\"-\" * 100)\n",
+     "        # Print formatted HTML structure\n",
+     "        html_content = element.prettify()\n",
+     "        print(html_content)\n",
+     "        print(\"=\" * 100)\n",
+     "\n",
+     "\n",
+     "def find_top_image_parent(soup, base_url):\n",
+     "    \"\"\"\n",
+     "    Find the element containing the most images at the lowest level and return its details as JSON.\n",
+     "    \"\"\"\n",
+     "    # Collect all elements with their image counts\n",
+     "    soup = convert_relative_urls(soup, base_url)\n",
+     "\n",
+     "    elements_with_counts = []\n",
+     "    for element in soup.find_all():\n",
+     "        if element.name != 'img': # Skip img tags themselves\n",
+     "            image_count = count_images_in_element(element)\n",
+     "            if image_count > 0:\n",
+     "                elements_with_counts.append((element, image_count))\n",
+     "\n",
+     "    # Sort by image count in descending order\n",
+     "    elements_with_counts.sort(key=lambda x: x[1], reverse=True)\n",
+     "\n",
+     "    if not elements_with_counts:\n",
+     "        return json.dumps({\"error\": \"No elements with images found\"}, indent=2)\n",
+     "\n",
+     "    max_count = elements_with_counts[0][1]\n",
+     "\n",
+     "    # Get all elements with max count\n",
+     "    top_elements = [(elem, count) for elem, count in elements_with_counts if count == max_count]\n",
+     "    print(len(elements_with_counts))\n",
+     "    # \n",
+     "    # # Find the lowest-level element among those with max count\n",
+     "    # top_element = None\n",
+     "    # for element, count in top_elements:\n",
+     "    #     if not has_child_with_same_count(element, count, elements_with_counts):\n",
+     "    #         top_element = element\n",
+     "    #         break\n",
+     "    # \n",
+     "    # if not top_element:\n",
+     "    #     return json.dumps({\"error\": \"No suitable element found\"}, indent=2)\n",
+     "    # \n",
+     "    # # Collect all images within the element\n",
+     "    # images = []\n",
+     "    # for img in top_element.find_all('img', recursive=True):\n",
+     "    #     image_data = {\n",
+     "    #         \"src\": img.get('src', 'No source'),\n",
+     "    #         \"alt\": img.get('alt', 'No alt text')\n",
+     "    #     }\n",
+     "    #     # Add any other attributes that exist\n",
+     "    #     for attr in ['title', 'width', 'height', 'class']:\n",
+     "    #         if img.get(attr):\n",
+     "    #             image_data[attr] = img[attr]\n",
+     "    #     images.append(image_data)\n",
+     "    # \n",
+     "    # # Create result dictionary\n",
+     "    # result = {\n",
+     "    #     \"element\": {\n",
+     "    #         \"tag\": top_element.name,\n",
+     "    #         \"identifier\": get_element_identifier(top_element),\n",
+     "    #         \"classes\": top_element.get('class', []),\n",
+     "    #         \"id\": top_element.get('id', None)\n",
+     "    #     },\n",
+     "    #     \"image_count\": max_count,\n",
+     "    #     \"images\": images,\n",
+     "    #     \"html_content\": str(top_element)\n",
+     "    # }\n",
+     "    # \n",
+     "    # # Create styled HTML output\n",
+     "    # style_tag = f\"\"\"\n",
+     "    # <style>\n",
+     "    #     img {{\n",
+     "    #         width: 300px;\n",
+     "    #         height: 300px;\n",
+     "    #         object-fit: contain;\n",
+     "    #     }}\n",
+     "    # </style>\n",
+     "    # \"\"\"\n",
+     "    # html_output = style_tag + str(top_element)\n",
+     "    # \n",
+     "    # return json.dumps(result, indent=2), html_output\n"
+    ],
+    "id": "3830f2e224e84798",
+    "outputs": [],
+    "execution_count": 33
+   },
+   {
+    "metadata": {},
+    "cell_type": "markdown",
+    "source": "",
+    "id": "80fa7f140d4da0a2"
+   },
+   {
+    "metadata": {
+     "ExecuteTime": {
+      "end_time": "2024-10-25T10:52:36.684418Z",
+      "start_time": "2024-10-25T10:52:36.614623Z"
+     }
+    },
+    "cell_type": "code",
+    "source": [
+     "base_url = products_url.rsplit('/', 1)[0]\n",
+     "find_top_image_parent(soup, base_url)\n",
+     "#\n",
+     "# with open(\"output.json\", \"w\") as file:\n",
+     "#     file.write(json_data)\n",
+     "# \n",
+     "# with open(\"output.html\", \"w\") as file:\n",
+     "#     file.write(html_content)"
+    ],
+    "id": "20b0b8cd238de02d",
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "411\n"
+      ]
+     }
+    ],
+    "execution_count": 34
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 2
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython2",
+    "version": "2.7.6"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 5
+ }
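
Note on the "Add fake headers" loop in scrape-content.ipynb: of the generated header names, Chrome only understands --user-agent as a command-line flag, so arguments like --accept=*/* or --referer=https://google.com are silently ignored rather than sent with requests. A minimal sketch of one way to actually attach the generated headers, assuming Selenium 4's execute_cdp_cmd on a Chrome driver (this is not part of the commit, and is untested here):

    from fake_headers import Headers
    from selenium import webdriver
    from selenium.webdriver.chrome.options import Options

    headers = Headers(headers=True).generate()

    chrome_options = Options()
    chrome_options.add_argument("--headless")
    # --user-agent is a real Chrome flag, so the UA can still be passed this way.
    chrome_options.add_argument(f"--user-agent={headers['User-Agent']}")

    driver = webdriver.Chrome(options=chrome_options)

    # Attach the remaining generated headers to every outgoing request via the
    # DevTools Protocol; Network.setExtraHTTPHeaders requires Network.enable first.
    driver.execute_cdp_cmd("Network.enable", {})
    driver.execute_cdp_cmd("Network.setExtraHTTPHeaders", {
        "headers": {k: v for k, v in headers.items() if k != "User-Agent"}
    })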
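
A related note on wait_for_images: comparing get_attribute('complete') with the string 'true' works because Selenium stringifies boolean DOM properties, but evaluating the properties directly in the browser avoids the string round-trip. A sketch under the same assumptions; image_is_loaded is a hypothetical helper, not part of the commit:

    def image_is_loaded(driver, img):
        # True once the browser reports the <img> element as fully decoded.
        return driver.execute_script(
            "return arguments[0].complete && arguments[0].naturalHeight > 0;", img
        )

    # Drop-in replacement for the lambda in the notebook's per-image loop:
    #     WebDriverWait(driver, 2).until(lambda d: image_is_loaded(d, img))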