import json
import logging
import time
from pathlib import Path
from typing import Dict, List

import requests
from bs4 import BeautifulSoup

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class StalkerWikiScraper:

    def __init__(self, base_url: str = "https://stalker.fandom.com"):
        self.base_url = base_url
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Educational Dataset Creation)'
        })

    def get_page(self, url: str) -> BeautifulSoup:
        """Fetch a page, sleeping first as crude rate limiting."""
        time.sleep(1)  # be polite: at most one request per second
        response = self.session.get(url, timeout=30)  # avoid hanging forever on a stalled connection
        response.raise_for_status()
        return BeautifulSoup(response.text, 'html.parser')
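
    # Category pages on Fandom are paginated, so a single fetch sees only the
    # first batch of members. A minimal sketch of walking the pagination,
    # assuming the 'category-page__pagination-next' link class used by
    # Fandom's current skin (verify against the live markup before relying on it):
    def iter_category_pages(self, category_url: str):
        """Yield a parsed page for each page of a category listing."""
        from urllib.parse import urljoin  # stdlib; kept local so the sketch is self-contained
        url = category_url
        while url:
            soup = self.get_page(url)
            yield soup
            next_link = soup.find('a', class_='category-page__pagination-next')
            url = urljoin(self.base_url, next_link['href']) if next_link else None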

    def scrape_images(self) -> List[Dict]:
        """Scrape image URLs and metadata from the wiki's category pages."""
        data = []
        categories = ['Characters', 'Locations', 'Items']

        for category in categories:
            try:
                category_url = f"{self.base_url}/wiki/Category:{category}"
                soup = self.get_page(category_url)
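
                # 'image-thumbnail' is the selector from the original script;
                # Fandom class names vary by skin, so confirm it against the
                # live page if this returns no matches.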
                images = soup.find_all('div', class_='image-thumbnail')

                for img in images:
                    try:
                        img_tag = img.find('img')
                        if img_tag is None:
                            continue
                        # Fandom lazy-loads thumbnails; the real URL often
                        # lives in data-src rather than src
                        img_url = img_tag.get('data-src') or img_tag.get('src')
                        link = img.find('a')
                        title = link.get('title', 'Unknown') if link else 'Unknown'

                        data.append({
                            'image_url': img_url,
                            'category': category.lower(),
                            'metadata': {
                                'title': title,
                                'source': 'stalker_wiki',
                                'scrape_date': time.strftime('%Y-%m-%d')
                            }
                        })
                    except Exception as e:
                        logger.error(f"Error processing image: {e}")

            except Exception as e:
                logger.error(f"Error processing category {category}: {e}")

        return data

    def save_raw_data(self, data: List[Dict], output_dir: str = 'dataset'):
        """Save scraped data to JSON."""
        Path(output_dir).mkdir(parents=True, exist_ok=True)
        output_file = Path(output_dir) / 'raw_data.json'

        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2)

        logger.info(f"Saved {len(data)} entries to {output_file}")
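
    # A minimal sketch of actually downloading the image files, assuming the
    # scraped URLs are directly fetchable; the filename scheme below is an
    # illustrative choice, not part of the original pipeline:
    def download_images(self, data: List[Dict], output_dir: str = 'dataset/images'):
        """Download each scraped image alongside the JSON metadata."""
        out = Path(output_dir)
        out.mkdir(parents=True, exist_ok=True)
        for i, entry in enumerate(data):
            try:
                time.sleep(1)  # same politeness delay as page fetches
                response = self.session.get(entry['image_url'], timeout=30)
                response.raise_for_status()
                # the .jpg extension is assumed; Fandom thumbnails are usually JPEG/PNG
                (out / f"{entry['category']}_{i:04d}.jpg").write_bytes(response.content)
            except Exception as e:
                logger.error(f"Error downloading {entry['image_url']}: {e}")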


if __name__ == "__main__":
    scraper = StalkerWikiScraper()
    data = scraper.scrape_images()
    scraper.save_raw_data(data)
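    # Optional: also fetch the image files via the download_images sketch above
    # scraper.download_images(data)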