init
- .gitignore +0 -0
- Makefile +16 -0
- README.md +89 -0
- requirements.txt +5 -0
- scripts/process.py +63 -0
- scripts/scraper.py +76 -0
.gitignore
ADDED
(empty file)
Makefile
ADDED
@@ -0,0 +1,16 @@
.PHONY: install scrape process upload clean

install:
	pip install -r requirements.txt

scrape:
	python scripts/scraper.py

process:
	python scripts/process.py

upload:
	python scripts/process.py --upload True

clean:
	rm -rf dataset/*
README.md
ADDED
@@ -0,0 +1,89 @@
---
license: mit
task_categories:
- image-to-image
language:
- en
tags:
- video-games
- stalker
- reference
- environment
- characters
pretty_name: STALKER Reference Dataset
size_categories:
- n<1K
---

# STALKER Reference Dataset

This dataset contains reference images and metadata from the STALKER game series wiki. It's intended for research and educational purposes.

## Dataset Description

### Dataset Summary

A curated collection of STALKER game series reference images with associated metadata, categorized by type (characters, locations, items).

### Languages

English

### Data Collection and Curation

Images and metadata were collected from publicly available wiki pages using automated scraping with appropriate rate limiting and validation.

### Dataset Structure

```
{
  'image_url': str,     # Direct URL to image
  'category': str,      # One of: characters, locations, items
  'metadata': {
    'title': str,        # Original image title
    'source': str,       # Source attribution
    'scrape_date': str   # Date of collection
  }
}
```
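
Once the dataset has been processed and pushed, it can be loaded with the `datasets` library. A minimal sketch, using the placeholder repo id from `scripts/process.py` (swap in the real repo id after uploading):

```python
from datasets import load_dataset

# "your-username/stalker-dataset" is the placeholder repo id from scripts/process.py;
# replace it with the actual repo id once the dataset has been pushed.
ds = load_dataset("your-username/stalker-dataset", split="train")

print(ds)
print(ds[0]["category"], ds[0]["metadata"]["title"])
```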

### Dataset Creation

Collection is automated via the Python scripts in this repository (see `scripts/`).
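
A rough end-to-end sketch, assuming the code is run from the repository root so that `scripts/` is importable and the `dataset/` output directory is created there:

```python
from scripts.scraper import StalkerWikiScraper
from scripts.process import DatasetProcessor

# Scrape the wiki category pages and write dataset/raw_data.json
scraper = StalkerWikiScraper()
scraper.save_raw_data(scraper.scrape_images())

# Validate the image URLs and build the HuggingFace dataset
processor = DatasetProcessor()
dataset = processor.process_data()
```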

### Considerations for Using the Data

#### Social Impact of Dataset

This dataset is intended for research and educational purposes only.

#### Discussion of Biases

The dataset may over-represent prominent game elements that are more extensively documented in the wiki.

#### Other Known Limitations

- Limited to publicly available wiki content
- Image quality varies
- Some categories may be underrepresented

### Additional Information

#### Dataset Curators

This dataset was curated automatically, with manual validation of the collection process.

#### Licensing Information

MIT License

#### Citation Information

Please cite this dataset as:

```
@misc{stalker_dataset,
  title={STALKER Reference Dataset},
  year={2024},
  publisher={HuggingFace}
}
```
requirements.txt
ADDED
@@ -0,0 +1,5 @@
requests==2.31.0
beautifulsoup4==4.12.2
datasets==2.18.0
Pillow==10.2.0
huggingface-hub==0.20.3
scripts/process.py
ADDED
@@ -0,0 +1,63 @@
from datasets import Dataset
from pathlib import Path
import json
import logging
from typing import Dict, List
import requests
from PIL import Image
from io import BytesIO

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class DatasetProcessor:
    def __init__(self, raw_data_path: str = 'dataset/raw_data.json'):
        self.raw_data_path = Path(raw_data_path)

    def load_raw_data(self) -> List[Dict]:
        """Load scraped data from JSON"""
        with open(self.raw_data_path) as f:
            return json.load(f)

    def validate_image(self, url: str) -> bool:
        """Check that the image URL is reachable and its content parses as an image"""
        try:
            response = requests.get(url, timeout=10)
            response.raise_for_status()
            # verify() checks image integrity without fully decoding it
            Image.open(BytesIO(response.content)).verify()
            return True
        except Exception:
            return False

    def process_data(self) -> Dataset:
        """Process raw data into a HuggingFace dataset"""
        raw_data = self.load_raw_data()

        # Filter valid images and restructure data into column format
        processed_data = {
            'image_url': [],
            'category': [],
            'metadata': []
        }

        for entry in raw_data:
            if self.validate_image(entry['image_url']):
                processed_data['image_url'].append(entry['image_url'])
                processed_data['category'].append(entry['category'])
                processed_data['metadata'].append(entry['metadata'])

        # Create HuggingFace dataset
        dataset = Dataset.from_dict(processed_data)
        logger.info(f"Created dataset with {len(dataset)} entries")

        return dataset

    def save_to_hub(self, dataset: Dataset, repo_id: str):
        """Push dataset to the HuggingFace Hub (requires a logged-in token)"""
        dataset.push_to_hub(repo_id)
        logger.info(f"Pushed dataset to {repo_id}")

if __name__ == "__main__":
    processor = DatasetProcessor()
    dataset = processor.process_data()
    # Uncomment to push to hub:
    # processor.save_to_hub(dataset, "your-username/stalker-dataset")
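
Note that the Makefile's `upload` target invokes the script with `--upload True`, but the entry point above does not parse any arguments, so the flag is silently ignored. A minimal sketch of flag handling that would match the Makefile, intended as a drop-in for the `if __name__ == "__main__":` block once a real repo id exists:

```python
import argparse

parser = argparse.ArgumentParser()
# The Makefile passes a literal string ("--upload True"), so compare the value explicitly.
parser.add_argument("--upload", default="False")
args = parser.parse_args()

processor = DatasetProcessor()
dataset = processor.process_data()
if args.upload.lower() == "true":
    # Placeholder repo id; replace with your own before uploading.
    processor.save_to_hub(dataset, "your-username/stalker-dataset")
```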
scripts/scraper.py
ADDED
@@ -0,0 +1,76 @@
import requests
from bs4 import BeautifulSoup
from pathlib import Path
import time
import json
from typing import Dict, List
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class StalkerWikiScraper:
    def __init__(self, base_url: str = "https://stalker.fandom.com"):
        self.base_url = base_url
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Educational Dataset Creation)'
        })

    def get_page(self, url: str) -> BeautifulSoup:
        """Fetch a page with rate limiting"""
        time.sleep(1)  # Rate limiting: at most one request per second
        response = self.session.get(url, timeout=10)
        response.raise_for_status()
        return BeautifulSoup(response.text, 'html.parser')

    def scrape_images(self) -> List[Dict]:
        """Scrape images and metadata from the wiki category pages"""
        data = []
        categories = ['Characters', 'Locations', 'Items']

        for category in categories:
            try:
                category_url = f"{self.base_url}/wiki/Category:{category}"
                soup = self.get_page(category_url)

                # Find all image containers
                images = soup.find_all('div', class_='image-thumbnail')

                for img in images:
                    try:
                        img_url = img.find('img')['src']
                        link = img.find('a')
                        title = link['title'] if link and link.has_attr('title') else 'Unknown'

                        data.append({
                            'image_url': img_url,
                            'category': category.lower(),
                            'metadata': {
                                'title': title,
                                'source': 'stalker_wiki',
                                'scrape_date': time.strftime('%Y-%m-%d')
                            }
                        })

                    except Exception as e:
                        logger.error(f"Error processing image: {e}")

            except Exception as e:
                logger.error(f"Error processing category {category}: {e}")

        return data

    def save_raw_data(self, data: List[Dict], output_dir: str = 'dataset'):
        """Save scraped data to JSON"""
        Path(output_dir).mkdir(exist_ok=True)
        output_file = Path(output_dir) / 'raw_data.json'

        with open(output_file, 'w') as f:
            json.dump(data, f, indent=2)

        logger.info(f"Saved {len(data)} entries to {output_file}")

if __name__ == "__main__":
    scraper = StalkerWikiScraper()
    data = scraper.scrape_images()
    scraper.save_raw_data(data)
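
One caveat about the image extraction above: Fandom pages often lazy-load gallery images, in which case an `<img>` tag's `src` holds a small placeholder while the real URL sits in `data-src`. Whether that applies to these category pages is an assumption, but a tolerant helper along these lines is cheap insurance:

```python
def extract_image_url(img_tag) -> str:
    """Prefer data-src (used by lazy-loading wiki themes) and fall back to src.

    Both attribute names are assumptions about the wiki markup; adjust after
    inspecting the actual category pages.
    """
    return img_tag.get("data-src") or img_tag.get("src") or ""
```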