nyanko7 committed on
Commit 0b17507
1 Parent(s): b1553aa

Upload folder using huggingface_hub

scripts/danbooru/create_tar.py ADDED
@@ -0,0 +1,31 @@
+import os
+import tarfile
+import shutil
+from tqdm import tqdm
+
+def create_tar_and_remove_dir(dirname, tar_name):
+    # Check if the directory exists
+    if not os.path.isdir(dirname):
+        print("The specified directory does not exist.")
+        return
+
+    # Make sure the output directory exists
+    os.makedirs(os.path.dirname(tar_name) or ".", exist_ok=True)
+
+    # Create the tar file
+    with tarfile.open(tar_name, "w") as tar:
+        for root, dirs, files in os.walk(dirname):
+            # Sort directories and files for consistent ordering
+            dirs.sort()
+            for file in tqdm(sorted(files), desc="Creating tars..."):
+                full_path = os.path.join(root, file)
+                # Add the file to the tar archive, relative to dirname
+                tar.add(full_path, arcname=os.path.relpath(full_path, dirname))
+
+    # Remove the original directory after archiving (disabled; uncomment to enable)
+    # shutil.rmtree(dirname)
+    # print(f"The directory {dirname} has been removed.")
+    print(f"Created {tar_name} from {dirname}.")
+
+# Example usage: tar one bucket of danbooru2 into danbooru-tars/
+create_tar_and_remove_dir("danbooru2/0202", "danbooru-tars/0202.tar")
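
A quick way to sanity-check the archive produced above (a minimal sketch, assuming the example path "danbooru-tars/0202.tar" from the call above):

import tarfile

# List the first few members of the archive created above to confirm
# that paths were stored relative to the bucket directory.
with tarfile.open("danbooru-tars/0202.tar", "r") as tar:
    for member in tar.getmembers()[:10]:
        print(member.name, member.size)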
scripts/danbooru/danbooru-check.py ADDED
@@ -0,0 +1,148 @@
+import time, os, json
+from tqdm import tqdm
+from curl_cffi import requests
+import concurrent.futures
+from pathlib import Path
+import tarfile
+from huggingface_hub import HfApi
+
+os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
+
+def main():
+    danbooru_path = Path("danbooru")
+    # Buckets to check: posts are sharded by the last three digits of their id
+    selected_range = set(['%03d' % i for i in [400]])
+    notexists, total = 0, 0
+    notexistsid = {}
+    with open("posts.json", "r") as f:
+        bar = tqdm(desc="Indexing files", ascii=True, leave=False)
+        cache = {}
+        data_size = {}
+        for d in selected_range:
+            data = []
+            fp = '0' + d
+            for file_path in (danbooru_path / fp).iterdir():
+                if file_path.is_file():
+                    data.append(file_path.stem)
+                    data_size[file_path.stem] = file_path.stat().st_size
+                    bar.update(1)
+            cache[fp] = set(data)
+
+        bar = tqdm(desc="Checking files", total=6_857_737, ascii=True, leave=False)
+        for line in f:
+            post = json.loads(line)
+            file_id = post['id']
+            cutoff = str(file_id)[-3:]
+            if cutoff not in selected_range:
+                bar.update(1)
+                continue
+            cutoff = '0' + cutoff
+            assert (danbooru_path / cutoff).exists(), f"{danbooru_path / cutoff} does not exist"
+            # A post counts as present when the file is on disk and its size matches;
+            # size checks are skipped for ids below 5,020,995
+            exists = str(file_id) in cache[cutoff] and (data_size[str(file_id)] == post["file_size"] or int(file_id) < 5_020_995)
+            total += 1
+            if not exists and "file_url" in post:
+                notexists += 1
+                if cutoff not in notexistsid:
+                    notexistsid[cutoff] = []
+                notexistsid[cutoff].append((
+                    file_id, cutoff, post["file_url"],
+                ))
+            bar.update(1)
+            bar.set_postfix_str(f"not exists: {notexists}")
+
+        bar.close()
+        print(f"not exists: {notexists}, total: {total}")
+
+    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as tar_executor:
+        # Buckets with nothing missing can be archived and uploaded right away
+        for d in selected_range:
+            cut = '0' + d
+            if cut not in notexistsid:
+                tar_executor.submit(archive_and_upload, Path("danbooru") / cut, cut)
+
+        for key, group in notexistsid.items():
+            keybar = tqdm(desc=f"Downloading files in key={key}", total=len(group), position=1, ascii=True, leave=False)
+            ok = False
+            while not ok:
+                with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
+                    for file_id, cutoff, file_url in group:
+                        executor.submit(download, file_id, cutoff, file_url, keybar)
+                # Retry the whole group until every file is on disk
+                ok = True
+                for file_id, cutoff, file_url in group:
+                    suffix = Path(file_url).suffix
+                    if file_url != "" and not Path(f"danbooru/{cutoff}/{file_id}{suffix}").is_file():
+                        ok = False
+            tar_executor.submit(archive_and_upload, Path(f"danbooru/{key}"), key)
+            print(f"Finished download group {key}")
+            keybar.close()
+
+def rm_tree(pth: Path):
+    for child in pth.iterdir():
+        if child.is_file():
+            child.unlink()
+        else:
+            rm_tree(child)
+    pth.rmdir()
+
+def archive_and_upload(dirname, name):
+    tar_name = Path("danbooru-tars") / f"data-{name}.tar"
+    # Check if the directory exists
+    if not os.path.isdir(dirname):
+        print("The specified directory does not exist.")
+        return
+
+    # Create the tar file
+    tar_name.parent.mkdir(exist_ok=True)
+    print(f"Creating {tar_name}")
+    with tarfile.open(tar_name, "w") as tar:
+        for root, dirs, files in os.walk(dirname):
+            # Sort files for consistent ordering
+            for file in tqdm(sorted(files), desc=f"Creating {tar_name}", ascii=True):
+                full_path = os.path.join(root, file)
+                # Add the file at the archive root (the bucket directory is dropped)
+                tar.add(full_path, arcname=file)
+
+    # Remove the original directory after archiving
+    rm_tree(dirname)
+    print(f"The directory {dirname} has been removed.")
+
+    api = HfApi()
+    print(api.upload_file(
+        path_or_fileobj=tar_name,
+        path_in_repo=f"original/data-{name}.tar",
+        repo_id="nyanko7/danbooru2023",
+        repo_type="dataset",
+    ))
+    tar_name.unlink()
+
+def download(idx, cutoff, file_url, bar):
+    suffix = Path(file_url).suffix
+    target = Path(f"danbooru/{cutoff}/{idx}{suffix}")
+    if target.is_file():
+        # Already fetched on an earlier pass; skip re-downloading
+        bar.update(1)
+        return
+    max_attempts = 3  # maximum number of attempts
+
+    for attempt in range(max_attempts):
+        try:
+            r = requests.get(file_url, impersonate="chrome110", timeout=120)
+            if r.status_code == 200:
+                with open(target, "wb") as f:
+                    f.write(r.content)
+                break  # download succeeded
+            else:
+                print(f"Attempt {attempt+1} failed to download {file_url}: error {r.status_code}")
+        except Exception as e:
+            print(f"Attempt {attempt+1} failed to download {file_url}: error {e}")
+
+        time.sleep(1)  # wait one second before the next attempt
+        if attempt + 1 == max_attempts:
+            print(f"Failed to download {file_url} after {max_attempts} attempts.")
+    bar.update(1)
+
+if __name__ == "__main__":
+    main()
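
For reference, the bucket key used throughout this script is the post id's last three digits with a leading '0'; a minimal illustration (bucket_for is a hypothetical helper, not part of the script):

# Bucket derivation as used above: last three digits of the id, '0'-prefixed.
def bucket_for(post_id: int) -> str:
    return '0' + str(post_id)[-3:]

assert bucket_for(5021400) == "0400"  # the bucket selected_range targets above
assert bucket_for(7654123) == "0123"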
scripts/down_files.py ADDED
@@ -0,0 +1,119 @@
+import time, os, json
+from tqdm import tqdm
+from curl_cffi import requests
+import concurrent.futures
+from pathlib import Path
+import tarfile
+from huggingface_hub import HfApi
+
+os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
+
+def main():
+    notexistsid = {}
+    yandere_data = json.loads(Path("posts.json").read_text())
+    for item in tqdm(yandere_data, desc="Processing yandere", ascii=True):
+        file_id = item["id"]
+        # Shard posts into buckets by id modulo 1000, zero-padded to four digits
+        modula = int(file_id) % 1000
+        cutoff = str(modula).zfill(4)
+        if cutoff not in notexistsid:
+            notexistsid[cutoff] = []
+        notexistsid[cutoff].append((file_id, cutoff, item["file_url"]))
+
+    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as tar_executor:
+        for key, group in notexistsid.items():
+            # Buckets to skip
+            if key in [
+                '0862', '0863', '0865', '0867', '0869',
+                '0868', '0870', '0871', '0873',
+                '0874', '0876', '0877', '0878', '0879',
+                '0880', '0881', '0882', '0883', '0884', '0885',
+                '0886', '0887', '0889', '0890', '0891',
+                '0892', '0893', '0894', '0896', '0897', '0898',
+                '0899', '0900', '0901', '0902', '0903'
+            ]:
+                continue
+            keybar = tqdm(desc=f"Downloading files in key={key}", total=len(group), position=1, ascii=True, leave=False)
+            os.makedirs(f"yandere/{key}/", exist_ok=True)
+            ok = False
+            while not ok:
+                with concurrent.futures.ThreadPoolExecutor(max_workers=12) as executor:
+                    for file_id, cutoff, file_url in group:
+                        executor.submit(download, file_id, cutoff, file_url, keybar)
+                # Retry the whole group until every file is on disk
+                ok = True
+                for file_id, cutoff, file_url in group:
+                    suffix = Path(file_url).suffix
+                    if file_url != "" and not Path(f"yandere/{cutoff}/{file_id}{suffix}").is_file():
+                        ok = False
+            tar_executor.submit(archive_and_upload, Path(f"yandere/{key}"), key)
+            print(f"Finished download group {key}")
+            keybar.close()
+
+def rm_tree(pth: Path):
+    for child in pth.iterdir():
+        if child.is_file():
+            child.unlink()
+        else:
+            rm_tree(child)
+    pth.rmdir()
+
+def archive_and_upload(dirname, name):
+    tar_name = Path("yandere-tars") / f"data-{name}.tar"
+    # Check if the directory exists
+    if not os.path.isdir(dirname):
+        print("The specified directory does not exist.")
+        return
+
+    # Create the tar file
+    tar_name.parent.mkdir(exist_ok=True)
+    print(f"Creating {tar_name}")
+    with tarfile.open(tar_name, "w") as tar:
+        for root, dirs, files in os.walk(dirname):
+            # Sort files for consistent ordering
+            for file in tqdm(sorted(files), desc=f"Creating {tar_name}", ascii=True):
+                full_path = os.path.join(root, file)
+                # Add the file at the archive root (the bucket directory is dropped)
+                tar.add(full_path, arcname=file)
+
+    # Remove the original directory after archiving
+    rm_tree(dirname)
+    print(f"The directory {dirname} has been removed.")
+
+    api = HfApi()
+    print(api.upload_file(
+        path_or_fileobj=tar_name,
+        path_in_repo=f"original/data-{name}.tar",
+        repo_id="nyanko7/yandere2023",
+        repo_type="dataset",
+    ))
+    tar_name.unlink()
+
+def download(idx, cutoff, file_url, bar):
+    suffix = Path(file_url).suffix
+    target = Path(f"yandere/{cutoff}/{idx}{suffix}")
+    if target.is_file():
+        # Already fetched on an earlier pass; skip re-downloading
+        bar.update(1)
+        return
+    max_attempts = 5  # maximum number of attempts
+
+    for attempt in range(max_attempts):
+        try:
+            r = requests.get(file_url, impersonate="chrome110", timeout=120)
+            if r.status_code == 200:
+                with open(target, "wb") as f:
+                    f.write(r.content)
+                break  # download succeeded
+            else:
+                print(f"Attempt {attempt+1} failed to download {file_url}: error {r.status_code}")
+        except Exception as e:
+            print(f"Attempt {attempt+1} failed to download {file_url}: error {e}")
+
+        time.sleep(1)  # wait one second before the next attempt
+        if attempt + 1 == max_attempts:
+            print(f"Failed to download {file_url} after {max_attempts} attempts.")
+    bar.update(1)
+
+if __name__ == "__main__":
+    main()
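
Each finished bucket lands in the dataset repo as original/data-{name}.tar, so a consumer can pull a single bucket back down with huggingface_hub; a minimal sketch (bucket '0400' is an arbitrary example):

import tarfile
from huggingface_hub import hf_hub_download

# Fetch one uploaded bucket from the dataset repo and list a few members.
tar_path = hf_hub_download(
    repo_id="nyanko7/yandere2023",
    filename="original/data-0400.tar",
    repo_type="dataset",
)
with tarfile.open(tar_path, "r") as tar:
    print(tar.getnames()[:5])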
scripts/down_metadata.py ADDED
@@ -0,0 +1,41 @@
+from curl_cffi import requests
+import json
+import os
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from tqdm.rich import tqdm
+
+# Fetch one page of post metadata
+def fetch_url(page):
+    url = f"https://yande.re/post.json?limit=100&page={page}"
+    response = requests.get(url, impersonate="chrome110")
+    # Check whether the request succeeded
+    if response.status_code == 200:
+        return response.json()
+    else:
+        print(f"Failed to retrieve data from {url}")
+        return None
+
+# Save one page of results as JSON
+def save_json(data, page):
+    if data:
+        with open(f"jsonpart/page_{page}.json", 'w', encoding='utf-8') as f:
+            json.dump(data, f, ensure_ascii=False, indent=4)
+
+# Crawl concurrently with a thread pool
+def main():
+    os.makedirs("jsonpart", exist_ok=True)  # output directory for per-page JSON
+    pages = range(25001)  # pages 0 through 25000
+    bar = tqdm(total=len(pages))
+    # Cap the pool at 20 worker threads
+    with ThreadPoolExecutor(max_workers=20) as executor:
+        # Map each future back to its page number
+        future_to_url = {executor.submit(fetch_url, page): page for page in pages}
+        # Collect results as they complete
+        for future in as_completed(future_to_url):
+            page = future_to_url[future]
+            data = future.result()
+            save_json(data, page)
+            bar.update(1)
+
+if __name__ == "__main__":
+    main()
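
down_files.py above consumes a single posts.json, while this script writes one file per page under jsonpart/; a minimal merge sketch under that assumption:

import json
from pathlib import Path

# Concatenate the per-page dumps into the single JSON array that
# down_files.py reads via json.loads(Path("posts.json").read_text()).
posts = []
for page_file in sorted(Path("jsonpart").glob("page_*.json")):
    posts.extend(json.loads(page_file.read_text(encoding="utf-8")))
Path("posts.json").write_text(json.dumps(posts, ensure_ascii=False), encoding="utf-8")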