upload
make_artist_all_in_one_dataset_from_hf.py
ADDED
@@ -0,0 +1,303 @@
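"""
Build the "artist_all_in_one" training set from a Hugging Face dataset repo.

For each configured artist folder, this script downloads the folder's files
from licyk/image_training_set, prepends the artist's trigger tag to every
caption (.txt) file, and renames the folder to "<repeat>_<name>" (apparently
the repeat-count folder layout used by kohya-style trainers; inferred from
the naming scheme, not stated in the source).
"""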
import os
import argparse
import shutil
import threading
import logging
from pathlib import Path
from queue import Queue

from huggingface_hub import HfApi, hf_hub_download
from tqdm import tqdm


def get_logging(level=None) -> logging.Logger:
    logger = logging.getLogger('DB-Maker')
    if level is None:
        level = logging.DEBUG
    logger.setLevel(level)  # set the log level

    # create a console handler
    console_handler = logging.StreamHandler()

    # create a formatter and define the log output format
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    console_handler.setFormatter(formatter)

    # attach the handler to the logger
    logger.addHandler(console_handler)
    return logger


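# NOTE: get_args() is defined but never called; main() hardcodes its configuration instead.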
def get_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    normalized_filepath = lambda filepath: str(Path(filepath).absolute().as_posix())

    parser.add_argument("--hf-token", type=str, default=None, help="HuggingFace token")
    parser.add_argument("--repo", type=str, default=None, help="name of the repository")
    parser.add_argument("--repo-type", type=str, default=None, help="kind of HuggingFace repository, e.g. model, dataset")
    parser.add_argument("--local-path", type=normalized_filepath, default=None, help="local path of the files to upload")
    parser.add_argument("--folder", type=str, default=None, help="download only the contents of this folder in the repository")

    return parser.parse_args()


def get_hf_file_list(repo, repo_type, hf_token) -> list:
    logger.info(f"Fetching the list of all files in {repo}")
    api = HfApi()
    model_list = api.list_repo_files(
        repo_id=repo,
        repo_type=repo_type,
        token=hf_token
    )
    logger.info(f"Number of files in {repo}: {len(model_list)}")
    return model_list


def download_hf_file(repo_id: str, repo_type: str, filename: str, local_dir: str, hf_token: str) -> None:
    try:
        logger.info(f"Downloading {filename} to {local_dir}")
        hf_hub_download(
            repo_id=repo_id,
            repo_type=repo_type,
            filename=filename,
            local_dir_use_symlinks=False,  # deprecated (and ignored) in recent huggingface_hub; kept for older versions
            local_dir=local_dir,
            token=hf_token
        )
        logger.info(f"Downloaded {filename} successfully")
    except Exception as e:  # was a bare except, which would also swallow KeyboardInterrupt
        logger.error(f"Failed to download {filename}: {e}")


def filter_file(file_list: list, start_dir: str) -> list:
    filter_file_list = []
    for f in file_list:
        if f.startswith(start_dir):
            filter_file_list.append(f)

    logger.info(f"Number of files in {start_dir}: {len(filter_file_list)}")
    return filter_file_list


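# Prepend the trigger tag to a caption file, skipping files that already start with it.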
def insert_tag(path: str, tag: str) -> None:
    content = []
    if not os.path.exists(path):
        logger.info(f"{path} not found")
        return None

    try:
        with open(path, "r", encoding="utf8") as f:
            for i in f.readlines():
                content.append(i)

        if ''.join(content).startswith(tag):
            return

        with open(path, "w", encoding="utf8") as f:
            f.write(f"{tag}, {''.join(content)}")
    except Exception as e:
        # passing `e` as a second positional arg made logging raise a string-formatting error
        logger.error(f"Failed to open {path}: {e}")


def get_all_file(directory: str) -> list:
    file_list = []
    for dirname, _, filenames in os.walk(directory):
        for filename in filenames:
            file_list.append(os.path.join(dirname, filename))
    return file_list


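# Walk the dataset folder and add the trigger tag to every .txt caption file.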
def process_dataset_tag(dataset_path: str, tag: str) -> None:
    if not os.path.exists(dataset_path):
        logger.info(f"{dataset_path} not found")
        return

    file_list = get_all_file(dataset_path)

    # keep only the caption files
    tag_file = []
    for file in file_list:
        if file.endswith(".txt"):
            tag_file.append(file)

    for file in tqdm(tag_file, desc=f"Adding tag {tag}"):
        insert_tag(file, tag)


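# Per-artist pipeline: skip if the renamed folder already exists, download the
# folder's files from the repo with a thread pool, tag the captions, then
# rename the folder to "<repeat>_<name>" so the repeat count is encoded in the path.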
def make_dataset(repo: str, repo_type: str, hf_token: str, hf_repo_file_list: list, hf_folder: str, path: str, repeat: int, tag: str) -> None:
    dataset_path = os.path.join(path, hf_folder)

    if os.path.exists(os.path.join(os.path.dirname(dataset_path), f"{str(repeat)}_{hf_folder}")):
        logger.info(f"{hf_folder} already exists")
        return

    logger.info(f"Processing dataset: {hf_folder}")

    hf_list = filter_file(hf_repo_file_list, hf_folder)
    task_list = []
    for filename in tqdm(hf_list, desc="Creating download tasks"):
        task_list.append([repo, repo_type, filename, path, hf_token])
    model_downloader = ModelDownload(task_list)
    model_downloader.start_threads()

    process_dataset_tag(dataset_path, tag)
    shutil.move(dataset_path, os.path.join(os.path.dirname(dataset_path), f"{str(repeat)}_{hf_folder}"))


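# Simple producer/consumer downloader: worker threads pull [repo, repo_type,
# filename, local_dir, token] tasks from a queue; one None sentinel per thread
# signals shutdown after queue.join() confirms all real tasks are done.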
class ModelDownload:
    def __init__(self, urls) -> None:
        self.urls = urls
        self.queue = Queue()
        self.total_urls = len(urls)  # total number of URLs
        self.downloaded_count = 0  # number already downloaded
        self.lock = threading.Lock()  # lock protecting access to the download counter
        self.progress_bar = tqdm(total=self.total_urls, desc="Download progress", unit="file")  # progress bar instance

    def worker(self):
        while True:
            url = self.queue.get()
            if url is None:
                break
            download_hf_file(url[0], url[1], url[2], url[3], url[4])
            self.queue.task_done()
            with self.lock:  # lock while touching shared state
                self.downloaded_count += 1
                self.print_progress()  # report progress
                # self.progress_bar.update(1)  # update the progress bar

    def print_progress(self):
        progress = (self.downloaded_count / self.total_urls) * 100
        logger.info(f"Download progress: {self.downloaded_count}/{self.total_urls} ({progress:.2f}%)")

    def finish(self):
        self.progress_bar.close()  # close the progress bar

    def start_threads(self, num_threads=16):
        threads = []
        for _ in range(num_threads):
            thread = threading.Thread(target=self.worker)
            thread.start()
            threads.append(thread)

        for url in self.urls:
            self.queue.put(url)

        self.queue.join()

        for _ in range(num_threads):
            self.queue.put(None)

        for thread in threads:
            thread.join()

        self.finish()  # close the progress bar when done


def main():
    global logger
    logger = get_logging()

    repo = "licyk/image_training_set"
    repo_type = "dataset"
    hf_token = None
    dataset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "artist_all_in_one")
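    # Each entry is [hf_folder, trigger_tag, repeat]. The trailing comment on
    # each line appears to be the image count; the repeat values look chosen so
    # that repeat * count stays roughly balanced (about 240-300) across artists.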
    dataset_list = [
        ["gan_cheng_1", "nachoneko", 2], # 125
        ["rurudo_1", "rurudo", 7], # 40
        ["deyui_1", "deyui", 2], # 148
        ["tyomimas_1", "tyomimas", 1], # 238
        ["linhai_1", "linhai", 7], # 40
        ["viclim-monou_1", "viclim monou", 4], # 74
        ["yeurei_1", "yeurei", 4], # 69
        ["asagi_0398_1", "asagi0398", 6], # 44
        ["Noyu_1", "noyu", 3], # 86
        ["gomalio_y_1", "gomalio y", 4], # 74
        ["qian_qian_jie_1", "qianqianjie", 4], # 64
        ["sheya_1", "sheya", 3], # 85
        ["Blue_Archive_Official_2", "blue archive official", 1], # 262, max
        ["sei_umehara_1", "sei umehara", 2], # 132
        ["rafa_1", "rafa", 5], # 52
        ["yume335_1", "yume335", 6], # 43
        ["Yunouou10_1", "yunouou10", 11], # 23
        ["Renren_1", "renren", 6], # 47
        ["AL17460600_1", "al17460600", 5], # 49
        ["fruitsrabbit_1", "fruitsrabbit", 2], # 158
        ["void_0_1", "void0", 1], # 234
        ["ekureea_1", "ekureea", 4], # 74
        ["nocopyrightgirl_1", "nocopyrightgirl", 2], # 121
        ["KonYa666_1", "konya666", 3], # 109
        ["momoimon_1", "momoimon", 3], # 89
        ["a20_1", "a20", 4], # 64
        ["ikari_1", "ikari", 2], # 123
        ["ningen_mame_1", "ningen mame", 2], # 139
        ["jyt_1", "jyt", 1], # 195
        ["suzume_1", "suzume", 1], # 197
        ["deieaf_1", "deieaf", 2], # 168
        ["hk3_1", "hk3", 3], # 80
        ["matchach_1", "matchach", 1], # 218
        ["quan_1", "quan", 2], # 126
        ["fuzichoco_1", "fuzichoco", 2], # 133
        ["torino_1", "torino", 2], # 186
        ["negimapurinn_1", "negimapurinn", 3], # 94
        ["anon_1", "anon", 3], # 103
        ["honkai_star_rail_expression_1", "honkai star rail expression", 2], # 167
        ["lanubis_2", "lanubis", 5], # 56
        ["wanke_1", "wanke", 3], # 96
        ["ixy_1", "ixy", 2], # 167
        ["jirujiaru826_1", "jirujiaru826", 3], # 94
        ["Sco_ttie_1", "sco ttie", 2], # 183
        ["lplpsg_1", "lplpsg", 6], # 48
        ["sakurapion_1", "sakurapion", 4], # 67
        # ["pvc_1", "pvc", 1], # 546
        ["guo_jiang_mullay_1", "guo jiang mullay", 7], # 40
        # ["wlop_1", "wlop", 1], # 210
        ["Colon_BR_1", "colon br", 3], # 90
        ["DaylightAllure_1", "daylightallure", 3], # 109
        ["kagawayusaku_1", "kagawayusaku", 3], # 105
        ["ogipote_1", "ogipote", 4], # 60
        ["shiratama_shiratamaco_1", "shiratama shiratamaco", 2], # 132
        ["yoneyama_mai_1", "yoneyama mai", 4], # 60
        ["genshin_cg_flat_color_style_1", "genshin cg flat color style", 1], # 219
        ["Batory_ken_1", "batory ken", 5], # 52
        # ["Nakkar7_1", "nakkar7", 14], # 40
        ["siro90414_1", "siro90414", 2], # 161
        ["sunfish_1", "sunfish", 4], # 64
        ["Anmi_1", "anmi", 5], # 59
        ["hakua_aa_1", "hakua aa", 3], # 81
        ["WERI_1", "weri", 5], # 56
    ]

    logger.info(f"Repository: {repo}")
    logger.info(f"Repository type: {repo_type}")
    logger.info(f"Training set directory: {dataset_path}")

    hf_repo_file_list = get_hf_file_list(repo, repo_type, hf_token)

    for dataset in tqdm(dataset_list, desc="Processing training sets"):
        make_dataset(
            repo=repo,
            repo_type=repo_type,
            hf_token=hf_token,
            hf_repo_file_list=hf_repo_file_list,
            hf_folder=dataset[0],
            path=dataset_path,
            repeat=dataset[2],
            tag=dataset[1],
        )
    logger.info(f"Training sets processed, path: {dataset_path}")
    # hf_list = filter_file(hf_repo_file_list, folder)
    # logging.info(f"Downloading files from {repo}")
    # task_list = []
    # for filename in tqdm(hf_list, desc="Creating download tasks"):
    #     task_list.append([repo, repo_type, filename, root_path, hf_token])

    # model_downloader = ModelDownload(task_list)
    # model_downloader.start_threads(num_threads=5)
    # print(f"Finished downloading {repo}")


if __name__ == "__main__":
    main()
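# Usage (a sketch; assumes huggingface_hub and tqdm are installed):
#   python make_artist_all_in_one_dataset_from_hf.py
# The finished set lands in ./artist_all_in_one as "<repeat>_<name>" folders.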