Upload 7 files
- app.py +219 -0
- app_log.txt +4 -0
- error_log.txt +60 -0
- indexer.py +92 -0
- nsfw_classification_results.parquet +3 -0
- scrape_images_worker.py +171 -0
- wsServer.py +45 -0
app.py
ADDED
@@ -0,0 +1,219 @@
import os
import re
import streamlit as st
import subprocess
from zipfile import ZipFile
from PIL import Image, ImageFilter, UnidentifiedImageError
import pandas as pd
import io

# Prefix and file paths
sth = "https___nhentai_net_g_"  # folder-name prefix to strip
parquet_file = "nsfw_classification_results.parquet"  # path to the Parquet index
log_file = "app_log.txt"  # path to the log file

# Sort file names by page number
def sort_files_by_page_number(file_list):
    def extract_page_number(filename):
        # Matches both old-style names (page_10.jpg) and the worker's
        # current zero-padded naming (page_00001_img_00001.jpg)
        match = re.search(r'page_(\d+)', filename)
        if match:
            return int(match.group(1))
        return 0
    return sorted(file_list, key=extract_page_number)

# Get image folders, sorted by modification time (newest first)
def get_image_folders(base_folder='scraped_images'):
    if not os.path.exists(base_folder):
        os.makedirs(base_folder)

    folder_paths = [os.path.join(base_folder, f) for f in os.listdir(base_folder) if os.path.isdir(os.path.join(base_folder, f))]

    # Build a list of (name, mtime, path) tuples
    folder_info = []
    for folder_path in folder_paths:
        mtime = os.path.getmtime(folder_path)
        folder_name = os.path.basename(folder_path)
        # Strip the prefix
        if folder_name.startswith(sth):
            folder_name = folder_name.replace(sth, "")
        folder_info.append((folder_name, mtime, folder_path))

    # Sort by modification time, newest first
    folder_info.sort(key=lambda x: x[1], reverse=True)

    # Return only the folder names
    sorted_folders = [info[0] for info in folder_info]

    return sorted_folders

# Zip up a folder
def create_zip_of_folder(folder_path, zip_name):
    with ZipFile(zip_name, 'w') as zipf:
        for root, dirs, files in os.walk(folder_path):
            for file in files:
                zipf.write(os.path.join(root, file), os.path.relpath(os.path.join(root, file), folder_path))

# Run a subprocess and redirect its output to the log file
def run_subprocess(command):
    with open(log_file, 'a', encoding='shift_jis') as log_f:
        process = subprocess.Popen(
            command,
            stdout=log_f,
            stderr=log_f,
            text=True
        )
        process.wait()

# Load the Parquet index
def load_parquet_data(parquet_file):
    if os.path.exists(parquet_file):
        return pd.read_parquet(parquet_file)
    else:
        st.error(f"{parquet_file} が見つかりません。スクレイピング後にインデックスが作成される必要があります。")
        return None

# Apply a Gaussian blur to unsafe images
def apply_gaussian_blur_if_unsafe(image, label, show_unsafe):
    label = label.lower()
    if label == "unsafe" and not show_unsafe:
        blurred_image = image.filter(ImageFilter.GaussianBlur(18))
        img_byte_arr = io.BytesIO()
        blurred_image.save(img_byte_arr, format='PNG')
        img_byte_arr = img_byte_arr.getvalue()
        return img_byte_arr
    else:
        img_byte_arr = io.BytesIO()
        image.save(img_byte_arr, format='PNG')
        img_byte_arr = img_byte_arr.getvalue()
        return img_byte_arr

# Callback: select a folder and switch the view
def open_folder(folder):
    st.session_state['selected_folder'] = folder
    st.session_state['current_view'] = 'Selected Folder'

# Streamlit UI
st.title('画像ギャラリーとダウンロード')

# Create the show_unsafe checkbox and keep its value in st.session_state
if 'show_unsafe' not in st.session_state:
    st.session_state['show_unsafe'] = False

st.session_state['show_unsafe'] = st.checkbox('Unsafe画像をブラーなしで表示', value=st.session_state['show_unsafe'])

# Load the data from the Parquet file
df = load_parquet_data(parquet_file)

# URL input
url = st.text_input('スクレイピングするURLを入力してください', '')

# Views switched via a radio button
views = ["Gallery", "Logs", "Selected Folder"]

# Initial view
if 'current_view' not in st.session_state:
    st.session_state['current_view'] = 'Gallery'

# Select the view with a radio button
selected_view = st.radio("ビューを選択", views, index=views.index(st.session_state['current_view']))

# Update the current view
if selected_view != st.session_state['current_view']:
    st.session_state['current_view'] = selected_view

# "Gallery" view
if st.session_state['current_view'] == "Gallery":
    st.header("ギャラリー")

    if st.button('スクレイピングを開始'):
        if url:
            # Clear the log file
            open(log_file, 'w').close()
            # Run scraping and indexing in sequence
            run_subprocess(["python", "scrape_images_worker.py", url])
            run_subprocess(["python", "indexer.py"])
            st.success("スクレイピングとインデックス作成が完了しました。")

    # Build the gallery from the scraped folders
    folders = get_image_folders()

    if folders:
        col1, col2 = st.columns(2)
        if 'selected_folder' not in st.session_state:
            st.session_state['selected_folder'] = None

        for i, folder in enumerate(folders):
            if "http" in folder:
                folder_path = os.path.join('scraped_images', folder)
            else:
                folder_path = os.path.join('scraped_images', sth + folder)
            image_files = [f for f in os.listdir(folder_path) if f.endswith(('jpg', 'png'))]
            image_files = sort_files_by_page_number(image_files)

            if image_files:
                if i % 2 == 0:
                    with col1:
                        st.image(os.path.join(folder_path, image_files[0]), caption=f"{folder} - 1ページ目", use_column_width=True)
                        st.button(f'{folder} を開く', key=f"open_{folder}_1", on_click=open_folder, args=(folder,))
                else:
                    with col2:
                        st.image(os.path.join(folder_path, image_files[0]), caption=f"{folder} - 1ページ目", use_column_width=True)
                        st.button(f'{folder} を開く', key=f"open_{folder}_2", on_click=open_folder, args=(folder,))
    else:
        st.write('画像フォルダが見つかりません。')

# "Logs" view
elif st.session_state['current_view'] == "Logs":
    st.header("ログ")
    if os.path.exists(log_file):
        # errors='replace' so mixed-encoding log output does not crash the view
        with open(log_file, 'r', encoding='shift_jis', errors='replace') as f:
            log_text = f.read()
            st.text_area("ログ", value=log_text, height=400)
    else:
        st.write("ログがありません。スクレイピングを開始してください。")

# "Selected Folder" view
elif st.session_state['current_view'] == "Selected Folder":
    st.header("選択されたフォルダ")

    if 'selected_folder' in st.session_state and st.session_state['selected_folder']:
        selected_folder = st.session_state['selected_folder']

        if "http" in selected_folder:
            folder_path = os.path.join('scraped_images', selected_folder)
        else:
            folder_path = os.path.join('scraped_images', sth + selected_folder)

        st.subheader(f"フォルダ: {selected_folder} の画像一覧")

        if df is not None:
            image_files = [f for f in os.listdir(folder_path) if f.endswith(('jpg', 'png'))]
            image_files = sort_files_by_page_number(image_files)

            if image_files:
                for image_file in image_files:
                    image_path = os.path.join(folder_path, image_file)
                    label_row = df[df['file_path'] == image_path]

                    if not label_row.empty:
                        label = label_row['label'].values[0]
                    else:
                        label = "Unknown"

                    try:
                        image = Image.open(image_path)
                        img_byte_arr = apply_gaussian_blur_if_unsafe(image, label, st.session_state['show_unsafe'])
                        st.image(img_byte_arr, caption=f"{image_file} - {label}", use_column_width=True)
                    except UnidentifiedImageError:
                        st.error(f"🚫 画像ファイルを識別できません: {image_file}")
                        continue
            else:
                st.warning("選択されたフォルダに画像が存在しません。")

        zip_name = f'{selected_folder}.zip'
        if st.button('画像をダウンロード'):
            create_zip_of_folder(folder_path, zip_name)
            with open(zip_name, 'rb') as f:
                st.download_button('ダウンロード', f, file_name=zip_name)
    else:
        st.write('画像フォルダが選択されていません。Galleryビューでフォルダを選択してください。')
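A quick sanity check of the sorting helper above, as a standalone sketch (the file names are made up for illustration; extracting the page number keeps page_2 before page_10, which a plain lexical sort would not):

import re

def extract_page_number(filename):
    match = re.search(r'page_(\d+)', filename)
    return int(match.group(1)) if match else 0

files = ["page_10.jpg", "page_2.jpg", "page_00001_img_00001.jpg"]
print(sorted(files, key=extract_page_number))
# -> ['page_00001_img_00001.jpg', 'page_2.jpg', 'page_10.jpg']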
app_log.txt
ADDED
@@ -0,0 +1,4 @@
2024-09-29 22:23:25.498183: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-29 22:23:27.651496: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
C:\Users\asada\anaconda3\Lib\site-packages\huggingface_hub\file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
  warnings.warn(
error_log.txt
ADDED
@@ -0,0 +1,60 @@
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_10.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_12.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_14.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_15.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_4.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_7.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_10.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_12.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_14.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_15.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_4.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_7.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_10.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_12.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_14.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_15.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_4.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_7.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_10.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_12.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_14.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_15.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_4.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_7.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_10.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_12.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_14.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_15.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_4.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_7.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_10.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_12.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_14.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_15.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_4.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_7.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_10.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_12.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_14.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_15.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_4.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_7.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_10.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_12.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_14.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_15.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_4.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_7.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_10.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_12.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_14.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_15.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_4.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_7.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_10.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_12.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_14.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_15.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_4.jpg. Skipping...
Unidentified image file: scraped_images\https___nhentai_net_g_528984_\page_7.jpg. Skipping...
indexer.py
ADDED
@@ -0,0 +1,92 @@
import os
import pandas as pd
from PIL import Image, UnidentifiedImageError
import torch
from torchvision import transforms
from transformers import AutoProcessor, FocalNetForImageClassification
import pyarrow as pa
import pyarrow.parquet as pq

# Image folder and model path
image_folder = "scraped_images"  # path to the image folder
model_path = "MichalMlodawski/nsfw-image-detection-large"  # NSFW model

# Recursively collect jpg files, including subfolders
jpg_files = []
for root, dirs, files in os.walk(image_folder):
    for file in files:
        if file.lower().endswith(".jpg"):
            jpg_files.append(os.path.join(root, file))

# Make sure there is something to classify
if not jpg_files:
    print("No jpg files found in folder:", image_folder)
    exit()

# Load the model and processor
feature_extractor = AutoProcessor.from_pretrained(model_path)
model = FocalNetForImageClassification.from_pretrained(model_path)
model.eval()

# Image transform
transform = transforms.Compose([
    transforms.Resize((512, 512)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

# Mapping from labels to NSFW categories
label_to_category = {
    "LABEL_0": "Safe",
    "LABEL_1": "Questionable",
    "LABEL_2": "Unsafe"
}

# Results are collected here
results = []

# Log file that records corrupted image files
error_log = "error_log.txt"

# Classify each image and collect the result
for jpg_file in jpg_files:
    try:
        # Open the image
        image = Image.open(jpg_file).convert("RGB")
    except UnidentifiedImageError:
        # Record images that cannot be identified and skip them
        with open(error_log, "a", encoding="utf-8") as log_file:
            log_file.write(f"Unidentified image file: {jpg_file}. Skipping...\n")
        print(f"Unidentified image file: {jpg_file}. Skipping...")
        continue

    # (not passed to the model; the processor output below is what is used)
    image_tensor = transform(image).unsqueeze(0)

    # Run inference
    inputs = feature_extractor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
        probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
        confidence, predicted = torch.max(probabilities, 1)

    # Resolve the label
    label = model.config.id2label[predicted.item()]
    category = label_to_category.get(label, "Unknown")

    # Append the result
    results.append({
        "file_path": jpg_file,
        "label": label,
        "category": category,
        "confidence": confidence.item() * 100
    })

# Convert the results to a DataFrame
df = pd.DataFrame(results)

# Save as Parquet
parquet_file = "nsfw_classification_results.parquet"
table = pa.Table.from_pandas(df)
pq.write_table(table, parquet_file)

print(f"Classification completed and saved to {parquet_file}!")
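For reference, the Parquet index written above can be inspected directly with pandas; the column names below come from the results dict in indexer.py (the output itself is illustrative, not taken from the committed file):

import pandas as pd

df = pd.read_parquet("nsfw_classification_results.parquet")
# Columns written by indexer.py: file_path, label, category, confidence
print(df.columns.tolist())
print(df.head())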
nsfw_classification_results.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3f85291a8d87eee60eeec7c184794f3bd23928d7abf61f8765d877d59da7c02a
size 65590
scrape_images_worker.py
ADDED
@@ -0,0 +1,171 @@
import os
import re
from playwright.sync_api import sync_playwright
import requests
import sys
from PIL import Image, UnidentifiedImageError
from io import BytesIO

log_file = "app_log.txt"  # path to the log file

# Log format definition
log_format = '%(asctime)s - %(levelname)s - %(message)s'

import logging
file_handler = logging.FileHandler(log_file, encoding='utf-8')
# Logging configuration
logging.basicConfig(
    level=logging.INFO,  # log level INFO
    format='%(asctime)s - %(levelname)s - %(message)s',  # log format
    handlers=[
        logging.StreamHandler(sys.stdout),  # also log to stdout
        file_handler,
    ]
)
logger = logging.getLogger(__name__)

# Generate a filesystem-safe folder name
def generate_safe_folder_name(url):
    # Replace every character that cannot be used in a file name
    safe_name = re.sub(r'[^a-zA-Z0-9_\-]', '_', url)
    return safe_name

# Save an image as JPG at 80% quality
def save_image_as_jpg(image_url, save_folder, image_name):
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
        logger.info(f"フォルダを作成しました: {save_folder}")

    try:
        response = requests.get(image_url, timeout=10)
        response.raise_for_status()  # raise on HTTP errors
    except requests.exceptions.RequestException as e:
        logger.error(f"画像のダウンロード中にエラーが発生しました: {e}")
        return

    try:
        image = Image.open(BytesIO(response.content))
    except UnidentifiedImageError:
        logger.warning(f"未識別の画像ファイル: {image_url}. スキップします。")
        return
    except Exception as e:
        logger.error(f"画像のオープン中にエラーが発生しました: {e}")
        return

    # Convert to JPG and save at 80% quality
    image_path = os.path.join(save_folder, image_name)
    try:
        image.convert("RGB").save(image_path, "JPEG", quality=80)
        logger.info(f"画像を保存しました: {image_path}")
    except Exception as e:
        logger.error(f"画像の保存中にエラーが発生しました: {e}")

# Scrape the images page by page
def scrape_images_by_page(url, folder_name='scraped_images'):
    # Drop a trailing slash from the URL
    original_url = url
    url = url.rstrip('/')
    logger.info(f"処理するURL: {url}")

    with sync_playwright() as p:
        browser = p.chromium.launch(headless=False)  # run with a visible browser
        page = browser.new_page()

        # Open the initial page
        page.goto(url)
        logger.info(f"ページにアクセスしました: {url}")

        # Wait until the page has fully loaded
        page.wait_for_load_state('networkidle')
        logger.info("ページの読み込みが完了しました。")

        # Inject JavaScript that disables lazy loading
        try:
            page.evaluate("""
                document.querySelectorAll('img[loading="lazy"]').forEach(img => {
                    img.setAttribute('loading', 'eager');
                    img.src = img.src; // force a reload of the image
                });
            """)
            logger.info("lazy-loadingを無効化しました。")
        except Exception as eval_error:
            logger.warning(f"JavaScriptの評価中にエラーが発生しました: {eval_error}")

        # Build the destination folder name
        safe_folder_name = generate_safe_folder_name(url)
        folder_path = os.path.join(folder_name, safe_folder_name)
        logger.info(f"保存先フォルダ: {folder_path}")

        # Determine the number of pages
        try:
            # Selector that holds the page count
            page_count_selector = 'div.tag-container:nth-child(8) > span:nth-child(1) > a:nth-child(1) > span:nth-child(1)'
            page_count_text = page.locator(page_count_selector).text_content().strip()
            num_pages = int(re.search(r'\d+', page_count_text).group())
            logger.info(f"セレクタ '{page_count_selector}' からページ数を取得: {num_pages}")
        except Exception as e:
            logger.warning(f"セレクタ '{page_count_selector}' からページ数を取得できませんでした: {e}")
            # Fallback if the selector is not found
            try:
                fallback_selector = 'section.reader-bar:nth-child(2) > div:nth-child(2) > button:nth-child(3) > span:nth-child(3)'
                page.wait_for_selector(fallback_selector, timeout=5000)
                num_pages_text = page.locator(fallback_selector).text_content().strip()
                num_pages = int(re.search(r'\d+', num_pages_text).group())
                logger.info(f"セレクタ '{fallback_selector}' からページ数を取得: {num_pages}")
            except Exception as e2:
                logger.error(f"ページ数の取得に失敗しました: {e2}")
                num_pages = 1  # default to a single page

        logger.info(f"総ページ数: {num_pages}")

        # Visit every page and collect its images
        for i in range(1, num_pages + 1):
            page_url = f"{url}/{i}"
            page.goto(page_url)
            logger.info(f"ページにアクセスしました: {page_url}")

            # Wait until the page has fully loaded
            page.wait_for_load_state('networkidle')
            logger.info(f"ページ {i} の読み込みが完了しました。")

            try:
                # Selector for the images
                img_selector = '#image-container > a > img'
                img_elements = page.locator(img_selector)
                img_count = img_elements.count()
                logger.info(f"ページ {i} の画像数: {img_count}")

                if img_count == 0:
                    logger.warning(f"ページ {i} に画像が見つかりません。")
                    continue

                for j in range(img_count):
                    try:
                        image_element = img_elements.nth(j)
                        image_url = image_element.get_attribute('src')
                        if not image_url:
                            # Some pages keep the image URL in data-src instead
                            image_url = image_element.get_attribute('data-src')
                        logger.info(f"取得した画像URL (ページ {i}, 画像 {j + 1}): {image_url}")

                        if image_url:
                            # Zero-padded page and image numbers in the file name
                            image_name = f'page_{str(i).zfill(5)}_img_{str(j + 1).zfill(5)}.jpg'
                            save_image_as_jpg(image_url, folder_path, image_name)
                    except Exception as e:
                        logger.error(f"ページ {i}, 画像 {j + 1} の処理中にエラーが発生しました: {e}")
                        continue
            except Exception as e:
                logger.error(f"ページ {i} の画像取得中にエラーが発生しました: {e}")
                continue

        browser.close()
        logger.info("ブラウザを閉じました。")

if __name__ == "__main__":
    if len(sys.argv) < 2:
        logger.error("使用方法: python scrape_images_worker.py <URL>")
        sys.exit(1)

    url = sys.argv[1]  # the URL comes from the command line
    folder_name = 'scraped_images'  # default folder name
    scrape_images_by_page(url, folder_name)
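As a side note on how the worker's folder names line up with the sth prefix in app.py: generate_safe_folder_name simply replaces every character outside [a-zA-Z0-9_-] with an underscore, so a gallery URL becomes the long underscore-separated names seen in error_log.txt. A minimal illustration with a placeholder URL:

import re

def generate_safe_folder_name(url):
    # Same substitution as in scrape_images_worker.py
    return re.sub(r'[^a-zA-Z0-9_\-]', '_', url)

print(generate_safe_folder_name("https://example.com/g/12345"))
# -> https___example_com_g_12345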
wsServer.py
ADDED
@@ -0,0 +1,45 @@
import asyncio
import websockets
import json
import time

# Shared data dictionary
shared_data = {
    "latest_sensor_data": None,
    "last_message_time": None
}

# Handle messages from a client and update the shared data
async def handle_client(websocket, path):
    global shared_data
    while True:
        try:
            # Receive a message from the client
            message = await websocket.recv()
            print(f"受信したメッセージ: {message}")

            # Answer "ping" with "pong"
            if message == "ping":
                await websocket.send("pong")
                continue

            # Otherwise the message is expected to be JSON sensor data
            sensor_data = json.loads(message)
            shared_data["latest_sensor_data"] = sensor_data
            shared_data["last_message_time"] = time.time()

            # Print the received data on the server side
            print(f"最新センサーデータ: {shared_data['latest_sensor_data']}")
        except json.JSONDecodeError:
            # Skip messages that are not valid JSON instead of crashing the handler
            print(f"JSONとして解析できないメッセージを受信しました: {message}")
        except websockets.ConnectionClosed:
            print("クライアントとの接続が切断されました")
            break

# Start the WebSocket server
async def main():
    async with websockets.serve(handle_client, "localhost", 8765):
        print("WebSocketサーバーが起動しました。")
        await asyncio.Future()  # run forever

# Run the main function
if __name__ == "__main__":
    asyncio.run(main())
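For completeness, a minimal client sketch for exercising wsServer.py (assuming the same websockets package is installed; the sensor payload is an arbitrary example, not a schema defined by the server):

import asyncio
import json
import websockets

async def main():
    async with websockets.connect("ws://localhost:8765") as ws:
        # Health check: the server answers "ping" with "pong"
        await ws.send("ping")
        print(await ws.recv())

        # Send one JSON sensor reading; the server stores it in shared_data
        await ws.send(json.dumps({"temperature": 23.5}))

asyncio.run(main())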