Spaces:
Running
Running
Update steganography.py
Browse files- steganography.py +126 -2
steganography.py
CHANGED
@@ -7,6 +7,9 @@ import matplotlib.pyplot as plt
|
|
7 |
import numpy as np
|
8 |
import soundfile as sf
|
9 |
from PIL import Image, ImageDraw, ImageFont
|
|
|
|
|
|
|
10 |
|
11 |
DEFAULT_FONT_PATH = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"
|
12 |
DEFAULT_SAMPLE_RATE = 22050
|
@@ -116,6 +119,117 @@ def gradio_image_to_audio_fn(upload_image):
|
|
116 |
def gradio_decode_fn(upload_audio):
    """Gradio callback: render the spectrogram hidden in *upload_audio* as an image."""
    decoded_image = display_audio_spectrogram(upload_audio)
    return decoded_image
|
118 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
119 |
def create_gradio_interface():
|
120 |
with gr.Blocks(title="Audio Steganography", css="footer{display:none !important}", theme=gr.themes.Soft(primary_hue="green", secondary_hue="green", spacing_size="sm", radius_size="lg")) as txt2spec:
|
121 |
with gr.Tab("Text to Spectrogram"):
|
@@ -147,7 +261,7 @@ def create_gradio_interface():
|
|
147 |
|
148 |
convert_button.click(gradio_image_to_audio_fn, inputs=[upload_image], outputs=[output_audio_from_image])
|
149 |
|
150 |
-
with gr.Tab("Audio Spectrogram"):
|
151 |
with gr.Group():
|
152 |
with gr.Column():
|
153 |
upload_audio = gr.Audio(type="filepath", label="Upload audio", scale=3)
|
@@ -158,8 +272,18 @@ def create_gradio_interface():
|
|
158 |
|
159 |
decode_button.click(gradio_decode_fn, inputs=[upload_audio], outputs=[decoded_image])
|
160 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
161 |
return txt2spec
|
162 |
|
163 |
if __name__ == "__main__":
|
164 |
txt2spec = create_gradio_interface()
|
165 |
-
txt2spec.launch(share=True)
|
|
|
7 |
import numpy as np
|
8 |
import soundfile as sf
|
9 |
from PIL import Image, ImageDraw, ImageFont
|
10 |
+
import os
|
11 |
+
import cv2
|
12 |
+
from moviepy.editor import VideoFileClip, AudioFileClip
|
13 |
|
14 |
DEFAULT_FONT_PATH = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"
|
15 |
DEFAULT_SAMPLE_RATE = 22050
|
|
|
119 |
def gradio_decode_fn(upload_audio):
|
120 |
return display_audio_spectrogram(upload_audio)
|
121 |
|
122 |
+
|
123 |
+
def display_progress(percent, message, progress=gr.Progress()):
    """Push *percent* (0..1) and a status *message* to the Gradio progress tracker."""
    progress(percent, desc=message)
|
125 |
+
|
126 |
+
def extract_audio(video_path, progress):
    """Pull the audio track out of the video at *video_path*.

    Writes the track to ``extracted_audio.wav`` and returns that path.
    Returns ``None`` (best-effort, reported via the progress bar) when the
    clip has no audio or MoviePy fails to read it.
    """
    display_progress(0.1, "Extracting audio from video", progress)
    try:
        clip = VideoFileClip(video_path)
        if clip.audio is None:
            raise ValueError("No audio found in the video")
        audio_path = "extracted_audio.wav"
        clip.audio.write_audiofile(audio_path)
    except Exception as e:
        display_progress(0.2, f"Failed to extract audio: {e}", progress)
        return None
    display_progress(0.2, "Audio extracted", progress)
    return audio_path
|
139 |
+
|
140 |
+
def extract_frames(video_path, progress):
    """Read every frame of the video at *video_path* into a list.

    Frames come back as OpenCV BGR arrays, in order. Returns ``None``
    (best-effort, reported via the progress bar) if OpenCV fails.
    """
    display_progress(0.3, "Extracting frames from video", progress)
    try:
        capture = cv2.VideoCapture(video_path)
        frames = []
        while True:
            ok, frame = capture.read()
            if not ok:
                break
            frames.append(frame)
        capture.release()
    except Exception as e:
        display_progress(0.4, f"Failed to extract frames: {e}", progress)
        return None
    display_progress(0.4, "Frames extracted", progress)
    return frames
|
155 |
+
|
156 |
+
def frame_to_spectrogram(frame, sr=DEFAULT_SAMPLE_RATE):
    """Treat a video *frame* as a spectrogram and invert it to a waveform.

    The frame is converted to grayscale, rescaled from [0, 255] to
    [0, 100], flipped vertically so low frequencies sit at the bottom,
    then inverted to audio with Griffin-Lim.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    magnitudes = np.flipud(gray.astype(np.float32) / 255.0 * 100.0)
    return librosa.griffinlim(magnitudes)
|
161 |
+
|
162 |
+
def save_audio(y, sr=DEFAULT_SAMPLE_RATE, audio_path='output_frame_audio.wav'):
    """Write waveform *y* to disk at sample rate *sr* and return the path.

    Generalization: the destination used to be hard-coded; it is now the
    *audio_path* parameter with the same value as its default, so every
    existing caller behaves identically while new callers can choose a
    destination (e.g. a per-frame temp file).
    """
    sf.write(audio_path, y, sr)
    return audio_path
|
166 |
+
|
167 |
+
def save_spectrogram_image(S, frame_number, temp_dir):
    """Render spectrogram *S* to a PNG named after *frame_number* in *temp_dir*.

    Returns the path of the saved image.
    """
    image_path = os.path.join(temp_dir, f'spectrogram_frame_{frame_number}.png')
    plt.figure(figsize=(10, 4))
    librosa.display.specshow(S)
    plt.tight_layout()
    plt.savefig(image_path)
    plt.close()
    return image_path
|
175 |
+
|
176 |
+
def process_video_frames(frames, sr=DEFAULT_SAMPLE_RATE, temp_dir=None, progress=gr.Progress()):
    """Convert every video frame into a mel-spectrogram image frame.

    For each frame: invert it to a waveform (Griffin-Lim via
    frame_to_spectrogram), compute a mel spectrogram of that waveform,
    save it as a PNG in *temp_dir*, and read the PNG back as a BGR frame
    for the output video.

    Bug fix: the per-frame progress call used
    ``0.5 + int((i + 1) / total_frames * 0.7)`` — the ``int()`` truncated
    the fraction to 0, so the bar sat frozen at 0.5 for the whole loop.
    It now advances smoothly from 0.5 to 0.8, matching the 0.8 reported
    when the loop finishes.

    Returns the list of processed (BGR image) frames.
    """
    processed_frames = []
    total_frames = len(frames)
    for i, frame in enumerate(frames):
        y = frame_to_spectrogram(frame, sr)
        S = librosa.feature.melspectrogram(y=y, sr=sr)
        image_path = save_spectrogram_image(S, i, temp_dir)
        processed_frames.append(cv2.imread(image_path))
        display_progress(0.5 + (i + 1) / total_frames * 0.3, f"Frame processing {i + 1}/{total_frames}", progress)
    display_progress(0.8, "All frames processed", progress)
    return processed_frames
|
188 |
+
|
189 |
+
def save_video_from_frames(frames, output_path, fps=30):
    """Encode *frames* (same-sized BGR arrays) as an mp4 at *output_path*.

    Robustness fixes: an empty frame list now raises an explicit
    ValueError instead of the opaque IndexError from ``frames[0]``, and
    the writer is released in a ``finally`` so the file handle is not
    leaked if a write fails.
    """
    if not frames:
        raise ValueError("No frames to write")
    height, width, _ = frames[0].shape
    writer = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
    try:
        for frame in frames:
            writer.write(frame)
    finally:
        writer.release()
|
195 |
+
|
196 |
+
def add_audio_to_video(video_path, audio_path, output_path, progress):
    """Mux the audio at *audio_path* onto the video at *video_path*.

    Writes the combined clip to *output_path*. Failures are best-effort:
    they are reported through the progress bar rather than raised,
    matching the extractors' error style.
    """
    display_progress(0.9, "Adding audio back to video", progress)
    try:
        base_clip = VideoFileClip(video_path)
        track = AudioFileClip(audio_path)
        base_clip.set_audio(track).write_videofile(output_path, codec='libx264', audio_codec='aac')
        display_progress(1, "Video's ready", progress)
    except Exception as e:
        display_progress(1, f"Failed to add audio to video: {e}", progress)
|
206 |
+
|
207 |
+
def process_video(video_path, progress=gr.Progress()):
    """End-to-end pipeline: turn a video into a video of its frames' spectrograms.

    Trims the input to its first 10 seconds when longer, extracts audio
    and frames, converts every frame into a mel-spectrogram image,
    rebuilds a video from those images, then muxes the original audio
    back on.

    Returns the output video path on success, or an error-message string
    (the Gradio output component receives either).
    """
    try:
        clip = VideoFileClip(video_path)
        if clip.duration > 10:
            # Cap processing cost by keeping only the first 10 seconds.
            trimmed = clip.subclip(0, 10)
            temp_trimmed_video_path = "trimmed_video.mp4"
            trimmed.write_videofile(temp_trimmed_video_path, codec='libx264')
            video_path = temp_trimmed_video_path
    except Exception as e:
        return f"Failed to load video: {e}"

    audio_path = extract_audio(video_path, progress)
    if audio_path is None:
        return "Failed to extract audio from video."

    frames = extract_frames(video_path, progress)
    if frames is None:
        return "Failed to extract frames from video."

    output_video_path = 'output_video_with_audio.mp4'
    with tempfile.TemporaryDirectory() as temp_dir:
        spectrogram_frames = process_video_frames(frames, temp_dir=temp_dir, progress=progress)
        silent_video_path = os.path.join(temp_dir, 'processed_video.mp4')
        save_video_from_frames(spectrogram_frames, silent_video_path)
        add_audio_to_video(silent_video_path, audio_path, output_video_path, progress)
    return output_video_path
|
232 |
+
|
233 |
def create_gradio_interface():
|
234 |
with gr.Blocks(title="Audio Steganography", css="footer{display:none !important}", theme=gr.themes.Soft(primary_hue="green", secondary_hue="green", spacing_size="sm", radius_size="lg")) as txt2spec:
|
235 |
with gr.Tab("Text to Spectrogram"):
|
|
|
261 |
|
262 |
convert_button.click(gradio_image_to_audio_fn, inputs=[upload_image], outputs=[output_audio_from_image])
|
263 |
|
264 |
+
with gr.Tab("Audio to Spectrogram"):
|
265 |
with gr.Group():
|
266 |
with gr.Column():
|
267 |
upload_audio = gr.Audio(type="filepath", label="Upload audio", scale=3)
|
|
|
272 |
|
273 |
decode_button.click(gradio_decode_fn, inputs=[upload_audio], outputs=[decoded_image])
|
274 |
|
275 |
+
with gr.Tab("Video to Spectrogram"):
|
276 |
+
with gr.Group():
|
277 |
+
video_input = gr.Video(label="Upload video")
|
278 |
+
generate_button = gr.Button("Generate", variant="primary", size="lg")
|
279 |
+
|
280 |
+
with gr.Column(variant="panel"):
|
281 |
+
video_output = gr.Video(label="Video Spectrogram")
|
282 |
+
|
283 |
+
generate_button.click(process_video, inputs=[video_input], outputs=[video_output])
|
284 |
+
|
285 |
return txt2spec
|
286 |
|
287 |
if __name__ == "__main__":
|
288 |
txt2spec = create_gradio_interface()
|
289 |
+
txt2spec.launch(share=True)
|