animikhaich committed on
Commit ac90b4d · 1 Parent(s): 47c18e5

Added user session monitoring, a drop-down selection for the generated music, and a loading spinner
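The session handling added here follows a simple pattern: Streamlit's script-run context exposes a per-visitor session_id, which is sanitized into a directory name so each user's temporary files stay separate. A minimal standalone sketch of that pattern, assuming Streamlit 1.x (the session_workdir helper name is illustrative and not part of the app):

```python
import os

from streamlit.runtime.scriptrunner import get_script_run_ctx


def session_workdir() -> str:
    """Return (and create) a scratch directory unique to the current Streamlit session.

    Hypothetical helper mirroring the pattern in main.py. Note that
    get_script_run_ctx() returns None when called outside a running
    Streamlit session (e.g. from a plain Python shell).
    """
    session_id = get_script_run_ctx().session_id
    dir_name = "_id_" + session_id.replace("-", "_")
    os.makedirs(dir_name, exist_ok=True)
    return dir_name


# Usage: write per-user temporary files into the session's own folder, e.g.
# video_path = os.path.join(session_workdir(), "temp.mp4")
```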

Files changed (1)
  1. main.py +80 -56
main.py CHANGED
@@ -5,13 +5,16 @@ from moviepy.editor import VideoFileClip, AudioFileClip, CompositeAudioClip
 from moviepy.audio.fx.volumex import volumex
 from streamlit.runtime.scriptrunner import get_script_run_ctx
 
+
 def get_session_id():
     session_id = get_script_run_ctx().session_id
-    session_id = session_id.replace('-','_')
-    session_id = '_id_' + session_id
+    session_id = session_id.replace("-", "_")
+    session_id = "_id_" + session_id
     return session_id
 
-print(get_session_id())
+
+user_session_id = get_session_id()
+os.makedirs(user_session_id, exist_ok=True)
 # Define model maps
 video_model_map = {
     "Fast": "flash",
@@ -80,7 +83,7 @@ if "orig_audio_vol" not in st.session_state:
     st.session_state.orig_audio_vol = 100
 if "generated_audio_vol" not in st.session_state:
     st.session_state.generated_audio_vol = 100
-
+
 # Sidebar
 st.sidebar.title("Settings")
 
@@ -153,11 +156,11 @@ video_descriptor, audio_generator = load_models(
 uploaded_video = st.file_uploader("Upload Video", type=["mp4"])
 if uploaded_video is not None:
     st.session_state.uploaded_video = uploaded_video
-    with open("temp.mp4", mode="wb") as w:
+    with open(f"{user_session_id}/temp.mp4", mode="wb") as w:
         w.write(uploaded_video.getvalue())
 
 # Video Player
-if os.path.exists("temp.mp4") and uploaded_video is not None:
+if os.path.exists(f"{user_session_id}/temp.mp4") and uploaded_video is not None:
     st.video(uploaded_video)
 
 # Submit button if video is not uploaded
@@ -168,12 +171,12 @@ if generate_button:
 
     with st.spinner("Analyzing video..."):
         video_description = video_descriptor.describe_video(
-            "temp.mp4",
+            f"{user_session_id}/temp.mp4",
             genre=st.session_state.music_genre,
             bpm=st.session_state.music_bpm,
             user_keywords=st.session_state.user_keywords,
         )
-        video_duration = VideoFileClip("temp.mp4").duration
+        video_duration = VideoFileClip(f"{user_session_id}/temp.mp4").duration
         music_prompt = video_description["Music Prompt"]
 
         st.success("Video description generated successfully.")
@@ -188,7 +191,7 @@ if generate_button:
     music_prompt = st.text_area(
         "Music Prompt",
         music_prompt,
-        disabled=False,
+        disabled=True,
         height=120,
     )
 
@@ -204,65 +207,86 @@ if generate_button:
     st.success("Music generated successfully.")
     st.balloons()
 
+
 # Callback function for radio button selection change
 def on_audio_selection_change():
-    selected_index = audio_options.index(st.session_state.selected_audio) - 1
-    if selected_index >= 0:
-        st.session_state.selected_audio_path = st.session_state.audio_paths[selected_index]
+    selected_audio_index = st.session_state.selected_audio
+    if selected_audio_index > 0:
+        st.session_state.selected_audio_path = st.session_state.audio_paths[
+            selected_audio_index - 1
+        ]
     else:
        st.session_state.selected_audio_path = None
 
-# Display radio buttons and handle audio selections
+
 if st.session_state.audio_paths:
+    # Dropdown to select one of the generated audio files
+    audio_options = ["None"] + [
+        f"Generated Music {i+1}" for i in range(len(st.session_state.audio_paths))
+    ]
+
+    # Display the audio files
     for i, audio_path in enumerate(st.session_state.audio_paths):
         st.audio(audio_path, format="audio/wav")
-
-    audio_options = ["None"]+[f"Sample {i+1}" for i in range(len(st.session_state.audio_paths))]
-    st.radio(
+
+    selected_audio_index = st.selectbox(
         "Select one of the generated audio files for further processing:",
-        audio_options,
+        range(len(audio_options)),
+        format_func=lambda x: audio_options[x],
         index=0,
         key="selected_audio",
-        on_change=on_audio_selection_change
     )
-
-    if st.session_state.selected_audio_path:
-        st.write(f"**Selected Audio:** {st.session_state.selected_audio_path}")
+
+    # Button to confirm the selection
+    if st.button("Add Generated Music to Video"):
+        on_audio_selection_change()
 
 # Handle Audio Mixing and Export
 if st.session_state.selected_audio_path is not None:
-    orig_clip = VideoFileClip("temp.mp4")
-    orig_clip_audio = orig_clip.audio
-    generated_audio = AudioFileClip(st.session_state.selected_audio_path)
-
-    st.session_state.orig_audio_vol = st.slider(
-        "Original Audio Volume", 0, 200, st.session_state.orig_audio_vol
-    )
-
-    st.session_state.generated_audio_vol = st.slider(
-        "Selected Sample Volume", 0, 200, st.session_state.generated_audio_vol
-    )
-
-    orig_clip_audio = volumex(orig_clip_audio, float(st.session_state.orig_audio_vol/100))
-    generated_audio = volumex(generated_audio, float(st.session_state.generated_audio_vol/100))
-
-    orig_clip.audio = CompositeAudioClip([orig_clip_audio, generated_audio])
-
-    final_video_path="out_tmp.mp4"
-    orig_clip.write_videofile(final_video_path)
-
-    orig_clip.close()
-    generated_audio.close()
-
-    st.session_state.final_video_path = final_video_path
-
-    st.video(final_video_path)
-
-    if st.session_state.final_video_path:
-        with open(st.session_state.final_video_path, "rb") as video_file:
-            st.download_button(
-                label="Download final video",
-                data=video_file,
-                file_name="final_video.mp4",
-                mime="video/mp4",
-            )
+    with st.spinner("Mixing Audio..."):
+        orig_clip = VideoFileClip(f"{user_session_id}/temp.mp4")
+        orig_clip_audio = orig_clip.audio
+        generated_audio = AudioFileClip(st.session_state.selected_audio_path)
+
+        st.session_state.orig_audio_vol = st.slider(
+            "Original Audio Volume",
+            0,
+            200,
+            st.session_state.orig_audio_vol,
+            format="%d%%",
+        )
+
+        st.session_state.generated_audio_vol = st.slider(
+            "Generated Music Volume",
+            0,
+            200,
+            st.session_state.generated_audio_vol,
+            format="%d%%",
+        )
+
+        orig_clip_audio = volumex(
+            orig_clip_audio, float(st.session_state.orig_audio_vol / 100)
+        )
+        generated_audio = volumex(
+            generated_audio, float(st.session_state.generated_audio_vol / 100)
+        )
+
+        orig_clip.audio = CompositeAudioClip([orig_clip_audio, generated_audio])
+
+        final_video_path = f"{user_session_id}/out_tmp.mp4"
+        orig_clip.write_videofile(final_video_path)
+
+        orig_clip.close()
+        generated_audio.close()
+
+        st.session_state.final_video_path = final_video_path
+
+        st.video(final_video_path)
+    if st.session_state.final_video_path:
+        with open(st.session_state.final_video_path, "rb") as video_file:
+            st.download_button(
+                label="Download final video",
+                data=video_file,
+                file_name="final_video.mp4",
+                mime="video/mp4",
+            )
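The drop-down added in this commit stores an integer index under st.session_state and maps it to a human-readable label through format_func, with index 0 reserved for "None". A minimal standalone sketch of that selection pattern, assuming Streamlit 1.x (the audio_paths values below are placeholders, not files produced by the app):

```python
import streamlit as st

# Illustrative stand-ins for the generated audio files.
audio_paths = ["sample_1.wav", "sample_2.wav"]

# Index 0 means "no selection"; indices 1..N map to audio_paths[0..N-1].
audio_options = ["None"] + [f"Generated Music {i + 1}" for i in range(len(audio_paths))]

selected_index = st.selectbox(
    "Select one of the generated audio files for further processing:",
    range(len(audio_options)),               # the widget stores an integer index
    format_func=lambda i: audio_options[i],  # but displays a readable label
    index=0,
)

selected_path = audio_paths[selected_index - 1] if selected_index > 0 else None
st.write(f"Selected: {selected_path}")
```

Keeping the selection as an index rather than a label string is what lets the callback in main.py recover the matching entry of st.session_state.audio_paths with simple arithmetic.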