yerang committed on
Commit
7b53d4f
1 parent: 614c2f6

Rename app_.py to app.py

Files changed (2)
  1. app.py +287 -0
  2. app_.py +0 -248
app.py ADDED
@@ -0,0 +1,287 @@
+ # coding: utf-8
+
+ """
+ The entry point of the Gradio app.
+ """
+
+ import tyro
+ import gradio as gr
+ import os  # needed for os.environ below
+ import os.path as osp
+ from src.utils.helper import load_description
+ from src.gradio_pipeline import GradioPipeline
+ from src.config.crop_config import CropConfig
+ from src.config.argument_config import ArgumentConfig
+ from src.config.inference_config import InferenceConfig
+ import spaces
+ import cv2
+
+
+ # Added
+ from elevenlabs_utils import ElevenLabsPipeline
+ from setup_environment import initialize_environment
+ from src.utils.video import extract_audio
+ from flux_dev import create_flux_tab
+
+ # import gdown
+ # folder_url = f"https://drive.google.com/drive/folders/1UtKgzKjFAOmZkhNK-OYT0caJ_w2XAnib"
+ # gdown.download_folder(url=folder_url, output="pretrained_weights", quiet=False)
+
+
+ initialize_environment()
+
+ import sys
+ sys.path.append('/home/user/.local/lib/python3.10/site-packages')
+ sys.path.append('/home/user/.local/lib/python3.10/site-packages/stf_alternative/src/stf_alternative')
+ sys.path.append('/home/user/.local/lib/python3.10/site-packages/stf_tools/src/stf_tools')
+ sys.path.append('/home/user/app/')
+ sys.path.append('/home/user/app/stf/')
+ sys.path.append('/home/user/app/stf/stf_alternative/')
+ sys.path.append('/home/user/app/stf/stf_alternative/src/stf_alternative')
+ sys.path.append('/home/user/app/stf/stf_tools')
+ sys.path.append('/home/user/app/stf/stf_tools/src/stf_tools')
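+ # NOTE: the absolute paths above assume the Hugging Face Spaces runtime
+ # layout (app code under /home/user/app, user-site packages under
+ # /home/user/.local); they make the vendored stf_alternative and
+ # stf_tools packages importable.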
+
+
+ # Add the CUDA paths to the environment variables
+ os.environ['PATH'] = '/usr/local/cuda/bin:' + os.environ.get('PATH', '')
+ os.environ['LD_LIBRARY_PATH'] = '/usr/local/cuda/lib64:' + os.environ.get('LD_LIBRARY_PATH', '')
+ # Print them so the effective values can be checked in the logs
+ print("PATH:", os.environ['PATH'])
+ print("LD_LIBRARY_PATH:", os.environ['LD_LIBRARY_PATH'])
+
+ from stf_utils import STFPipeline
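+ # STFPipeline only becomes importable after the sys.path additions above;
+ # in the demo below it turns an input audio file into a driving video.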
+
+
+ audio_path = "assets/examples/driving/test_aud.mp3"
+ # audio_path_component = gr.Textbox(label="Input", value="assets/examples/driving/test_aud.mp3")
+
+
+ # Wrapper so the pipeline call runs inside a ZeroGPU allocation (up to 120 s).
+ @spaces.GPU(duration=120)
+ def gpu_wrapped_stf_pipeline_execute(audio_path):
+     return stf_pipeline.execute(audio_path)
+
+
+ ###### Testing in progress ######
+
+
+ stf_pipeline = STFPipeline()
+ driving_video_path = gr.Video()
+
+ # set tyro theme
+ tyro.extras.set_accent_color("bright_cyan")
+ args = tyro.cli(ArgumentConfig)
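+ # args also carries server_name / server_port / share, which demo.launch()
+ # consumes at the bottom of this file.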
+
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
+     with gr.Row():
+         audio_path_component = gr.Textbox(label="Input", value="assets/examples/driving/test_aud.mp3")
+         stf_button = gr.Button("stf test", variant="primary")
+         stf_button.click(
+             fn=gpu_wrapped_stf_pipeline_execute,
+             inputs=[
+                 audio_path_component
+             ],
+             outputs=[driving_video_path]
+         )
+     with gr.Row():
+         driving_video_path.render()
+
+
+ # def partial_fields(target_class, kwargs):
+ #     return target_class(**{k: v for k, v in kwargs.items() if hasattr(target_class, k)})
+
+ # # set tyro theme
+ # tyro.extras.set_accent_color("bright_cyan")
+ # args = tyro.cli(ArgumentConfig)
+
+ # # specify configs for inference
+ # inference_cfg = partial_fields(InferenceConfig, args.__dict__)  # use the attributes of args to initialize InferenceConfig
+ # crop_cfg = partial_fields(CropConfig, args.__dict__)  # use the attributes of args to initialize CropConfig
+
+ # gradio_pipeline = GradioPipeline(
+ #     inference_cfg=inference_cfg,
+ #     crop_cfg=crop_cfg,
+ #     args=args
+ # )
+
+ # # Additional definitions
+ # elevenlabs_pipeline = ElevenLabsPipeline()
+
+ # @spaces.GPU(duration=200)
+ # def gpu_wrapped_elevenlabs_pipeline_generate_voice(text, voice):
+ #     return elevenlabs_pipeline.generate_voice(text, voice)
+
+
+
+ # @spaces.GPU(duration=240)
+ # def gpu_wrapped_execute_video(*args, **kwargs):
+ #     return gradio_pipeline.execute_video(*args, **kwargs)
+
+ # @spaces.GPU(duration=240)
+ # def gpu_wrapped_execute_image(*args, **kwargs):
+ #     return gradio_pipeline.execute_image(*args, **kwargs)
+
+ # def is_square_video(video_path):
+ #     video = cv2.VideoCapture(video_path)
+
+ #     width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
+ #     height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+ #     video.release()
+ #     if width != height:
+ #         raise gr.Error("Error: the video does not have a square aspect ratio. We currently only support square videos")
+
+ #     return gr.update(visible=True)
+
+ # # assets
+ # title_md = "assets/gradio_title.md"
+ # example_portrait_dir = "assets/examples/source"
+ # example_video_dir = "assets/examples/driving"
+ # data_examples = [
+ #     [osp.join(example_portrait_dir, "s9.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, True],
+ #     [osp.join(example_portrait_dir, "s6.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, True],
+ #     [osp.join(example_portrait_dir, "s10.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, True],
+ #     [osp.join(example_portrait_dir, "s5.jpg"), osp.join(example_video_dir, "d18.mp4"), True, True, True, True],
+ #     [osp.join(example_portrait_dir, "s7.jpg"), osp.join(example_video_dir, "d19.mp4"), True, True, True, True],
+ #     [osp.join(example_portrait_dir, "s22.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, True],
+ # ]
+ # #################### interface logic ####################
+
+ # # Define components first
+ # eye_retargeting_slider = gr.Slider(minimum=0, maximum=0.8, step=0.01, label="target eyes-open ratio")
+ # lip_retargeting_slider = gr.Slider(minimum=0, maximum=0.8, step=0.01, label="target lip-open ratio")
+ # retargeting_input_image = gr.Image(type="filepath")
+ # output_image = gr.Image(type="numpy")
+ # output_image_paste_back = gr.Image(type="numpy")
+ # output_video = gr.Video()
+ # output_video_concat = gr.Video()
+
+ # with gr.Blocks(theme=gr.themes.Soft()) as demo:
+ #     # gr.HTML(load_description(title_md))
+
+ #     with gr.Tabs():
+ #         with gr.Tab("Text to LipSync"):
+ #             gr.Markdown("# Text to LipSync")
+ #             with gr.Row():
+ #                 with gr.Column():
+ #                     script_txt = gr.Text()
+ #                 with gr.Column():
+ #                     audio_gen_button = gr.Button("Audio generation", variant="primary")
+ #             with gr.Row():
+ #                 output_audio_path = gr.Audio(label="Generated audio", type="filepath")
+
+ #             gr.Markdown(load_description("assets/gradio_description_upload.md"))
+ #             with gr.Row():
+ #                 with gr.Accordion(open=True, label="Source Portrait"):
+ #                     image_input = gr.Image(type="filepath")
+ #                     gr.Examples(
+ #                         examples=[
+ #                             [osp.join(example_portrait_dir, "s9.jpg")],
+ #                             [osp.join(example_portrait_dir, "s6.jpg")],
+ #                             [osp.join(example_portrait_dir, "s10.jpg")],
+ #                             [osp.join(example_portrait_dir, "s5.jpg")],
+ #                             [osp.join(example_portrait_dir, "s7.jpg")],
+ #                             [osp.join(example_portrait_dir, "s12.jpg")],
+ #                             [osp.join(example_portrait_dir, "s22.jpg")],
+ #                         ],
+ #                         inputs=[image_input],
+ #                         cache_examples=False,
+ #                     )
+ #                 with gr.Accordion(open=True, label="Driving Video"):
+ #                     video_input = gr.Video()
+ #                     gr.Examples(
+ #                         examples=[
+ #                             [osp.join(example_video_dir, "d0.mp4")],
+ #                             [osp.join(example_video_dir, "d18.mp4")],
+ #                             [osp.join(example_video_dir, "d19.mp4")],
+ #                             [osp.join(example_video_dir, "d14_trim.mp4")],
+ #                             [osp.join(example_video_dir, "d6_trim.mp4")],
+ #                         ],
+ #                         inputs=[video_input],
+ #                         cache_examples=False,
+ #                     )
+ #             with gr.Row():
+ #                 with gr.Accordion(open=False, label="Animation Instructions and Options"):
+ #                     gr.Markdown(load_description("assets/gradio_description_animation.md"))
+ #                     with gr.Row():
+ #                         flag_relative_input = gr.Checkbox(value=True, label="relative motion")
+ #                         flag_do_crop_input = gr.Checkbox(value=True, label="do crop")
+ #                         flag_remap_input = gr.Checkbox(value=True, label="paste-back")
+ #             gr.Markdown(load_description("assets/gradio_description_animate_clear.md"))
+ #             with gr.Row():
+ #                 with gr.Column():
+ #                     process_button_animation = gr.Button("🚀 Animate", variant="primary")
+ #                 with gr.Column():
+ #                     process_button_reset = gr.ClearButton([image_input, video_input, output_video, output_video_concat], value="🧹 Clear")
+ #             with gr.Row():
+ #                 with gr.Column():
+ #                     with gr.Accordion(open=True, label="The animated video in the original image space"):
+ #                         output_video.render()
+ #                 with gr.Column():
+ #                     with gr.Accordion(open=True, label="The animated video"):
+ #                         output_video_concat.render()
+ #             with gr.Row():
+ #                 # Examples
+ #                 gr.Markdown("## You can also run the examples below with one click ⬇️")
+ #             with gr.Row():
+ #                 gr.Examples(
+ #                     examples=data_examples,
+ #                     fn=gpu_wrapped_execute_video,
+ #                     inputs=[
+ #                         image_input,
+ #                         video_input,
+ #                         flag_relative_input,
+ #                         flag_do_crop_input,
+ #                         flag_remap_input
+ #                     ],
+ #                     outputs=[output_image, output_image_paste_back],
+ #                     examples_per_page=6,
+ #                     cache_examples=False,
+ #                 )
+
+ #             process_button_animation.click(
+ #                 fn=gpu_wrapped_execute_video,
+ #                 inputs=[
+ #                     image_input,
+ #                     video_input,
+ #                     flag_relative_input,
+ #                     flag_do_crop_input,
+ #                     flag_remap_input
+ #                 ],
+ #                 outputs=[output_video, output_video_concat],
+ #                 show_progress=True
+ #             )
+ #             audio_gen_button.click(
+ #                 fn=gpu_wrapped_elevenlabs_pipeline_generate_voice,
+ #                 inputs=[
+ #                     script_txt
+ #                 ],
+ #                 outputs=[output_audio_path],
+ #                 show_progress=True
+ #             )
+
+ #             # image_input.change(
+ #             #     fn=gradio_pipeline.prepare_retargeting,
+ #             #     inputs=image_input,
+ #             #     outputs=[eye_retargeting_slider, lip_retargeting_slider, retargeting_input_image]
+ #             # )
+ #             video_input.upload(
+ #                 fn=is_square_video,
+ #                 inputs=video_input,
+ #                 outputs=video_input
+ #             )
+
+ #         # Third tab: tab for Flux development
+ #         with gr.Tab("FLUX Dev"):
+ #             flux_demo = create_flux_tab(image_input)  # create the Flux dev tab
+
+ demo.launch(
+     server_port=args.server_port,
+     share=args.share,
+     server_name=args.server_name
+ )
app_.py DELETED
@@ -1,248 +0,0 @@
- # coding: utf-8
-
- """
- The entry point of the Gradio app.
- """
-
- import tyro
- import gradio as gr
- import os.path as osp
- from src.utils.helper import load_description
- from src.gradio_pipeline import GradioPipeline
- from src.config.crop_config import CropConfig
- from src.config.argument_config import ArgumentConfig
- from src.config.inference_config import InferenceConfig
- import spaces
- import cv2
-
-
- # Added
- from elevenlabs_utils import ElevenLabsPipeline
- from setup_environment import initialize_environment
- from src.utils.video import extract_audio
- from flux_dev import create_flux_tab
-
- # import gdown
- # folder_url = f"https://drive.google.com/drive/folders/1UtKgzKjFAOmZkhNK-OYT0caJ_w2XAnib"
- # gdown.download_folder(url=folder_url, output="pretrained_weights", quiet=False)
-
-
- # import sys
- # sys.path.append('/home/user/.local/lib/python3.10/site-packages')
- # sys.path.append('/home/user/.local/lib/python3.10/site-packages/stf_alternative/src/stf_alternative')
- # sys.path.append('/home/user/.local/lib/python3.10/site-packages/stf_tools/src/stf_tools')
- # sys.path.append('/home/user/app/')
- # sys.path.append('/home/user/app/stf/')
- # sys.path.append('/home/user/app/stf/stf_alternative/')
- # sys.path.append('/home/user/app/stf/stf_alternative/src/stf_alternative')
- # sys.path.append('/home/user/app/stf/stf_tools')
- # sys.path.append('/home/user/app/stf/stf_tools/src/stf_tools')
-
-
-
- # # Set the CUDA paths via environment variables
- # os.environ['PATH'] = '/usr/local/cuda/bin:' + os.environ.get('PATH', '')
- # os.environ['LD_LIBRARY_PATH'] = '/usr/local/cuda/lib64:' + os.environ.get('LD_LIBRARY_PATH', '')
- # # Print them for verification
- # print("PATH:", os.environ['PATH'])
- # print("LD_LIBRARY_PATH:", os.environ['LD_LIBRARY_PATH'])
-
- # from stf_utils import STFPipeline
-
-
- def partial_fields(target_class, kwargs):
-     return target_class(**{k: v for k, v in kwargs.items() if hasattr(target_class, k)})
-
- # set tyro theme
- tyro.extras.set_accent_color("bright_cyan")
- args = tyro.cli(ArgumentConfig)
-
- # specify configs for inference
- inference_cfg = partial_fields(InferenceConfig, args.__dict__)  # use the attributes of args to initialize InferenceConfig
- crop_cfg = partial_fields(CropConfig, args.__dict__)  # use the attributes of args to initialize CropConfig
-
- gradio_pipeline = GradioPipeline(
-     inference_cfg=inference_cfg,
-     crop_cfg=crop_cfg,
-     args=args
- )
-
- # Additional definitions
- elevenlabs_pipeline = ElevenLabsPipeline()
-
- @spaces.GPU(duration=200)
- def gpu_wrapped_elevenlabs_pipeline_generate_voice(text, voice):
-     return elevenlabs_pipeline.generate_voice(text, voice)
-
-
-
- @spaces.GPU(duration=240)
- def gpu_wrapped_execute_video(*args, **kwargs):
-     return gradio_pipeline.execute_video(*args, **kwargs)
-
- @spaces.GPU(duration=240)
- def gpu_wrapped_execute_image(*args, **kwargs):
-     return gradio_pipeline.execute_image(*args, **kwargs)
-
- def is_square_video(video_path):
-     video = cv2.VideoCapture(video_path)
-
-     width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
-     height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
-     video.release()
-     if width != height:
-         raise gr.Error("Error: the video does not have a square aspect ratio. We currently only support square videos")
-
-     return gr.update(visible=True)
-
- # assets
- title_md = "assets/gradio_title.md"
- example_portrait_dir = "assets/examples/source"
- example_video_dir = "assets/examples/driving"
- data_examples = [
-     [osp.join(example_portrait_dir, "s9.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, True],
-     [osp.join(example_portrait_dir, "s6.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, True],
-     [osp.join(example_portrait_dir, "s10.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, True],
-     [osp.join(example_portrait_dir, "s5.jpg"), osp.join(example_video_dir, "d18.mp4"), True, True, True, True],
-     [osp.join(example_portrait_dir, "s7.jpg"), osp.join(example_video_dir, "d19.mp4"), True, True, True, True],
-     [osp.join(example_portrait_dir, "s22.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, True],
- ]
- #################### interface logic ####################
-
- # Define components first
- eye_retargeting_slider = gr.Slider(minimum=0, maximum=0.8, step=0.01, label="target eyes-open ratio")
- lip_retargeting_slider = gr.Slider(minimum=0, maximum=0.8, step=0.01, label="target lip-open ratio")
- retargeting_input_image = gr.Image(type="filepath")
- output_image = gr.Image(type="numpy")
- output_image_paste_back = gr.Image(type="numpy")
- output_video = gr.Video()
- output_video_concat = gr.Video()
-
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
-     # gr.HTML(load_description(title_md))
-
-     with gr.Tabs():
-         with gr.Tab("Text to LipSync"):
-             gr.Markdown("# Text to LipSync")
-             with gr.Row():
-                 with gr.Column():
-                     script_txt = gr.Text()
-                 with gr.Column():
-                     audio_gen_button = gr.Button("Audio generation", variant="primary")
-             with gr.Row():
-                 output_audio_path = gr.Audio(label="Generated audio", type="filepath")
-
-             gr.Markdown(load_description("assets/gradio_description_upload.md"))
-             with gr.Row():
-                 with gr.Accordion(open=True, label="Source Portrait"):
-                     image_input = gr.Image(type="filepath")
-                     gr.Examples(
-                         examples=[
-                             [osp.join(example_portrait_dir, "s9.jpg")],
-                             [osp.join(example_portrait_dir, "s6.jpg")],
-                             [osp.join(example_portrait_dir, "s10.jpg")],
-                             [osp.join(example_portrait_dir, "s5.jpg")],
-                             [osp.join(example_portrait_dir, "s7.jpg")],
-                             [osp.join(example_portrait_dir, "s12.jpg")],
-                             [osp.join(example_portrait_dir, "s22.jpg")],
-                         ],
-                         inputs=[image_input],
-                         cache_examples=False,
-                     )
-                 with gr.Accordion(open=True, label="Driving Video"):
-                     video_input = gr.Video()
-                     gr.Examples(
-                         examples=[
-                             [osp.join(example_video_dir, "d0.mp4")],
-                             [osp.join(example_video_dir, "d18.mp4")],
-                             [osp.join(example_video_dir, "d19.mp4")],
-                             [osp.join(example_video_dir, "d14_trim.mp4")],
-                             [osp.join(example_video_dir, "d6_trim.mp4")],
-                         ],
-                         inputs=[video_input],
-                         cache_examples=False,
-                     )
-             with gr.Row():
-                 with gr.Accordion(open=False, label="Animation Instructions and Options"):
-                     gr.Markdown(load_description("assets/gradio_description_animation.md"))
-                     with gr.Row():
-                         flag_relative_input = gr.Checkbox(value=True, label="relative motion")
-                         flag_do_crop_input = gr.Checkbox(value=True, label="do crop")
-                         flag_remap_input = gr.Checkbox(value=True, label="paste-back")
-             gr.Markdown(load_description("assets/gradio_description_animate_clear.md"))
-             with gr.Row():
-                 with gr.Column():
-                     process_button_animation = gr.Button("🚀 Animate", variant="primary")
-                 with gr.Column():
-                     process_button_reset = gr.ClearButton([image_input, video_input, output_video, output_video_concat], value="🧹 Clear")
-             with gr.Row():
-                 with gr.Column():
-                     with gr.Accordion(open=True, label="The animated video in the original image space"):
-                         output_video.render()
-                 with gr.Column():
-                     with gr.Accordion(open=True, label="The animated video"):
-                         output_video_concat.render()
-             with gr.Row():
-                 # Examples
-                 gr.Markdown("## You can also run the examples below with one click ⬇️")
-             with gr.Row():
-                 gr.Examples(
-                     examples=data_examples,
-                     fn=gpu_wrapped_execute_video,
-                     inputs=[
-                         image_input,
-                         video_input,
-                         flag_relative_input,
-                         flag_do_crop_input,
-                         flag_remap_input
-                     ],
-                     outputs=[output_image, output_image_paste_back],
-                     examples_per_page=6,
-                     cache_examples=False,
-                 )
-
-             process_button_animation.click(
-                 fn=gpu_wrapped_execute_video,
-                 inputs=[
-                     image_input,
-                     video_input,
-                     flag_relative_input,
-                     flag_do_crop_input,
-                     flag_remap_input
-                 ],
-                 outputs=[output_video, output_video_concat],
-                 show_progress=True
-             )
-             audio_gen_button.click(
-                 fn=gpu_wrapped_elevenlabs_pipeline_generate_voice,
-                 inputs=[
-                     script_txt
-                 ],
-                 outputs=[output_audio_path],
-                 show_progress=True
-             )
-
-
-
-             # image_input.change(
-             #     fn=gradio_pipeline.prepare_retargeting,
-             #     inputs=image_input,
-             #     outputs=[eye_retargeting_slider, lip_retargeting_slider, retargeting_input_image]
-             # )
-             video_input.upload(
-                 fn=is_square_video,
-                 inputs=video_input,
-                 outputs=video_input
-             )
-
-         # Third tab: tab for Flux development
-         with gr.Tab("FLUX Dev"):
-             flux_demo = create_flux_tab(image_input)  # create the Flux dev tab
-
- demo.launch(
-     server_port=args.server_port,
-     share=args.share,
-     server_name=args.server_name
- )