fffiloni commited on
Commit
b28e9a1
·
1 Parent(s): d084e7f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -19
app.py CHANGED
@@ -7,16 +7,7 @@ lpmc_client = gr.load("seungheondoh/LP-Music-Caps-demo", src="spaces")
7
  from gradio_client import Client
8
 
9
  client = Client("https://fffiloni-test-llama-api.hf.space/", hf_token=hf_token)
10
-
11
- from diffusers import DiffusionPipeline
12
- import torch
13
-
14
- pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
15
- pipe.to("cuda")
16
- #pipe.enable_model_cpu_offload()
17
-
18
- # if using torch < 2.0
19
- # pipe.enable_xformers_memory_efficient_attention()
20
 
21
  from pydub import AudioSegment
22
 
@@ -30,9 +21,12 @@ def cut_audio(input_path, output_path, max_duration=30000):
30
 
31
  return output_path
32
 
33
def solo_xd(prompt):
    """Generate a single image from a text prompt.

    Runs the module-level SDXL pipeline (`pipe`) on *prompt* and
    returns the first image of the batch.
    """
    return pipe(prompt=prompt).images[0]
 
 
 
36
 
37
  def infer(audio_file):
38
 
@@ -82,12 +76,15 @@ def infer(audio_file):
82
 
83
  print(f"Llama2 result: {result}")
84
 
85
- images = pipe(prompt=result).images[0]
 
 
 
86
 
87
  print("Finished")
88
 
89
  #return cap_result, result, images
90
- return images, result, gr.update(visible=True)
91
 
92
  css = """
93
  #col-container {max-width: 510px; margin-left: auto; margin-right: auto;}
@@ -118,18 +115,18 @@ with gr.Blocks(css=css) as demo:
118
  infer_btn = gr.Button("Generate Image from Music")
119
  #lpmc_cap = gr.Textbox(label="Lp Music Caps caption")
120
  llama_trans_cap = gr.Textbox(label="Llama translation", visible=False)
121
- img_result = gr.Image(label="Image Result")
122
  tryagain_btn = gr.Button("Try again ?", visible=False)
123
 
124
  gr.Examples(examples=[["./examples/electronic.mp3"],["./examples/folk.wav"], ["./examples/orchestra.wav"]],
125
  fn=infer,
126
  inputs=[audio_input],
127
- outputs=[img_result, llama_trans_cap, tryagain_btn],
128
  cache_examples=True
129
  )
130
 
131
  #infer_btn.click(fn=infer, inputs=[audio_input], outputs=[lpmc_cap, llama_trans_cap, img_result])
132
- infer_btn.click(fn=infer, inputs=[audio_input], outputs=[img_result, llama_trans_cap, tryagain_btn])
133
- tryagain_btn.click(fn=solo_xd, inputs=[llama_trans_cap], outputs=[img_result])
134
 
135
  demo.queue(max_size=20).launch()
 
7
  from gradio_client import Client
8
 
9
  client = Client("https://fffiloni-test-llama-api.hf.space/", hf_token=hf_token)
10
+ zrscp_client = Client("https://fffiloni-zeroscope--76p9h.hf.space/", hf_token=hf_token)
 
 
 
 
 
 
 
 
 
11
 
12
  from pydub import AudioSegment
13
 
 
21
 
22
  return output_path
23
 
def solo_zrscp(prompt):
    """Generate a video from a text prompt.

    Sends *prompt* to the hosted Zeroscope Space via the module-level
    `zrscp_client` and returns the resulting video file path.
    """
    video_path = zrscp_client.predict(prompt, api_name="/zrscp")
    return video_path
 
31
  def infer(audio_file):
32
 
 
76
 
77
  print(f"Llama2 result: {result}")
78
 
79
+ res_vid = zrscp_client.predict(
80
+ result,
81
+ api_name="/zrscp"
82
+ )
83
 
84
  print("Finished")
85
 
86
  #return cap_result, result, images
87
+ return res_vid, result, gr.update(visible=True)
88
 
89
  css = """
90
  #col-container {max-width: 510px; margin-left: auto; margin-right: auto;}
 
115
  infer_btn = gr.Button("Generate Image from Music")
116
  #lpmc_cap = gr.Textbox(label="Lp Music Caps caption")
117
  llama_trans_cap = gr.Textbox(label="Llama translation", visible=False)
118
+ vid_result = gr.Video(label="Image Result")
119
  tryagain_btn = gr.Button("Try again ?", visible=False)
120
 
121
  gr.Examples(examples=[["./examples/electronic.mp3"],["./examples/folk.wav"], ["./examples/orchestra.wav"]],
122
  fn=infer,
123
  inputs=[audio_input],
124
+ outputs=[vid_result, llama_trans_cap, tryagain_btn],
125
  cache_examples=True
126
  )
127
 
128
  #infer_btn.click(fn=infer, inputs=[audio_input], outputs=[lpmc_cap, llama_trans_cap, img_result])
129
+ infer_btn.click(fn=infer, inputs=[audio_input], outputs=[vid_result, llama_trans_cap, tryagain_btn])
130
+ tryagain_btn.click(fn=solo_zrscp, inputs=[llama_trans_cap], outputs=[vid_result])
131
 
132
  demo.queue(max_size=20).launch()