fffiloni committed
Commit • 606a45c
Parent(s):
Duplicate from fffiloni/Music-To-Image

Files changed:
- .gitattributes +37 -0
- README.md +13 -0
- app.py +135 -0
- examples/blank.md +0 -0
- examples/electronic.mp3 +0 -0
- examples/folk.wav +3 -0
- examples/orchestra.wav +3 -0
- requirements.txt +6 -0
.gitattributes
ADDED
@@ -0,0 +1,37 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+examples/folk.wav filter=lfs diff=lfs merge=lfs -text
+examples/orchestra.wav filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: Music To Image
+emoji: 🎶🌅
+colorFrom: red
+colorTo: gray
+sdk: gradio
+sdk_version: 3.39.0
+app_file: app.py
+pinned: false
+duplicated_from: fffiloni/Music-To-Image
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,135 @@
+import gradio as gr
+import os
+hf_token = os.environ.get('HF_TOKEN')
+
+lpmc_client = gr.load("seungheondoh/LP-Music-Caps-demo", src="spaces")
+
+from gradio_client import Client
+
+client = Client("https://fffiloni-test-llama-api.hf.space/", hf_token=hf_token)
+
+from diffusers import DiffusionPipeline
+import torch
+
+pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
+pipe.to("cuda")
+#pipe.enable_model_cpu_offload()
+
+# if using torch < 2.0
+# pipe.enable_xformers_memory_efficient_attention()
+
+from pydub import AudioSegment
+
+def cut_audio(input_path, output_path, max_duration=30000):
+    audio = AudioSegment.from_file(input_path)
+
+    if len(audio) > max_duration:
+        audio = audio[:max_duration]  # pydub slices in milliseconds: keep the first 30 s
+
+    audio.export(output_path, format="mp3")
+
+    return output_path
+
+def solo_xd(prompt):
+    images = pipe(prompt=prompt).images[0]
+    return images
+
+def infer(audio_file):
+
+    truncated_audio = cut_audio(audio_file, "trunc_audio.mp3")
+
+    cap_result = lpmc_client(
+        truncated_audio,  # str (filepath or URL to file) in 'audio_path' Audio component
+        api_name="predict"
+    )
+    print(cap_result)
+
+    #summarize_q = f"""
+
+    #I'll give you a list of music descriptions. Create a summary reflecting the musical ambiance.
+    #Do not process each segment, but provide a summary for the whole instead.
+
+    #Here's the list:
+
+    #{cap_result}
+    #"""
+
+    #summary_result = client.predict(
+    #    summarize_q,  # str in 'Message' Textbox component
+    #    api_name="/chat_1"
+    #)
+
+    #print(f"SUMMARY: {summary_result}")
+
+    llama_q = f"""
+    I'll give you a music description, from which I want you to provide an illustrative image description that would fit well with the music.
+    Do not process each segment or song, but provide a summary for the whole instead.
+    Answer with only one image description. Never do lists. Maximum 77 tokens.
+
+    Here's the music description:
+
+    {cap_result}
+
+    """
+
+    result = client.predict(
+        llama_q,  # str in 'Message' Textbox component
+        api_name="/predict"
+    )
+
+
+
+
+    print(f"Llama2 result: {result}")
+
+    images = pipe(prompt=result).images[0]
+
+    print("Finished")
+
+    #return cap_result, result, images
+    return images, result, gr.update(visible=True)
+
+css = """
+#col-container {max-width: 510px; margin-left: auto; margin-right: auto;}
+"""
+with gr.Blocks(css=css) as demo:
+    with gr.Column(elem_id="col-container"):
+        gr.HTML("""<div style="text-align: center; max-width: 700px; margin: 0 auto;">
+              <div
+                style="
+                  display: inline-flex;
+                  align-items: center;
+                  gap: 0.8rem;
+                  font-size: 1.75rem;
+                "
+              >
+                <h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">
+                  Music To Image
+                </h1>
+              </div>
+              <p style="margin-bottom: 10px; font-size: 94%">
+                Sends audio into <a href="https://huggingface.co/spaces/seungheondoh/LP-Music-Caps-demo" target="_blank">LP-Music-Caps</a>
+                to generate an audio caption, which is then translated to an illustrative image description with Llama2 and finally run through
+                Stable Diffusion XL to generate an image from the audio! <br /><br />
+                Note: Only the first 30 seconds of your audio will be used for inference.
+              </p>
+            </div>""")
+        audio_input = gr.Audio(label="Music input", type="filepath", source="upload")
+        infer_btn = gr.Button("Generate Image from Music")
+        #lpmc_cap = gr.Textbox(label="LP-Music-Caps caption")
+        llama_trans_cap = gr.Textbox(label="Llama translation", visible=False)
+        img_result = gr.Image(label="Image Result")
+        tryagain_btn = gr.Button("Try again?", visible=False)
+
+        gr.Examples(examples=[["./examples/electronic.mp3"], ["./examples/folk.wav"], ["./examples/orchestra.wav"]],
+                    fn=infer,
+                    inputs=[audio_input],
+                    outputs=[img_result, llama_trans_cap, tryagain_btn],
+                    cache_examples=True
+                    )
+
+    #infer_btn.click(fn=infer, inputs=[audio_input], outputs=[lpmc_cap, llama_trans_cap, img_result])
+    infer_btn.click(fn=infer, inputs=[audio_input], outputs=[img_result, llama_trans_cap, tryagain_btn])
+    tryagain_btn.click(fn=solo_xd, inputs=[llama_trans_cap], outputs=[img_result])
+
+demo.queue(max_size=20).launch()
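Because everything before the SDXL step runs in two remote Spaces, the captioning and prompt-rewriting half of infer() can be smoke-tested without a GPU. A minimal sketch, reusing the exact Spaces and api_name endpoints from app.py above; the one-line instruction is a condensed stand-in for the longer llama_q prompt, and HF_TOKEN must be set in the environment:

    # Sketch: exercise the two remote stages of infer() locally (no SDXL / no CUDA needed).
    import os
    import gradio as gr
    from gradio_client import Client

    lpmc_client = gr.load("seungheondoh/LP-Music-Caps-demo", src="spaces")
    llama_client = Client("https://fffiloni-test-llama-api.hf.space/", hf_token=os.environ.get("HF_TOKEN"))

    caption = lpmc_client("examples/electronic.mp3", api_name="predict")  # music -> text caption
    # Condensed stand-in for the llama_q prompt built in infer()
    prompt = llama_client.predict(f"Describe one illustrative image fitting this music: {caption}", api_name="/predict")
    print(prompt)  # ready to feed any text-to-image pipeline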
examples/blank.md
ADDED
File without changes
examples/electronic.mp3
ADDED
Binary file (480 kB)
examples/folk.wav
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed9c5e4e03b706c464993ab770388fd3a52b240e4c054ec279322dd48c0cb7a8
+size 1918830
examples/orchestra.wav
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97135f273616d39b11db37a1ec61e0bd729b82c085f7dcdc472b26a37affca8a
+size 1323632
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+pydub
+invisible_watermark
+transformers
+diffusers
+accelerate
+safetensors
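Note that app.py also imports torch, gradio, and gradio_client, none of which appear above. On Spaces the Gradio SDK image provides gradio itself (pinned via sdk_version: 3.39.0 in README.md), and the others likely arrive transitively (accelerate depends on torch, and gradio releases of this era depend on gradio_client); for a local run, explicitly installing gradio==3.39.0 and a CUDA-enabled torch alongside `pip install -r requirements.txt` would be the safer bet.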