import json
import re

import gradio as gr
import spaces
import torch
from gradio_client import Client
from moviepy.editor import VideoFileClip
from transformers import pipeline

def extract_audio(video_in):
    """Extract the audio track of a video file into audio.wav."""
    output_audio = 'audio.wav'
    # Open the video file and grab its audio track
    video_clip = VideoFileClip(video_in)
    audio_clip = video_clip.audio
    # Save the audio as a .wav file (44100 Hz is a standard sample rate)
    audio_clip.write_audiofile(output_audio, fps=44100)
    print("Audio extraction complete.")
    return output_audio
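
# ---- Captioning: describe the input image with a hosted Space ----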

def get_caption(image_in):
    """Caption an image with the Kosmos-2 Space and return the description text."""
    kosmos2_client = Client("https://ydshieh-kosmos-2.hf.space/")
    kosmos2_result = kosmos2_client.predict(
        image_in,    # str (filepath or URL to image) in 'Test Image' Image component
        "Detailed",  # str in 'Description Type' Radio component
        fn_index=4
    )
    print(f"KOSMOS2 RETURNS: {kosmos2_result}")

    # The second element of the result is a JSON file of token entries;
    # rebuild the caption sentence from their first fields.
    with open(kosmos2_result[1], 'r') as f:
        data = json.load(f)
    reconstructed_sentence = [sublist[0] for sublist in data]
    full_sentence = ' '.join(reconstructed_sentence)

    # The caption comes back prefixed with "Describe this image in detail:";
    # strip that prefix and keep the rest.
    pattern = r'^Describe this image in detail:\s*(.*)$'
    match = re.search(pattern, full_sentence)
    if match:
        description = match.group(1)
        print(description)
    else:
        # Fall back to the raw sentence so we never return an undefined name
        print("Unable to locate valid description.")
        description = full_sentence
    return description

def get_caption_from_MD(image_in):
    """Alternative captioner: ask the moondream1 Space to describe the image."""
    client = Client("https://vikhyatk-moondream1.hf.space/")
    result = client.predict(
        image_in,                         # filepath in 'image' Image component
        "Describe precisely the image.",  # str in 'Question' Textbox component
        api_name="/answer_question"
    )
    print(result)
    return result
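
# ---- Music backends: each wraps a hosted Hugging Face Space ----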

def get_magnet(prompt):
    client = Client("https://fffiloni-magnet.hf.space/")
    result = client.predict(
        "facebook/magnet-small-10secs",  # str in 'Model' Radio component (one of the facebook/magnet-* or audio-magnet-* variants)
        "",                              # str in 'Model Path (custom models)' Textbox component
        prompt,                          # str in 'Input Text' Textbox component
        3,                               # float in 'Temperature' Number component
        0.9,                             # float in 'Top-p' Number component
        10,                              # float in 'Max CFG coefficient' Number component
        1,                               # float in 'Min CFG coefficient' Number component
        20,                              # float in 'Decoding Steps (stage 1)' Number component
        10,                              # float in 'Decoding Steps (stage 2)' Number component
        10,                              # float in 'Decoding Steps (stage 3)' Number component
        10,                              # float in 'Decoding Steps (stage 4)' Number component
        "prod-stride1 (new!)",           # str in 'Span Scoring' Radio component
        api_name="/predict_full"
    )
    print(result)
    return result[1]

def get_audioldm(prompt):
    client = Client("https://haoheliu-audioldm2-text2audio-text2music.hf.space/")
    result = client.predict(
        prompt,          # str in 'Input text' Textbox component
        "Low quality.",  # str in 'Negative prompt' Textbox component
        10,   # int | float (between 5 and 15) in 'Duration (seconds)' Slider component
        3.5,  # int | float (between 0 and 7) in 'Guidance scale' Slider component
        45,   # int | float in 'Seed' Number component
        3,    # int | float (between 1 and 5) in 'Number waveforms to generate' Slider component
        fn_index=1
    )
    print(result)
    # The Space returns a video file; keep only its audio track
    audio_result = extract_audio(result)
    return audio_result

def get_riffusion(prompt):
    client = Client("https://fffiloni-spectrogram-to-music.hf.space/")
    result = client.predict(
        prompt,  # str in 'Musical prompt' Textbox component
        "",      # str in 'Negative prompt' Textbox component
        None,    # filepath in 'parameter_4' Audio component
        10,      # float (between 5 and 10) in 'Duration in seconds' Slider component
        api_name="/predict"
    )
    print(result)
    return result[1]

def get_mustango(prompt):
    client = Client("https://declare-lab-mustango.hf.space/")
    result = client.predict(
        prompt,  # str in 'Prompt' Textbox component
        200,     # float (between 100 and 200) in 'Steps' Slider component
        3,       # float (between 1 and 10) in 'Guidance Scale' Slider component
        api_name="/predict"
    )
    print(result)
    return result
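
# ---- LLM: turn the image caption into a musical prompt ----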

zephyr_model = "HuggingFaceH4/zephyr-7b-beta"
mixtral_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"

pipe = pipeline("text-generation", model=zephyr_model, torch_dtype=torch.bfloat16, device_map="auto")
agent_maker_sys = """
You are an AI whose job is to help users create their own music, in a genre that reflects the character or scene of an image they describe.
In particular, you need to respond succinctly, in a friendly tone, with a musical prompt for a music generation model, using only a few musical words.
For example, if a user says, "a picture of a man in a black suit and tie riding a black dragon", immediately provide a musical prompt corresponding to the image description.
Immediately STOP after that. It should be EXACTLY in this format:
"A grand orchestral arrangement with thunderous percussion, epic brass fanfares, and soaring strings, creating a cinematic atmosphere fit for a heroic battle"
"""
instruction = f"""
<|system|>
{agent_maker_sys}</s>
<|user|>
"""

@spaces.GPU  # the Space runs on ZeroGPU, so the GPU-bound LLM call must sit in a decorated function
def get_musical_prompt(user_prompt):
    prompt = f"{instruction.strip()}\n{user_prompt}</s>\n<|assistant|>"
    outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
    # Strip everything from <|system|> through <|assistant|> so that only the
    # model's suggested musical prompt remains
    pattern = r'<\|system\|>(.*?)<\|assistant\|>'
    cleaned_text = re.sub(pattern, '', outputs[0]["generated_text"], flags=re.DOTALL)
    print(f"SUGGESTED Musical prompt: {cleaned_text}")
    return cleaned_text.lstrip("\n")
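
# Illustrative call (sampled output varies between runs):
#   get_musical_prompt("a man in a black suit and tie riding a black dragon")
#   -> e.g. "A grand orchestral arrangement with thunderous percussion,
#      epic brass fanfares, and soaring strings..."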

def infer(image_in, chosen_model):
    gr.Info("Getting image caption with Kosmos-2...")
    user_prompt = get_caption(image_in)

    gr.Info("Building a musical prompt from the image caption...")
    musical_prompt = get_musical_prompt(user_prompt)

    if chosen_model == "MAGNet":
        gr.Info("Now calling MAGNet for music...")
        music_o = get_magnet(musical_prompt)
    elif chosen_model == "AudioLDM-2":
        gr.Info("Now calling AudioLDM-2 for music...")
        music_o = get_audioldm(musical_prompt)
    elif chosen_model == "Riffusion":
        gr.Info("Now calling Riffusion for music...")
        music_o = get_riffusion(musical_prompt)
    elif chosen_model == "Mustango":
        gr.Info("Now calling Mustango for music...")
        music_o = get_mustango(musical_prompt)
    return musical_prompt, music_o
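
# ---- Gradio UI ----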

demo_title = "Image to Music V2"
description = "Get music from a picture"

css = """
#col-container {
    margin: 0 auto;
    max-width: 980px;
    text-align: left;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML(f"""
        <h2 style="text-align: center;">{demo_title}</h2>
        <p style="text-align: center;">{description}</p>
        """)
        with gr.Row():
            with gr.Column():
                image_in = gr.Image(
                    label="Image reference",
                    type="filepath",
                    elem_id="image-in"
                )
                chosen_model = gr.Dropdown(
                    label="Choose a model",
                    choices=[
                        "MAGNet",
                        "AudioLDM-2",
                        "Riffusion",
                        "Mustango"
                    ],
                    value="MAGNet"
                )
                submit_btn = gr.Button("Make music from my pic!")
            with gr.Column():
                caption = gr.Textbox(
                    label="Inspirational musical prompt",
                    max_lines=3
                )
                result = gr.Audio(
                    label="Music"
                )
        with gr.Column():
            gr.Examples(
                examples=[
                    ["examples/monalisa.png", "MAGNet"],
                    ["examples/santa.png", "MAGNet"],
                    ["examples/ocean_poet.jpeg", "MAGNet"],
                    ["examples/winter_hiking.png", "MAGNet"],
                    ["examples/teatime.jpeg", "MAGNet"],
                    ["examples/news_experts.jpeg", "MAGNet"]
                ],
                fn=infer,
                inputs=[image_in, chosen_model],
                outputs=[caption, result],
                cache_examples=False
            )

    submit_btn.click(
        fn=infer,
        inputs=[image_in, chosen_model],
        outputs=[caption, result]
    )

demo.queue(max_size=16).launch(show_api=False)