"""Gradio demo: transcribe Spanish speech, then generate an image from the text.

Pipeline: wav2vec2 Spanish ASR -> Stable Diffusion 2 text-to-image.
Requires a CUDA GPU (the diffusion model is moved to "cuda" in fp16).
"""

import gradio as gr
import torch  # needed for torch.float16 below
from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
from transformers import pipeline  # needed for the ASR pipeline below

# Stable Diffusion 2 with the Euler discrete scheduler; fp16 halves VRAM use.
model_id = "stabilityai/stable-diffusion-2"
scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
image_model = StableDiffusionPipeline.from_pretrained(
    model_id, scheduler=scheduler, torch_dtype=torch.float16
)
image_model = image_model.to("cuda")

# Spanish automatic-speech-recognition model.
model = pipeline(
    "automatic-speech-recognition", "facebook/wav2vec2-large-xlsr-53-spanish"
)


def transcribe_text_audio(mic=None, file=None):
    """Transcribe an audio input and render an image from the transcription.

    Args:
        mic: filepath of a microphone recording (takes precedence over *file*).
        file: filepath of an uploaded audio file.

    Returns:
        A ``(transcription, image)`` tuple; ``image`` is an RGB PIL image.
        When no audio was provided, returns the Spanish error message and
        ``None`` so both declared Gradio outputs receive a value.
    """
    if mic is not None:
        audio = mic
    elif file is not None:
        audio = file
    else:
        # Two outputs are declared on the Interface, so return a value for
        # each; the original returned a lone string, which Gradio rejects.
        return "No se ha detectado ninguna entrada de audio", None

    transcription = model(audio)["text"]
    image = image_model(transcription).images[0]
    image = image.convert("RGB")
    return transcription, image


gr.Interface(
    fn=transcribe_text_audio,
    inputs=[
        gr.Audio(sources=["microphone"], type="filepath"),
        gr.Audio(sources=["upload"], type="filepath"),
    ],
    outputs=["text", "image"],
).launch()