# NOTE: "Spaces: Build error" status lines below were captured from the
# hosting page during extraction — they are not part of the application source.
import gradio as gr | |
from typing import Any | |
import torch | |
from transformers import pipeline | |
from diffusers import StableDiffusionPipeline | |
from TTS.api import TTS | |
import whisper | |
import utils | |
from youtubeaudioextractor import PytubeAudioExtractor | |
from transcriber import SpanishTranscriber, WhisperTranscriber | |
from textprocessor import TextProcessor | |
from videocreator import VideoCreator | |
# HF Hub model used for Spanish speech-to-text (a fine-tuned Whisper medium).
spanish_transcribe_model = "juancopi81/whisper-medium-es"
# UI label -> ISO 639-1 code for the two supported languages.
languages = {"Spanish": "es", "English": "en"}
# Prefer GPU when available; fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
# transformers' pipeline() wants a device index: 0 = first GPU, -1 = CPU.
device_dict = {"cuda": 0, "cpu": -1}
# Half precision only on GPU; CPU inference needs float32.
dtype = torch.float16 if device == "cuda" else torch.float32
# Detect if code is running in Colab
is_colab = utils.is_google_colab()
# Banner offering a Colab escape hatch; hidden when already running in Colab.
colab_instruction = "" if is_colab else """
<p>You can skip the queue using Colab:
<a href="">
<img data-canonical-src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" src="https://colab.research.google.com/assets/colab-badge.svg"></a></p>"""
device_print = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
# Initialize components
# Downloads the audio track of a YouTube video (pytube-based).
audio_extractor = PytubeAudioExtractor()
# Spanish ASR pipeline; chunk_length_s=30 chunks audio longer than Whisper's
# 30-second window so arbitrary-length videos can be transcribed.
es_transcription_pipe = pipeline(
    task="automatic-speech-recognition",
    model=spanish_transcribe_model,
    chunk_length_s=30,
    device=device_dict[device],
)
# Force Spanish transcription so the model neither auto-detects the language
# nor translates.
es_transcription_pipe.model.config.forced_decoder_ids = es_transcription_pipe.tokenizer.get_decoder_prompt_ids(language="es",
                                                                                                               task="transcribe")
es_audio_transcriber = SpanishTranscriber(es_transcription_pipe)
# English path uses the stock OpenAI Whisper "base" checkpoint.
en_transcription_pipe = whisper.load_model("base")
en_audio_transcriber = WhisperTranscriber(en_transcription_pipe)
# OpenAI completion model used to turn the transcript into JSON scenes.
openai_model = "text-davinci-003"
text_processor = TextProcessor(openai_model)
# Stable Diffusion v1.5 generates one illustration per scene.
image_model_id = "runwayml/stable-diffusion-v1-5"
image_pipeline = StableDiffusionPipeline.from_pretrained(image_model_id,
                                                         torch_dtype=dtype,
                                                         revision="fp16")
image_pipeline = image_pipeline.to(device)
# NOTE(review): fragile — positional indices into TTS.list_models() depend on
# the TTS library's list ordering and can silently select a different voice
# after an upgrade; prefer pinning explicit model names. TODO: confirm which
# models indices 22 (Spanish) and 8 (English) resolve to in the pinned version.
es_vo_model_name = TTS.list_models()[22]
en_vo_model_name = TTS.list_models()[8]
# Init TTS
es_tts = TTS(es_vo_model_name)
en_tts = TTS(en_vo_model_name)
def datapipeline(url: str,
                 video_language: str,
                 summary_language: str,
                 video_styles: str) -> Any:
    """Run the full YouTube -> illustrated-summary pipeline.

    Args:
        url: URL of the YouTube video to summarize.
        video_language: Language of the source video ("Spanish" or "English").
        summary_language: Language the summary/narration should be in.
        video_styles: Optional style text appended to the image prompts.

    Returns:
        A ``(video, video)`` pair: the same output path twice, feeding the
        ``gr.Video`` preview and the ``gr.File`` download components.

    Raises:
        ValueError: If ``video_language`` is not a supported language.
    """
    # Validate the language BEFORE downloading anything, and fail with an
    # exception instead of returning a bare string: the original returned
    # "Language not supported" here, a single value that does not match the
    # two outputs wired to this callback and would itself break the handler.
    # Gradio surfaces raised exceptions as an error message in the UI.
    if video_language == "Spanish":
        audio_transcriber = es_audio_transcriber
        video_creator = VideoCreator(es_tts, image_pipeline)
    elif video_language == "English":
        audio_transcriber = en_audio_transcriber
        video_creator = VideoCreator(en_tts, image_pipeline)
    else:
        raise ValueError(f"Language not supported: {video_language!r}")

    audio_path_file = audio_extractor.extract(url)
    print(f"Audio file created at: {audio_path_file}")
    transcribed_text = audio_transcriber.transcribe(audio_path_file)
    print("Audio transcription ready!")
    json_scenes = text_processor.get_json_scenes(transcribed_text,
                                                 summary_language)
    print("Scenes ready")
    video = video_creator.create_video(json_scenes, video_styles)
    print("Video at", video)
    return video, video
# Custom CSS injected into the gradio Blocks app: black buttons/sliders,
# centered 730px container, share-button pill, footer separator, and a
# spinner keyframe. (String content is consumed verbatim by gr.Blocks(css=...).)
css = """
a {
color: inherit;
text-decoration: underline;
}
.gradio-container {
font-family: 'IBM Plex Sans', sans-serif;
}
.gr-button {
color: white;
border-color: #000000;
background: #000000;
}
input[type='range'] {
accent-color: #000000;
}
.dark input[type='range'] {
accent-color: #dfdfdf;
}
.container {
max-width: 730px;
margin: auto;
padding-top: 1.5rem;
}
#gallery {
min-height: 22rem;
margin-bottom: 15px;
margin-left: auto;
margin-right: auto;
border-bottom-right-radius: .5rem !important;
border-bottom-left-radius: .5rem !important;
}
#gallery>div>.h-full {
min-height: 20rem;
}
.details:hover {
text-decoration: underline;
}
.gr-button {
white-space: nowrap;
}
.gr-button:focus {
border-color: rgb(147 197 253 / var(--tw-border-opacity));
outline: none;
box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
--tw-border-opacity: 1;
--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
--tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
--tw-ring-opacity: .5;
}
#advanced-btn {
font-size: .7rem !important;
line-height: 19px;
margin-top: 12px;
margin-bottom: 12px;
padding: 2px 8px;
border-radius: 14px !important;
}
#advanced-options {
margin-bottom: 20px;
}
.footer {
margin-bottom: 45px;
margin-top: 35px;
text-align: center;
border-bottom: 1px solid #e5e5e5;
}
.footer>p {
font-size: .8rem;
display: inline-block;
padding: 0 10px;
transform: translateY(10px);
background: white;
}
.dark .footer {
border-color: #303030;
}
.dark .footer>p {
background: #0b0f19;
}
.acknowledgments h4{
margin: 1.25em 0 .25em 0;
font-weight: bold;
font-size: 115%;
}
#container-advanced-btns{
display: flex;
flex-wrap: wrap;
justify-content: space-between;
align-items: center;
}
.animate-spin {
animation: spin 1s linear infinite;
}
@keyframes spin {
from {
transform: rotate(0deg);
}
to {
transform: rotate(360deg);
}
}
#share-btn-container {
display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
}
#share-btn {
all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;
}
#share-btn * {
all: unset;
}
.gr-form{
flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
}
#prompt-container{
gap: 0;
}
#generated_id{
min-height: 700px
}
#setting_id{
margin-bottom: 12px;
text-align: center;
font-weight: 900;
}
"""
# Build the gradio UI: header, settings (languages + style prompt),
# URL input + Run button, and the video/file outputs wired to datapipeline.
block = gr.Blocks(css=css)

with block as demo:
    # Header card. Fixed user-facing typos from the original copy:
    # "Illustraded" -> "Illustrated", "recive" -> "receive".
    gr.HTML(
        f"""
            <div style="text-align: center; max-width: 650px; margin: 0 auto;">
              <div
                style="
                  display: inline-flex;
                  align-items: center;
                  gap: 0.8rem;
                  font-size: 1.75rem;
                "
              >
                <h1 style="font-weight: 900; margin-bottom: 7px;">
                  YouTube to Illustrated Summary
                </h1>
              </div>
              <p style="margin-bottom: 10px; font-size: 94%">
                Enter the URL of a YouTube video (in Spanish) and you'll receive a video with an illustrated summary.
                It works for audio books, history lessons, etc. Try it out with a short video (less than 4 minutes).
              </p>
              <p style="margin-bottom: 10px; font-size: 94%">
                Running on <b>{device_print}</b>
              </p>
              <p>
              Some samples videos you can try:
              <ul>
                <li>https://www.youtube.com/watch?v=Hk5evm1NgzA (Little Red Riding Hood. Infer time: c.a. 196 seconds)</li>
                <li>https://www.youtube.com/watch?v=nJxWS9jZ9-c (Elon Musk's Biography. Infer time: c.a. 176 seconds)</li>
                <li>https://www.youtube.com/watch?v=sRmmQBBln9Q (Cook recipe. Infer time: c.a. 200 seconds)</li>
                <li>https://www.youtube.com/watch?v=qz4Wc48KITA (Poem by Edgar Allan Poe. Infer time: c.a. 200 seconds)</li>
                <li>https://www.youtube.com/watch?v=2D8CaoIY7Lk (The history of Christmas trees. Infer time: c.a. 130 seconds)</li>
                <li>https://www.youtube.com/watch?v=uhmRR-Ir7Bk (Dec. 20 news. Infer time: c.a. 230 seconds)</li>
                <li>https://www.youtube.com/watch?v=CT9T7Dp63x4 (Presentation of movie Lady Chatterley's Lover. Infer time: c.a. 277 seconds)</li>
              </ul>
              </p>
            </div>
        """
    )
    # Settings: source/output language radios and an optional style prompt
    # ("ouput" -> "output" typo fixed in the textbox label).
    with gr.Group():
        with gr.Box():
            with gr.Row(elem_id="setting_id").style(mobile_collapse=False, equal_height=True):
                gr.HTML("<h1>Setting</h1>")
            with gr.Row():
                with gr.Column():
                    video_language = gr.Radio(choices=["Spanish", "English"],
                                              label="Language of your input video:",
                                              value="Spanish")
                with gr.Column():
                    summary_language = gr.Radio(choices=["Spanish", "English"],
                                                label="Language of your output video:",
                                                value="Spanish")
            with gr.Row():
                video_styles = gr.Textbox(label="(OPTIONAL) Enter the styles for your output video",
                                          value="",
                                          placeholder="illustration, highly detailed, digital painting, concept art, matte, art by wlop and artgerm and greg rutkowski and alphonse mucha, masterpiece")
        # URL input and Run button ("YouTubeVideo" -> "YouTube Video" typo fixed).
        with gr.Group():
            with gr.Box():
                with gr.Row().style(mobile_collapse=False, equal_height=True):
                    url = gr.Textbox(
                        label="Enter the URL of the YouTube Video",
                        show_label=False,
                        max_lines=1,
                        placeholder="YouTube URL"
                    ).style(
                        border=(True, False, True, True),
                        rounded=(True, False, False, True),
                        container=False,
                    )
                    btn = gr.Button("Run").style(
                        margin=False,
                        rounded=(False, True, True, False),
                    )
        # Same video path feeds both the preview and the downloadable file.
        video_output = gr.Video()
        file_output = gr.File()
        btn.click(datapipeline,
                  inputs=[url,
                          video_language,
                          summary_language,
                          video_styles],
                  outputs=[video_output, file_output])
        # Footer: fixed "This demos is" -> "This demo is" and removed a stray
        # unmatched </a> tag from the original markup.
        gr.HTML(
            """
                <div class="footer">
                    <p>This demo is part of the Whisper Sprint (Dec. 2022).</p>
                </div>
            """
        )
        gr.Markdown('''
        [![Twitter Follow](https://img.shields.io/twitter/follow/juancopi81?style=social)](https://twitter.com/juancopi81)
        ![visitors](https://visitor-badge.glitch.me/badge?page_id=Juancopi81.yt-illustraded-summary)
        ''')

# Queue requests one at a time on the hosted Space; in Colab launch directly
# with debugging and a public share link.
if not is_colab:
    demo.queue(concurrency_count=1)
demo.launch(debug=is_colab, share=is_colab)