import os

import gradio as gr
import openai
import pytube as pt
import torch
from huggingface_hub import model_info  # imported by the original app; unused below
from transformers import pipeline

# API keys come from environment variables.
openai.api_key = os.getenv("OPEN_AI_KEY")
hf_t_key = os.getenv("HF_TOKEN_KEY")  # was the literal string 'HF_TOKEN_KEY'; the os.getenv call was missing

MODEL_NAME = "openai/whisper-large"
lang = "en"

device = 0 if torch.cuda.is_available() else "cpu"
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,  # split long audio into 30-second chunks
    device=device,
)

# Force Whisper to transcribe (not translate) in English.
pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(
    language=lang, task="transcribe"
)


def transcribe(microphone, file_upload):
    """Transcribe a microphone recording or an uploaded audio file."""
    warn_output = ""
    if (microphone is not None) and (file_upload is not None):
        warn_output = (
            "WARNING: You've used the microphone and uploaded an audio file. "
            "The microphone recording will be transcribed and the uploaded file discarded.\n"
        )
    elif (microphone is None) and (file_upload is None):
        return "ERROR: You have to either use the microphone or upload an audio file"

    file = microphone if microphone is not None else file_upload
    text = pipe(file)["text"]
    return warn_output + text


def _return_yt_html_embed(yt_url):
    """Build an HTML snippet that embeds the given YouTube video."""
    video_id = yt_url.split("?v=")[-1]
    HTML_str = (
        f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
        " </center>"
    )
    return HTML_str
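
# The "?v=" split above assumes a canonical watch?v= URL and would return the
# whole URL for youtu.be short links. A minimal standard-library sketch of a
# more tolerant parser follows; _extract_video_id is a new, hypothetical
# helper and is not wired into the app.
from urllib.parse import parse_qs, urlparse


def _extract_video_id(yt_url):
    """Best-effort video-ID extraction for watch?v= and youtu.be URLs."""
    parsed = urlparse(yt_url)
    if parsed.hostname == "youtu.be":
        return parsed.path.lstrip("/")
    # Fall back to the original "?v=" split if no v= query parameter is found.
    return parse_qs(parsed.query).get("v", [yt_url.split("?v=")[-1]])[0]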
' "
" ) return HTML_str def yt_transcribe(yt_url): yt = pt.YouTube(yt_url) html_embed_str = _return_yt_html_embed(yt_url) stream = yt.streams.filter(only_audio=True)[0] stream.download(filename="audio.mp3") text = pipe("audio.mp3")["text"] return html_embed_str, text def predict(message, history): history_openai_format = [] for human, assistant in history: history_openai_format.append({"role": "user", "content": human }) history_openai_format.append({"role": "assistant", "content": assistant}) history_openai_format.append({"role": "user", "content": message}) response = openai.ChatCompletion.create( model='ft:gpt-3.5-turbo-1106:2292030-peach-tech::8cxzbHH4', messages= history_openai_format, temperature=1.0, stream=True ) partial_message = "" for chunk in response: if len(chunk['choices'][0]['delta']) != 0: partial_message = partial_message + chunk['choices'][0]['delta']['content'] yield partial_message A1 = gr.ChatInterface(predict, title="COLLEAGUE", description="An AI Productivity Assistant that Chats, Transcribes, and Creates, Built By Peach State Innovation and Technology. Select The Corresponding Tab For Tool Accessibility", textbox=gr.Textbox(placeholder="Enter your question/prompt here..."), theme= gr.themes.Glass(primary_hue="neutral", neutral_hue="slate"), retry_btn=None, clear_btn="Clear Conversation") A3 = gr.load( "models/Salesforce/blip-image-captioning-large", title=" ", description="Take a Photo or an Existing Image, Upload It, I'll Give You Its Description", outputs=[gr.Textbox(label="I see...")], theme= gr.themes.Glass(primary_hue="neutral", neutral_hue="slate")) A4 = gr.load( "models/stabilityai/stable-diffusion-xl-base-1.0", inputs=[gr.Textbox(label="Enter Your Image Description")], outputs=[gr.Image(label="Image")], title=" ", description="Bring Your Imagination Into Existence And Create Unique Images With COLLEAGUE, Powered With Stable Diffusion", allow_flagging="never", examples=["A gigantic celtic leprechaun wandering the streets of downtown Atlanta","A child eating pizza in a Brazilian favela"]) mf_transcribe = gr.Interface( fn=transcribe, inputs=[ gr.inputs.Audio(source="microphone", type="filepath", optional=True), gr.inputs.Audio(source="upload", type="filepath", optional=True), ], outputs="text", layout="horizontal", theme="huggingface", title=" ", description=( "Transcribe real-time speech or audio files at the click of a button." ), allow_flagging="never", ) yt_transcribe = gr.Interface( fn=yt_transcribe, inputs=[gr.Textbox(lines=1, placeholder="Paste your YouTube video URL/web address here", label="YouTube Video URL")], outputs=["html", "text"], layout="horizontal", theme="huggingface", title=" ", description=( "Transcribe YouTube videos at the click of a button." ), allow_flagging="never", ) clp = gr.TabbedInterface([A1, mf_transcribe, yt_transcribe, A3, A4], ["Chat", "Transcribe", "Transcribe YouTube Videos", "Describe", "Create"], theme= gr.themes.Glass(primary_hue="neutral", neutral_hue="slate")) clp.queue().launch()