import os

import gradio as gr
import huggingface_hub
import torch
from deep_translator import GoogleTranslator
from diffusers import DiffusionPipeline
from groq import Groq

# Read the Groq API key from the environment; never hard-code secrets in source.
api_key = os.environ.get("GROQ_API_KEY")
client = Groq(api_key=api_key)

# Optionally log in to the Hugging Face Hub. login() expects an access token
# string (e.g. "hf_..."), not a model file URL; it is only needed for gated
# or private models.
HF_TOKEN = os.environ.get("HF_TOKEN")
if HF_TOKEN:
    huggingface_hub.login(HF_TOKEN)

# Set device: CUDA if available, else CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Model IDs for Stable Diffusion pipelines. Transcription is handled by
# Groq's hosted whisper-large-v3 below, so no local Whisper model is loaded.
model_id1 = "dreamlike-art/dreamlike-diffusion-1.0"  # alternative SD 1.5-style checkpoint (unused)
model_id2 = "stabilityai/stable-diffusion-xl-base-1.0"

# Initialize the text-to-image pipeline once at startup. DiffusionPipeline
# dispatches to the right pipeline class for the checkpoint (here SDXL);
# half-precision weights are only safe on GPU.
if torch.cuda.is_available():
    pipe = DiffusionPipeline.from_pretrained(model_id2, torch_dtype=torch.float16)
else:
    pipe = DiffusionPipeline.from_pretrained(model_id2)  # full precision on CPU

# Move the pipeline to the selected device (either GPU or CPU)
pipe = pipe.to(device)


# Transcribe Tamil audio, translate it to English, and optionally generate an image
def process_audio(audio_path, image_option):
    if audio_path is None:
        return "Please upload an audio file.", None, None

    # Step 1: Transcribe audio with Groq's hosted Whisper model
    try:
        with open(audio_path, "rb") as file:
            transcription = client.audio.transcriptions.create(
                file=(os.path.basename(audio_path), file.read()),
                model="whisper-large-v3",
                language="ta",
                response_format="verbose_json",
            )
        tamil_text = transcription.text
    except Exception as e:
        return f"An error occurred during transcription: {e}", None, None

    # Step 2: Translate Tamil to English
    try:
        translator = GoogleTranslator(source="ta", target="en")
        translation = translator.translate(tamil_text)
    except Exception as e:
        return tamil_text, f"An error occurred during translation: {e}", None

    # Step 3: Generate an image from the translation (if selected). The
    # pipeline loaded at startup is reused rather than reloading a
    # multi-gigabyte model on every request. (ByteDance/SDXL-Lightning, for
    # example, ships UNet/LoRA checkpoints meant to be merged into the SDXL
    # base model; it cannot be loaded directly as a standalone pipeline.)
    image = None
    if image_option == "Generate Image":
        try:
            image = pipe(translation).images[0]
        except Exception as e:
            # The image output expects an image or None, so log the error
            # instead of returning the message in its place.
            print(f"An error occurred during image generation: {e}")
            return tamil_text, translation, None

    return tamil_text, translation, image


# Create Gradio interface
with gr.Blocks(theme=gr.themes.Base()) as iface:
    gr.Markdown("# Audio Transcription, Translation, and Image Generation")
    with gr.Row():
        with gr.Column():
            audio_input = gr.Audio(type="filepath", label="Upload Audio File")
            image_option = gr.Dropdown(
                ["Generate Image", "Skip Image"],
                label="Image Generation",
                value="Generate Image",
            )
            submit_button = gr.Button("Process Audio")
        with gr.Column():
            tamil_text_output = gr.Textbox(label="Tamil Transcription")
            translation_output = gr.Textbox(label="English Translation")
            image_output = gr.Image(label="Generated Image")

    submit_button.click(
        fn=process_audio,
        inputs=[audio_input, image_option],
        outputs=[tamil_text_output, translation_output, image_output],
    )

# Launch the interface
iface.launch()
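
# Example usage (a hypothetical sketch; the script name and key values are
# placeholders, and the environment variable names match those read above):
#
#   export GROQ_API_KEY="gsk_..."   # Groq API key for hosted Whisper
#   export HF_TOKEN="hf_..."        # optional, only for gated Hub models
#   python app.py                   # Gradio prints a local URL to open
#
# Uploading a Tamil audio clip and clicking "Process Audio" fills the
# transcription and translation boxes; with "Generate Image" selected, the
# SDXL pipeline also renders an image from the English translation.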