# Import spaces first to avoid CUDA initialization issues
import spaces
# Then import other libraries
import torch
import librosa
from transformers import pipeline, WhisperProcessor, WhisperForConditionalGeneration, AutoModelForCausalLM, AutoProcessor
from gtts import gTTS
import gradio as gr
from PIL import Image
import os
import base64
from io import BytesIO
import subprocess
from langdetect import detect
print("Using GPU for operations when available")
# Install flash-attn at startup; FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE skips
# compiling the CUDA kernels, which would fail on GPU-less build machines.
# The environment is merged with os.environ so pip keeps PATH etc.
subprocess.run(
    'pip install flash-attn --no-build-isolation',
    env={**os.environ, 'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"},
    shell=True,
)
# Function to safely load pipeline within a GPU-decorated function
@spaces.GPU
def load_pipeline(model_name, **kwargs):
    try:
        device = 0 if torch.cuda.is_available() else "cpu"
        return pipeline(model=model_name, device=device, **kwargs)
    except Exception as e:
        print(f"Error loading {model_name} pipeline: {e}")
        return None
# Load Whisper model for speech recognition within a GPU-decorated function
@spaces.GPU
def load_whisper():
    try:
        device = 0 if torch.cuda.is_available() else "cpu"
        processor = WhisperProcessor.from_pretrained("openai/whisper-small")
        model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
        return processor, model
    except Exception as e:
        print(f"Error loading Whisper model: {e}")
        return None, None
# Load sarvam-2b for text generation within a GPU-decorated function
@spaces.GPU
def load_sarvam():
    return load_pipeline('sarvamai/sarvam-2b-v0.5')
# Load Phi-3.5-vision-instruct model within a GPU-decorated function
@spaces.GPU
def load_vision_model():
    try:
        print("Starting to load vision model...")
        model_id = "microsoft/Phi-3.5-vision-instruct"
        print(f"Loading model from {model_id}")
        # Check for CUDA availability
        device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"Using device: {device}")
        # Load the model in fp16 with automatic placement to reduce memory pressure
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            trust_remote_code=True,
            torch_dtype=torch.float16,
            use_flash_attention_2=True,  # Requires flash-attn; set False if unavailable
            device_map="auto",           # Automatically manage model placement
            low_cpu_mem_usage=True,
        )
        print("Model loaded successfully")
        print("Loading processor...")
        processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True, num_crops=16)
        print("Processor loaded successfully")
        return model, processor
    except ImportError as e:
        print(f"Error importing required modules: {str(e)}")
        print("Please ensure all required dependencies are installed.")
    except RuntimeError as e:
        print(f"Runtime error (possibly CUDA out of memory): {str(e)}")
        print("Consider using a smaller model or enabling GPU offloading.")
    except Exception as e:
        print(f"Unexpected error in loading vision model: {str(e)}")
    return None, None
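# Optional sketch (not wired into the app): if the fp16 load above hits CUDA
# out-of-memory, 4-bit quantization via bitsandbytes is one fallback.
# BitsAndBytesConfig is the standard transformers API; compatibility with this
# remote-code model is an assumption worth verifying.
from transformers import BitsAndBytesConfig

def load_vision_model_4bit(model_id="microsoft/Phi-3.5-vision-instruct"):
    quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        trust_remote_code=True,
        quantization_config=quant_config,
        device_map="auto",
    )
    processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True, num_crops=16)
    return model, processor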
# Process audio input within a GPU-decorated function
@spaces.GPU
def process_audio_input(audio, whisper_processor, whisper_model):
    if whisper_processor is None or whisper_model is None:
        return "Error: Speech recognition model is not available. Please type your message instead."
    try:
        audio, sr = librosa.load(audio, sr=16000)
        input_features = whisper_processor(audio, sampling_rate=sr, return_tensors="pt").input_features.to(whisper_model.device)
        predicted_ids = whisper_model.generate(input_features)
        transcription = whisper_processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
        return transcription
    except Exception as e:
        return f"Error processing audio: {str(e)}. Please type your message instead."
# Process image input with the Phi-3.5-vision model
@spaces.GPU
def process_image_input(image, text_prompt, vision_model, processor):
    if vision_model is None or processor is None:
        return "Error: Vision model is not available."
    try:
        # Accept either a PIL Image (from Gradio) or a file path
        if not isinstance(image, Image.Image):
            image = Image.open(image)
        # Phi-3.5-vision expects an <|image_1|> placeholder in a chat-formatted
        # prompt, with the image itself passed separately to the processor
        messages = [{"role": "user", "content": f"<|image_1|>\n{text_prompt}"}]
        prompt = processor.tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        inputs = processor(prompt, [image], return_tensors="pt").to(vision_model.device)
        # Generate text
        with torch.no_grad():
            outputs = vision_model.generate(
                **inputs,
                max_new_tokens=100,
                do_sample=True,
                top_k=50,
                top_p=0.95,
                eos_token_id=processor.tokenizer.eos_token_id,
            )
        # Drop the prompt tokens so only the newly generated text is decoded
        outputs = outputs[:, inputs['input_ids'].shape[1]:]
        generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0]
        return generated_text
    except Exception as e:
        return f"Error processing image: {str(e)}"
# Generate response within a GPU-decorated function
@spaces.GPU
def generate_response(transcription, sarvam_pipe):
    if sarvam_pipe is None:
        return "Error: Text generation model is not available."
    try:
        # Generate a continuation with sarvam-2b; max_new_tokens bounds only the
        # generated text (max_length would also count the prompt)
        response = sarvam_pipe(transcription, max_new_tokens=100, num_return_sequences=1)[0]['generated_text']
        return response
    except Exception as e:
        return f"Error generating response: {str(e)}"
# Text-to-speech function
def text_to_speech(text, lang='hi'):
    try:
        # Use the Indian TLD for Indic languages; a dedicated Indic TTS engine
        # (e.g. a cloud TTS API) would be a better long-term fit, and this
        # branch is the natural place to swap one in
        if lang in ['hi', 'bn', 'gu', 'kn', 'ml', 'mr', 'or', 'pa', 'ta', 'te']:
            tts = gTTS(text=text, lang=lang, tld='co.in')
        else:
            tts = gTTS(text=text, lang=lang)
        tts.save("response.mp3")
        return "response.mp3"
    except Exception as e:
        print(f"Error in text-to-speech: {str(e)}")
        return None
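# Optional sketch (not wired into the app): write speech to a unique temp file
# instead of the fixed "response.mp3", which two concurrent requests would
# otherwise overwrite. tempfile is stdlib; gTTS.save() is the documented API.
import tempfile

def text_to_speech_tempfile(text, lang='hi'):
    tts = gTTS(text=text, lang=lang, tld='co.in')
    out = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
    out.close()
    tts.save(out.name)
    return out.name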
# Improved language detection function
def detect_language(text):
    lang_codes = {
        'bn': 'Bengali', 'gu': 'Gujarati', 'hi': 'Hindi', 'kn': 'Kannada',
        'ml': 'Malayalam', 'mr': 'Marathi', 'or': 'Oriya', 'pa': 'Punjabi',
        'ta': 'Tamil', 'te': 'Telugu', 'en': 'English'
    }
    try:
        detected_lang = detect(text)
        return detected_lang if detected_lang in lang_codes else 'en'
    except Exception:
        # Fallback to simple script-based detection
        if any(0x0900 <= ord(char) <= 0x097F for char in text):  # Devanagari script
            return 'hi'
        return 'en'  # Default to English if no Indic script is detected
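# Optional sketch (not wired into the app): the fallback above only covers
# Devanagari; standard Unicode block ranges extend it to other Indic scripts.
# The script-to-language mapping is a simplification (several languages can
# share a script, e.g. Hindi and Marathi both use Devanagari).
INDIC_SCRIPT_RANGES = {
    'hi': (0x0900, 0x097F),  # Devanagari
    'bn': (0x0980, 0x09FF),  # Bengali
    'pa': (0x0A00, 0x0A7F),  # Gurmukhi
    'gu': (0x0A80, 0x0AFF),  # Gujarati
    'or': (0x0B00, 0x0B7F),  # Oriya
    'ta': (0x0B80, 0x0BFF),  # Tamil
    'te': (0x0C00, 0x0C7F),  # Telugu
    'kn': (0x0C80, 0x0CFF),  # Kannada
    'ml': (0x0D00, 0x0D7F),  # Malayalam
}

def detect_script(text):
    for code, (lo, hi) in INDIC_SCRIPT_RANGES.items():
        if any(lo <= ord(ch) <= hi for ch in text):
            return code
    return 'en'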
@spaces.GPU
def indic_vision_assistant(input_type, audio_input, text_input, image_input):
    try:
        whisper_processor, whisper_model = load_whisper()
        sarvam_pipe = load_sarvam()
        vision_model, processor = load_vision_model()
        if input_type == "audio" and audio_input is not None:
            transcription = process_audio_input(audio_input, whisper_processor, whisper_model)
        elif input_type == "text" and text_input:
            transcription = text_input
        elif input_type == "image" and image_input is not None:
            # Use a default prompt if no text input is provided
            text_prompt = text_input if text_input else "Describe this image in detail."
            transcription = process_image_input(image_input, text_prompt, vision_model, processor)
        else:
            return "Please provide either audio, text, or image input.", "No input provided.", None
        response = generate_response(transcription, sarvam_pipe)
        lang = detect_language(response)
        audio_response = text_to_speech(response, lang)
        return transcription, response, audio_response
    except Exception as e:
        error_message = f"An error occurred: {str(e)}"
        return error_message, error_message, None
# Custom CSS
custom_css = """
body {
background-color: #0b0f19;
color: #e2e8f0;
font-family: 'Arial', sans-serif;
}
#custom-header {
text-align: center;
padding: 20px 0;
background-color: #1a202c;
margin-bottom: 20px;
border-radius: 10px;
}
#custom-header h1 {
font-size: 2.5rem;
margin-bottom: 0.5rem;
}
#custom-header h1 .blue {
color: #60a5fa;
}
#custom-header h1 .pink {
color: #f472b6;
}
#custom-header h2 {
font-size: 1.5rem;
color: #94a3b8;
}
.suggestions {
display: flex;
justify-content: center;
flex-wrap: wrap;
gap: 1rem;
margin: 20px 0;
}
.suggestion {
background-color: #1e293b;
border-radius: 0.5rem;
padding: 1rem;
display: flex;
align-items: center;
transition: transform 0.3s ease;
width: 200px;
}
.suggestion:hover {
transform: translateY(-5px);
}
.suggestion-icon {
font-size: 1.5rem;
margin-right: 1rem;
background-color: #2d3748;
padding: 0.5rem;
border-radius: 50%;
}
.gradio-container {
max-width: 100% !important;
}
#component-0, #component-1, #component-2 {
max-width: 100% !important;
}
footer {
text-align: center;
margin-top: 2rem;
color: #64748b;
}
"""
# Custom HTML for the header (minimal markup matching the #custom-header CSS
# above; the original header text was missing, so this wording is a placeholder)
custom_header = """
<div id="custom-header">
    <h1><span class="blue">Indic</span> <span class="pink">Vision</span> Assistant</h1>
    <h2>Multimodal AI for Indic languages</h2>
</div>
"""
# Custom HTML for suggestions (markup reconstructed to match the .suggestions CSS)
custom_suggestions = """
<div class="suggestions">
    <div class="suggestion"><span class="suggestion-icon">🎤</span> Speak in any Indic language</div>
    <div class="suggestion"><span class="suggestion-icon">⌨️</span> Type in any Indic language</div>
    <div class="suggestion"><span class="suggestion-icon">🖼️</span> Upload an image for analysis</div>
    <div class="suggestion"><span class="suggestion-icon">🤖</span> Get AI-generated responses</div>
    <div class="suggestion"><span class="suggestion-icon">🔊</span> Listen to audio responses</div>
</div>
"""
# Build the Gradio interface; the text box doubles as the prompt for image input
with gr.Blocks(css=custom_css, theme=gr.themes.Base().set(
    body_background_fill="#0b0f19",
    body_text_color="#e2e8f0",
    button_primary_background_fill="#3b82f6",
    button_primary_background_fill_hover="#2563eb",
    button_primary_text_color="white",
    block_title_text_color="#94a3b8",
    block_label_text_color="#94a3b8",
)) as iface:
    gr.HTML(custom_header)
    gr.HTML(custom_suggestions)
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### Indic Vision Assistant")
            input_type = gr.Radio(["audio", "text", "image"], label="Input Type", value="audio")
            audio_input = gr.Audio(type="filepath", label="Speak (if audio input selected)")
            text_input = gr.Textbox(label="Type your message or image prompt")
            image_input = gr.Image(type="pil", label="Upload an image (if image input selected)")
            submit_btn = gr.Button("Submit")
            output_transcription = gr.Textbox(label="Transcription/Input")
            output_response = gr.Textbox(label="Generated Response")
            output_audio = gr.Audio(label="Audio Response")
    submit_btn.click(
        fn=indic_vision_assistant,
        inputs=[input_type, audio_input, text_input, image_input],
        outputs=[output_transcription, output_response, output_audio]
    )
    gr.HTML("")  # Footer placeholder
# Launch the app
iface.launch()
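# When running outside Hugging Face Spaces, a temporary public URL can be
# requested instead: iface.launch(share=True)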