sagar007 committed
Commit fc8a89a · verified · 1 Parent(s): 57b203d

Update app.py

Files changed (1):
  1. app.py +359 -140
app.py CHANGED
@@ -1,124 +1,348 @@
-import os
 import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer, AutoProcessor, TextIteratorStreamer, BitsAndBytesConfig
 import gradio as gr
-from threading import Thread
 from PIL import Image
 import subprocess
-import spaces  # Add this import
-
-# Install flash-attention
 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)

-# Constants
-TITLE = "<h1><center>Phi 3.5 Multimodal (Text + Vision)</center></h1>"
-DESCRIPTION = "# Phi-3.5 Multimodal Demo (Text + Vision)"
-
-# Model configurations
-TEXT_MODEL_ID = "microsoft/Phi-3.5-mini-instruct"
-VISION_MODEL_ID = "microsoft/Phi-3.5-vision-instruct"
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-
-# Quantization config for text model
-quantization_config = BitsAndBytesConfig(
-    load_in_4bit=True,
-    bnb_4bit_compute_dtype=torch.bfloat16,
-    bnb_4bit_use_double_quant=True,
-    bnb_4bit_quant_type="nf4"
-)
-
-# Load models and tokenizers
-text_tokenizer = AutoTokenizer.from_pretrained(TEXT_MODEL_ID)
-text_model = AutoModelForCausalLM.from_pretrained(
-    TEXT_MODEL_ID,
-    torch_dtype=torch.bfloat16,
-    device_map="auto",
-    quantization_config=quantization_config
-)
-
-vision_model = AutoModelForCausalLM.from_pretrained(
-    VISION_MODEL_ID,
-    trust_remote_code=True,
-    torch_dtype="auto",
-    attn_implementation="flash_attention_2"
-).to(device).eval()
-
-vision_processor = AutoProcessor.from_pretrained(VISION_MODEL_ID, trust_remote_code=True)
-
-# Helper functions
 @spaces.GPU
-def stream_text_chat(message, history, system_prompt, temperature=0.8, max_new_tokens=1024, top_p=1.0, top_k=20):
-    conversation = [{"role": "system", "content": system_prompt}]
-    for prompt, answer in history:
-        conversation.extend([
-            {"role": "user", "content": prompt},
-            {"role": "assistant", "content": answer},
-        ])
-    conversation.append({"role": "user", "content": message})
-
-    input_ids = text_tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt").to(text_model.device)
-    streamer = TextIteratorStreamer(text_tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
-
-    generate_kwargs = dict(
-        input_ids=input_ids,
-        max_new_tokens=max_new_tokens,
-        do_sample=temperature > 0,
-        top_p=top_p,
-        top_k=top_k,
-        temperature=temperature,
-        eos_token_id=[128001, 128008, 128009],
-        streamer=streamer,
-    )

-    with torch.no_grad():
-        thread = Thread(target=text_model.generate, kwargs=generate_kwargs)
-        thread.start()

-    buffer = ""
-    for new_text in streamer:
-        buffer += new_text
-        yield history + [[message, buffer]]

-@spaces.GPU  # Add this decorator
-def process_vision_query(image, text_input):
-    prompt = f"<|user|>\n<|image_1|>\n{text_input}<|end|>\n<|assistant|>\n"
-    image = Image.fromarray(image).convert("RGB")
-    inputs = vision_processor(prompt, image, return_tensors="pt").to(device)
-
-    with torch.no_grad():
-        generate_ids = vision_model.generate(
-            **inputs,
-            max_new_tokens=1000,
-            eos_token_id=vision_processor.tokenizer.eos_token_id
         )

-    generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
-    response = vision_processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
-    return response

 # Custom CSS
 custom_css = """
-body { background-color: #0b0f19; color: #e2e8f0; font-family: 'Arial', sans-serif;}
-#custom-header { text-align: center; padding: 20px 0; background-color: #1a202c; margin-bottom: 20px; border-radius: 10px;}
-#custom-header h1 { font-size: 2.5rem; margin-bottom: 0.5rem;}
-#custom-header h1 .blue { color: #60a5fa;}
-#custom-header h1 .pink { color: #f472b6;}
-#custom-header h2 { font-size: 1.5rem; color: #94a3b8;}
-.suggestions { display: flex; justify-content: center; flex-wrap: wrap; gap: 1rem; margin: 20px 0;}
-.suggestion { background-color: #1e293b; border-radius: 0.5rem; padding: 1rem; display: flex; align-items: center; transition: transform 0.3s ease; width: 200px;}
-.suggestion:hover { transform: translateY(-5px);}
-.suggestion-icon { font-size: 1.5rem; margin-right: 1rem; background-color: #2d3748; padding: 0.5rem; border-radius: 50%;}
-.gradio-container { max-width: 100% !important;}
-#component-0, #component-1, #component-2 { max-width: 100% !important;}
-footer { text-align: center; margin-top: 2rem; color: #64748b;}
 """

 # Custom HTML for the header
 custom_header = """
 <div id="custom-header">
-    <h1><span class="blue">Phi 3.5</span> <span class="pink">Multimodal Assistant</span></h1>
-    <h2>Text and Vision AI at Your Service</h2>
 </div>
 """

@@ -126,25 +350,28 @@ custom_header = """
 custom_suggestions = """
 <div class="suggestions">
     <div class="suggestion">
-        <span class="suggestion-icon">💬</span>
-        <p>Chat with the Text Model</p>
     </div>
     <div class="suggestion">
         <span class="suggestion-icon">🖼️</span>
-        <p>Analyze Images with Vision Model</p>
     </div>
     <div class="suggestion">
         <span class="suggestion-icon">🤖</span>
         <p>Get AI-generated responses</p>
     </div>
     <div class="suggestion">
-        <span class="suggestion-icon">🔍</span>
-        <p>Explore advanced options</p>
     </div>
 </div>
 """
-
-# Gradio interface
 with gr.Blocks(css=custom_css, theme=gr.themes.Base().set(
     body_background_fill="#0b0f19",
     body_text_color="#e2e8f0",
@@ -153,38 +380,30 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Base().set(
     button_primary_text_color="white",
     block_title_text_color="#94a3b8",
     block_label_text_color="#94a3b8",
-)) as demo:
     gr.HTML(custom_header)
     gr.HTML(custom_suggestions)
-
-    with gr.Tab("Text Model (Phi-3.5-mini)"):
-        chatbot = gr.Chatbot(height=400)
-        msg = gr.Textbox(label="Message", placeholder="Type your message here...")
-        with gr.Accordion("Advanced Options", open=False):
-            system_prompt = gr.Textbox(value="You are a helpful assistant", label="System Prompt")
-            temperature = gr.Slider(minimum=0, maximum=1, step=0.1, value=0.8, label="Temperature")
-            max_new_tokens = gr.Slider(minimum=128, maximum=8192, step=1, value=1024, label="Max new tokens")
-            top_p = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1.0, label="top_p")
-            top_k = gr.Slider(minimum=1, maximum=20, step=1, value=20, label="top_k")
-
-        submit_btn = gr.Button("Submit", variant="primary")
-        clear_btn = gr.Button("Clear Chat", variant="secondary")
-
-        submit_btn.click(stream_text_chat, [msg, chatbot, system_prompt, temperature, max_new_tokens, top_p, top_k], [chatbot])
-        clear_btn.click(lambda: None, None, chatbot, queue=False)
-
-    with gr.Tab("Vision Model (Phi-3.5-vision)"):
-        with gr.Row():
-            with gr.Column(scale=1):
-                vision_input_img = gr.Image(label="Upload an Image", type="pil")
-                vision_text_input = gr.Textbox(label="Ask a question about the image", placeholder="What do you see in this image?")
-                vision_submit_btn = gr.Button("Analyze Image", variant="primary")
-            with gr.Column(scale=1):
-                vision_output_text = gr.Textbox(label="AI Analysis", lines=10)
-
-        vision_submit_btn.click(process_vision_query, [vision_input_img, vision_text_input], [vision_output_text])
-
-    gr.HTML("<footer>Powered by Phi 3.5 Multimodal AI</footer>")
-
-if __name__ == "__main__":
-    demo.launch()

+# Import spaces first to avoid CUDA initialization issues
+import spaces
+
+# Then import other libraries
 import torch
+import librosa
+from transformers import pipeline, WhisperProcessor, WhisperForConditionalGeneration, AutoModelForCausalLM, AutoProcessor
+from gtts import gTTS
 import gradio as gr
 from PIL import Image
+import os
+import base64
+from io import BytesIO
+
+import io
 import subprocess
+from langdetect import detect

+print("Using GPU for operations when available")
+
+# Install flash-attn
 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)

+# Function to safely load pipeline within a GPU-decorated function
 @spaces.GPU
+def load_pipeline(model_name, **kwargs):
+    try:
+        device = 0 if torch.cuda.is_available() else "cpu"
+        return pipeline(model=model_name, device=device, **kwargs)
+    except Exception as e:
+        print(f"Error loading {model_name} pipeline: {e}")
+        return None
+
+# Load Whisper model for speech recognition within a GPU-decorated function
+@spaces.GPU
+def load_whisper():
+    try:
+        device = 0 if torch.cuda.is_available() else "cpu"
+        processor = WhisperProcessor.from_pretrained("openai/whisper-small")
+        model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
+        return processor, model
+    except Exception as e:
+        print(f"Error loading Whisper model: {e}")
+        return None, None
+
+# Load sarvam-2b for text generation within a GPU-decorated function
+@spaces.GPU
+def load_sarvam():
+    return load_pipeline('sarvamai/sarvam-2b-v0.5')
+
+# Load Phi-3.5-vision-instruct model
+@spaces.GPU
+def load_vision_model():
+    try:
+        print("Starting to load vision model...")
+        model_id = "microsoft/Phi-3.5-vision-instruct"
+        print(f"Loading model from {model_id}")
+
+        # Check for CUDA availability
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+        print(f"Using device: {device}")
+
+        # Load model with potential memory optimization
+        model = AutoModelForCausalLM.from_pretrained(
+            model_id,
+            trust_remote_code=True,
+            torch_dtype=torch.float16,
+            use_flash_attention_2=True,  # Enable if supported
+            device_map="auto",  # Automatically manage model placement
+            low_cpu_mem_usage=True
         )
+        print("Model loaded successfully")
+
+        print("Loading processor...")
+        processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True, num_crops=16)
+        print("Processor loaded successfully")
+
+        return model, processor
+    except ImportError as e:
+        print(f"Error importing required modules: {str(e)}")
+        print("Please ensure all required dependencies are installed.")
+    except RuntimeError as e:
+        print(f"Runtime error (possibly CUDA out of memory): {str(e)}")
+        print("Consider using a smaller model or enabling GPU offloading.")
+    except Exception as e:
+        print(f"Unexpected error in loading vision model: {str(e)}")
+
+    return None, None
+
+
+# Process audio input within a GPU-decorated function
+@spaces.GPU
+def process_audio_input(audio, whisper_processor, whisper_model):
+    if whisper_processor is None or whisper_model is None:
+        return "Error: Speech recognition model is not available. Please type your message instead."
+
+    try:
+        audio, sr = librosa.load(audio, sr=16000)
+        input_features = whisper_processor(audio, sampling_rate=sr, return_tensors="pt").input_features.to(whisper_model.device)
+        predicted_ids = whisper_model.generate(input_features)
+        transcription = whisper_processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
+        return transcription
+    except Exception as e:
+        return f"Error processing audio: {str(e)}. Please type your message instead."
+
+# Updated process_image_input function
+@spaces.GPU
+def process_image_input(image, text_prompt, vision_model, processor):
+    if vision_model is None or processor is None:
+        return "Error: Vision model is not available."
+
+    try:
+        # Convert image to base64
+        if isinstance(image, Image.Image):
+            buffered = BytesIO()
+            image.save(buffered, format="PNG")
+            img_str = base64.b64encode(buffered.getvalue()).decode()
+        else:
+            # If it's not a PIL Image, assume it's a file path
+            with open(image, "rb") as image_file:
+                img_str = base64.b64encode(image_file.read()).decode()
+
+        # Format the input with image tag
+        formatted_prompt = f"{text_prompt}\n<image>data:image/png;base64,{img_str}</image>"
+
+        # Process the formatted prompt
+        inputs = processor(text=formatted_prompt, return_tensors="pt").to(vision_model.device)
+
+        # Generate text
+        with torch.no_grad():
+            outputs = vision_model.generate(
+                **inputs,
+                max_new_tokens=100,
+                do_sample=True,
+                top_k=50,
+                top_p=0.95,
+                num_return_sequences=1
+            )
+
+        generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0]
+        return generated_text
+    except Exception as e:
+        return f"Error processing image: {str(e)}"
+
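Note that the formatted prompt above embeds the image as a base64 <image> tag but never hands the PIL image itself to the processor, so the vision model may not actually receive the pixels. For comparison, the process_vision_query removed above passed the image directly alongside an <|image_1|> placeholder; a minimal sketch of that pattern, assuming image is a PIL.Image and vision_model/processor come from load_vision_model():

    prompt = f"<|user|>\n<|image_1|>\n{text_prompt}<|end|>\n<|assistant|>\n"
    inputs = processor(prompt, image, return_tensors="pt").to(vision_model.device)
    with torch.no_grad():
        generate_ids = vision_model.generate(**inputs, max_new_tokens=100,
                                             eos_token_id=processor.tokenizer.eos_token_id)
    # Strip the prompt tokens before decoding the answer
    generate_ids = generate_ids[:, inputs["input_ids"].shape[1]:]
    response = processor.batch_decode(generate_ids, skip_special_tokens=True)[0]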
+# Generate response within a GPU-decorated function
+@spaces.GPU
+def generate_response(transcription, sarvam_pipe):
+    if sarvam_pipe is None:
+        return "Error: Text generation model is not available."
+
+    try:
+        # Generate response using the sarvam-2b model
+        response = sarvam_pipe(transcription, max_length=100, num_return_sequences=1)[0]['generated_text']
+        return response
+    except Exception as e:
+        return f"Error generating response: {str(e)}"
+
+# Text-to-speech function
+def text_to_speech(text, lang='hi'):
+    try:
+        # Use a better TTS engine for Indic languages
+        if lang in ['hi', 'bn', 'gu', 'kn', 'ml', 'mr', 'or', 'pa', 'ta', 'te']:
+            # A dedicated Indic TTS engine (e.g. Google Cloud Text-to-Speech or a
+            # specialized Indic-language library) would be preferable here.
+            # This is a placeholder for a better Indic TTS solution.
+            tts = gTTS(text=text, lang=lang, tld='co.in')  # Use Indian TLD
+        else:
+            tts = gTTS(text=text, lang=lang)
+
+        tts.save("response.mp3")
+        return "response.mp3"
+    except Exception as e:
+        print(f"Error in text-to-speech: {str(e)}")
+        return None
+
+# Improved language detection function
+def detect_language(text):
+    lang_codes = {
+        'bn': 'Bengali', 'gu': 'Gujarati', 'hi': 'Hindi', 'kn': 'Kannada',
+        'ml': 'Malayalam', 'mr': 'Marathi', 'or': 'Oriya', 'pa': 'Punjabi',
+        'ta': 'Tamil', 'te': 'Telugu', 'en': 'English'
+    }
+
+    try:
+        detected_lang = detect(text)
+        return detected_lang if detected_lang in lang_codes else 'en'
+    except Exception:
+        # Fallback to simple script-based detection
+        if any(0x0900 <= ord(char) <= 0x097F for char in text):  # Devanagari script
+            return 'hi'
+        return 'en'  # Default to English if no Indic script is detected
+
+@spaces.GPU
+def indic_vision_assistant(input_type, audio_input, text_input, image_input):
+    try:
+        whisper_processor, whisper_model = load_whisper()
+        sarvam_pipe = load_sarvam()
+        vision_model, processor = load_vision_model()
+
+        if input_type == "audio" and audio_input is not None:
+            transcription = process_audio_input(audio_input, whisper_processor, whisper_model)
+        elif input_type == "text" and text_input:
+            transcription = text_input
+        elif input_type == "image" and image_input is not None:
+            # Use a default prompt if no text input is provided
+            text_prompt = text_input if text_input else "Describe this image in detail."
+            transcription = process_image_input(image_input, text_prompt, vision_model, processor)
+        else:
+            return "Please provide either audio, text, or image input.", "No input provided.", None
+
+        response = generate_response(transcription, sarvam_pipe)
+        lang = detect_language(response)
+        audio_response = text_to_speech(response, lang)
+
+        return transcription, response, audio_response
+    except Exception as e:
+        error_message = f"An error occurred: {str(e)}"
+        return error_message, error_message, None
+
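For a text-only request, the pieces above chain as follows; a minimal sketch, assuming load_sarvam() succeeded (the Hindi prompt is only an illustration):

    sarvam_pipe = load_sarvam()
    reply = generate_response("नमस्ते, आप कैसे हैं?", sarvam_pipe)
    lang = detect_language(reply)             # e.g. 'hi'
    audio_path = text_to_speech(reply, lang)  # "response.mp3", or None on failure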

 # Custom CSS
 custom_css = """
+body {
+    background-color: #0b0f19;
+    color: #e2e8f0;
+    font-family: 'Arial', sans-serif;
+}
+#custom-header {
+    text-align: center;
+    padding: 20px 0;
+    background-color: #1a202c;
+    margin-bottom: 20px;
+    border-radius: 10px;
+}
+#custom-header h1 {
+    font-size: 2.5rem;
+    margin-bottom: 0.5rem;
+}
+#custom-header h1 .blue {
+    color: #60a5fa;
+}
+#custom-header h1 .pink {
+    color: #f472b6;
+}
+#custom-header h2 {
+    font-size: 1.5rem;
+    color: #94a3b8;
+}
+.suggestions {
+    display: flex;
+    justify-content: center;
+    flex-wrap: wrap;
+    gap: 1rem;
+    margin: 20px 0;
+}
+.suggestion {
+    background-color: #1e293b;
+    border-radius: 0.5rem;
+    padding: 1rem;
+    display: flex;
+    align-items: center;
+    transition: transform 0.3s ease;
+    width: 200px;
+}
+.suggestion:hover {
+    transform: translateY(-5px);
+}
+.suggestion-icon {
+    font-size: 1.5rem;
+    margin-right: 1rem;
+    background-color: #2d3748;
+    padding: 0.5rem;
+    border-radius: 50%;
+}
+.gradio-container {
+    max-width: 100% !important;
+}
+#component-0, #component-1, #component-2 {
+    max-width: 100% !important;
+}
+footer {
+    text-align: center;
+    margin-top: 2rem;
+    color: #64748b;
+}
 """

 # Custom HTML for the header
 custom_header = """
 <div id="custom-header">
+    <h1>
+        <span class="blue">Hello,</span>
+        <span class="pink">User</span>
+    </h1>
+    <h2>How can I help you today?</h2>
 </div>
 """

 custom_suggestions = """
 <div class="suggestions">
     <div class="suggestion">
+        <span class="suggestion-icon">🎤</span>
+        <p>Speak in any Indic language</p>
+    </div>
+    <div class="suggestion">
+        <span class="suggestion-icon">⌨️</span>
+        <p>Type in any Indic language</p>
     </div>
     <div class="suggestion">
         <span class="suggestion-icon">🖼️</span>
+        <p>Upload an image for analysis</p>
     </div>
     <div class="suggestion">
         <span class="suggestion-icon">🤖</span>
         <p>Get AI-generated responses</p>
     </div>
     <div class="suggestion">
+        <span class="suggestion-icon">🔊</span>
+        <p>Listen to audio responses</p>
     </div>
 </div>
 """
+# Update the Gradio interface to allow text input for image processing
 with gr.Blocks(css=custom_css, theme=gr.themes.Base().set(
     body_background_fill="#0b0f19",
     body_text_color="#e2e8f0",
     button_primary_text_color="white",
     block_title_text_color="#94a3b8",
     block_label_text_color="#94a3b8",
+)) as iface:
     gr.HTML(custom_header)
     gr.HTML(custom_suggestions)
+
+    with gr.Row():
+        with gr.Column(scale=1):
+            gr.Markdown("### Indic Vision Assistant")
+
+            input_type = gr.Radio(["audio", "text", "image"], label="Input Type", value="audio")
+            audio_input = gr.Audio(type="filepath", label="Speak (if audio input selected)")
+            text_input = gr.Textbox(label="Type your message or image prompt")
+            image_input = gr.Image(type="pil", label="Upload an image (if image input selected)")
+
+            submit_btn = gr.Button("Submit")
+
+            output_transcription = gr.Textbox(label="Transcription/Input")
+            output_response = gr.Textbox(label="Generated Response")
+            output_audio = gr.Audio(label="Audio Response")
+
+            submit_btn.click(
+                fn=indic_vision_assistant,
+                inputs=[input_type, audio_input, text_input, image_input],
+                outputs=[output_transcription, output_response, output_audio]
+            )
+    gr.HTML("<footer>Powered by Indic Language AI with Vision Capabilities</footer>")
+# Launch the app
+iface.launch()
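For a quick smoke test outside the UI, the click handler can also be called directly; a minimal sketch, assuming an environment where the models can load (the arguments mirror the Radio, Audio, Textbox and Image components wired above):

    transcription, response, audio_path = indic_vision_assistant(
        "text",                  # input_type
        None,                    # audio_input not used
        "Tell me about Hampi.",  # text_input
        None,                    # image_input not used
    )
    print(transcription, response, audio_path)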