sagar007 committed on
Commit a5e055b · verified · 1 Parent(s): 1cbbafe

Create app.py

Files changed (1)
  1. app.py +309 -0
app.py ADDED
@@ -0,0 +1,309 @@
+ import torch
+ import librosa
+ from transformers import pipeline, WhisperProcessor, WhisperForConditionalGeneration, AutoModelForCausalLM, AutoProcessor
+ from gtts import gTTS
+ import gradio as gr
+ import spaces
+ from PIL import Image
+ import os
+ import io
+ import subprocess
+ from langdetect import detect
+
+ print("Using GPU for operations when available")
+
+ # Install flash-attn without building its CUDA extension. Merge the flag into a
+ # copy of the current environment: passing a bare dict as env would drop PATH
+ # and break pip.
+ subprocess.run(
+     'pip install flash-attn --no-build-isolation',
+     env={**os.environ, 'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"},
+     shell=True,
+ )
+
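+ # Note (assumption): the Space's remaining dependencies (transformers, librosa,
+ # gtts, gradio, langdetect) are pinned in its requirements.txt; only flash-attn
+ # needs this install-time workaround.
+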
+ # Function to safely load a pipeline within a GPU-decorated function
+ @spaces.GPU
+ def load_pipeline(model_name, **kwargs):
+     try:
+         device = 0 if torch.cuda.is_available() else "cpu"
+         return pipeline(model=model_name, device=device, **kwargs)
+     except Exception as e:
+         print(f"Error loading {model_name} pipeline: {e}")
+         return None
+
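+ # Note: load_pipeline returns None on failure, so every caller below checks
+ # for None instead of letting a load error crash the whole app.
+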
+ # Load Whisper model for speech recognition within a GPU-decorated function
+ @spaces.GPU
+ def load_whisper():
+     try:
+         device = 0 if torch.cuda.is_available() else "cpu"
+         processor = WhisperProcessor.from_pretrained("openai/whisper-small")
+         model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
+         return processor, model
+     except Exception as e:
+         print(f"Error loading Whisper model: {e}")
+         return None, None
+
+ # Load sarvam-2b for text generation within a GPU-decorated function
+ @spaces.GPU
+ def load_sarvam():
+     return load_pipeline('sarvamai/sarvam-2b-v0.5')
+
+ # Load Phi-3.5-vision-instruct model
+ @spaces.GPU
+ def load_vision_model():
+     try:
+         model_id = "microsoft/Phi-3.5-vision-instruct"
+         # _attn_implementation="eager" replaces the deprecated
+         # use_flash_attention_2=False flag
+         model = AutoModelForCausalLM.from_pretrained(
+             model_id, trust_remote_code=True, torch_dtype=torch.float16,
+             _attn_implementation="eager"
+         )
+         if torch.cuda.is_available():
+             model = model.to("cuda")
+         processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True, num_crops=16)
+         return model, processor
+     except Exception as e:
+         print(f"Error loading vision model: {e}")
+         return None, None
+
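+ # Note (rough estimate): Phi-3.5-vision has about 4B parameters, so float16
+ # weights alone need roughly 8 GB of GPU memory.
+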
+ # Process audio input within a GPU-decorated function
+ @spaces.GPU
+ def process_audio_input(audio, whisper_processor, whisper_model):
+     if whisper_processor is None or whisper_model is None:
+         return "Error: Speech recognition model is not available. Please type your message instead."
+
+     try:
+         audio, sr = librosa.load(audio, sr=16000)
+         input_features = whisper_processor(audio, sampling_rate=sr, return_tensors="pt").input_features.to(whisper_model.device)
+         predicted_ids = whisper_model.generate(input_features)
+         transcription = whisper_processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
+         return transcription
+     except Exception as e:
+         return f"Error processing audio: {str(e)}. Please type your message instead."
+
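+ # Note: whisper-small auto-detects the spoken language here. To pin a specific
+ # language one could pass decoder prompt ids built with the processor's
+ # get_decoder_prompt_ids helper; this sketch leaves the default behavior.
+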
+ # Process image input
+ @spaces.GPU
+ def process_image_input(image, vision_model, vision_processor):
+     if vision_model is None or vision_processor is None:
+         return "Error: Vision model is not available."
+
+     try:
+         # Phi-3.5-vision cannot be called with images alone: the processor needs
+         # a chat-formatted prompt containing an <|image_1|> placeholder. The
+         # description request below is an assumed default instruction.
+         messages = [{"role": "user", "content": "<|image_1|>\nDescribe this image in detail."}]
+         prompt = vision_processor.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+         inputs = vision_processor(prompt, [image], return_tensors="pt")
+         inputs = {k: v.to(vision_model.device) for k, v in inputs.items()}
+
+         with torch.no_grad():
+             outputs = vision_model.generate(**inputs, max_new_tokens=512, do_sample=True, top_k=50, top_p=0.95)
+
+         # Decode only the newly generated tokens, not the echoed prompt
+         generated_ids = outputs[:, inputs["input_ids"].shape[1]:]
+         generated_text = vision_processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+         return generated_text
+     except Exception as e:
+         return f"Error processing image: {str(e)}"
+
+ # Generate response within a GPU-decorated function
+ @spaces.GPU
+ def generate_response(transcription, sarvam_pipe):
+     if sarvam_pipe is None:
+         return "Error: Text generation model is not available."
+
+     try:
+         # Generate a response using the sarvam-2b model; max_new_tokens bounds
+         # the output without counting the prompt, unlike max_length
+         response = sarvam_pipe(transcription, max_new_tokens=100, num_return_sequences=1)[0]['generated_text']
+         return response
+     except Exception as e:
+         return f"Error generating response: {str(e)}"
+
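+ # Note: by default the text-generation pipeline echoes the prompt inside
+ # 'generated_text'; passing return_full_text=False would return only the
+ # completion.
+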
+ # Text-to-speech function
+ def text_to_speech(text, lang='hi'):
+     try:
+         # gTTS is a stopgap for Indic languages; a dedicated Indic TTS engine
+         # (or the Google Cloud Text-to-Speech API) would give better quality
+         if lang in ['hi', 'bn', 'gu', 'kn', 'ml', 'mr', 'or', 'pa', 'ta', 'te']:
+             tts = gTTS(text=text, lang=lang, tld='co.in')  # Use the Indian TLD
+         else:
+             tts = gTTS(text=text, lang=lang)
+
+         tts.save("response.mp3")
+         return "response.mp3"
+     except Exception as e:
+         print(f"Error in text-to-speech: {str(e)}")
+         return None
+
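+ # Note: every request overwrites the shared "response.mp3" file; writing to a
+ # per-request temp file (e.g. via tempfile.NamedTemporaryFile) would avoid
+ # clobbering under concurrent use.
+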
+ # Improved language detection function
+ def detect_language(text):
+     lang_codes = {
+         'bn': 'Bengali', 'gu': 'Gujarati', 'hi': 'Hindi', 'kn': 'Kannada',
+         'ml': 'Malayalam', 'mr': 'Marathi', 'or': 'Oriya', 'pa': 'Punjabi',
+         'ta': 'Tamil', 'te': 'Telugu', 'en': 'English'
+     }
+
+     try:
+         detected_lang = detect(text)
+         return detected_lang if detected_lang in lang_codes else 'en'
+     except Exception:
+         # Fallback to simple script-based detection: Devanagari code points
+         # (U+0900-U+097F) suggest Hindi
+         if any(0x0900 <= ord(char) <= 0x097F for char in text):
+             return 'hi'
+         return 'en'  # Default to English if no Indic script is detected
+
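+ # Expected behavior (assumption): detect_language("नमस्ते") should yield 'hi',
+ # while empty or unrecognized input falls back to 'en'.
+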
+ @spaces.GPU
+ def indic_vision_assistant(input_type, audio_input, text_input, image_input):
+     try:
+         # Load models within the GPU-decorated function
+         whisper_processor, whisper_model = load_whisper()
+         sarvam_pipe = load_sarvam()
+         vision_model, vision_processor = load_vision_model()
+
+         if input_type == "audio" and audio_input is not None:
+             transcription = process_audio_input(audio_input, whisper_processor, whisper_model)
+         elif input_type == "text" and text_input:
+             transcription = text_input
+         elif input_type == "image" and image_input is not None:
+             transcription = process_image_input(image_input, vision_model, vision_processor)
+         else:
+             return "Please provide either audio, text, or image input.", "No input provided.", None
+
+         response = generate_response(transcription, sarvam_pipe)
+         lang = detect_language(response)
+         audio_response = text_to_speech(response, lang)
+
+         return transcription, response, audio_response
+     except Exception as e:
+         error_message = f"An error occurred: {str(e)}"
+         return error_message, error_message, None
+
+ # Custom CSS
+ custom_css = """
+ body {
+     background-color: #0b0f19;
+     color: #e2e8f0;
+     font-family: 'Arial', sans-serif;
+ }
+ #custom-header {
+     text-align: center;
+     padding: 20px 0;
+     background-color: #1a202c;
+     margin-bottom: 20px;
+     border-radius: 10px;
+ }
+ #custom-header h1 {
+     font-size: 2.5rem;
+     margin-bottom: 0.5rem;
+ }
+ #custom-header h1 .blue {
+     color: #60a5fa;
+ }
+ #custom-header h1 .pink {
+     color: #f472b6;
+ }
+ #custom-header h2 {
+     font-size: 1.5rem;
+     color: #94a3b8;
+ }
+ .suggestions {
+     display: flex;
+     justify-content: center;
+     flex-wrap: wrap;
+     gap: 1rem;
+     margin: 20px 0;
+ }
+ .suggestion {
+     background-color: #1e293b;
+     border-radius: 0.5rem;
+     padding: 1rem;
+     display: flex;
+     align-items: center;
+     transition: transform 0.3s ease;
+     width: 200px;
+ }
+ .suggestion:hover {
+     transform: translateY(-5px);
+ }
+ .suggestion-icon {
+     font-size: 1.5rem;
+     margin-right: 1rem;
+     background-color: #2d3748;
+     padding: 0.5rem;
+     border-radius: 50%;
+ }
+ .gradio-container {
+     max-width: 100% !important;
+ }
+ #component-0, #component-1, #component-2 {
+     max-width: 100% !important;
+ }
+ footer {
+     text-align: center;
+     margin-top: 2rem;
+     color: #64748b;
+ }
+ """
+
+ # Custom HTML for the header
+ custom_header = """
+ <div id="custom-header">
+     <h1>
+         <span class="blue">Hello,</span>
+         <span class="pink">User</span>
+     </h1>
+     <h2>How can I help you today?</h2>
+ </div>
+ """
+
+ # Custom HTML for suggestions
+ custom_suggestions = """
+ <div class="suggestions">
+     <div class="suggestion">
+         <span class="suggestion-icon">🎤</span>
+         <p>Speak in any Indic language</p>
+     </div>
+     <div class="suggestion">
+         <span class="suggestion-icon">⌨️</span>
+         <p>Type in any Indic language</p>
+     </div>
+     <div class="suggestion">
+         <span class="suggestion-icon">🖼️</span>
+         <p>Upload an image for analysis</p>
+     </div>
+     <div class="suggestion">
+         <span class="suggestion-icon">🤖</span>
+         <p>Get AI-generated responses</p>
+     </div>
+     <div class="suggestion">
+         <span class="suggestion-icon">🔊</span>
+         <p>Listen to audio responses</p>
+     </div>
+ </div>
+ """
+
+ # Create Gradio interface
+ with gr.Blocks(css=custom_css, theme=gr.themes.Base().set(
+     body_background_fill="#0b0f19",
+     body_text_color="#e2e8f0",
+     button_primary_background_fill="#3b82f6",
+     button_primary_background_fill_hover="#2563eb",
+     button_primary_text_color="white",
+     block_title_text_color="#94a3b8",
+     block_label_text_color="#94a3b8",
+ )) as iface:
+     gr.HTML(custom_header)
+     gr.HTML(custom_suggestions)
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             gr.Markdown("### Indic Vision Assistant")
+
+             input_type = gr.Radio(["audio", "text", "image"], label="Input Type", value="audio")
+             audio_input = gr.Audio(type="filepath", label="Speak (if audio input selected)")
+             text_input = gr.Textbox(label="Type your message (if text input selected)")
+             image_input = gr.Image(type="pil", label="Upload an image (if image input selected)")
+
+             submit_btn = gr.Button("Submit")
+
+             output_transcription = gr.Textbox(label="Transcription/Input")
+             output_response = gr.Textbox(label="Generated Response")
+             output_audio = gr.Audio(label="Audio Response")
+
+     submit_btn.click(
+         fn=indic_vision_assistant,
+         inputs=[input_type, audio_input, text_input, image_input],
+         outputs=[output_transcription, output_response, output_audio]
+     )
+     gr.HTML("<footer>Powered by Indic Language AI with Vision Capabilities</footer>")
+
+ # Launch the app
+ iface.launch()