Aumkeshchy2003 committed
Commit 814e245 · verified · 1 Parent(s): 806f921

Create app.py

Files changed (1)
app.py +74 -0
app.py ADDED
@@ -0,0 +1,74 @@
from typing import Optional, Tuple

import re

import gradio as gr
import pytesseract
import soundfile as sf
import torch
from datasets import load_dataset
from PIL import Image
from transformers import AutoProcessor, AutoModelForTextToSpectrogram, SpeechT5HifiGan

def tesseract_ocr(filepath: str) -> str:
    """Extract text from an image using Tesseract's English + Hindi models."""
    image = Image.open(filepath)
    return pytesseract.image_to_string(image=image, lang="eng+hin")

def search_and_highlight(text: str, keyword: str) -> str:
    """Wrap case-insensitive matches of the keyword in <mark> tags."""
    if keyword:
        # re.escape prevents regex metacharacters in the keyword from breaking the pattern
        return re.sub(f"({re.escape(keyword)})", r"<mark>\1</mark>", text, flags=re.IGNORECASE)
    return text

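# Illustrative check (not part of the original commit): the escaped,
# case-insensitive substitution preserves the original casing of each match, e.g.
#   search_and_highlight("Hello World", "world")
#   -> 'Hello <mark>World</mark>'
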
def ocr_and_tts(filepath: str, keyword: str) -> Tuple[str, Optional[str]]:
    """Run OCR on the uploaded image, highlight the keyword, and synthesize speech."""
    if filepath is None:
        return "Please upload an image.", None

    # OCR and keyword highlighting
    extracted_text = tesseract_ocr(filepath)
    highlighted_text = search_and_highlight(extracted_text, keyword)

    # Convert the plain (unhighlighted) text to speech
    audio_path = text_to_speech(extracted_text)

    return highlighted_text, audio_path

# Load the fine-tuned SpeechT5 model and the HiFi-GAN vocoder
processor = AutoProcessor.from_pretrained("Aumkeshchy2003/speecht5_finetuned_Aumkesh_English_tts")
model = AutoModelForTextToSpectrogram.from_pretrained("Aumkeshchy2003/speecht5_finetuned_Aumkesh_English_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

# Load a speaker embedding (x-vector) that fixes the voice identity
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)

# Move models to the GPU if one is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
vocoder = vocoder.to(device)
speaker_embeddings = speaker_embeddings.to(device)

@torch.inference_mode()
def text_to_speech(text: str) -> str:
    """Synthesize speech for the given text and return the path to a 16 kHz WAV file."""
    inputs = processor(text=text, return_tensors="pt").to(device)
    speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
    output_path = "output.wav"
    sf.write(output_path, speech.cpu().numpy(), samplerate=16000)
    return output_path

demo = gr.Interface(
    fn=ocr_and_tts,
    inputs=[
        gr.Image(type="filepath", label="Upload Image for OCR"),
        gr.Textbox(label="Keyword to Highlight", placeholder="Enter a keyword..."),
    ],
    outputs=[
        gr.HTML(label="Extracted and Highlighted Text"),
        gr.Audio(label="Generated Speech"),
    ],
    title="OCR to TTS",
    description="Upload an image for OCR. The extracted text is highlighted wherever the keyword matches, and is also converted to speech.",
)

if __name__ == "__main__":
    demo.launch()
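
Note on dependencies (not part of this commit, so the file contents below are an assumption, not the author's configuration): pytesseract is only a wrapper around the Tesseract binary, so the Space also needs the engine and its English/Hindi language data installed, and the Python packages imported by app.py must be available to pip. On Hugging Face Spaces this is conventionally done with a packages.txt (apt packages) alongside requirements.txt, roughly:

    packages.txt
        tesseract-ocr
        tesseract-ocr-eng
        tesseract-ocr-hin

    requirements.txt
        gradio
        pytesseract
        Pillow
        torch
        transformers
        datasets
        soundfile
        sentencepiece  # required by the SpeechT5 tokenizer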