RufusRubin777 committed
Commit ce8797e · verified · 1 Parent(s): 9318992

Create app.py

Files changed (1)
  1. app.py +91 -0
app.py ADDED
@@ -0,0 +1,91 @@
+ import gradio as gr
+ from PIL import Image
+ import json
+ from byaldi import RAGMultiModalModel
+ from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
+ from qwen_vl_utils import process_vision_info
+ import torch
+
+ # Load the ColPali retriever and the Qwen2-VL model/processor
+ def load_models():
+     RAG = RAGMultiModalModel.from_pretrained("vidore/colpali")
+     model = Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-2B-Instruct",
+                                                             trust_remote_code=True, torch_dtype=torch.float32)  # float32 for CPU
+     processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", trust_remote_code=True)
+     return RAG, model, processor
+
+ RAG, model, processor = load_models()
+
+ # Run OCR on the image, then search the extracted text for a keyword
+ def ocr_and_search(image, keyword):
+
+     text_query = "Extract all the text in Sanskrit and English from the image."
+
+     # Prepare the chat message for the Qwen2-VL model
+     messages = [
+         {
+             "role": "user",
+             "content": [
+                 {"type": "image", "image": image},
+                 {"type": "text", "text": text_query},
+             ],
+         }
+     ]
+
+     # Apply the chat template and build model inputs from the image
+     text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+     image_inputs, video_inputs = process_vision_info(messages)
+     inputs = processor(
+         text=[text],
+         images=image_inputs,
+         videos=video_inputs,
+         padding=True,
+         return_tensors="pt",
+     ).to("cpu")  # run on CPU
+
+     # Generate the OCR text and strip the prompt tokens from the output
+     with torch.no_grad():
+         generated_ids = model.generate(**inputs, max_new_tokens=2000)
+     generated_ids_trimmed = [out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
+     extracted_text = processor.batch_decode(
+         generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+     )[0]
+
+     # Serialize the extracted text to JSON
+     output_json = {"query": text_query, "extracted_text": extracted_text}
+     json_output = json.dumps(output_json, ensure_ascii=False, indent=4)
+
+     # Case-insensitive keyword search over sentence splits
+     keyword_lower = keyword.lower()
+     sentences = extracted_text.split('. ')
+     matched_sentences = [sentence for sentence in sentences if keyword_lower in sentence.lower()]
+
+     return extracted_text, matched_sentences, json_output
+
+
+ # Gradio app entry point
+ def app(image, keyword):
+
+     extracted_text, search_results, json_output = ocr_and_search(image, keyword)
+
+     search_results_str = "\n".join(search_results) if search_results else "No matches found."
+
+     return extracted_text, search_results_str, json_output
+
+ # Gradio interface
+ iface = gr.Interface(
+     fn=app,
+     inputs=[
+         gr.Image(type="pil", label="Upload an Image"),
+         gr.Textbox(label="Enter keyword to search in extracted text", placeholder="Keyword")
+     ],
+     outputs=[
+         gr.Textbox(label="Extracted Text"),
+         gr.Textbox(label="Search Results"),
+         gr.JSON(label="JSON Output")
+     ],
+     title="OCR and Keyword Search in Images",
+ )
+
+ # Launch the Gradio app
+ iface.launch()
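
For reference, a minimal requirements.txt sketch for running this app (e.g., as a Hugging Face Space), inferred from the imports in app.py; the file itself is not part of this commit, and version pins are left unspecified as an assumption:

    gradio
    Pillow
    byaldi
    transformers
    qwen-vl-utils
    torch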