andito (HF staff) committed
Commit c8f76e0
1 Parent(s): b7139b3

remove unused parts

Files changed (1)
  1. app.py +9 -147
app.py CHANGED
@@ -26,12 +26,6 @@ DESCRIPTION = "# [Florence-2-DocVQA Demo](https://huggingface.co/HuggingFaceM4/F
 colormap = ['blue','orange','green','purple','brown','pink','gray','olive','cyan','red',
             'lime','indigo','violet','aqua','magenta','coral','gold','tan','skyblue']
 
-def fig_to_pil(fig):
-    buf = io.BytesIO()
-    fig.savefig(buf, format='png')
-    buf.seek(0)
-    return Image.open(buf)
-
 @spaces.GPU
 def run_example(task_prompt, image, text_input=None):
     if text_input is None:
@@ -53,138 +47,14 @@ def run_example(task_prompt, image, text_input=None):
         task=task_prompt,
         image_size=(image.width, image.height)
     )
-    return parsed_answer[task_prompt]#.replace("<pad>", "")
-
-def plot_bbox(image, data):
-    fig, ax = plt.subplots()
-    ax.imshow(image)
-    for bbox, label in zip(data['bboxes'], data['labels']):
-        x1, y1, x2, y2 = bbox
-        rect = patches.Rectangle((x1, y1), x2-x1, y2-y1, linewidth=1, edgecolor='r', facecolor='none')
-        ax.add_patch(rect)
-        plt.text(x1, y1, label, color='white', fontsize=8, bbox=dict(facecolor='red', alpha=0.5))
-    ax.axis('off')
-    return fig
-
-def draw_polygons(image, prediction, fill_mask=False):
-
-    draw = ImageDraw.Draw(image)
-    scale = 1
-    for polygons, label in zip(prediction['polygons'], prediction['labels']):
-        color = random.choice(colormap)
-        fill_color = random.choice(colormap) if fill_mask else None
-        for _polygon in polygons:
-            _polygon = np.array(_polygon).reshape(-1, 2)
-            if len(_polygon) < 3:
-                print('Invalid polygon:', _polygon)
-                continue
-            _polygon = (_polygon * scale).reshape(-1).tolist()
-            if fill_mask:
-                draw.polygon(_polygon, outline=color, fill=fill_color)
-            else:
-                draw.polygon(_polygon, outline=color)
-            draw.text((_polygon[0] + 8, _polygon[1] + 2), label, fill=color)
-    return image
-
-def convert_to_od_format(data):
-    bboxes = data.get('bboxes', [])
-    labels = data.get('bboxes_labels', [])
-    od_results = {
-        'bboxes': bboxes,
-        'labels': labels
-    }
-    return od_results
-
-def draw_ocr_bboxes(image, prediction):
-    scale = 1
-    draw = ImageDraw.Draw(image)
-    bboxes, labels = prediction['quad_boxes'], prediction['labels']
-    for box, label in zip(bboxes, labels):
-        color = random.choice(colormap)
-        new_box = (np.array(box) * scale).tolist()
-        draw.polygon(new_box, width=3, outline=color)
-        draw.text((new_box[0]+8, new_box[1]+2),
-                  "{}".format(label),
-                  align="right",
-                  fill=color)
-    return image
-
-def process_image(image, task_prompt, text_input=None):
+    return parsed_answer
+
+def process_image(image, text_input=None):
     image = Image.fromarray(image)  # Convert NumPy array to PIL Image
-    if task_prompt == 'Document Visual Question Answering':
-        task_prompt = '<DocVQA>'
-        results = run_example(task_prompt, image, text_input)
-        return results, None
-    elif task_prompt == 'Caption':
-        task_prompt = '<CAPTION>'
-        results = run_example(task_prompt, image)
-        return results, None
-    elif task_prompt == 'Detailed Caption':
-        task_prompt = '<DETAILED_CAPTION>'
-        results = run_example(task_prompt, image)
-        return results, None
-    elif task_prompt == 'More Detailed Caption':
-        task_prompt = '<MORE_DETAILED_CAPTION>'
-        results = run_example(task_prompt, image)
-        return results, None
-    elif task_prompt == 'Object Detection':
-        task_prompt = '<OD>'
-        results = run_example(task_prompt, image)
-        fig = plot_bbox(image, results['<OD>'])
-        return results, fig_to_pil(fig)
-    elif task_prompt == 'Dense Region Caption':
-        task_prompt = '<DENSE_REGION_CAPTION>'
-        results = run_example(task_prompt, image)
-        fig = plot_bbox(image, results['<DENSE_REGION_CAPTION>'])
-        return results, fig_to_pil(fig)
-    elif task_prompt == 'Region Proposal':
-        task_prompt = '<REGION_PROPOSAL>'
-        results = run_example(task_prompt, image)
-        fig = plot_bbox(image, results['<REGION_PROPOSAL>'])
-        return results, fig_to_pil(fig)
-    elif task_prompt == 'Caption to Phrase Grounding':
-        task_prompt = '<CAPTION_TO_PHRASE_GROUNDING>'
-        results = run_example(task_prompt, image, text_input)
-        fig = plot_bbox(image, results['<CAPTION_TO_PHRASE_GROUNDING>'])
-        return results, fig_to_pil(fig)
-    elif task_prompt == 'Referring Expression Segmentation':
-        task_prompt = '<REFERRING_EXPRESSION_SEGMENTATION>'
-        results = run_example(task_prompt, image, text_input)
-        output_image = copy.deepcopy(image)
-        output_image = draw_polygons(output_image, results['<REFERRING_EXPRESSION_SEGMENTATION>'], fill_mask=True)
-        return results, output_image
-    elif task_prompt == 'Region to Segmentation':
-        task_prompt = '<REGION_TO_SEGMENTATION>'
-        results = run_example(task_prompt, image, text_input)
-        output_image = copy.deepcopy(image)
-        output_image = draw_polygons(output_image, results['<REGION_TO_SEGMENTATION>'], fill_mask=True)
-        return results, output_image
-    elif task_prompt == 'Open Vocabulary Detection':
-        task_prompt = '<OPEN_VOCABULARY_DETECTION>'
-        results = run_example(task_prompt, image, text_input)
-        bbox_results = convert_to_od_format(results['<OPEN_VOCABULARY_DETECTION>'])
-        fig = plot_bbox(image, bbox_results)
-        return results, fig_to_pil(fig)
-    elif task_prompt == 'Region to Category':
-        task_prompt = '<REGION_TO_CATEGORY>'
-        results = run_example(task_prompt, image, text_input)
-        return results, None
-    elif task_prompt == 'Region to Description':
-        task_prompt = '<REGION_TO_DESCRIPTION>'
-        results = run_example(task_prompt, image, text_input)
-        return results, None
-    elif task_prompt == 'OCR':
-        task_prompt = '<OCR>'
-        results = run_example(task_prompt, image)
-        return results, None
-    elif task_prompt == 'OCR with Region':
-        task_prompt = '<OCR_WITH_REGION>'
-        results = run_example(task_prompt, image)
-        output_image = copy.deepcopy(image)
-        output_image = draw_ocr_bboxes(output_image, results['<OCR_WITH_REGION>'])
-        return results, output_image
-    else:
-        return "", None  # Return empty string and None for unknown task prompts
+    task_prompt = '<DocVQA>'
+    results = run_example(task_prompt, image, text_input)[task_prompt].replace("<pad>", "")
+    return results, None
+
 
 css = """
 #output {
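The hunk above replaces the multi-task dispatcher with a DocVQA-only process_image and makes run_example return the full parsed dictionary rather than a pre-extracted string. The imports, the model and processor setup, and the body of run_example above the quoted lines are untouched by this commit and therefore not visible in the diff. As a rough sketch of the simplified inference path after the change, assuming the Space loads the HuggingFaceM4/Florence-2-DocVQA checkpoint named in its title and follows the standard Florence-2 generation recipe (everything outside the quoted hunks is an assumption, not the actual file contents):

import spaces
import numpy as np
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

# Assumed setup, not part of this diff: checkpoint name taken from the demo
# title, loaded with the usual Florence-2 trust_remote_code pattern.
model_id = "HuggingFaceM4/Florence-2-DocVQA"
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True).to("cuda").eval()
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

@spaces.GPU
def run_example(task_prompt, image, text_input=None):
    # Body assumed from the standard Florence-2 sample code; only the tail of the
    # post_process_generation call and the final return appear in this diff.
    prompt = task_prompt if text_input is None else task_prompt + text_input
    inputs = processor(text=prompt, images=image, return_tensors="pt").to("cuda")
    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        pixel_values=inputs["pixel_values"],
        max_new_tokens=1024,
        num_beams=3,
    )
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
    parsed_answer = processor.post_process_generation(
        generated_text,
        task=task_prompt,
        image_size=(image.width, image.height),
    )
    return parsed_answer

def process_image(image, text_input=None):
    image = Image.fromarray(image)  # Convert NumPy array to PIL Image
    task_prompt = '<DocVQA>'
    results = run_example(task_prompt, image, text_input)[task_prompt].replace("<pad>", "")
    return results, None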
@@ -200,14 +70,6 @@ with gr.Blocks(css=css) as demo:
     with gr.Row():
         with gr.Column():
             input_img = gr.Image(label="Input Picture")
-            task_prompt = gr.Dropdown(choices=[
-                'Document Visual Question Answering',
-                'Caption', 'Detailed Caption', 'More Detailed Caption', 'Object Detection',
-                'Dense Region Caption', 'Region Proposal', 'Caption to Phrase Grounding',
-                'Referring Expression Segmentation', 'Region to Segmentation',
-                'Open Vocabulary Detection', 'Region to Category', 'Region to Description',
-                'OCR', 'OCR with Region'
-            ], label="Task Prompt", value='Document Visual Question Answering')
             text_input = gr.Textbox(label="Text Input (optional)")
             submit_btn = gr.Button(value="Submit")
         with gr.Column():
@@ -219,13 +81,13 @@ with gr.Blocks(css=css) as demo:
             ["image1.jpg", 'Object Detection'],
             ["image2.jpg", 'OCR with Region']
         ],
-        inputs=[input_img, task_prompt],
+        inputs=[input_img],
         outputs=[output_text, output_img],
         fn=process_image,
         cache_examples=True,
         label='Try examples'
     )
 
-    submit_btn.click(process_image, [input_img, task_prompt, text_input], [output_text, output_img])
+    submit_btn.click(process_image, [input_img, text_input], [output_text, output_img])
 
 demo.launch(debug=True)
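With the dropdown gone, the submit handler now feeds only the image and the optional question into process_image. A minimal sketch of exercising the simplified entry point outside the Gradio UI (the file name and question below are illustrative only):

import numpy as np
from PIL import Image

# gr.Image hands the callback a NumPy array, so mimic that here;
# process_image converts it back to a PIL image before calling run_example.
img = np.array(Image.open("image1.jpg"))  # hypothetical local file
answer, _ = process_image(img, "What is the title of this document?")  # second return value is always None
print(answer)  # the '<DocVQA>' answer string with '<pad>' tokens stripped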
 