tdurbor committed
Commit a5da205
1 Parent(s): 439f303

Add zoom mechanism

Files changed (1)
  1. app.py +80 -29
app.py CHANGED
@@ -23,7 +23,7 @@ from db import (
 
 # Load environment variables
 load_dotenv()
-token = os.getenv("HUGGINGFACE_HUB_TOKEN")
+huggingface_token = os.getenv("HUGGINGFACE_HUB_TOKEN")
 
 # Configure logging
 logging.basicConfig(level=logging.INFO)
@@ -37,12 +37,12 @@ JSON_DATASET_DIR = Path("data/json_dataset")
 JSON_DATASET_DIR.mkdir(parents=True, exist_ok=True)
 
 # Initialize CommitScheduler if running in space
-scheduler = CommitScheduler(
+commit_scheduler = CommitScheduler(
     repo_id="bgsys/votes_datasets_test2",
     repo_type="dataset",
     folder_path=JSON_DATASET_DIR,
     path_in_repo="data",
-    token=token
+    token=huggingface_token
 ) if is_running_in_space() else None
 
 def fetch_elo_scores():
@@ -178,10 +178,12 @@ def gradio_interface():
         outputs=feedback_output
     )
 
-    filename, input_image, segmented_a, segmented_b, a_name, b_name = select_new_image()
-    model_a_name = gr.State(a_name)
-    model_b_name = gr.State(b_name)
-    fpath_input = gr.State(filename)
+    filename, input_image, segmented_a, segmented_b, model_a_name, model_b_name = select_new_image()
+    state_segmented_a = gr.State(segmented_a)
+    state_segmented_b = gr.State(segmented_b)
+    state_model_a_name = gr.State(model_a_name)
+    state_model_b_name = gr.State(model_b_name)
+    state_filename = gr.State(filename)
 
     # Compute the absolute difference between the masks
     mask_difference = compute_mask_difference(segmented_a, segmented_b)
@@ -189,7 +191,6 @@ def gradio_interface():
     with gr.Row():
         image_a_display = gr.Image(
             value=segmented_a,
-            type="pil",
             label="Model A",
             width=500,
             height=500
@@ -202,16 +203,15 @@
         )
         image_b_display = gr.Image(
             value=segmented_b,
-            type="pil",
             label="Model B",
             width=500,
             height=500
         )
-    tie = gr.State("Tie")
+    state_tie = gr.State("Tie")
     with gr.Row():
-        vote_a_btn = gr.Button("👈 A is better")
-        vote_tie = gr.Button("🤝 Tie")
-        vote_b_btn = gr.Button("👉 B is better")
+        vote_a_button = gr.Button("👈 A is better")
+        vote_tie_button = gr.Button("🤝 Tie")
+        vote_b_button = gr.Button("👉 B is better")
 
     def vote_for_model(choice, original_filename, model_a_name, model_b_name, user_username):
         """Submit a vote for a model and return updated images and model names."""
@@ -231,40 +231,91 @@ def gradio_interface():
         except Exception as e:
             logging.error("Error recording vote: %s", str(e))
 
-        new_fpath_input, new_input_image, new_segmented_a, new_segmented_b, new_a_name, new_b_name = select_new_image()
-        model_a_name.value = new_a_name
-        model_b_name.value = new_b_name
-        fpath_input.value = new_fpath_input
+        new_filename, new_input_image, new_segmented_a, new_segmented_b, new_model_a_name, new_model_b_name = select_new_image()
+        model_a_name.value = new_model_a_name
+        model_b_name.value = new_model_b_name
+        original_filename.value = new_filename
+        state_segmented_a.value = new_segmented_a
+        state_segmented_b.value = new_segmented_b
 
         mask_difference = compute_mask_difference(new_segmented_a, new_segmented_b)
 
         # Update the notice markdown with the new vote count
         new_notice_markdown = get_notice_markdown()
 
-        return (fpath_input.value, (new_input_image, [(mask_difference, button_name)]), new_segmented_a,
-                new_segmented_b, model_a_name.value, model_b_name.value, new_notice_markdown)
+        return (original_filename.value, (new_input_image, [(mask_difference, button_name)]), new_segmented_a,
+                new_segmented_b, model_a_name.value, model_b_name.value, new_notice_markdown, state_segmented_a.value, state_segmented_b.value)
 
-    vote_a_btn.click(
-        fn=lambda username: vote_for_model("model_a", fpath_input, model_a_name, model_b_name, username),
+    vote_a_button.click(
+        fn=lambda username: vote_for_model("model_a", state_filename, state_model_a_name, state_model_b_name, username),
         inputs=username_input,
         outputs=[
-            fpath_input, input_image_display, image_a_display, image_b_display, model_a_name, model_b_name, notice_markdown
+            state_filename, input_image_display, image_a_display, image_b_display, state_model_a_name, state_model_b_name, notice_markdown, state_segmented_a, state_segmented_b
         ]
     )
-    vote_b_btn.click(
-        fn=lambda username: vote_for_model("model_b", fpath_input, model_a_name, model_b_name, username),
+    vote_b_button.click(
+        fn=lambda username: vote_for_model("model_b", state_filename, state_model_a_name, state_model_b_name, username),
         inputs=username_input,
        outputs=[
-            fpath_input, input_image_display, image_a_display, image_b_display, model_a_name, model_b_name, notice_markdown
+            state_filename, input_image_display, image_a_display, image_b_display, state_model_a_name, state_model_b_name, notice_markdown, state_segmented_a, state_segmented_b
         ]
     )
-    vote_tie.click(
-        fn=lambda username: vote_for_model("tie", fpath_input, model_a_name, model_b_name, username),
+    vote_tie_button.click(
+        fn=lambda username: vote_for_model("tie", state_filename, state_model_a_name, state_model_b_name, username),
         inputs=username_input,
         outputs=[
-            fpath_input, input_image_display, image_a_display, image_b_display, model_a_name, model_b_name, notice_markdown
+            state_filename, input_image_display, image_a_display, image_b_display, state_model_a_name, state_model_b_name, notice_markdown, state_segmented_a, state_segmented_b
         ]
     )
+
+
+    def handle_zoom(image, event: gr.SelectData, zoomed_state, segmented_image):
+        """Toggle between zoomed and original image based on click events."""
+        if zoomed_state:
+            return gr.Image(
+                value=segmented_image,
+                label="Model",
+                width=500,
+                height=500
+            ), False
+
+        start_row, start_col = event.index[1], event.index[0]
+        zoom_size = max(10, min(image.shape[:2]) // 10)
+
+        row_start, row_end = max(start_row - zoom_size, 0), min(start_row + zoom_size, image.shape[0])
+        col_start, col_end = max(start_col - zoom_size, 0), min(start_col + zoom_size, image.shape[1])
+
+        grey_image = np.mean(image, axis=-1, keepdims=True).astype(image.dtype)
+        grey_image = np.repeat(grey_image, image.shape[-1], axis=-1)
+        output_image = grey_image.copy()
+
+        zoomed_area = image[row_start:row_end, col_start:col_end]
+        upscale_factor = 6
+        zoomed_area_upscaled = np.kron(zoomed_area, np.ones((upscale_factor, upscale_factor, 1)))
+
+        center_row, center_col = start_row, start_col
+        row_start_upscaled = max(center_row - zoomed_area_upscaled.shape[0] // 2, 0)
+        row_end_upscaled = min(center_row + zoomed_area_upscaled.shape[0] // 2, output_image.shape[0])
+        col_start_upscaled = max(center_col - zoomed_area_upscaled.shape[1] // 2, 0)
+        col_end_upscaled = min(center_col + zoomed_area_upscaled.shape[1] // 2, output_image.shape[1])
+
+        row_start_zoomed = max(0, -row_start_upscaled)
+        row_end_zoomed = row_start_zoomed + (row_end_upscaled - row_start_upscaled)
+        col_start_zoomed = max(0, -col_start_upscaled)
+        col_end_zoomed = col_start_zoomed + (col_end_upscaled - col_start_upscaled)
+
+        row_end_zoomed = min(row_end_zoomed, zoomed_area_upscaled.shape[0])
+        col_end_zoomed = min(col_end_zoomed, zoomed_area_upscaled.shape[1])
+
+        output_image[row_start_upscaled:row_end_upscaled, col_start_upscaled:col_end_upscaled] = \
+            zoomed_area_upscaled[row_start_zoomed:row_end_zoomed, col_start_zoomed:col_end_zoomed]
+
+        return output_image, True
+
+    zoomed_state_a = gr.State(False)
+    zoomed_state_b = gr.State(False)
+    image_a_display.select(handle_zoom, [image_a_display, zoomed_state_a, state_segmented_a], [image_a_display, zoomed_state_a])
+    image_b_display.select(handle_zoom, [image_b_display, zoomed_state_b, state_segmented_b], [image_b_display, zoomed_state_b])
 
     with gr.Tab("🏆 Leaderboard", id=1) as leaderboard_tab:
         rankings_table = gr.Dataframe(
@@ -369,7 +420,7 @@ def dump_database_to_json():
 
     json_file_path = JSON_DATASET_DIR / "votes.json"
     # Upload to Hugging Face
-    with scheduler.lock:
+    with commit_scheduler.lock:
         with json_file_path.open("w") as f:
             json.dump(json_data, f, indent=4)
 
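
Note on the zoom mechanism: the new handle_zoom callback is driven by the click coordinates Gradio supplies through gr.SelectData (event.index is the clicked (x, y) pixel of the image), and its core is a crop-and-upscale step built on np.kron, which replicates every pixel of the cropped patch into a k x k block (a nearest-neighbour upscale). The snippet below is a minimal, standalone sketch of just that step so it can be tried without the app; the function name zoom_patch, the default zoom_size, and the dummy image are illustrative assumptions, not part of this commit (the committed callback additionally greys out the rest of the image and pastes the upscaled patch back around the click position).

import numpy as np

def zoom_patch(image: np.ndarray, click_xy: tuple, zoom_size: int = 20, upscale_factor: int = 6) -> np.ndarray:
    """Crop a square patch around a click and upscale it by pixel replication."""
    col, row = click_xy  # gr.SelectData.index for an Image is (x, y) = (col, row)
    h, w = image.shape[:2]

    # Clamp the crop window to the image bounds.
    r0, r1 = max(row - zoom_size, 0), min(row + zoom_size, h)
    c0, c1 = max(col - zoom_size, 0), min(col + zoom_size, w)
    patch = image[r0:r1, c0:c1]

    # np.kron with a (k, k, 1) block of ones repeats each pixel k times along
    # both spatial axes, i.e. a nearest-neighbour upscale of the patch.
    return np.kron(patch, np.ones((upscale_factor, upscale_factor, 1), dtype=image.dtype))

if __name__ == "__main__":
    img = np.random.randint(0, 255, (500, 500, 3), dtype=np.uint8)  # dummy RGB image
    zoomed = zoom_patch(img, click_xy=(250, 120))
    print(zoomed.shape)  # (240, 240, 3): a 40x40 crop upscaled 6x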