Leyo committed
Commit bc1a623
1 Parent(s): 6890dab

Fix saving

Files changed (1):
  1. app_dialogue.py +47 -76
app_dialogue.py CHANGED

@@ -77,7 +77,7 @@ def load_image_from_url(url):
 
 
 def img_to_bytes(image_path):
-    image = Image.open(image_path)
+    image = Image.open(image_path).convert(mode='RGB')
     buffer = io.BytesIO()
     image.save(buffer, format="JPEG")
     img_bytes = buffer.getvalue()
@@ -253,29 +253,31 @@ def flag_dope(
     top_p,
 ):
     images = []
+    conversation = []
     for ex in chat_history:
         if isinstance(ex[0], dict):
-            images.append(ex[0]["file"])
-            prev_ex_is_image = True
-
-    if len(images) == 0:
-        black_image = Image.new('RGB', (20, 20), (0, 0, 0))
-        black_image.save("/tmp/gradio/fake_image.png")
-        image_flag = {'path': "/tmp/gradio/fake_image.png", 'size': None, 'orig_name': None, 'mime_type': 'image/png', 'is_stream': False, 'meta': {'_type': 'gradio.FileData'}}
-    else:
-        image_flag = images[0]
-    dope_dataset_writer.flag(
-        flag_data=[
-            model_selector,
-            image_flag,
-            chat_history,
-            decoding_strategy,
-            temperature,
-            max_new_tokens,
-            repetition_penalty,
-            top_p,
-        ]
-    )
+            images.append(img_to_bytes(ex[0]["file"]["path"]))
+        else:
+
+            conversation.append({"User": ex[0], "Assistant": ex[1]})
+
+    data = {
+        "model_selector": [model_selector],
+        "images": [images],
+        "conversation": [conversation],
+        "decoding_strategy": [decoding_strategy],
+        "temperature": [temperature],
+        "max_new_tokens": [max_new_tokens],
+        "repetition_penalty": [repetition_penalty],
+        "top_p": [top_p],
+    }
+    try:
+        ds = datasets.load_dataset("HuggingFaceM4/problematic-dataset-red-teaming", split="train", token=HF_WRITE_TOKEN)
+        new_data = datasets.Dataset.from_dict(data, features=FEATURES)
+        hf_dataset = datasets.concatenate_datasets([ds, new_data])
+    except Exception:
+        hf_dataset = datasets.Dataset.from_dict(data, features=FEATURES)
+    hf_dataset.push_to_hub("HuggingFaceM4/problematic-dataset-red-teaming", split="train", token=HF_WRITE_TOKEN, private=True)
 
 
 def flag_problematic(
@@ -288,28 +290,31 @@ def flag_problematic(
     top_p,
 ):
     images = []
+    conversation = []
     for ex in chat_history:
         if isinstance(ex[0], dict):
-            images.append(ex[0]["file"])
-
-    if len(images) == 0:
-        black_image = Image.new('RGB', (20, 20), (0, 0, 0))
-        black_image.save("/tmp/gradio/fake_image.png")
-        image_flag = {'path': "/tmp/gradio/fake_image.png", 'size': None, 'orig_name': None, 'mime_type': 'image/png', 'is_stream': False, 'meta': {'_type': 'gradio.FileData'}}
-    else:
-        image_flag = images[0]
-    problematic_dataset_writer.flag(
-        flag_data=[
-            model_selector,
-            image_flag,
-            chat_history,
-            decoding_strategy,
-            temperature,
-            max_new_tokens,
-            repetition_penalty,
-            top_p,
-        ]
-    )
+            images.append(img_to_bytes(ex[0]["file"]["path"]))
+        else:
+
+            conversation.append({"User": ex[0], "Assistant": ex[1]})
+
+    data = {
+        "model_selector": [model_selector],
+        "images": [images],
+        "conversation": [conversation],
+        "decoding_strategy": [decoding_strategy],
+        "temperature": [temperature],
+        "max_new_tokens": [max_new_tokens],
+        "repetition_penalty": [repetition_penalty],
+        "top_p": [top_p],
+    }
+    try:
+        ds = datasets.load_dataset("HuggingFaceM4/problematic-dataset-red-teaming", split="train", token=HF_WRITE_TOKEN)
+        new_data = datasets.Dataset.from_dict(data, features=FEATURES)
+        hf_dataset = datasets.concatenate_datasets([ds, new_data])
+    except Exception:
+        hf_dataset = datasets.Dataset.from_dict(data, features=FEATURES)
+    hf_dataset.push_to_hub("HuggingFaceM4/problematic-dataset-red-teaming", split="train", token=HF_WRITE_TOKEN, private=True)
 
 
 # Hyper-parameters for generation
@@ -368,12 +373,6 @@ chatbot = gr.Chatbot(
     height=450,
 )
 
-dope_dataset_writer = gr.HuggingFaceDatasetSaver(
-    HF_WRITE_TOKEN, "HuggingFaceM4/dope-dataset", private=True
-)
-problematic_dataset_writer = gr.HuggingFaceDatasetSaver(
-    HF_WRITE_TOKEN, "HuggingFaceM4/problematic-dataset", private=True
-)
 # Using Flagging for saving dope and problematic examples
 # Dope examples flagging
 
@@ -460,20 +459,6 @@ with gr.Blocks(
             dope_bttn = gr.Button("Dope🔥")
         with gr.Column(scale=1, min_width=50):
             problematic_bttn = gr.Button("Problematic😬")
-
-    dope_dataset_writer.setup(
-        [
-            model_selector,
-            image_flag,
-            chatbot,
-            decoding_strategy,
-            temperature,
-            max_new_tokens,
-            repetition_penalty,
-            top_p,
-        ],
-        "gradio_dope_data_points",
-    )
     dope_bttn.click(
        fn=flag_dope,
        inputs=[
@@ -488,20 +473,6 @@ with gr.Blocks(
         outputs=None,
         preprocess=False,
     )
-    # Problematic examples flagging
-    problematic_dataset_writer.setup(
-        [
-            model_selector,
-            image_flag,
-            chatbot,
-            decoding_strategy,
-            temperature,
-            max_new_tokens,
-            repetition_penalty,
-            top_p,
-        ],
-        "gradio_problematic_data_points",
-    )
     problematic_bttn.click(
        fn=flag_problematic,
        inputs=[
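
Note on the img_to_bytes hunk: flagged images can arrive as PNG/WebP uploads in RGBA or palette mode, which Pillow refuses to encode as JPEG, so converting to RGB first avoids a failure when the new flagging code calls img_to_bytes on every image in the chat history. A minimal standalone sketch of that failure mode and the fix (not taken from the app itself):

import io

from PIL import Image

# An RGBA image (e.g. a transparent PNG upload) cannot be written as JPEG directly.
rgba = Image.new("RGBA", (20, 20), (0, 0, 0, 0))
try:
    rgba.save(io.BytesIO(), format="JPEG")
except OSError as err:
    print("RGBA -> JPEG fails:", err)  # "cannot write mode RGBA as JPEG"

# Converting to RGB first, as the patched img_to_bytes now does, succeeds.
buffer = io.BytesIO()
rgba.convert(mode="RGB").save(buffer, format="JPEG")
print("JPEG bytes:", len(buffer.getvalue()))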
 
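
Both flag callbacks now depend on a module-level FEATURES schema and an HF_WRITE_TOKEN defined elsewhere in app_dialogue.py and not visible in this diff. As a rough sketch only (the real schema may differ), a FEATURES definition consistent with the data dict built above, plus the load-concatenate-push pattern factored into a hypothetical helper, could look like:

import datasets

# Assumed schema; the actual FEATURES in app_dialogue.py is not shown in this diff.
FEATURES = datasets.Features(
    {
        "model_selector": datasets.Value("string"),
        "images": datasets.Sequence(datasets.Value("binary")),  # JPEG bytes from img_to_bytes
        "conversation": [{"User": datasets.Value("string"), "Assistant": datasets.Value("string")}],
        "decoding_strategy": datasets.Value("string"),
        "temperature": datasets.Value("float32"),
        "max_new_tokens": datasets.Value("int32"),
        "repetition_penalty": datasets.Value("float32"),
        "top_p": datasets.Value("float32"),
    }
)


def append_and_push(data: dict, repo_id: str, token: str) -> None:
    """Hypothetical helper mirroring the try/except in flag_dope/flag_problematic:
    append one flagged row to a Hub dataset, creating the dataset on the first push."""
    new_row = datasets.Dataset.from_dict(data, features=FEATURES)
    try:
        existing = datasets.load_dataset(repo_id, split="train", token=token)
        combined = datasets.concatenate_datasets([existing, new_row])
    except Exception:
        # No dataset on the Hub yet (first flag) or it cannot be read: start fresh.
        combined = new_row
    combined.push_to_hub(repo_id, split="train", token=token, private=True)

With such a helper, each callback would only need to build data as in the diff and call append_and_push(data, "HuggingFaceM4/problematic-dataset-red-teaming", HF_WRITE_TOKEN).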