yonatanbitton committed on
Commit
4b2d726
·
1 Parent(s): c2d8e85
Files changed (1) hide show
  1. app.py +146 -141
app.py CHANGED
@@ -1,133 +1,15 @@
1
- # import math
2
- # from datasets import load_dataset
3
- # import gradio as gr
4
- # import os
5
- #
6
- # # auth_token = os.environ.get("auth_token")
7
- # auth_token = os.environ.get("HF_TOKEN")
8
- # Visual_Riddles = load_dataset("nitzanguetta/Visual_Riddles", token=auth_token, trust_remote_code=True)['test']
9
- # # print(f"Loaded WHOOPS!, first example:")
10
- # # print(whoops[0])
11
- # dataset_size = len(Visual_Riddles)
12
- #
13
- # IMAGE = 'Image'
14
- # QUESTION = 'Question'
15
- # ANSWER = "Answer"
16
- # CAPTION = "Image caption"
17
- # PROMPT = "Prompt"
18
- # MODEL_NAME = "Model name"
19
- # HINT = "Hint"
20
- # ATTRIBUTION = "Attribution"
21
- # DLI = "Difficulty Level Index"
22
- # CATEGORY = "Category"
23
- # DESIGNER = "Designer"
24
- #
25
- #
26
- # left_side_columns = [IMAGE]
27
- # right_side_columns = [x for x in Visual_Riddles.features.keys() if x not in left_side_columns]
28
- # right_side_columns.remove('Image file name')
29
- # # right_side_columns.remove('Question')
30
- # # enumerate_cols = [CROWD_CAPTIONS, CROWD_EXPLANATIONS, CROWD_UNDERSPECIFIED_CAPTIONS]
31
- # emoji_to_label = {IMAGE: '🎨, πŸ§‘β€πŸŽ¨, πŸ’»', ANSWER: 'πŸ’‘, πŸ€”, πŸ§‘β€πŸŽ¨', QUESTION: '❓, πŸ€”, πŸ’‘', CATEGORY: 'πŸ€”, πŸ“š, πŸ’‘',
32
- # CAPTION: 'πŸ“, πŸ‘Œ, πŸ’¬', PROMPT: 'πŸ“, πŸ’»', MODEL_NAME: '🎨, πŸ’»', HINT:'πŸ€”, πŸ”',
33
- # ATTRIBUTION: 'πŸ”, πŸ“„', DLI:"🌑️, πŸ€”, 🎯", DESIGNER:"πŸ§‘β€πŸŽ¨"}
34
- # # batch_size = 16
35
- # batch_size = 8
36
- # target_size = (1024, 1024)
37
- #
38
- #
39
- # def func(index):
40
- # start_index = index * batch_size
41
- # end_index = start_index + batch_size
42
- # all_examples = [Visual_Riddles[index] for index in list(range(start_index, end_index))]
43
- # values_lst = []
44
- # for example_idx, example in enumerate(all_examples):
45
- # values = get_instance_values(example)
46
- # values_lst += values
47
- # return values_lst
48
- #
49
- #
50
- # def get_instance_values(example):
51
- # values = []
52
- # for k in left_side_columns + right_side_columns:
53
- # if k == IMAGE:
54
- # value = example["Image"].resize(target_size)
55
- # # elif k in enumerate_cols:
56
- # # value = list_to_string(example[k])
57
- # # elif k == QA:
58
- # # qa_list = [f"Q: {x[0]} A: {x[1]}" for x in example[k]]
59
- # # value = list_to_string(qa_list)
60
- # else:
61
- # value = example[k]
62
- # values.append(value)
63
- # return values
64
- # def list_to_string(lst):
65
- # return '\n'.join(['{}. {}'.format(i+1, item) for i, item in enumerate(lst)])
66
- #
67
- # demo = gr.Blocks()
68
- #
69
- #
70
- # def get_col(example):
71
- # instance_values = get_instance_values(example)
72
- # with gr.Column():
73
- # inputs_left = []
74
- # assert len(left_side_columns) == len(
75
- # instance_values[:len(left_side_columns)]) # excluding the image & designer
76
- # for key, value in zip(left_side_columns, instance_values[:len(left_side_columns)]):
77
- # if key == IMAGE:
78
- # img_resized = example["Image"].resize(target_size)
79
- # # input_k = gr.Image(value=img_resized, label=example['commonsense_category'])
80
- # input_k = gr.Image(value=img_resized)
81
- # else:
82
- # label = key.capitalize().replace("_", " ")
83
- # input_k = gr.Textbox(value=value, label=f"{label} {emoji_to_label[key]}")
84
- # inputs_left.append(input_k)
85
- # with gr.Accordion("Click for details", open=False):
86
- # # with gr.Accordion(example[QUESTION], open=False):
87
- # text_inputs_right = []
88
- # assert len(right_side_columns) == len(
89
- # instance_values[len(left_side_columns):]) # excluding the image & designer
90
- # for key, value in zip(right_side_columns, instance_values[len(left_side_columns):]):
91
- # label = key.capitalize().replace("_", " ")
92
- # num_lines = max(1, len(value) // 50 + (len(value) % 50 > 0)) # Assuming ~50 chars per line
93
- # text_input_k = gr.Textbox(value=value, label=f"{label} {emoji_to_label[key]}", lines=num_lines)
94
- # text_inputs_right.append(text_input_k)
95
- # return inputs_left, text_inputs_right
96
- #
97
- #
98
- # with demo:
99
- # gr.Markdown("# Slide to iterate Visual Riddles")
100
- #
101
- # with gr.Column():
102
- # num_batches = math.ceil(dataset_size / batch_size)
103
- # slider = gr.Slider(minimum=0, maximum=num_batches, step=1, label=f'Page (out of {num_batches})')
104
- # with gr.Row():
105
- # index = slider.value
106
- # start_index = 0 * batch_size
107
- # end_index = start_index + batch_size
108
- # all_examples = [Visual_Riddles[index] for index in list(range(start_index, end_index))]
109
- # all_inputs_left_right = []
110
- # for example_idx, example in enumerate(all_examples):
111
- # inputs_left, text_inputs_right = get_col(example)
112
- # inputs_left_right = inputs_left + text_inputs_right
113
- # all_inputs_left_right += inputs_left_right
114
- #
115
- # slider.change(func, inputs=[slider], outputs=all_inputs_left_right)
116
- #
117
- # demo.launch()
118
-
119
-
120
  import math
121
  from datasets import load_dataset
122
  import gradio as gr
123
  import os
124
 
125
- # Set up environment variables and load dataset
126
  auth_token = os.environ.get("HF_TOKEN")
127
  Visual_Riddles = load_dataset("nitzanguetta/Visual_Riddles", token=auth_token, trust_remote_code=True)['test']
 
 
128
  dataset_size = len(Visual_Riddles)
129
 
130
- # Define attributes
131
  IMAGE = 'Image'
132
  QUESTION = 'Question'
133
  ANSWER = "Answer"
@@ -140,19 +22,20 @@ DLI = "Difficulty Level Index"
140
  CATEGORY = "Category"
141
  DESIGNER = "Designer"
142
 
 
143
  left_side_columns = [IMAGE]
144
  right_side_columns = [x for x in Visual_Riddles.features.keys() if x not in left_side_columns]
145
  right_side_columns.remove('Image file name')
146
-
147
- emoji_to_label = {
148
- IMAGE: '🎨, πŸ§‘β€πŸŽ¨, πŸ’»', ANSWER: 'πŸ’‘, πŸ€”, πŸ§‘β€πŸŽ¨', QUESTION: '❓, πŸ€”, πŸ’‘', CATEGORY: 'πŸ€”, πŸ“š, πŸ’‘',
149
- CAPTION: 'πŸ“, πŸ‘Œ, πŸ’¬', PROMPT: 'πŸ“, πŸ’»', MODEL_NAME: '🎨, πŸ’»', HINT:'πŸ€”, πŸ”',
150
- ATTRIBUTION: 'πŸ”, πŸ“„', DLI:"🌑️, πŸ€”, 🎯", DESIGNER:"πŸ§‘β€πŸŽ¨"
151
- }
152
-
153
  batch_size = 8
154
  target_size = (1024, 1024)
155
 
 
156
  def func(index):
157
  start_index = index * batch_size
158
  end_index = start_index + batch_size
@@ -163,51 +46,173 @@ def func(index):
163
  values_lst += values
164
  return values_lst
165
 
166
- # Define functions to handle data and interface
167
  def get_instance_values(example):
168
  values = []
169
  for k in left_side_columns + right_side_columns:
170
  if k == IMAGE:
171
  value = example["Image"].resize(target_size)
 
 
 
 
 
172
  else:
173
  value = example[k]
174
  values.append(value)
175
  return values
 
 
 
 
 
176
 
177
  def get_col(example):
178
  instance_values = get_instance_values(example)
179
- inputs_left, text_inputs_right = [], []
180
- with gr.Column() as col:
 
 
181
  for key, value in zip(left_side_columns, instance_values[:len(left_side_columns)]):
182
  if key == IMAGE:
183
  img_resized = example["Image"].resize(target_size)
 
184
  input_k = gr.Image(value=img_resized)
185
  else:
186
  label = key.capitalize().replace("_", " ")
187
  input_k = gr.Textbox(value=value, label=f"{label} {emoji_to_label[key]}")
188
  inputs_left.append(input_k)
189
  with gr.Accordion("Click for details", open=False):
 
 
 
 
190
  for key, value in zip(right_side_columns, instance_values[len(left_side_columns):]):
191
  label = key.capitalize().replace("_", " ")
192
- num_lines = max(1, len(value) // 50 + (len(value) % 50 > 0))
193
  text_input_k = gr.Textbox(value=value, label=f"{label} {emoji_to_label[key]}", lines=num_lines)
194
  text_inputs_right.append(text_input_k)
195
  return inputs_left, text_inputs_right
196
 
197
- # Create the Gradio Blocks interface
198
- with gr.Blocks() as demo:
199
- with gr.Row():
200
- gr.Markdown("# Visual Riddles Explorer")
201
  with gr.Column():
202
  num_batches = math.ceil(dataset_size / batch_size)
203
- slider = gr.Slider(minimum=0, maximum=num_batches - 1, step=1, label=f'Page (out of {num_batches})')
204
- slider.change(lambda x: get_col(Visual_Riddles[x * batch_size]), inputs=[slider], outputs=[gr.Row()])
 
 
 
 
 
 
 
 
 
 
 
205
 
206
- # Define credentials for authentication
207
  credentials = [
208
- ("user", "Aa123"),
209
- ("username2", "password2")
210
  ]
211
 
212
  # Launch the interface with password protection
213
  demo.launch(auth=credentials)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import math
2
  from datasets import load_dataset
3
  import gradio as gr
4
  import os
5
 
6
+ # auth_token = os.environ.get("auth_token")
7
  auth_token = os.environ.get("HF_TOKEN")
8
  Visual_Riddles = load_dataset("nitzanguetta/Visual_Riddles", token=auth_token, trust_remote_code=True)['test']
9
+ # print(f"Loaded WHOOPS!, first example:")
10
+ # print(whoops[0])
11
  dataset_size = len(Visual_Riddles)
12
 
 
13
  IMAGE = 'Image'
14
  QUESTION = 'Question'
15
  ANSWER = "Answer"
 
22
  CATEGORY = "Category"
23
  DESIGNER = "Designer"
24
 
25
+
26
  left_side_columns = [IMAGE]
27
  right_side_columns = [x for x in Visual_Riddles.features.keys() if x not in left_side_columns]
28
  right_side_columns.remove('Image file name')
29
+ # right_side_columns.remove('Question')
30
+ # enumerate_cols = [CROWD_CAPTIONS, CROWD_EXPLANATIONS, CROWD_UNDERSPECIFIED_CAPTIONS]
31
+ emoji_to_label = {IMAGE: '🎨, πŸ§‘β€πŸŽ¨, πŸ’»', ANSWER: 'πŸ’‘, πŸ€”, πŸ§‘β€πŸŽ¨', QUESTION: '❓, πŸ€”, πŸ’‘', CATEGORY: 'πŸ€”, πŸ“š, πŸ’‘',
32
+ CAPTION: 'πŸ“, πŸ‘Œ, πŸ’¬', PROMPT: 'πŸ“, πŸ’»', MODEL_NAME: '🎨, πŸ’»', HINT:'πŸ€”, πŸ”',
33
+ ATTRIBUTION: 'πŸ”, πŸ“„', DLI:"🌑️, πŸ€”, 🎯", DESIGNER:"πŸ§‘β€πŸŽ¨"}
34
+ # batch_size = 16
 
35
  batch_size = 8
36
  target_size = (1024, 1024)
37
 
38
+
39
  def func(index):
40
  start_index = index * batch_size
41
  end_index = start_index + batch_size
 
46
  values_lst += values
47
  return values_lst
48
 
49
+
50
  def get_instance_values(example):
51
  values = []
52
  for k in left_side_columns + right_side_columns:
53
  if k == IMAGE:
54
  value = example["Image"].resize(target_size)
55
+ # elif k in enumerate_cols:
56
+ # value = list_to_string(example[k])
57
+ # elif k == QA:
58
+ # qa_list = [f"Q: {x[0]} A: {x[1]}" for x in example[k]]
59
+ # value = list_to_string(qa_list)
60
  else:
61
  value = example[k]
62
  values.append(value)
63
  return values
64
+ def list_to_string(lst):
65
+ return '\n'.join(['{}. {}'.format(i+1, item) for i, item in enumerate(lst)])
66
+
67
+ demo = gr.Blocks()
68
+
69
 
70
  def get_col(example):
71
  instance_values = get_instance_values(example)
72
+ with gr.Column():
73
+ inputs_left = []
74
+ assert len(left_side_columns) == len(
75
+ instance_values[:len(left_side_columns)]) # excluding the image & designer
76
  for key, value in zip(left_side_columns, instance_values[:len(left_side_columns)]):
77
  if key == IMAGE:
78
  img_resized = example["Image"].resize(target_size)
79
+ # input_k = gr.Image(value=img_resized, label=example['commonsense_category'])
80
  input_k = gr.Image(value=img_resized)
81
  else:
82
  label = key.capitalize().replace("_", " ")
83
  input_k = gr.Textbox(value=value, label=f"{label} {emoji_to_label[key]}")
84
  inputs_left.append(input_k)
85
  with gr.Accordion("Click for details", open=False):
86
+ # with gr.Accordion(example[QUESTION], open=False):
87
+ text_inputs_right = []
88
+ assert len(right_side_columns) == len(
89
+ instance_values[len(left_side_columns):]) # excluding the image & designer
90
  for key, value in zip(right_side_columns, instance_values[len(left_side_columns):]):
91
  label = key.capitalize().replace("_", " ")
92
+ num_lines = max(1, len(value) // 50 + (len(value) % 50 > 0)) # Assuming ~50 chars per line
93
  text_input_k = gr.Textbox(value=value, label=f"{label} {emoji_to_label[key]}", lines=num_lines)
94
  text_inputs_right.append(text_input_k)
95
  return inputs_left, text_inputs_right
96
 
97
+
98
+ with demo:
99
+ gr.Markdown("# Slide to iterate Visual Riddles")
100
+
101
  with gr.Column():
102
  num_batches = math.ceil(dataset_size / batch_size)
103
+ slider = gr.Slider(minimum=0, maximum=num_batches, step=1, label=f'Page (out of {num_batches})')
104
+ with gr.Row():
105
+ index = slider.value
106
+ start_index = 0 * batch_size
107
+ end_index = start_index + batch_size
108
+ all_examples = [Visual_Riddles[index] for index in list(range(start_index, end_index))]
109
+ all_inputs_left_right = []
110
+ for example_idx, example in enumerate(all_examples):
111
+ inputs_left, text_inputs_right = get_col(example)
112
+ inputs_left_right = inputs_left + text_inputs_right
113
+ all_inputs_left_right += inputs_left_right
114
+
115
+ slider.change(func, inputs=[slider], outputs=all_inputs_left_right)
116
 
117
+ # demo.launch()
118
  credentials = [
119
+ ("Viri", "Aa123")
 
120
  ]
121
 
122
  # Launch the interface with password protection
123
  demo.launch(auth=credentials)
124
+
125
+ # import math
126
+ # from datasets import load_dataset
127
+ # import gradio as gr
128
+ # import os
129
+ #
130
+ # # Set up environment variables and load dataset
131
+ # auth_token = os.environ.get("HF_TOKEN")
132
+ # Visual_Riddles = load_dataset("nitzanguetta/Visual_Riddles", token=auth_token, trust_remote_code=True)['test']
133
+ # dataset_size = len(Visual_Riddles)
134
+ #
135
+ # # Define attributes
136
+ # IMAGE = 'Image'
137
+ # QUESTION = 'Question'
138
+ # ANSWER = "Answer"
139
+ # CAPTION = "Image caption"
140
+ # PROMPT = "Prompt"
141
+ # MODEL_NAME = "Model name"
142
+ # HINT = "Hint"
143
+ # ATTRIBUTION = "Attribution"
144
+ # DLI = "Difficulty Level Index"
145
+ # CATEGORY = "Category"
146
+ # DESIGNER = "Designer"
147
+ #
148
+ # left_side_columns = [IMAGE]
149
+ # right_side_columns = [x for x in Visual_Riddles.features.keys() if x not in left_side_columns]
150
+ # right_side_columns.remove('Image file name')
151
+ #
152
+ # emoji_to_label = {
153
+ # IMAGE: '🎨, πŸ§‘β€πŸŽ¨, πŸ’»', ANSWER: 'πŸ’‘, πŸ€”, πŸ§‘β€πŸŽ¨', QUESTION: '❓, πŸ€”, πŸ’‘', CATEGORY: 'πŸ€”, πŸ“š, πŸ’‘',
154
+ # CAPTION: 'πŸ“, πŸ‘Œ, πŸ’¬', PROMPT: 'πŸ“, πŸ’»', MODEL_NAME: '🎨, πŸ’»', HINT:'πŸ€”, πŸ”',
155
+ # ATTRIBUTION: 'πŸ”, πŸ“„', DLI:"🌑️, πŸ€”, 🎯", DESIGNER:"πŸ§‘β€πŸŽ¨"
156
+ # }
157
+ #
158
+ # batch_size = 8
159
+ # target_size = (1024, 1024)
160
+ #
161
+ # def func(index):
162
+ # start_index = index * batch_size
163
+ # end_index = start_index + batch_size
164
+ # all_examples = [Visual_Riddles[index] for index in list(range(start_index, end_index))]
165
+ # values_lst = []
166
+ # for example_idx, example in enumerate(all_examples):
167
+ # values = get_instance_values(example)
168
+ # values_lst += values
169
+ # return values_lst
170
+ #
171
+ # # Define functions to handle data and interface
172
+ # def get_instance_values(example):
173
+ # values = []
174
+ # for k in left_side_columns + right_side_columns:
175
+ # if k == IMAGE:
176
+ # value = example["Image"].resize(target_size)
177
+ # else:
178
+ # value = example[k]
179
+ # values.append(value)
180
+ # return values
181
+ #
182
+ # def get_col(example):
183
+ # instance_values = get_instance_values(example)
184
+ # inputs_left, text_inputs_right = [], []
185
+ # with gr.Column() as col:
186
+ # for key, value in zip(left_side_columns, instance_values[:len(left_side_columns)]):
187
+ # if key == IMAGE:
188
+ # img_resized = example["Image"].resize(target_size)
189
+ # input_k = gr.Image(value=img_resized)
190
+ # else:
191
+ # label = key.capitalize().replace("_", " ")
192
+ # input_k = gr.Textbox(value=value, label=f"{label} {emoji_to_label[key]}")
193
+ # inputs_left.append(input_k)
194
+ # with gr.Accordion("Click for details", open=False):
195
+ # for key, value in zip(right_side_columns, instance_values[len(left_side_columns):]):
196
+ # label = key.capitalize().replace("_", " ")
197
+ # num_lines = max(1, len(value) // 50 + (len(value) % 50 > 0))
198
+ # text_input_k = gr.Textbox(value=value, label=f"{label} {emoji_to_label[key]}", lines=num_lines)
199
+ # text_inputs_right.append(text_input_k)
200
+ # return inputs_left, text_inputs_right
201
+ #
202
+ # # Create the Gradio Blocks interface
203
+ # with gr.Blocks() as demo:
204
+ # with gr.Row():
205
+ # gr.Markdown("# Visual Riddles Explorer")
206
+ # with gr.Column():
207
+ # num_batches = math.ceil(dataset_size / batch_size)
208
+ # slider = gr.Slider(minimum=0, maximum=num_batches - 1, step=1, label=f'Page (out of {num_batches})')
209
+ # slider.change(lambda x: get_col(Visual_Riddles[x * batch_size]), inputs=[slider], outputs=[gr.Row()])
210
+ #
211
+ # # Define credentials for authentication
212
+ # credentials = [
213
+ # ("user", "Aa123"),
214
+ # ("username2", "password2")
215
+ # ]
216
+ #
217
+ # # Launch the interface with password protection
218
+ # demo.launch(auth=credentials)