toilaluan committed on
Commit
62786d6
1 Parent(s): e86e3f8
Files changed (2)
  1. app.py +1 -1
  2. scorer.py +4 -0
app.py CHANGED
@@ -54,7 +54,7 @@ def process_image(image, prompt):
     reward = reward[0]
     answers = {str(i): v > 0.5 for i, v in enumerate(reward)}
     graph_img = draw_colored_graph(dependencies, questions, answers)
-    return reward, f"""
+    return graph_img, f"""
     Question: {questions}.
     Reward per question: {reward}"""
 
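The app.py fix routes the rendered dependency graph, rather than the raw reward tensor, into the first output slot. As a minimal sketch of how the Gradio interface could be wired to match the new return signature (the component types and labels below are illustrative assumptions, not taken from the repo):

import gradio as gr

# Assumption: process_image is the function patched above, now returning
# (graph_img, summary_text), so the first output component must be an Image.
demo = gr.Interface(
    fn=process_image,
    inputs=[gr.Image(type="pil"), gr.Textbox(label="Prompt")],
    outputs=[gr.Image(label="Dependency graph"), gr.Textbox(label="Reward per question")],
)

if __name__ == "__main__":
    demo.launch()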
scorer.py CHANGED
@@ -319,6 +319,7 @@ Each tuple contains the following information:
 
         return sorted_questions
 
+    @spaces.GPU()
     def get_reward(
         self,
         questions: list[str],
@@ -333,6 +334,9 @@ Each tuple contains the following information:
             dependencies (dict[list]): the dependencies between tuples
             images (list[str]): a list of image urls
         """
+
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"
+        self.binary_vqa.to(self.device)
         scores = {}
 
         sorted_questions = self._create_graph_questions(questions, dependencies)
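The scorer.py change follows the Hugging Face ZeroGPU convention: a GPU is only attached while a @spaces.GPU()-decorated function is running, so the model is moved to CUDA inside get_reward rather than at load time. A minimal, self-contained sketch of that pattern, using an illustrative class and dummy model in place of the repo's scorer:

import spaces
import torch
import torch.nn as nn

class LazyCudaScorer:
    """Illustrative stand-in for the scorer class (an assumption, not the repo's code)."""

    def __init__(self):
        # Stay on CPU at construction time; ZeroGPU grants a GPU only
        # for the duration of a @spaces.GPU()-decorated call.
        self.model = nn.Linear(4, 1)
        self.device = "cpu"

    @spaces.GPU()
    def get_reward(self, batch: torch.Tensor) -> torch.Tensor:
        # Move the model to CUDA per call if a GPU was granted; fall back to CPU otherwise.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model.to(self.device)
        with torch.no_grad():
            return self.model(batch.to(self.device))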