king007 committed on
Commit
ff376a0
1 Parent(s): 001fb6f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -35,12 +35,12 @@ pipe_tapas = pipeline(task="table-question-answering", model="google/tapas-large
35
 
36
 
37
def process2(query, csv_data):
    """Answer a natural-language question about a table with two models.

    Runs the question through Microsoft's TAPEX (encoder-decoder, via
    ``tokenizer``/``model``) and Google's TAPAS (via the ``pipe_tapas``
    table-question-answering pipeline) and returns both answers.

    Parameters:
        query: Natural-language question about the table.
        csv_data: Column-name -> list-of-values mapping accepted by
            ``pd.DataFrame.from_dict``.

    Returns:
        Tuple ``(result_tapex, result_tapas)`` — one answer string per model.
    """
    # BUG FIX: previously a hard-coded demo dict was assigned to csv_data
    # here, silently discarding the caller's table. Use the parameter.
    table = pd.DataFrame.from_dict(csv_data)
    # Microsoft TAPEX: encode table + question, generate, decode.
    encoding = tokenizer(table=table, query=query, return_tensors="pt")
    outputs = model.generate(**encoding)
    # batch_decode returns a list of strings; take the single answer so
    # both return values are plain strings.
    result_tapex = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
    # Google TAPAS pipeline: take the first answer cell.
    result_tapas = pipe_tapas(table=table, query=query)['cells'][0]
    return result_tapex, result_tapas
@@ -74,7 +74,7 @@ iface = gr.Interface(
74
  inputs=[query_text, input_data],
75
  outputs=[answer_text_tapex, answer_text_tapas],
76
  examples=[
77
-
78
  ],
79
  allow_flagging="never",
80
  )
 
35
 
36
 
37
def process2(query, csv_data):
    """Answer a natural-language question about a table with two models.

    Runs the question through Microsoft's TAPEX (encoder-decoder, via
    ``tokenizer``/``model``) and Google's TAPAS (via the ``pipe_tapas``
    table-question-answering pipeline) and returns both answers.

    Parameters:
        query: Natural-language question about the table.
        csv_data: Column-name -> list-of-values mapping accepted by
            ``pd.DataFrame.from_dict``.

    Returns:
        Tuple ``(result_tapex, result_tapas)`` — one answer string per model.
    """
    # (Removed leftover commented-out demo dict that previously shadowed
    # the csv_data parameter.)
    table = pd.DataFrame.from_dict(csv_data)
    # Microsoft TAPEX: encode table + question, generate, decode.
    encoding = tokenizer(table=table, query=query, return_tensors="pt")
    outputs = model.generate(**encoding)
    # batch_decode returns a list of strings; [0] extracts the single
    # answer so both return values are plain strings.
    result_tapex = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
    # Google TAPAS pipeline: take the first answer cell.
    result_tapas = pipe_tapas(table=table, query=query)['cells'][0]
    return result_tapex, result_tapas
 
74
  inputs=[query_text, input_data],
75
  outputs=[answer_text_tapex, answer_text_tapas],
76
  examples=[
77
+ ["how many movies does Leonardo Di Caprio have?",{"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Number of movies": ["87", "53", "69"]}]
78
  ],
79
  allow_flagging="never",
80
  )