Jingxiang Mo committed on
Commit
0afbd90
1 Parent(s): 114896e

Updated .gitignore

Browse files
.gitignore CHANGED
@@ -1,2 +1,3 @@
1
  .env
2
  .DS_Store
 
 
1
  .env
2
  .DS_Store
3
+ .vscode
.vscode/settings.json DELETED
@@ -1,5 +0,0 @@
1
- {
2
- "python.linting.pylintEnabled": true,
3
- "python.linting.enabled": true,
4
- "python.formatting.provider": "yapf"
5
- }
 
 
 
 
 
 
__pycache__/app.cpython-39.pyc CHANGED
Binary files a/__pycache__/app.cpython-39.pyc and b/__pycache__/app.cpython-39.pyc differ
 
app.py CHANGED
@@ -106,9 +106,6 @@ def answer_question(question: str) -> str:
106
  input_ids_without_question = input_ids[
107
  input_ids.index(tokenizer.sep_token_id) + 1 :
108
  ]
109
- print(
110
- f"Query has {len(input_ids)} tokens, divided in {len(input_ids_without_question)//length_of_group + 1}.\n"
111
- )
112
 
113
  input_ids_split = []
114
  for group in range(len(input_ids_without_question) // length_of_group + 1):
@@ -139,10 +136,10 @@ def answer_question(question: str) -> str:
139
 
140
  # evaulate the model
141
  outputs = model(
142
- torch.tensor([input]), # The tokens representing our input text.
143
  token_type_ids=torch.tensor(
144
  [segment_ids]
145
- ), # The segment IDs to differentiate question from answer_text
146
  return_dict=True,
147
  )
148
 
 
106
  input_ids_without_question = input_ids[
107
  input_ids.index(tokenizer.sep_token_id) + 1 :
108
  ]
 
 
 
109
 
110
  input_ids_split = []
111
  for group in range(len(input_ids_without_question) // length_of_group + 1):
 
136
 
137
  # evaulate the model
138
  outputs = model(
139
+ torch.tensor([input]),
140
  token_type_ids=torch.tensor(
141
  [segment_ids]
142
+ ),
143
  return_dict=True,
144
  )
145