pszemraj committed on
Commit
9e30137
1 Parent(s): a738f02

🐛 debug

Browse files

Signed-off-by: peter szemraj <peterszemraj@gmail.com>

Files changed (1) hide show
  1. app.py +36 -15
app.py CHANGED
@@ -34,7 +34,7 @@ from grammar_improve import (
34
  symspeller,
35
  synthesize_grammar,
36
  )
37
- from utils import corr
38
 
39
  nltk.download("stopwords") # download stopwords
40
 
@@ -71,7 +71,7 @@ def chat(
71
  top_p=top_p,
72
  top_k=top_k,
73
  temperature=temperature,
74
- constrained_generation="true" in constrained_generation.lower(),
75
  )
76
  history = [prompt_message, response]
77
  html = ""
@@ -182,26 +182,47 @@ def get_parser():
182
  )
183
 
184
  parser.add_argument(
185
- "--verbose",
186
  action="store_true",
187
  default=False,
188
- help="turn on verbose logging",
189
  )
 
190
  parser.add_argument(
191
- "--test",
192
  action="store_true",
193
  default=False,
194
- help="load the smallest model for simple testing (ethzanalytics/distilgpt2-tiny-conversational)",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
195
  )
196
-
197
  return parser
198
 
199
 
200
  if __name__ == "__main__":
201
  args = get_parser().parse_args()
 
 
 
 
 
202
  default_model = str(args.model)
203
- test = args.test
204
- if test:
205
  logging.info("loading the smallest model for testing")
206
  default_model = "ethzanalytics/distilgpt2-tiny-conversational"
207
 
@@ -217,16 +238,16 @@ if __name__ == "__main__":
217
  if model_loc.exists() and model_loc.is_dir()
218
  else pipeline("text-generation", model=default_model, device=device)
219
  ) # if the model is a name, use it. stays on CPU if no GPU available
220
- print(f"using model {my_chatbot.model}")
221
 
222
  if basic_sc:
223
- print("Using the baseline spellchecker")
224
  basic_spell = build_symspell_obj()
225
  else:
226
- print("using neural spell checker")
227
  grammarbot = pipeline("text2text-generation", gram_model, device=device)
228
 
229
- logging.info(f"using model stored here: \n {model_loc} \n")
230
  iface = gr.Interface(
231
  chat,
232
  inputs=[
@@ -242,8 +263,8 @@ if __name__ == "__main__":
242
  Slider(minimum=0.0, maximum=1.0, step=0.01, default=0.95, label="top_p"),
243
  Slider(minimum=0, maximum=100, step=5, default=20, label="top_k"),
244
  Radio(
245
- choices=["True", "False"],
246
- default="False",
247
  label="constrained_generation",
248
  ),
249
  ],
 
34
  symspeller,
35
  synthesize_grammar,
36
  )
37
+ from utils import corr, setup_logging
38
 
39
  nltk.download("stopwords") # download stopwords
40
 
 
71
  top_p=top_p,
72
  top_k=top_k,
73
  temperature=temperature,
74
+ constrained_generation=constrained_generation,
75
  )
76
  history = [prompt_message, response]
77
  html = ""
 
182
  )
183
 
184
  parser.add_argument(
185
+ "--test",
186
  action="store_true",
187
  default=False,
188
+ help="load the smallest model for simple testing (ethzanalytics/distilgpt2-tiny-conversational)",
189
  )
190
+
191
  parser.add_argument(
192
+ "--verbose",
193
  action="store_true",
194
  default=False,
195
+ help="turn on verbose printing",
196
+ )
197
+ parser.add_argument(
198
+ "-q",
199
+ "--quiet",
200
+ dest="loglevel",
201
+ help="set loglevel to WARNING (reduce output)",
202
+ action="store_const",
203
+ const=logging.WARNING,
204
+ )
205
+ parser.add_argument(
206
+ "-vv",
207
+ "--very-verbose",
208
+ dest="loglevel",
209
+ help="set loglevel to DEBUG",
210
+ action="store_const",
211
+ const=logging.DEBUG,
212
  )
 
213
  return parser
214
 
215
 
216
  if __name__ == "__main__":
217
  args = get_parser().parse_args()
218
+ loglevel = args.loglevel or logging.INFO
219
+ setup_logging(loglevel)
220
+ logging.info("\n\n\nStarting app.py\n\n\n")
221
+ logging.info(f"args: {args}")
222
+
223
  default_model = str(args.model)
224
+
225
+ if args.test:
226
  logging.info("loading the smallest model for testing")
227
  default_model = "ethzanalytics/distilgpt2-tiny-conversational"
228
 
 
238
  if model_loc.exists() and model_loc.is_dir()
239
  else pipeline("text-generation", model=default_model, device=device)
240
  ) # if the model is a name, use it. stays on CPU if no GPU available
241
+ logging.info(f"using model {my_chatbot.model}")
242
 
243
  if basic_sc:
244
+ logging.info("Using the baseline spellchecker")
245
  basic_spell = build_symspell_obj()
246
  else:
247
+ logging.info("using neural spell checker")
248
  grammarbot = pipeline("text2text-generation", gram_model, device=device)
249
 
250
+ logging.debug(f"using model stored here: \n {model_loc} \n")
251
  iface = gr.Interface(
252
  chat,
253
  inputs=[
 
263
  Slider(minimum=0.0, maximum=1.0, step=0.01, default=0.95, label="top_p"),
264
  Slider(minimum=0, maximum=100, step=5, default=20, label="top_k"),
265
  Radio(
266
+ choices=[True, False],
267
+ default=False,
268
  label="constrained_generation",
269
  ),
270
  ],