Kevin Fink committed on
Commit 1c0cc8c · 1 Parent(s): 5b51e47
Files changed (1)
  1. app.py +37 -2
app.py CHANGED
@@ -243,7 +243,8 @@ def fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size
 
     try:
         train_result = trainer.train(resume_from_checkpoint=True)
-    except:
+    except Exception as e:
+        print(f"An error occurred: {str(e)}, TB: {traceback.format_exc()}")
         import shutil
         checkpoint_dir = training_args.output_dir
         # If the trainer_state.json is missing, look for the previous checkpoint
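
This hunk replaces the bare except clause with except Exception as e and logs the full traceback before the checkpoint-cleanup fallback below it runs. A minimal, self-contained sketch of that pattern, where risky_step() is a hypothetical stand-in for the trainer.train(resume_from_checkpoint=True) call in app.py:

import traceback

def risky_step():
    # Hypothetical stand-in for trainer.train(resume_from_checkpoint=True)
    raise ValueError("no valid checkpoint found in output directory")

try:
    result = risky_step()
except Exception as e:
    # Unlike a bare except:, this names the exception and records the traceback
    print(f"An error occurred: {str(e)}, TB: {traceback.format_exc()}")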
@@ -266,6 +267,21 @@ def fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size
         return f"An error occurred: {str(e)}, TB: {traceback.format_exc()}"
     return 'DONE!'#train_result
 
+@spaces.GPU
+def test(text):
+    from transformers import pipeline
+    model_name = 'shorecode/t5-efficient-tiny-nh8-summarizer'
+    summarizer = pipeline(
+        "summarization",
+        model=model_name,
+        tokenizer=model_name,
+        clean_up_tokenization_spaces=True,
+    )
+
+    max_length = 500
+    summary = summarizer(text, max_length=max_length, min_length=40, no_repeat_ngram_size=2)
+    return summary
+
 
 @spaces.GPU(duration=120)
 def run_train(dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad):
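
The new test() function wraps shorecode/t5-efficient-tiny-nh8-summarizer in a transformers summarization pipeline. A minimal usage sketch outside Gradio, assuming the model is publicly available on the Hub and transformers is installed (the sample text is illustrative); the @spaces.GPU decorator is omitted because it only matters when running inside a Hugging Face Space:

from transformers import pipeline

model_name = 'shorecode/t5-efficient-tiny-nh8-summarizer'
summarizer = pipeline(
    "summarization",
    model=model_name,
    tokenizer=model_name,
    clean_up_tokenization_spaces=True,
)

text = ("Gradio exposes a Python function as a small web app. "
        "Here the function calls a T5-based summarizer pulled from the Hugging Face Hub. ") * 3
result = summarizer(text, max_length=500, min_length=40, no_repeat_ngram_size=2)
# The pipeline returns a list with one dict per input, each holding a 'summary_text' key
print(result[0]['summary_text'])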
@@ -288,6 +304,24 @@ def run_train(dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad):
     #model = get_peft_model(model, lora_config)
     result = fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad)
     return result
+
+
+try:
+    iface = gr.Interface(
+        fn=test,
+        inputs=[
+            gr.Textbox(label="Text to summarize:"),
+        ],
+        outputs="text",
+        title="Fine-Tune Hugging Face Model shorecode/t5-efficient-tiny-nh8-summarizer",
+        description="This interface allows you to test shorecode/t5-efficient-tiny-nh8-summarizer."
+    )
+
+    # Launch the interface
+    iface.launch()
+except Exception as e:
+    print(f"An error occurred: {str(e)}, TB: {traceback.format_exc()}")
+'''
 # Create Gradio interface
 try:
     iface = gr.Interface(
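
This hunk wires a new Gradio interface to the test() helper added above; the old fine-tuning interface further down is handled in the next hunk. A stripped-down sketch of the same wiring, with summarize() as a hypothetical stand-in for test() so it runs without downloading the model:

import gradio as gr
import traceback

def summarize(text):
    # Hypothetical stand-in for the test() pipeline call shown earlier
    return text[:200]

try:
    iface = gr.Interface(
        fn=summarize,
        inputs=[gr.Textbox(label="Text to summarize:")],
        outputs="text",
        title="shorecode/t5-efficient-tiny-nh8-summarizer test UI",
    )
    iface.launch()
except Exception as e:
    print(f"An error occurred: {str(e)}, TB: {traceback.format_exc()}")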
@@ -309,4 +343,5 @@ try:
     # Launch the interface
     iface.launch()
 except Exception as e:
-    print(f"An error occurred: {str(e)}, TB: {traceback.format_exc()}")
+    print(f"An error occurred: {str(e)}, TB: {traceback.format_exc()}")
+'''
 
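
Taken together, the ''' added at the end of the previous hunk and the ''' added here wrap the original fine-tuning interface in an unassigned triple-quoted string, so that block stays in app.py but is effectively commented out. A tiny illustration of the pattern (the print calls are placeholders, not the real app.py code):

print("this line still runs")
'''
print("everything between the quotes is now just a string literal,")
print("so the old Gradio interface defined here never executes")
'''
print("this line runs too")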