lunadebruyne committed
Commit 352ac27 · 1 Parent(s): 908e0d4

Update app.py

Files changed (1)
  1. app.py +4 -7
app.py CHANGED
@@ -13,7 +13,7 @@ description2 = "<h3>Demo EmotioNL</h3>\nThis demo allows you to analyse the emot
 
 inference_modelpath = "model/checkpoint-128"
 
-"""
+
 output_dir = "model"
 model_config = {
     "model_weights": "pdelobelle/robbert-v2-dutch-base",
@@ -57,7 +57,6 @@ trainer = Trainer(
 
 
 def inference_dataset(file_object):
-    #input_file = open(file_object.name, 'r')
     input_file = file_object
     data_paths = {"train": input_file, "inference": input_file}
     dataset = load_dataset('csv', skiprows=1, data_files=data_paths, column_names = ['id', 'text', 'label'], delimiter='\t')
@@ -82,7 +81,7 @@ def inference_dataset(file_object):
     return output
 """
 
-def inference_dataset(file_object, option_list, progress=gr.Progress()):
+def inference_dataset(file_object, option_list):
     tokenizer = AutoTokenizer.from_pretrained(inference_modelpath)
     model = AutoModelForSequenceClassification.from_pretrained(inference_modelpath)
     data_path = open(file_object.name, 'r')
@@ -90,7 +89,6 @@ def inference_dataset(file_object, option_list, progress=gr.Progress()):
     ids = df["id"].tolist()
     texts = df["text"].tolist()
     preds = []
-    progress(0, desc="Starting...")
     for text in tqdm(texts): # progressbar
         inputs = tokenizer(text, return_tensors="pt")
         with torch.no_grad(): # run model
@@ -117,7 +115,7 @@ def inference_dataset(file_object, option_list, progress=gr.Progress()):
     if "topics" in option_list:
         output5 = "This option was selected."
     return [output1, output2, output3, output4, output5]
-
+"""
 def what_happened(text, file_object, option_list):
     if file_object:
         output = "You uploaded a file."
@@ -153,8 +151,7 @@ def what_happened2(file_object, option_list):
         output5 = "This option was selected."
     return [output1, output2, output3, output4, output5]
 
-def inference_sentence(text, progress=gr.Progress()):
-    progress(0, desc="Starting...")
+def inference_sentence(text):
    tokenizer = AutoTokenizer.from_pretrained(inference_modelpath)
    model = AutoModelForSequenceClassification.from_pretrained(inference_modelpath)
    for text in tqdm([text]):
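
The functional change in this commit is the removal of the Gradio progress tracker: the progress=gr.Progress() parameter and the initial progress(0, desc="Starting...") call are dropped from both inference_dataset and inference_sentence, leaving the existing tqdm loops as the only progress indication, and the triple-quote markers that fence off unused code are shifted (one removed near the top of the file, one added after inference_dataset's return). As a rough illustration only, the sketch below shows what the trimmed inference_sentence plausibly looks like after the commit; the diff cuts off after the tqdm loop, so the logits/argmax/id2label lines are assumed from the standard transformers sequence-classification pattern rather than taken from the file.

import torch
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForSequenceClassification

inference_modelpath = "model/checkpoint-128"

def inference_sentence(text):
    # Load the fine-tuned Dutch emotion classifier from the local checkpoint.
    tokenizer = AutoTokenizer.from_pretrained(inference_modelpath)
    model = AutoModelForSequenceClassification.from_pretrained(inference_modelpath)
    for text in tqdm([text]):  # tqdm is now the only progress indicator
        inputs = tokenizer(text, return_tensors="pt")
        with torch.no_grad():  # run model
            logits = model(**inputs).logits
        # Assumed continuation (not visible in the diff): take the top-scoring
        # class and map it back to its label name.
        prediction = model.config.id2label[logits.argmax().item()]
    return prediction

If Gradio-native progress reporting is wanted again later, gr.Progress(track_tqdm=True) can pick up the existing tqdm loop automatically instead of requiring manual progress(...) calls inside the function.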