ethanrom committed
Commit 741c64f
1 Parent(s): 65c729a

Update app.py

Files changed (1)
  app.py  +35 -15
app.py CHANGED
@@ -1,44 +1,64 @@
 import gradio as gr
+import time
 from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification

-# Load the fine-tuned model and tokenizer
 model_name = "ethanrom/a2"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForSequenceClassification.from_pretrained(model_name)

-# Load the pretrained model and tokenizer
 pretrained_model_name = "roberta-large-mnli"
 pretrained_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name)
 pretrained_model = pipeline("zero-shot-classification", model=pretrained_model_name, tokenizer=pretrained_tokenizer)
 candidate_labels = ["negative", "positive", "no impact", "mixed"]

+accuracy_scores = {"Fine-tuned": 0.0, "Pretrained": 0.0}
+model_sizes = {"Fine-tuned": 0, "Pretrained": 0}
+inference_times = {"Fine-tuned": 0.0, "Pretrained": 0.0}

 def predict_sentiment(text_input, model_selection):
+    global accuracy_scores, model_sizes, inference_times
+
+    start_time = time.time()
     if model_selection == "Fine-tuned":
-        # Use the fine-tuned model
         inputs = tokenizer.encode_plus(text_input, return_tensors='pt')
         outputs = model(**inputs)
         logits = outputs.logits.detach().cpu().numpy()[0]
         predicted_class = int(logits.argmax())
-        return candidate_labels[predicted_class]
+        accuracy_scores[model_selection] += 1 if candidate_labels[predicted_class] == "positive" else 0
+        model_sizes[model_selection] = model.num_parameters()
     else:
-        # Use the pretrained model
         result = pretrained_model(text_input, candidate_labels)
         predicted_class = result["labels"][0]
-        return predicted_class
+        accuracy_scores[model_selection] += 1 if predicted_class == 1 else 0
+        model_sizes[model_selection] = pretrained_model.model.num_parameters()
+    end_time = time.time()
+    inference_times[model_selection] = end_time - start_time
+
+    return candidate_labels[predicted_class]
+
+def accuracy(model_selection):
+    return accuracy_scores[model_selection]/10
+
+def model_size(model_selection):
+    return str(model_sizes[model_selection]//(1024*1024)) + " MB"
+
+def inference_time(model_selection):
+    return str(inference_times[model_selection]*1000) + " ms"

 inputs = [
     gr.inputs.Textbox("Enter text"),
     gr.inputs.Dropdown(["Pretrained", "Fine-tuned"], label="Select model"),
 ]

-outputs = gr.outputs.Textbox(label="Predicted Sentiment")
+outputs = [
+    gr.outputs.Textbox(label="Predicted Sentiment"),
+    gr.outputs.Label(label="Accuracy:"),
+    gr.outputs.Label(label="Model Size:"),
+    gr.outputs.Label(label="Inference Time:")
+]

-gr.Interface(fn=predict_sentiment, inputs=inputs, outputs=outputs, title="Sentiment Analysis", description="Compare the output of two models", examples=[
-    ["max laid his hand upon the old man's arm", "Pretrained Model"],
-    ["the red sword sealed their vows!", "Fine-tuned Model"],
-    ["and that is why, the lonesome day,", "Pretrained Model"],
-    ["it flows so long as falls the rain", "Fine-tuned Model"],
-    ["thy hands all cunning arts that women prize", "Pretrained Model"],
-    ["on us lift up the light", "Fine-tuned Model"],
-],).launch();
+gr.Interface(fn=predict_sentiment, inputs=inputs, outputs=outputs,
+             title="Sentiment Analysis", description="Compare the output of two models",
+             live=True,
+             examples=[["on us lift up the light", "Fine-tuned"], ["max laid his hand upon the old man's arm", "Pretrained"]]
+             ).launch();
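
Note for a follow-up: as committed, predict_sentiment still returns a single string while the interface now declares four output components, so Gradio will report a missing return value for the extra outputs. In the zero-shot branch, predicted_class is already a label string, so candidate_labels[predicted_class] raises a TypeError and the predicted_class == 1 comparison is never true. The accuracy(), model_size() and inference_time() helpers are defined but never wired into the interface. Below is a minimal sketch (not part of this commit) of one way to reconcile the function with the four declared outputs, reusing the globals defined above; the local variable names are just for illustration, and the running counter is kept as a tally only, since no ground-truth labels are available.

# Sketch only, not the committed code: return one value per declared output.
def predict_sentiment(text_input, model_selection):
    start_time = time.time()
    if model_selection == "Fine-tuned":
        inputs = tokenizer.encode_plus(text_input, return_tensors='pt')
        logits = model(**inputs).logits.detach().cpu().numpy()[0]
        predicted_label = candidate_labels[int(logits.argmax())]
        num_params = model.num_parameters()
    else:
        result = pretrained_model(text_input, candidate_labels)
        predicted_label = result["labels"][0]  # already a label string
        num_params = pretrained_model.model.num_parameters()
    elapsed_ms = (time.time() - start_time) * 1000
    # Running tally of "positive" predictions; not true accuracy without labels.
    accuracy_scores[model_selection] += 1 if predicted_label == "positive" else 0
    # Report parameter count in millions; dividing a parameter count by 1024*1024
    # and calling it "MB" conflates parameters with on-disk size.
    size_str = f"{num_params / 1e6:.0f}M parameters"
    return predicted_label, f"{accuracy_scores[model_selection]:.0f}", size_str, f"{elapsed_ms:.1f} ms"

With this shape, each of the four gr.outputs components receives one value per call, and the unused accuracy()/model_size()/inference_time() helpers could either be dropped or exposed through separate interface functions.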