tombm committed on
Commit caae2c6
1 Parent(s): 909f40b

update Gradio components

Files changed (1)
  1. app.py +6 -6
app.py CHANGED

@@ -6,7 +6,7 @@ from uq import BertForUQSequenceClassification
 def predict(sentence):
     model_path = "tombm/bert-base-uncased-finetuned-cola"
     classifier = pipeline("text-classification", model=model_path, tokenizer=model_path)
-    label = classifier(sentence)
+    label = classifier(sentence)[0]["label"]
     return label
 
 
@@ -19,7 +19,7 @@ def uncertainty(sentence):
     model.return_gp_cov = True
     _, gp_cov = model(**test_input)
 
-    return str(gp_cov.item())
+    return gp_cov.item()
 
 
 with gr.Blocks() as demo:
@@ -32,7 +32,7 @@ with gr.Blocks() as demo:
     gr.Interface(
         fn=predict,
         inputs=gr.Textbox(value="Good morning.", label="Input"),
-        outputs="label",
+        outputs="text",
     )
     gr.Interface(
         fn=predict,
@@ -40,7 +40,7 @@ with gr.Blocks() as demo:
             value="This sentence is sentence, this is a correct sentence!",
             label="Input",
         ),
-        outputs="label",
+        outputs="text",
     )
 
     explain_str = """As we can see, our model correctly classifies the first sentence, but misclassifies the second.
@@ -50,7 +50,7 @@ with gr.Blocks() as demo:
     gr.Interface(
         fn=uncertainty,
         inputs=gr.Textbox(value="Good morning.", label="Input"),
-        outputs="text",
+        outputs=gr.Number(label="Variance from GP head"),
     ) # should have low uncertainty
     gr.Interface(
         fn=uncertainty,
@@ -58,7 +58,7 @@ with gr.Blocks() as demo:
             value="This sentence is sentence, this is a correct sentence!",
             label="Input",
         ),
-        outputs="text",
+        outputs=gr.Number(label="Variance from GP head"),
     ) # should have high uncertainty
 
     final_str = """We can see here that the variance for the misclassified example is much higher than for the correctly