ky2k committed on
Commit 02d4487
1 Parent(s): a81089b

Update app.py

Files changed (1)
  1. app.py +19 -11
app.py CHANGED
@@ -1,12 +1,17 @@
 import gradio as gr
 from transformers import pipeline
 
-# Load the model
-MODEL_PATH = "unitary/toxic-bert"
-classifier = pipeline("text-classification", model=MODEL_PATH, tokenizer=MODEL_PATH)
+# Load the models
+MODEL_PATHS = {
+    "Unitary Toxic BERT": "unitary/toxic-bert",
+    "Martin HA Toxic Comment": "martin-ha/toxic-comment-model"
+}
 
-def predict_toxicity(text):
+classifiers = {name: pipeline("text-classification", model=path, tokenizer=path) for name, path in MODEL_PATHS.items()}
+
+def predict_toxicity(text, model_choice):
     # Get predictions
+    classifier = classifiers[model_choice]
     predictions = classifier(text, return_all_scores=True)[0]
 
     # Format results
@@ -19,16 +24,19 @@ def predict_toxicity(text):
 # Create the Gradio interface
 iface = gr.Interface(
     fn=predict_toxicity,
-    inputs=gr.Textbox(lines=5, label="Enter text to analyze"),
+    inputs=[
+        gr.Textbox(lines=5, label="Enter text to analyze"),
+        gr.Radio(choices=list(MODEL_PATHS.keys()), label="Choose a model", value="Unitary Toxic BERT")
+    ],
     outputs=gr.Label(num_top_classes=6, label="Toxicity Scores"),
     title="Toxicity Prediction",
-    description="This POC uses the model based onm toxic-bert to predict toxicity in text. Multi-class response.",
+    description="This POC uses pre-trained models to predict toxicity in text. Choose between two models: 'Unitary Toxic BERT' and 'Martin HA Toxic Comment'.",
     examples=[
-        ["Great game everyone!"],
-        ["You're such a noob, uninstall please."],
-        ["I hope you die in real life, loser."],
-        ["Nice move! How did you do that?"],
-        ["Go back to the kitchen where you belong."],
+        ["Great game everyone!", "Unitary Toxic BERT"],
+        ["You're such a noob, uninstall please.", "Martin HA Toxic Comment"],
+        ["I hope you die in real life, loser.", "Unitary Toxic BERT"],
+        ["Nice move! How did you do that?", "Martin HA Toxic Comment"],
+        ["Go back to the kitchen where you belong.", "Unitary Toxic BERT"],
     ]
 )
 
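Note: the hunks leave the body of the "# Format results" step unchanged, so it is not shown above. As a rough sketch only (the helper name below is hypothetical and not part of this commit), the pipeline call with return_all_scores=True returns a list of {"label": ..., "score": ...} dicts for the input text, which gr.Label expects collapsed into a single {label: confidence} mapping:

# Hypothetical illustration, not the committed code: collapse the pipeline's
# per-label scores into the {label: confidence} dict that gr.Label renders.
def format_results(predictions):
    return {p["label"]: float(p["score"]) for p in predictions}

# Assumed usage: predict_toxicity("Great game everyone!", "Unitary Toxic BERT")
# could end by returning format_results(predictions).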