prabhaskenche committed · verified · Commit 1e225c4 · 1 Parent(s): a16f964

Update app.py

Files changed (1): app.py (+4 −3)
app.py CHANGED
@@ -1,17 +1,18 @@
+import gradio as gr
 from transformers import pipeline, AutoTokenizer
-from huggingface_hub import login
 
 # Load the tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained('prabhaskenche/toxic-comment-classification-using-RoBERTa')
 classifier = pipeline(
     'text-classification',
     model='prabhaskenche/toxic-comment-classification-using-RoBERTa',
-    tokenizer=tokenizer,  # Pass the tokenizer here
-    return_all_scores=True
+    tokenizer=tokenizer,
+    top_k=None  # Use top_k=None to get all scores
 )
 
 def classify(text):
     results = classifier(text)
+    # Assuming LABEL_0 is non-toxic and LABEL_1 is toxic
     non_toxic_score = next((item['score'] for item in results[0] if item['label'] == 'LABEL_0'), 0)
     toxic_score = next((item['score'] for item in results[0] if item['label'] == 'LABEL_1'), 0)
     return f"{non_toxic_score:.3f} non-toxic, {toxic_score:.3f} toxic"
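For context on the return_all_scores=True → top_k=None change: recent transformers releases deprecate return_all_scores for text-classification pipelines, and constructing the pipeline with top_k=None is the replacement that keeps a score for every label. The snippet below, intended to run after the code above, illustrates the nested structure that classify() indexes into; the printed numbers are made up, not real model output.

# Quick check of the structure classify() relies on; scores shown are illustrative only.
results = classifier("thanks for the detailed answer")
print(results)
# e.g. [[{'label': 'LABEL_0', 'score': 0.993}, {'label': 'LABEL_1', 'score': 0.007}]]
# results[0] holds one {'label': ..., 'score': ...} dict per label, which is why
# classify() scans results[0] for LABEL_0 (non-toxic) and LABEL_1 (toxic).

print(classify("thanks for the detailed answer"))
# e.g. "0.993 non-toxic, 0.007 toxic"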
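The commit also adds import gradio as gr, but no Gradio interface appears in the hunk shown above. A minimal sketch of how classify() could be hooked up to a Gradio UI follows; the component choices and title are assumptions for illustration, not taken from the commit.

import gradio as gr

# Hypothetical wiring, not part of this commit: expose classify() through a Gradio UI.
demo = gr.Interface(
    fn=classify,                                     # classify(text) defined in app.py above
    inputs=gr.Textbox(label="Comment"),              # free-text comment to score
    outputs=gr.Textbox(label="Toxicity scores"),     # formatted string returned by classify()
    title="Toxic comment classification (RoBERTa)",  # assumed title, for illustration
)

if __name__ == "__main__":
    demo.launch()

On a Gradio-based Space, launching a demo from app.py like this is typically the entry point for the hosted app.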