Update app.py
app.py CHANGED
@@ -1,11 +1,12 @@
 import gradio as gr
 from transformers import pipeline

-
+# Load the model using the correct identifier
+classifier = pipeline('text-classification', model='prabhaskenche/toxic-comment-classification-using-RoBERTa')

-# Define a prediction function
 def classify(text):
     results = classifier(text)
+    # Adjust the label names based on your model's label mapping
     non_toxic_score = next((item['score'] for item in results[0] if item['label'] == 'LABEL_0'), 0)
     toxic_score = next((item['score'] for item in results[0] if item['label'] == 'LABEL_1'), 0)
     return f"{non_toxic_score:.3f} non-toxic, {toxic_score:.3f} toxic"
@@ -18,4 +19,4 @@ interface = gr.Interface(
 )

 # Launch the interface
-interface.launch()
+interface.launch()
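A note on the classify function in the first hunk: it iterates over results[0] expecting one {'label', 'score'} entry per label, which the text-classification pipeline only produces when it is asked to return every label's score; with its default settings it returns just the top label. Below is a minimal, self-contained sketch of that pattern, not the repository's exact code. It assumes a recent transformers version where top_k=None requests all per-label scores, and a label mapping of LABEL_0 = non-toxic, LABEL_1 = toxic, as the diff's comments suggest.

from transformers import pipeline

# Assumption: top_k=None asks the pipeline for a score per label; with the
# default settings only the single highest-scoring label is returned and the
# per-label lookup below would not see both entries.
classifier = pipeline(
    'text-classification',
    model='prabhaskenche/toxic-comment-classification-using-RoBERTa',
    top_k=None,
)

def classify(text):
    # Pass a one-element list so results[0] is unambiguously the per-input
    # result: a list of {'label': ..., 'score': ...} dicts, one per label.
    results = classifier([text])
    scores = {item['label']: item['score'] for item in results[0]}
    non_toxic_score = scores.get('LABEL_0', 0)
    toxic_score = scores.get('LABEL_1', 0)
    return f"{non_toxic_score:.3f} non-toxic, {toxic_score:.3f} toxic"

print(classify("Thanks, that was really helpful!"))

Passing a one-element list rather than a bare string keeps the output shape predictable regardless of the pipeline's single-string convenience behavior.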
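The gr.Interface(...) call itself falls between the two hunks and is not shown in this diff; only its closing parenthesis and interface.launch() appear as context. Purely for orientation, a hypothetical minimal version of that block is sketched below; the fn/inputs/outputs/title arguments are assumptions, not the values actually used in the Space.

import gradio as gr

# Hypothetical reconstruction: the real arguments to gr.Interface are not
# part of this diff; only the closing ")" and interface.launch() are shown.
interface = gr.Interface(
    fn=classify,                           # the classify function defined above
    inputs=gr.Textbox(label="Comment"),    # assumed: free-text input
    outputs=gr.Textbox(label="Scores"),    # assumed: the formatted score string
    title="Toxic Comment Classification",  # assumed title
)

# Launch the interface
interface.launch()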