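# Gradio demo: classify a headline as clickbait or non-clickbait with a
# locally fine-tuned BERT model (./clickbait-classifier-model-90).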
import gradio as gr
from transformers import AutoTokenizer, pipeline

# Load the BERT tokenizer and the fine-tuned text classification pipeline
# from the local model directory
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
classifier = pipeline("text-classification", model="./clickbait-classifier-model-90", tokenizer=tokenizer)
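
# The pipeline returns one dict per input, e.g. [{"label": "LABEL_1", "score": 0.97}];
# classify_text converts that into percentage values for the two output sliders.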
def classify_text(text):
    prediction = classifier(text)[0]
    clickbait_label = "LABEL_1"      # LABEL_1 is assumed to correspond to clickbait
    non_clickbait_label = "LABEL_0"  # LABEL_0 is assumed to correspond to non-clickbait
    predicted_label = prediction["label"]
    predicted_score = prediction["score"] * 100
    # For a binary classifier the two class probabilities sum to 1, so the
    # non-predicted class gets the complement of the predicted score.
    clickbait_score = predicted_score if predicted_label == clickbait_label else 100 - predicted_score
    non_clickbait_score = predicted_score if predicted_label == non_clickbait_label else 100 - predicted_score
    return clickbait_score, non_clickbait_score
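
# Gradio expects `examples` to be a list of input lists (one value per input
# component), so each headline below is wrapped in its own single-item list.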
# Example clickbait headline
clickbait_example = ["You'll Never Believe What This Dog Did Next!"]
# Example non-clickbait headline
non_clickbait_example = ["Local School Board Approves New Budget"]
# Combine into a list of examples
examples = [clickbait_example, non_clickbait_example]
# Create the Gradio interface
iface = gr.Interface(
    fn=classify_text,
    inputs=[gr.Textbox(lines=2, placeholder="Enter a text headline...")],
    outputs=[
        gr.Slider(label="Clickbait", minimum=0, maximum=100, step=1),
        gr.Slider(label="Non-Clickbait", minimum=0, maximum=100, step=1),
    ],
    title="Clickbait Detector",
    examples=examples,
)
# Launch the interface
iface.launch()