from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
# Load model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("newsmediabias/fake-news-classifier-elections")
model = AutoModelForSequenceClassification.from_pretrained("newsmediabias/fake-news-classifier-elections")
# Initialize the pipeline for sequence classification with the model and tokenizer
fake_news_classifier = pipeline(
    "text-classification",
    model=model,
    tokenizer=tokenizer
)
# Define label mapping based on the model's training
label_mapping = {
    "LABEL_0": "REAL",
    "LABEL_1": "FAKE"
}
# Example text to classify
example_text = "The election was rigged and full of fraud."
# Perform inference
predictions = fake_news_classifier(example_text)
predicted_class = label_mapping[predictions[0]['label']]
confidence_score = predictions[0]['score'] * 100
# Output the results
print(f"Predicted Class: {predicted_class}")
print(f"Confidence Score: {confidence_score:.2f}%")