TPM-28's picture
Upload 11 files
0ba7d7a verified
---
tags:
  - text-classification
base_model: almanach/camembert-base
---

## Validation Metrics

loss: 0.1564033180475235

f1_macro: 0.9023266184854538

f1_micro: 0.9275

f1_weighted: 0.9281147770697895

precision_macro: 0.8944987578959265

precision_micro: 0.9275

precision_weighted: 0.9308721399366291

recall_macro: 0.9135199509056998

recall_micro: 0.9275

recall_weighted: 0.9275

accuracy: 0.9275

## Exemple d'utilisation

from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Model repository ID and Hugging Face access token.
# NOTE(review): replace "<HF_token>" with a real token (or omit it for a public model).
model_name = "TPM-28/emotion-FR-camembert"
access_token = "<HF_token>"

# Load the tokenizer and the model.
# `token=` replaces the deprecated `use_auth_token=` argument (removed in recent transformers).
tokenizer = AutoTokenizer.from_pretrained(model_name, token=access_token)
model = AutoModelForSequenceClassification.from_pretrained(model_name, token=access_token)

# Emotion labels, assumed to match the order of the model's output logits.
classes = ["anger", "fear", "joy", "love", "sadness", "surprise"]

def classify_text(text):
    """Classify *text* into one of the emotion labels in ``classes``.

    Returns a ``(label, confidence)`` tuple, where *confidence* is the
    softmax probability assigned to the predicted class.
    """
    # Encode the input, truncated/padded to the model's 512-token limit.
    encoded = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)

    # Forward pass with gradient tracking disabled (inference only).
    with torch.no_grad():
        logits = model(**encoded).logits

    # Turn logits into probabilities and pick the most likely class.
    probs = torch.nn.functional.softmax(logits, dim=-1)
    best = torch.argmax(probs, dim=-1)

    label = classes[best.item()]
    score = probs[0][best].item()

    return label, score

# Usage example: classify a short French sentence and report the result.
sample = "je suis vraiment content"
label, score = classify_text(sample)

print(f"Texte : {sample}")
print(f"Classe prédite : {label}")
print(f"Confiance : {score:.2f}")