import gradio as gr
import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForSequenceClassification, AutoTokenizer
# Base checkpoint and the PEFT adapter fine-tuned for Biden stance classification
base_model = "cardiffnlp/twitter-roberta-base-sentiment-latest"
adapter_model = "saideep-arikontham/twitter-roberta-base-sentiment-latest-biden-stance"

# Two-class label mappings for the stance head
id2label = {0: "Anti-Biden", 1: "Pro-Biden"}
label2id = {"Anti-Biden": 0, "Pro-Biden": 1}
# Load the base model with a freshly initialised 2-label classification head
# (ignore_mismatched_sizes lets the new head replace the base model's sentiment head),
# then attach the fine-tuned adapter weights and the matching tokenizer
model = AutoModelForSequenceClassification.from_pretrained(
    base_model,
    num_labels=2,
    id2label=id2label,
    label2id=label2id,
    ignore_mismatched_sizes=True,
)
model = PeftModel.from_pretrained(model, adapter_model)
tokenizer = AutoTokenizer.from_pretrained(adapter_model)
model.eval()

def greet(text):
    # Run inference on CPU
    model.to("cpu")

    # Tokenize the input text and move it to the same device
    inputs = tokenizer(text, return_tensors="pt").to("cpu")

    # Forward pass without gradient tracking
    with torch.no_grad():
        logits = model(**inputs).logits

    # Map the highest-scoring class index back to its label
    prediction = torch.argmax(logits, dim=-1).item()
    return "This text is " + id2label[prediction] + "!!"