import time

import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification

# Fine-tuned sentiment classifier
model_name = "ethanrom/a2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# Pretrained zero-shot baseline
pretrained_model_name = "roberta-large-mnli"
pretrained_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name)
pretrained_model = pipeline(
    "zero-shot-classification",
    model=pretrained_model_name,
    tokenizer=pretrained_tokenizer,
)

candidate_labels = ["negative", "positive", "no impact", "mixed"]

# Running statistics, keyed by model selection
accuracy_scores = {"Fine-tuned": 0.0, "Pretrained": 0.0}
model_sizes = {"Fine-tuned": 0, "Pretrained": 0}
inference_times = {"Fine-tuned": 0.0, "Pretrained": 0.0}


def accuracy(model_selection):
    # Rough running score: count of "positive" predictions so far, over an assumed 10 examples
    return accuracy_scores[model_selection] / 10


def model_size(model_selection):
    # Approximate size in MB, assuming 4 bytes (float32) per parameter
    return str(model_sizes[model_selection] * 4 // (1024 * 1024)) + " MB"


def inference_time(model_selection):
    return str(round(inference_times[model_selection] * 1000, 2)) + " ms"


def predict_sentiment(text_input, model_selection):
    start_time = time.time()

    if model_selection == "Fine-tuned":
        inputs = tokenizer.encode_plus(text_input, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits.detach().cpu().numpy()[0]
        predicted_label = candidate_labels[int(logits.argmax())]
        model_sizes[model_selection] = model.num_parameters()
    else:
        result = pretrained_model(text_input, candidate_labels)
        # The zero-shot pipeline returns labels sorted by score; take the top one
        predicted_label = result["labels"][0]
        model_sizes[model_selection] = pretrained_model.model.num_parameters()

    accuracy_scores[model_selection] += 1 if predicted_label == "positive" else 0
    inference_times[model_selection] = time.time() - start_time

    return (
        predicted_label,
        str(accuracy(model_selection)),
        model_size(model_selection),
        inference_time(model_selection),
    )


inputs = [
    gr.inputs.Textbox(label="Enter text"),
    gr.inputs.Dropdown(["Pretrained", "Fine-tuned"], label="Select model"),
]

outputs = [
    gr.outputs.Textbox(label="Predicted Sentiment"),
    gr.outputs.Label(label="Accuracy"),
    gr.outputs.Label(label="Model Size"),
    gr.outputs.Label(label="Inference Time"),
]

gr.Interface(
    fn=predict_sentiment,
    inputs=inputs,
    outputs=outputs,
    title="Sentiment Analysis",
    description="Compare the output of two models",
    live=True,
    examples=[
        ["on us lift up the light", "Fine-tuned"],
        ["max laid his hand upon the old man's arm", "Pretrained"],
    ],
).launch()