"""Sentiment-analysis demo for Hugging Face ZeroGPU Spaces.

Classifies input text into one of five sentiment levels using the
``tabularisai/robust-sentiment-analysis`` model and serves it through a
Gradio interface.
"""
import gradio as gr
import spaces
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# ZeroGPU pattern: allocating a CUDA tensor at import time and reusing its
# `.device` is the convention the `spaces` runtime expects — do not replace
# with a plain `torch.device("cuda")` without testing on a ZeroGPU Space.
zero = torch.Tensor([0]).cuda()
print(f"Initial device: {zero.device}")

# Load model and tokenizer once at startup, then move the model to the GPU.
model_name = "tabularisai/robust-sentiment-analysis"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model = model.to(zero.device)

# Class index -> human-readable label. Hoisted to module scope so the dict
# is not rebuilt on every request.
SENTIMENT_MAP = {
    0: "Very Negative",
    1: "Negative",
    2: "Neutral",
    3: "Positive",
    4: "Very Positive",
}


@spaces.GPU
def predict_sentiment(text: str) -> str:
    """Return the sentiment label for *text*.

    The text is lowercased before tokenization (presumably matching the
    model's training preprocessing — confirm against the model card) and
    truncated to 512 tokens.
    """
    print(f"Device inside function: {zero.device}")
    inputs = tokenizer(
        text.lower(),
        return_tensors="pt",
        truncation=True,
        padding=True,
        max_length=512,
    )
    inputs = {k: v.to(zero.device) for k, v in inputs.items()}

    with torch.no_grad():
        outputs = model(**inputs)

    probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
    predicted_class = torch.argmax(probabilities, dim=-1).item()
    return SENTIMENT_MAP[predicted_class]


# Gradio interface
demo = gr.Interface(
    fn=predict_sentiment,
    inputs=gr.Textbox(label="Enter your text here"),
    outputs=gr.Textbox(label="Sentiment"),
    title="🎭 Sentiment Analysis Wizard",
    description="Discover the emotional tone behind any text with our advanced AI model!",
)

# Guard the launch so importing this module (e.g. for testing) does not
# start the web server as a side effect.
if __name__ == "__main__":
    demo.launch()