Update README.md
README.md
@@ -35,7 +35,29 @@ The model was trained on [agentlans/tatoeba-english-translations](https://huggin
## Usage

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

model_name = "agentlans/mdeberta-v3-base-sentiment"

# Load the tokenizer and model, then move the model to the GPU if one is available, otherwise use the CPU
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

def sentiment(text):
    """Process the text with the model and return its logits.
    Here they are interpreted as the sentiment score for that text."""
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True).to(device)
    with torch.no_grad():
        logits = model(**inputs).logits.squeeze().cpu()
    return logits.tolist()

# Note: it is recommended to preprocess the text to remove special characters,
# e-mail addresses, and hashtags before scoring
sentiment("Your text here.")
```
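The preprocessing recommended in the comment above is not part of the model itself. As a minimal sketch, assuming a simple regex-based cleanup is acceptable for your data (the `clean` helper and its regular expressions are illustrative, not part of this repository):

```python
import re

def clean(text):
    """Illustrative cleanup: strip e-mail addresses, hashtags, and other
    special characters, then collapse repeated whitespace."""
    text = re.sub(r"\S+@\S+", " ", text)        # e-mail addresses
    text = re.sub(r"#\w+", " ", text)           # hashtags
    text = re.sub(r"[^\w\s.,!?'-]", " ", text)  # other special characters
    return re.sub(r"\s+", " ", text).strip()

sentiment(clean("Loved it!!! #bestpurchase ... write to support@example.com"))
```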
## Results