kevintu committed on
Commit 87e69b4 · verified · 1 Parent(s): 6205308

Update README.md

Files changed (1)
  1. README.md +42 -28
README.md CHANGED
@@ -10,36 +10,50 @@ license: apache-2.0
  The following code shows how to test the model.
  ```python
- import torch
  from transformers import AutoModelForSequenceClassification, AutoTokenizer
+ import torch

- # Load the model and tokenizer from the directory where it's saved
- model_path = "model"
+ # Load model and tokenizer
+ model_path = "model"  # Ensure this path points to the correct directory
  model = AutoModelForSequenceClassification.from_pretrained(model_path)
  tokenizer = AutoTokenizer.from_pretrained(model_path)

- # Function to prepare and make predictions on text
- def predict_climate_att(text):
-     # Encode the text using the tokenizer
-     encoded_input = tokenizer(text, return_tensors='pt', padding=True, truncation=True, max_length=64)
-
-     # Evaluate the model on the encoded text
-     model.eval()
-     with torch.no_grad():
-         outputs = model(**encoded_input)
-
-     # Extract logits (the outputs of the model before any final activation function)
-     logits = outputs.logits.squeeze()
-
-     # (Optional) Apply a final activation function if necessary (e.g., softmax for classification)
-     # probabilities = torch.softmax(logits, dim=0)
-
-     # For now, let's just return the raw logits
-     return logits
-
- # Example usage
- text = "Your example text goes here."
- predictions = predict_climate_att(text)
- print(predictions)
-
- '''
+ # Define the path to your text file
+ file_path = 'cl.txt'
+
+ # Read the content of the file
+ with open(file_path, 'r', encoding='utf-8') as file:
+     new_text = file.read()
+
+ # Encode the text with the tokenizer used during training
+ encoded_input = tokenizer(new_text, return_tensors='pt', padding=True, truncation=True, max_length=64)
+
+ # Move the model and inputs to the correct device (CPU, or GPU if available)
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model = model.to(device)
+ encoded_input = {k: v.to(device) for k, v in encoded_input.items()}
+
+ model.eval()  # Set the model to evaluation mode
+
+ # Perform the prediction
+ with torch.no_grad():
+     outputs = model(**encoded_input)
+
+ # Get the raw logits (assumes classification with labels)
+ predictions = outputs.logits.squeeze()
+
+ # Apply softmax to interpret the logits as probabilities
+ probabilities = torch.softmax(predictions, dim=0)
+
+ # Define labels for each class index based on the classification categories
+ labels = ["risk", "neutral", "opportunity"]
+ predicted_index = torch.argmax(probabilities).item()  # Index of the max probability
+ predicted_label = labels[predicted_index]
+ predicted_probability = probabilities[predicted_index].item()
+
+ # Print the predicted label and its probability
+ print(f"Predicted Label: {predicted_label}, Probability: {predicted_probability:.4f}")
+
+ # Example output: Predicted Label: neutral, Probability: 0.8377
+
+ ```
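
One caveat on the updated snippet: with `max_length=64`, only the first 64 tokens of `cl.txt` are scored, so a longer file is effectively truncated to its opening sentences. If the file holds several texts, a minimal sketch along these lines could score each one separately. This is an illustration, not part of the commit; it assumes one text per line and reuses `model`, `tokenizer`, `device`, `labels`, and `file_path` from the snippet above.

```python
# Hypothetical batch variant (assumption: cl.txt holds one text per line).
with open(file_path, 'r', encoding='utf-8') as file:
    texts = [line.strip() for line in file if line.strip()]

# Tokenize the whole batch at once; padding aligns the sequence lengths.
encoded = tokenizer(texts, return_tensors='pt', padding=True, truncation=True, max_length=64)
encoded = {k: v.to(device) for k, v in encoded.items()}

with torch.no_grad():
    logits = model(**encoded).logits        # shape: (num_texts, num_labels)

probs = torch.softmax(logits, dim=-1)       # softmax over the label dimension
for text, p in zip(texts, probs):
    idx = int(p.argmax())
    print(f"{labels[idx]} ({p[idx].item():.4f}): {text[:60]}")
```

The `labels` order here mirrors the README's hard-coded list; when the checkpoint's config carries an `id2label` mapping, reading `model.config.id2label` avoids a silent mismatch between class index and label name.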