import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Disk path where the saved model & tokeniser are located
# (relative to the "ebookify-backend/" directory, i.e. the root directory of the backend)
save_dir = r"./ml_engine/saved-model"

# Load the saved model and tokeniser from disk
loaded_tokeniser = AutoTokenizer.from_pretrained(save_dir)
loaded_model = AutoModelForSequenceClassification.from_pretrained(save_dir)

def is_it_title(string):
    """Return True if the model classifies `string` as a title."""

    # Tokenise the input string into PyTorch tensors
    inputs = loaded_tokeniser(string, return_tensors='pt')

    # Run inference without tracking gradients and read the single logit
    with torch.no_grad():
        output = loaded_model(**inputs).logits.item()

    # Treat a logit of 0.6 or higher as a title
    return output >= 0.6

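# Illustrative sketch only (not part of the original module): one way this
# classifier might be applied to a list of extracted lines to pick out
# candidate titles. The helper name and its usage are hypothetical.
def find_titles(lines):
    """Return the subset of `lines` that the model classifies as titles."""
    return [line for line in lines if is_it_title(line)]
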
if __name__ == "__main__":
    print(is_it_title("Secret to Success lies in hardwork and nothing else!"))