# ebookify-backend2 / ml_engine / model_functions.py
# Geetansh — initial commit 6604d8f (856 Bytes)
# NOTE(review): the two lines above are web-UI paste artifacts
# ("raw", "history blame" chrome from the hosting site) preserved
# here as comments so the module remains importable.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
# Disk path where saved model & tokenizer is located
save_dir = (r"./ml_engine/saved-model") #relative path acc. to "ebookify-backend/" directory (i.e the root directory of the backend)
# Load the saved model and tokeniser from the disk.
# NOTE(review): this runs at import time — importing this module performs
# disk I/O and loads model weights into memory before any call is made.
loaded_tokeniser = AutoTokenizer.from_pretrained(save_dir)
loaded_model = AutoModelForSequenceClassification.from_pretrained(save_dir)
def is_it_title(string):
    """Return True when the classifier judges *string* to be a title.

    The text is tokenised with the module-level ``loaded_tokeniser``, run
    through ``loaded_model`` with gradients disabled, and the resulting
    logit is compared against a 0.6 threshold.

    NOTE(review): ``.logits.item()`` assumes the model head produces
    exactly one scalar logit per input (a single-label regression-style
    head) — confirm against the saved model's config, since a standard
    two-class head would raise here.

    :param string: the text to classify.
    :return: bool — True when the logit is >= 0.6.
    """
    # Renamed from `input` to avoid shadowing the builtin of that name.
    encoded = loaded_tokeniser(string, return_tensors='pt')
    # Inference only — no gradient tracking needed.
    with torch.no_grad():
        logit = loaded_model(**encoded).logits.item()
    # Return the comparison directly instead of an if/else returning
    # True/False; removed leftover commented-out debug print.
    return logit >= 0.6
if __name__ == "__main__":
    # Quick manual smoke test of the title classifier.
    sample = "Secret to Success lies in hardwork and nothing else!"
    verdict = is_it_title(sample)
    print(verdict)