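"""Sanity-check a locally saved masked-LM checkpoint: load it from the current
directory (optionally converting from Flax or TensorFlow weights) and run a
fill-mask pipeline on a short test sentence."""
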
from transformers import AutoTokenizer, AutoModelForMaskedLM
from transformers import RobertaTokenizer, RobertaTokenizerFast, RobertaForMaskedLM, pipeline
import torch


def evaluate(framework):
    # Test prompt: "At the library you can [MASK] a book."
    text = "På biblioteket kan du [MASK] en bok."
    if framework == "flax":
        # Convert and load the Flax weights.
        model = AutoModelForMaskedLM.from_pretrained("./", from_flax=True)
    elif framework == "tensorflow":
        # Convert and load the TensorFlow weights.
        model = AutoModelForMaskedLM.from_pretrained("./", from_tf=True)
    else:
        # Default: load the native PyTorch weights.
        model = AutoModelForMaskedLM.from_pretrained("./")

    print("Testing with AutoTokenizer")
    tokenizer = AutoTokenizer.from_pretrained("./")
    my_unmasker_pipeline = pipeline('fill-mask', model=model, tokenizer=tokenizer)
    output = my_unmasker_pipeline(text)
    print(output)

    #print("\n\nTesting with RobertaTokenizer")
    #tokenizer = RobertaTokenizer.from_pretrained("./")
    #my_unmasker_pipeline = pipeline('fill-mask', model=model, tokenizer=tokenizer)
    #output = my_unmasker_pipeline(text)
    #print(output)

    #print("\n\nTesting with RobertaTokenizerFast")
    #tokenizer = RobertaTokenizerFast.from_pretrained("./")
    #my_unmasker_pipeline = pipeline('fill-mask', model=model, tokenizer=tokenizer)
    #output = my_unmasker_pipeline(text)
    #print(output)


print("Evaluating PyTorch Model")
evaluate("pytorch")


#print("Evaluating Flax Model")
#evaluate("flax")