import evaluate
# Use a pipeline as a high-level helper
from transformers import pipeline

from embed_score import Embed_Eval
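
# Assumed runtime dependencies (not pinned in this file): `evaluate`,
# `transformers`, and `torch`, plus the per-metric extras `rouge_score`,
# `nltk` (for METEOR), `bert_score`, and `mauve-text`.
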
class Evaluator:
    def __init__(self):
        # Embedding-similarity scorers from the local embed_score module
        self.gpt_score = Embed_Eval(model="gpt")
        self.pubmedbert_score = Embed_Eval(model="pubmedbert")
        # Reference-based text metrics from the `evaluate` library
        self.rouge = evaluate.load("rouge")
        self.bertscore = evaluate.load("bertscore")
        self.bleu = evaluate.load("bleu")
        self.meteor = evaluate.load("meteor")
        self.mauve = evaluate.load("mauve")
        # Domain-specific fill-mask model; loaded here but not used in eval()
        self.biomedBERT = pipeline(
            "fill-mask",
            model="microsoft/BiomedNLP-BiomedBERT-base-uncased-abstract-fulltext",
        )
    def eval(self, pred, ref):
        """Score parallel lists of predictions and references with every metric."""
        results = {}
        results["gpt_score"] = self.gpt_score.compute(predictions=pred, references=ref)
        results["pubmedbert_score"] = self.pubmedbert_score.compute(predictions=pred, references=ref)
        results["rouge"] = self.rouge.compute(predictions=pred, references=ref)
        results["bert_score"] = self.bertscore.compute(
            predictions=pred, references=ref, model_type="distilbert-base-uncased"
        )
        # BLEU's `precisions` field holds BLEU-1 through BLEU-4 (n-gram precisions)
        results["bleu"] = self.bleu.compute(predictions=pred, references=ref)
        results["meteor"] = self.meteor.compute(predictions=pred, references=ref)
        results["mauve"] = self.mauve.compute(predictions=pred, references=ref)
        return results
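
# Illustrative shape of the dict eval() returns; exact fields depend on the
# installed `evaluate` metric versions, so treat this as a sketch:
# {
#     "gpt_score": ...,             # whatever Embed_Eval.compute returns
#     "pubmedbert_score": ...,
#     "rouge": {"rouge1": ..., "rouge2": ..., "rougeL": ..., "rougeLsum": ...},
#     "bert_score": {"precision": [...], "recall": [...], "f1": [...], ...},
#     "bleu": {"bleu": ..., "precisions": [...], ...},
#     "meteor": {"meteor": ...},
#     "mauve": <MAUVE results object>,
# }
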
if __name__ == "__main__":
    # Smoke test: identical predictions and references should score near the
    # ceiling for the overlap-based metrics.
    evaluator = Evaluator()
    print(evaluator.eval(
        ["hello there general kenobi", "foo bar foobar"],
        ["hello there general kenobi", "foo bar foobar"],
    ))