|
class HallucinatonEvaluater:
    """Evaluate an LLM answer for hallucination against its source context.

    Builds a hallucination-evaluation prompt from a (question, answer,
    context) triple and asks a HuggingFace-hosted model to score it.

    NOTE: the class name is misspelled ("Hallucinaton", "Evaluater") but is
    kept as-is because renaming would break existing callers.
    """

    def __init__(self, item):
        """Capture the fields to evaluate and build the scoring LLM.

        Args:
            item: mapping with "question", "answer" and "context" keys —
                presumably the output of a RAG pipeline; raises KeyError
                if any key is missing.
        """
        self.question = item["question"]
        self.answer = item["answer"]
        self.context = item["context"]
        # NOTE(review): temperature=1 makes scoring non-deterministic and
        # max_length=1000000 far exceeds flan-t5-xxl's context window —
        # confirm these kwargs are intentional before tightening them.
        self.llm = HuggingFaceHub(
            repo_id="google/flan-t5-xxl",
            model_kwargs={"temperature": 1, "max_length": 1000000},
        )

    def get_prompt_template(self):
        """Return the PromptTemplate used for hallucination evaluation.

        Template text and its input-variable names both come from
        HallucinatePromptContext.
        """
        prompt = HallucinatePromptContext()
        template = prompt.base_template
        variables = prompt.variables_list  # fixed typo: was "varialbles"
        return PromptTemplate(input_variables=variables, template=template)

    def evaluate(self):
        """Format the prompt with this item's fields and return the LLM output.

        Returns:
            The raw model response (the hallucination score as produced by
            the LLM; exact format depends on the prompt template).
        """
        prompt = self.get_prompt_template().format(
            query=self.question, answer=self.answer, context=self.context
        )
        return self.llm(prompt)