FAVA, a verification model.

Example usage:

```python
"""Example: run the FAVA verification model with vLLM to identify and edit
errors in a passage, using a supplied piece of evidence as reference."""

import vllm

# Load the FAVA checkpoint (NOTE: torch/transformers are not needed for
# this example — vLLM handles model loading and inference itself).
model = vllm.LLM(model="fava-uw/fava-model")

# Greedy decoding (temperature=0) so the edited output is deterministic;
# cap generation at 1024 tokens.
sampling_params = vllm.SamplingParams(
    temperature=0,
    top_p=1.0,
    max_tokens=1024,
)

# Prompt template: references first, then the passage to verify; the model
# continues after "[Edited] " with the corrected text.
INPUT = "Read the following references:\n{evidence}\nPlease identify all the errors in the following text using the information in the references provided and suggest edits if necessary:\n[Text] {output}\n[Edited] "

output = ""  # add your passage to verify
evidence = ""  # add a piece of evidence

prompts = [INPUT.format_map({"evidence": evidence, "output": output})]
outputs = model.generate(prompts, sampling_params)
# Each RequestOutput carries a list of candidate completions; take the
# text of the first (and, with these params, only) one per prompt.
outputs = [it.outputs[0].text for it in outputs]
print(outputs[0])
```