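""" This example runs the llmware RAG Instruct benchmark test dataset against a BLING model
    pulled from HuggingFace, and applies llmware's evidence-checking functions to each response. """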
from llmware.prompts import Prompt


def load_rag_benchmark_tester_ds():

    # loads the llmware RAG benchmark test dataset from HuggingFace - requires the 'datasets' package
    from datasets import load_dataset

    ds_name = "llmware/rag_instruct_benchmark_tester"

    dataset = load_dataset(ds_name)

    print("update: loading test dataset - ", dataset)

    # package the test samples into a simple list of dictionaries
    test_set = []
    for i, samples in enumerate(dataset["train"]):
        test_set.append(samples)

    return test_set


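# run_test loads the model into a Prompt object, runs each question with its context passage,
# and prints the model output, the gold answer, and the evidence-check results for each entry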
def run_test(model_name, prompt_list):

    print("\nupdate: Starting RAG Benchmark Inference Test")

    # load the model from HuggingFace into a Prompt object
    prompter = Prompt().load_model(model_name, from_hf=True)

    for i, entries in enumerate(prompt_list):

        prompt = entries["query"]
        context = entries["context"]

        response = prompter.prompt_main(prompt, context=context,
                                        prompt_name="default_with_context", temperature=0.3)

        # post-processing checks that compare the model response against the source context
        fc = prompter.evidence_check_numbers(response)
        sc = prompter.evidence_comparison_stats(response)
        sr = prompter.evidence_check_sources(response)

        print("\nupdate: model inference output - ", i, response["llm_response"])
        print("update: gold_answer - ", i, entries["answer"])

        for fc_entry in fc:
            print("update: fact check - ", fc_entry["fact_check"])

        for sc_entry in sc:
            print("update: comparison stats - ", sc_entry["comparison_stats"])

        for sr_entry in sr:
            print("update: sources - ", sr_entry["source_review"])

    return 0


if __name__ == "__main__":

    core_test_set = load_rag_benchmark_tester_ds()

    # BLING model used for the benchmark run - loaded from HuggingFace
    model_name = "llmware/bling-sheared-llama-1.3b-0.1"

    output = run_test(model_name, core_test_set)
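    # assumption: other llmware BLING models published on HuggingFace can generally be swapped in
    # by changing model_name above - this script only exercises the model listed here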