Spaces:
Sleeping
Sleeping
Filip
committed on
Commit
·
b138416
1
Parent(s):
d55570b
update
Browse files
app.py
CHANGED
@@ -70,8 +70,8 @@ print("LoRA evaluation model loaded successfully!")
|
|
70 |
# Gradio interface
|
71 |
with gr.Blocks(title="LLM as a Judge") as demo:
|
72 |
gr.Markdown("## LLM as a Judge 🧐")
|
73 |
-
gr.Markdown("Welcome to the LLM as a Judge demo! This application uses a finetuned LLM to evaluate responses generated by two different models based on Relevance, Coherence and Completeness. The model will then evaluate the responses based on the
|
74 |
-
gr.Markdown("The default models are models we have finetuned on the FineTome-100k dataset, using Llama 3.2 3B as the base model. You can also specify your own models by entering the Hugging Face repository name and model filename for Model A and Model B.")
|
75 |
|
76 |
# Model inputs
|
77 |
repo_a_input = gr.Textbox(label="Model A Repository", placeholder="Enter the Hugging Face repo name for Model A...", value="forestav/LoRA-2000")
|
|
|
70 |
# Gradio interface
|
71 |
with gr.Blocks(title="LLM as a Judge") as demo:
|
72 |
gr.Markdown("## LLM as a Judge 🧐")
|
73 |
+
gr.Markdown("Welcome to the LLM as a Judge demo! This application uses a finetuned LLM to evaluate responses generated by two different models based on Relevance, Coherence and Completeness. The model will then evaluate the responses based on the criteria and determine the winner.")
|
74 |
+
gr.Markdown("The default models are models we have finetuned on the FineTome-100k dataset, using Llama 3.2 3B as the base model. You can also specify your own models by entering the Hugging Face repository name and model filename for Model A and Model B. Just make sure they are in GGUF format.")
|
75 |
|
76 |
# Model inputs
|
77 |
repo_a_input = gr.Textbox(label="Model A Repository", placeholder="Enter the Hugging Face repo name for Model A...", value="forestav/LoRA-2000")
|