import runpod
import gradio as gr

# GPU type IDs accepted by runpod.create_pod (display names as listed in the RunPod console)
GPU_LIST = ["NVIDIA A100 80GB PCIe", "NVIDIA A100-SXM4-80GB", "NVIDIA A30", "NVIDIA A40", "NVIDIA GeForce RTX 3070", "NVIDIA GeForce RTX 3080", "NVIDIA GeForce RTX 3080 Ti", "NVIDIA GeForce RTX 3090", "NVIDIA GeForce RTX 3090 Ti", "NVIDIA GeForce RTX 4070 Ti", "NVIDIA GeForce RTX 4080", "NVIDIA GeForce RTX 4090", "NVIDIA H100 80GB HBM3", "NVIDIA H100 PCIe", "NVIDIA L4", "NVIDIA L40", "NVIDIA RTX 4000 Ada Generation", "NVIDIA RTX 4000 SFF Ada Generation", "NVIDIA RTX 5000 Ada Generation", "NVIDIA RTX 6000 Ada Generation", "NVIDIA RTX A2000", "NVIDIA RTX A4000", "NVIDIA RTX A4500", "NVIDIA RTX A5000", "NVIDIA RTX A6000", "Tesla V100-FHHL-16GB", "Tesla V100-PCIE-16GB", "Tesla V100-SXM2-16GB", "Tesla V100-SXM2-32GB"]

TITLE = """
# 🧐 LLM AutoEval

> 🗣️ [Large Language Model Course](https://github.com/mlabonne/llm-course)

❤️ Created by [@maximelabonne](https://twitter.com/maximelabonne).

This tool allows you to **automatically evaluate your LLMs** using RunPod. If you don't have a RunPod account, please consider using my [referral link](https://runpod.io?ref=9nvk2srl).

Once a pod has started, you can safely close this tab. The results are then privately uploaded to [GitHub Gist](https://gist.github.com/), and the pod is automatically destroyed.

For further details, see the project on 💻 [GitHub](https://github.com/mlabonne/llm-autoeval).
"""

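# Note: the UI below asks for two API tokens. A short sketch of the assumed
# setup (verify in the respective dashboards):
# - RunPod API token: created in the RunPod console; used locally by this app
#   to authenticate runpod.create_pod().
# - GitHub API token: a personal access token with the "gist" scope; forwarded
#   to the pod as GITHUB_API_TOKEN so it can upload results to GitHub Gist.
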
def autoeval(BENCHMARK, MODEL, GPU, NUMBER_OF_GPUS, CONTAINER_DISK, CLOUD_TYPE, REPO, TRUST_REMOTE_CODE, DEBUG, RUNPOD_TOKEN, GITHUB_API_TOKEN):
    # Authenticate with the RunPod API before creating the pod
    runpod.api_key = RUNPOD_TOKEN

    # Create a pod that runs the evaluation; the container reads its
    # configuration from the environment variables below
    pod = runpod.create_pod(
        name=f"Eval {MODEL.split('/')[-1]} on {BENCHMARK.capitalize()}",
        image_name="runpod/pytorch:2.0.1-py3.10-cuda11.8.0-devel-ubuntu22.04",
        gpu_type_id=GPU,
        cloud_type=CLOUD_TYPE,
        gpu_count=NUMBER_OF_GPUS,
        volume_in_gb=0,
        container_disk_in_gb=CONTAINER_DISK,
        template_id="au6nz6emhk",
        env={
            "BENCHMARK": BENCHMARK,
            "MODEL": MODEL,
            "REPO": REPO,
            "TRUST_REMOTE_CODE": TRUST_REMOTE_CODE,
            "DEBUG": DEBUG,
            "GITHUB_API_TOKEN": GITHUB_API_TOKEN,
        },
    )

    # create_pod() returns the new pod's metadata, including its id
    return gr.Textbox(f"Evaluation started! Pod ID: {pod['id']}", label="Output", visible=True)

with gr.Blocks() as demo:
    gr.Markdown(TITLE)
    inputs = [
        gr.Dropdown(["nous", "openllm"], label="Benchmark", info="Select your benchmark suite", value="nous"),
        gr.Textbox("", label="Model", info="ID of the model you want to evaluate", placeholder="mlabonne/NeuralBeagle14-7B"),
        gr.Dropdown(GPU_LIST, label="GPU", value="NVIDIA GeForce RTX 3090", info="Select your GPU to run the evaluation"),
        gr.Slider(minimum=1, maximum=8, value=1, step=1, label="Number of GPUs", info="Number of GPUs to use"),
        gr.Slider(minimum=50, maximum=500, value=100, step=25, label="Container disk", info="Size of the container disk in GB"),
        gr.Dropdown(["COMMUNITY", "SECURE"], label="Cloud type", value="COMMUNITY", info="Select your cloud type"),
        gr.Textbox("https://github.com/mlabonne/llm-autoeval.git", label="LLM AutoEval repo", info="Link to your LLM AutoEval repo"),
        gr.Checkbox(label="Trust Remote Code", value=False, info="Required for some models like phi-2"),
        gr.Checkbox(label="Debug", value=False, info="Don't kill the pod after evaluation if activated"),
        gr.Textbox("", label="RunPod API Token", info="Your RunPod API token", type="password"),
        gr.Textbox("", label="GitHub API Token", info="Your GitHub API token (needs the gist scope)", type="password"),
    ]
    outputs = gr.Textbox(visible=False)
    btn = gr.Button("Evaluate!")
    btn.click(autoeval, inputs, outputs)

demo.launch()
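
# A minimal sketch (commented out) for inspecting or cleaning up evaluation
# pods from a separate session, assuming the runpod SDK's get_pods() and
# terminate_pod() helpers; handy when DEBUG leaves a pod running:
#
# import runpod
# runpod.api_key = "YOUR_RUNPOD_TOKEN"  # placeholder
# for pod in runpod.get_pods():
#     print(pod["id"], pod["name"], pod["desiredStatus"])
#     # runpod.terminate_pod(pod["id"])  # uncomment to destroy a pod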