|
import gradio as gr |
|
import subprocess |
|
import sys |
|
import os |
|
import threading |
|
import time |
|
import uuid |
|
import glob |
|
import shutil |
|
from pathlib import Path |
|
|
|
class Logger:
    """Tee-style writer mirroring everything written to stdout into a log file.

    Intended to be assigned to ``sys.stdout`` so console output is also
    captured on disk. Implements the minimal file-like interface that
    ``print`` and most libraries expect (``write``/``flush``/``isatty``).
    """

    def __init__(self, filename):
        # Keep a handle on the real stdout so output still reaches the console.
        self.terminal = sys.stdout
        # Logged text may contain non-ASCII model output; pin UTF-8 instead of
        # relying on the platform default encoding.
        self.log = open(filename, "w", encoding="utf-8")

    def write(self, message):
        """Write *message* to both the console and the log file."""
        self.terminal.write(message)
        self.log.write(message)
        # Flush per write so the log reflects progress in real time.
        self.log.flush()

    def flush(self):
        """Flush both underlying streams."""
        self.terminal.flush()
        self.log.flush()

    def close(self):
        """Close the log file; the real stdout is deliberately left open."""
        self.log.close()

    def isatty(self):
        # Not a real terminal; keeps libraries from emitting control codes.
        return False
|
|
|
# CLI entry point for evaluation; also shown in the command textbox by default.
default_command = "bigcodebench.evaluate"

# Simple module-level guard: True while an evaluation subprocess is active,
# so concurrent runs from multiple sessions are refused.
is_running = False
|
|
|
def generate_command(
    jsonl_file, split, subset, save_pass_rate, parallel,
    min_time_limit, max_as_limit, max_data_limit, max_stack_limit,
    check_gt_only, no_gt
):
    """Assemble the ``bigcodebench.evaluate`` shell command from the UI inputs.

    If a samples file was uploaded, it is copied into the working directory so
    the evaluator can reference it by bare filename. Returns the full command
    as a single space-joined string.
    """
    parts = [default_command]

    if jsonl_file is not None:
        # Stage the uploaded file locally under its original basename.
        sample_name = os.path.basename(jsonl_file.name)
        shutil.copy(jsonl_file.name, sample_name)
        parts += ["--samples", sample_name]

    parts += ["--split", split, "--subset", subset]

    if save_pass_rate:
        parts.append("--save_pass_rate")

    # A parallelism of None or 0 means "use the evaluator's default".
    if parallel is not None and parallel != 0:
        parts += ["--parallel", str(int(parallel))]

    parts += [
        "--min-time-limit", str(min_time_limit),
        "--max-as-limit", str(int(max_as_limit)),
        "--max-data-limit", str(int(max_data_limit)),
        "--max-stack-limit", str(int(max_stack_limit)),
    ]

    if check_gt_only:
        parts.append("--check-gt-only")

    if no_gt:
        parts.append("--no-gt")

    return " ".join(parts)
|
|
|
|
|
def cleanup_previous_files(jsonl_file=None):
    """Delete stale ``.json``/``.log``/``.jsonl`` artifacts from the cwd.

    The file named by *jsonl_file* (a freshly uploaded samples file) is
    preserved. Individual failures are printed and skipped so one locked
    file cannot abort the whole cleanup.
    """
    stale = glob.glob("*.json") + glob.glob("*.log") + glob.glob("*.jsonl")
    for path in stale:
        try:
            if jsonl_file is not None and path == jsonl_file:
                continue
            os.remove(path)
        except Exception as exc:
            print(f"Error during cleanup of {path}: {exc}")
|
|
|
def find_result_file():
    """Return the most recently modified ``.json`` file in the cwd, or None."""
    candidates = glob.glob("*.json")
    if not candidates:
        return None
    # Newest mtime wins: the evaluator writes its result file last.
    return max(candidates, key=os.path.getmtime)
|
|
|
def run_bigcodebench(command):
    """Run *command* as a subprocess, yielding its merged stdout/stderr lines.

    After the process exits, yields a completion message and the name of the
    newest result file (if any). Holds the module-level ``is_running`` flag
    for the duration of the run.
    """
    global is_running
    is_running = True
    try:
        yield f"Executing command: {command}\n"

        # shlex.split honours quoting, so sample paths containing spaces are
        # passed as a single argument (plain str.split would break them up).
        process = subprocess.Popen(
            shlex.split(command),
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            universal_newlines=True,
        )

        for line in process.stdout:
            yield line

        process.wait()

        if process.returncode != 0:
            yield f"Error: Command exited with status {process.returncode}\n"

        # Best-effort teardown: kill stray bigcodebench workers owned by this
        # user and clear /tmp scratch left behind by the sandboxed tests.
        cleanup_command = "pids=$(ps -u $(id -u) -o pid,comm | grep 'bigcodebench' | awk '{print $1}'); if [ -n \"$pids\" ]; then echo $pids | xargs -r kill; fi; rm -rf /tmp/*"
        subprocess.run(cleanup_command, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    finally:
        # Reset the guard even if the subprocess fails or the consumer
        # abandons the generator; otherwise the UI would refuse all future
        # runs until the process restarts.
        is_running = False

    yield "Evaluation completed.\n"

    result_file = find_result_file()
    if result_file:
        yield f"Result file found: {result_file}\n"
    else:
        yield "No result file found.\n"
|
|
|
def stream_logs(command, jsonl_file=None):
    """Drive the log textbox: refuse concurrent runs, clean up, then stream.

    Yields the accumulated log text on every new line so Gradio re-renders
    the full console output incrementally.
    """
    global is_running
    if is_running:
        yield "A command is already running. Please wait for it to finish.\n"
        return

    # Bug fix: local_filename was previously only bound when a file was
    # uploaded, so cleanup raised UnboundLocalError when jsonl_file was None.
    local_filename = None
    if jsonl_file is not None:
        local_filename = os.path.basename(jsonl_file.name)

    cleanup_previous_files(local_filename)
    yield "Cleaned up previous files.\n"

    log_content = []
    for log_line in run_bigcodebench(command):
        log_content.append(log_line)
        yield "".join(log_content)
|
|
|
|
|
# UI definition: input widgets feed generate_command(); the submit button
# streams evaluation logs and exposes the result file for download.
with gr.Blocks() as demo:
    gr.Markdown("# BigCodeBench Evaluator")

    # --- evaluation inputs -------------------------------------------------
    with gr.Row():
        jsonl_file = gr.File(label="Upload JSONL file", file_types=[".jsonl"])
        split = gr.Dropdown(choices=["complete", "instruct"], label="Split", value="complete")
        subset = gr.Dropdown(choices=["full", "hard"], label="Subset", value="hard")

    with gr.Row():
        save_pass_rate = gr.Checkbox(label="Save Pass Rate")
        parallel = gr.Number(label="Parallel (optional)", precision=0)
        min_time_limit = gr.Number(label="Min Time Limit", value=1, precision=1)
        max_as_limit = gr.Number(label="Max AS Limit", value=30*1024, precision=0)

    with gr.Row():
        max_data_limit = gr.Number(label="Max Data Limit", value=30*1024, precision=0)
        max_stack_limit = gr.Number(label="Max Stack Limit", value=20, precision=0)
        check_gt_only = gr.Checkbox(label="Check GT Only")
        no_gt = gr.Checkbox(label="No GT")

    # Read-only preview of the exact command that will be executed.
    command_output = gr.Textbox(label="Command", value=default_command, interactive=False)
    with gr.Row():
        submit_btn = gr.Button("Run Evaluation")
        # Hidden until an evaluation produces a result file.
        download_btn = gr.DownloadButton(label="Download Result", visible=False)
    log_output = gr.Textbox(label="Execution Logs", lines=20)

    def update_command(*args):
        # Thin passthrough so one callback serves every input component.
        return generate_command(*args)

    # Order must match generate_command's parameter order exactly.
    input_components = [
        jsonl_file, split, subset, save_pass_rate, parallel,
        min_time_limit, max_as_limit, max_data_limit, max_stack_limit,
        check_gt_only, no_gt
    ]

    # Any input change regenerates the command preview.
    for component in input_components:
        component.change(update_command, inputs=input_components, outputs=command_output)

    def start_evaluation(command, jsonl_file):
        # Stream log text while leaving the buttons untouched during the run.
        for log in stream_logs(command, jsonl_file):
            yield log, gr.update(), gr.update()

        result_file = find_result_file()
        if result_file:
            print(f"Result file: {result_file}")
            # NOTE(review): this is a generator function, so the value of the
            # final `return` only reaches the UI if the installed Gradio
            # version forwards a generator's return value as the last update —
            # confirm the download button actually appears after a run.
            return (gr.update(label="Evaluation completed. Result file found."),
                    gr.Button(visible=False),
                    gr.DownloadButton(label="Download Result", value=result_file))
        else:
            return (gr.update(label="Evaluation completed. No result file found."),
                    gr.Button("Run Evaluation"),
                    gr.DownloadButton(visible=False))

    submit_btn.click(start_evaluation,
                     inputs=[command_output, jsonl_file],
                     outputs=[log_output, submit_btn, download_btn])
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Queue up to 300 pending requests; bind all interfaces on port 7860
    # (standard setup for running inside a container / HF Space).
    demo.queue(max_size=300).launch(server_name="0.0.0.0", server_port=7860)