Peiyan committed
Commit b2476bd (verified) · Parent: ca972f0

Update app.py

Files changed (1): app.py (+68, -31)
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
 import pandas as pd
 import os
 from evaluation import evaluate_model # Import your evaluation function
+import zipfile

 # Define the path where you want to save the leaderboard data
 leaderboard_file = "leaderboard.csv"
@@ -12,53 +13,89 @@ if os.path.exists(leaderboard_file):
 else:
     leaderboard = pd.DataFrame(columns=["Model Name", "Score"])

+
+def extract_model(model_file, extract_dir="models"):
+    """
+    Extracts the uploaded model file if it's a zip archive.
+    """
+    os.makedirs(extract_dir, exist_ok=True) # Ensure the directory exists
+    model_path = os.path.join(extract_dir, model_file.name)
+
+    if model_file.name.endswith(".zip"):
+        with zipfile.ZipFile(model_file, 'r') as zip_ref:
+            zip_ref.extractall(extract_dir)
+        print(f"Extracted model to: {extract_dir}")
+        return extract_dir
+    else:
+        # Save the file directly if it's not a zip
+        model_file.save(model_path)
+        return model_path
+
+
 # Submit the evaluation and update the leaderboard
 def submit_evaluation(model_name, model_file):
     """
     Handles the model submission, evaluates it, and updates the leaderboard.
     """
-    # Save the uploaded model to a folder
-    model_path = os.path.join("models", model_file.name)
-    model_file.save(model_path)
+    try:
+        # Extract or save the uploaded model
+        model_path = extract_model(model_file)
+
+        print(f"Model saved or extracted to: {model_path}")
+        print("Starting evaluation...")

-    # Example test data (replace with your actual test dataset)
-    test_data = [
-        ("Example text 1", 0), # (text, label)
-        ("Example text 2", 1),
-        # Add more test data here
-    ]
+        # Example test data (replace with your actual test dataset)
+        test_data = [
+            ("Example text 1", 0), # (text, label)
+            ("Example text 2", 1),
+        ]

-    # Evaluate the model using your custom evaluation code
-    score = evaluate_model(model_path, test_data)
+        # Evaluate the model using your custom evaluation code
+        score = evaluate_model(model_path, test_data)
+        print(f"Model evaluated successfully. Score: {score}")

-    # Update the leaderboard
-    new_entry = {"Model Name": model_name, "Score": score}
-    global leaderboard
-    leaderboard = leaderboard.append(new_entry, ignore_index=True)
-    leaderboard_sorted = leaderboard.sort_values(by="Score", ascending=False)
+        # Update the leaderboard
+        new_entry = {"Model Name": model_name, "Score": score}
+        global leaderboard
+        leaderboard = leaderboard.append(new_entry, ignore_index=True)
+        leaderboard_sorted = leaderboard.sort_values(by="Score", ascending=False)

-    # Save the updated leaderboard
-    leaderboard_sorted.to_csv(leaderboard_file, index=False)
+        # Save the updated leaderboard
+        leaderboard_sorted.to_csv(leaderboard_file, index=False)
+        print("Leaderboard updated.")
+
+        # Return the sorted leaderboard
+        return leaderboard_sorted, "Model submitted successfully!"
+
+    except Exception as e:
+        print(f"Error during evaluation: {str(e)}")
+        return leaderboard, f"Error: {str(e)}"

-    # Return the sorted leaderboard as output
-    return leaderboard_sorted

 # Create the Gradio interface
 with gr.Blocks() as demo:
     gr.Markdown("# Model Evaluation Leaderboard")
-
-    # Model submission interface
+
+    # User inputs for model name and file upload
     with gr.Row():
         model_name_input = gr.Textbox(label="Model Name", placeholder="Enter the model name")
-        model_file_input = gr.File(label="Upload Model (Hugging Face Model Format)", file_types=[".pt", ".bin", ".h5", ".zip"])
+        model_file_input = gr.File(
+            label="Upload Model (Supported Formats: .pt, .bin, .h5, .zip)",
+            file_types=[".pt", ".bin", ".h5", ".zip"]
+        )

     submit_button = gr.Button("Submit Evaluation")
-
-    # Leaderboard display area
-    leaderboard_display = gr.Dataframe(leaderboard)
-    try:
-        submit_button.click(submit_evaluation, inputs=[model_name_input, model_file_input], outputs=[leaderboard_display])
-    except:
-        print('No')
-    # Launch the interface
+
+    # Leaderboard display and status message
+    leaderboard_display = gr.Dataframe(leaderboard, label="Leaderboard")
+    status_message = gr.Textbox(label="Status", interactive=False)
+
+    # Link the submit button to the evaluation function
+    submit_button.click(
+        submit_evaluation,
+        inputs=[model_name_input, model_file_input],
+        outputs=[leaderboard_display, status_message]
+    )
+
+    # Launch the Gradio app
 demo.launch(share=True)
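
One note on the committed code: both the old and new versions update the leaderboard with leaderboard.append(new_entry, ignore_index=True). DataFrame.append was deprecated in pandas 1.4 and removed in pandas 2.0, so this line raises AttributeError on a current pandas install. A minimal sketch of the same update via pd.concat (variable names follow app.py; the sample rows are hypothetical, not real leaderboard data):

import pandas as pd

# Hypothetical existing leaderboard and a new submission (sample values only).
leaderboard = pd.DataFrame({"Model Name": ["baseline"], "Score": [0.50]})
new_entry = {"Model Name": "example-model", "Score": 0.87}

# DataFrame.append was removed in pandas 2.0; concat is the replacement.
leaderboard = pd.concat([leaderboard, pd.DataFrame([new_entry])], ignore_index=True)
leaderboard_sorted = leaderboard.sort_values(by="Score", ascending=False)
leaderboard_sorted.to_csv("leaderboard.csv", index=False)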
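
Another note: the non-zip branch of extract_model keeps the old model_file.save(model_path) call. Depending on the Gradio version, gr.File hands the callback either a temporary-file object (older releases) or a plain file path string (type="filepath" in recent releases); neither exposes a save() method, so copying the uploaded file is the safer pattern. A sketch under those assumptions (same function name as in app.py, for illustration only, not part of the commit):

import os
import shutil
import zipfile

def extract_model(model_file, extract_dir="models"):
    """Copy or extract an upload from gr.File into extract_dir (sketch)."""
    os.makedirs(extract_dir, exist_ok=True)

    # Assumption: model_file is either a str path or an object with a .name path.
    upload_path = model_file if isinstance(model_file, str) else model_file.name
    target_path = os.path.join(extract_dir, os.path.basename(upload_path))

    if upload_path.endswith(".zip"):
        with zipfile.ZipFile(upload_path, "r") as zip_ref:
            zip_ref.extractall(extract_dir)
        return extract_dir

    # Plain file copy instead of a (nonexistent) .save() method on the upload.
    shutil.copy(upload_path, target_path)
    return target_path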