evijit HF staff committed on
Commit
b40662a
•
1 Parent(s): 51c883c

Update app.py

Files changed (1)
  1. app.py +18 -15
app.py CHANGED
@@ -175,7 +175,7 @@ def create_overall_summary(model_data, selected_categories):
 
     # Create summary HTML
     html = "<div class='card overall-summary-card'>"
-    html += "<div class='card-title'>📊 Overall Model Evaluation Summary</div>"
+    html += "<div class='card-title'>📊 Overall AI System Evaluation Summary</div>"
 
     # Key metrics section
     html += "<div class='summary-grid'>"
@@ -276,7 +276,7 @@ def get_modality_icon(modality):
 def create_metadata_card(metadata):
     """Create a formatted HTML card for metadata."""
     html = "<div class='card metadata-card'>"
-    html += "<div class='card-title'>Model Information</div>"
+    html += "<div class='card-title'>AI System Information</div>"
     html += "<div class='metadata-content'>"
 
     # Handle special formatting for modalities
@@ -374,12 +374,16 @@ def create_leaderboard(selected_categories):
         # Calculate overall score
         score_percentage = (total_score / total_questions * 100) if total_questions > 0 else 0
 
-        # Get model type
+        # Get model type and URL
         model_type = data['metadata'].get('Type', 'Unknown')
+        model_url = data['metadata'].get('URL', '')
+
+        # Create model name with HTML link if URL exists
+        model_display = f'<a href="{model_url}" target="_blank">{model}</a>' if model_url else model
 
         # Create entry with numerical scores
         model_entry = {
-            'AI System': model,
+            'AI System': model_display,
             'Type': model_type,
             'Overall Completion Rate': score_percentage
         }
@@ -453,7 +457,7 @@ def create_category_chart(selected_models, selected_categories):
 
             score_percentage = (total_score / total_questions * 100) if total_questions > 0 else 0
             data.append({
-                'Model': model,
+                'AI System': model,
                 'Category': category,
                 'Completion Rate': score_percentage
             })
@@ -462,8 +466,8 @@ def create_category_chart(selected_models, selected_categories):
     if df.empty:
         fig = px.bar(title='No data available for the selected models and categories')
     else:
-        fig = px.bar(df, x='Model', y='Completion Rate', color='Category',
-                     title='AI Model Evaluation Completion Rates by Category',
+        fig = px.bar(df, x='AI System', y='Completion Rate', color='Category',
+                     title='AI System Evaluation Completion Rates by Category',
                      labels={'Completion Rate': 'Completion Rate (%)'},
                      category_orders={"Category": selected_categories})
 
@@ -1224,7 +1228,7 @@ first_model = next(iter(models.values()))
 category_choices = list(first_model['scores'].keys())
 
 with gr.Blocks(css=css) as demo:
-    gr.Markdown("# AI Model Social Impact Scorecard Dashboard")
+    gr.Markdown("# AI System Social Impact Dashboard")
 
     with gr.Row():
         tab_selection = gr.Radio(["Leaderboard", "Category Analysis", "Detailed Scorecard"],
@@ -1232,16 +1236,16 @@ with gr.Blocks(css=css) as demo:
 
     with gr.Row():
         model_chooser = gr.Dropdown(choices=[""] + list(models.keys()),
-                                    label="Select Model for Details",
+                                    label="Select AI System for Details",
                                     value="",
                                     interactive=True, visible=False)
         model_multi_chooser = gr.Dropdown(choices=list(models.keys()),
-                                          label="Select Models for Comparison",
+                                          label="Select AI Systems for Comparison",
                                           value=[],
                                           multiselect=True,
                                           interactive=True,
                                           visible=False,
-                                          info="Select one or more models")
+                                          info="Select one or more AI Systems")
 
     # Category filter now visible for all tabs
     category_filter = gr.CheckboxGroup(choices=category_choices,
@@ -1249,12 +1253,11 @@ with gr.Blocks(css=css) as demo:
                                        value=category_choices)
 
     with gr.Column(visible=True) as leaderboard_tab:
-        # Initialize with data
-        initial_df = create_leaderboard(category_choices)
         leaderboard_output = gr.DataFrame(
-            value=initial_df,
+            value=create_leaderboard(category_choices),
             interactive=False,
-            wrap=True
+            wrap=True,
+            datatype=["markdown", "markdown", "markdown"] + ["markdown"] * len(category_choices)  # Set markdown type for all columns
         )
 
     with gr.Column(visible=False) as category_analysis_tab:
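
Note: the leaderboard change works because cells containing raw <a href> anchors are declared as markdown through the datatype argument; without that declaration the links would appear as literal HTML text. Below is a minimal, self-contained sketch of the same pattern. The model names, URLs, metadata dict, and zeroed completion rates are illustrative placeholders rather than the app's real data, and the rendering of inline HTML inside markdown cells is assumed to behave as the commit's own "Set markdown type for all columns" comment describes.

import gradio as gr
import pandas as pd

# Placeholder metadata keyed by system name (illustrative only).
models = {
    "Example System A": {"Type": "Open Weights", "URL": "https://example.com/a"},
    "Example System B": {"Type": "API-only", "URL": ""},
}

rows = []
for name, meta in models.items():
    url = meta.get("URL", "")
    # Wrap the name in an HTML link only when a URL is present.
    display = f'<a href="{url}" target="_blank">{name}</a>' if url else name
    rows.append({
        "AI System": display,
        "Type": meta.get("Type", "Unknown"),
        "Overall Completion Rate": 0.0,  # placeholder score
    })

df = pd.DataFrame(rows)

with gr.Blocks() as demo:
    gr.DataFrame(
        value=df,
        interactive=False,
        wrap=True,
        datatype=["markdown"] * len(df.columns),  # mark every column as markdown so the anchors render as links
    )

demo.launch()

Passing create_leaderboard(category_choices) directly as the initial value also keeps the table in sync with the default category selection, so the separate initial_df variable is no longer needed.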