liorgreenb committed on
Commit
2d73c50
·
1 Parent(s): 4a98ecf

Paper model dataset fix

Browse files
Files changed (2) hide show
  1. app.py +4 -10
  2. paper_models.csv +4 -4
app.py CHANGED
@@ -4,7 +4,8 @@ from huggingface_hub import HfApi, hf_hub_download
4
  from huggingface_hub.repocard import metadata_load
5
  import pandas as pd
6
 
7
- benchmark_name = 'vsd-benchmark/vsd-fashion'
 
8
  benchmark_tag = 'vsd'
9
 
10
  hf_api = HfApi()
@@ -12,7 +13,7 @@ hf_api = HfApi()
12
  models = list(hf_api.list_models(filter=benchmark_tag))
13
 
14
  MAIN_METRIC_PER_TASK = {
15
- benchmark_name: "ROC_AUC"
16
  }
17
 
18
  def create_model_link(model_id, link=None, type='repos'):
@@ -35,19 +36,12 @@ def get_model_results(model_meta):
35
  metrics_meta = []
36
  for index in model_meta['model-index']:
37
  for result in index['results']:
38
- if result['dataset']['type'] == benchmark_name:
39
  metrics_dict = {metric['name']: metric['value'] for metric in result['metrics']}
40
  metrics_meta += [dict(dataset=result['dataset']['type'], **metrics_dict)]
41
 
42
  return metrics_meta
43
 
44
- paper_models = [
45
- {"dataset": benchmark_name, "model": "Beit", "ROC_AUC": 75.5, "MRR@5": 93.5},
46
- {"dataset": benchmark_name, "model": "DINO", "ROC_AUC": 70.4, "MRR@5": 93.3},
47
- {"dataset": benchmark_name, "model": "ResNext", "ROC_AUC": 62.7, "MRR@5": 84.5},
48
- {"dataset": benchmark_name, "model": "CLIP", "ROC_AUC": 67.8, "MRR@5": 84.8},
49
- ]
50
-
51
  results = []
52
 
53
  for model in models[:10]:
 
4
  from huggingface_hub.repocard import metadata_load
5
  import pandas as pd
6
 
7
+ benchmark_user = 'vsd-benchmark'
8
+ fashion_dataset = f'{benchmark_user}/vsd-fashion'
9
  benchmark_tag = 'vsd'
10
 
11
  hf_api = HfApi()
 
13
  models = list(hf_api.list_models(filter=benchmark_tag))
14
 
15
  MAIN_METRIC_PER_TASK = {
16
+ fashion_dataset: "ROC_AUC"
17
  }
18
 
19
  def create_model_link(model_id, link=None, type='repos'):
 
36
  metrics_meta = []
37
  for index in model_meta['model-index']:
38
  for result in index['results']:
39
+ if result['dataset']['type'].split('/')[0] == benchmark_user:
40
  metrics_dict = {metric['name']: metric['value'] for metric in result['metrics']}
41
  metrics_meta += [dict(dataset=result['dataset']['type'], **metrics_dict)]
42
 
43
  return metrics_meta
44
 
 
 
 
 
 
 
 
45
  results = []
46
 
47
  for model in models[:10]:
paper_models.csv CHANGED
@@ -1,5 +1,5 @@
1
  ,dataset,model,ROC_AUC,MRR@5
2
- 0,vsdeds/vsd_eds_test,Beit,75.5,93.5
3
- 1,vsdeds/vsd_eds_test,DINO,70.4,93.3
4
- 2,vsdeds/vsd_eds_test,ResNext,62.7,84.5
5
- 3,vsdeds/vsd_eds_test,CLIP,67.8,84.8
 
1
  ,dataset,model,ROC_AUC,MRR@5
2
+ 0,vsd-benchmark/vsd-fashion,Beit,75.5,93.5
3
+ 1,vsd-benchmark/vsd-fashion,DINO,70.4,93.3
4
+ 2,vsd-benchmark/vsd-fashion,ResNext,62.7,84.5
5
+ 3,vsd-benchmark/vsd-fashion,CLIP,67.8,84.8