datasetId (string, 5–121 chars) | author (string, 2–42 chars) | last_modified (unknown) | downloads (int64, 0–2.54M) | likes (int64, 0–6.35k) | tags (sequence, 1–7.92k items) | task_categories (sequence, 0–40 items, nullable) | createdAt (unknown) | card (string, 19–1M chars)
---|---|---|---|---|---|---|---|---
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_143add39-e304-4541-8699-70598629205d | argilla-internal-testing | "2024-11-18T14:12:55Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-18T14:12:54Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
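The `argilla-internal-testing` test cards in this listing all share the schema declared above: a `text` string column and a `label` `class_label` with `positive`/`negative` names. A minimal sketch, assuming the standard `datasets` library and that the repo is publicly readable, of loading one split and decoding its labels:

```python
from datasets import load_dataset

# Load the train split of the repo listed in this row (any of the
# argilla-internal-testing class_label test datasets has the same schema).
ds = load_dataset(
    "argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_143add39-e304-4541-8699-70598629205d",
    split="train",
)

label_feature = ds.features["label"]              # ClassLabel(names=['positive', 'negative'])
print(label_feature.names)                        # ['positive', 'negative']
print(ds[0]["text"], label_feature.int2str(ds[0]["label"]))
```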
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_3a3f1eb1-edfa-41ff-b19c-aa1916d8ae65 | argilla-internal-testing | "2024-11-18T14:13:02Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-18T14:13:01Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_17287d6f-46bb-4027-9feb-66dd191d7e9e | argilla-internal-testing | "2024-11-18T14:13:11Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-18T14:13:10Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_4b77bd13-0e97-4b94-98b8-b0b76cf25823 | argilla-internal-testing | "2024-11-18T14:13:35Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-18T14:13:33Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
yaniokodf/alpaca_zh_demo | yaniokodf | "2024-11-18T14:37:36Z" | 7 | 0 | [
"license:apache-2.0",
"size_categories:n<1K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-18T14:26:30Z" | ---
license: apache-2.0
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_bb984135-06e3-444c-9b1f-a56e50c0ad37 | argilla-internal-testing | "2024-11-18T23:56:18Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-18T23:56:17Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_192f91bb-4f43-4c04-a584-27eece2ff77e | argilla-internal-testing | "2024-11-18T23:56:18Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-18T23:56:18Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_36fe9989-9ace-49a0-876a-b074b7e22dc5 | argilla-internal-testing | "2024-11-18T23:56:41Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-18T23:56:39Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_11eb5a74-eebe-4592-b1fb-b899a71790ad | argilla-internal-testing | "2024-11-18T23:56:54Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-18T23:56:52Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_2f8106af-14b2-4708-975c-688725cfd911 | argilla-internal-testing | "2024-11-18T23:57:26Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-18T23:57:25Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Yotofu/so100_shoes | Yotofu | "2024-11-19T04:36:12Z" | 7 | 0 | [
"task_categories:robotics",
"region:us",
"LeRobot",
"so100_stereo",
"tutorial"
] | [
"robotics"
] | "2024-11-19T04:35:53Z" | ---
task_categories:
- robotics
tags:
- LeRobot
- so100_stereo
- tutorial
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
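A minimal sketch of fetching the raw episode files locally with `huggingface_hub` before opening them with the LeRobot tooling referenced above; the LeRobot-specific loader API is not shown in this card, so only the download step is illustrated:

```python
from huggingface_hub import snapshot_download

# Download the dataset repository contents to a local cache directory.
local_dir = snapshot_download(repo_id="Yotofu/so100_shoes", repo_type="dataset")
print(local_dir)
```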
|
Lakshay1Dagar/marketing_prompts_v3 | Lakshay1Dagar | "2024-11-19T11:48:42Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-19T11:48:40Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
splits:
- name: train
num_bytes: 12784
num_examples: 19
download_size: 8681
dataset_size: 12784
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Vinisf/Vinirp | Vinisf | "2024-11-19T14:30:54Z" | 7 | 0 | [
"license:openrail",
"size_categories:n<1K",
"format:audiofolder",
"modality:audio",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-11-19T14:29:27Z" | ---
license: openrail
---
|
dooder35/whereisterminal | dooder35 | "2024-11-19T18:10:19Z" | 7 | 0 | [
"license:other",
"size_categories:n<1K",
"format:imagefolder",
"modality:image",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-11-19T18:06:58Z" | ---
license: other
license_name: question
license_link: https://huggingface.co/new-dataset
---
|
vinesmsuic/SwissProtCLAP_random_10k_gpt4o | vinesmsuic | "2024-11-19T21:11:59Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-19T21:11:58Z" | ---
dataset_info:
features:
- name: UniProt ID
dtype: string
- name: Protein Sequence
dtype: string
- name: gt_desc
dtype: string
- name: structure_info
dtype: string
- name: functional_info
dtype: string
splits:
- name: train
num_bytes: 17074568
num_examples: 10000
download_size: 10103847
dataset_size: 17074568
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
YangZhoumill/factor_medium_64k | YangZhoumill | "2024-11-19T22:10:20Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-19T22:06:22Z" | ---
dataset_info:
features:
- name: problem
dtype: string
- name: question
dtype: string
- name: solution
dtype: string
- name: op
dtype: int64
- name: id
dtype: int64
- name: template
dtype: string
- name: mode
dtype: string
- name: length
dtype: string
- name: d
dtype: int64
splits:
- name: ops_2
num_bytes: 138096441
num_examples: 480
- name: ops_3
num_bytes: 136357405
num_examples: 480
- name: ops_4
num_bytes: 132803657
num_examples: 480
- name: ops_5
num_bytes: 128621002
num_examples: 480
- name: ops_6
num_bytes: 317065486
num_examples: 1159
- name: ops_7
num_bytes: 127027447
num_examples: 492
- name: ops_8
num_bytes: 258260134
num_examples: 1003
- name: ops_9
num_bytes: 215930558
num_examples: 826
- name: ops_10
num_bytes: 186097498
num_examples: 759
- name: ops_11
num_bytes: 183021458
num_examples: 711
- name: ops_12
num_bytes: 180492543
num_examples: 692
- name: ops_13
num_bytes: 168159764
num_examples: 646
- name: ops_14
num_bytes: 151505080
num_examples: 571
- name: ops_15
num_bytes: 172408278
num_examples: 719
- name: ops_16
num_bytes: 164422053
num_examples: 678
- name: ops_17
num_bytes: 164290988
num_examples: 649
- name: ops_18
num_bytes: 156514421
num_examples: 610
- name: ops_19
num_bytes: 133023791
num_examples: 586
- name: ops_20
num_bytes: 125417848
num_examples: 538
download_size: 862021789
dataset_size: 3239515852
configs:
- config_name: default
data_files:
- split: ops_2
path: data/ops_2-*
- split: ops_3
path: data/ops_3-*
- split: ops_4
path: data/ops_4-*
- split: ops_5
path: data/ops_5-*
- split: ops_6
path: data/ops_6-*
- split: ops_7
path: data/ops_7-*
- split: ops_8
path: data/ops_8-*
- split: ops_9
path: data/ops_9-*
- split: ops_10
path: data/ops_10-*
- split: ops_11
path: data/ops_11-*
- split: ops_12
path: data/ops_12-*
- split: ops_13
path: data/ops_13-*
- split: ops_14
path: data/ops_14-*
- split: ops_15
path: data/ops_15-*
- split: ops_16
path: data/ops_16-*
- split: ops_17
path: data/ops_17-*
- split: ops_18
path: data/ops_18-*
- split: ops_19
path: data/ops_19-*
- split: ops_20
path: data/ops_20-*
---
|
amuvarma/mls-train-500 | amuvarma | "2024-11-20T10:50:55Z" | 7 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-20T03:06:37Z" | ---
dataset_info:
features:
- name: audio_values
sequence: float64
- name: transcript_ids
sequence: int64
- name: labels
sequence: int64
- name: input_ids
sequence: int32
splits:
- name: train
num_bytes: 1200822000000
num_examples: 500000
download_size: 738443026079
dataset_size: 1200822000000
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
CodeDPO/qwen_coder_train_20241120 | CodeDPO | "2024-11-20T05:29:16Z" | 7 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-20T05:29:03Z" | ---
dataset_info:
features:
- name: program_1
dtype: string
- name: program_2
dtype: string
- name: winner
dtype: int64
- name: accuracy_1
dtype: float64
- name: accuracy_2
dtype: float64
- name: accuracy_difference
dtype: float64
- name: model_1
dtype: string
- name: model_2
dtype: string
- name: prompt
dtype: string
- name: tests
sequence: string
splits:
- name: train
num_bytes: 2476888565
num_examples: 490333
download_size: 124834643
dataset_size: 2476888565
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
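The `program_1`/`program_2`/`winner` columns above form pairwise code-preference data. A hypothetical sketch of converting the rows into prompt/chosen/rejected triples; the mapping `winner == 1` meaning `program_1` was preferred (and `winner == 2` the opposite) is an assumption not stated in the card:

```python
from datasets import load_dataset

ds = load_dataset("CodeDPO/qwen_coder_train_20241120", split="train")

def to_preference(row):
    # Assumption: winner == 1 -> program_1 preferred, winner == 2 -> program_2 preferred.
    chosen = row["program_1"] if row["winner"] == 1 else row["program_2"]
    rejected = row["program_2"] if row["winner"] == 1 else row["program_1"]
    return {"prompt": row["prompt"], "chosen": chosen, "rejected": rejected}

pairs = ds.map(to_preference, remove_columns=ds.column_names)
```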
|
paulrichmond/astro_gen0 | paulrichmond | "2024-11-20T14:55:27Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-20T08:07:28Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: abstract
dtype: string
- name: prompt
dtype: string
- name: y_true
dtype: string
- name: comp_Llama-2-7b-hf
dtype: string
- name: preds_Llama-2-7b-hf
dtype: string
- name: comp_Llama-3.1-8B
dtype: string
- name: preds_Llama-3.1-8B
dtype: string
- name: comp_astrollama_4bit
dtype: string
- name: preds_astrollama_4bit
dtype: string
splits:
- name: test
num_bytes: 829787
num_examples: 50
download_size: 475338
dataset_size: 829787
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
Generated with the following parameters:
- max_new_tokens: 1024
- min_new_tokens: 1
- temperature: 0.8
- do_sample: true |
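The parameters listed in the `paulrichmond/astro_gen0` card above map directly onto the `generate()` arguments of the `transformers` library. A sketch of how such a completion pass would typically look; the model id (one of the base models named in the card) and access handling are assumptions:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed model id; the card compares Llama-2-7b-hf, Llama-3.1-8B and an astrollama variant.
model_id = "meta-llama/Llama-3.1-8B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("Example abstract-completion prompt", return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=1024,   # as listed in the card
    min_new_tokens=1,
    temperature=0.8,
    do_sample=True,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```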
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_32_16_0.05_32_BestF1 | ferrazzipietro | "2024-11-20T09:33:33Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-20T09:33:30Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: offsets
sequence: int64
- name: text
dtype: string
- name: type
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 140757
num_examples: 97
- name: test
num_bytes: 1213941
num_examples: 743
download_size: 277939
dataset_size: 1354698
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
DopeorNope/only_gsm8k_v2 | DopeorNope | "2024-11-20T10:29:49Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-20T10:29:46Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: output
dtype: string
- name: input
dtype: string
splits:
- name: train
num_bytes: 3993094
num_examples: 7473
- name: validation
num_bytes: 3993094
num_examples: 7473
download_size: 4616658
dataset_size: 7986188
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
paulrichmond/hep_th_gen0 | paulrichmond | "2024-11-20T14:57:24Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-20T12:29:11Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: submitter
dtype: string
- name: authors
dtype: string
- name: title
dtype: string
- name: comments
dtype: string
- name: journal-ref
dtype: string
- name: doi
dtype: string
- name: report-no
dtype: string
- name: categories
dtype: string
- name: license
dtype: string
- name: orig_abstract
dtype: string
- name: versions
list:
- name: created
dtype: string
- name: version
dtype: string
- name: update_date
dtype: string
- name: authors_parsed
sequence:
sequence: string
- name: abstract
dtype: string
- name: prompt
dtype: string
- name: y_true
dtype: string
- name: comp_s3-L-3.1-8B-base_v3
dtype: string
- name: preds_s3-L-3.1-8B-base_v3
dtype: string
- name: comp_s1-L-3.1-8B-base
dtype: string
- name: preds_s1-L-3.1-8B-base
dtype: string
- name: comp_Llama-3.1-8B
dtype: string
- name: preds_Llama-3.1-8B
dtype: string
- name: comp_s2-L-3.1-8B-base
dtype: string
- name: preds_s2-L-3.1-8B-base
dtype: string
splits:
- name: test
num_bytes: 524473
num_examples: 50
download_size: 330237
dataset_size: 524473
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
Generated with the following parameters:
- max_new_tokens: 1024
- min_new_tokens: 1
- temperature: 0.8
- do_sample: true |
youseon/pakdd_table_merge_data_task_1 | youseon | "2024-11-20T13:20:41Z" | 7 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-20T13:20:35Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
splits:
- name: train
num_bytes: 103509254
num_examples: 9196
download_size: 20719831
dataset_size: 103509254
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Hotpot-Killer/instructpg-dataset | Hotpot-Killer | "2024-11-20T15:08:43Z" | 7 | 0 | [
"license:mit",
"region:us"
] | null | "2024-11-20T15:08:43Z" | ---
license: mit
---
|
plaguss/test-vision-generation-Llama-3.2-11B-Vision-Instruct | plaguss | "2024-11-21T08:16:27Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"library:distilabel",
"region:us",
"synthetic",
"distilabel",
"rlaif"
] | null | "2024-11-20T15:28:41Z" | ---
size_categories: n<1K
dataset_info:
features:
- name: instruction
dtype: string
- name: image
dtype: string
- name: generation
dtype: string
- name: distilabel_metadata
struct:
- name: raw_input_vision_gen
list:
- name: content
list:
- name: image_url
struct:
- name: url
dtype: string
- name: text
dtype: string
- name: type
dtype: string
- name: role
dtype: string
- name: raw_output_vision_gen
dtype: string
- name: statistics_vision_gen
struct:
- name: input_tokens
dtype: int64
- name: output_tokens
dtype: int64
- name: model_name
dtype: string
splits:
- name: train
num_bytes: 1759
num_examples: 1
download_size: 18245
dataset_size: 1759
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
tags:
- synthetic
- distilabel
- rlaif
---
<p align="left">
<a href="https://github.com/argilla-io/distilabel">
<img src="https://raw.githubusercontent.com/argilla-io/distilabel/main/docs/assets/distilabel-badge-light.png" alt="Built with Distilabel" width="200" height="32"/>
</a>
</p>
# Dataset Card for test-vision-generation-Llama-3.2-11B-Vision-Instruct
This dataset has been created with [distilabel](https://distilabel.argilla.io/).
## Dataset Summary
This dataset contains a `pipeline.yaml` which can be used to reproduce the pipeline that generated it in distilabel using the `distilabel` CLI:
```console
distilabel pipeline run --config "https://huggingface.co/datasets/plaguss/test-vision-generation-Llama-3.2-11B-Vision-Instruct/raw/main/pipeline.yaml"
```
or explore the configuration:
```console
distilabel pipeline info --config "https://huggingface.co/datasets/plaguss/test-vision-generation-Llama-3.2-11B-Vision-Instruct/raw/main/pipeline.yaml"
```
## Dataset structure
The examples have the following structure per configuration:
<details><summary> Configuration: default </summary><hr>
```json
{
"distilabel_metadata": {
"raw_input_vision_gen": [
{
"content": [
{
"image_url": null,
"text": "What\u2019s in this image?",
"type": "text"
},
{
"image_url": {
"url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
},
"text": null,
"type": "image_url"
}
],
"role": "user"
}
],
"raw_output_vision_gen": "This image depicts a wooden boardwalk weaving its way through a lush meadow, flanked by vibrant green grass that stretches towards the horizon under a calm and inviting sky.\n\nThe boardwalk runs straight ahead, away from the viewer, forming a clear pathway through the tall, lush green grass, crops or other plant types or an assortment of small trees and shrubs. This meadow is dotted with trees and shrubs, appearing to be healthy and green. The sky above is a beautiful blue with white clouds scattered throughout, adding a sense of tranquility to the scene.\n\nWhile this image appears to be of a natural landscape, because grass is",
"statistics_vision_gen": {
"input_tokens": 43,
"output_tokens": 128
}
},
"generation": "This image depicts a wooden boardwalk weaving its way through a lush meadow, flanked by vibrant green grass that stretches towards the horizon under a calm and inviting sky.\n\nThe boardwalk runs straight ahead, away from the viewer, forming a clear pathway through the tall, lush green grass, crops or other plant types or an assortment of small trees and shrubs. This meadow is dotted with trees and shrubs, appearing to be healthy and green. The sky above is a beautiful blue with white clouds scattered throughout, adding a sense of tranquility to the scene.\n\nWhile this image appears to be of a natural landscape, because grass is",
"image": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
"instruction": "What\u2019s in this image?",
"model_name": "meta-llama/Llama-3.2-11B-Vision-Instruct"
}
```
This subset can be loaded as:
```python
from datasets import load_dataset
ds = load_dataset("plaguss/test-vision-generation-Llama-3.2-11B-Vision-Instruct", "default")
```
Or simply as follows, since there is only one configuration and it is named `default`:
```python
from datasets import load_dataset
ds = load_dataset("plaguss/test-vision-generation-Llama-3.2-11B-Vision-Instruct")
```
</details>
|
neurograce/SubstationDataset | neurograce | "2024-11-20T19:15:22Z" | 7 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-11-20T19:15:22Z" | ---
license: apache-2.0
---
|
sumuks/y1.5-single-shot-questions-original | sumuks | "2024-11-20T23:25:44Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-20T23:25:42Z" | ---
dataset_info:
features:
- name: chunk_uuid
dtype: string
- name: generator_model
dtype: string
- name: question_type
dtype: string
- name: question
dtype: string
- name: answer
dtype: string
- name: document_analysis
dtype: string
- name: chunk_analysis
dtype: string
- name: potential_question_directions
sequence: string
- name: best_direction
dtype: string
- name: reasoning
dtype: string
- name: estimated_difficulty
dtype: int64
- name: testable_concepts
sequence: string
- name: difficulty_justification
dtype: string
- name: quote_context
dtype: string
- name: supporting_quotes
sequence: string
splits:
- name: train
num_bytes: 58162258
num_examples: 28191
download_size: 15896400
dataset_size: 58162258
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
babs/ezine-8 | babs | "2024-11-21T00:26:36Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T00:26:34Z" | ---
dataset_info:
features:
- name: audio
dtype: audio
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 58693103.0
num_examples: 194
download_size: 58660510
dataset_size: 58693103.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
gcp-acp/flipkart-dataprep | gcp-acp | "2024-11-21T00:37:24Z" | 7 | 0 | [
"license:cc-by-sa-4.0",
"region:us"
] | null | "2024-11-21T00:37:22Z" | ---
license: cc-by-sa-4.0
---
- Generated prompt data, [Built with Llama 3.1](https://www.llama.com/llama3_1/license/)
- [Data Preparation](https://github.com/GoogleCloudPlatform/accelerated-platforms/tree/main/docs/use-cases/model-fine-tuning-pipeline#data-preparation)
- [Raw Data](https://www.kaggle.com/datasets/PromptCloudHQ/flipkart-products/data)
|
gcp-acp/flipkart-preprocessed | gcp-acp | "2024-11-21T00:38:43Z" | 7 | 0 | [
"license:cc-by-sa-4.0",
"size_categories:10K<n<100K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T00:38:39Z" | ---
size_categories:
- 10K<n<100K
license: cc-by-sa-4.0
---
- [Data Preprocessing](https://github.com/GoogleCloudPlatform/accelerated-platforms/tree/main/docs/use-cases/model-fine-tuning-pipeline#data-preprocessing-steps)
- [Raw Data](https://www.kaggle.com/datasets/PromptCloudHQ/flipkart-products/data)
|
jamesnatulan/cuelang | jamesnatulan | "2024-11-21T01:36:51Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T01:31:54Z" | ---
dataset_info:
features:
- name: repo_path
dtype: string
- name: repo_name
dtype: string
- name: url
dtype: string
- name: content
dtype: string
splits:
- name: train
num_bytes: 88645676
num_examples: 13235
download_size: 20801533
dataset_size: 88645676
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ncoop57/my_dataset | ncoop57 | "2024-11-21T18:51:40Z" | 7 | 0 | [
"size_categories:n<1K",
"format:json",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"region:us",
"fastdata",
"synthetic"
] | null | "2024-11-21T04:56:47Z" | ---
tags:
- fastdata
- synthetic
---
# my_dataset
_Note: This is an AI-generated dataset, so its content may be inaccurate or false._
**Source of the data:**
The dataset was generated using the [Fastdata](https://github.com/AnswerDotAI/fastdata) library and claude-3-haiku-20240307 with the following input:
## System Prompt
```
You are a helpful assistant.
```
## Prompt Template
```
Generate English and Spanish translations on the following topic:
<topic>{topic}</topic>
```
## Sample Input
```json
[{'topic': 'I am going to the beach this weekend'}, {'topic': 'I am going to the gym after work'}]
```
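A small sketch of how the prompt template above expands for each sample input row; this only illustrates the templating with plain string formatting, not the actual Fastdata/Claude generation call described in the card:

```python
# Fill the prompt template with each sample input shown above.
prompt_template = (
    "Generate English and Spanish translations on the following topic:\n"
    "<topic>{topic}</topic>"
)
sample_inputs = [
    {"topic": "I am going to the beach this weekend"},
    {"topic": "I am going to the gym after work"},
]
prompts = [prompt_template.format(**row) for row in sample_inputs]
print(prompts[0])
```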
|
adhammai/depression_v2 | adhammai | "2024-11-21T05:50:46Z" | 7 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T05:50:42Z" | ---
dataset_info:
features:
- name: is_depression
dtype: int64
- name: clean_text
dtype: string
splits:
- name: train
num_bytes: 472207
num_examples: 1000
- name: validation
num_bytes: 89917
num_examples: 200
download_size: 341335
dataset_size: 562124
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
piporica/custom_drug_dataset | piporica | "2024-11-21T06:44:31Z" | 7 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T06:44:16Z" | ---
dataset_info:
features:
- name: patient_id
dtype: int64
- name: drugName
dtype: string
- name: condition
dtype: string
- name: review
dtype: string
- name: rating
dtype: float64
- name: date
dtype: string
- name: usefulCount
dtype: int64
- name: review_length
dtype: int64
splits:
- name: train
num_bytes: 65975578
num_examples: 110811
- name: validation
num_bytes: 16422578
num_examples: 27703
- name: test
num_bytes: 27430466
num_examples: 46108
download_size: 63886981
dataset_size: 109828622
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
magneum/pixelhue-captioned-dataset | magneum | "2024-11-21T07:35:14Z" | 7 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-11-21T07:35:14Z" | ---
license: apache-2.0
---
|
liyu002/coco_label | liyu002 | "2024-11-21T08:36:13Z" | 7 | 0 | [
"license:apache-2.0",
"modality:image",
"region:us"
] | null | "2024-11-21T08:32:47Z" | ---
license: apache-2.0
---
|
joycewu/common_voice_16_1_zh_TW_pseudo_labelled_large_v2 | joycewu | "2024-11-21T10:11:11Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T09:18:41Z" | ---
dataset_info:
config_name: zh-TW
features:
- name: client_id
dtype: string
- name: path
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: up_votes
dtype: int64
- name: down_votes
dtype: int64
- name: age
dtype: string
- name: gender
dtype: string
- name: accent
dtype: string
- name: locale
dtype: string
- name: segment
dtype: string
- name: variant
dtype: string
- name: whisper_transcript
dtype: string
- name: eval_preds
sequence: int64
splits:
- name: train
num_bytes: 162181076.339
num_examples: 7101
- name: validation
num_bytes: 124787561.76
num_examples: 4976
- name: test
num_bytes: 146090484.544
num_examples: 4976
download_size: 411678601
dataset_size: 433059122.643
configs:
- config_name: zh-TW
data_files:
- split: train
path: zh-TW/train-*
- split: validation
path: zh-TW/validation-*
- split: test
path: zh-TW/test-*
---
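A minimal sketch, assuming public access, of loading the `zh-TW` configuration declared above and inspecting one decoded audio example alongside its Whisper pseudo-label:

```python
from datasets import load_dataset

ds = load_dataset(
    "joycewu/common_voice_16_1_zh_TW_pseudo_labelled_large_v2",
    "zh-TW",
    split="validation",
)

example = ds[0]
print(example["audio"]["sampling_rate"])   # 16000, per the card
print(example["sentence"])                 # reference transcription
print(example["whisper_transcript"])       # pseudo-label from large-v2
```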
|
HamdanXI/libriTTS_dev_wav2vec2_latent_layer0_2sec_PERFECT_chunk_12 | HamdanXI | "2024-11-21T12:06:42Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T10:18:59Z" | ---
dataset_info:
features:
- name: audio_clip
sequence: float64
- name: layer0_prediction
sequence: float64
- name: predicted_text
dtype: string
- name: speaker_id
dtype: string
splits:
- name: train
num_bytes: 2646635831
num_examples: 100
download_size: 1959339393
dataset_size: 2646635831
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "libriTTS_dev_wav2vec2_latent_layer0_2sec_PERFECT_chunk_12"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
hepicode/QA-Dataset-mini | hepicode | "2024-11-21T10:30:24Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T10:30:22Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: output
dtype: string
splits:
- name: train
num_bytes: 97331
num_examples: 287
download_size: 46421
dataset_size: 97331
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
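The `instruction`/`input`/`output` columns above follow the common Alpaca-style layout. A sketch of rendering one record into a single supervised fine-tuning string; the exact prompt format is an assumption, since the card does not prescribe a template:

```python
from datasets import load_dataset

ds = load_dataset("hepicode/QA-Dataset-mini", split="train")

def render(row):
    # Assumed Alpaca-style template; records with an empty input skip the Input block.
    if row["input"]:
        prompt = (
            f"### Instruction:\n{row['instruction']}\n\n"
            f"### Input:\n{row['input']}\n\n### Response:\n"
        )
    else:
        prompt = f"### Instruction:\n{row['instruction']}\n\n### Response:\n"
    return {"text": prompt + row["output"]}

sft = ds.map(render)
```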
|
Holmeister/MLSUM-single | Holmeister | "2024-11-21T11:09:03Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T11:08:45Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: output
dtype: string
- name: inst_no
dtype: int64
- name: system
dtype: string
splits:
- name: train
num_bytes: 55286755
num_examples: 20000
- name: validation
num_bytes: 2408874
num_examples: 1000
- name: test
num_bytes: 10404070
num_examples: 5000
download_size: 33379631
dataset_size: 68099699
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
VerboVision/VerboVision | VerboVision | "2024-11-21T11:29:33Z" | 7 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T11:29:13Z" | ---
dataset_info:
features:
- name: image
dtype: string
- name: caption
dtype: string
splits:
- name: train
num_bytes: 84588626
num_examples: 100002
download_size: 42890995
dataset_size: 84588626
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
procit007/treated_0.0 | procit007 | "2024-11-21T11:41:15Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T11:38:48Z" | ---
dataset_info:
features:
- name: gender
dtype: string
- name: accent
dtype: string
- name: speaker_id
dtype: int64
- name: speaker_name
dtype: string
- name: text
dtype: string
- name: normalized_text
dtype: string
- name: audio
dtype: audio
- name: treated
dtype: bool
- name: metrics
struct:
- name: clipping_ratio
dtype: float64
- name: duration
dtype: float64
- name: is_valid
dtype: bool
- name: rms_energy
dtype: float64
- name: sample_rate
dtype: int64
- name: silence_ratio
dtype: float64
- name: snr
dtype: float64
splits:
- name: train
num_bytes: 3179526859.0
num_examples: 10000
download_size: 2982631424
dataset_size: 3179526859.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
juliadollis/mistral_toxic_hatespeech3 | juliadollis | "2024-11-21T12:38:20Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T12:38:18Z" | ---
dataset_info:
features:
- name: nome_arquivo
dtype: string
- name: file_id
dtype: string
- name: user_id
dtype: int64
- name: subforum_id
dtype: int64
- name: num_contexts
dtype: int64
- name: label
dtype: string
- name: text
dtype: string
- name: is_toxic
dtype: int64
- name: predicted_is_toxic
dtype: int64
- name: __index_level_0__
dtype: int64
splits:
- name: train
num_bytes: 1693
num_examples: 10
download_size: 7545
dataset_size: 1693
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
juliadollis/mistral_ImplicitHateCorpus1 | juliadollis | "2024-11-21T12:44:04Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T12:44:02Z" | ---
dataset_info:
features:
- name: text_en
dtype: string
- name: class
dtype: string
- name: is_toxic
dtype: int64
- name: text
dtype: string
- name: predicted_is_toxic
dtype: int64
- name: __index_level_0__
dtype: int64
splits:
- name: train
num_bytes: 4890
num_examples: 20
download_size: 7892
dataset_size: 4890
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
open-llm-leaderboard/GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct-details | open-llm-leaderboard | "2024-11-21T12:50:35Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:json",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T12:46:30Z" | ---
pretty_name: Evaluation run of GoToCompany/llama3-8b-cpt-sahabatai-v1-instruct
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [GoToCompany/llama3-8b-cpt-sahabatai-v1-instruct](https://huggingface.co/GoToCompany/llama3-8b-cpt-sahabatai-v1-instruct)\n\
The dataset is composed of 38 configuration(s), each one corresponding to one of\
\ the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can\
\ be found as a specific split in each configuration, the split being named using\
\ the timestamp of the run.The \"train\" split is always pointing to the latest\
\ results.\n\nAn additional configuration \"results\" store all the aggregated results\
\ of the run.\n\nTo load the details from a run, you can for instance do the following:\n\
```python\nfrom datasets import load_dataset\ndata = load_dataset(\n\t\"open-llm-leaderboard/GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct-details\"\
,\n\tname=\"GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_boolean_expressions\"\
,\n\tsplit=\"latest\"\n)\n```\n\n## Latest results\n\nThese are the [latest results\
\ from run 2024-11-21T12-46-29.613339](https://huggingface.co/datasets/open-llm-leaderboard/GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct-details/blob/main/GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct/results_2024-11-21T12-46-29.613339.json)\
\ (note that there might be results for other tasks in the repos if successive evals\
\ didn't cover the same tasks. You find each in the results and the \"latest\" split\
\ for each eval):\n\n```python\n{\n \"all\": {\n \"leaderboard\": {\n\
\ \"inst_level_loose_acc,none\": 0.6115107913669064,\n \"\
inst_level_loose_acc_stderr,none\": \"N/A\",\n \"prompt_level_loose_acc,none\"\
: 0.4805914972273567,\n \"prompt_level_loose_acc_stderr,none\": 0.021500357879025087,\n\
\ \"acc_norm,none\": 0.4541445064210663,\n \"acc_norm_stderr,none\"\
: 0.005315784780969996,\n \"exact_match,none\": 0.11858006042296072,\n\
\ \"exact_match_stderr,none\": 0.008506754143074618,\n \"\
prompt_level_strict_acc,none\": 0.4565619223659889,\n \"prompt_level_strict_acc_stderr,none\"\
: 0.021435222545538937,\n \"acc,none\": 0.3453291223404255,\n \
\ \"acc_stderr,none\": 0.004334881701803689,\n \"inst_level_strict_acc,none\"\
: 0.5911270983213429,\n \"inst_level_strict_acc_stderr,none\": \"N/A\"\
,\n \"alias\": \"leaderboard\"\n },\n \"leaderboard_bbh\"\
: {\n \"acc_norm,none\": 0.4936642943933345,\n \"acc_norm_stderr,none\"\
: 0.006171633979063365,\n \"alias\": \" - leaderboard_bbh\"\n \
\ },\n \"leaderboard_bbh_boolean_expressions\": {\n \"alias\"\
: \" - leaderboard_bbh_boolean_expressions\",\n \"acc_norm,none\": 0.82,\n\
\ \"acc_norm_stderr,none\": 0.02434689065029351\n },\n \
\ \"leaderboard_bbh_causal_judgement\": {\n \"alias\": \" - leaderboard_bbh_causal_judgement\"\
,\n \"acc_norm,none\": 0.6096256684491979,\n \"acc_norm_stderr,none\"\
: 0.03576973947986408\n },\n \"leaderboard_bbh_date_understanding\"\
: {\n \"alias\": \" - leaderboard_bbh_date_understanding\",\n \
\ \"acc_norm,none\": 0.496,\n \"acc_norm_stderr,none\": 0.0316851985511992\n\
\ },\n \"leaderboard_bbh_disambiguation_qa\": {\n \"alias\"\
: \" - leaderboard_bbh_disambiguation_qa\",\n \"acc_norm,none\": 0.572,\n\
\ \"acc_norm_stderr,none\": 0.031355968923772626\n },\n \
\ \"leaderboard_bbh_formal_fallacies\": {\n \"alias\": \" - leaderboard_bbh_formal_fallacies\"\
,\n \"acc_norm,none\": 0.552,\n \"acc_norm_stderr,none\":\
\ 0.03151438761115348\n },\n \"leaderboard_bbh_geometric_shapes\"\
: {\n \"alias\": \" - leaderboard_bbh_geometric_shapes\",\n \
\ \"acc_norm,none\": 0.42,\n \"acc_norm_stderr,none\": 0.03127799950463661\n\
\ },\n \"leaderboard_bbh_hyperbaton\": {\n \"alias\": \"\
\ - leaderboard_bbh_hyperbaton\",\n \"acc_norm,none\": 0.66,\n \
\ \"acc_norm_stderr,none\": 0.030020073605457876\n },\n \"leaderboard_bbh_logical_deduction_five_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_five_objects\"\
,\n \"acc_norm,none\": 0.32,\n \"acc_norm_stderr,none\": 0.029561724955240978\n\
\ },\n \"leaderboard_bbh_logical_deduction_seven_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_logical_deduction_seven_objects\",\n\
\ \"acc_norm,none\": 0.332,\n \"acc_norm_stderr,none\": 0.029844039047465857\n\
\ },\n \"leaderboard_bbh_logical_deduction_three_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_logical_deduction_three_objects\",\n\
\ \"acc_norm,none\": 0.524,\n \"acc_norm_stderr,none\": 0.03164968895968774\n\
\ },\n \"leaderboard_bbh_movie_recommendation\": {\n \"\
alias\": \" - leaderboard_bbh_movie_recommendation\",\n \"acc_norm,none\"\
: 0.692,\n \"acc_norm_stderr,none\": 0.02925692860650181\n },\n\
\ \"leaderboard_bbh_navigate\": {\n \"alias\": \" - leaderboard_bbh_navigate\"\
,\n \"acc_norm,none\": 0.636,\n \"acc_norm_stderr,none\":\
\ 0.030491555220405475\n },\n \"leaderboard_bbh_object_counting\"\
: {\n \"alias\": \" - leaderboard_bbh_object_counting\",\n \
\ \"acc_norm,none\": 0.396,\n \"acc_norm_stderr,none\": 0.030993197854577898\n\
\ },\n \"leaderboard_bbh_penguins_in_a_table\": {\n \"\
alias\": \" - leaderboard_bbh_penguins_in_a_table\",\n \"acc_norm,none\"\
: 0.4452054794520548,\n \"acc_norm_stderr,none\": 0.04127264774457449\n\
\ },\n \"leaderboard_bbh_reasoning_about_colored_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_reasoning_about_colored_objects\",\n\
\ \"acc_norm,none\": 0.616,\n \"acc_norm_stderr,none\": 0.030821679117375447\n\
\ },\n \"leaderboard_bbh_ruin_names\": {\n \"alias\": \"\
\ - leaderboard_bbh_ruin_names\",\n \"acc_norm,none\": 0.676,\n \
\ \"acc_norm_stderr,none\": 0.029658294924545567\n },\n \"\
leaderboard_bbh_salient_translation_error_detection\": {\n \"alias\"\
: \" - leaderboard_bbh_salient_translation_error_detection\",\n \"acc_norm,none\"\
: 0.436,\n \"acc_norm_stderr,none\": 0.031425567060281365\n },\n\
\ \"leaderboard_bbh_snarks\": {\n \"alias\": \" - leaderboard_bbh_snarks\"\
,\n \"acc_norm,none\": 0.5842696629213483,\n \"acc_norm_stderr,none\"\
: 0.037044683959609616\n },\n \"leaderboard_bbh_sports_understanding\"\
: {\n \"alias\": \" - leaderboard_bbh_sports_understanding\",\n \
\ \"acc_norm,none\": 0.744,\n \"acc_norm_stderr,none\": 0.027657108718204846\n\
\ },\n \"leaderboard_bbh_temporal_sequences\": {\n \"alias\"\
: \" - leaderboard_bbh_temporal_sequences\",\n \"acc_norm,none\": 0.244,\n\
\ \"acc_norm_stderr,none\": 0.02721799546455311\n },\n \
\ \"leaderboard_bbh_tracking_shuffled_objects_five_objects\": {\n \"\
alias\": \" - leaderboard_bbh_tracking_shuffled_objects_five_objects\",\n \
\ \"acc_norm,none\": 0.164,\n \"acc_norm_stderr,none\": 0.02346526100207671\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
,\n \"acc_norm,none\": 0.176,\n \"acc_norm_stderr,none\":\
\ 0.024133497525457123\n },\n \"leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
,\n \"acc_norm,none\": 0.28,\n \"acc_norm_stderr,none\": 0.02845414827783231\n\
\ },\n \"leaderboard_bbh_web_of_lies\": {\n \"alias\":\
\ \" - leaderboard_bbh_web_of_lies\",\n \"acc_norm,none\": 0.488,\n\
\ \"acc_norm_stderr,none\": 0.03167708558254714\n },\n \
\ \"leaderboard_gpqa\": {\n \"acc_norm,none\": 0.26677852348993286,\n\
\ \"acc_norm_stderr,none\": 0.012801592257275486,\n \"alias\"\
: \" - leaderboard_gpqa\"\n },\n \"leaderboard_gpqa_diamond\": {\n\
\ \"alias\": \" - leaderboard_gpqa_diamond\",\n \"acc_norm,none\"\
: 0.3282828282828283,\n \"acc_norm_stderr,none\": 0.03345678422756777\n\
\ },\n \"leaderboard_gpqa_extended\": {\n \"alias\": \"\
\ - leaderboard_gpqa_extended\",\n \"acc_norm,none\": 0.25457875457875456,\n\
\ \"acc_norm_stderr,none\": 0.01866008900417748\n },\n \
\ \"leaderboard_gpqa_main\": {\n \"alias\": \" - leaderboard_gpqa_main\"\
,\n \"acc_norm,none\": 0.2544642857142857,\n \"acc_norm_stderr,none\"\
: 0.02060126475832284\n },\n \"leaderboard_ifeval\": {\n \
\ \"alias\": \" - leaderboard_ifeval\",\n \"prompt_level_strict_acc,none\"\
: 0.4565619223659889,\n \"prompt_level_strict_acc_stderr,none\": 0.021435222545538937,\n\
\ \"inst_level_strict_acc,none\": 0.5911270983213429,\n \"\
inst_level_strict_acc_stderr,none\": \"N/A\",\n \"prompt_level_loose_acc,none\"\
: 0.4805914972273567,\n \"prompt_level_loose_acc_stderr,none\": 0.021500357879025083,\n\
\ \"inst_level_loose_acc,none\": 0.6115107913669064,\n \"\
inst_level_loose_acc_stderr,none\": \"N/A\"\n },\n \"leaderboard_math_hard\"\
: {\n \"exact_match,none\": 0.11858006042296072,\n \"exact_match_stderr,none\"\
: 0.008506754143074618,\n \"alias\": \" - leaderboard_math_hard\"\n \
\ },\n \"leaderboard_math_algebra_hard\": {\n \"alias\"\
: \" - leaderboard_math_algebra_hard\",\n \"exact_match,none\": 0.24104234527687296,\n\
\ \"exact_match_stderr,none\": 0.024450893367555328\n },\n \
\ \"leaderboard_math_counting_and_prob_hard\": {\n \"alias\": \"\
\ - leaderboard_math_counting_and_prob_hard\",\n \"exact_match,none\"\
: 0.11382113821138211,\n \"exact_match_stderr,none\": 0.02875360087323741\n\
\ },\n \"leaderboard_math_geometry_hard\": {\n \"alias\"\
: \" - leaderboard_math_geometry_hard\",\n \"exact_match,none\": 0.030303030303030304,\n\
\ \"exact_match_stderr,none\": 0.014977019714308254\n },\n \
\ \"leaderboard_math_intermediate_algebra_hard\": {\n \"alias\":\
\ \" - leaderboard_math_intermediate_algebra_hard\",\n \"exact_match,none\"\
: 0.010714285714285714,\n \"exact_match_stderr,none\": 0.006163684194761604\n\
\ },\n \"leaderboard_math_num_theory_hard\": {\n \"alias\"\
: \" - leaderboard_math_num_theory_hard\",\n \"exact_match,none\": 0.07792207792207792,\n\
\ \"exact_match_stderr,none\": 0.021670471414711772\n },\n \
\ \"leaderboard_math_prealgebra_hard\": {\n \"alias\": \" - leaderboard_math_prealgebra_hard\"\
,\n \"exact_match,none\": 0.22797927461139897,\n \"exact_match_stderr,none\"\
: 0.030276909945178256\n },\n \"leaderboard_math_precalculus_hard\"\
: {\n \"alias\": \" - leaderboard_math_precalculus_hard\",\n \
\ \"exact_match,none\": 0.044444444444444446,\n \"exact_match_stderr,none\"\
: 0.01780263602032457\n },\n \"leaderboard_mmlu_pro\": {\n \
\ \"alias\": \" - leaderboard_mmlu_pro\",\n \"acc,none\": 0.3453291223404255,\n\
\ \"acc_stderr,none\": 0.004334881701803689\n },\n \"leaderboard_musr\"\
: {\n \"acc_norm,none\": 0.44841269841269843,\n \"acc_norm_stderr,none\"\
: 0.01786233407718341,\n \"alias\": \" - leaderboard_musr\"\n \
\ },\n \"leaderboard_musr_murder_mysteries\": {\n \"alias\": \"\
\ - leaderboard_musr_murder_mysteries\",\n \"acc_norm,none\": 0.568,\n\
\ \"acc_norm_stderr,none\": 0.03139181076542941\n },\n \
\ \"leaderboard_musr_object_placements\": {\n \"alias\": \" - leaderboard_musr_object_placements\"\
,\n \"acc_norm,none\": 0.39453125,\n \"acc_norm_stderr,none\"\
: 0.030606698150250366\n },\n \"leaderboard_musr_team_allocation\"\
: {\n \"alias\": \" - leaderboard_musr_team_allocation\",\n \
\ \"acc_norm,none\": 0.384,\n \"acc_norm_stderr,none\": 0.030821679117375447\n\
\ }\n },\n \"leaderboard\": {\n \"inst_level_loose_acc,none\"\
: 0.6115107913669064,\n \"inst_level_loose_acc_stderr,none\": \"N/A\",\n\
\ \"prompt_level_loose_acc,none\": 0.4805914972273567,\n \"prompt_level_loose_acc_stderr,none\"\
: 0.021500357879025087,\n \"acc_norm,none\": 0.4541445064210663,\n \
\ \"acc_norm_stderr,none\": 0.005315784780969996,\n \"exact_match,none\"\
: 0.11858006042296072,\n \"exact_match_stderr,none\": 0.008506754143074618,\n\
\ \"prompt_level_strict_acc,none\": 0.4565619223659889,\n \"prompt_level_strict_acc_stderr,none\"\
: 0.021435222545538937,\n \"acc,none\": 0.3453291223404255,\n \"acc_stderr,none\"\
: 0.004334881701803689,\n \"inst_level_strict_acc,none\": 0.5911270983213429,\n\
\ \"inst_level_strict_acc_stderr,none\": \"N/A\",\n \"alias\": \"\
leaderboard\"\n },\n \"leaderboard_bbh\": {\n \"acc_norm,none\": 0.4936642943933345,\n\
\ \"acc_norm_stderr,none\": 0.006171633979063365,\n \"alias\": \"\
\ - leaderboard_bbh\"\n },\n \"leaderboard_bbh_boolean_expressions\": {\n\
\ \"alias\": \" - leaderboard_bbh_boolean_expressions\",\n \"acc_norm,none\"\
: 0.82,\n \"acc_norm_stderr,none\": 0.02434689065029351\n },\n \"leaderboard_bbh_causal_judgement\"\
: {\n \"alias\": \" - leaderboard_bbh_causal_judgement\",\n \"acc_norm,none\"\
: 0.6096256684491979,\n \"acc_norm_stderr,none\": 0.03576973947986408\n \
\ },\n \"leaderboard_bbh_date_understanding\": {\n \"alias\": \" -\
\ leaderboard_bbh_date_understanding\",\n \"acc_norm,none\": 0.496,\n \
\ \"acc_norm_stderr,none\": 0.0316851985511992\n },\n \"leaderboard_bbh_disambiguation_qa\"\
: {\n \"alias\": \" - leaderboard_bbh_disambiguation_qa\",\n \"acc_norm,none\"\
: 0.572,\n \"acc_norm_stderr,none\": 0.031355968923772626\n },\n \"\
leaderboard_bbh_formal_fallacies\": {\n \"alias\": \" - leaderboard_bbh_formal_fallacies\"\
,\n \"acc_norm,none\": 0.552,\n \"acc_norm_stderr,none\": 0.03151438761115348\n\
\ },\n \"leaderboard_bbh_geometric_shapes\": {\n \"alias\": \" - leaderboard_bbh_geometric_shapes\"\
,\n \"acc_norm,none\": 0.42,\n \"acc_norm_stderr,none\": 0.03127799950463661\n\
\ },\n \"leaderboard_bbh_hyperbaton\": {\n \"alias\": \" - leaderboard_bbh_hyperbaton\"\
,\n \"acc_norm,none\": 0.66,\n \"acc_norm_stderr,none\": 0.030020073605457876\n\
\ },\n \"leaderboard_bbh_logical_deduction_five_objects\": {\n \"alias\"\
: \" - leaderboard_bbh_logical_deduction_five_objects\",\n \"acc_norm,none\"\
: 0.32,\n \"acc_norm_stderr,none\": 0.029561724955240978\n },\n \"\
leaderboard_bbh_logical_deduction_seven_objects\": {\n \"alias\": \" - leaderboard_bbh_logical_deduction_seven_objects\"\
,\n \"acc_norm,none\": 0.332,\n \"acc_norm_stderr,none\": 0.029844039047465857\n\
\ },\n \"leaderboard_bbh_logical_deduction_three_objects\": {\n \"\
alias\": \" - leaderboard_bbh_logical_deduction_three_objects\",\n \"acc_norm,none\"\
: 0.524,\n \"acc_norm_stderr,none\": 0.03164968895968774\n },\n \"\
leaderboard_bbh_movie_recommendation\": {\n \"alias\": \" - leaderboard_bbh_movie_recommendation\"\
,\n \"acc_norm,none\": 0.692,\n \"acc_norm_stderr,none\": 0.02925692860650181\n\
\ },\n \"leaderboard_bbh_navigate\": {\n \"alias\": \" - leaderboard_bbh_navigate\"\
,\n \"acc_norm,none\": 0.636,\n \"acc_norm_stderr,none\": 0.030491555220405475\n\
\ },\n \"leaderboard_bbh_object_counting\": {\n \"alias\": \" - leaderboard_bbh_object_counting\"\
,\n \"acc_norm,none\": 0.396,\n \"acc_norm_stderr,none\": 0.030993197854577898\n\
\ },\n \"leaderboard_bbh_penguins_in_a_table\": {\n \"alias\": \" \
\ - leaderboard_bbh_penguins_in_a_table\",\n \"acc_norm,none\": 0.4452054794520548,\n\
\ \"acc_norm_stderr,none\": 0.04127264774457449\n },\n \"leaderboard_bbh_reasoning_about_colored_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_reasoning_about_colored_objects\"\
,\n \"acc_norm,none\": 0.616,\n \"acc_norm_stderr,none\": 0.030821679117375447\n\
\ },\n \"leaderboard_bbh_ruin_names\": {\n \"alias\": \" - leaderboard_bbh_ruin_names\"\
,\n \"acc_norm,none\": 0.676,\n \"acc_norm_stderr,none\": 0.029658294924545567\n\
\ },\n \"leaderboard_bbh_salient_translation_error_detection\": {\n \
\ \"alias\": \" - leaderboard_bbh_salient_translation_error_detection\",\n \
\ \"acc_norm,none\": 0.436,\n \"acc_norm_stderr,none\": 0.031425567060281365\n\
\ },\n \"leaderboard_bbh_snarks\": {\n \"alias\": \" - leaderboard_bbh_snarks\"\
,\n \"acc_norm,none\": 0.5842696629213483,\n \"acc_norm_stderr,none\"\
: 0.037044683959609616\n },\n \"leaderboard_bbh_sports_understanding\": {\n\
\ \"alias\": \" - leaderboard_bbh_sports_understanding\",\n \"acc_norm,none\"\
: 0.744,\n \"acc_norm_stderr,none\": 0.027657108718204846\n },\n \"\
leaderboard_bbh_temporal_sequences\": {\n \"alias\": \" - leaderboard_bbh_temporal_sequences\"\
,\n \"acc_norm,none\": 0.244,\n \"acc_norm_stderr,none\": 0.02721799546455311\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_five_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_five_objects\"\
,\n \"acc_norm,none\": 0.164,\n \"acc_norm_stderr,none\": 0.02346526100207671\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_seven_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
,\n \"acc_norm,none\": 0.176,\n \"acc_norm_stderr,none\": 0.024133497525457123\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_three_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
,\n \"acc_norm,none\": 0.28,\n \"acc_norm_stderr,none\": 0.02845414827783231\n\
\ },\n \"leaderboard_bbh_web_of_lies\": {\n \"alias\": \" - leaderboard_bbh_web_of_lies\"\
,\n \"acc_norm,none\": 0.488,\n \"acc_norm_stderr,none\": 0.03167708558254714\n\
\ },\n \"leaderboard_gpqa\": {\n \"acc_norm,none\": 0.26677852348993286,\n\
\ \"acc_norm_stderr,none\": 0.012801592257275486,\n \"alias\": \"\
\ - leaderboard_gpqa\"\n },\n \"leaderboard_gpqa_diamond\": {\n \"\
alias\": \" - leaderboard_gpqa_diamond\",\n \"acc_norm,none\": 0.3282828282828283,\n\
\ \"acc_norm_stderr,none\": 0.03345678422756777\n },\n \"leaderboard_gpqa_extended\"\
: {\n \"alias\": \" - leaderboard_gpqa_extended\",\n \"acc_norm,none\"\
: 0.25457875457875456,\n \"acc_norm_stderr,none\": 0.01866008900417748\n\
\ },\n \"leaderboard_gpqa_main\": {\n \"alias\": \" - leaderboard_gpqa_main\"\
,\n \"acc_norm,none\": 0.2544642857142857,\n \"acc_norm_stderr,none\"\
: 0.02060126475832284\n },\n \"leaderboard_ifeval\": {\n \"alias\"\
: \" - leaderboard_ifeval\",\n \"prompt_level_strict_acc,none\": 0.4565619223659889,\n\
\ \"prompt_level_strict_acc_stderr,none\": 0.021435222545538937,\n \
\ \"inst_level_strict_acc,none\": 0.5911270983213429,\n \"inst_level_strict_acc_stderr,none\"\
: \"N/A\",\n \"prompt_level_loose_acc,none\": 0.4805914972273567,\n \
\ \"prompt_level_loose_acc_stderr,none\": 0.021500357879025083,\n \"inst_level_loose_acc,none\"\
: 0.6115107913669064,\n \"inst_level_loose_acc_stderr,none\": \"N/A\"\n \
\ },\n \"leaderboard_math_hard\": {\n \"exact_match,none\": 0.11858006042296072,\n\
\ \"exact_match_stderr,none\": 0.008506754143074618,\n \"alias\":\
\ \" - leaderboard_math_hard\"\n },\n \"leaderboard_math_algebra_hard\": {\n\
\ \"alias\": \" - leaderboard_math_algebra_hard\",\n \"exact_match,none\"\
: 0.24104234527687296,\n \"exact_match_stderr,none\": 0.024450893367555328\n\
\ },\n \"leaderboard_math_counting_and_prob_hard\": {\n \"alias\":\
\ \" - leaderboard_math_counting_and_prob_hard\",\n \"exact_match,none\"\
: 0.11382113821138211,\n \"exact_match_stderr,none\": 0.02875360087323741\n\
\ },\n \"leaderboard_math_geometry_hard\": {\n \"alias\": \" - leaderboard_math_geometry_hard\"\
,\n \"exact_match,none\": 0.030303030303030304,\n \"exact_match_stderr,none\"\
: 0.014977019714308254\n },\n \"leaderboard_math_intermediate_algebra_hard\"\
: {\n \"alias\": \" - leaderboard_math_intermediate_algebra_hard\",\n \
\ \"exact_match,none\": 0.010714285714285714,\n \"exact_match_stderr,none\"\
: 0.006163684194761604\n },\n \"leaderboard_math_num_theory_hard\": {\n \
\ \"alias\": \" - leaderboard_math_num_theory_hard\",\n \"exact_match,none\"\
: 0.07792207792207792,\n \"exact_match_stderr,none\": 0.021670471414711772\n\
\ },\n \"leaderboard_math_prealgebra_hard\": {\n \"alias\": \" - leaderboard_math_prealgebra_hard\"\
,\n \"exact_match,none\": 0.22797927461139897,\n \"exact_match_stderr,none\"\
: 0.030276909945178256\n },\n \"leaderboard_math_precalculus_hard\": {\n \
\ \"alias\": \" - leaderboard_math_precalculus_hard\",\n \"exact_match,none\"\
: 0.044444444444444446,\n \"exact_match_stderr,none\": 0.01780263602032457\n\
\ },\n \"leaderboard_mmlu_pro\": {\n \"alias\": \" - leaderboard_mmlu_pro\"\
,\n \"acc,none\": 0.3453291223404255,\n \"acc_stderr,none\": 0.004334881701803689\n\
\ },\n \"leaderboard_musr\": {\n \"acc_norm,none\": 0.44841269841269843,\n\
\ \"acc_norm_stderr,none\": 0.01786233407718341,\n \"alias\": \" -\
\ leaderboard_musr\"\n },\n \"leaderboard_musr_murder_mysteries\": {\n \
\ \"alias\": \" - leaderboard_musr_murder_mysteries\",\n \"acc_norm,none\"\
: 0.568,\n \"acc_norm_stderr,none\": 0.03139181076542941\n },\n \"\
leaderboard_musr_object_placements\": {\n \"alias\": \" - leaderboard_musr_object_placements\"\
,\n \"acc_norm,none\": 0.39453125,\n \"acc_norm_stderr,none\": 0.030606698150250366\n\
\ },\n \"leaderboard_musr_team_allocation\": {\n \"alias\": \" - leaderboard_musr_team_allocation\"\
,\n \"acc_norm,none\": 0.384,\n \"acc_norm_stderr,none\": 0.030821679117375447\n\
\ }\n}\n```"
repo_url: https://huggingface.co/GoToCompany/llama3-8b-cpt-sahabatai-v1-instruct
leaderboard_url: ''
point_of_contact: ''
configs:
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_boolean_expressions
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_boolean_expressions_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_boolean_expressions_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_causal_judgement
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_causal_judgement_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_causal_judgement_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_date_understanding
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_date_understanding_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_date_understanding_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_disambiguation_qa
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_disambiguation_qa_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_disambiguation_qa_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_formal_fallacies
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_formal_fallacies_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_formal_fallacies_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_geometric_shapes
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_geometric_shapes_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_geometric_shapes_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_hyperbaton
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_hyperbaton_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_hyperbaton_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_logical_deduction_five_objects
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_logical_deduction_five_objects_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_five_objects_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_logical_deduction_seven_objects
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_logical_deduction_seven_objects_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_seven_objects_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_logical_deduction_three_objects
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_logical_deduction_three_objects_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_three_objects_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_movie_recommendation
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_movie_recommendation_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_movie_recommendation_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_navigate
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_navigate_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_navigate_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_object_counting
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_object_counting_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_object_counting_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_penguins_in_a_table
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_penguins_in_a_table_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_penguins_in_a_table_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_reasoning_about_colored_objects
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_reasoning_about_colored_objects_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_reasoning_about_colored_objects_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_ruin_names
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_ruin_names_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_ruin_names_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_salient_translation_error_detection
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_salient_translation_error_detection_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_salient_translation_error_detection_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_snarks
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_snarks_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_snarks_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_sports_understanding
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_sports_understanding_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_sports_understanding_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_temporal_sequences
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_temporal_sequences_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_temporal_sequences_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_tracking_shuffled_objects_five_objects
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_five_objects_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_five_objects_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_tracking_shuffled_objects_seven_objects
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_seven_objects_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_seven_objects_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_tracking_shuffled_objects_three_objects
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_three_objects_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_three_objects_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_web_of_lies
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_bbh_web_of_lies_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_web_of_lies_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_gpqa_diamond
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_gpqa_diamond_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_diamond_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_gpqa_extended
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_gpqa_extended_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_extended_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_gpqa_main
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_gpqa_main_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_main_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_ifeval
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_ifeval_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_ifeval_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_math_algebra_hard
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_math_algebra_hard_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_algebra_hard_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_math_counting_and_prob_hard
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_math_counting_and_prob_hard_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_counting_and_prob_hard_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_math_geometry_hard
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_math_geometry_hard_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_geometry_hard_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_math_intermediate_algebra_hard
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_math_intermediate_algebra_hard_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_intermediate_algebra_hard_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_math_num_theory_hard
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_math_num_theory_hard_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_num_theory_hard_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_math_prealgebra_hard
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_math_prealgebra_hard_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_prealgebra_hard_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_math_precalculus_hard
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_math_precalculus_hard_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_precalculus_hard_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_mmlu_pro
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_mmlu_pro_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_mmlu_pro_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_musr_murder_mysteries
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_musr_murder_mysteries_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_murder_mysteries_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_musr_object_placements
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_musr_object_placements_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_object_placements_2024-11-21T12-46-29.613339.jsonl'
- config_name: GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_musr_team_allocation
data_files:
- split: 2024_11_21T12_46_29.613339
path:
- '**/samples_leaderboard_musr_team_allocation_2024-11-21T12-46-29.613339.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_team_allocation_2024-11-21T12-46-29.613339.jsonl'
---
# Dataset Card for Evaluation run of GoToCompany/llama3-8b-cpt-sahabatai-v1-instruct
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [GoToCompany/llama3-8b-cpt-sahabatai-v1-instruct](https://huggingface.co/GoToCompany/llama3-8b-cpt-sahabatai-v1-instruct)
The dataset is composed of 38 configurations, each one corresponding to one of the evaluated tasks.
The dataset has been created from 1 run. Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "latest" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run.
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset(
"open-llm-leaderboard/GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct-details",
name="GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct__leaderboard_bbh_boolean_expressions",
split="latest"
)
```
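The loaded split behaves like any other Hugging Face `datasets` split, so you can inspect individual samples directly. The snippet below is a minimal sketch that assumes the `data` object from the example above; the exact column names depend on the task being inspected.
```python
# Minimal sketch: inspect the loaded split (assumes `data` from the snippet above).
# Column names differ from task to task, so only generic Dataset methods are used here.
print(data.num_rows)       # number of evaluated samples for this task
print(data.column_names)   # fields available for this task's samples
print(data[0])             # first sample as a plain Python dict

# Optionally, work with the samples as a pandas DataFrame.
df = data.to_pandas()
print(df.head())
```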
## Latest results
These are the [latest results from run 2024-11-21T12-46-29.613339](https://huggingface.co/datasets/open-llm-leaderboard/GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct-details/blob/main/GoToCompany__llama3-8b-cpt-sahabatai-v1-instruct/results_2024-11-21T12-46-29.613339.json) (note that there might be results for other tasks in the repository if successive evals didn't cover the same tasks; you can find each one in the results and in the "latest" split of each eval):
```python
{
"all": {
"leaderboard": {
"inst_level_loose_acc,none": 0.6115107913669064,
"inst_level_loose_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.4805914972273567,
"prompt_level_loose_acc_stderr,none": 0.021500357879025087,
"acc_norm,none": 0.4541445064210663,
"acc_norm_stderr,none": 0.005315784780969996,
"exact_match,none": 0.11858006042296072,
"exact_match_stderr,none": 0.008506754143074618,
"prompt_level_strict_acc,none": 0.4565619223659889,
"prompt_level_strict_acc_stderr,none": 0.021435222545538937,
"acc,none": 0.3453291223404255,
"acc_stderr,none": 0.004334881701803689,
"inst_level_strict_acc,none": 0.5911270983213429,
"inst_level_strict_acc_stderr,none": "N/A",
"alias": "leaderboard"
},
"leaderboard_bbh": {
"acc_norm,none": 0.4936642943933345,
"acc_norm_stderr,none": 0.006171633979063365,
"alias": " - leaderboard_bbh"
},
"leaderboard_bbh_boolean_expressions": {
"alias": " - leaderboard_bbh_boolean_expressions",
"acc_norm,none": 0.82,
"acc_norm_stderr,none": 0.02434689065029351
},
"leaderboard_bbh_causal_judgement": {
"alias": " - leaderboard_bbh_causal_judgement",
"acc_norm,none": 0.6096256684491979,
"acc_norm_stderr,none": 0.03576973947986408
},
"leaderboard_bbh_date_understanding": {
"alias": " - leaderboard_bbh_date_understanding",
"acc_norm,none": 0.496,
"acc_norm_stderr,none": 0.0316851985511992
},
"leaderboard_bbh_disambiguation_qa": {
"alias": " - leaderboard_bbh_disambiguation_qa",
"acc_norm,none": 0.572,
"acc_norm_stderr,none": 0.031355968923772626
},
"leaderboard_bbh_formal_fallacies": {
"alias": " - leaderboard_bbh_formal_fallacies",
"acc_norm,none": 0.552,
"acc_norm_stderr,none": 0.03151438761115348
},
"leaderboard_bbh_geometric_shapes": {
"alias": " - leaderboard_bbh_geometric_shapes",
"acc_norm,none": 0.42,
"acc_norm_stderr,none": 0.03127799950463661
},
"leaderboard_bbh_hyperbaton": {
"alias": " - leaderboard_bbh_hyperbaton",
"acc_norm,none": 0.66,
"acc_norm_stderr,none": 0.030020073605457876
},
"leaderboard_bbh_logical_deduction_five_objects": {
"alias": " - leaderboard_bbh_logical_deduction_five_objects",
"acc_norm,none": 0.32,
"acc_norm_stderr,none": 0.029561724955240978
},
"leaderboard_bbh_logical_deduction_seven_objects": {
"alias": " - leaderboard_bbh_logical_deduction_seven_objects",
"acc_norm,none": 0.332,
"acc_norm_stderr,none": 0.029844039047465857
},
"leaderboard_bbh_logical_deduction_three_objects": {
"alias": " - leaderboard_bbh_logical_deduction_three_objects",
"acc_norm,none": 0.524,
"acc_norm_stderr,none": 0.03164968895968774
},
"leaderboard_bbh_movie_recommendation": {
"alias": " - leaderboard_bbh_movie_recommendation",
"acc_norm,none": 0.692,
"acc_norm_stderr,none": 0.02925692860650181
},
"leaderboard_bbh_navigate": {
"alias": " - leaderboard_bbh_navigate",
"acc_norm,none": 0.636,
"acc_norm_stderr,none": 0.030491555220405475
},
"leaderboard_bbh_object_counting": {
"alias": " - leaderboard_bbh_object_counting",
"acc_norm,none": 0.396,
"acc_norm_stderr,none": 0.030993197854577898
},
"leaderboard_bbh_penguins_in_a_table": {
"alias": " - leaderboard_bbh_penguins_in_a_table",
"acc_norm,none": 0.4452054794520548,
"acc_norm_stderr,none": 0.04127264774457449
},
"leaderboard_bbh_reasoning_about_colored_objects": {
"alias": " - leaderboard_bbh_reasoning_about_colored_objects",
"acc_norm,none": 0.616,
"acc_norm_stderr,none": 0.030821679117375447
},
"leaderboard_bbh_ruin_names": {
"alias": " - leaderboard_bbh_ruin_names",
"acc_norm,none": 0.676,
"acc_norm_stderr,none": 0.029658294924545567
},
"leaderboard_bbh_salient_translation_error_detection": {
"alias": " - leaderboard_bbh_salient_translation_error_detection",
"acc_norm,none": 0.436,
"acc_norm_stderr,none": 0.031425567060281365
},
"leaderboard_bbh_snarks": {
"alias": " - leaderboard_bbh_snarks",
"acc_norm,none": 0.5842696629213483,
"acc_norm_stderr,none": 0.037044683959609616
},
"leaderboard_bbh_sports_understanding": {
"alias": " - leaderboard_bbh_sports_understanding",
"acc_norm,none": 0.744,
"acc_norm_stderr,none": 0.027657108718204846
},
"leaderboard_bbh_temporal_sequences": {
"alias": " - leaderboard_bbh_temporal_sequences",
"acc_norm,none": 0.244,
"acc_norm_stderr,none": 0.02721799546455311
},
"leaderboard_bbh_tracking_shuffled_objects_five_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_five_objects",
"acc_norm,none": 0.164,
"acc_norm_stderr,none": 0.02346526100207671
},
"leaderboard_bbh_tracking_shuffled_objects_seven_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_seven_objects",
"acc_norm,none": 0.176,
"acc_norm_stderr,none": 0.024133497525457123
},
"leaderboard_bbh_tracking_shuffled_objects_three_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_three_objects",
"acc_norm,none": 0.28,
"acc_norm_stderr,none": 0.02845414827783231
},
"leaderboard_bbh_web_of_lies": {
"alias": " - leaderboard_bbh_web_of_lies",
"acc_norm,none": 0.488,
"acc_norm_stderr,none": 0.03167708558254714
},
"leaderboard_gpqa": {
"acc_norm,none": 0.26677852348993286,
"acc_norm_stderr,none": 0.012801592257275486,
"alias": " - leaderboard_gpqa"
},
"leaderboard_gpqa_diamond": {
"alias": " - leaderboard_gpqa_diamond",
"acc_norm,none": 0.3282828282828283,
"acc_norm_stderr,none": 0.03345678422756777
},
"leaderboard_gpqa_extended": {
"alias": " - leaderboard_gpqa_extended",
"acc_norm,none": 0.25457875457875456,
"acc_norm_stderr,none": 0.01866008900417748
},
"leaderboard_gpqa_main": {
"alias": " - leaderboard_gpqa_main",
"acc_norm,none": 0.2544642857142857,
"acc_norm_stderr,none": 0.02060126475832284
},
"leaderboard_ifeval": {
"alias": " - leaderboard_ifeval",
"prompt_level_strict_acc,none": 0.4565619223659889,
"prompt_level_strict_acc_stderr,none": 0.021435222545538937,
"inst_level_strict_acc,none": 0.5911270983213429,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.4805914972273567,
"prompt_level_loose_acc_stderr,none": 0.021500357879025083,
"inst_level_loose_acc,none": 0.6115107913669064,
"inst_level_loose_acc_stderr,none": "N/A"
},
"leaderboard_math_hard": {
"exact_match,none": 0.11858006042296072,
"exact_match_stderr,none": 0.008506754143074618,
"alias": " - leaderboard_math_hard"
},
"leaderboard_math_algebra_hard": {
"alias": " - leaderboard_math_algebra_hard",
"exact_match,none": 0.24104234527687296,
"exact_match_stderr,none": 0.024450893367555328
},
"leaderboard_math_counting_and_prob_hard": {
"alias": " - leaderboard_math_counting_and_prob_hard",
"exact_match,none": 0.11382113821138211,
"exact_match_stderr,none": 0.02875360087323741
},
"leaderboard_math_geometry_hard": {
"alias": " - leaderboard_math_geometry_hard",
"exact_match,none": 0.030303030303030304,
"exact_match_stderr,none": 0.014977019714308254
},
"leaderboard_math_intermediate_algebra_hard": {
"alias": " - leaderboard_math_intermediate_algebra_hard",
"exact_match,none": 0.010714285714285714,
"exact_match_stderr,none": 0.006163684194761604
},
"leaderboard_math_num_theory_hard": {
"alias": " - leaderboard_math_num_theory_hard",
"exact_match,none": 0.07792207792207792,
"exact_match_stderr,none": 0.021670471414711772
},
"leaderboard_math_prealgebra_hard": {
"alias": " - leaderboard_math_prealgebra_hard",
"exact_match,none": 0.22797927461139897,
"exact_match_stderr,none": 0.030276909945178256
},
"leaderboard_math_precalculus_hard": {
"alias": " - leaderboard_math_precalculus_hard",
"exact_match,none": 0.044444444444444446,
"exact_match_stderr,none": 0.01780263602032457
},
"leaderboard_mmlu_pro": {
"alias": " - leaderboard_mmlu_pro",
"acc,none": 0.3453291223404255,
"acc_stderr,none": 0.004334881701803689
},
"leaderboard_musr": {
"acc_norm,none": 0.44841269841269843,
"acc_norm_stderr,none": 0.01786233407718341,
"alias": " - leaderboard_musr"
},
"leaderboard_musr_murder_mysteries": {
"alias": " - leaderboard_musr_murder_mysteries",
"acc_norm,none": 0.568,
"acc_norm_stderr,none": 0.03139181076542941
},
"leaderboard_musr_object_placements": {
"alias": " - leaderboard_musr_object_placements",
"acc_norm,none": 0.39453125,
"acc_norm_stderr,none": 0.030606698150250366
},
"leaderboard_musr_team_allocation": {
"alias": " - leaderboard_musr_team_allocation",
"acc_norm,none": 0.384,
"acc_norm_stderr,none": 0.030821679117375447
}
},
"leaderboard": {
"inst_level_loose_acc,none": 0.6115107913669064,
"inst_level_loose_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.4805914972273567,
"prompt_level_loose_acc_stderr,none": 0.021500357879025087,
"acc_norm,none": 0.4541445064210663,
"acc_norm_stderr,none": 0.005315784780969996,
"exact_match,none": 0.11858006042296072,
"exact_match_stderr,none": 0.008506754143074618,
"prompt_level_strict_acc,none": 0.4565619223659889,
"prompt_level_strict_acc_stderr,none": 0.021435222545538937,
"acc,none": 0.3453291223404255,
"acc_stderr,none": 0.004334881701803689,
"inst_level_strict_acc,none": 0.5911270983213429,
"inst_level_strict_acc_stderr,none": "N/A",
"alias": "leaderboard"
},
"leaderboard_bbh": {
"acc_norm,none": 0.4936642943933345,
"acc_norm_stderr,none": 0.006171633979063365,
"alias": " - leaderboard_bbh"
},
"leaderboard_bbh_boolean_expressions": {
"alias": " - leaderboard_bbh_boolean_expressions",
"acc_norm,none": 0.82,
"acc_norm_stderr,none": 0.02434689065029351
},
"leaderboard_bbh_causal_judgement": {
"alias": " - leaderboard_bbh_causal_judgement",
"acc_norm,none": 0.6096256684491979,
"acc_norm_stderr,none": 0.03576973947986408
},
"leaderboard_bbh_date_understanding": {
"alias": " - leaderboard_bbh_date_understanding",
"acc_norm,none": 0.496,
"acc_norm_stderr,none": 0.0316851985511992
},
"leaderboard_bbh_disambiguation_qa": {
"alias": " - leaderboard_bbh_disambiguation_qa",
"acc_norm,none": 0.572,
"acc_norm_stderr,none": 0.031355968923772626
},
"leaderboard_bbh_formal_fallacies": {
"alias": " - leaderboard_bbh_formal_fallacies",
"acc_norm,none": 0.552,
"acc_norm_stderr,none": 0.03151438761115348
},
"leaderboard_bbh_geometric_shapes": {
"alias": " - leaderboard_bbh_geometric_shapes",
"acc_norm,none": 0.42,
"acc_norm_stderr,none": 0.03127799950463661
},
"leaderboard_bbh_hyperbaton": {
"alias": " - leaderboard_bbh_hyperbaton",
"acc_norm,none": 0.66,
"acc_norm_stderr,none": 0.030020073605457876
},
"leaderboard_bbh_logical_deduction_five_objects": {
"alias": " - leaderboard_bbh_logical_deduction_five_objects",
"acc_norm,none": 0.32,
"acc_norm_stderr,none": 0.029561724955240978
},
"leaderboard_bbh_logical_deduction_seven_objects": {
"alias": " - leaderboard_bbh_logical_deduction_seven_objects",
"acc_norm,none": 0.332,
"acc_norm_stderr,none": 0.029844039047465857
},
"leaderboard_bbh_logical_deduction_three_objects": {
"alias": " - leaderboard_bbh_logical_deduction_three_objects",
"acc_norm,none": 0.524,
"acc_norm_stderr,none": 0.03164968895968774
},
"leaderboard_bbh_movie_recommendation": {
"alias": " - leaderboard_bbh_movie_recommendation",
"acc_norm,none": 0.692,
"acc_norm_stderr,none": 0.02925692860650181
},
"leaderboard_bbh_navigate": {
"alias": " - leaderboard_bbh_navigate",
"acc_norm,none": 0.636,
"acc_norm_stderr,none": 0.030491555220405475
},
"leaderboard_bbh_object_counting": {
"alias": " - leaderboard_bbh_object_counting",
"acc_norm,none": 0.396,
"acc_norm_stderr,none": 0.030993197854577898
},
"leaderboard_bbh_penguins_in_a_table": {
"alias": " - leaderboard_bbh_penguins_in_a_table",
"acc_norm,none": 0.4452054794520548,
"acc_norm_stderr,none": 0.04127264774457449
},
"leaderboard_bbh_reasoning_about_colored_objects": {
"alias": " - leaderboard_bbh_reasoning_about_colored_objects",
"acc_norm,none": 0.616,
"acc_norm_stderr,none": 0.030821679117375447
},
"leaderboard_bbh_ruin_names": {
"alias": " - leaderboard_bbh_ruin_names",
"acc_norm,none": 0.676,
"acc_norm_stderr,none": 0.029658294924545567
},
"leaderboard_bbh_salient_translation_error_detection": {
"alias": " - leaderboard_bbh_salient_translation_error_detection",
"acc_norm,none": 0.436,
"acc_norm_stderr,none": 0.031425567060281365
},
"leaderboard_bbh_snarks": {
"alias": " - leaderboard_bbh_snarks",
"acc_norm,none": 0.5842696629213483,
"acc_norm_stderr,none": 0.037044683959609616
},
"leaderboard_bbh_sports_understanding": {
"alias": " - leaderboard_bbh_sports_understanding",
"acc_norm,none": 0.744,
"acc_norm_stderr,none": 0.027657108718204846
},
"leaderboard_bbh_temporal_sequences": {
"alias": " - leaderboard_bbh_temporal_sequences",
"acc_norm,none": 0.244,
"acc_norm_stderr,none": 0.02721799546455311
},
"leaderboard_bbh_tracking_shuffled_objects_five_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_five_objects",
"acc_norm,none": 0.164,
"acc_norm_stderr,none": 0.02346526100207671
},
"leaderboard_bbh_tracking_shuffled_objects_seven_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_seven_objects",
"acc_norm,none": 0.176,
"acc_norm_stderr,none": 0.024133497525457123
},
"leaderboard_bbh_tracking_shuffled_objects_three_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_three_objects",
"acc_norm,none": 0.28,
"acc_norm_stderr,none": 0.02845414827783231
},
"leaderboard_bbh_web_of_lies": {
"alias": " - leaderboard_bbh_web_of_lies",
"acc_norm,none": 0.488,
"acc_norm_stderr,none": 0.03167708558254714
},
"leaderboard_gpqa": {
"acc_norm,none": 0.26677852348993286,
"acc_norm_stderr,none": 0.012801592257275486,
"alias": " - leaderboard_gpqa"
},
"leaderboard_gpqa_diamond": {
"alias": " - leaderboard_gpqa_diamond",
"acc_norm,none": 0.3282828282828283,
"acc_norm_stderr,none": 0.03345678422756777
},
"leaderboard_gpqa_extended": {
"alias": " - leaderboard_gpqa_extended",
"acc_norm,none": 0.25457875457875456,
"acc_norm_stderr,none": 0.01866008900417748
},
"leaderboard_gpqa_main": {
"alias": " - leaderboard_gpqa_main",
"acc_norm,none": 0.2544642857142857,
"acc_norm_stderr,none": 0.02060126475832284
},
"leaderboard_ifeval": {
"alias": " - leaderboard_ifeval",
"prompt_level_strict_acc,none": 0.4565619223659889,
"prompt_level_strict_acc_stderr,none": 0.021435222545538937,
"inst_level_strict_acc,none": 0.5911270983213429,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.4805914972273567,
"prompt_level_loose_acc_stderr,none": 0.021500357879025083,
"inst_level_loose_acc,none": 0.6115107913669064,
"inst_level_loose_acc_stderr,none": "N/A"
},
"leaderboard_math_hard": {
"exact_match,none": 0.11858006042296072,
"exact_match_stderr,none": 0.008506754143074618,
"alias": " - leaderboard_math_hard"
},
"leaderboard_math_algebra_hard": {
"alias": " - leaderboard_math_algebra_hard",
"exact_match,none": 0.24104234527687296,
"exact_match_stderr,none": 0.024450893367555328
},
"leaderboard_math_counting_and_prob_hard": {
"alias": " - leaderboard_math_counting_and_prob_hard",
"exact_match,none": 0.11382113821138211,
"exact_match_stderr,none": 0.02875360087323741
},
"leaderboard_math_geometry_hard": {
"alias": " - leaderboard_math_geometry_hard",
"exact_match,none": 0.030303030303030304,
"exact_match_stderr,none": 0.014977019714308254
},
"leaderboard_math_intermediate_algebra_hard": {
"alias": " - leaderboard_math_intermediate_algebra_hard",
"exact_match,none": 0.010714285714285714,
"exact_match_stderr,none": 0.006163684194761604
},
"leaderboard_math_num_theory_hard": {
"alias": " - leaderboard_math_num_theory_hard",
"exact_match,none": 0.07792207792207792,
"exact_match_stderr,none": 0.021670471414711772
},
"leaderboard_math_prealgebra_hard": {
"alias": " - leaderboard_math_prealgebra_hard",
"exact_match,none": 0.22797927461139897,
"exact_match_stderr,none": 0.030276909945178256
},
"leaderboard_math_precalculus_hard": {
"alias": " - leaderboard_math_precalculus_hard",
"exact_match,none": 0.044444444444444446,
"exact_match_stderr,none": 0.01780263602032457
},
"leaderboard_mmlu_pro": {
"alias": " - leaderboard_mmlu_pro",
"acc,none": 0.3453291223404255,
"acc_stderr,none": 0.004334881701803689
},
"leaderboard_musr": {
"acc_norm,none": 0.44841269841269843,
"acc_norm_stderr,none": 0.01786233407718341,
"alias": " - leaderboard_musr"
},
"leaderboard_musr_murder_mysteries": {
"alias": " - leaderboard_musr_murder_mysteries",
"acc_norm,none": 0.568,
"acc_norm_stderr,none": 0.03139181076542941
},
"leaderboard_musr_object_placements": {
"alias": " - leaderboard_musr_object_placements",
"acc_norm,none": 0.39453125,
"acc_norm_stderr,none": 0.030606698150250366
},
"leaderboard_musr_team_allocation": {
"alias": " - leaderboard_musr_team_allocation",
"acc_norm,none": 0.384,
"acc_norm_stderr,none": 0.030821679117375447
}
}
```
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
artnoage/Numina | artnoage | "2024-11-21T13:47:45Z" | 7 | 0 | [
"task_categories:text-generation",
"annotations_creators:expert-generated",
"language_creators:expert-generated",
"multilinguality:monolingual",
"source_datasets:AI-MO/NuminaMath-CoT",
"language:en",
"license:mit",
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"mathematics",
"olympiads",
"problem-solving",
"latex",
"mathematical-reasoning",
"math-word-problems",
"olympiad-math"
] | [
"text-generation",
"mathematical-reasoning"
] | "2024-11-21T13:36:15Z" | ---
annotations_creators:
- expert-generated
language:
- en
language_creators:
- expert-generated
license: mit
multilinguality:
- monolingual
pretty_name: Numina-Olympiads
size_categories:
- 1K<n<10K
source_datasets:
- AI-MO/NuminaMath-CoT
task_categories:
- text-generation
- mathematical-reasoning
task_ids:
- math-word-problems
- olympiad-math
paperswithcode_id: numina-olympiads
tags:
- mathematics
- olympiads
- problem-solving
- latex
- mathematical-reasoning
- math-word-problems
- olympiad-math
metrics:
- name: filtered_ratio
type: ratio
value: 0.880
description: Ratio of filtered dataset size to original dataset size
---
# Numina-Olympiads
Filtered NuminaMath-CoT dataset containing only olympiad problems with valid answers.
## Dataset Information
- Split: train
- Original size: 859494
- Filtered size: 756193
- Source: olympiads
- All examples contain valid boxed answers
## Dataset Description
This dataset is a filtered version of the NuminaMath-CoT dataset, containing only problems from olympiad sources that have valid boxed answers. Each example includes:
- A mathematical word problem
- A detailed solution with step-by-step reasoning
- A boxed final answer in LaTeX format
## Usage
The dataset is particularly useful for:
- Training and evaluating math problem-solving models
- Studying olympiad-style mathematical reasoning
- Testing model capabilities on complex word problems
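A minimal sketch of how the dataset might be loaded and the boxed answers extracted is shown below; the column names `problem` and `solution` are assumptions based on the upstream NuminaMath-CoT layout and may need to be adjusted to the actual schema.
```python
import re
from datasets import load_dataset

# Minimal sketch: load the filtered dataset and pull the final boxed answer
# out of a LaTeX solution. The "problem" and "solution" column names follow the
# upstream NuminaMath-CoT layout and are assumptions here; adjust if they differ.
ds = load_dataset("artnoage/Numina", split="train")

def extract_boxed(solution: str):
    """Return the content of the last \\boxed{...} in a solution, or None."""
    # Simple pattern: does not handle nested braces inside the boxed answer.
    matches = re.findall(r"\\boxed\{([^{}]*)\}", solution)
    return matches[-1] if matches else None

example = ds[0]
print(example["problem"][:200])
print("answer:", extract_boxed(example["solution"]))
```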
|
HamdanXI/libriTTS_dev_wav2vec2_latent_layer0_2sec_PERFECT_chunk_43 | HamdanXI | "2024-11-21T13:44:50Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T13:43:17Z" | ---
dataset_info:
features:
- name: audio_clip
sequence: float64
- name: layer0_prediction
sequence: float64
- name: predicted_text
dtype: string
- name: speaker_id
dtype: string
splits:
- name: train
num_bytes: 2646635635
num_examples: 100
download_size: 1905053582
dataset_size: 2646635635
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "libriTTS_dev_wav2vec2_latent_layer0_2sec_PERFECT_chunk_43"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
open-llm-leaderboard/gmonsoon__StockSeaLLMs-7B-v1-details | open-llm-leaderboard | "2024-11-21T13:49:09Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:json",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T13:45:45Z" | ---
pretty_name: Evaluation run of gmonsoon/StockSeaLLMs-7B-v1
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [gmonsoon/StockSeaLLMs-7B-v1](https://huggingface.co/gmonsoon/StockSeaLLMs-7B-v1)\n\
The dataset is composed of 38 configuration(s), each one corresponding to one of\
\ the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can\
\ be found as a specific split in each configuration, the split being named using\
\ the timestamp of the run.The \"train\" split is always pointing to the latest\
\ results.\n\nAn additional configuration \"results\" store all the aggregated results\
\ of the run.\n\nTo load the details from a run, you can for instance do the following:\n\
```python\nfrom datasets import load_dataset\ndata = load_dataset(\n\t\"open-llm-leaderboard/gmonsoon__StockSeaLLMs-7B-v1-details\"\
,\n\tname=\"gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_boolean_expressions\"\
,\n\tsplit=\"latest\"\n)\n```\n\n## Latest results\n\nThese are the [latest results\
\ from run 2024-11-21T13-45-45.216237](https://huggingface.co/datasets/open-llm-leaderboard/gmonsoon__StockSeaLLMs-7B-v1-details/blob/main/gmonsoon__StockSeaLLMs-7B-v1/results_2024-11-21T13-45-45.216237.json)\
\ (note that there might be results for other tasks in the repos if successive evals\
\ didn't cover the same tasks. You find each in the results and the \"latest\" split\
\ for each eval):\n\n```python\n{\n \"all\": {\n \"leaderboard\": {\n\
\ \"prompt_level_loose_acc,none\": 0.43068391866913125,\n \
\ \"prompt_level_loose_acc_stderr,none\": 0.021308808857898823,\n \"\
acc_norm,none\": 0.47956933454403944,\n \"acc_norm_stderr,none\": 0.005332877202997923,\n\
\ \"acc,none\": 0.39519614361702127,\n \"acc_stderr,none\"\
: 0.00445720656433847,\n \"exact_match,none\": 0.17598187311178248,\n\
\ \"exact_match_stderr,none\": 0.009729917778735123,\n \"\
inst_level_loose_acc,none\": 0.5407673860911271,\n \"inst_level_loose_acc_stderr,none\"\
: \"N/A\",\n \"inst_level_strict_acc,none\": 0.513189448441247,\n \
\ \"inst_level_strict_acc_stderr,none\": \"N/A\",\n \"prompt_level_strict_acc,none\"\
: 0.4066543438077634,\n \"prompt_level_strict_acc_stderr,none\": 0.021138283177336344,\n\
\ \"alias\": \"leaderboard\"\n },\n \"leaderboard_bbh\"\
: {\n \"acc_norm,none\": 0.5238673841346988,\n \"acc_norm_stderr,none\"\
: 0.006158688482621799,\n \"alias\": \" - leaderboard_bbh\"\n \
\ },\n \"leaderboard_bbh_boolean_expressions\": {\n \"alias\"\
: \" - leaderboard_bbh_boolean_expressions\",\n \"acc_norm,none\": 0.852,\n\
\ \"acc_norm_stderr,none\": 0.022503547243806186\n },\n \
\ \"leaderboard_bbh_causal_judgement\": {\n \"alias\": \" - leaderboard_bbh_causal_judgement\"\
,\n \"acc_norm,none\": 0.6256684491978609,\n \"acc_norm_stderr,none\"\
: 0.0354849234134303\n },\n \"leaderboard_bbh_date_understanding\"\
: {\n \"alias\": \" - leaderboard_bbh_date_understanding\",\n \
\ \"acc_norm,none\": 0.508,\n \"acc_norm_stderr,none\": 0.03168215643141386\n\
\ },\n \"leaderboard_bbh_disambiguation_qa\": {\n \"alias\"\
: \" - leaderboard_bbh_disambiguation_qa\",\n \"acc_norm,none\": 0.58,\n\
\ \"acc_norm_stderr,none\": 0.03127799950463661\n },\n \
\ \"leaderboard_bbh_formal_fallacies\": {\n \"alias\": \" - leaderboard_bbh_formal_fallacies\"\
,\n \"acc_norm,none\": 0.576,\n \"acc_norm_stderr,none\":\
\ 0.03131803437491622\n },\n \"leaderboard_bbh_geometric_shapes\"\
: {\n \"alias\": \" - leaderboard_bbh_geometric_shapes\",\n \
\ \"acc_norm,none\": 0.408,\n \"acc_norm_stderr,none\": 0.031145209846548512\n\
\ },\n \"leaderboard_bbh_hyperbaton\": {\n \"alias\": \"\
\ - leaderboard_bbh_hyperbaton\",\n \"acc_norm,none\": 0.748,\n \
\ \"acc_norm_stderr,none\": 0.027513851933031318\n },\n \"\
leaderboard_bbh_logical_deduction_five_objects\": {\n \"alias\": \" \
\ - leaderboard_bbh_logical_deduction_five_objects\",\n \"acc_norm,none\"\
: 0.512,\n \"acc_norm_stderr,none\": 0.03167708558254714\n },\n\
\ \"leaderboard_bbh_logical_deduction_seven_objects\": {\n \"\
alias\": \" - leaderboard_bbh_logical_deduction_seven_objects\",\n \"\
acc_norm,none\": 0.488,\n \"acc_norm_stderr,none\": 0.03167708558254714\n\
\ },\n \"leaderboard_bbh_logical_deduction_three_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_logical_deduction_three_objects\",\n\
\ \"acc_norm,none\": 0.676,\n \"acc_norm_stderr,none\": 0.029658294924545567\n\
\ },\n \"leaderboard_bbh_movie_recommendation\": {\n \"\
alias\": \" - leaderboard_bbh_movie_recommendation\",\n \"acc_norm,none\"\
: 0.632,\n \"acc_norm_stderr,none\": 0.03056207062099311\n },\n\
\ \"leaderboard_bbh_navigate\": {\n \"alias\": \" - leaderboard_bbh_navigate\"\
,\n \"acc_norm,none\": 0.652,\n \"acc_norm_stderr,none\":\
\ 0.030186568464511673\n },\n \"leaderboard_bbh_object_counting\"\
: {\n \"alias\": \" - leaderboard_bbh_object_counting\",\n \
\ \"acc_norm,none\": 0.36,\n \"acc_norm_stderr,none\": 0.03041876402517494\n\
\ },\n \"leaderboard_bbh_penguins_in_a_table\": {\n \"\
alias\": \" - leaderboard_bbh_penguins_in_a_table\",\n \"acc_norm,none\"\
: 0.5684931506849316,\n \"acc_norm_stderr,none\": 0.041131302645371945\n\
\ },\n \"leaderboard_bbh_reasoning_about_colored_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_reasoning_about_colored_objects\",\n\
\ \"acc_norm,none\": 0.456,\n \"acc_norm_stderr,none\": 0.031563285061213475\n\
\ },\n \"leaderboard_bbh_ruin_names\": {\n \"alias\": \"\
\ - leaderboard_bbh_ruin_names\",\n \"acc_norm,none\": 0.576,\n \
\ \"acc_norm_stderr,none\": 0.03131803437491622\n },\n \"leaderboard_bbh_salient_translation_error_detection\"\
: {\n \"alias\": \" - leaderboard_bbh_salient_translation_error_detection\"\
,\n \"acc_norm,none\": 0.452,\n \"acc_norm_stderr,none\":\
\ 0.03153986449255664\n },\n \"leaderboard_bbh_snarks\": {\n \
\ \"alias\": \" - leaderboard_bbh_snarks\",\n \"acc_norm,none\"\
: 0.6404494382022472,\n \"acc_norm_stderr,none\": 0.03606913914074032\n\
\ },\n \"leaderboard_bbh_sports_understanding\": {\n \"\
alias\": \" - leaderboard_bbh_sports_understanding\",\n \"acc_norm,none\"\
: 0.812,\n \"acc_norm_stderr,none\": 0.02476037772775051\n },\n\
\ \"leaderboard_bbh_temporal_sequences\": {\n \"alias\": \" -\
\ leaderboard_bbh_temporal_sequences\",\n \"acc_norm,none\": 0.456,\n\
\ \"acc_norm_stderr,none\": 0.031563285061213475\n },\n \
\ \"leaderboard_bbh_tracking_shuffled_objects_five_objects\": {\n \"\
alias\": \" - leaderboard_bbh_tracking_shuffled_objects_five_objects\",\n \
\ \"acc_norm,none\": 0.16,\n \"acc_norm_stderr,none\": 0.023232714782060626\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
,\n \"acc_norm,none\": 0.124,\n \"acc_norm_stderr,none\":\
\ 0.020886382258673272\n },\n \"leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
,\n \"acc_norm,none\": 0.268,\n \"acc_norm_stderr,none\":\
\ 0.02806876238252672\n },\n \"leaderboard_bbh_web_of_lies\": {\n\
\ \"alias\": \" - leaderboard_bbh_web_of_lies\",\n \"acc_norm,none\"\
: 0.52,\n \"acc_norm_stderr,none\": 0.03166085340849512\n },\n\
\ \"leaderboard_gpqa\": {\n \"acc_norm,none\": 0.3028523489932886,\n\
\ \"acc_norm_stderr,none\": 0.013316733936515984,\n \"alias\"\
: \" - leaderboard_gpqa\"\n },\n \"leaderboard_gpqa_diamond\": {\n\
\ \"alias\": \" - leaderboard_gpqa_diamond\",\n \"acc_norm,none\"\
: 0.2676767676767677,\n \"acc_norm_stderr,none\": 0.031544498882702825\n\
\ },\n \"leaderboard_gpqa_extended\": {\n \"alias\": \"\
\ - leaderboard_gpqa_extended\",\n \"acc_norm,none\": 0.304029304029304,\n\
\ \"acc_norm_stderr,none\": 0.019704024937907735\n },\n \
\ \"leaderboard_gpqa_main\": {\n \"alias\": \" - leaderboard_gpqa_main\"\
,\n \"acc_norm,none\": 0.3169642857142857,\n \"acc_norm_stderr,none\"\
: 0.0220076215848248\n },\n \"leaderboard_ifeval\": {\n \
\ \"alias\": \" - leaderboard_ifeval\",\n \"prompt_level_strict_acc,none\"\
: 0.4066543438077634,\n \"prompt_level_strict_acc_stderr,none\": 0.021138283177336344,\n\
\ \"inst_level_strict_acc,none\": 0.513189448441247,\n \"\
inst_level_strict_acc_stderr,none\": \"N/A\",\n \"prompt_level_loose_acc,none\"\
: 0.43068391866913125,\n \"prompt_level_loose_acc_stderr,none\": 0.021308808857898823,\n\
\ \"inst_level_loose_acc,none\": 0.5407673860911271,\n \"\
inst_level_loose_acc_stderr,none\": \"N/A\"\n },\n \"leaderboard_math_hard\"\
: {\n \"exact_match,none\": 0.17598187311178248,\n \"exact_match_stderr,none\"\
: 0.009729917778735123,\n \"alias\": \" - leaderboard_math_hard\"\n \
\ },\n \"leaderboard_math_algebra_hard\": {\n \"alias\"\
: \" - leaderboard_math_algebra_hard\",\n \"exact_match,none\": 0.3811074918566775,\n\
\ \"exact_match_stderr,none\": 0.02776327166045321\n },\n \
\ \"leaderboard_math_counting_and_prob_hard\": {\n \"alias\": \" \
\ - leaderboard_math_counting_and_prob_hard\",\n \"exact_match,none\"\
: 0.10569105691056911,\n \"exact_match_stderr,none\": 0.0278344722877674\n\
\ },\n \"leaderboard_math_geometry_hard\": {\n \"alias\"\
: \" - leaderboard_math_geometry_hard\",\n \"exact_match,none\": 0.09090909090909091,\n\
\ \"exact_match_stderr,none\": 0.0251172256361608\n },\n \
\ \"leaderboard_math_intermediate_algebra_hard\": {\n \"alias\": \"\
\ - leaderboard_math_intermediate_algebra_hard\",\n \"exact_match,none\"\
: 0.025,\n \"exact_match_stderr,none\": 0.009346956263824575\n \
\ },\n \"leaderboard_math_num_theory_hard\": {\n \"alias\": \"\
\ - leaderboard_math_num_theory_hard\",\n \"exact_match,none\": 0.14285714285714285,\n\
\ \"exact_match_stderr,none\": 0.028289929799333556\n },\n \
\ \"leaderboard_math_prealgebra_hard\": {\n \"alias\": \" - leaderboard_math_prealgebra_hard\"\
,\n \"exact_match,none\": 0.3005181347150259,\n \"exact_match_stderr,none\"\
: 0.033088185944157515\n },\n \"leaderboard_math_precalculus_hard\"\
: {\n \"alias\": \" - leaderboard_math_precalculus_hard\",\n \
\ \"exact_match,none\": 0.02962962962962963,\n \"exact_match_stderr,none\"\
: 0.014648038602753809\n },\n \"leaderboard_mmlu_pro\": {\n \
\ \"alias\": \" - leaderboard_mmlu_pro\",\n \"acc,none\": 0.39519614361702127,\n\
\ \"acc_stderr,none\": 0.00445720656433847\n },\n \"leaderboard_musr\"\
: {\n \"acc_norm,none\": 0.42063492063492064,\n \"acc_norm_stderr,none\"\
: 0.017713270487861726,\n \"alias\": \" - leaderboard_musr\"\n \
\ },\n \"leaderboard_musr_murder_mysteries\": {\n \"alias\":\
\ \" - leaderboard_musr_murder_mysteries\",\n \"acc_norm,none\": 0.536,\n\
\ \"acc_norm_stderr,none\": 0.031603975145223735\n },\n \
\ \"leaderboard_musr_object_placements\": {\n \"alias\": \" - leaderboard_musr_object_placements\"\
,\n \"acc_norm,none\": 0.328125,\n \"acc_norm_stderr,none\"\
: 0.029403146715355242\n },\n \"leaderboard_musr_team_allocation\"\
: {\n \"alias\": \" - leaderboard_musr_team_allocation\",\n \
\ \"acc_norm,none\": 0.4,\n \"acc_norm_stderr,none\": 0.031046021028253316\n\
\ }\n },\n \"leaderboard\": {\n \"prompt_level_loose_acc,none\"\
: 0.43068391866913125,\n \"prompt_level_loose_acc_stderr,none\": 0.021308808857898823,\n\
\ \"acc_norm,none\": 0.47956933454403944,\n \"acc_norm_stderr,none\"\
: 0.005332877202997923,\n \"acc,none\": 0.39519614361702127,\n \"\
acc_stderr,none\": 0.00445720656433847,\n \"exact_match,none\": 0.17598187311178248,\n\
\ \"exact_match_stderr,none\": 0.009729917778735123,\n \"inst_level_loose_acc,none\"\
: 0.5407673860911271,\n \"inst_level_loose_acc_stderr,none\": \"N/A\",\n\
\ \"inst_level_strict_acc,none\": 0.513189448441247,\n \"inst_level_strict_acc_stderr,none\"\
: \"N/A\",\n \"prompt_level_strict_acc,none\": 0.4066543438077634,\n \
\ \"prompt_level_strict_acc_stderr,none\": 0.021138283177336344,\n \"\
alias\": \"leaderboard\"\n },\n \"leaderboard_bbh\": {\n \"acc_norm,none\"\
: 0.5238673841346988,\n \"acc_norm_stderr,none\": 0.006158688482621799,\n\
\ \"alias\": \" - leaderboard_bbh\"\n },\n \"leaderboard_bbh_boolean_expressions\"\
: {\n \"alias\": \" - leaderboard_bbh_boolean_expressions\",\n \"\
acc_norm,none\": 0.852,\n \"acc_norm_stderr,none\": 0.022503547243806186\n\
\ },\n \"leaderboard_bbh_causal_judgement\": {\n \"alias\": \" - leaderboard_bbh_causal_judgement\"\
,\n \"acc_norm,none\": 0.6256684491978609,\n \"acc_norm_stderr,none\"\
: 0.0354849234134303\n },\n \"leaderboard_bbh_date_understanding\": {\n \
\ \"alias\": \" - leaderboard_bbh_date_understanding\",\n \"acc_norm,none\"\
: 0.508,\n \"acc_norm_stderr,none\": 0.03168215643141386\n },\n \"\
leaderboard_bbh_disambiguation_qa\": {\n \"alias\": \" - leaderboard_bbh_disambiguation_qa\"\
,\n \"acc_norm,none\": 0.58,\n \"acc_norm_stderr,none\": 0.03127799950463661\n\
\ },\n \"leaderboard_bbh_formal_fallacies\": {\n \"alias\": \" - leaderboard_bbh_formal_fallacies\"\
,\n \"acc_norm,none\": 0.576,\n \"acc_norm_stderr,none\": 0.03131803437491622\n\
\ },\n \"leaderboard_bbh_geometric_shapes\": {\n \"alias\": \" - leaderboard_bbh_geometric_shapes\"\
,\n \"acc_norm,none\": 0.408,\n \"acc_norm_stderr,none\": 0.031145209846548512\n\
\ },\n \"leaderboard_bbh_hyperbaton\": {\n \"alias\": \" - leaderboard_bbh_hyperbaton\"\
,\n \"acc_norm,none\": 0.748,\n \"acc_norm_stderr,none\": 0.027513851933031318\n\
\ },\n \"leaderboard_bbh_logical_deduction_five_objects\": {\n \"alias\"\
: \" - leaderboard_bbh_logical_deduction_five_objects\",\n \"acc_norm,none\"\
: 0.512,\n \"acc_norm_stderr,none\": 0.03167708558254714\n },\n \"\
leaderboard_bbh_logical_deduction_seven_objects\": {\n \"alias\": \" - leaderboard_bbh_logical_deduction_seven_objects\"\
,\n \"acc_norm,none\": 0.488,\n \"acc_norm_stderr,none\": 0.03167708558254714\n\
\ },\n \"leaderboard_bbh_logical_deduction_three_objects\": {\n \"\
alias\": \" - leaderboard_bbh_logical_deduction_three_objects\",\n \"acc_norm,none\"\
: 0.676,\n \"acc_norm_stderr,none\": 0.029658294924545567\n },\n \"\
leaderboard_bbh_movie_recommendation\": {\n \"alias\": \" - leaderboard_bbh_movie_recommendation\"\
,\n \"acc_norm,none\": 0.632,\n \"acc_norm_stderr,none\": 0.03056207062099311\n\
\ },\n \"leaderboard_bbh_navigate\": {\n \"alias\": \" - leaderboard_bbh_navigate\"\
,\n \"acc_norm,none\": 0.652,\n \"acc_norm_stderr,none\": 0.030186568464511673\n\
\ },\n \"leaderboard_bbh_object_counting\": {\n \"alias\": \" - leaderboard_bbh_object_counting\"\
,\n \"acc_norm,none\": 0.36,\n \"acc_norm_stderr,none\": 0.03041876402517494\n\
\ },\n \"leaderboard_bbh_penguins_in_a_table\": {\n \"alias\": \" \
\ - leaderboard_bbh_penguins_in_a_table\",\n \"acc_norm,none\": 0.5684931506849316,\n\
\ \"acc_norm_stderr,none\": 0.041131302645371945\n },\n \"leaderboard_bbh_reasoning_about_colored_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_reasoning_about_colored_objects\"\
,\n \"acc_norm,none\": 0.456,\n \"acc_norm_stderr,none\": 0.031563285061213475\n\
\ },\n \"leaderboard_bbh_ruin_names\": {\n \"alias\": \" - leaderboard_bbh_ruin_names\"\
,\n \"acc_norm,none\": 0.576,\n \"acc_norm_stderr,none\": 0.03131803437491622\n\
\ },\n \"leaderboard_bbh_salient_translation_error_detection\": {\n \
\ \"alias\": \" - leaderboard_bbh_salient_translation_error_detection\",\n \
\ \"acc_norm,none\": 0.452,\n \"acc_norm_stderr,none\": 0.03153986449255664\n\
\ },\n \"leaderboard_bbh_snarks\": {\n \"alias\": \" - leaderboard_bbh_snarks\"\
,\n \"acc_norm,none\": 0.6404494382022472,\n \"acc_norm_stderr,none\"\
: 0.03606913914074032\n },\n \"leaderboard_bbh_sports_understanding\": {\n\
\ \"alias\": \" - leaderboard_bbh_sports_understanding\",\n \"acc_norm,none\"\
: 0.812,\n \"acc_norm_stderr,none\": 0.02476037772775051\n },\n \"\
leaderboard_bbh_temporal_sequences\": {\n \"alias\": \" - leaderboard_bbh_temporal_sequences\"\
,\n \"acc_norm,none\": 0.456,\n \"acc_norm_stderr,none\": 0.031563285061213475\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_five_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_five_objects\"\
,\n \"acc_norm,none\": 0.16,\n \"acc_norm_stderr,none\": 0.023232714782060626\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_seven_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
,\n \"acc_norm,none\": 0.124,\n \"acc_norm_stderr,none\": 0.020886382258673272\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_three_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
,\n \"acc_norm,none\": 0.268,\n \"acc_norm_stderr,none\": 0.02806876238252672\n\
\ },\n \"leaderboard_bbh_web_of_lies\": {\n \"alias\": \" - leaderboard_bbh_web_of_lies\"\
,\n \"acc_norm,none\": 0.52,\n \"acc_norm_stderr,none\": 0.03166085340849512\n\
\ },\n \"leaderboard_gpqa\": {\n \"acc_norm,none\": 0.3028523489932886,\n\
\ \"acc_norm_stderr,none\": 0.013316733936515984,\n \"alias\": \"\
\ - leaderboard_gpqa\"\n },\n \"leaderboard_gpqa_diamond\": {\n \"\
alias\": \" - leaderboard_gpqa_diamond\",\n \"acc_norm,none\": 0.2676767676767677,\n\
\ \"acc_norm_stderr,none\": 0.031544498882702825\n },\n \"leaderboard_gpqa_extended\"\
: {\n \"alias\": \" - leaderboard_gpqa_extended\",\n \"acc_norm,none\"\
: 0.304029304029304,\n \"acc_norm_stderr,none\": 0.019704024937907735\n \
\ },\n \"leaderboard_gpqa_main\": {\n \"alias\": \" - leaderboard_gpqa_main\"\
,\n \"acc_norm,none\": 0.3169642857142857,\n \"acc_norm_stderr,none\"\
: 0.0220076215848248\n },\n \"leaderboard_ifeval\": {\n \"alias\":\
\ \" - leaderboard_ifeval\",\n \"prompt_level_strict_acc,none\": 0.4066543438077634,\n\
\ \"prompt_level_strict_acc_stderr,none\": 0.021138283177336344,\n \
\ \"inst_level_strict_acc,none\": 0.513189448441247,\n \"inst_level_strict_acc_stderr,none\"\
: \"N/A\",\n \"prompt_level_loose_acc,none\": 0.43068391866913125,\n \
\ \"prompt_level_loose_acc_stderr,none\": 0.021308808857898823,\n \"inst_level_loose_acc,none\"\
: 0.5407673860911271,\n \"inst_level_loose_acc_stderr,none\": \"N/A\"\n \
\ },\n \"leaderboard_math_hard\": {\n \"exact_match,none\": 0.17598187311178248,\n\
\ \"exact_match_stderr,none\": 0.009729917778735123,\n \"alias\":\
\ \" - leaderboard_math_hard\"\n },\n \"leaderboard_math_algebra_hard\": {\n\
\ \"alias\": \" - leaderboard_math_algebra_hard\",\n \"exact_match,none\"\
: 0.3811074918566775,\n \"exact_match_stderr,none\": 0.02776327166045321\n\
\ },\n \"leaderboard_math_counting_and_prob_hard\": {\n \"alias\":\
\ \" - leaderboard_math_counting_and_prob_hard\",\n \"exact_match,none\"\
: 0.10569105691056911,\n \"exact_match_stderr,none\": 0.0278344722877674\n\
\ },\n \"leaderboard_math_geometry_hard\": {\n \"alias\": \" - leaderboard_math_geometry_hard\"\
,\n \"exact_match,none\": 0.09090909090909091,\n \"exact_match_stderr,none\"\
: 0.0251172256361608\n },\n \"leaderboard_math_intermediate_algebra_hard\"\
: {\n \"alias\": \" - leaderboard_math_intermediate_algebra_hard\",\n \
\ \"exact_match,none\": 0.025,\n \"exact_match_stderr,none\": 0.009346956263824575\n\
\ },\n \"leaderboard_math_num_theory_hard\": {\n \"alias\": \" - leaderboard_math_num_theory_hard\"\
,\n \"exact_match,none\": 0.14285714285714285,\n \"exact_match_stderr,none\"\
: 0.028289929799333556\n },\n \"leaderboard_math_prealgebra_hard\": {\n \
\ \"alias\": \" - leaderboard_math_prealgebra_hard\",\n \"exact_match,none\"\
: 0.3005181347150259,\n \"exact_match_stderr,none\": 0.033088185944157515\n\
\ },\n \"leaderboard_math_precalculus_hard\": {\n \"alias\": \" -\
\ leaderboard_math_precalculus_hard\",\n \"exact_match,none\": 0.02962962962962963,\n\
\ \"exact_match_stderr,none\": 0.014648038602753809\n },\n \"leaderboard_mmlu_pro\"\
: {\n \"alias\": \" - leaderboard_mmlu_pro\",\n \"acc,none\": 0.39519614361702127,\n\
\ \"acc_stderr,none\": 0.00445720656433847\n },\n \"leaderboard_musr\"\
: {\n \"acc_norm,none\": 0.42063492063492064,\n \"acc_norm_stderr,none\"\
: 0.017713270487861726,\n \"alias\": \" - leaderboard_musr\"\n },\n \
\ \"leaderboard_musr_murder_mysteries\": {\n \"alias\": \" - leaderboard_musr_murder_mysteries\"\
,\n \"acc_norm,none\": 0.536,\n \"acc_norm_stderr,none\": 0.031603975145223735\n\
\ },\n \"leaderboard_musr_object_placements\": {\n \"alias\": \" -\
\ leaderboard_musr_object_placements\",\n \"acc_norm,none\": 0.328125,\n\
\ \"acc_norm_stderr,none\": 0.029403146715355242\n },\n \"leaderboard_musr_team_allocation\"\
: {\n \"alias\": \" - leaderboard_musr_team_allocation\",\n \"acc_norm,none\"\
: 0.4,\n \"acc_norm_stderr,none\": 0.031046021028253316\n }\n}\n```"
repo_url: https://huggingface.co/gmonsoon/StockSeaLLMs-7B-v1
leaderboard_url: ''
point_of_contact: ''
configs:
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_boolean_expressions
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_boolean_expressions_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_boolean_expressions_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_causal_judgement
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_causal_judgement_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_causal_judgement_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_date_understanding
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_date_understanding_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_date_understanding_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_disambiguation_qa
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_disambiguation_qa_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_disambiguation_qa_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_formal_fallacies
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_formal_fallacies_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_formal_fallacies_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_geometric_shapes
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_geometric_shapes_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_geometric_shapes_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_hyperbaton
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_hyperbaton_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_hyperbaton_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_logical_deduction_five_objects
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_logical_deduction_five_objects_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_five_objects_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_logical_deduction_seven_objects
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_logical_deduction_seven_objects_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_seven_objects_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_logical_deduction_three_objects
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_logical_deduction_three_objects_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_three_objects_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_movie_recommendation
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_movie_recommendation_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_movie_recommendation_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_navigate
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_navigate_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_navigate_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_object_counting
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_object_counting_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_object_counting_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_penguins_in_a_table
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_penguins_in_a_table_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_penguins_in_a_table_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_reasoning_about_colored_objects
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_reasoning_about_colored_objects_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_reasoning_about_colored_objects_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_ruin_names
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_ruin_names_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_ruin_names_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_salient_translation_error_detection
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_salient_translation_error_detection_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_salient_translation_error_detection_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_snarks
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_snarks_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_snarks_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_sports_understanding
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_sports_understanding_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_sports_understanding_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_temporal_sequences
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_temporal_sequences_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_temporal_sequences_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_tracking_shuffled_objects_five_objects
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_five_objects_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_five_objects_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_tracking_shuffled_objects_seven_objects
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_seven_objects_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_seven_objects_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_tracking_shuffled_objects_three_objects
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_three_objects_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_three_objects_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_web_of_lies
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_bbh_web_of_lies_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_web_of_lies_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_gpqa_diamond
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_gpqa_diamond_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_diamond_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_gpqa_extended
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_gpqa_extended_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_extended_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_gpqa_main
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_gpqa_main_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_main_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_ifeval
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_ifeval_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_ifeval_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_math_algebra_hard
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_math_algebra_hard_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_algebra_hard_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_math_counting_and_prob_hard
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_math_counting_and_prob_hard_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_counting_and_prob_hard_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_math_geometry_hard
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_math_geometry_hard_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_geometry_hard_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_math_intermediate_algebra_hard
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_math_intermediate_algebra_hard_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_intermediate_algebra_hard_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_math_num_theory_hard
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_math_num_theory_hard_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_num_theory_hard_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_math_prealgebra_hard
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_math_prealgebra_hard_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_prealgebra_hard_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_math_precalculus_hard
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_math_precalculus_hard_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_precalculus_hard_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_mmlu_pro
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_mmlu_pro_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_mmlu_pro_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_musr_murder_mysteries
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_musr_murder_mysteries_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_murder_mysteries_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_musr_object_placements
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_musr_object_placements_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_object_placements_2024-11-21T13-45-45.216237.jsonl'
- config_name: gmonsoon__StockSeaLLMs-7B-v1__leaderboard_musr_team_allocation
data_files:
- split: 2024_11_21T13_45_45.216237
path:
- '**/samples_leaderboard_musr_team_allocation_2024-11-21T13-45-45.216237.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_team_allocation_2024-11-21T13-45-45.216237.jsonl'
---
# Dataset Card for Evaluation run of gmonsoon/StockSeaLLMs-7B-v1
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [gmonsoon/StockSeaLLMs-7B-v1](https://huggingface.co/gmonsoon/StockSeaLLMs-7B-v1)
The dataset is composed of 38 configuration(s), each one corresponding to one of the evaluated tasks.
The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "latest" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run.
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset(
"open-llm-leaderboard/gmonsoon__StockSeaLLMs-7B-v1-details",
name="gmonsoon__StockSeaLLMs-7B-v1__leaderboard_bbh_boolean_expressions",
split="latest"
)
```
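For completeness, here is a minimal sketch of how one might list the available configurations before loading a split. The filter on the name "results" follows the description above and is an assumption; adjust it if the aggregated configuration is exposed under a different name.
```python
from datasets import get_dataset_config_names, load_dataset

repo = "open-llm-leaderboard/gmonsoon__StockSeaLLMs-7B-v1-details"

# List every configuration exposed by this dataset repository.
configs = get_dataset_config_names(repo)
print(len(configs), "configurations found")

# Assumption: the aggregated configuration's name contains "results",
# as described above; adjust the filter if the naming differs.
aggregated = [name for name in configs if "results" in name]
print("aggregated configuration(s):", aggregated)

# Load the latest run of one per-task configuration.
data = load_dataset(
    repo,
    name="gmonsoon__StockSeaLLMs-7B-v1__leaderboard_ifeval",
    split="latest",
)
print(data)
```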
## Latest results
These are the [latest results from run 2024-11-21T13-45-45.216237](https://huggingface.co/datasets/open-llm-leaderboard/gmonsoon__StockSeaLLMs-7B-v1-details/blob/main/gmonsoon__StockSeaLLMs-7B-v1/results_2024-11-21T13-45-45.216237.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each one in the "results" configuration and in the "latest" split of each eval):
```python
{
"all": {
"leaderboard": {
"prompt_level_loose_acc,none": 0.43068391866913125,
"prompt_level_loose_acc_stderr,none": 0.021308808857898823,
"acc_norm,none": 0.47956933454403944,
"acc_norm_stderr,none": 0.005332877202997923,
"acc,none": 0.39519614361702127,
"acc_stderr,none": 0.00445720656433847,
"exact_match,none": 0.17598187311178248,
"exact_match_stderr,none": 0.009729917778735123,
"inst_level_loose_acc,none": 0.5407673860911271,
"inst_level_loose_acc_stderr,none": "N/A",
"inst_level_strict_acc,none": 0.513189448441247,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_strict_acc,none": 0.4066543438077634,
"prompt_level_strict_acc_stderr,none": 0.021138283177336344,
"alias": "leaderboard"
},
"leaderboard_bbh": {
"acc_norm,none": 0.5238673841346988,
"acc_norm_stderr,none": 0.006158688482621799,
"alias": " - leaderboard_bbh"
},
"leaderboard_bbh_boolean_expressions": {
"alias": " - leaderboard_bbh_boolean_expressions",
"acc_norm,none": 0.852,
"acc_norm_stderr,none": 0.022503547243806186
},
"leaderboard_bbh_causal_judgement": {
"alias": " - leaderboard_bbh_causal_judgement",
"acc_norm,none": 0.6256684491978609,
"acc_norm_stderr,none": 0.0354849234134303
},
"leaderboard_bbh_date_understanding": {
"alias": " - leaderboard_bbh_date_understanding",
"acc_norm,none": 0.508,
"acc_norm_stderr,none": 0.03168215643141386
},
"leaderboard_bbh_disambiguation_qa": {
"alias": " - leaderboard_bbh_disambiguation_qa",
"acc_norm,none": 0.58,
"acc_norm_stderr,none": 0.03127799950463661
},
"leaderboard_bbh_formal_fallacies": {
"alias": " - leaderboard_bbh_formal_fallacies",
"acc_norm,none": 0.576,
"acc_norm_stderr,none": 0.03131803437491622
},
"leaderboard_bbh_geometric_shapes": {
"alias": " - leaderboard_bbh_geometric_shapes",
"acc_norm,none": 0.408,
"acc_norm_stderr,none": 0.031145209846548512
},
"leaderboard_bbh_hyperbaton": {
"alias": " - leaderboard_bbh_hyperbaton",
"acc_norm,none": 0.748,
"acc_norm_stderr,none": 0.027513851933031318
},
"leaderboard_bbh_logical_deduction_five_objects": {
"alias": " - leaderboard_bbh_logical_deduction_five_objects",
"acc_norm,none": 0.512,
"acc_norm_stderr,none": 0.03167708558254714
},
"leaderboard_bbh_logical_deduction_seven_objects": {
"alias": " - leaderboard_bbh_logical_deduction_seven_objects",
"acc_norm,none": 0.488,
"acc_norm_stderr,none": 0.03167708558254714
},
"leaderboard_bbh_logical_deduction_three_objects": {
"alias": " - leaderboard_bbh_logical_deduction_three_objects",
"acc_norm,none": 0.676,
"acc_norm_stderr,none": 0.029658294924545567
},
"leaderboard_bbh_movie_recommendation": {
"alias": " - leaderboard_bbh_movie_recommendation",
"acc_norm,none": 0.632,
"acc_norm_stderr,none": 0.03056207062099311
},
"leaderboard_bbh_navigate": {
"alias": " - leaderboard_bbh_navigate",
"acc_norm,none": 0.652,
"acc_norm_stderr,none": 0.030186568464511673
},
"leaderboard_bbh_object_counting": {
"alias": " - leaderboard_bbh_object_counting",
"acc_norm,none": 0.36,
"acc_norm_stderr,none": 0.03041876402517494
},
"leaderboard_bbh_penguins_in_a_table": {
"alias": " - leaderboard_bbh_penguins_in_a_table",
"acc_norm,none": 0.5684931506849316,
"acc_norm_stderr,none": 0.041131302645371945
},
"leaderboard_bbh_reasoning_about_colored_objects": {
"alias": " - leaderboard_bbh_reasoning_about_colored_objects",
"acc_norm,none": 0.456,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_ruin_names": {
"alias": " - leaderboard_bbh_ruin_names",
"acc_norm,none": 0.576,
"acc_norm_stderr,none": 0.03131803437491622
},
"leaderboard_bbh_salient_translation_error_detection": {
"alias": " - leaderboard_bbh_salient_translation_error_detection",
"acc_norm,none": 0.452,
"acc_norm_stderr,none": 0.03153986449255664
},
"leaderboard_bbh_snarks": {
"alias": " - leaderboard_bbh_snarks",
"acc_norm,none": 0.6404494382022472,
"acc_norm_stderr,none": 0.03606913914074032
},
"leaderboard_bbh_sports_understanding": {
"alias": " - leaderboard_bbh_sports_understanding",
"acc_norm,none": 0.812,
"acc_norm_stderr,none": 0.02476037772775051
},
"leaderboard_bbh_temporal_sequences": {
"alias": " - leaderboard_bbh_temporal_sequences",
"acc_norm,none": 0.456,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_tracking_shuffled_objects_five_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_five_objects",
"acc_norm,none": 0.16,
"acc_norm_stderr,none": 0.023232714782060626
},
"leaderboard_bbh_tracking_shuffled_objects_seven_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_seven_objects",
"acc_norm,none": 0.124,
"acc_norm_stderr,none": 0.020886382258673272
},
"leaderboard_bbh_tracking_shuffled_objects_three_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_three_objects",
"acc_norm,none": 0.268,
"acc_norm_stderr,none": 0.02806876238252672
},
"leaderboard_bbh_web_of_lies": {
"alias": " - leaderboard_bbh_web_of_lies",
"acc_norm,none": 0.52,
"acc_norm_stderr,none": 0.03166085340849512
},
"leaderboard_gpqa": {
"acc_norm,none": 0.3028523489932886,
"acc_norm_stderr,none": 0.013316733936515984,
"alias": " - leaderboard_gpqa"
},
"leaderboard_gpqa_diamond": {
"alias": " - leaderboard_gpqa_diamond",
"acc_norm,none": 0.2676767676767677,
"acc_norm_stderr,none": 0.031544498882702825
},
"leaderboard_gpqa_extended": {
"alias": " - leaderboard_gpqa_extended",
"acc_norm,none": 0.304029304029304,
"acc_norm_stderr,none": 0.019704024937907735
},
"leaderboard_gpqa_main": {
"alias": " - leaderboard_gpqa_main",
"acc_norm,none": 0.3169642857142857,
"acc_norm_stderr,none": 0.0220076215848248
},
"leaderboard_ifeval": {
"alias": " - leaderboard_ifeval",
"prompt_level_strict_acc,none": 0.4066543438077634,
"prompt_level_strict_acc_stderr,none": 0.021138283177336344,
"inst_level_strict_acc,none": 0.513189448441247,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.43068391866913125,
"prompt_level_loose_acc_stderr,none": 0.021308808857898823,
"inst_level_loose_acc,none": 0.5407673860911271,
"inst_level_loose_acc_stderr,none": "N/A"
},
"leaderboard_math_hard": {
"exact_match,none": 0.17598187311178248,
"exact_match_stderr,none": 0.009729917778735123,
"alias": " - leaderboard_math_hard"
},
"leaderboard_math_algebra_hard": {
"alias": " - leaderboard_math_algebra_hard",
"exact_match,none": 0.3811074918566775,
"exact_match_stderr,none": 0.02776327166045321
},
"leaderboard_math_counting_and_prob_hard": {
"alias": " - leaderboard_math_counting_and_prob_hard",
"exact_match,none": 0.10569105691056911,
"exact_match_stderr,none": 0.0278344722877674
},
"leaderboard_math_geometry_hard": {
"alias": " - leaderboard_math_geometry_hard",
"exact_match,none": 0.09090909090909091,
"exact_match_stderr,none": 0.0251172256361608
},
"leaderboard_math_intermediate_algebra_hard": {
"alias": " - leaderboard_math_intermediate_algebra_hard",
"exact_match,none": 0.025,
"exact_match_stderr,none": 0.009346956263824575
},
"leaderboard_math_num_theory_hard": {
"alias": " - leaderboard_math_num_theory_hard",
"exact_match,none": 0.14285714285714285,
"exact_match_stderr,none": 0.028289929799333556
},
"leaderboard_math_prealgebra_hard": {
"alias": " - leaderboard_math_prealgebra_hard",
"exact_match,none": 0.3005181347150259,
"exact_match_stderr,none": 0.033088185944157515
},
"leaderboard_math_precalculus_hard": {
"alias": " - leaderboard_math_precalculus_hard",
"exact_match,none": 0.02962962962962963,
"exact_match_stderr,none": 0.014648038602753809
},
"leaderboard_mmlu_pro": {
"alias": " - leaderboard_mmlu_pro",
"acc,none": 0.39519614361702127,
"acc_stderr,none": 0.00445720656433847
},
"leaderboard_musr": {
"acc_norm,none": 0.42063492063492064,
"acc_norm_stderr,none": 0.017713270487861726,
"alias": " - leaderboard_musr"
},
"leaderboard_musr_murder_mysteries": {
"alias": " - leaderboard_musr_murder_mysteries",
"acc_norm,none": 0.536,
"acc_norm_stderr,none": 0.031603975145223735
},
"leaderboard_musr_object_placements": {
"alias": " - leaderboard_musr_object_placements",
"acc_norm,none": 0.328125,
"acc_norm_stderr,none": 0.029403146715355242
},
"leaderboard_musr_team_allocation": {
"alias": " - leaderboard_musr_team_allocation",
"acc_norm,none": 0.4,
"acc_norm_stderr,none": 0.031046021028253316
}
},
"leaderboard": {
"prompt_level_loose_acc,none": 0.43068391866913125,
"prompt_level_loose_acc_stderr,none": 0.021308808857898823,
"acc_norm,none": 0.47956933454403944,
"acc_norm_stderr,none": 0.005332877202997923,
"acc,none": 0.39519614361702127,
"acc_stderr,none": 0.00445720656433847,
"exact_match,none": 0.17598187311178248,
"exact_match_stderr,none": 0.009729917778735123,
"inst_level_loose_acc,none": 0.5407673860911271,
"inst_level_loose_acc_stderr,none": "N/A",
"inst_level_strict_acc,none": 0.513189448441247,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_strict_acc,none": 0.4066543438077634,
"prompt_level_strict_acc_stderr,none": 0.021138283177336344,
"alias": "leaderboard"
},
"leaderboard_bbh": {
"acc_norm,none": 0.5238673841346988,
"acc_norm_stderr,none": 0.006158688482621799,
"alias": " - leaderboard_bbh"
},
"leaderboard_bbh_boolean_expressions": {
"alias": " - leaderboard_bbh_boolean_expressions",
"acc_norm,none": 0.852,
"acc_norm_stderr,none": 0.022503547243806186
},
"leaderboard_bbh_causal_judgement": {
"alias": " - leaderboard_bbh_causal_judgement",
"acc_norm,none": 0.6256684491978609,
"acc_norm_stderr,none": 0.0354849234134303
},
"leaderboard_bbh_date_understanding": {
"alias": " - leaderboard_bbh_date_understanding",
"acc_norm,none": 0.508,
"acc_norm_stderr,none": 0.03168215643141386
},
"leaderboard_bbh_disambiguation_qa": {
"alias": " - leaderboard_bbh_disambiguation_qa",
"acc_norm,none": 0.58,
"acc_norm_stderr,none": 0.03127799950463661
},
"leaderboard_bbh_formal_fallacies": {
"alias": " - leaderboard_bbh_formal_fallacies",
"acc_norm,none": 0.576,
"acc_norm_stderr,none": 0.03131803437491622
},
"leaderboard_bbh_geometric_shapes": {
"alias": " - leaderboard_bbh_geometric_shapes",
"acc_norm,none": 0.408,
"acc_norm_stderr,none": 0.031145209846548512
},
"leaderboard_bbh_hyperbaton": {
"alias": " - leaderboard_bbh_hyperbaton",
"acc_norm,none": 0.748,
"acc_norm_stderr,none": 0.027513851933031318
},
"leaderboard_bbh_logical_deduction_five_objects": {
"alias": " - leaderboard_bbh_logical_deduction_five_objects",
"acc_norm,none": 0.512,
"acc_norm_stderr,none": 0.03167708558254714
},
"leaderboard_bbh_logical_deduction_seven_objects": {
"alias": " - leaderboard_bbh_logical_deduction_seven_objects",
"acc_norm,none": 0.488,
"acc_norm_stderr,none": 0.03167708558254714
},
"leaderboard_bbh_logical_deduction_three_objects": {
"alias": " - leaderboard_bbh_logical_deduction_three_objects",
"acc_norm,none": 0.676,
"acc_norm_stderr,none": 0.029658294924545567
},
"leaderboard_bbh_movie_recommendation": {
"alias": " - leaderboard_bbh_movie_recommendation",
"acc_norm,none": 0.632,
"acc_norm_stderr,none": 0.03056207062099311
},
"leaderboard_bbh_navigate": {
"alias": " - leaderboard_bbh_navigate",
"acc_norm,none": 0.652,
"acc_norm_stderr,none": 0.030186568464511673
},
"leaderboard_bbh_object_counting": {
"alias": " - leaderboard_bbh_object_counting",
"acc_norm,none": 0.36,
"acc_norm_stderr,none": 0.03041876402517494
},
"leaderboard_bbh_penguins_in_a_table": {
"alias": " - leaderboard_bbh_penguins_in_a_table",
"acc_norm,none": 0.5684931506849316,
"acc_norm_stderr,none": 0.041131302645371945
},
"leaderboard_bbh_reasoning_about_colored_objects": {
"alias": " - leaderboard_bbh_reasoning_about_colored_objects",
"acc_norm,none": 0.456,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_ruin_names": {
"alias": " - leaderboard_bbh_ruin_names",
"acc_norm,none": 0.576,
"acc_norm_stderr,none": 0.03131803437491622
},
"leaderboard_bbh_salient_translation_error_detection": {
"alias": " - leaderboard_bbh_salient_translation_error_detection",
"acc_norm,none": 0.452,
"acc_norm_stderr,none": 0.03153986449255664
},
"leaderboard_bbh_snarks": {
"alias": " - leaderboard_bbh_snarks",
"acc_norm,none": 0.6404494382022472,
"acc_norm_stderr,none": 0.03606913914074032
},
"leaderboard_bbh_sports_understanding": {
"alias": " - leaderboard_bbh_sports_understanding",
"acc_norm,none": 0.812,
"acc_norm_stderr,none": 0.02476037772775051
},
"leaderboard_bbh_temporal_sequences": {
"alias": " - leaderboard_bbh_temporal_sequences",
"acc_norm,none": 0.456,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_tracking_shuffled_objects_five_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_five_objects",
"acc_norm,none": 0.16,
"acc_norm_stderr,none": 0.023232714782060626
},
"leaderboard_bbh_tracking_shuffled_objects_seven_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_seven_objects",
"acc_norm,none": 0.124,
"acc_norm_stderr,none": 0.020886382258673272
},
"leaderboard_bbh_tracking_shuffled_objects_three_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_three_objects",
"acc_norm,none": 0.268,
"acc_norm_stderr,none": 0.02806876238252672
},
"leaderboard_bbh_web_of_lies": {
"alias": " - leaderboard_bbh_web_of_lies",
"acc_norm,none": 0.52,
"acc_norm_stderr,none": 0.03166085340849512
},
"leaderboard_gpqa": {
"acc_norm,none": 0.3028523489932886,
"acc_norm_stderr,none": 0.013316733936515984,
"alias": " - leaderboard_gpqa"
},
"leaderboard_gpqa_diamond": {
"alias": " - leaderboard_gpqa_diamond",
"acc_norm,none": 0.2676767676767677,
"acc_norm_stderr,none": 0.031544498882702825
},
"leaderboard_gpqa_extended": {
"alias": " - leaderboard_gpqa_extended",
"acc_norm,none": 0.304029304029304,
"acc_norm_stderr,none": 0.019704024937907735
},
"leaderboard_gpqa_main": {
"alias": " - leaderboard_gpqa_main",
"acc_norm,none": 0.3169642857142857,
"acc_norm_stderr,none": 0.0220076215848248
},
"leaderboard_ifeval": {
"alias": " - leaderboard_ifeval",
"prompt_level_strict_acc,none": 0.4066543438077634,
"prompt_level_strict_acc_stderr,none": 0.021138283177336344,
"inst_level_strict_acc,none": 0.513189448441247,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.43068391866913125,
"prompt_level_loose_acc_stderr,none": 0.021308808857898823,
"inst_level_loose_acc,none": 0.5407673860911271,
"inst_level_loose_acc_stderr,none": "N/A"
},
"leaderboard_math_hard": {
"exact_match,none": 0.17598187311178248,
"exact_match_stderr,none": 0.009729917778735123,
"alias": " - leaderboard_math_hard"
},
"leaderboard_math_algebra_hard": {
"alias": " - leaderboard_math_algebra_hard",
"exact_match,none": 0.3811074918566775,
"exact_match_stderr,none": 0.02776327166045321
},
"leaderboard_math_counting_and_prob_hard": {
"alias": " - leaderboard_math_counting_and_prob_hard",
"exact_match,none": 0.10569105691056911,
"exact_match_stderr,none": 0.0278344722877674
},
"leaderboard_math_geometry_hard": {
"alias": " - leaderboard_math_geometry_hard",
"exact_match,none": 0.09090909090909091,
"exact_match_stderr,none": 0.0251172256361608
},
"leaderboard_math_intermediate_algebra_hard": {
"alias": " - leaderboard_math_intermediate_algebra_hard",
"exact_match,none": 0.025,
"exact_match_stderr,none": 0.009346956263824575
},
"leaderboard_math_num_theory_hard": {
"alias": " - leaderboard_math_num_theory_hard",
"exact_match,none": 0.14285714285714285,
"exact_match_stderr,none": 0.028289929799333556
},
"leaderboard_math_prealgebra_hard": {
"alias": " - leaderboard_math_prealgebra_hard",
"exact_match,none": 0.3005181347150259,
"exact_match_stderr,none": 0.033088185944157515
},
"leaderboard_math_precalculus_hard": {
"alias": " - leaderboard_math_precalculus_hard",
"exact_match,none": 0.02962962962962963,
"exact_match_stderr,none": 0.014648038602753809
},
"leaderboard_mmlu_pro": {
"alias": " - leaderboard_mmlu_pro",
"acc,none": 0.39519614361702127,
"acc_stderr,none": 0.00445720656433847
},
"leaderboard_musr": {
"acc_norm,none": 0.42063492063492064,
"acc_norm_stderr,none": 0.017713270487861726,
"alias": " - leaderboard_musr"
},
"leaderboard_musr_murder_mysteries": {
"alias": " - leaderboard_musr_murder_mysteries",
"acc_norm,none": 0.536,
"acc_norm_stderr,none": 0.031603975145223735
},
"leaderboard_musr_object_placements": {
"alias": " - leaderboard_musr_object_placements",
"acc_norm,none": 0.328125,
"acc_norm_stderr,none": 0.029403146715355242
},
"leaderboard_musr_team_allocation": {
"alias": " - leaderboard_musr_team_allocation",
"acc_norm,none": 0.4,
"acc_norm_stderr,none": 0.031046021028253316
}
}
```
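As a convenience, a small sketch (not part of the original card) of one way to fetch and inspect this results file directly with `huggingface_hub`; the file path comes from the link above, and the `"all"`/`"leaderboard"` lookup mirrors the snippet shown here, so it should be treated as an assumption about the file layout.
```python
import json

from huggingface_hub import hf_hub_download

# Download the raw results JSON referenced above from the dataset repository.
path = hf_hub_download(
    repo_id="open-llm-leaderboard/gmonsoon__StockSeaLLMs-7B-v1-details",
    filename="gmonsoon__StockSeaLLMs-7B-v1/results_2024-11-21T13-45-45.216237.json",
    repo_type="dataset",
)

with open(path) as f:
    results = json.load(f)

# Inspect the top-level structure first.
print(sorted(results.keys()))

# Assumption: the aggregated metrics sit under "all" -> "leaderboard" as in the
# snippet above; adjust the lookup if the file nests them differently.
leaderboard = results.get("all", {}).get("leaderboard", {})
for metric, value in leaderboard.items():
    print(f"{metric}: {value}")
```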
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases, and limitations of the dataset. More information is needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
HamdanXI/libriTTS_dev_wav2vec2_latent_layer0_2sec_PERFECT_chunk_51 | HamdanXI | "2024-11-21T14:03:32Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T14:01:59Z" | ---
dataset_info:
features:
- name: audio_clip
sequence: float64
- name: layer0_prediction
sequence: float64
- name: predicted_text
dtype: string
- name: speaker_id
dtype: string
splits:
- name: train
num_bytes: 2646635310
num_examples: 100
download_size: 1926578960
dataset_size: 2646635310
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "libriTTS_dev_wav2vec2_latent_layer0_2sec_PERFECT_chunk_51"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
SeppeV/joke_gen_of_mistral_ft_mean_score_dpo_w_ex_reasoning_prompt_wo_ex | SeppeV | "2024-11-21T14:02:32Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T14:02:30Z" | ---
dataset_info:
features:
- name: jokeText
dtype: string
- name: userId
dtype: int64
splits:
- name: train
num_bytes: 173754
num_examples: 125
download_size: 90163
dataset_size: 173754
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
HamdanXI/libriTTS_dev_wav2vec2_latent_layer0_2sec_PERFECT_chunk_52 | HamdanXI | "2024-11-21T14:05:48Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T14:04:16Z" | ---
dataset_info:
features:
- name: audio_clip
sequence: float64
- name: layer0_prediction
sequence: float64
- name: predicted_text
dtype: string
- name: speaker_id
dtype: string
splits:
- name: train
num_bytes: 2646635420
num_examples: 100
download_size: 1891530993
dataset_size: 2646635420
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "libriTTS_dev_wav2vec2_latent_layer0_2sec_PERFECT_chunk_52"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
dgambettaphd/P_wiki_doc5000_real96 | dgambettaphd | "2024-11-21T14:27:09Z" | 7 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T14:27:07Z" | ---
dataset_info:
features:
- name: id
dtype: int64
- name: prompt
dtype: string
splits:
- name: train
num_bytes: 2628049
num_examples: 5000
download_size: 1706149
dataset_size: 2628049
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
katyazevskaya/python-course | katyazevskaya | "2024-11-21T14:40:46Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T14:40:43Z" | ---
dataset_info:
features:
- name: Original Text
dtype: string
- name: Lemmatized Text
sequence: string
- name: POS Annotation
sequence:
sequence: string
- name: NER Annotation
sequence:
sequence: string
splits:
- name: train
num_bytes: 190320
num_examples: 1
download_size: 104732
dataset_size: 190320
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
VladLoPG/alice_gpt | VladLoPG | "2024-11-21T15:12:11Z" | 7 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-11-21T15:12:10Z" | ---
license: apache-2.0
---
|
A-l-e-x/gravitation | A-l-e-x | "2024-11-21T15:20:56Z" | 7 | 0 | [
"license:mit",
"region:us"
] | null | "2024-11-21T15:19:53Z" | ---
license: mit
---
|
Priya098098/Raj | Priya098098 | "2024-11-21T15:47:12Z" | 7 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-11-21T15:47:12Z" | ---
license: apache-2.0
---
|
open-llm-leaderboard/GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct-details | open-llm-leaderboard | "2024-11-21T16:09:20Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:json",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T16:06:21Z" | ---
pretty_name: Evaluation run of GoToCompany/gemma2-9b-cpt-sahabatai-v1-instruct
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [GoToCompany/gemma2-9b-cpt-sahabatai-v1-instruct](https://huggingface.co/GoToCompany/gemma2-9b-cpt-sahabatai-v1-instruct)\n\
The dataset is composed of 38 configuration(s), each one corresponding to one of\
\ the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can\
\ be found as a specific split in each configuration, the split being named using\
\ the timestamp of the run.The \"train\" split is always pointing to the latest\
\ results.\n\nAn additional configuration \"results\" store all the aggregated results\
\ of the run.\n\nTo load the details from a run, you can for instance do the following:\n\
```python\nfrom datasets import load_dataset\ndata = load_dataset(\n\t\"open-llm-leaderboard/GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct-details\"\
,\n\tname=\"GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_boolean_expressions\"\
,\n\tsplit=\"latest\"\n)\n```\n\n## Latest results\n\nThese are the [latest results\
\ from run 2024-11-21T16-06-20.952173](https://huggingface.co/datasets/open-llm-leaderboard/GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct-details/blob/main/GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct/results_2024-11-21T16-06-20.952173.json)\
\ (note that there might be results for other tasks in the repos if successive evals\
\ didn't cover the same tasks. You find each in the results and the \"latest\" split\
\ for each eval):\n\n```python\n{\n \"all\": {\n \"leaderboard\": {\n\
\ \"prompt_level_strict_acc,none\": 0.6062846580406654,\n \
\ \"prompt_level_strict_acc_stderr,none\": 0.021024834145872404,\n \"\
acc_norm,none\": 0.5431314048514723,\n \"acc_norm_stderr,none\": 0.005317050852347761,\n\
\ \"inst_level_loose_acc,none\": 0.7302158273381295,\n \"\
inst_level_loose_acc_stderr,none\": \"N/A\",\n \"acc,none\": 0.4263630319148936,\n\
\ \"acc_stderr,none\": 0.004508763683858449,\n \"inst_level_strict_acc,none\"\
: 0.7038369304556354,\n \"inst_level_strict_acc_stderr,none\": \"N/A\"\
,\n \"exact_match,none\": 0.19788519637462235,\n \"exact_match_stderr,none\"\
: 0.009998835994126825,\n \"prompt_level_loose_acc,none\": 0.6395563770794824,\n\
\ \"prompt_level_loose_acc_stderr,none\": 0.0206614696698795,\n \
\ \"alias\": \"leaderboard\"\n },\n \"leaderboard_bbh\": {\n\
\ \"acc_norm,none\": 0.5948620031244576,\n \"acc_norm_stderr,none\"\
: 0.006083807836624403,\n \"alias\": \" - leaderboard_bbh\"\n \
\ },\n \"leaderboard_bbh_boolean_expressions\": {\n \"alias\"\
: \" - leaderboard_bbh_boolean_expressions\",\n \"acc_norm,none\": 0.856,\n\
\ \"acc_norm_stderr,none\": 0.022249407735450245\n },\n \
\ \"leaderboard_bbh_causal_judgement\": {\n \"alias\": \" - leaderboard_bbh_causal_judgement\"\
,\n \"acc_norm,none\": 0.6363636363636364,\n \"acc_norm_stderr,none\"\
: 0.03527198153014412\n },\n \"leaderboard_bbh_date_understanding\"\
: {\n \"alias\": \" - leaderboard_bbh_date_understanding\",\n \
\ \"acc_norm,none\": 0.608,\n \"acc_norm_stderr,none\": 0.030938207620401222\n\
\ },\n \"leaderboard_bbh_disambiguation_qa\": {\n \"alias\"\
: \" - leaderboard_bbh_disambiguation_qa\",\n \"acc_norm,none\": 0.636,\n\
\ \"acc_norm_stderr,none\": 0.030491555220405475\n },\n \
\ \"leaderboard_bbh_formal_fallacies\": {\n \"alias\": \" - leaderboard_bbh_formal_fallacies\"\
,\n \"acc_norm,none\": 0.584,\n \"acc_norm_stderr,none\":\
\ 0.031235856237014505\n },\n \"leaderboard_bbh_geometric_shapes\"\
: {\n \"alias\": \" - leaderboard_bbh_geometric_shapes\",\n \
\ \"acc_norm,none\": 0.436,\n \"acc_norm_stderr,none\": 0.031425567060281365\n\
\ },\n \"leaderboard_bbh_hyperbaton\": {\n \"alias\": \"\
\ - leaderboard_bbh_hyperbaton\",\n \"acc_norm,none\": 0.724,\n \
\ \"acc_norm_stderr,none\": 0.02832853727421142\n },\n \"leaderboard_bbh_logical_deduction_five_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_five_objects\"\
,\n \"acc_norm,none\": 0.544,\n \"acc_norm_stderr,none\":\
\ 0.031563285061213475\n },\n \"leaderboard_bbh_logical_deduction_seven_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_seven_objects\"\
,\n \"acc_norm,none\": 0.536,\n \"acc_norm_stderr,none\":\
\ 0.031603975145223735\n },\n \"leaderboard_bbh_logical_deduction_three_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_three_objects\"\
,\n \"acc_norm,none\": 0.796,\n \"acc_norm_stderr,none\":\
\ 0.025537121574548162\n },\n \"leaderboard_bbh_movie_recommendation\"\
: {\n \"alias\": \" - leaderboard_bbh_movie_recommendation\",\n \
\ \"acc_norm,none\": 0.744,\n \"acc_norm_stderr,none\": 0.027657108718204846\n\
\ },\n \"leaderboard_bbh_navigate\": {\n \"alias\": \"\
\ - leaderboard_bbh_navigate\",\n \"acc_norm,none\": 0.644,\n \
\ \"acc_norm_stderr,none\": 0.0303436806571532\n },\n \"leaderboard_bbh_object_counting\"\
: {\n \"alias\": \" - leaderboard_bbh_object_counting\",\n \
\ \"acc_norm,none\": 0.448,\n \"acc_norm_stderr,none\": 0.03151438761115349\n\
\ },\n \"leaderboard_bbh_penguins_in_a_table\": {\n \"\
alias\": \" - leaderboard_bbh_penguins_in_a_table\",\n \"acc_norm,none\"\
: 0.5684931506849316,\n \"acc_norm_stderr,none\": 0.041131302645371945\n\
\ },\n \"leaderboard_bbh_reasoning_about_colored_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_reasoning_about_colored_objects\",\n\
\ \"acc_norm,none\": 0.692,\n \"acc_norm_stderr,none\": 0.02925692860650181\n\
\ },\n \"leaderboard_bbh_ruin_names\": {\n \"alias\": \"\
\ - leaderboard_bbh_ruin_names\",\n \"acc_norm,none\": 0.864,\n \
\ \"acc_norm_stderr,none\": 0.021723342617052086\n },\n \"\
leaderboard_bbh_salient_translation_error_detection\": {\n \"alias\"\
: \" - leaderboard_bbh_salient_translation_error_detection\",\n \"acc_norm,none\"\
: 0.544,\n \"acc_norm_stderr,none\": 0.031563285061213475\n },\n\
\ \"leaderboard_bbh_snarks\": {\n \"alias\": \" - leaderboard_bbh_snarks\"\
,\n \"acc_norm,none\": 0.6460674157303371,\n \"acc_norm_stderr,none\"\
: 0.03594285405211505\n },\n \"leaderboard_bbh_sports_understanding\"\
: {\n \"alias\": \" - leaderboard_bbh_sports_understanding\",\n \
\ \"acc_norm,none\": 0.852,\n \"acc_norm_stderr,none\": 0.022503547243806186\n\
\ },\n \"leaderboard_bbh_temporal_sequences\": {\n \"alias\"\
: \" - leaderboard_bbh_temporal_sequences\",\n \"acc_norm,none\": 0.592,\n\
\ \"acc_norm_stderr,none\": 0.03114520984654851\n },\n \
\ \"leaderboard_bbh_tracking_shuffled_objects_five_objects\": {\n \"\
alias\": \" - leaderboard_bbh_tracking_shuffled_objects_five_objects\",\n \
\ \"acc_norm,none\": 0.252,\n \"acc_norm_stderr,none\": 0.027513851933031318\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
,\n \"acc_norm,none\": 0.268,\n \"acc_norm_stderr,none\":\
\ 0.02806876238252672\n },\n \"leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
,\n \"acc_norm,none\": 0.344,\n \"acc_norm_stderr,none\":\
\ 0.03010450339231644\n },\n \"leaderboard_bbh_web_of_lies\": {\n\
\ \"alias\": \" - leaderboard_bbh_web_of_lies\",\n \"acc_norm,none\"\
: 0.476,\n \"acc_norm_stderr,none\": 0.03164968895968774\n },\n\
\ \"leaderboard_gpqa\": {\n \"acc_norm,none\": 0.3347315436241611,\n\
\ \"acc_norm_stderr,none\": 0.013681339748209233,\n \"alias\"\
: \" - leaderboard_gpqa\"\n },\n \"leaderboard_gpqa_diamond\": {\n\
\ \"alias\": \" - leaderboard_gpqa_diamond\",\n \"acc_norm,none\"\
: 0.3434343434343434,\n \"acc_norm_stderr,none\": 0.03383201223244441\n\
\ },\n \"leaderboard_gpqa_extended\": {\n \"alias\": \"\
\ - leaderboard_gpqa_extended\",\n \"acc_norm,none\": 0.32234432234432236,\n\
\ \"acc_norm_stderr,none\": 0.020020102750045735\n },\n \
\ \"leaderboard_gpqa_main\": {\n \"alias\": \" - leaderboard_gpqa_main\"\
,\n \"acc_norm,none\": 0.34598214285714285,\n \"acc_norm_stderr,none\"\
: 0.022499241830682457\n },\n \"leaderboard_ifeval\": {\n \
\ \"alias\": \" - leaderboard_ifeval\",\n \"prompt_level_strict_acc,none\"\
: 0.6062846580406654,\n \"prompt_level_strict_acc_stderr,none\": 0.021024834145872404,\n\
\ \"inst_level_strict_acc,none\": 0.7038369304556354,\n \"\
inst_level_strict_acc_stderr,none\": \"N/A\",\n \"prompt_level_loose_acc,none\"\
: 0.6395563770794824,\n \"prompt_level_loose_acc_stderr,none\": 0.0206614696698795,\n\
\ \"inst_level_loose_acc,none\": 0.7302158273381295,\n \"\
inst_level_loose_acc_stderr,none\": \"N/A\"\n },\n \"leaderboard_math_hard\"\
: {\n \"exact_match,none\": 0.19788519637462235,\n \"exact_match_stderr,none\"\
: 0.009998835994126825,\n \"alias\": \" - leaderboard_math_hard\"\n \
\ },\n \"leaderboard_math_algebra_hard\": {\n \"alias\"\
: \" - leaderboard_math_algebra_hard\",\n \"exact_match,none\": 0.43322475570032576,\n\
\ \"exact_match_stderr,none\": 0.028327050442298423\n },\n \
\ \"leaderboard_math_counting_and_prob_hard\": {\n \"alias\": \"\
\ - leaderboard_math_counting_and_prob_hard\",\n \"exact_match,none\"\
: 0.11382113821138211,\n \"exact_match_stderr,none\": 0.02875360087323741\n\
\ },\n \"leaderboard_math_geometry_hard\": {\n \"alias\"\
: \" - leaderboard_math_geometry_hard\",\n \"exact_match,none\": 0.06818181818181818,\n\
\ \"exact_match_stderr,none\": 0.022022378945902827\n },\n \
\ \"leaderboard_math_intermediate_algebra_hard\": {\n \"alias\":\
\ \" - leaderboard_math_intermediate_algebra_hard\",\n \"exact_match,none\"\
: 0.03571428571428571,\n \"exact_match_stderr,none\": 0.011110196729254557\n\
\ },\n \"leaderboard_math_num_theory_hard\": {\n \"alias\"\
: \" - leaderboard_math_num_theory_hard\",\n \"exact_match,none\": 0.12337662337662338,\n\
\ \"exact_match_stderr,none\": 0.026587484423674337\n },\n \
\ \"leaderboard_math_prealgebra_hard\": {\n \"alias\": \" - leaderboard_math_prealgebra_hard\"\
,\n \"exact_match,none\": 0.35751295336787564,\n \"exact_match_stderr,none\"\
: 0.03458816042181008\n },\n \"leaderboard_math_precalculus_hard\"\
: {\n \"alias\": \" - leaderboard_math_precalculus_hard\",\n \
\ \"exact_match,none\": 0.05925925925925926,\n \"exact_match_stderr,none\"\
: 0.02039673654232189\n },\n \"leaderboard_mmlu_pro\": {\n \
\ \"alias\": \" - leaderboard_mmlu_pro\",\n \"acc,none\": 0.4263630319148936,\n\
\ \"acc_stderr,none\": 0.004508763683858449\n },\n \"leaderboard_musr\"\
: {\n \"acc_norm,none\": 0.4775132275132275,\n \"acc_norm_stderr,none\"\
: 0.01802634312352244,\n \"alias\": \" - leaderboard_musr\"\n \
\ },\n \"leaderboard_musr_murder_mysteries\": {\n \"alias\": \"\
\ - leaderboard_musr_murder_mysteries\",\n \"acc_norm,none\": 0.576,\n\
\ \"acc_norm_stderr,none\": 0.03131803437491622\n },\n \
\ \"leaderboard_musr_object_placements\": {\n \"alias\": \" - leaderboard_musr_object_placements\"\
,\n \"acc_norm,none\": 0.43359375,\n \"acc_norm_stderr,none\"\
: 0.031033834158735715\n },\n \"leaderboard_musr_team_allocation\"\
: {\n \"alias\": \" - leaderboard_musr_team_allocation\",\n \
\ \"acc_norm,none\": 0.424,\n \"acc_norm_stderr,none\": 0.03131803437491622\n\
\ }\n },\n \"leaderboard\": {\n \"prompt_level_strict_acc,none\"\
: 0.6062846580406654,\n \"prompt_level_strict_acc_stderr,none\": 0.021024834145872404,\n\
\ \"acc_norm,none\": 0.5431314048514723,\n \"acc_norm_stderr,none\"\
: 0.005317050852347761,\n \"inst_level_loose_acc,none\": 0.7302158273381295,\n\
\ \"inst_level_loose_acc_stderr,none\": \"N/A\",\n \"acc,none\": 0.4263630319148936,\n\
\ \"acc_stderr,none\": 0.004508763683858449,\n \"inst_level_strict_acc,none\"\
: 0.7038369304556354,\n \"inst_level_strict_acc_stderr,none\": \"N/A\",\n\
\ \"exact_match,none\": 0.19788519637462235,\n \"exact_match_stderr,none\"\
: 0.009998835994126825,\n \"prompt_level_loose_acc,none\": 0.6395563770794824,\n\
\ \"prompt_level_loose_acc_stderr,none\": 0.0206614696698795,\n \"\
alias\": \"leaderboard\"\n },\n \"leaderboard_bbh\": {\n \"acc_norm,none\"\
: 0.5948620031244576,\n \"acc_norm_stderr,none\": 0.006083807836624403,\n\
\ \"alias\": \" - leaderboard_bbh\"\n },\n \"leaderboard_bbh_boolean_expressions\"\
: {\n \"alias\": \" - leaderboard_bbh_boolean_expressions\",\n \"\
acc_norm,none\": 0.856,\n \"acc_norm_stderr,none\": 0.022249407735450245\n\
\ },\n \"leaderboard_bbh_causal_judgement\": {\n \"alias\": \" - leaderboard_bbh_causal_judgement\"\
,\n \"acc_norm,none\": 0.6363636363636364,\n \"acc_norm_stderr,none\"\
: 0.03527198153014412\n },\n \"leaderboard_bbh_date_understanding\": {\n \
\ \"alias\": \" - leaderboard_bbh_date_understanding\",\n \"acc_norm,none\"\
: 0.608,\n \"acc_norm_stderr,none\": 0.030938207620401222\n },\n \"\
leaderboard_bbh_disambiguation_qa\": {\n \"alias\": \" - leaderboard_bbh_disambiguation_qa\"\
,\n \"acc_norm,none\": 0.636,\n \"acc_norm_stderr,none\": 0.030491555220405475\n\
\ },\n \"leaderboard_bbh_formal_fallacies\": {\n \"alias\": \" - leaderboard_bbh_formal_fallacies\"\
,\n \"acc_norm,none\": 0.584,\n \"acc_norm_stderr,none\": 0.031235856237014505\n\
\ },\n \"leaderboard_bbh_geometric_shapes\": {\n \"alias\": \" - leaderboard_bbh_geometric_shapes\"\
,\n \"acc_norm,none\": 0.436,\n \"acc_norm_stderr,none\": 0.031425567060281365\n\
\ },\n \"leaderboard_bbh_hyperbaton\": {\n \"alias\": \" - leaderboard_bbh_hyperbaton\"\
,\n \"acc_norm,none\": 0.724,\n \"acc_norm_stderr,none\": 0.02832853727421142\n\
\ },\n \"leaderboard_bbh_logical_deduction_five_objects\": {\n \"alias\"\
: \" - leaderboard_bbh_logical_deduction_five_objects\",\n \"acc_norm,none\"\
: 0.544,\n \"acc_norm_stderr,none\": 0.031563285061213475\n },\n \"\
leaderboard_bbh_logical_deduction_seven_objects\": {\n \"alias\": \" - leaderboard_bbh_logical_deduction_seven_objects\"\
,\n \"acc_norm,none\": 0.536,\n \"acc_norm_stderr,none\": 0.031603975145223735\n\
\ },\n \"leaderboard_bbh_logical_deduction_three_objects\": {\n \"\
alias\": \" - leaderboard_bbh_logical_deduction_three_objects\",\n \"acc_norm,none\"\
: 0.796,\n \"acc_norm_stderr,none\": 0.025537121574548162\n },\n \"\
leaderboard_bbh_movie_recommendation\": {\n \"alias\": \" - leaderboard_bbh_movie_recommendation\"\
,\n \"acc_norm,none\": 0.744,\n \"acc_norm_stderr,none\": 0.027657108718204846\n\
\ },\n \"leaderboard_bbh_navigate\": {\n \"alias\": \" - leaderboard_bbh_navigate\"\
,\n \"acc_norm,none\": 0.644,\n \"acc_norm_stderr,none\": 0.0303436806571532\n\
\ },\n \"leaderboard_bbh_object_counting\": {\n \"alias\": \" - leaderboard_bbh_object_counting\"\
,\n \"acc_norm,none\": 0.448,\n \"acc_norm_stderr,none\": 0.03151438761115349\n\
\ },\n \"leaderboard_bbh_penguins_in_a_table\": {\n \"alias\": \" \
\ - leaderboard_bbh_penguins_in_a_table\",\n \"acc_norm,none\": 0.5684931506849316,\n\
\ \"acc_norm_stderr,none\": 0.041131302645371945\n },\n \"leaderboard_bbh_reasoning_about_colored_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_reasoning_about_colored_objects\"\
,\n \"acc_norm,none\": 0.692,\n \"acc_norm_stderr,none\": 0.02925692860650181\n\
\ },\n \"leaderboard_bbh_ruin_names\": {\n \"alias\": \" - leaderboard_bbh_ruin_names\"\
,\n \"acc_norm,none\": 0.864,\n \"acc_norm_stderr,none\": 0.021723342617052086\n\
\ },\n \"leaderboard_bbh_salient_translation_error_detection\": {\n \
\ \"alias\": \" - leaderboard_bbh_salient_translation_error_detection\",\n \
\ \"acc_norm,none\": 0.544,\n \"acc_norm_stderr,none\": 0.031563285061213475\n\
\ },\n \"leaderboard_bbh_snarks\": {\n \"alias\": \" - leaderboard_bbh_snarks\"\
,\n \"acc_norm,none\": 0.6460674157303371,\n \"acc_norm_stderr,none\"\
: 0.03594285405211505\n },\n \"leaderboard_bbh_sports_understanding\": {\n\
\ \"alias\": \" - leaderboard_bbh_sports_understanding\",\n \"acc_norm,none\"\
: 0.852,\n \"acc_norm_stderr,none\": 0.022503547243806186\n },\n \"\
leaderboard_bbh_temporal_sequences\": {\n \"alias\": \" - leaderboard_bbh_temporal_sequences\"\
,\n \"acc_norm,none\": 0.592,\n \"acc_norm_stderr,none\": 0.03114520984654851\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_five_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_five_objects\"\
,\n \"acc_norm,none\": 0.252,\n \"acc_norm_stderr,none\": 0.027513851933031318\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_seven_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
,\n \"acc_norm,none\": 0.268,\n \"acc_norm_stderr,none\": 0.02806876238252672\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_three_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
,\n \"acc_norm,none\": 0.344,\n \"acc_norm_stderr,none\": 0.03010450339231644\n\
\ },\n \"leaderboard_bbh_web_of_lies\": {\n \"alias\": \" - leaderboard_bbh_web_of_lies\"\
,\n \"acc_norm,none\": 0.476,\n \"acc_norm_stderr,none\": 0.03164968895968774\n\
\ },\n \"leaderboard_gpqa\": {\n \"acc_norm,none\": 0.3347315436241611,\n\
\ \"acc_norm_stderr,none\": 0.013681339748209233,\n \"alias\": \"\
\ - leaderboard_gpqa\"\n },\n \"leaderboard_gpqa_diamond\": {\n \"\
alias\": \" - leaderboard_gpqa_diamond\",\n \"acc_norm,none\": 0.3434343434343434,\n\
\ \"acc_norm_stderr,none\": 0.03383201223244441\n },\n \"leaderboard_gpqa_extended\"\
: {\n \"alias\": \" - leaderboard_gpqa_extended\",\n \"acc_norm,none\"\
: 0.32234432234432236,\n \"acc_norm_stderr,none\": 0.020020102750045735\n\
\ },\n \"leaderboard_gpqa_main\": {\n \"alias\": \" - leaderboard_gpqa_main\"\
,\n \"acc_norm,none\": 0.34598214285714285,\n \"acc_norm_stderr,none\"\
: 0.022499241830682457\n },\n \"leaderboard_ifeval\": {\n \"alias\"\
: \" - leaderboard_ifeval\",\n \"prompt_level_strict_acc,none\": 0.6062846580406654,\n\
\ \"prompt_level_strict_acc_stderr,none\": 0.021024834145872404,\n \
\ \"inst_level_strict_acc,none\": 0.7038369304556354,\n \"inst_level_strict_acc_stderr,none\"\
: \"N/A\",\n \"prompt_level_loose_acc,none\": 0.6395563770794824,\n \
\ \"prompt_level_loose_acc_stderr,none\": 0.0206614696698795,\n \"inst_level_loose_acc,none\"\
: 0.7302158273381295,\n \"inst_level_loose_acc_stderr,none\": \"N/A\"\n \
\ },\n \"leaderboard_math_hard\": {\n \"exact_match,none\": 0.19788519637462235,\n\
\ \"exact_match_stderr,none\": 0.009998835994126825,\n \"alias\":\
\ \" - leaderboard_math_hard\"\n },\n \"leaderboard_math_algebra_hard\": {\n\
\ \"alias\": \" - leaderboard_math_algebra_hard\",\n \"exact_match,none\"\
: 0.43322475570032576,\n \"exact_match_stderr,none\": 0.028327050442298423\n\
\ },\n \"leaderboard_math_counting_and_prob_hard\": {\n \"alias\":\
\ \" - leaderboard_math_counting_and_prob_hard\",\n \"exact_match,none\"\
: 0.11382113821138211,\n \"exact_match_stderr,none\": 0.02875360087323741\n\
\ },\n \"leaderboard_math_geometry_hard\": {\n \"alias\": \" - leaderboard_math_geometry_hard\"\
,\n \"exact_match,none\": 0.06818181818181818,\n \"exact_match_stderr,none\"\
: 0.022022378945902827\n },\n \"leaderboard_math_intermediate_algebra_hard\"\
: {\n \"alias\": \" - leaderboard_math_intermediate_algebra_hard\",\n \
\ \"exact_match,none\": 0.03571428571428571,\n \"exact_match_stderr,none\"\
: 0.011110196729254557\n },\n \"leaderboard_math_num_theory_hard\": {\n \
\ \"alias\": \" - leaderboard_math_num_theory_hard\",\n \"exact_match,none\"\
: 0.12337662337662338,\n \"exact_match_stderr,none\": 0.026587484423674337\n\
\ },\n \"leaderboard_math_prealgebra_hard\": {\n \"alias\": \" - leaderboard_math_prealgebra_hard\"\
,\n \"exact_match,none\": 0.35751295336787564,\n \"exact_match_stderr,none\"\
: 0.03458816042181008\n },\n \"leaderboard_math_precalculus_hard\": {\n \
\ \"alias\": \" - leaderboard_math_precalculus_hard\",\n \"exact_match,none\"\
: 0.05925925925925926,\n \"exact_match_stderr,none\": 0.02039673654232189\n\
\ },\n \"leaderboard_mmlu_pro\": {\n \"alias\": \" - leaderboard_mmlu_pro\"\
,\n \"acc,none\": 0.4263630319148936,\n \"acc_stderr,none\": 0.004508763683858449\n\
\ },\n \"leaderboard_musr\": {\n \"acc_norm,none\": 0.4775132275132275,\n\
\ \"acc_norm_stderr,none\": 0.01802634312352244,\n \"alias\": \" -\
\ leaderboard_musr\"\n },\n \"leaderboard_musr_murder_mysteries\": {\n \
\ \"alias\": \" - leaderboard_musr_murder_mysteries\",\n \"acc_norm,none\"\
: 0.576,\n \"acc_norm_stderr,none\": 0.03131803437491622\n },\n \"\
leaderboard_musr_object_placements\": {\n \"alias\": \" - leaderboard_musr_object_placements\"\
,\n \"acc_norm,none\": 0.43359375,\n \"acc_norm_stderr,none\": 0.031033834158735715\n\
\ },\n \"leaderboard_musr_team_allocation\": {\n \"alias\": \" - leaderboard_musr_team_allocation\"\
,\n \"acc_norm,none\": 0.424,\n \"acc_norm_stderr,none\": 0.03131803437491622\n\
\ }\n}\n```"
repo_url: https://huggingface.co/GoToCompany/gemma2-9b-cpt-sahabatai-v1-instruct
leaderboard_url: ''
point_of_contact: ''
configs:
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_boolean_expressions
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_boolean_expressions_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_boolean_expressions_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_causal_judgement
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_causal_judgement_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_causal_judgement_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_date_understanding
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_date_understanding_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_date_understanding_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_disambiguation_qa
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_disambiguation_qa_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_disambiguation_qa_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_formal_fallacies
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_formal_fallacies_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_formal_fallacies_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_geometric_shapes
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_geometric_shapes_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_geometric_shapes_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_hyperbaton
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_hyperbaton_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_hyperbaton_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_logical_deduction_five_objects
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_logical_deduction_five_objects_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_five_objects_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_logical_deduction_seven_objects
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_logical_deduction_seven_objects_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_seven_objects_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_logical_deduction_three_objects
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_logical_deduction_three_objects_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_three_objects_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_movie_recommendation
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_movie_recommendation_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_movie_recommendation_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_navigate
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_navigate_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_navigate_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_object_counting
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_object_counting_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_object_counting_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_penguins_in_a_table
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_penguins_in_a_table_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_penguins_in_a_table_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_reasoning_about_colored_objects
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_reasoning_about_colored_objects_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_reasoning_about_colored_objects_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_ruin_names
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_ruin_names_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_ruin_names_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_salient_translation_error_detection
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_salient_translation_error_detection_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_salient_translation_error_detection_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_snarks
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_snarks_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_snarks_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_sports_understanding
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_sports_understanding_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_sports_understanding_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_temporal_sequences
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_temporal_sequences_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_temporal_sequences_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_tracking_shuffled_objects_five_objects
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_five_objects_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_five_objects_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_tracking_shuffled_objects_seven_objects
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_seven_objects_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_seven_objects_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_tracking_shuffled_objects_three_objects
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_three_objects_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_three_objects_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_web_of_lies
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_bbh_web_of_lies_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_web_of_lies_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_gpqa_diamond
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_gpqa_diamond_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_diamond_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_gpqa_extended
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_gpqa_extended_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_extended_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_gpqa_main
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_gpqa_main_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_main_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_ifeval
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_ifeval_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_ifeval_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_math_algebra_hard
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_math_algebra_hard_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_algebra_hard_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_math_counting_and_prob_hard
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_math_counting_and_prob_hard_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_counting_and_prob_hard_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_math_geometry_hard
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_math_geometry_hard_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_geometry_hard_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_math_intermediate_algebra_hard
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_math_intermediate_algebra_hard_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_intermediate_algebra_hard_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_math_num_theory_hard
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_math_num_theory_hard_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_num_theory_hard_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_math_prealgebra_hard
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_math_prealgebra_hard_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_prealgebra_hard_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_math_precalculus_hard
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_math_precalculus_hard_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_precalculus_hard_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_mmlu_pro
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_mmlu_pro_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_mmlu_pro_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_musr_murder_mysteries
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_musr_murder_mysteries_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_murder_mysteries_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_musr_object_placements
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_musr_object_placements_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_object_placements_2024-11-21T16-06-20.952173.jsonl'
- config_name: GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_musr_team_allocation
data_files:
- split: 2024_11_21T16_06_20.952173
path:
- '**/samples_leaderboard_musr_team_allocation_2024-11-21T16-06-20.952173.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_team_allocation_2024-11-21T16-06-20.952173.jsonl'
---
# Dataset Card for Evaluation run of GoToCompany/gemma2-9b-cpt-sahabatai-v1-instruct
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [GoToCompany/gemma2-9b-cpt-sahabatai-v1-instruct](https://huggingface.co/GoToCompany/gemma2-9b-cpt-sahabatai-v1-instruct).
The dataset is composed of 38 configurations, each one corresponding to one of the evaluated tasks.
The dataset has been created from 1 run. Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run.
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset(
"open-llm-leaderboard/GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct-details",
name="GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_bbh_boolean_expressions",
split="latest"
)
```
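For example, the snippet below (a minimal sketch using the standard `datasets` helpers; the configuration name is taken from the list declared above) enumerates every available configuration and then loads the "latest" split of one of them:
```python
from datasets import get_dataset_config_names, load_dataset

repo = "open-llm-leaderboard/GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct-details"

# List every configuration (one per evaluated task, plus the aggregated "results" one).
configs = get_dataset_config_names(repo)
print(configs)

# Load the "latest" split of one of the listed configurations, e.g. the IFEval samples.
ifeval = load_dataset(
    repo,
    name="GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct__leaderboard_ifeval",
    split="latest",
)
```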
## Latest results
These are the [latest results from run 2024-11-21T16-06-20.952173](https://huggingface.co/datasets/open-llm-leaderboard/GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct-details/blob/main/GoToCompany__gemma2-9b-cpt-sahabatai-v1-instruct/results_2024-11-21T16-06-20.952173.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; each task's results live in its own configuration, with the "latest" split pointing to the most recent evaluation):
```python
{
"all": {
"leaderboard": {
"prompt_level_strict_acc,none": 0.6062846580406654,
"prompt_level_strict_acc_stderr,none": 0.021024834145872404,
"acc_norm,none": 0.5431314048514723,
"acc_norm_stderr,none": 0.005317050852347761,
"inst_level_loose_acc,none": 0.7302158273381295,
"inst_level_loose_acc_stderr,none": "N/A",
"acc,none": 0.4263630319148936,
"acc_stderr,none": 0.004508763683858449,
"inst_level_strict_acc,none": 0.7038369304556354,
"inst_level_strict_acc_stderr,none": "N/A",
"exact_match,none": 0.19788519637462235,
"exact_match_stderr,none": 0.009998835994126825,
"prompt_level_loose_acc,none": 0.6395563770794824,
"prompt_level_loose_acc_stderr,none": 0.0206614696698795,
"alias": "leaderboard"
},
"leaderboard_bbh": {
"acc_norm,none": 0.5948620031244576,
"acc_norm_stderr,none": 0.006083807836624403,
"alias": " - leaderboard_bbh"
},
"leaderboard_bbh_boolean_expressions": {
"alias": " - leaderboard_bbh_boolean_expressions",
"acc_norm,none": 0.856,
"acc_norm_stderr,none": 0.022249407735450245
},
"leaderboard_bbh_causal_judgement": {
"alias": " - leaderboard_bbh_causal_judgement",
"acc_norm,none": 0.6363636363636364,
"acc_norm_stderr,none": 0.03527198153014412
},
"leaderboard_bbh_date_understanding": {
"alias": " - leaderboard_bbh_date_understanding",
"acc_norm,none": 0.608,
"acc_norm_stderr,none": 0.030938207620401222
},
"leaderboard_bbh_disambiguation_qa": {
"alias": " - leaderboard_bbh_disambiguation_qa",
"acc_norm,none": 0.636,
"acc_norm_stderr,none": 0.030491555220405475
},
"leaderboard_bbh_formal_fallacies": {
"alias": " - leaderboard_bbh_formal_fallacies",
"acc_norm,none": 0.584,
"acc_norm_stderr,none": 0.031235856237014505
},
"leaderboard_bbh_geometric_shapes": {
"alias": " - leaderboard_bbh_geometric_shapes",
"acc_norm,none": 0.436,
"acc_norm_stderr,none": 0.031425567060281365
},
"leaderboard_bbh_hyperbaton": {
"alias": " - leaderboard_bbh_hyperbaton",
"acc_norm,none": 0.724,
"acc_norm_stderr,none": 0.02832853727421142
},
"leaderboard_bbh_logical_deduction_five_objects": {
"alias": " - leaderboard_bbh_logical_deduction_five_objects",
"acc_norm,none": 0.544,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_logical_deduction_seven_objects": {
"alias": " - leaderboard_bbh_logical_deduction_seven_objects",
"acc_norm,none": 0.536,
"acc_norm_stderr,none": 0.031603975145223735
},
"leaderboard_bbh_logical_deduction_three_objects": {
"alias": " - leaderboard_bbh_logical_deduction_three_objects",
"acc_norm,none": 0.796,
"acc_norm_stderr,none": 0.025537121574548162
},
"leaderboard_bbh_movie_recommendation": {
"alias": " - leaderboard_bbh_movie_recommendation",
"acc_norm,none": 0.744,
"acc_norm_stderr,none": 0.027657108718204846
},
"leaderboard_bbh_navigate": {
"alias": " - leaderboard_bbh_navigate",
"acc_norm,none": 0.644,
"acc_norm_stderr,none": 0.0303436806571532
},
"leaderboard_bbh_object_counting": {
"alias": " - leaderboard_bbh_object_counting",
"acc_norm,none": 0.448,
"acc_norm_stderr,none": 0.03151438761115349
},
"leaderboard_bbh_penguins_in_a_table": {
"alias": " - leaderboard_bbh_penguins_in_a_table",
"acc_norm,none": 0.5684931506849316,
"acc_norm_stderr,none": 0.041131302645371945
},
"leaderboard_bbh_reasoning_about_colored_objects": {
"alias": " - leaderboard_bbh_reasoning_about_colored_objects",
"acc_norm,none": 0.692,
"acc_norm_stderr,none": 0.02925692860650181
},
"leaderboard_bbh_ruin_names": {
"alias": " - leaderboard_bbh_ruin_names",
"acc_norm,none": 0.864,
"acc_norm_stderr,none": 0.021723342617052086
},
"leaderboard_bbh_salient_translation_error_detection": {
"alias": " - leaderboard_bbh_salient_translation_error_detection",
"acc_norm,none": 0.544,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_snarks": {
"alias": " - leaderboard_bbh_snarks",
"acc_norm,none": 0.6460674157303371,
"acc_norm_stderr,none": 0.03594285405211505
},
"leaderboard_bbh_sports_understanding": {
"alias": " - leaderboard_bbh_sports_understanding",
"acc_norm,none": 0.852,
"acc_norm_stderr,none": 0.022503547243806186
},
"leaderboard_bbh_temporal_sequences": {
"alias": " - leaderboard_bbh_temporal_sequences",
"acc_norm,none": 0.592,
"acc_norm_stderr,none": 0.03114520984654851
},
"leaderboard_bbh_tracking_shuffled_objects_five_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_five_objects",
"acc_norm,none": 0.252,
"acc_norm_stderr,none": 0.027513851933031318
},
"leaderboard_bbh_tracking_shuffled_objects_seven_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_seven_objects",
"acc_norm,none": 0.268,
"acc_norm_stderr,none": 0.02806876238252672
},
"leaderboard_bbh_tracking_shuffled_objects_three_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_three_objects",
"acc_norm,none": 0.344,
"acc_norm_stderr,none": 0.03010450339231644
},
"leaderboard_bbh_web_of_lies": {
"alias": " - leaderboard_bbh_web_of_lies",
"acc_norm,none": 0.476,
"acc_norm_stderr,none": 0.03164968895968774
},
"leaderboard_gpqa": {
"acc_norm,none": 0.3347315436241611,
"acc_norm_stderr,none": 0.013681339748209233,
"alias": " - leaderboard_gpqa"
},
"leaderboard_gpqa_diamond": {
"alias": " - leaderboard_gpqa_diamond",
"acc_norm,none": 0.3434343434343434,
"acc_norm_stderr,none": 0.03383201223244441
},
"leaderboard_gpqa_extended": {
"alias": " - leaderboard_gpqa_extended",
"acc_norm,none": 0.32234432234432236,
"acc_norm_stderr,none": 0.020020102750045735
},
"leaderboard_gpqa_main": {
"alias": " - leaderboard_gpqa_main",
"acc_norm,none": 0.34598214285714285,
"acc_norm_stderr,none": 0.022499241830682457
},
"leaderboard_ifeval": {
"alias": " - leaderboard_ifeval",
"prompt_level_strict_acc,none": 0.6062846580406654,
"prompt_level_strict_acc_stderr,none": 0.021024834145872404,
"inst_level_strict_acc,none": 0.7038369304556354,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.6395563770794824,
"prompt_level_loose_acc_stderr,none": 0.0206614696698795,
"inst_level_loose_acc,none": 0.7302158273381295,
"inst_level_loose_acc_stderr,none": "N/A"
},
"leaderboard_math_hard": {
"exact_match,none": 0.19788519637462235,
"exact_match_stderr,none": 0.009998835994126825,
"alias": " - leaderboard_math_hard"
},
"leaderboard_math_algebra_hard": {
"alias": " - leaderboard_math_algebra_hard",
"exact_match,none": 0.43322475570032576,
"exact_match_stderr,none": 0.028327050442298423
},
"leaderboard_math_counting_and_prob_hard": {
"alias": " - leaderboard_math_counting_and_prob_hard",
"exact_match,none": 0.11382113821138211,
"exact_match_stderr,none": 0.02875360087323741
},
"leaderboard_math_geometry_hard": {
"alias": " - leaderboard_math_geometry_hard",
"exact_match,none": 0.06818181818181818,
"exact_match_stderr,none": 0.022022378945902827
},
"leaderboard_math_intermediate_algebra_hard": {
"alias": " - leaderboard_math_intermediate_algebra_hard",
"exact_match,none": 0.03571428571428571,
"exact_match_stderr,none": 0.011110196729254557
},
"leaderboard_math_num_theory_hard": {
"alias": " - leaderboard_math_num_theory_hard",
"exact_match,none": 0.12337662337662338,
"exact_match_stderr,none": 0.026587484423674337
},
"leaderboard_math_prealgebra_hard": {
"alias": " - leaderboard_math_prealgebra_hard",
"exact_match,none": 0.35751295336787564,
"exact_match_stderr,none": 0.03458816042181008
},
"leaderboard_math_precalculus_hard": {
"alias": " - leaderboard_math_precalculus_hard",
"exact_match,none": 0.05925925925925926,
"exact_match_stderr,none": 0.02039673654232189
},
"leaderboard_mmlu_pro": {
"alias": " - leaderboard_mmlu_pro",
"acc,none": 0.4263630319148936,
"acc_stderr,none": 0.004508763683858449
},
"leaderboard_musr": {
"acc_norm,none": 0.4775132275132275,
"acc_norm_stderr,none": 0.01802634312352244,
"alias": " - leaderboard_musr"
},
"leaderboard_musr_murder_mysteries": {
"alias": " - leaderboard_musr_murder_mysteries",
"acc_norm,none": 0.576,
"acc_norm_stderr,none": 0.03131803437491622
},
"leaderboard_musr_object_placements": {
"alias": " - leaderboard_musr_object_placements",
"acc_norm,none": 0.43359375,
"acc_norm_stderr,none": 0.031033834158735715
},
"leaderboard_musr_team_allocation": {
"alias": " - leaderboard_musr_team_allocation",
"acc_norm,none": 0.424,
"acc_norm_stderr,none": 0.03131803437491622
}
},
"leaderboard": {
"prompt_level_strict_acc,none": 0.6062846580406654,
"prompt_level_strict_acc_stderr,none": 0.021024834145872404,
"acc_norm,none": 0.5431314048514723,
"acc_norm_stderr,none": 0.005317050852347761,
"inst_level_loose_acc,none": 0.7302158273381295,
"inst_level_loose_acc_stderr,none": "N/A",
"acc,none": 0.4263630319148936,
"acc_stderr,none": 0.004508763683858449,
"inst_level_strict_acc,none": 0.7038369304556354,
"inst_level_strict_acc_stderr,none": "N/A",
"exact_match,none": 0.19788519637462235,
"exact_match_stderr,none": 0.009998835994126825,
"prompt_level_loose_acc,none": 0.6395563770794824,
"prompt_level_loose_acc_stderr,none": 0.0206614696698795,
"alias": "leaderboard"
},
"leaderboard_bbh": {
"acc_norm,none": 0.5948620031244576,
"acc_norm_stderr,none": 0.006083807836624403,
"alias": " - leaderboard_bbh"
},
"leaderboard_bbh_boolean_expressions": {
"alias": " - leaderboard_bbh_boolean_expressions",
"acc_norm,none": 0.856,
"acc_norm_stderr,none": 0.022249407735450245
},
"leaderboard_bbh_causal_judgement": {
"alias": " - leaderboard_bbh_causal_judgement",
"acc_norm,none": 0.6363636363636364,
"acc_norm_stderr,none": 0.03527198153014412
},
"leaderboard_bbh_date_understanding": {
"alias": " - leaderboard_bbh_date_understanding",
"acc_norm,none": 0.608,
"acc_norm_stderr,none": 0.030938207620401222
},
"leaderboard_bbh_disambiguation_qa": {
"alias": " - leaderboard_bbh_disambiguation_qa",
"acc_norm,none": 0.636,
"acc_norm_stderr,none": 0.030491555220405475
},
"leaderboard_bbh_formal_fallacies": {
"alias": " - leaderboard_bbh_formal_fallacies",
"acc_norm,none": 0.584,
"acc_norm_stderr,none": 0.031235856237014505
},
"leaderboard_bbh_geometric_shapes": {
"alias": " - leaderboard_bbh_geometric_shapes",
"acc_norm,none": 0.436,
"acc_norm_stderr,none": 0.031425567060281365
},
"leaderboard_bbh_hyperbaton": {
"alias": " - leaderboard_bbh_hyperbaton",
"acc_norm,none": 0.724,
"acc_norm_stderr,none": 0.02832853727421142
},
"leaderboard_bbh_logical_deduction_five_objects": {
"alias": " - leaderboard_bbh_logical_deduction_five_objects",
"acc_norm,none": 0.544,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_logical_deduction_seven_objects": {
"alias": " - leaderboard_bbh_logical_deduction_seven_objects",
"acc_norm,none": 0.536,
"acc_norm_stderr,none": 0.031603975145223735
},
"leaderboard_bbh_logical_deduction_three_objects": {
"alias": " - leaderboard_bbh_logical_deduction_three_objects",
"acc_norm,none": 0.796,
"acc_norm_stderr,none": 0.025537121574548162
},
"leaderboard_bbh_movie_recommendation": {
"alias": " - leaderboard_bbh_movie_recommendation",
"acc_norm,none": 0.744,
"acc_norm_stderr,none": 0.027657108718204846
},
"leaderboard_bbh_navigate": {
"alias": " - leaderboard_bbh_navigate",
"acc_norm,none": 0.644,
"acc_norm_stderr,none": 0.0303436806571532
},
"leaderboard_bbh_object_counting": {
"alias": " - leaderboard_bbh_object_counting",
"acc_norm,none": 0.448,
"acc_norm_stderr,none": 0.03151438761115349
},
"leaderboard_bbh_penguins_in_a_table": {
"alias": " - leaderboard_bbh_penguins_in_a_table",
"acc_norm,none": 0.5684931506849316,
"acc_norm_stderr,none": 0.041131302645371945
},
"leaderboard_bbh_reasoning_about_colored_objects": {
"alias": " - leaderboard_bbh_reasoning_about_colored_objects",
"acc_norm,none": 0.692,
"acc_norm_stderr,none": 0.02925692860650181
},
"leaderboard_bbh_ruin_names": {
"alias": " - leaderboard_bbh_ruin_names",
"acc_norm,none": 0.864,
"acc_norm_stderr,none": 0.021723342617052086
},
"leaderboard_bbh_salient_translation_error_detection": {
"alias": " - leaderboard_bbh_salient_translation_error_detection",
"acc_norm,none": 0.544,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_snarks": {
"alias": " - leaderboard_bbh_snarks",
"acc_norm,none": 0.6460674157303371,
"acc_norm_stderr,none": 0.03594285405211505
},
"leaderboard_bbh_sports_understanding": {
"alias": " - leaderboard_bbh_sports_understanding",
"acc_norm,none": 0.852,
"acc_norm_stderr,none": 0.022503547243806186
},
"leaderboard_bbh_temporal_sequences": {
"alias": " - leaderboard_bbh_temporal_sequences",
"acc_norm,none": 0.592,
"acc_norm_stderr,none": 0.03114520984654851
},
"leaderboard_bbh_tracking_shuffled_objects_five_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_five_objects",
"acc_norm,none": 0.252,
"acc_norm_stderr,none": 0.027513851933031318
},
"leaderboard_bbh_tracking_shuffled_objects_seven_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_seven_objects",
"acc_norm,none": 0.268,
"acc_norm_stderr,none": 0.02806876238252672
},
"leaderboard_bbh_tracking_shuffled_objects_three_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_three_objects",
"acc_norm,none": 0.344,
"acc_norm_stderr,none": 0.03010450339231644
},
"leaderboard_bbh_web_of_lies": {
"alias": " - leaderboard_bbh_web_of_lies",
"acc_norm,none": 0.476,
"acc_norm_stderr,none": 0.03164968895968774
},
"leaderboard_gpqa": {
"acc_norm,none": 0.3347315436241611,
"acc_norm_stderr,none": 0.013681339748209233,
"alias": " - leaderboard_gpqa"
},
"leaderboard_gpqa_diamond": {
"alias": " - leaderboard_gpqa_diamond",
"acc_norm,none": 0.3434343434343434,
"acc_norm_stderr,none": 0.03383201223244441
},
"leaderboard_gpqa_extended": {
"alias": " - leaderboard_gpqa_extended",
"acc_norm,none": 0.32234432234432236,
"acc_norm_stderr,none": 0.020020102750045735
},
"leaderboard_gpqa_main": {
"alias": " - leaderboard_gpqa_main",
"acc_norm,none": 0.34598214285714285,
"acc_norm_stderr,none": 0.022499241830682457
},
"leaderboard_ifeval": {
"alias": " - leaderboard_ifeval",
"prompt_level_strict_acc,none": 0.6062846580406654,
"prompt_level_strict_acc_stderr,none": 0.021024834145872404,
"inst_level_strict_acc,none": 0.7038369304556354,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.6395563770794824,
"prompt_level_loose_acc_stderr,none": 0.0206614696698795,
"inst_level_loose_acc,none": 0.7302158273381295,
"inst_level_loose_acc_stderr,none": "N/A"
},
"leaderboard_math_hard": {
"exact_match,none": 0.19788519637462235,
"exact_match_stderr,none": 0.009998835994126825,
"alias": " - leaderboard_math_hard"
},
"leaderboard_math_algebra_hard": {
"alias": " - leaderboard_math_algebra_hard",
"exact_match,none": 0.43322475570032576,
"exact_match_stderr,none": 0.028327050442298423
},
"leaderboard_math_counting_and_prob_hard": {
"alias": " - leaderboard_math_counting_and_prob_hard",
"exact_match,none": 0.11382113821138211,
"exact_match_stderr,none": 0.02875360087323741
},
"leaderboard_math_geometry_hard": {
"alias": " - leaderboard_math_geometry_hard",
"exact_match,none": 0.06818181818181818,
"exact_match_stderr,none": 0.022022378945902827
},
"leaderboard_math_intermediate_algebra_hard": {
"alias": " - leaderboard_math_intermediate_algebra_hard",
"exact_match,none": 0.03571428571428571,
"exact_match_stderr,none": 0.011110196729254557
},
"leaderboard_math_num_theory_hard": {
"alias": " - leaderboard_math_num_theory_hard",
"exact_match,none": 0.12337662337662338,
"exact_match_stderr,none": 0.026587484423674337
},
"leaderboard_math_prealgebra_hard": {
"alias": " - leaderboard_math_prealgebra_hard",
"exact_match,none": 0.35751295336787564,
"exact_match_stderr,none": 0.03458816042181008
},
"leaderboard_math_precalculus_hard": {
"alias": " - leaderboard_math_precalculus_hard",
"exact_match,none": 0.05925925925925926,
"exact_match_stderr,none": 0.02039673654232189
},
"leaderboard_mmlu_pro": {
"alias": " - leaderboard_mmlu_pro",
"acc,none": 0.4263630319148936,
"acc_stderr,none": 0.004508763683858449
},
"leaderboard_musr": {
"acc_norm,none": 0.4775132275132275,
"acc_norm_stderr,none": 0.01802634312352244,
"alias": " - leaderboard_musr"
},
"leaderboard_musr_murder_mysteries": {
"alias": " - leaderboard_musr_murder_mysteries",
"acc_norm,none": 0.576,
"acc_norm_stderr,none": 0.03131803437491622
},
"leaderboard_musr_object_placements": {
"alias": " - leaderboard_musr_object_placements",
"acc_norm,none": 0.43359375,
"acc_norm_stderr,none": 0.031033834158735715
},
"leaderboard_musr_team_allocation": {
"alias": " - leaderboard_musr_team_allocation",
"acc_norm,none": 0.424,
"acc_norm_stderr,none": 0.03131803437491622
}
}
```
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
Tensorists/SD3_5_Turbo_images_combined | Tensorists | "2024-11-21T16:13:20Z" | 7 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T16:08:05Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: label
dtype:
class_label:
names:
'0': Airplane
'1': Automobile
'2': Bird
'3': Cat
'4': Deer
'5': Dog
'6': Frog
'7': Horse
'8': Ship
'9': Truck
splits:
- name: train
num_bytes: 4472583093.935
num_examples: 2985
download_size: 4472859758
dataset_size: 4472583093.935
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Tensorists/SD3_images | Tensorists | "2024-11-21T16:20:46Z" | 7 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T16:19:54Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: label
dtype:
class_label:
names:
'0': Dog
'1': Frog
'2': Horse
'3': Ship
'4': Truck
splits:
- name: train
num_bytes: 619872098.741
num_examples: 1489
download_size: 619156629
dataset_size: 619872098.741
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
juliadollis/mistral_toxigen-data-test_2fewshot_limiar3 | juliadollis | "2024-11-21T16:27:19Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T16:27:17Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: target_group
dtype: string
- name: factual?
dtype: string
- name: ingroup_effect
dtype: string
- name: lewd
dtype: string
- name: framing
dtype: string
- name: predicted_group
dtype: string
- name: stereotyping
dtype: string
- name: intent
dtype: float64
- name: toxicity_ai
dtype: float64
- name: toxicity_human
dtype: float64
- name: predicted_author
dtype: string
- name: actual_method
dtype: string
- name: is_toxic
dtype: int64
- name: predicted_is_toxic
dtype: int64
- name: y_true
dtype: int64
splits:
- name: train
num_bytes: 393176
num_examples: 940
download_size: 85177
dataset_size: 393176
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Vinisf/Vinicin | Vinisf | "2024-11-21T16:41:48Z" | 7 | 0 | [
"license:openrail",
"size_categories:n<1K",
"format:audiofolder",
"modality:audio",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-11-21T16:41:17Z" | ---
license: openrail
---
|
procit007/treated_0.3 | procit007 | "2024-11-21T16:54:13Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T16:52:37Z" | ---
dataset_info:
features:
- name: gender
dtype: string
- name: accent
dtype: string
- name: speaker_id
dtype: int64
- name: speaker_name
dtype: string
- name: text
dtype: string
- name: normalized_text
dtype: string
- name: audio
dtype: audio
- name: treated
dtype: bool
- name: metrics
struct:
- name: clipping_ratio
dtype: float64
- name: duration
dtype: float64
- name: is_valid
dtype: bool
- name: rms_energy
dtype: float64
- name: sample_rate
dtype: int64
- name: silence_ratio
dtype: float64
- name: snr
dtype: float64
splits:
- name: train
num_bytes: 3176831243.0
num_examples: 10000
download_size: 2978489519
dataset_size: 3176831243.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
dgambettaphd/P_wiki_doc10000_real64 | dgambettaphd | "2024-11-21T16:55:03Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T16:55:00Z" | ---
dataset_info:
features:
- name: id
dtype: int64
- name: prompt
dtype: string
splits:
- name: train
num_bytes: 3528280
num_examples: 10000
download_size: 2306183
dataset_size: 3528280
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
GabrielML/SteamGRS | GabrielML | "2024-11-21T18:09:22Z" | 7 | 0 | [
"task_categories:text-classification",
"language:de",
"license:apache-2.0",
"size_categories:10K<n<100K",
"format:csv",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"text-classification"
] | "2024-11-21T18:07:10Z" | ---
license: apache-2.0
task_categories:
- text-classification
language:
- de
size_categories:
- 1K<n<10K
---
Steam German Review Sentiment (SteamGRS) for seminar work (LoRA experiments). |
open-llm-leaderboard/zelk12__MT3-Gen2-gemma-2-9B-details | open-llm-leaderboard | "2024-11-21T18:47:37Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:json",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T18:43:34Z" | ---
pretty_name: Evaluation run of zelk12/MT3-Gen2-gemma-2-9B
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [zelk12/MT3-Gen2-gemma-2-9B](https://huggingface.co/zelk12/MT3-Gen2-gemma-2-9B)\n\
The dataset is composed of 38 configuration(s), each one corresponding to one of\
\ the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can\
\ be found as a specific split in each configuration, the split being named using\
\ the timestamp of the run.The \"train\" split is always pointing to the latest\
\ results.\n\nAn additional configuration \"results\" store all the aggregated results\
\ of the run.\n\nTo load the details from a run, you can for instance do the following:\n\
```python\nfrom datasets import load_dataset\ndata = load_dataset(\n\t\"open-llm-leaderboard/zelk12__MT3-Gen2-gemma-2-9B-details\"\
,\n\tname=\"zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_boolean_expressions\",\n\
\tsplit=\"latest\"\n)\n```\n\n## Latest results\n\nThese are the [latest results\
\ from run 2024-11-21T18-43-33.559212](https://huggingface.co/datasets/open-llm-leaderboard/zelk12__MT3-Gen2-gemma-2-9B-details/blob/main/zelk12__MT3-Gen2-gemma-2-9B/results_2024-11-21T18-43-33.559212.json)\
\ (note that there might be results for other tasks in the repos if successive evals\
\ didn't cover the same tasks. You find each in the results and the \"latest\" split\
\ for each eval):\n\n```python\n{\n \"all\": {\n \"leaderboard\": {\n\
\ \"prompt_level_strict_acc,none\": 0.744916820702403,\n \"\
prompt_level_strict_acc_stderr,none\": 0.018758491950414184,\n \"acc,none\"\
: 0.43326130319148937,\n \"acc_stderr,none\": 0.004517680579088188,\n\
\ \"acc_norm,none\": 0.54987676741471,\n \"acc_norm_stderr,none\"\
: 0.005289250250282228,\n \"prompt_level_loose_acc,none\": 0.767097966728281,\n\
\ \"prompt_level_loose_acc_stderr,none\": 0.01818926607409182,\n \
\ \"exact_match,none\": 0.02039274924471299,\n \"exact_match_stderr,none\"\
: 0.003847017757728751,\n \"inst_level_loose_acc,none\": 0.842925659472422,\n\
\ \"inst_level_loose_acc_stderr,none\": \"N/A\",\n \"inst_level_strict_acc,none\"\
: 0.8237410071942446,\n \"inst_level_strict_acc_stderr,none\": \"N/A\"\
,\n \"alias\": \"leaderboard\"\n },\n \"leaderboard_bbh\"\
: {\n \"acc_norm,none\": 0.6080541572643638,\n \"acc_norm_stderr,none\"\
: 0.0060467875310710436,\n \"alias\": \" - leaderboard_bbh\"\n \
\ },\n \"leaderboard_bbh_boolean_expressions\": {\n \"alias\"\
: \" - leaderboard_bbh_boolean_expressions\",\n \"acc_norm,none\": 0.852,\n\
\ \"acc_norm_stderr,none\": 0.022503547243806186\n },\n \
\ \"leaderboard_bbh_causal_judgement\": {\n \"alias\": \" - leaderboard_bbh_causal_judgement\"\
,\n \"acc_norm,none\": 0.6310160427807486,\n \"acc_norm_stderr,none\"\
: 0.03538078548260318\n },\n \"leaderboard_bbh_date_understanding\"\
: {\n \"alias\": \" - leaderboard_bbh_date_understanding\",\n \
\ \"acc_norm,none\": 0.596,\n \"acc_norm_stderr,none\": 0.03109668818482536\n\
\ },\n \"leaderboard_bbh_disambiguation_qa\": {\n \"alias\"\
: \" - leaderboard_bbh_disambiguation_qa\",\n \"acc_norm,none\": 0.656,\n\
\ \"acc_norm_stderr,none\": 0.03010450339231644\n },\n \
\ \"leaderboard_bbh_formal_fallacies\": {\n \"alias\": \" - leaderboard_bbh_formal_fallacies\"\
,\n \"acc_norm,none\": 0.62,\n \"acc_norm_stderr,none\": 0.030760116042626098\n\
\ },\n \"leaderboard_bbh_geometric_shapes\": {\n \"alias\"\
: \" - leaderboard_bbh_geometric_shapes\",\n \"acc_norm,none\": 0.52,\n\
\ \"acc_norm_stderr,none\": 0.03166085340849512\n },\n \
\ \"leaderboard_bbh_hyperbaton\": {\n \"alias\": \" - leaderboard_bbh_hyperbaton\"\
,\n \"acc_norm,none\": 0.692,\n \"acc_norm_stderr,none\":\
\ 0.02925692860650181\n },\n \"leaderboard_bbh_logical_deduction_five_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_five_objects\"\
,\n \"acc_norm,none\": 0.576,\n \"acc_norm_stderr,none\":\
\ 0.03131803437491622\n },\n \"leaderboard_bbh_logical_deduction_seven_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_seven_objects\"\
,\n \"acc_norm,none\": 0.572,\n \"acc_norm_stderr,none\":\
\ 0.031355968923772626\n },\n \"leaderboard_bbh_logical_deduction_three_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_three_objects\"\
,\n \"acc_norm,none\": 0.836,\n \"acc_norm_stderr,none\":\
\ 0.023465261002076715\n },\n \"leaderboard_bbh_movie_recommendation\"\
: {\n \"alias\": \" - leaderboard_bbh_movie_recommendation\",\n \
\ \"acc_norm,none\": 0.58,\n \"acc_norm_stderr,none\": 0.03127799950463661\n\
\ },\n \"leaderboard_bbh_navigate\": {\n \"alias\": \"\
\ - leaderboard_bbh_navigate\",\n \"acc_norm,none\": 0.676,\n \
\ \"acc_norm_stderr,none\": 0.029658294924545567\n },\n \"leaderboard_bbh_object_counting\"\
: {\n \"alias\": \" - leaderboard_bbh_object_counting\",\n \
\ \"acc_norm,none\": 0.284,\n \"acc_norm_stderr,none\": 0.02857695873043744\n\
\ },\n \"leaderboard_bbh_penguins_in_a_table\": {\n \"\
alias\": \" - leaderboard_bbh_penguins_in_a_table\",\n \"acc_norm,none\"\
: 0.5958904109589042,\n \"acc_norm_stderr,none\": 0.0407519857003932\n\
\ },\n \"leaderboard_bbh_reasoning_about_colored_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_reasoning_about_colored_objects\",\n\
\ \"acc_norm,none\": 0.684,\n \"acc_norm_stderr,none\": 0.02946265759857865\n\
\ },\n \"leaderboard_bbh_ruin_names\": {\n \"alias\": \"\
\ - leaderboard_bbh_ruin_names\",\n \"acc_norm,none\": 0.808,\n \
\ \"acc_norm_stderr,none\": 0.02496069198917196\n },\n \"leaderboard_bbh_salient_translation_error_detection\"\
: {\n \"alias\": \" - leaderboard_bbh_salient_translation_error_detection\"\
,\n \"acc_norm,none\": 0.592,\n \"acc_norm_stderr,none\":\
\ 0.03114520984654851\n },\n \"leaderboard_bbh_snarks\": {\n \
\ \"alias\": \" - leaderboard_bbh_snarks\",\n \"acc_norm,none\"\
: 0.6966292134831461,\n \"acc_norm_stderr,none\": 0.03455421944400101\n\
\ },\n \"leaderboard_bbh_sports_understanding\": {\n \"\
alias\": \" - leaderboard_bbh_sports_understanding\",\n \"acc_norm,none\"\
: 0.832,\n \"acc_norm_stderr,none\": 0.023692813205492536\n },\n\
\ \"leaderboard_bbh_temporal_sequences\": {\n \"alias\": \" -\
\ leaderboard_bbh_temporal_sequences\",\n \"acc_norm,none\": 0.844,\n\
\ \"acc_norm_stderr,none\": 0.022995023034068682\n },\n \
\ \"leaderboard_bbh_tracking_shuffled_objects_five_objects\": {\n \"\
alias\": \" - leaderboard_bbh_tracking_shuffled_objects_five_objects\",\n \
\ \"acc_norm,none\": 0.296,\n \"acc_norm_stderr,none\": 0.028928939388379694\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
,\n \"acc_norm,none\": 0.304,\n \"acc_norm_stderr,none\":\
\ 0.02915021337415965\n },\n \"leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
,\n \"acc_norm,none\": 0.364,\n \"acc_norm_stderr,none\":\
\ 0.030491555220405475\n },\n \"leaderboard_bbh_web_of_lies\": {\n\
\ \"alias\": \" - leaderboard_bbh_web_of_lies\",\n \"acc_norm,none\"\
: 0.512,\n \"acc_norm_stderr,none\": 0.03167708558254714\n },\n\
\ \"leaderboard_gpqa\": {\n \"acc_norm,none\": 0.3573825503355705,\n\
\ \"acc_norm_stderr,none\": 0.013891832771494425,\n \"alias\"\
: \" - leaderboard_gpqa\"\n },\n \"leaderboard_gpqa_diamond\": {\n\
\ \"alias\": \" - leaderboard_gpqa_diamond\",\n \"acc_norm,none\"\
: 0.3888888888888889,\n \"acc_norm_stderr,none\": 0.03473279590836963\n\
\ },\n \"leaderboard_gpqa_extended\": {\n \"alias\": \"\
\ - leaderboard_gpqa_extended\",\n \"acc_norm,none\": 0.3534798534798535,\n\
\ \"acc_norm_stderr,none\": 0.020477414126085836\n },\n \
\ \"leaderboard_gpqa_main\": {\n \"alias\": \" - leaderboard_gpqa_main\"\
,\n \"acc_norm,none\": 0.3482142857142857,\n \"acc_norm_stderr,none\"\
: 0.022533152157915175\n },\n \"leaderboard_ifeval\": {\n \
\ \"alias\": \" - leaderboard_ifeval\",\n \"prompt_level_strict_acc,none\"\
: 0.744916820702403,\n \"prompt_level_strict_acc_stderr,none\": 0.018758491950414184,\n\
\ \"inst_level_strict_acc,none\": 0.8237410071942446,\n \"\
inst_level_strict_acc_stderr,none\": \"N/A\",\n \"prompt_level_loose_acc,none\"\
: 0.767097966728281,\n \"prompt_level_loose_acc_stderr,none\": 0.01818926607409182,\n\
\ \"inst_level_loose_acc,none\": 0.842925659472422,\n \"inst_level_loose_acc_stderr,none\"\
: \"N/A\"\n },\n \"leaderboard_math_hard\": {\n \"exact_match,none\"\
: 0.02039274924471299,\n \"exact_match_stderr,none\": 0.003847017757728751,\n\
\ \"alias\": \" - leaderboard_math_hard\"\n },\n \"leaderboard_math_algebra_hard\"\
: {\n \"alias\": \" - leaderboard_math_algebra_hard\",\n \
\ \"exact_match,none\": 0.05537459283387622,\n \"exact_match_stderr,none\"\
: 0.01307447837002421\n },\n \"leaderboard_math_counting_and_prob_hard\"\
: {\n \"alias\": \" - leaderboard_math_counting_and_prob_hard\",\n \
\ \"exact_match,none\": 0.0,\n \"exact_match_stderr,none\"\
: 0.0\n },\n \"leaderboard_math_geometry_hard\": {\n \"\
alias\": \" - leaderboard_math_geometry_hard\",\n \"exact_match,none\"\
: 0.007575757575757576,\n \"exact_match_stderr,none\": 0.007575757575757577\n\
\ },\n \"leaderboard_math_intermediate_algebra_hard\": {\n \
\ \"alias\": \" - leaderboard_math_intermediate_algebra_hard\",\n \
\ \"exact_match,none\": 0.0,\n \"exact_match_stderr,none\": 0.0\n \
\ },\n \"leaderboard_math_num_theory_hard\": {\n \"alias\"\
: \" - leaderboard_math_num_theory_hard\",\n \"exact_match,none\": 0.01948051948051948,\n\
\ \"exact_match_stderr,none\": 0.011173331005571083\n },\n \
\ \"leaderboard_math_prealgebra_hard\": {\n \"alias\": \" - leaderboard_math_prealgebra_hard\"\
,\n \"exact_match,none\": 0.031088082901554404,\n \"exact_match_stderr,none\"\
: 0.012525310625527019\n },\n \"leaderboard_math_precalculus_hard\"\
: {\n \"alias\": \" - leaderboard_math_precalculus_hard\",\n \
\ \"exact_match,none\": 0.0,\n \"exact_match_stderr,none\": 0.0\n\
\ },\n \"leaderboard_mmlu_pro\": {\n \"alias\": \" - leaderboard_mmlu_pro\"\
,\n \"acc,none\": 0.43326130319148937,\n \"acc_stderr,none\"\
: 0.004517680579088188\n },\n \"leaderboard_musr\": {\n \
\ \"acc_norm,none\": 0.41005291005291006,\n \"acc_norm_stderr,none\"\
: 0.017490273970870246,\n \"alias\": \" - leaderboard_musr\"\n \
\ },\n \"leaderboard_musr_murder_mysteries\": {\n \"alias\":\
\ \" - leaderboard_musr_murder_mysteries\",\n \"acc_norm,none\": 0.54,\n\
\ \"acc_norm_stderr,none\": 0.031584653891499004\n },\n \
\ \"leaderboard_musr_object_placements\": {\n \"alias\": \" - leaderboard_musr_object_placements\"\
,\n \"acc_norm,none\": 0.27734375,\n \"acc_norm_stderr,none\"\
: 0.02803528549328419\n },\n \"leaderboard_musr_team_allocation\"\
: {\n \"alias\": \" - leaderboard_musr_team_allocation\",\n \
\ \"acc_norm,none\": 0.416,\n \"acc_norm_stderr,none\": 0.031235856237014505\n\
\ }\n },\n \"leaderboard\": {\n \"prompt_level_strict_acc,none\"\
: 0.744916820702403,\n \"prompt_level_strict_acc_stderr,none\": 0.018758491950414184,\n\
\ \"acc,none\": 0.43326130319148937,\n \"acc_stderr,none\": 0.004517680579088188,\n\
\ \"acc_norm,none\": 0.54987676741471,\n \"acc_norm_stderr,none\"\
: 0.005289250250282228,\n \"prompt_level_loose_acc,none\": 0.767097966728281,\n\
\ \"prompt_level_loose_acc_stderr,none\": 0.01818926607409182,\n \"\
exact_match,none\": 0.02039274924471299,\n \"exact_match_stderr,none\": 0.003847017757728751,\n\
\ \"inst_level_loose_acc,none\": 0.842925659472422,\n \"inst_level_loose_acc_stderr,none\"\
: \"N/A\",\n \"inst_level_strict_acc,none\": 0.8237410071942446,\n \
\ \"inst_level_strict_acc_stderr,none\": \"N/A\",\n \"alias\": \"leaderboard\"\
\n },\n \"leaderboard_bbh\": {\n \"acc_norm,none\": 0.6080541572643638,\n\
\ \"acc_norm_stderr,none\": 0.0060467875310710436,\n \"alias\": \"\
\ - leaderboard_bbh\"\n },\n \"leaderboard_bbh_boolean_expressions\": {\n\
\ \"alias\": \" - leaderboard_bbh_boolean_expressions\",\n \"acc_norm,none\"\
: 0.852,\n \"acc_norm_stderr,none\": 0.022503547243806186\n },\n \"\
leaderboard_bbh_causal_judgement\": {\n \"alias\": \" - leaderboard_bbh_causal_judgement\"\
,\n \"acc_norm,none\": 0.6310160427807486,\n \"acc_norm_stderr,none\"\
: 0.03538078548260318\n },\n \"leaderboard_bbh_date_understanding\": {\n \
\ \"alias\": \" - leaderboard_bbh_date_understanding\",\n \"acc_norm,none\"\
: 0.596,\n \"acc_norm_stderr,none\": 0.03109668818482536\n },\n \"\
leaderboard_bbh_disambiguation_qa\": {\n \"alias\": \" - leaderboard_bbh_disambiguation_qa\"\
,\n \"acc_norm,none\": 0.656,\n \"acc_norm_stderr,none\": 0.03010450339231644\n\
\ },\n \"leaderboard_bbh_formal_fallacies\": {\n \"alias\": \" - leaderboard_bbh_formal_fallacies\"\
,\n \"acc_norm,none\": 0.62,\n \"acc_norm_stderr,none\": 0.030760116042626098\n\
\ },\n \"leaderboard_bbh_geometric_shapes\": {\n \"alias\": \" - leaderboard_bbh_geometric_shapes\"\
,\n \"acc_norm,none\": 0.52,\n \"acc_norm_stderr,none\": 0.03166085340849512\n\
\ },\n \"leaderboard_bbh_hyperbaton\": {\n \"alias\": \" - leaderboard_bbh_hyperbaton\"\
,\n \"acc_norm,none\": 0.692,\n \"acc_norm_stderr,none\": 0.02925692860650181\n\
\ },\n \"leaderboard_bbh_logical_deduction_five_objects\": {\n \"alias\"\
: \" - leaderboard_bbh_logical_deduction_five_objects\",\n \"acc_norm,none\"\
: 0.576,\n \"acc_norm_stderr,none\": 0.03131803437491622\n },\n \"\
leaderboard_bbh_logical_deduction_seven_objects\": {\n \"alias\": \" - leaderboard_bbh_logical_deduction_seven_objects\"\
,\n \"acc_norm,none\": 0.572,\n \"acc_norm_stderr,none\": 0.031355968923772626\n\
\ },\n \"leaderboard_bbh_logical_deduction_three_objects\": {\n \"\
alias\": \" - leaderboard_bbh_logical_deduction_three_objects\",\n \"acc_norm,none\"\
: 0.836,\n \"acc_norm_stderr,none\": 0.023465261002076715\n },\n \"\
leaderboard_bbh_movie_recommendation\": {\n \"alias\": \" - leaderboard_bbh_movie_recommendation\"\
,\n \"acc_norm,none\": 0.58,\n \"acc_norm_stderr,none\": 0.03127799950463661\n\
\ },\n \"leaderboard_bbh_navigate\": {\n \"alias\": \" - leaderboard_bbh_navigate\"\
,\n \"acc_norm,none\": 0.676,\n \"acc_norm_stderr,none\": 0.029658294924545567\n\
\ },\n \"leaderboard_bbh_object_counting\": {\n \"alias\": \" - leaderboard_bbh_object_counting\"\
,\n \"acc_norm,none\": 0.284,\n \"acc_norm_stderr,none\": 0.02857695873043744\n\
\ },\n \"leaderboard_bbh_penguins_in_a_table\": {\n \"alias\": \" \
\ - leaderboard_bbh_penguins_in_a_table\",\n \"acc_norm,none\": 0.5958904109589042,\n\
\ \"acc_norm_stderr,none\": 0.0407519857003932\n },\n \"leaderboard_bbh_reasoning_about_colored_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_reasoning_about_colored_objects\"\
,\n \"acc_norm,none\": 0.684,\n \"acc_norm_stderr,none\": 0.02946265759857865\n\
\ },\n \"leaderboard_bbh_ruin_names\": {\n \"alias\": \" - leaderboard_bbh_ruin_names\"\
,\n \"acc_norm,none\": 0.808,\n \"acc_norm_stderr,none\": 0.02496069198917196\n\
\ },\n \"leaderboard_bbh_salient_translation_error_detection\": {\n \
\ \"alias\": \" - leaderboard_bbh_salient_translation_error_detection\",\n \
\ \"acc_norm,none\": 0.592,\n \"acc_norm_stderr,none\": 0.03114520984654851\n\
\ },\n \"leaderboard_bbh_snarks\": {\n \"alias\": \" - leaderboard_bbh_snarks\"\
,\n \"acc_norm,none\": 0.6966292134831461,\n \"acc_norm_stderr,none\"\
: 0.03455421944400101\n },\n \"leaderboard_bbh_sports_understanding\": {\n\
\ \"alias\": \" - leaderboard_bbh_sports_understanding\",\n \"acc_norm,none\"\
: 0.832,\n \"acc_norm_stderr,none\": 0.023692813205492536\n },\n \"\
leaderboard_bbh_temporal_sequences\": {\n \"alias\": \" - leaderboard_bbh_temporal_sequences\"\
,\n \"acc_norm,none\": 0.844,\n \"acc_norm_stderr,none\": 0.022995023034068682\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_five_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_five_objects\"\
,\n \"acc_norm,none\": 0.296,\n \"acc_norm_stderr,none\": 0.028928939388379694\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_seven_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
,\n \"acc_norm,none\": 0.304,\n \"acc_norm_stderr,none\": 0.02915021337415965\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_three_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
,\n \"acc_norm,none\": 0.364,\n \"acc_norm_stderr,none\": 0.030491555220405475\n\
\ },\n \"leaderboard_bbh_web_of_lies\": {\n \"alias\": \" - leaderboard_bbh_web_of_lies\"\
,\n \"acc_norm,none\": 0.512,\n \"acc_norm_stderr,none\": 0.03167708558254714\n\
\ },\n \"leaderboard_gpqa\": {\n \"acc_norm,none\": 0.3573825503355705,\n\
\ \"acc_norm_stderr,none\": 0.013891832771494425,\n \"alias\": \"\
\ - leaderboard_gpqa\"\n },\n \"leaderboard_gpqa_diamond\": {\n \"\
alias\": \" - leaderboard_gpqa_diamond\",\n \"acc_norm,none\": 0.3888888888888889,\n\
\ \"acc_norm_stderr,none\": 0.03473279590836963\n },\n \"leaderboard_gpqa_extended\"\
: {\n \"alias\": \" - leaderboard_gpqa_extended\",\n \"acc_norm,none\"\
: 0.3534798534798535,\n \"acc_norm_stderr,none\": 0.020477414126085836\n\
\ },\n \"leaderboard_gpqa_main\": {\n \"alias\": \" - leaderboard_gpqa_main\"\
,\n \"acc_norm,none\": 0.3482142857142857,\n \"acc_norm_stderr,none\"\
: 0.022533152157915175\n },\n \"leaderboard_ifeval\": {\n \"alias\"\
: \" - leaderboard_ifeval\",\n \"prompt_level_strict_acc,none\": 0.744916820702403,\n\
\ \"prompt_level_strict_acc_stderr,none\": 0.018758491950414184,\n \
\ \"inst_level_strict_acc,none\": 0.8237410071942446,\n \"inst_level_strict_acc_stderr,none\"\
: \"N/A\",\n \"prompt_level_loose_acc,none\": 0.767097966728281,\n \
\ \"prompt_level_loose_acc_stderr,none\": 0.01818926607409182,\n \"inst_level_loose_acc,none\"\
: 0.842925659472422,\n \"inst_level_loose_acc_stderr,none\": \"N/A\"\n \
\ },\n \"leaderboard_math_hard\": {\n \"exact_match,none\": 0.02039274924471299,\n\
\ \"exact_match_stderr,none\": 0.003847017757728751,\n \"alias\":\
\ \" - leaderboard_math_hard\"\n },\n \"leaderboard_math_algebra_hard\": {\n\
\ \"alias\": \" - leaderboard_math_algebra_hard\",\n \"exact_match,none\"\
: 0.05537459283387622,\n \"exact_match_stderr,none\": 0.01307447837002421\n\
\ },\n \"leaderboard_math_counting_and_prob_hard\": {\n \"alias\":\
\ \" - leaderboard_math_counting_and_prob_hard\",\n \"exact_match,none\"\
: 0.0,\n \"exact_match_stderr,none\": 0.0\n },\n \"leaderboard_math_geometry_hard\"\
: {\n \"alias\": \" - leaderboard_math_geometry_hard\",\n \"exact_match,none\"\
: 0.007575757575757576,\n \"exact_match_stderr,none\": 0.007575757575757577\n\
\ },\n \"leaderboard_math_intermediate_algebra_hard\": {\n \"alias\"\
: \" - leaderboard_math_intermediate_algebra_hard\",\n \"exact_match,none\"\
: 0.0,\n \"exact_match_stderr,none\": 0.0\n },\n \"leaderboard_math_num_theory_hard\"\
: {\n \"alias\": \" - leaderboard_math_num_theory_hard\",\n \"exact_match,none\"\
: 0.01948051948051948,\n \"exact_match_stderr,none\": 0.011173331005571083\n\
\ },\n \"leaderboard_math_prealgebra_hard\": {\n \"alias\": \" - leaderboard_math_prealgebra_hard\"\
,\n \"exact_match,none\": 0.031088082901554404,\n \"exact_match_stderr,none\"\
: 0.012525310625527019\n },\n \"leaderboard_math_precalculus_hard\": {\n \
\ \"alias\": \" - leaderboard_math_precalculus_hard\",\n \"exact_match,none\"\
: 0.0,\n \"exact_match_stderr,none\": 0.0\n },\n \"leaderboard_mmlu_pro\"\
: {\n \"alias\": \" - leaderboard_mmlu_pro\",\n \"acc,none\": 0.43326130319148937,\n\
\ \"acc_stderr,none\": 0.004517680579088188\n },\n \"leaderboard_musr\"\
: {\n \"acc_norm,none\": 0.41005291005291006,\n \"acc_norm_stderr,none\"\
: 0.017490273970870246,\n \"alias\": \" - leaderboard_musr\"\n },\n \
\ \"leaderboard_musr_murder_mysteries\": {\n \"alias\": \" - leaderboard_musr_murder_mysteries\"\
,\n \"acc_norm,none\": 0.54,\n \"acc_norm_stderr,none\": 0.031584653891499004\n\
\ },\n \"leaderboard_musr_object_placements\": {\n \"alias\": \" -\
\ leaderboard_musr_object_placements\",\n \"acc_norm,none\": 0.27734375,\n\
\ \"acc_norm_stderr,none\": 0.02803528549328419\n },\n \"leaderboard_musr_team_allocation\"\
: {\n \"alias\": \" - leaderboard_musr_team_allocation\",\n \"acc_norm,none\"\
: 0.416,\n \"acc_norm_stderr,none\": 0.031235856237014505\n }\n}\n```"
repo_url: https://huggingface.co/zelk12/MT3-Gen2-gemma-2-9B
leaderboard_url: ''
point_of_contact: ''
configs:
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_boolean_expressions
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_boolean_expressions_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_boolean_expressions_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_causal_judgement
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_causal_judgement_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_causal_judgement_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_date_understanding
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_date_understanding_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_date_understanding_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_disambiguation_qa
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_disambiguation_qa_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_disambiguation_qa_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_formal_fallacies
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_formal_fallacies_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_formal_fallacies_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_geometric_shapes
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_geometric_shapes_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_geometric_shapes_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_hyperbaton
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_hyperbaton_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_hyperbaton_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_logical_deduction_five_objects
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_logical_deduction_five_objects_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_five_objects_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_logical_deduction_seven_objects
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_logical_deduction_seven_objects_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_seven_objects_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_logical_deduction_three_objects
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_logical_deduction_three_objects_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_three_objects_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_movie_recommendation
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_movie_recommendation_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_movie_recommendation_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_navigate
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_navigate_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_navigate_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_object_counting
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_object_counting_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_object_counting_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_penguins_in_a_table
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_penguins_in_a_table_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_penguins_in_a_table_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_reasoning_about_colored_objects
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_reasoning_about_colored_objects_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_reasoning_about_colored_objects_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_ruin_names
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_ruin_names_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_ruin_names_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_salient_translation_error_detection
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_salient_translation_error_detection_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_salient_translation_error_detection_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_snarks
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_snarks_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_snarks_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_sports_understanding
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_sports_understanding_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_sports_understanding_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_temporal_sequences
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_temporal_sequences_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_temporal_sequences_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_tracking_shuffled_objects_five_objects
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_five_objects_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_five_objects_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_tracking_shuffled_objects_seven_objects
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_seven_objects_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_seven_objects_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_tracking_shuffled_objects_three_objects
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_three_objects_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_three_objects_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_web_of_lies
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_bbh_web_of_lies_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_web_of_lies_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_gpqa_diamond
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_gpqa_diamond_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_diamond_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_gpqa_extended
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_gpqa_extended_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_extended_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_gpqa_main
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_gpqa_main_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_main_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_ifeval
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_ifeval_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_ifeval_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_math_algebra_hard
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_math_algebra_hard_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_algebra_hard_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_math_counting_and_prob_hard
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_math_counting_and_prob_hard_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_counting_and_prob_hard_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_math_geometry_hard
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_math_geometry_hard_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_geometry_hard_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_math_intermediate_algebra_hard
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_math_intermediate_algebra_hard_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_intermediate_algebra_hard_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_math_num_theory_hard
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_math_num_theory_hard_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_num_theory_hard_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_math_prealgebra_hard
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_math_prealgebra_hard_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_prealgebra_hard_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_math_precalculus_hard
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_math_precalculus_hard_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_precalculus_hard_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_mmlu_pro
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_mmlu_pro_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_mmlu_pro_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_musr_murder_mysteries
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_musr_murder_mysteries_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_murder_mysteries_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_musr_object_placements
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_musr_object_placements_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_object_placements_2024-11-21T18-43-33.559212.jsonl'
- config_name: zelk12__MT3-Gen2-gemma-2-9B__leaderboard_musr_team_allocation
data_files:
- split: 2024_11_21T18_43_33.559212
path:
- '**/samples_leaderboard_musr_team_allocation_2024-11-21T18-43-33.559212.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_team_allocation_2024-11-21T18-43-33.559212.jsonl'
---
# Dataset Card for Evaluation run of zelk12/MT3-Gen2-gemma-2-9B
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [zelk12/MT3-Gen2-gemma-2-9B](https://huggingface.co/zelk12/MT3-Gen2-gemma-2-9B)
The dataset is composed of 38 configuration(s), each one corresponding to one of the evaluated tasks.
The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "latest" split always points to the most recent results.
An additional configuration "results" stores all the aggregated results of the run.
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset(
"open-llm-leaderboard/zelk12__MT3-Gen2-gemma-2-9B-details",
name="zelk12__MT3-Gen2-gemma-2-9B__leaderboard_bbh_boolean_expressions",
split="latest"
)
```
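Once loaded, `data` behaves like any other Hugging Face `datasets` split, so a quick sanity check might look like the sketch below (the exact column names depend on the task and on what the evaluation harness logged for it):
```python
# Minimal inspection sketch; assumes `data` was loaded as in the snippet above.
print(data.num_rows)      # number of logged samples for this task
print(data.column_names)  # fields recorded by the evaluation harness
print(data[0])            # first logged sample as a plain dict
```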
## Latest results
These are the [latest results from run 2024-11-21T18-43-33.559212](https://huggingface.co/datasets/open-llm-leaderboard/zelk12__MT3-Gen2-gemma-2-9B-details/blob/main/zelk12__MT3-Gen2-gemma-2-9B/results_2024-11-21T18-43-33.559212.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each one in the results and in the "latest" split of each eval):
```python
{
"all": {
"leaderboard": {
"prompt_level_strict_acc,none": 0.744916820702403,
"prompt_level_strict_acc_stderr,none": 0.018758491950414184,
"acc,none": 0.43326130319148937,
"acc_stderr,none": 0.004517680579088188,
"acc_norm,none": 0.54987676741471,
"acc_norm_stderr,none": 0.005289250250282228,
"prompt_level_loose_acc,none": 0.767097966728281,
"prompt_level_loose_acc_stderr,none": 0.01818926607409182,
"exact_match,none": 0.02039274924471299,
"exact_match_stderr,none": 0.003847017757728751,
"inst_level_loose_acc,none": 0.842925659472422,
"inst_level_loose_acc_stderr,none": "N/A",
"inst_level_strict_acc,none": 0.8237410071942446,
"inst_level_strict_acc_stderr,none": "N/A",
"alias": "leaderboard"
},
"leaderboard_bbh": {
"acc_norm,none": 0.6080541572643638,
"acc_norm_stderr,none": 0.0060467875310710436,
"alias": " - leaderboard_bbh"
},
"leaderboard_bbh_boolean_expressions": {
"alias": " - leaderboard_bbh_boolean_expressions",
"acc_norm,none": 0.852,
"acc_norm_stderr,none": 0.022503547243806186
},
"leaderboard_bbh_causal_judgement": {
"alias": " - leaderboard_bbh_causal_judgement",
"acc_norm,none": 0.6310160427807486,
"acc_norm_stderr,none": 0.03538078548260318
},
"leaderboard_bbh_date_understanding": {
"alias": " - leaderboard_bbh_date_understanding",
"acc_norm,none": 0.596,
"acc_norm_stderr,none": 0.03109668818482536
},
"leaderboard_bbh_disambiguation_qa": {
"alias": " - leaderboard_bbh_disambiguation_qa",
"acc_norm,none": 0.656,
"acc_norm_stderr,none": 0.03010450339231644
},
"leaderboard_bbh_formal_fallacies": {
"alias": " - leaderboard_bbh_formal_fallacies",
"acc_norm,none": 0.62,
"acc_norm_stderr,none": 0.030760116042626098
},
"leaderboard_bbh_geometric_shapes": {
"alias": " - leaderboard_bbh_geometric_shapes",
"acc_norm,none": 0.52,
"acc_norm_stderr,none": 0.03166085340849512
},
"leaderboard_bbh_hyperbaton": {
"alias": " - leaderboard_bbh_hyperbaton",
"acc_norm,none": 0.692,
"acc_norm_stderr,none": 0.02925692860650181
},
"leaderboard_bbh_logical_deduction_five_objects": {
"alias": " - leaderboard_bbh_logical_deduction_five_objects",
"acc_norm,none": 0.576,
"acc_norm_stderr,none": 0.03131803437491622
},
"leaderboard_bbh_logical_deduction_seven_objects": {
"alias": " - leaderboard_bbh_logical_deduction_seven_objects",
"acc_norm,none": 0.572,
"acc_norm_stderr,none": 0.031355968923772626
},
"leaderboard_bbh_logical_deduction_three_objects": {
"alias": " - leaderboard_bbh_logical_deduction_three_objects",
"acc_norm,none": 0.836,
"acc_norm_stderr,none": 0.023465261002076715
},
"leaderboard_bbh_movie_recommendation": {
"alias": " - leaderboard_bbh_movie_recommendation",
"acc_norm,none": 0.58,
"acc_norm_stderr,none": 0.03127799950463661
},
"leaderboard_bbh_navigate": {
"alias": " - leaderboard_bbh_navigate",
"acc_norm,none": 0.676,
"acc_norm_stderr,none": 0.029658294924545567
},
"leaderboard_bbh_object_counting": {
"alias": " - leaderboard_bbh_object_counting",
"acc_norm,none": 0.284,
"acc_norm_stderr,none": 0.02857695873043744
},
"leaderboard_bbh_penguins_in_a_table": {
"alias": " - leaderboard_bbh_penguins_in_a_table",
"acc_norm,none": 0.5958904109589042,
"acc_norm_stderr,none": 0.0407519857003932
},
"leaderboard_bbh_reasoning_about_colored_objects": {
"alias": " - leaderboard_bbh_reasoning_about_colored_objects",
"acc_norm,none": 0.684,
"acc_norm_stderr,none": 0.02946265759857865
},
"leaderboard_bbh_ruin_names": {
"alias": " - leaderboard_bbh_ruin_names",
"acc_norm,none": 0.808,
"acc_norm_stderr,none": 0.02496069198917196
},
"leaderboard_bbh_salient_translation_error_detection": {
"alias": " - leaderboard_bbh_salient_translation_error_detection",
"acc_norm,none": 0.592,
"acc_norm_stderr,none": 0.03114520984654851
},
"leaderboard_bbh_snarks": {
"alias": " - leaderboard_bbh_snarks",
"acc_norm,none": 0.6966292134831461,
"acc_norm_stderr,none": 0.03455421944400101
},
"leaderboard_bbh_sports_understanding": {
"alias": " - leaderboard_bbh_sports_understanding",
"acc_norm,none": 0.832,
"acc_norm_stderr,none": 0.023692813205492536
},
"leaderboard_bbh_temporal_sequences": {
"alias": " - leaderboard_bbh_temporal_sequences",
"acc_norm,none": 0.844,
"acc_norm_stderr,none": 0.022995023034068682
},
"leaderboard_bbh_tracking_shuffled_objects_five_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_five_objects",
"acc_norm,none": 0.296,
"acc_norm_stderr,none": 0.028928939388379694
},
"leaderboard_bbh_tracking_shuffled_objects_seven_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_seven_objects",
"acc_norm,none": 0.304,
"acc_norm_stderr,none": 0.02915021337415965
},
"leaderboard_bbh_tracking_shuffled_objects_three_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_three_objects",
"acc_norm,none": 0.364,
"acc_norm_stderr,none": 0.030491555220405475
},
"leaderboard_bbh_web_of_lies": {
"alias": " - leaderboard_bbh_web_of_lies",
"acc_norm,none": 0.512,
"acc_norm_stderr,none": 0.03167708558254714
},
"leaderboard_gpqa": {
"acc_norm,none": 0.3573825503355705,
"acc_norm_stderr,none": 0.013891832771494425,
"alias": " - leaderboard_gpqa"
},
"leaderboard_gpqa_diamond": {
"alias": " - leaderboard_gpqa_diamond",
"acc_norm,none": 0.3888888888888889,
"acc_norm_stderr,none": 0.03473279590836963
},
"leaderboard_gpqa_extended": {
"alias": " - leaderboard_gpqa_extended",
"acc_norm,none": 0.3534798534798535,
"acc_norm_stderr,none": 0.020477414126085836
},
"leaderboard_gpqa_main": {
"alias": " - leaderboard_gpqa_main",
"acc_norm,none": 0.3482142857142857,
"acc_norm_stderr,none": 0.022533152157915175
},
"leaderboard_ifeval": {
"alias": " - leaderboard_ifeval",
"prompt_level_strict_acc,none": 0.744916820702403,
"prompt_level_strict_acc_stderr,none": 0.018758491950414184,
"inst_level_strict_acc,none": 0.8237410071942446,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.767097966728281,
"prompt_level_loose_acc_stderr,none": 0.01818926607409182,
"inst_level_loose_acc,none": 0.842925659472422,
"inst_level_loose_acc_stderr,none": "N/A"
},
"leaderboard_math_hard": {
"exact_match,none": 0.02039274924471299,
"exact_match_stderr,none": 0.003847017757728751,
"alias": " - leaderboard_math_hard"
},
"leaderboard_math_algebra_hard": {
"alias": " - leaderboard_math_algebra_hard",
"exact_match,none": 0.05537459283387622,
"exact_match_stderr,none": 0.01307447837002421
},
"leaderboard_math_counting_and_prob_hard": {
"alias": " - leaderboard_math_counting_and_prob_hard",
"exact_match,none": 0.0,
"exact_match_stderr,none": 0.0
},
"leaderboard_math_geometry_hard": {
"alias": " - leaderboard_math_geometry_hard",
"exact_match,none": 0.007575757575757576,
"exact_match_stderr,none": 0.007575757575757577
},
"leaderboard_math_intermediate_algebra_hard": {
"alias": " - leaderboard_math_intermediate_algebra_hard",
"exact_match,none": 0.0,
"exact_match_stderr,none": 0.0
},
"leaderboard_math_num_theory_hard": {
"alias": " - leaderboard_math_num_theory_hard",
"exact_match,none": 0.01948051948051948,
"exact_match_stderr,none": 0.011173331005571083
},
"leaderboard_math_prealgebra_hard": {
"alias": " - leaderboard_math_prealgebra_hard",
"exact_match,none": 0.031088082901554404,
"exact_match_stderr,none": 0.012525310625527019
},
"leaderboard_math_precalculus_hard": {
"alias": " - leaderboard_math_precalculus_hard",
"exact_match,none": 0.0,
"exact_match_stderr,none": 0.0
},
"leaderboard_mmlu_pro": {
"alias": " - leaderboard_mmlu_pro",
"acc,none": 0.43326130319148937,
"acc_stderr,none": 0.004517680579088188
},
"leaderboard_musr": {
"acc_norm,none": 0.41005291005291006,
"acc_norm_stderr,none": 0.017490273970870246,
"alias": " - leaderboard_musr"
},
"leaderboard_musr_murder_mysteries": {
"alias": " - leaderboard_musr_murder_mysteries",
"acc_norm,none": 0.54,
"acc_norm_stderr,none": 0.031584653891499004
},
"leaderboard_musr_object_placements": {
"alias": " - leaderboard_musr_object_placements",
"acc_norm,none": 0.27734375,
"acc_norm_stderr,none": 0.02803528549328419
},
"leaderboard_musr_team_allocation": {
"alias": " - leaderboard_musr_team_allocation",
"acc_norm,none": 0.416,
"acc_norm_stderr,none": 0.031235856237014505
}
},
"leaderboard": {
"prompt_level_strict_acc,none": 0.744916820702403,
"prompt_level_strict_acc_stderr,none": 0.018758491950414184,
"acc,none": 0.43326130319148937,
"acc_stderr,none": 0.004517680579088188,
"acc_norm,none": 0.54987676741471,
"acc_norm_stderr,none": 0.005289250250282228,
"prompt_level_loose_acc,none": 0.767097966728281,
"prompt_level_loose_acc_stderr,none": 0.01818926607409182,
"exact_match,none": 0.02039274924471299,
"exact_match_stderr,none": 0.003847017757728751,
"inst_level_loose_acc,none": 0.842925659472422,
"inst_level_loose_acc_stderr,none": "N/A",
"inst_level_strict_acc,none": 0.8237410071942446,
"inst_level_strict_acc_stderr,none": "N/A",
"alias": "leaderboard"
},
"leaderboard_bbh": {
"acc_norm,none": 0.6080541572643638,
"acc_norm_stderr,none": 0.0060467875310710436,
"alias": " - leaderboard_bbh"
},
"leaderboard_bbh_boolean_expressions": {
"alias": " - leaderboard_bbh_boolean_expressions",
"acc_norm,none": 0.852,
"acc_norm_stderr,none": 0.022503547243806186
},
"leaderboard_bbh_causal_judgement": {
"alias": " - leaderboard_bbh_causal_judgement",
"acc_norm,none": 0.6310160427807486,
"acc_norm_stderr,none": 0.03538078548260318
},
"leaderboard_bbh_date_understanding": {
"alias": " - leaderboard_bbh_date_understanding",
"acc_norm,none": 0.596,
"acc_norm_stderr,none": 0.03109668818482536
},
"leaderboard_bbh_disambiguation_qa": {
"alias": " - leaderboard_bbh_disambiguation_qa",
"acc_norm,none": 0.656,
"acc_norm_stderr,none": 0.03010450339231644
},
"leaderboard_bbh_formal_fallacies": {
"alias": " - leaderboard_bbh_formal_fallacies",
"acc_norm,none": 0.62,
"acc_norm_stderr,none": 0.030760116042626098
},
"leaderboard_bbh_geometric_shapes": {
"alias": " - leaderboard_bbh_geometric_shapes",
"acc_norm,none": 0.52,
"acc_norm_stderr,none": 0.03166085340849512
},
"leaderboard_bbh_hyperbaton": {
"alias": " - leaderboard_bbh_hyperbaton",
"acc_norm,none": 0.692,
"acc_norm_stderr,none": 0.02925692860650181
},
"leaderboard_bbh_logical_deduction_five_objects": {
"alias": " - leaderboard_bbh_logical_deduction_five_objects",
"acc_norm,none": 0.576,
"acc_norm_stderr,none": 0.03131803437491622
},
"leaderboard_bbh_logical_deduction_seven_objects": {
"alias": " - leaderboard_bbh_logical_deduction_seven_objects",
"acc_norm,none": 0.572,
"acc_norm_stderr,none": 0.031355968923772626
},
"leaderboard_bbh_logical_deduction_three_objects": {
"alias": " - leaderboard_bbh_logical_deduction_three_objects",
"acc_norm,none": 0.836,
"acc_norm_stderr,none": 0.023465261002076715
},
"leaderboard_bbh_movie_recommendation": {
"alias": " - leaderboard_bbh_movie_recommendation",
"acc_norm,none": 0.58,
"acc_norm_stderr,none": 0.03127799950463661
},
"leaderboard_bbh_navigate": {
"alias": " - leaderboard_bbh_navigate",
"acc_norm,none": 0.676,
"acc_norm_stderr,none": 0.029658294924545567
},
"leaderboard_bbh_object_counting": {
"alias": " - leaderboard_bbh_object_counting",
"acc_norm,none": 0.284,
"acc_norm_stderr,none": 0.02857695873043744
},
"leaderboard_bbh_penguins_in_a_table": {
"alias": " - leaderboard_bbh_penguins_in_a_table",
"acc_norm,none": 0.5958904109589042,
"acc_norm_stderr,none": 0.0407519857003932
},
"leaderboard_bbh_reasoning_about_colored_objects": {
"alias": " - leaderboard_bbh_reasoning_about_colored_objects",
"acc_norm,none": 0.684,
"acc_norm_stderr,none": 0.02946265759857865
},
"leaderboard_bbh_ruin_names": {
"alias": " - leaderboard_bbh_ruin_names",
"acc_norm,none": 0.808,
"acc_norm_stderr,none": 0.02496069198917196
},
"leaderboard_bbh_salient_translation_error_detection": {
"alias": " - leaderboard_bbh_salient_translation_error_detection",
"acc_norm,none": 0.592,
"acc_norm_stderr,none": 0.03114520984654851
},
"leaderboard_bbh_snarks": {
"alias": " - leaderboard_bbh_snarks",
"acc_norm,none": 0.6966292134831461,
"acc_norm_stderr,none": 0.03455421944400101
},
"leaderboard_bbh_sports_understanding": {
"alias": " - leaderboard_bbh_sports_understanding",
"acc_norm,none": 0.832,
"acc_norm_stderr,none": 0.023692813205492536
},
"leaderboard_bbh_temporal_sequences": {
"alias": " - leaderboard_bbh_temporal_sequences",
"acc_norm,none": 0.844,
"acc_norm_stderr,none": 0.022995023034068682
},
"leaderboard_bbh_tracking_shuffled_objects_five_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_five_objects",
"acc_norm,none": 0.296,
"acc_norm_stderr,none": 0.028928939388379694
},
"leaderboard_bbh_tracking_shuffled_objects_seven_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_seven_objects",
"acc_norm,none": 0.304,
"acc_norm_stderr,none": 0.02915021337415965
},
"leaderboard_bbh_tracking_shuffled_objects_three_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_three_objects",
"acc_norm,none": 0.364,
"acc_norm_stderr,none": 0.030491555220405475
},
"leaderboard_bbh_web_of_lies": {
"alias": " - leaderboard_bbh_web_of_lies",
"acc_norm,none": 0.512,
"acc_norm_stderr,none": 0.03167708558254714
},
"leaderboard_gpqa": {
"acc_norm,none": 0.3573825503355705,
"acc_norm_stderr,none": 0.013891832771494425,
"alias": " - leaderboard_gpqa"
},
"leaderboard_gpqa_diamond": {
"alias": " - leaderboard_gpqa_diamond",
"acc_norm,none": 0.3888888888888889,
"acc_norm_stderr,none": 0.03473279590836963
},
"leaderboard_gpqa_extended": {
"alias": " - leaderboard_gpqa_extended",
"acc_norm,none": 0.3534798534798535,
"acc_norm_stderr,none": 0.020477414126085836
},
"leaderboard_gpqa_main": {
"alias": " - leaderboard_gpqa_main",
"acc_norm,none": 0.3482142857142857,
"acc_norm_stderr,none": 0.022533152157915175
},
"leaderboard_ifeval": {
"alias": " - leaderboard_ifeval",
"prompt_level_strict_acc,none": 0.744916820702403,
"prompt_level_strict_acc_stderr,none": 0.018758491950414184,
"inst_level_strict_acc,none": 0.8237410071942446,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.767097966728281,
"prompt_level_loose_acc_stderr,none": 0.01818926607409182,
"inst_level_loose_acc,none": 0.842925659472422,
"inst_level_loose_acc_stderr,none": "N/A"
},
"leaderboard_math_hard": {
"exact_match,none": 0.02039274924471299,
"exact_match_stderr,none": 0.003847017757728751,
"alias": " - leaderboard_math_hard"
},
"leaderboard_math_algebra_hard": {
"alias": " - leaderboard_math_algebra_hard",
"exact_match,none": 0.05537459283387622,
"exact_match_stderr,none": 0.01307447837002421
},
"leaderboard_math_counting_and_prob_hard": {
"alias": " - leaderboard_math_counting_and_prob_hard",
"exact_match,none": 0.0,
"exact_match_stderr,none": 0.0
},
"leaderboard_math_geometry_hard": {
"alias": " - leaderboard_math_geometry_hard",
"exact_match,none": 0.007575757575757576,
"exact_match_stderr,none": 0.007575757575757577
},
"leaderboard_math_intermediate_algebra_hard": {
"alias": " - leaderboard_math_intermediate_algebra_hard",
"exact_match,none": 0.0,
"exact_match_stderr,none": 0.0
},
"leaderboard_math_num_theory_hard": {
"alias": " - leaderboard_math_num_theory_hard",
"exact_match,none": 0.01948051948051948,
"exact_match_stderr,none": 0.011173331005571083
},
"leaderboard_math_prealgebra_hard": {
"alias": " - leaderboard_math_prealgebra_hard",
"exact_match,none": 0.031088082901554404,
"exact_match_stderr,none": 0.012525310625527019
},
"leaderboard_math_precalculus_hard": {
"alias": " - leaderboard_math_precalculus_hard",
"exact_match,none": 0.0,
"exact_match_stderr,none": 0.0
},
"leaderboard_mmlu_pro": {
"alias": " - leaderboard_mmlu_pro",
"acc,none": 0.43326130319148937,
"acc_stderr,none": 0.004517680579088188
},
"leaderboard_musr": {
"acc_norm,none": 0.41005291005291006,
"acc_norm_stderr,none": 0.017490273970870246,
"alias": " - leaderboard_musr"
},
"leaderboard_musr_murder_mysteries": {
"alias": " - leaderboard_musr_murder_mysteries",
"acc_norm,none": 0.54,
"acc_norm_stderr,none": 0.031584653891499004
},
"leaderboard_musr_object_placements": {
"alias": " - leaderboard_musr_object_placements",
"acc_norm,none": 0.27734375,
"acc_norm_stderr,none": 0.02803528549328419
},
"leaderboard_musr_team_allocation": {
"alias": " - leaderboard_musr_team_allocation",
"acc_norm,none": 0.416,
"acc_norm_stderr,none": 0.031235856237014505
}
}
```
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
ncoop57/personas-translation-f4d93fec-2af0-4abc-8419-29c0b5450e1f | ncoop57 | "2024-11-21T18:52:28Z" | 7 | 0 | [
"size_categories:n<1K",
"format:json",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"region:us",
"fastdata",
"synthetic"
] | null | "2024-11-21T18:52:25Z" | ---
tags:
- fastdata
- synthetic
---
# personas-translation-f4d93fec-2af0-4abc-8419-29c0b5450e1f
_Note: This is an AI-generated dataset, so its content may be inaccurate or false._
**Source of the data:**
The dataset was generated using the [Fastdata](https://github.com/AnswerDotAI/fastdata) library and claude-3-haiku-20240307 with the following input:
## System Prompt
```
You will help generate synthetic data of English and Spanish phrases.
```
## Prompt Template
```
<examples>
{examples}
</examples>
Create an English and Spanish translation pair that is similar to the examples and would be appropriate for the following persona:
<persona>{persona}</persona>
```
## Sample Input
```json
[{'persona': "A Political Analyst specialized in El Salvador's political landscape.", 'examples': [Hello, my name is Nathan. I am a research scientist at an AI startup. ➡ *Hola, me llamo Nathan. Soy ciencia investigador en un startup de IA.*, How much wood could a woodchuck chuck if a woodchuck could chuck wood? ➡ *¿Cuánta madera podría arrojar una marmota si una marmota pudiera arrojar madera?*, Thomas Cranmer (2 July 1489 - 21 March 1556) was a leader of the English Reformation and Archbishop of Canterbury during the reigns of Henry VIII, Edward VI and, for a short time, Mary I. He helped build the case for the annulment of Henry's marriage to Catherine of Aragon, which was one of the causes of the separation of the English Church from union with the Holy See. ➡ *Thomas Cranmer (2 de julio de 1489 - 21 de marzo de 1556) fue un líder de la Reforma inglesa y arzobispo de Canterbury durante los reinados de Henry VIII, Edward VI y, por un corto tiempo, María I. Ayudó a construir el caso para la anulación de El matrimonio de Henry con Catalina de Aragón, que fue una de las causas de la separación de la Iglesia inglesa de la unión con la Santa Sede.*]}, {'persona': 'A legal advisor who understands the legal implications of incomplete or inaccurate project documentation', 'examples': [Hello, my name is Nathan. I am a research scientist at an AI startup. ➡ *Hola, me llamo Nathan. Soy ciencia investigador en un startup de IA.*, How much wood could a woodchuck chuck if a woodchuck could chuck wood? ➡ *¿Cuánta madera podría arrojar una marmota si una marmota pudiera arrojar madera?*, Thomas Cranmer (2 July 1489 - 21 March 1556) was a leader of the English Reformation and Archbishop of Canterbury during the reigns of Henry VIII, Edward VI and, for a short time, Mary I. He helped build the case for the annulment of Henry's marriage to Catherine of Aragon, which was one of the causes of the separation of the English Church from union with the Holy See. ➡ *Thomas Cranmer (2 de julio de 1489 - 21 de marzo de 1556) fue un líder de la Reforma inglesa y arzobispo de Canterbury durante los reinados de Henry VIII, Edward VI y, por un corto tiempo, María I. Ayudó a construir el caso para la anulación de El matrimonio de Henry con Catalina de Aragón, que fue una de las causas de la separación de la Iglesia inglesa de la unión con la Santa Sede.*]}]
```
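To browse the generated pairs, the repository can be loaded like any other JSON-backed dataset on the Hub. The sketch below is a minimal, hypothetical example: the `train` split name is an assumption (it is the usual default for JSON data files), and the record fields follow the sample input above, so they may differ in the actual files.
```python
from datasets import load_dataset

# Hypothetical loading sketch; the "train" split name is an assumption.
pairs = load_dataset(
    "ncoop57/personas-translation-f4d93fec-2af0-4abc-8419-29c0b5450e1f",
    split="train",
)
print(pairs[0])  # one persona-conditioned English/Spanish pair
```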
|
theazer69/padilha2 | theazer69 | "2024-11-21T19:32:57Z" | 7 | 0 | [
"license:openrail",
"size_categories:n<1K",
"format:audiofolder",
"modality:audio",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-11-21T19:31:32Z" | ---
license: openrail
---
|
neoneye/simon-arc-solve-rotate-v11 | neoneye | "2024-11-21T20:45:35Z" | 7 | 0 | [
"task_categories:image-to-text",
"task_categories:text-to-image",
"language:en",
"license:mit",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"image-to-text",
"text-to-image"
] | "2024-11-21T20:44:36Z" | ---
license: mit
task_categories:
- image-to-text
- text-to-image
language:
- en
pretty_name: simons ARC (abstraction & reasoning corpus) solve rotate version 11
size_categories:
- 1K<n<10K
configs:
- config_name: default
data_files:
- split: train
path: data.jsonl
---
# Version 1
ARC-AGI Tasks where the image gets rotated cw/ccw/180 and transposed.
The image sizes are between 1 and 4 pixels.
Predict the number of rows in the output image.
# Version 2
image size: 1-5.
# Version 3
image size: 1-5.
Added `flipx` and `flipy` transformations.
# Version 4
image size: 1-5.
number of tests: 1-2. Previously there was always just 1 test.
Added `flipa` and `flipb` transformations, which flip over the diagonals.
# Version 5
image size: 1-5.
number of tests: 1-2.
# Version 6
image size: 1-13.
# Version 7
Earlier predictions added to some of the rows.
# Version 8
Earlier predictions with focus on repair 1 bad pixel.
# Version 9
Added fields: `arc_task`, `test_index`, `earlier_output`.
# Version 10
Replaced RLE compressed response with raw pixel response.
# Version 11
image size: 1-16.
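The configuration above registers a single `default` config whose `train` split reads `data.jsonl`, so the tasks can be loaded directly with the `datasets` library. A minimal sketch (per-row fields include at least `arc_task`, `test_index` and `earlier_output`, as noted under Version 9):
```python
from datasets import load_dataset

# Loads the train split declared in the YAML config above (data.jsonl).
tasks = load_dataset("neoneye/simon-arc-solve-rotate-v11", split="train")
print(tasks[0])  # one serialized rotation/transpose task
```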
|
nace-ai/test-subtask-guardrails-v9-1-irrelevant-v1 | nace-ai | "2024-11-21T20:52:38Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T20:52:28Z" | ---
dataset_info:
features:
- name: file
dtype: string
- name: instruction
dtype: string
- name: data_seed
dtype: string
- name: answer
dtype: string
- name: llm_label
dtype: string
- name: input
dtype: string
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: full
dtype: string
- name: reasoning
dtype: string
- name: __index_level_0__
dtype: float64
- name: mode
dtype: string
- name: multi_policy
dtype: string
splits:
- name: train
num_bytes: 513233165
num_examples: 32719
- name: validation
num_bytes: 30005447
num_examples: 1924
- name: test
num_bytes: 59512463
num_examples: 3850
download_size: 282372908
dataset_size: 602751075
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
juliadollis/ner_stf_fuzzy | juliadollis | "2024-11-21T21:37:15Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T21:34:37Z" | ---
dataset_info:
features:
- name: inteiro_teor
dtype: string
- name: url_download
dtype: string
- name: dataDecisao
dtype: timestamp[ns]
- name: dataPublicacao
dtype: timestamp[ns]
- name: decisao
dtype: string
- name: descricaoClasse
dtype: string
- name: ementa
dtype: string
- name: id
dtype: string
- name: jurisprudenciaCitada
dtype: string
- name: ministroRelator
dtype: string
- name: nomeOrgaoJulgador
dtype: string
- name: numeroProcesso
dtype: string
- name: referenciasLegislativas
sequence: string
- name: siglaClasse
dtype: string
- name: tipoDeDecisao
dtype: string
- name: titulo
dtype: string
- name: acordaosSimilares
sequence: string
- name: partes_lista_texto
dtype: string
- name: temaProcs
sequence: string
- name: ner_results
struct:
- name: JURISPRUDENCIA
sequence: string
- name: LEGISLACAO
sequence: string
- name: LOCAL
sequence: string
- name: ORGANIZACAO
sequence: string
- name: PESSOA
sequence: string
- name: TEMPO
sequence: string
- name: desambiguacao
list:
- name: class
dtype: string
- name: count
dtype: int64
- name: elements
sequence: string
- name: entity
dtype: string
splits:
- name: train
num_bytes: 6654843253
num_examples: 78477
download_size: 1425175145
dataset_size: 6654843253
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
IntelligentEstate/The_Key | IntelligentEstate | "2024-11-21T21:48:45Z" | 7 | 0 | [
"language:en",
"license:cc-by-nc-4.0",
"region:us"
] | null | "2024-11-21T21:44:18Z" | ---
license: cc-by-nc-4.0
language:
- en
---
** The Key is a unique spatial-awareness dataset with problem-solving in diverse scenarios and an inferred reflection in the form of a haiku --This is experimental--
---
** For commercial use, please get in touch with intelligentestate@gmail.com; for all else, enjoy and please respond with results.
--- |
neoneye/simon-arc-solve-translate-v12 | neoneye | "2024-11-21T22:06:16Z" | 7 | 0 | [
"task_categories:image-to-text",
"task_categories:text-to-image",
"language:en",
"license:mit",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"image-to-text",
"text-to-image"
] | "2024-11-21T22:05:04Z" | ---
license: mit
task_categories:
- image-to-text
- text-to-image
language:
- en
pretty_name: simons ARC (abstraction & reasoning corpus) solve translate version 12
size_categories:
- 1K<n<10K
configs:
- config_name: default
data_files:
- split: train
path: data.jsonl
---
# Version 1
ARC-AGI Tasks where the image gets translated by plus/minus 1 pixel in up/down/left/right directions.
The image sizes are between 1 and 4 pixels.
# Version 2
Only translations of plus/minus 1 up/down are enabled.
image width: 1-4, image height: 3-4.
My hypothesis is that it's easy with RLE data to translate up/down.
# Version 3
Only translations of plus/minus 1 left/right are enabled.
image width: 3-4, image height: 1-4.
# Version 4
All transformations have same weight.
image size: 3-4.
# Version 5
Added diagonal translation by 1 pixel.
All transformations have same weight.
image size: 3-4.
# Version 6
All transformations have same weight.
image size: 3-5.
# Version 7
All transformations have same weight.
image size: 3-5.
number of test pairs: 1-2. Previous it was alway 1 test pair.
# Version 8
All transformations have same weight.
image size: 3-5.
number of test pairs: 1-2.
Added: Predict the number of rows in the output image.
# Version 9
Increased the translation distance from -1..+1, to -2..+2.
image size 1-8.
# Version 10
Increased the translation distance from -2..+2, to -3..+3.
image size 1-12.
# Version 11
Added fields: `arc_task`, `test_index`, `earlier_output`.
# Version 12
Replaced RLE compressed response with raw pixel response.
image size 1-5.
max translation 1.
|
neoneye/simon-arc-solve-rotate-v12 | neoneye | "2024-11-21T22:31:10Z" | 7 | 0 | [
"task_categories:image-to-text",
"task_categories:text-to-image",
"language:en",
"license:mit",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"image-to-text",
"text-to-image"
] | "2024-11-21T22:29:55Z" | ---
license: mit
task_categories:
- image-to-text
- text-to-image
language:
- en
pretty_name: simons ARC (abstraction & reasoning corpus) solve rotate version 12
size_categories:
- 1K<n<10K
configs:
- config_name: default
data_files:
- split: train
path: data.jsonl
---
# Version 1
ARC-AGI Tasks where the image gets rotated cw/ccw/180 and transposed.
The image sizes are between 1 and 4 pixels.
Predict the number of rows in the output image.
# Version 2
image size: 1-5.
# Version 3
image size: 1-5.
Added `flipx` and `flipy` transformations.
# Version 4
image size: 1-5.
number of tests: 1-2. Previously there was always just 1 test.
Added `flipa` and `flipb` transformations, which flip over the diagonals.
# Version 5
image size: 1-5.
number of tests: 1-2.
# Version 6
image size: 1-13.
# Version 7
Earlier predictions added to some of the rows.
# Version 8
Earlier predictions with focus on repair 1 bad pixel.
# Version 9
Added fields: `arc_task`, `test_index`, `earlier_output`.
# Version 10
Replaced RLE compressed response with raw pixel response.
# Version 11
image size: 1-16.
# Version 12
I think the image sizes were too big for the model to make sense of the data. Trying with smaller images.
image size: 1-5.
|
Tippawan/Finetune-mt-story-telling-221124-messages | Tippawan | "2024-11-21T22:51:05Z" | 7 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T22:51:04Z" | ---
dataset_info:
features:
- name: en
dtype: string
- name: th
dtype: string
- name: __index_level_0__
dtype: int64
splits:
- name: train
num_bytes: 719000
num_examples: 5629
- name: test
num_bytes: 145238
num_examples: 1126
- name: validation
num_bytes: 145163
num_examples: 1126
download_size: 577598
dataset_size: 1009401
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
- split: validation
path: data/validation-*
---
|
neoneye/simon-arc-solve-scale-v9 | neoneye | "2024-11-21T23:16:54Z" | 7 | 0 | [
"task_categories:image-to-text",
"task_categories:text-to-image",
"language:en",
"license:mit",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"image-to-text",
"text-to-image"
] | "2024-11-21T23:14:58Z" | ---
license: mit
task_categories:
- image-to-text
- text-to-image
language:
- en
pretty_name: simons ARC (abstraction & reasoning corpus) solve scale version 9
size_categories:
- 1K<n<10K
configs:
- config_name: default
data_files:
- split: train
path: data.jsonl
---
# Version 1
ARC-AGI Tasks where the images get scaled up/down in both the x and y directions.
example count: 2-4.
test count: 1-2.
image size: 3-10.
scale factor: 1-3.
# Version 2
image size: 1-20.
scale factor: 1-7.
# Version 3
image size: 1-30.
scale factor: 1-7.
# Version 4
Added a small amount of noise to the images.
image size: 1-10.
scale factor: 1-7.
Only scale down.
Number of noise pixels per pixel cell: 0-2.
# Version 5
More noisy images for downscaling.
image size: 1-12.
Number of noise pixels per pixel cell: 0-half.
# Version 6
Earlier predictions added to some of the rows.
# Version 7
Added fields: `arc_task`, `test_index`, `earlier_output`.
# Version 8
Replaced RLE compressed response with raw pixel response.
image size: 1-5.
scale factor: 1-7.
# Version 9
image size: 1-7.
scale factor: 1-3.
|
neoneye/simon-arc-solve-skew-v5 | neoneye | "2024-11-21T23:33:46Z" | 7 | 0 | [
"task_categories:image-to-text",
"task_categories:text-to-image",
"language:en",
"license:mit",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"image-to-text",
"text-to-image"
] | "2024-11-21T23:32:47Z" | ---
license: mit
task_categories:
- image-to-text
- text-to-image
language:
- en
pretty_name: simons ARC (abstraction & reasoning corpus) solve skew version 5
size_categories:
- 1K<n<10K
configs:
- config_name: default
data_files:
- split: train
path: data.jsonl
---
# Version 1
ARC-AGI Tasks where the job is to apply skew/unskew in the directions up/down/left/right.
example count: 2-4.
test count: 1-2.
image size: 1-4.
# Version 2
image size: 1-7.
# Version 3
Earlier predictions added to some of the rows.
# Version 4
Added fields: `arc_task`, `test_index`, `earlier_output`.
# Version 5
Replaced RLE compressed response with raw pixel response. |
adipanda/anime-characters-2M | adipanda | "2024-11-22T03:29:21Z" | 7 | 1 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:image",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-21T23:35:35Z" | ---
dataset_info:
features:
- name: filename
dtype: string
- name: tags
dtype: string
- name: bangumi_name
dtype: string
- name: has_face
dtype: bool
- name: face_count
dtype: int64
- name: face_x0
dtype: float64
- name: face_y0
dtype: float64
- name: face_x1
dtype: float64
- name: face_y1
dtype: float64
- name: face_width
dtype: float64
- name: face_height
dtype: float64
- name: face_max
dtype: float64
- name: face_min
dtype: float64
- name: face_area
dtype: float64
- name: face_max_ratio
dtype: float64
- name: face_min_ratio
dtype: float64
- name: face_width_ratio
dtype: float64
- name: face_height_ratio
dtype: float64
- name: face_area_ratio
dtype: float64
- name: image_url
dtype: string
splits:
- name: train
num_bytes: 748632926
num_examples: 1968055
download_size: 267677923
dataset_size: 748632926
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
TSOWatch/1001NightsBesiegedCity | TSOWatch | "2024-11-22T00:14:26Z" | 7 | 0 | [
"license:creativeml-openrail-m",
"size_categories:n<1K",
"format:json",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T00:14:05Z" | ---
license: creativeml-openrail-m
---
|
TSOWatch/1001NightsSeaSnake | TSOWatch | "2024-11-22T00:19:25Z" | 7 | 0 | [
"license:creativeml-openrail-m",
"size_categories:n<1K",
"format:json",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T00:19:10Z" | ---
license: creativeml-openrail-m
---
|
TSOWatch/1001NightsTreasureWisdom | TSOWatch | "2024-11-22T00:24:26Z" | 7 | 0 | [
"license:creativeml-openrail-m",
"size_categories:n<1K",
"format:json",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T00:24:05Z" | ---
license: creativeml-openrail-m
---
|
TSOWatch/1001NightsFirstThief | TSOWatch | "2024-11-22T00:25:06Z" | 7 | 0 | [
"license:creativeml-openrail-m",
"size_categories:n<1K",
"format:json",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T00:24:54Z" | ---
license: creativeml-openrail-m
---
|
TSOWatch/1001NightsSecondThief | TSOWatch | "2024-11-22T00:25:46Z" | 7 | 0 | [
"license:creativeml-openrail-m",
"size_categories:n<1K",
"format:json",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T00:25:35Z" | ---
license: creativeml-openrail-m
---
|
TSOWatch/1001NightsThirdThief | TSOWatch | "2024-11-22T00:26:27Z" | 7 | 0 | [
"license:creativeml-openrail-m",
"size_categories:n<1K",
"format:json",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T00:26:15Z" | ---
license: creativeml-openrail-m
---
|
open-llm-leaderboard/allenai__Llama-3.1-Tulu-3-8B-details | open-llm-leaderboard | "2024-11-22T00:34:22Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:json",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T00:31:09Z" | ---
pretty_name: Evaluation run of allenai/Llama-3.1-Tulu-3-8B
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [allenai/Llama-3.1-Tulu-3-8B](https://huggingface.co/allenai/Llama-3.1-Tulu-3-8B)\n\
The dataset is composed of 38 configuration(s), each one corresponding to one of\
\ the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can\
\ be found as a specific split in each configuration, the split being named using\
\ the timestamp of the run.The \"train\" split is always pointing to the latest\
\ results.\n\nAn additional configuration \"results\" store all the aggregated results\
\ of the run.\n\nTo load the details from a run, you can for instance do the following:\n\
```python\nfrom datasets import load_dataset\ndata = load_dataset(\n\t\"open-llm-leaderboard/allenai__Llama-3.1-Tulu-3-8B-details\"\
,\n\tname=\"allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_boolean_expressions\"\
,\n\tsplit=\"latest\"\n)\n```\n\n## Latest results\n\nThese are the [latest results\
\ from run 2024-11-22T00-31-08.901515](https://huggingface.co/datasets/open-llm-leaderboard/allenai__Llama-3.1-Tulu-3-8B-details/blob/main/allenai__Llama-3.1-Tulu-3-8B/results_2024-11-22T00-31-08.901515.json)\
\ (note that there might be results for other tasks in the repos if successive evals\
\ didn't cover the same tasks. You find each in the results and the \"latest\" split\
\ for each eval):\n\n```python\n{\n \"all\": {\n \"leaderboard\": {\n\
\ \"acc_norm,none\": 0.38785834738617203,\n \"acc_norm_stderr,none\"\
: 0.005273329157943381,\n \"inst_level_loose_acc,none\": 0.8752997601918465,\n\
\ \"inst_level_loose_acc_stderr,none\": \"N/A\",\n \"inst_level_strict_acc,none\"\
: 0.8585131894484412,\n \"inst_level_strict_acc_stderr,none\": \"N/A\"\
,\n \"exact_match,none\": 0.19637462235649547,\n \"exact_match_stderr,none\"\
: 0.009854609082277298,\n \"acc,none\": 0.2826628989361702,\n \
\ \"acc_stderr,none\": 0.0041053027261143855,\n \"prompt_level_strict_acc,none\"\
: 0.7948243992606284,\n \"prompt_level_strict_acc_stderr,none\": 0.01737807119675965,\n\
\ \"prompt_level_loose_acc,none\": 0.8151571164510166,\n \"\
prompt_level_loose_acc_stderr,none\": 0.01670417955850395,\n \"alias\"\
: \"leaderboard\"\n },\n \"leaderboard_bbh\": {\n \"acc_norm,none\"\
: 0.4025342822426662,\n \"acc_norm_stderr,none\": 0.006072426154807149,\n\
\ \"alias\": \" - leaderboard_bbh\"\n },\n \"leaderboard_bbh_boolean_expressions\"\
: {\n \"alias\": \" - leaderboard_bbh_boolean_expressions\",\n \
\ \"acc_norm,none\": 0.8,\n \"acc_norm_stderr,none\": 0.02534897002097912\n\
\ },\n \"leaderboard_bbh_causal_judgement\": {\n \"alias\"\
: \" - leaderboard_bbh_causal_judgement\",\n \"acc_norm,none\": 0.5187165775401069,\n\
\ \"acc_norm_stderr,none\": 0.03663608375537843\n },\n \
\ \"leaderboard_bbh_date_understanding\": {\n \"alias\": \" - leaderboard_bbh_date_understanding\"\
,\n \"acc_norm,none\": 0.288,\n \"acc_norm_stderr,none\":\
\ 0.028697004587398253\n },\n \"leaderboard_bbh_disambiguation_qa\"\
: {\n \"alias\": \" - leaderboard_bbh_disambiguation_qa\",\n \
\ \"acc_norm,none\": 0.604,\n \"acc_norm_stderr,none\": 0.030993197854577898\n\
\ },\n \"leaderboard_bbh_formal_fallacies\": {\n \"alias\"\
: \" - leaderboard_bbh_formal_fallacies\",\n \"acc_norm,none\": 0.472,\n\
\ \"acc_norm_stderr,none\": 0.031636489531544396\n },\n \
\ \"leaderboard_bbh_geometric_shapes\": {\n \"alias\": \" - leaderboard_bbh_geometric_shapes\"\
,\n \"acc_norm,none\": 0.328,\n \"acc_norm_stderr,none\":\
\ 0.029752391824475363\n },\n \"leaderboard_bbh_hyperbaton\": {\n\
\ \"alias\": \" - leaderboard_bbh_hyperbaton\",\n \"acc_norm,none\"\
: 0.536,\n \"acc_norm_stderr,none\": 0.031603975145223735\n },\n\
\ \"leaderboard_bbh_logical_deduction_five_objects\": {\n \"alias\"\
: \" - leaderboard_bbh_logical_deduction_five_objects\",\n \"acc_norm,none\"\
: 0.256,\n \"acc_norm_stderr,none\": 0.027657108718204846\n },\n\
\ \"leaderboard_bbh_logical_deduction_seven_objects\": {\n \"\
alias\": \" - leaderboard_bbh_logical_deduction_seven_objects\",\n \"\
acc_norm,none\": 0.212,\n \"acc_norm_stderr,none\": 0.025901884690541117\n\
\ },\n \"leaderboard_bbh_logical_deduction_three_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_logical_deduction_three_objects\",\n\
\ \"acc_norm,none\": 0.416,\n \"acc_norm_stderr,none\": 0.031235856237014505\n\
\ },\n \"leaderboard_bbh_movie_recommendation\": {\n \"\
alias\": \" - leaderboard_bbh_movie_recommendation\",\n \"acc_norm,none\"\
: 0.688,\n \"acc_norm_stderr,none\": 0.029361067575219852\n },\n\
\ \"leaderboard_bbh_navigate\": {\n \"alias\": \" - leaderboard_bbh_navigate\"\
,\n \"acc_norm,none\": 0.42,\n \"acc_norm_stderr,none\": 0.03127799950463661\n\
\ },\n \"leaderboard_bbh_object_counting\": {\n \"alias\"\
: \" - leaderboard_bbh_object_counting\",\n \"acc_norm,none\": 0.288,\n\
\ \"acc_norm_stderr,none\": 0.028697004587398253\n },\n \
\ \"leaderboard_bbh_penguins_in_a_table\": {\n \"alias\": \" - leaderboard_bbh_penguins_in_a_table\"\
,\n \"acc_norm,none\": 0.3904109589041096,\n \"acc_norm_stderr,none\"\
: 0.040513109165891854\n },\n \"leaderboard_bbh_reasoning_about_colored_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_reasoning_about_colored_objects\"\
,\n \"acc_norm,none\": 0.456,\n \"acc_norm_stderr,none\":\
\ 0.031563285061213475\n },\n \"leaderboard_bbh_ruin_names\": {\n\
\ \"alias\": \" - leaderboard_bbh_ruin_names\",\n \"acc_norm,none\"\
: 0.456,\n \"acc_norm_stderr,none\": 0.031563285061213475\n },\n\
\ \"leaderboard_bbh_salient_translation_error_detection\": {\n \
\ \"alias\": \" - leaderboard_bbh_salient_translation_error_detection\",\n \
\ \"acc_norm,none\": 0.396,\n \"acc_norm_stderr,none\": 0.030993197854577898\n\
\ },\n \"leaderboard_bbh_snarks\": {\n \"alias\": \" -\
\ leaderboard_bbh_snarks\",\n \"acc_norm,none\": 0.5224719101123596,\n\
\ \"acc_norm_stderr,none\": 0.03754432508487191\n },\n \
\ \"leaderboard_bbh_sports_understanding\": {\n \"alias\": \" - leaderboard_bbh_sports_understanding\"\
,\n \"acc_norm,none\": 0.496,\n \"acc_norm_stderr,none\":\
\ 0.0316851985511992\n },\n \"leaderboard_bbh_temporal_sequences\"\
: {\n \"alias\": \" - leaderboard_bbh_temporal_sequences\",\n \
\ \"acc_norm,none\": 0.116,\n \"acc_norm_stderr,none\": 0.020293429803083823\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_five_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_five_objects\"\
,\n \"acc_norm,none\": 0.136,\n \"acc_norm_stderr,none\":\
\ 0.021723342617052086\n },\n \"leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
,\n \"acc_norm,none\": 0.144,\n \"acc_norm_stderr,none\":\
\ 0.022249407735450245\n },\n \"leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
,\n \"acc_norm,none\": 0.292,\n \"acc_norm_stderr,none\":\
\ 0.02881432040220563\n },\n \"leaderboard_bbh_web_of_lies\": {\n\
\ \"alias\": \" - leaderboard_bbh_web_of_lies\",\n \"acc_norm,none\"\
: 0.488,\n \"acc_norm_stderr,none\": 0.03167708558254714\n },\n\
\ \"leaderboard_gpqa\": {\n \"acc_norm,none\": 0.2986577181208054,\n\
\ \"acc_norm_stderr,none\": 0.013264655332365493,\n \"alias\"\
: \" - leaderboard_gpqa\"\n },\n \"leaderboard_gpqa_diamond\": {\n\
\ \"alias\": \" - leaderboard_gpqa_diamond\",\n \"acc_norm,none\"\
: 0.30303030303030304,\n \"acc_norm_stderr,none\": 0.03274287914026869\n\
\ },\n \"leaderboard_gpqa_extended\": {\n \"alias\": \"\
\ - leaderboard_gpqa_extended\",\n \"acc_norm,none\": 0.28205128205128205,\n\
\ \"acc_norm_stderr,none\": 0.019275803929950375\n },\n \
\ \"leaderboard_gpqa_main\": {\n \"alias\": \" - leaderboard_gpqa_main\"\
,\n \"acc_norm,none\": 0.3169642857142857,\n \"acc_norm_stderr,none\"\
: 0.0220076215848248\n },\n \"leaderboard_ifeval\": {\n \
\ \"alias\": \" - leaderboard_ifeval\",\n \"prompt_level_strict_acc,none\"\
: 0.7948243992606284,\n \"prompt_level_strict_acc_stderr,none\": 0.01737807119675965,\n\
\ \"inst_level_strict_acc,none\": 0.8585131894484412,\n \"\
inst_level_strict_acc_stderr,none\": \"N/A\",\n \"prompt_level_loose_acc,none\"\
: 0.8151571164510166,\n \"prompt_level_loose_acc_stderr,none\": 0.01670417955850395,\n\
\ \"inst_level_loose_acc,none\": 0.8752997601918465,\n \"\
inst_level_loose_acc_stderr,none\": \"N/A\"\n },\n \"leaderboard_math_hard\"\
: {\n \"exact_match,none\": 0.19637462235649547,\n \"exact_match_stderr,none\"\
: 0.009854609082277298,\n \"alias\": \" - leaderboard_math_hard\"\n \
\ },\n \"leaderboard_math_algebra_hard\": {\n \"alias\"\
: \" - leaderboard_math_algebra_hard\",\n \"exact_match,none\": 0.3811074918566775,\n\
\ \"exact_match_stderr,none\": 0.02776327166045321\n },\n \
\ \"leaderboard_math_counting_and_prob_hard\": {\n \"alias\": \" \
\ - leaderboard_math_counting_and_prob_hard\",\n \"exact_match,none\"\
: 0.11382113821138211,\n \"exact_match_stderr,none\": 0.02875360087323741\n\
\ },\n \"leaderboard_math_geometry_hard\": {\n \"alias\"\
: \" - leaderboard_math_geometry_hard\",\n \"exact_match,none\": 0.06060606060606061,\n\
\ \"exact_match_stderr,none\": 0.020847129156682045\n },\n \
\ \"leaderboard_math_intermediate_algebra_hard\": {\n \"alias\":\
\ \" - leaderboard_math_intermediate_algebra_hard\",\n \"exact_match,none\"\
: 0.03214285714285714,\n \"exact_match_stderr,none\": 0.01055955866175321\n\
\ },\n \"leaderboard_math_num_theory_hard\": {\n \"alias\"\
: \" - leaderboard_math_num_theory_hard\",\n \"exact_match,none\": 0.12987012987012986,\n\
\ \"exact_match_stderr,none\": 0.02717696535667076\n },\n \
\ \"leaderboard_math_prealgebra_hard\": {\n \"alias\": \" - leaderboard_math_prealgebra_hard\"\
,\n \"exact_match,none\": 0.46113989637305697,\n \"exact_match_stderr,none\"\
: 0.03597524411734576\n },\n \"leaderboard_math_precalculus_hard\"\
: {\n \"alias\": \" - leaderboard_math_precalculus_hard\",\n \
\ \"exact_match,none\": 0.022222222222222223,\n \"exact_match_stderr,none\"\
: 0.01273389971505968\n },\n \"leaderboard_mmlu_pro\": {\n \
\ \"alias\": \" - leaderboard_mmlu_pro\",\n \"acc,none\": 0.2826628989361702,\n\
\ \"acc_stderr,none\": 0.004105302726114385\n },\n \"leaderboard_musr\"\
: {\n \"acc_norm,none\": 0.4166666666666667,\n \"acc_norm_stderr,none\"\
: 0.01768575862518651,\n \"alias\": \" - leaderboard_musr\"\n \
\ },\n \"leaderboard_musr_murder_mysteries\": {\n \"alias\": \"\
\ - leaderboard_musr_murder_mysteries\",\n \"acc_norm,none\": 0.528,\n\
\ \"acc_norm_stderr,none\": 0.031636489531544396\n },\n \
\ \"leaderboard_musr_object_placements\": {\n \"alias\": \" - leaderboard_musr_object_placements\"\
,\n \"acc_norm,none\": 0.31640625,\n \"acc_norm_stderr,none\"\
: 0.02912403057115479\n },\n \"leaderboard_musr_team_allocation\"\
: {\n \"alias\": \" - leaderboard_musr_team_allocation\",\n \
\ \"acc_norm,none\": 0.408,\n \"acc_norm_stderr,none\": 0.031145209846548512\n\
\ }\n },\n \"leaderboard\": {\n \"acc_norm,none\": 0.38785834738617203,\n\
\ \"acc_norm_stderr,none\": 0.005273329157943381,\n \"inst_level_loose_acc,none\"\
: 0.8752997601918465,\n \"inst_level_loose_acc_stderr,none\": \"N/A\",\n\
\ \"inst_level_strict_acc,none\": 0.8585131894484412,\n \"inst_level_strict_acc_stderr,none\"\
: \"N/A\",\n \"exact_match,none\": 0.19637462235649547,\n \"exact_match_stderr,none\"\
: 0.009854609082277298,\n \"acc,none\": 0.2826628989361702,\n \"acc_stderr,none\"\
: 0.0041053027261143855,\n \"prompt_level_strict_acc,none\": 0.7948243992606284,\n\
\ \"prompt_level_strict_acc_stderr,none\": 0.01737807119675965,\n \
\ \"prompt_level_loose_acc,none\": 0.8151571164510166,\n \"prompt_level_loose_acc_stderr,none\"\
: 0.01670417955850395,\n \"alias\": \"leaderboard\"\n },\n \"leaderboard_bbh\"\
: {\n \"acc_norm,none\": 0.4025342822426662,\n \"acc_norm_stderr,none\"\
: 0.006072426154807149,\n \"alias\": \" - leaderboard_bbh\"\n },\n \
\ \"leaderboard_bbh_boolean_expressions\": {\n \"alias\": \" - leaderboard_bbh_boolean_expressions\"\
,\n \"acc_norm,none\": 0.8,\n \"acc_norm_stderr,none\": 0.02534897002097912\n\
\ },\n \"leaderboard_bbh_causal_judgement\": {\n \"alias\": \" - leaderboard_bbh_causal_judgement\"\
,\n \"acc_norm,none\": 0.5187165775401069,\n \"acc_norm_stderr,none\"\
: 0.03663608375537843\n },\n \"leaderboard_bbh_date_understanding\": {\n \
\ \"alias\": \" - leaderboard_bbh_date_understanding\",\n \"acc_norm,none\"\
: 0.288,\n \"acc_norm_stderr,none\": 0.028697004587398253\n },\n \"\
leaderboard_bbh_disambiguation_qa\": {\n \"alias\": \" - leaderboard_bbh_disambiguation_qa\"\
,\n \"acc_norm,none\": 0.604,\n \"acc_norm_stderr,none\": 0.030993197854577898\n\
\ },\n \"leaderboard_bbh_formal_fallacies\": {\n \"alias\": \" - leaderboard_bbh_formal_fallacies\"\
,\n \"acc_norm,none\": 0.472,\n \"acc_norm_stderr,none\": 0.031636489531544396\n\
\ },\n \"leaderboard_bbh_geometric_shapes\": {\n \"alias\": \" - leaderboard_bbh_geometric_shapes\"\
,\n \"acc_norm,none\": 0.328,\n \"acc_norm_stderr,none\": 0.029752391824475363\n\
\ },\n \"leaderboard_bbh_hyperbaton\": {\n \"alias\": \" - leaderboard_bbh_hyperbaton\"\
,\n \"acc_norm,none\": 0.536,\n \"acc_norm_stderr,none\": 0.031603975145223735\n\
\ },\n \"leaderboard_bbh_logical_deduction_five_objects\": {\n \"alias\"\
: \" - leaderboard_bbh_logical_deduction_five_objects\",\n \"acc_norm,none\"\
: 0.256,\n \"acc_norm_stderr,none\": 0.027657108718204846\n },\n \"\
leaderboard_bbh_logical_deduction_seven_objects\": {\n \"alias\": \" - leaderboard_bbh_logical_deduction_seven_objects\"\
,\n \"acc_norm,none\": 0.212,\n \"acc_norm_stderr,none\": 0.025901884690541117\n\
\ },\n \"leaderboard_bbh_logical_deduction_three_objects\": {\n \"\
alias\": \" - leaderboard_bbh_logical_deduction_three_objects\",\n \"acc_norm,none\"\
: 0.416,\n \"acc_norm_stderr,none\": 0.031235856237014505\n },\n \"\
leaderboard_bbh_movie_recommendation\": {\n \"alias\": \" - leaderboard_bbh_movie_recommendation\"\
,\n \"acc_norm,none\": 0.688,\n \"acc_norm_stderr,none\": 0.029361067575219852\n\
\ },\n \"leaderboard_bbh_navigate\": {\n \"alias\": \" - leaderboard_bbh_navigate\"\
,\n \"acc_norm,none\": 0.42,\n \"acc_norm_stderr,none\": 0.03127799950463661\n\
\ },\n \"leaderboard_bbh_object_counting\": {\n \"alias\": \" - leaderboard_bbh_object_counting\"\
,\n \"acc_norm,none\": 0.288,\n \"acc_norm_stderr,none\": 0.028697004587398253\n\
\ },\n \"leaderboard_bbh_penguins_in_a_table\": {\n \"alias\": \" \
\ - leaderboard_bbh_penguins_in_a_table\",\n \"acc_norm,none\": 0.3904109589041096,\n\
\ \"acc_norm_stderr,none\": 0.040513109165891854\n },\n \"leaderboard_bbh_reasoning_about_colored_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_reasoning_about_colored_objects\"\
,\n \"acc_norm,none\": 0.456,\n \"acc_norm_stderr,none\": 0.031563285061213475\n\
\ },\n \"leaderboard_bbh_ruin_names\": {\n \"alias\": \" - leaderboard_bbh_ruin_names\"\
,\n \"acc_norm,none\": 0.456,\n \"acc_norm_stderr,none\": 0.031563285061213475\n\
\ },\n \"leaderboard_bbh_salient_translation_error_detection\": {\n \
\ \"alias\": \" - leaderboard_bbh_salient_translation_error_detection\",\n \
\ \"acc_norm,none\": 0.396,\n \"acc_norm_stderr,none\": 0.030993197854577898\n\
\ },\n \"leaderboard_bbh_snarks\": {\n \"alias\": \" - leaderboard_bbh_snarks\"\
,\n \"acc_norm,none\": 0.5224719101123596,\n \"acc_norm_stderr,none\"\
: 0.03754432508487191\n },\n \"leaderboard_bbh_sports_understanding\": {\n\
\ \"alias\": \" - leaderboard_bbh_sports_understanding\",\n \"acc_norm,none\"\
: 0.496,\n \"acc_norm_stderr,none\": 0.0316851985511992\n },\n \"leaderboard_bbh_temporal_sequences\"\
: {\n \"alias\": \" - leaderboard_bbh_temporal_sequences\",\n \"\
acc_norm,none\": 0.116,\n \"acc_norm_stderr,none\": 0.020293429803083823\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_five_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_five_objects\"\
,\n \"acc_norm,none\": 0.136,\n \"acc_norm_stderr,none\": 0.021723342617052086\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_seven_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
,\n \"acc_norm,none\": 0.144,\n \"acc_norm_stderr,none\": 0.022249407735450245\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_three_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
,\n \"acc_norm,none\": 0.292,\n \"acc_norm_stderr,none\": 0.02881432040220563\n\
\ },\n \"leaderboard_bbh_web_of_lies\": {\n \"alias\": \" - leaderboard_bbh_web_of_lies\"\
,\n \"acc_norm,none\": 0.488,\n \"acc_norm_stderr,none\": 0.03167708558254714\n\
\ },\n \"leaderboard_gpqa\": {\n \"acc_norm,none\": 0.2986577181208054,\n\
\ \"acc_norm_stderr,none\": 0.013264655332365493,\n \"alias\": \"\
\ - leaderboard_gpqa\"\n },\n \"leaderboard_gpqa_diamond\": {\n \"\
alias\": \" - leaderboard_gpqa_diamond\",\n \"acc_norm,none\": 0.30303030303030304,\n\
\ \"acc_norm_stderr,none\": 0.03274287914026869\n },\n \"leaderboard_gpqa_extended\"\
: {\n \"alias\": \" - leaderboard_gpqa_extended\",\n \"acc_norm,none\"\
: 0.28205128205128205,\n \"acc_norm_stderr,none\": 0.019275803929950375\n\
\ },\n \"leaderboard_gpqa_main\": {\n \"alias\": \" - leaderboard_gpqa_main\"\
,\n \"acc_norm,none\": 0.3169642857142857,\n \"acc_norm_stderr,none\"\
: 0.0220076215848248\n },\n \"leaderboard_ifeval\": {\n \"alias\":\
\ \" - leaderboard_ifeval\",\n \"prompt_level_strict_acc,none\": 0.7948243992606284,\n\
\ \"prompt_level_strict_acc_stderr,none\": 0.01737807119675965,\n \
\ \"inst_level_strict_acc,none\": 0.8585131894484412,\n \"inst_level_strict_acc_stderr,none\"\
: \"N/A\",\n \"prompt_level_loose_acc,none\": 0.8151571164510166,\n \
\ \"prompt_level_loose_acc_stderr,none\": 0.01670417955850395,\n \"inst_level_loose_acc,none\"\
: 0.8752997601918465,\n \"inst_level_loose_acc_stderr,none\": \"N/A\"\n \
\ },\n \"leaderboard_math_hard\": {\n \"exact_match,none\": 0.19637462235649547,\n\
\ \"exact_match_stderr,none\": 0.009854609082277298,\n \"alias\":\
\ \" - leaderboard_math_hard\"\n },\n \"leaderboard_math_algebra_hard\": {\n\
\ \"alias\": \" - leaderboard_math_algebra_hard\",\n \"exact_match,none\"\
: 0.3811074918566775,\n \"exact_match_stderr,none\": 0.02776327166045321\n\
\ },\n \"leaderboard_math_counting_and_prob_hard\": {\n \"alias\":\
\ \" - leaderboard_math_counting_and_prob_hard\",\n \"exact_match,none\"\
: 0.11382113821138211,\n \"exact_match_stderr,none\": 0.02875360087323741\n\
\ },\n \"leaderboard_math_geometry_hard\": {\n \"alias\": \" - leaderboard_math_geometry_hard\"\
,\n \"exact_match,none\": 0.06060606060606061,\n \"exact_match_stderr,none\"\
: 0.020847129156682045\n },\n \"leaderboard_math_intermediate_algebra_hard\"\
: {\n \"alias\": \" - leaderboard_math_intermediate_algebra_hard\",\n \
\ \"exact_match,none\": 0.03214285714285714,\n \"exact_match_stderr,none\"\
: 0.01055955866175321\n },\n \"leaderboard_math_num_theory_hard\": {\n \
\ \"alias\": \" - leaderboard_math_num_theory_hard\",\n \"exact_match,none\"\
: 0.12987012987012986,\n \"exact_match_stderr,none\": 0.02717696535667076\n\
\ },\n \"leaderboard_math_prealgebra_hard\": {\n \"alias\": \" - leaderboard_math_prealgebra_hard\"\
,\n \"exact_match,none\": 0.46113989637305697,\n \"exact_match_stderr,none\"\
: 0.03597524411734576\n },\n \"leaderboard_math_precalculus_hard\": {\n \
\ \"alias\": \" - leaderboard_math_precalculus_hard\",\n \"exact_match,none\"\
: 0.022222222222222223,\n \"exact_match_stderr,none\": 0.01273389971505968\n\
\ },\n \"leaderboard_mmlu_pro\": {\n \"alias\": \" - leaderboard_mmlu_pro\"\
,\n \"acc,none\": 0.2826628989361702,\n \"acc_stderr,none\": 0.004105302726114385\n\
\ },\n \"leaderboard_musr\": {\n \"acc_norm,none\": 0.4166666666666667,\n\
\ \"acc_norm_stderr,none\": 0.01768575862518651,\n \"alias\": \" -\
\ leaderboard_musr\"\n },\n \"leaderboard_musr_murder_mysteries\": {\n \
\ \"alias\": \" - leaderboard_musr_murder_mysteries\",\n \"acc_norm,none\"\
: 0.528,\n \"acc_norm_stderr,none\": 0.031636489531544396\n },\n \"\
leaderboard_musr_object_placements\": {\n \"alias\": \" - leaderboard_musr_object_placements\"\
,\n \"acc_norm,none\": 0.31640625,\n \"acc_norm_stderr,none\": 0.02912403057115479\n\
\ },\n \"leaderboard_musr_team_allocation\": {\n \"alias\": \" - leaderboard_musr_team_allocation\"\
,\n \"acc_norm,none\": 0.408,\n \"acc_norm_stderr,none\": 0.031145209846548512\n\
\ }\n}\n```"
repo_url: https://huggingface.co/allenai/Llama-3.1-Tulu-3-8B
leaderboard_url: ''
point_of_contact: ''
configs:
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_boolean_expressions
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_boolean_expressions_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_boolean_expressions_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_causal_judgement
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_causal_judgement_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_causal_judgement_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_date_understanding
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_date_understanding_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_date_understanding_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_disambiguation_qa
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_disambiguation_qa_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_disambiguation_qa_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_formal_fallacies
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_formal_fallacies_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_formal_fallacies_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_geometric_shapes
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_geometric_shapes_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_geometric_shapes_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_hyperbaton
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_hyperbaton_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_hyperbaton_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_logical_deduction_five_objects
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_logical_deduction_five_objects_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_five_objects_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_logical_deduction_seven_objects
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_logical_deduction_seven_objects_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_seven_objects_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_logical_deduction_three_objects
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_logical_deduction_three_objects_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_three_objects_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_movie_recommendation
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_movie_recommendation_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_movie_recommendation_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_navigate
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_navigate_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_navigate_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_object_counting
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_object_counting_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_object_counting_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_penguins_in_a_table
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_penguins_in_a_table_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_penguins_in_a_table_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_reasoning_about_colored_objects
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_reasoning_about_colored_objects_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_reasoning_about_colored_objects_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_ruin_names
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_ruin_names_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_ruin_names_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_salient_translation_error_detection
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_salient_translation_error_detection_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_salient_translation_error_detection_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_snarks
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_snarks_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_snarks_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_sports_understanding
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_sports_understanding_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_sports_understanding_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_temporal_sequences
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_temporal_sequences_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_temporal_sequences_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_tracking_shuffled_objects_five_objects
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_five_objects_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_five_objects_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_tracking_shuffled_objects_seven_objects
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_seven_objects_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_seven_objects_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_tracking_shuffled_objects_three_objects
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_three_objects_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_three_objects_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_web_of_lies
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_bbh_web_of_lies_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_web_of_lies_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_gpqa_diamond
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_gpqa_diamond_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_diamond_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_gpqa_extended
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_gpqa_extended_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_extended_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_gpqa_main
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_gpqa_main_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_main_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_ifeval
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_ifeval_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_ifeval_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_math_algebra_hard
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_math_algebra_hard_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_algebra_hard_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_math_counting_and_prob_hard
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_math_counting_and_prob_hard_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_counting_and_prob_hard_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_math_geometry_hard
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_math_geometry_hard_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_geometry_hard_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_math_intermediate_algebra_hard
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_math_intermediate_algebra_hard_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_intermediate_algebra_hard_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_math_num_theory_hard
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_math_num_theory_hard_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_num_theory_hard_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_math_prealgebra_hard
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_math_prealgebra_hard_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_prealgebra_hard_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_math_precalculus_hard
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_math_precalculus_hard_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_precalculus_hard_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_mmlu_pro
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_mmlu_pro_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_mmlu_pro_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_musr_murder_mysteries
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_musr_murder_mysteries_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_murder_mysteries_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_musr_object_placements
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_musr_object_placements_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_object_placements_2024-11-22T00-31-08.901515.jsonl'
- config_name: allenai__Llama-3.1-Tulu-3-8B__leaderboard_musr_team_allocation
data_files:
- split: 2024_11_22T00_31_08.901515
path:
- '**/samples_leaderboard_musr_team_allocation_2024-11-22T00-31-08.901515.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_team_allocation_2024-11-22T00-31-08.901515.jsonl'
---
# Dataset Card for Evaluation run of allenai/Llama-3.1-Tulu-3-8B
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [allenai/Llama-3.1-Tulu-3-8B](https://huggingface.co/allenai/Llama-3.1-Tulu-3-8B)
The dataset is composed of 38 configuration(s), each one corresponding to one of the evaluated tasks.
The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run.
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset(
"open-llm-leaderboard/allenai__Llama-3.1-Tulu-3-8B-details",
name="allenai__Llama-3.1-Tulu-3-8B__leaderboard_bbh_boolean_expressions",
split="latest"
)
```
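The same pattern works for any of the 38 per-task configurations listed above. As a hedged sketch, a specific run can also be requested by its timestamped split name instead of `latest` (here using the only run recorded in this repo):

```python
from datasets import load_dataset

# Splits are named after the run timestamp; "latest" always tracks the newest run.
data = load_dataset(
    "open-llm-leaderboard/allenai__Llama-3.1-Tulu-3-8B-details",
    name="allenai__Llama-3.1-Tulu-3-8B__leaderboard_ifeval",
    split="2024_11_22T00_31_08.901515",
)
```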
## Latest results
These are the [latest results from run 2024-11-22T00-31-08.901515](https://huggingface.co/datasets/open-llm-leaderboard/allenai__Llama-3.1-Tulu-3-8B-details/blob/main/allenai__Llama-3.1-Tulu-3-8B/results_2024-11-22T00-31-08.901515.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each one in the results and in the "latest" split for each eval):
```python
{
"all": {
"leaderboard": {
"acc_norm,none": 0.38785834738617203,
"acc_norm_stderr,none": 0.005273329157943381,
"inst_level_loose_acc,none": 0.8752997601918465,
"inst_level_loose_acc_stderr,none": "N/A",
"inst_level_strict_acc,none": 0.8585131894484412,
"inst_level_strict_acc_stderr,none": "N/A",
"exact_match,none": 0.19637462235649547,
"exact_match_stderr,none": 0.009854609082277298,
"acc,none": 0.2826628989361702,
"acc_stderr,none": 0.0041053027261143855,
"prompt_level_strict_acc,none": 0.7948243992606284,
"prompt_level_strict_acc_stderr,none": 0.01737807119675965,
"prompt_level_loose_acc,none": 0.8151571164510166,
"prompt_level_loose_acc_stderr,none": 0.01670417955850395,
"alias": "leaderboard"
},
"leaderboard_bbh": {
"acc_norm,none": 0.4025342822426662,
"acc_norm_stderr,none": 0.006072426154807149,
"alias": " - leaderboard_bbh"
},
"leaderboard_bbh_boolean_expressions": {
"alias": " - leaderboard_bbh_boolean_expressions",
"acc_norm,none": 0.8,
"acc_norm_stderr,none": 0.02534897002097912
},
"leaderboard_bbh_causal_judgement": {
"alias": " - leaderboard_bbh_causal_judgement",
"acc_norm,none": 0.5187165775401069,
"acc_norm_stderr,none": 0.03663608375537843
},
"leaderboard_bbh_date_understanding": {
"alias": " - leaderboard_bbh_date_understanding",
"acc_norm,none": 0.288,
"acc_norm_stderr,none": 0.028697004587398253
},
"leaderboard_bbh_disambiguation_qa": {
"alias": " - leaderboard_bbh_disambiguation_qa",
"acc_norm,none": 0.604,
"acc_norm_stderr,none": 0.030993197854577898
},
"leaderboard_bbh_formal_fallacies": {
"alias": " - leaderboard_bbh_formal_fallacies",
"acc_norm,none": 0.472,
"acc_norm_stderr,none": 0.031636489531544396
},
"leaderboard_bbh_geometric_shapes": {
"alias": " - leaderboard_bbh_geometric_shapes",
"acc_norm,none": 0.328,
"acc_norm_stderr,none": 0.029752391824475363
},
"leaderboard_bbh_hyperbaton": {
"alias": " - leaderboard_bbh_hyperbaton",
"acc_norm,none": 0.536,
"acc_norm_stderr,none": 0.031603975145223735
},
"leaderboard_bbh_logical_deduction_five_objects": {
"alias": " - leaderboard_bbh_logical_deduction_five_objects",
"acc_norm,none": 0.256,
"acc_norm_stderr,none": 0.027657108718204846
},
"leaderboard_bbh_logical_deduction_seven_objects": {
"alias": " - leaderboard_bbh_logical_deduction_seven_objects",
"acc_norm,none": 0.212,
"acc_norm_stderr,none": 0.025901884690541117
},
"leaderboard_bbh_logical_deduction_three_objects": {
"alias": " - leaderboard_bbh_logical_deduction_three_objects",
"acc_norm,none": 0.416,
"acc_norm_stderr,none": 0.031235856237014505
},
"leaderboard_bbh_movie_recommendation": {
"alias": " - leaderboard_bbh_movie_recommendation",
"acc_norm,none": 0.688,
"acc_norm_stderr,none": 0.029361067575219852
},
"leaderboard_bbh_navigate": {
"alias": " - leaderboard_bbh_navigate",
"acc_norm,none": 0.42,
"acc_norm_stderr,none": 0.03127799950463661
},
"leaderboard_bbh_object_counting": {
"alias": " - leaderboard_bbh_object_counting",
"acc_norm,none": 0.288,
"acc_norm_stderr,none": 0.028697004587398253
},
"leaderboard_bbh_penguins_in_a_table": {
"alias": " - leaderboard_bbh_penguins_in_a_table",
"acc_norm,none": 0.3904109589041096,
"acc_norm_stderr,none": 0.040513109165891854
},
"leaderboard_bbh_reasoning_about_colored_objects": {
"alias": " - leaderboard_bbh_reasoning_about_colored_objects",
"acc_norm,none": 0.456,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_ruin_names": {
"alias": " - leaderboard_bbh_ruin_names",
"acc_norm,none": 0.456,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_salient_translation_error_detection": {
"alias": " - leaderboard_bbh_salient_translation_error_detection",
"acc_norm,none": 0.396,
"acc_norm_stderr,none": 0.030993197854577898
},
"leaderboard_bbh_snarks": {
"alias": " - leaderboard_bbh_snarks",
"acc_norm,none": 0.5224719101123596,
"acc_norm_stderr,none": 0.03754432508487191
},
"leaderboard_bbh_sports_understanding": {
"alias": " - leaderboard_bbh_sports_understanding",
"acc_norm,none": 0.496,
"acc_norm_stderr,none": 0.0316851985511992
},
"leaderboard_bbh_temporal_sequences": {
"alias": " - leaderboard_bbh_temporal_sequences",
"acc_norm,none": 0.116,
"acc_norm_stderr,none": 0.020293429803083823
},
"leaderboard_bbh_tracking_shuffled_objects_five_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_five_objects",
"acc_norm,none": 0.136,
"acc_norm_stderr,none": 0.021723342617052086
},
"leaderboard_bbh_tracking_shuffled_objects_seven_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_seven_objects",
"acc_norm,none": 0.144,
"acc_norm_stderr,none": 0.022249407735450245
},
"leaderboard_bbh_tracking_shuffled_objects_three_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_three_objects",
"acc_norm,none": 0.292,
"acc_norm_stderr,none": 0.02881432040220563
},
"leaderboard_bbh_web_of_lies": {
"alias": " - leaderboard_bbh_web_of_lies",
"acc_norm,none": 0.488,
"acc_norm_stderr,none": 0.03167708558254714
},
"leaderboard_gpqa": {
"acc_norm,none": 0.2986577181208054,
"acc_norm_stderr,none": 0.013264655332365493,
"alias": " - leaderboard_gpqa"
},
"leaderboard_gpqa_diamond": {
"alias": " - leaderboard_gpqa_diamond",
"acc_norm,none": 0.30303030303030304,
"acc_norm_stderr,none": 0.03274287914026869
},
"leaderboard_gpqa_extended": {
"alias": " - leaderboard_gpqa_extended",
"acc_norm,none": 0.28205128205128205,
"acc_norm_stderr,none": 0.019275803929950375
},
"leaderboard_gpqa_main": {
"alias": " - leaderboard_gpqa_main",
"acc_norm,none": 0.3169642857142857,
"acc_norm_stderr,none": 0.0220076215848248
},
"leaderboard_ifeval": {
"alias": " - leaderboard_ifeval",
"prompt_level_strict_acc,none": 0.7948243992606284,
"prompt_level_strict_acc_stderr,none": 0.01737807119675965,
"inst_level_strict_acc,none": 0.8585131894484412,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.8151571164510166,
"prompt_level_loose_acc_stderr,none": 0.01670417955850395,
"inst_level_loose_acc,none": 0.8752997601918465,
"inst_level_loose_acc_stderr,none": "N/A"
},
"leaderboard_math_hard": {
"exact_match,none": 0.19637462235649547,
"exact_match_stderr,none": 0.009854609082277298,
"alias": " - leaderboard_math_hard"
},
"leaderboard_math_algebra_hard": {
"alias": " - leaderboard_math_algebra_hard",
"exact_match,none": 0.3811074918566775,
"exact_match_stderr,none": 0.02776327166045321
},
"leaderboard_math_counting_and_prob_hard": {
"alias": " - leaderboard_math_counting_and_prob_hard",
"exact_match,none": 0.11382113821138211,
"exact_match_stderr,none": 0.02875360087323741
},
"leaderboard_math_geometry_hard": {
"alias": " - leaderboard_math_geometry_hard",
"exact_match,none": 0.06060606060606061,
"exact_match_stderr,none": 0.020847129156682045
},
"leaderboard_math_intermediate_algebra_hard": {
"alias": " - leaderboard_math_intermediate_algebra_hard",
"exact_match,none": 0.03214285714285714,
"exact_match_stderr,none": 0.01055955866175321
},
"leaderboard_math_num_theory_hard": {
"alias": " - leaderboard_math_num_theory_hard",
"exact_match,none": 0.12987012987012986,
"exact_match_stderr,none": 0.02717696535667076
},
"leaderboard_math_prealgebra_hard": {
"alias": " - leaderboard_math_prealgebra_hard",
"exact_match,none": 0.46113989637305697,
"exact_match_stderr,none": 0.03597524411734576
},
"leaderboard_math_precalculus_hard": {
"alias": " - leaderboard_math_precalculus_hard",
"exact_match,none": 0.022222222222222223,
"exact_match_stderr,none": 0.01273389971505968
},
"leaderboard_mmlu_pro": {
"alias": " - leaderboard_mmlu_pro",
"acc,none": 0.2826628989361702,
"acc_stderr,none": 0.004105302726114385
},
"leaderboard_musr": {
"acc_norm,none": 0.4166666666666667,
"acc_norm_stderr,none": 0.01768575862518651,
"alias": " - leaderboard_musr"
},
"leaderboard_musr_murder_mysteries": {
"alias": " - leaderboard_musr_murder_mysteries",
"acc_norm,none": 0.528,
"acc_norm_stderr,none": 0.031636489531544396
},
"leaderboard_musr_object_placements": {
"alias": " - leaderboard_musr_object_placements",
"acc_norm,none": 0.31640625,
"acc_norm_stderr,none": 0.02912403057115479
},
"leaderboard_musr_team_allocation": {
"alias": " - leaderboard_musr_team_allocation",
"acc_norm,none": 0.408,
"acc_norm_stderr,none": 0.031145209846548512
}
},
"leaderboard": {
"acc_norm,none": 0.38785834738617203,
"acc_norm_stderr,none": 0.005273329157943381,
"inst_level_loose_acc,none": 0.8752997601918465,
"inst_level_loose_acc_stderr,none": "N/A",
"inst_level_strict_acc,none": 0.8585131894484412,
"inst_level_strict_acc_stderr,none": "N/A",
"exact_match,none": 0.19637462235649547,
"exact_match_stderr,none": 0.009854609082277298,
"acc,none": 0.2826628989361702,
"acc_stderr,none": 0.0041053027261143855,
"prompt_level_strict_acc,none": 0.7948243992606284,
"prompt_level_strict_acc_stderr,none": 0.01737807119675965,
"prompt_level_loose_acc,none": 0.8151571164510166,
"prompt_level_loose_acc_stderr,none": 0.01670417955850395,
"alias": "leaderboard"
},
"leaderboard_bbh": {
"acc_norm,none": 0.4025342822426662,
"acc_norm_stderr,none": 0.006072426154807149,
"alias": " - leaderboard_bbh"
},
"leaderboard_bbh_boolean_expressions": {
"alias": " - leaderboard_bbh_boolean_expressions",
"acc_norm,none": 0.8,
"acc_norm_stderr,none": 0.02534897002097912
},
"leaderboard_bbh_causal_judgement": {
"alias": " - leaderboard_bbh_causal_judgement",
"acc_norm,none": 0.5187165775401069,
"acc_norm_stderr,none": 0.03663608375537843
},
"leaderboard_bbh_date_understanding": {
"alias": " - leaderboard_bbh_date_understanding",
"acc_norm,none": 0.288,
"acc_norm_stderr,none": 0.028697004587398253
},
"leaderboard_bbh_disambiguation_qa": {
"alias": " - leaderboard_bbh_disambiguation_qa",
"acc_norm,none": 0.604,
"acc_norm_stderr,none": 0.030993197854577898
},
"leaderboard_bbh_formal_fallacies": {
"alias": " - leaderboard_bbh_formal_fallacies",
"acc_norm,none": 0.472,
"acc_norm_stderr,none": 0.031636489531544396
},
"leaderboard_bbh_geometric_shapes": {
"alias": " - leaderboard_bbh_geometric_shapes",
"acc_norm,none": 0.328,
"acc_norm_stderr,none": 0.029752391824475363
},
"leaderboard_bbh_hyperbaton": {
"alias": " - leaderboard_bbh_hyperbaton",
"acc_norm,none": 0.536,
"acc_norm_stderr,none": 0.031603975145223735
},
"leaderboard_bbh_logical_deduction_five_objects": {
"alias": " - leaderboard_bbh_logical_deduction_five_objects",
"acc_norm,none": 0.256,
"acc_norm_stderr,none": 0.027657108718204846
},
"leaderboard_bbh_logical_deduction_seven_objects": {
"alias": " - leaderboard_bbh_logical_deduction_seven_objects",
"acc_norm,none": 0.212,
"acc_norm_stderr,none": 0.025901884690541117
},
"leaderboard_bbh_logical_deduction_three_objects": {
"alias": " - leaderboard_bbh_logical_deduction_three_objects",
"acc_norm,none": 0.416,
"acc_norm_stderr,none": 0.031235856237014505
},
"leaderboard_bbh_movie_recommendation": {
"alias": " - leaderboard_bbh_movie_recommendation",
"acc_norm,none": 0.688,
"acc_norm_stderr,none": 0.029361067575219852
},
"leaderboard_bbh_navigate": {
"alias": " - leaderboard_bbh_navigate",
"acc_norm,none": 0.42,
"acc_norm_stderr,none": 0.03127799950463661
},
"leaderboard_bbh_object_counting": {
"alias": " - leaderboard_bbh_object_counting",
"acc_norm,none": 0.288,
"acc_norm_stderr,none": 0.028697004587398253
},
"leaderboard_bbh_penguins_in_a_table": {
"alias": " - leaderboard_bbh_penguins_in_a_table",
"acc_norm,none": 0.3904109589041096,
"acc_norm_stderr,none": 0.040513109165891854
},
"leaderboard_bbh_reasoning_about_colored_objects": {
"alias": " - leaderboard_bbh_reasoning_about_colored_objects",
"acc_norm,none": 0.456,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_ruin_names": {
"alias": " - leaderboard_bbh_ruin_names",
"acc_norm,none": 0.456,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_salient_translation_error_detection": {
"alias": " - leaderboard_bbh_salient_translation_error_detection",
"acc_norm,none": 0.396,
"acc_norm_stderr,none": 0.030993197854577898
},
"leaderboard_bbh_snarks": {
"alias": " - leaderboard_bbh_snarks",
"acc_norm,none": 0.5224719101123596,
"acc_norm_stderr,none": 0.03754432508487191
},
"leaderboard_bbh_sports_understanding": {
"alias": " - leaderboard_bbh_sports_understanding",
"acc_norm,none": 0.496,
"acc_norm_stderr,none": 0.0316851985511992
},
"leaderboard_bbh_temporal_sequences": {
"alias": " - leaderboard_bbh_temporal_sequences",
"acc_norm,none": 0.116,
"acc_norm_stderr,none": 0.020293429803083823
},
"leaderboard_bbh_tracking_shuffled_objects_five_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_five_objects",
"acc_norm,none": 0.136,
"acc_norm_stderr,none": 0.021723342617052086
},
"leaderboard_bbh_tracking_shuffled_objects_seven_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_seven_objects",
"acc_norm,none": 0.144,
"acc_norm_stderr,none": 0.022249407735450245
},
"leaderboard_bbh_tracking_shuffled_objects_three_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_three_objects",
"acc_norm,none": 0.292,
"acc_norm_stderr,none": 0.02881432040220563
},
"leaderboard_bbh_web_of_lies": {
"alias": " - leaderboard_bbh_web_of_lies",
"acc_norm,none": 0.488,
"acc_norm_stderr,none": 0.03167708558254714
},
"leaderboard_gpqa": {
"acc_norm,none": 0.2986577181208054,
"acc_norm_stderr,none": 0.013264655332365493,
"alias": " - leaderboard_gpqa"
},
"leaderboard_gpqa_diamond": {
"alias": " - leaderboard_gpqa_diamond",
"acc_norm,none": 0.30303030303030304,
"acc_norm_stderr,none": 0.03274287914026869
},
"leaderboard_gpqa_extended": {
"alias": " - leaderboard_gpqa_extended",
"acc_norm,none": 0.28205128205128205,
"acc_norm_stderr,none": 0.019275803929950375
},
"leaderboard_gpqa_main": {
"alias": " - leaderboard_gpqa_main",
"acc_norm,none": 0.3169642857142857,
"acc_norm_stderr,none": 0.0220076215848248
},
"leaderboard_ifeval": {
"alias": " - leaderboard_ifeval",
"prompt_level_strict_acc,none": 0.7948243992606284,
"prompt_level_strict_acc_stderr,none": 0.01737807119675965,
"inst_level_strict_acc,none": 0.8585131894484412,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.8151571164510166,
"prompt_level_loose_acc_stderr,none": 0.01670417955850395,
"inst_level_loose_acc,none": 0.8752997601918465,
"inst_level_loose_acc_stderr,none": "N/A"
},
"leaderboard_math_hard": {
"exact_match,none": 0.19637462235649547,
"exact_match_stderr,none": 0.009854609082277298,
"alias": " - leaderboard_math_hard"
},
"leaderboard_math_algebra_hard": {
"alias": " - leaderboard_math_algebra_hard",
"exact_match,none": 0.3811074918566775,
"exact_match_stderr,none": 0.02776327166045321
},
"leaderboard_math_counting_and_prob_hard": {
"alias": " - leaderboard_math_counting_and_prob_hard",
"exact_match,none": 0.11382113821138211,
"exact_match_stderr,none": 0.02875360087323741
},
"leaderboard_math_geometry_hard": {
"alias": " - leaderboard_math_geometry_hard",
"exact_match,none": 0.06060606060606061,
"exact_match_stderr,none": 0.020847129156682045
},
"leaderboard_math_intermediate_algebra_hard": {
"alias": " - leaderboard_math_intermediate_algebra_hard",
"exact_match,none": 0.03214285714285714,
"exact_match_stderr,none": 0.01055955866175321
},
"leaderboard_math_num_theory_hard": {
"alias": " - leaderboard_math_num_theory_hard",
"exact_match,none": 0.12987012987012986,
"exact_match_stderr,none": 0.02717696535667076
},
"leaderboard_math_prealgebra_hard": {
"alias": " - leaderboard_math_prealgebra_hard",
"exact_match,none": 0.46113989637305697,
"exact_match_stderr,none": 0.03597524411734576
},
"leaderboard_math_precalculus_hard": {
"alias": " - leaderboard_math_precalculus_hard",
"exact_match,none": 0.022222222222222223,
"exact_match_stderr,none": 0.01273389971505968
},
"leaderboard_mmlu_pro": {
"alias": " - leaderboard_mmlu_pro",
"acc,none": 0.2826628989361702,
"acc_stderr,none": 0.004105302726114385
},
"leaderboard_musr": {
"acc_norm,none": 0.4166666666666667,
"acc_norm_stderr,none": 0.01768575862518651,
"alias": " - leaderboard_musr"
},
"leaderboard_musr_murder_mysteries": {
"alias": " - leaderboard_musr_murder_mysteries",
"acc_norm,none": 0.528,
"acc_norm_stderr,none": 0.031636489531544396
},
"leaderboard_musr_object_placements": {
"alias": " - leaderboard_musr_object_placements",
"acc_norm,none": 0.31640625,
"acc_norm_stderr,none": 0.02912403057115479
},
"leaderboard_musr_team_allocation": {
"alias": " - leaderboard_musr_team_allocation",
"acc_norm,none": 0.408,
"acc_norm_stderr,none": 0.031145209846548512
}
}
```
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
open-llm-leaderboard/ZeroXClem__Qwen2.5-7B-HomerCreative-Mix-details | open-llm-leaderboard | "2024-11-22T00:35:43Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:json",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T00:32:12Z" | ---
pretty_name: Evaluation run of ZeroXClem/Qwen2.5-7B-HomerCreative-Mix
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [ZeroXClem/Qwen2.5-7B-HomerCreative-Mix](https://huggingface.co/ZeroXClem/Qwen2.5-7B-HomerCreative-Mix)\n\
The dataset is composed of 38 configuration(s), each one corresponding to one of\
\ the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can\
\ be found as a specific split in each configuration, the split being named using\
\ the timestamp of the run.The \"train\" split is always pointing to the latest\
\ results.\n\nAn additional configuration \"results\" store all the aggregated results\
\ of the run.\n\nTo load the details from a run, you can for instance do the following:\n\
```python\nfrom datasets import load_dataset\ndata = load_dataset(\n\t\"open-llm-leaderboard/ZeroXClem__Qwen2.5-7B-HomerCreative-Mix-details\"\
,\n\tname=\"ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_boolean_expressions\"\
,\n\tsplit=\"latest\"\n)\n```\n\n## Latest results\n\nThese are the [latest results\
\ from run 2024-11-22T00-32-11.693490](https://huggingface.co/datasets/open-llm-leaderboard/ZeroXClem__Qwen2.5-7B-HomerCreative-Mix-details/blob/main/ZeroXClem__Qwen2.5-7B-HomerCreative-Mix/results_2024-11-22T00-32-11.693490.json)\
\ (note that there might be results for other tasks in the repos if successive evals\
\ didn't cover the same tasks. You find each in the results and the \"latest\" split\
\ for each eval):\n\n```python\n{\n \"all\": {\n \"leaderboard\": {\n\
\ \"acc,none\": 0.4447307180851064,\n \"acc_stderr,none\"\
: 0.004530535363926051,\n \"inst_level_loose_acc,none\": 0.8285371702637889,\n\
\ \"inst_level_loose_acc_stderr,none\": \"N/A\",\n \"inst_level_strict_acc,none\"\
: 0.8165467625899281,\n \"inst_level_strict_acc_stderr,none\": \"N/A\"\
,\n \"exact_match,none\": 0.32326283987915405,\n \"exact_match_stderr,none\"\
: 0.011761711608666757,\n \"prompt_level_loose_acc,none\": 0.7634011090573013,\n\
\ \"prompt_level_loose_acc_stderr,none\": 0.018288827582625598,\n \
\ \"acc_norm,none\": 0.5014917628745622,\n \"acc_norm_stderr,none\"\
: 0.005340969872084893,\n \"prompt_level_strict_acc,none\": 0.7504621072088724,\n\
\ \"prompt_level_strict_acc_stderr,none\": 0.018622404509805804,\n \
\ \"alias\": \"leaderboard\"\n },\n \"leaderboard_bbh\":\
\ {\n \"acc_norm,none\": 0.5521610831452872,\n \"acc_norm_stderr,none\"\
: 0.006179016832046109,\n \"alias\": \" - leaderboard_bbh\"\n \
\ },\n \"leaderboard_bbh_boolean_expressions\": {\n \"alias\"\
: \" - leaderboard_bbh_boolean_expressions\",\n \"acc_norm,none\": 0.86,\n\
\ \"acc_norm_stderr,none\": 0.021989409645240245\n },\n \
\ \"leaderboard_bbh_causal_judgement\": {\n \"alias\": \" - leaderboard_bbh_causal_judgement\"\
,\n \"acc_norm,none\": 0.5668449197860963,\n \"acc_norm_stderr,none\"\
: 0.03633267411102591\n },\n \"leaderboard_bbh_date_understanding\"\
: {\n \"alias\": \" - leaderboard_bbh_date_understanding\",\n \
\ \"acc_norm,none\": 0.588,\n \"acc_norm_stderr,none\": 0.031191596026022818\n\
\ },\n \"leaderboard_bbh_disambiguation_qa\": {\n \"alias\"\
: \" - leaderboard_bbh_disambiguation_qa\",\n \"acc_norm,none\": 0.632,\n\
\ \"acc_norm_stderr,none\": 0.03056207062099311\n },\n \
\ \"leaderboard_bbh_formal_fallacies\": {\n \"alias\": \" - leaderboard_bbh_formal_fallacies\"\
,\n \"acc_norm,none\": 0.604,\n \"acc_norm_stderr,none\":\
\ 0.030993197854577898\n },\n \"leaderboard_bbh_geometric_shapes\"\
: {\n \"alias\": \" - leaderboard_bbh_geometric_shapes\",\n \
\ \"acc_norm,none\": 0.544,\n \"acc_norm_stderr,none\": 0.031563285061213475\n\
\ },\n \"leaderboard_bbh_hyperbaton\": {\n \"alias\": \"\
\ - leaderboard_bbh_hyperbaton\",\n \"acc_norm,none\": 0.556,\n \
\ \"acc_norm_stderr,none\": 0.03148684942554571\n },\n \"leaderboard_bbh_logical_deduction_five_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_five_objects\"\
,\n \"acc_norm,none\": 0.528,\n \"acc_norm_stderr,none\":\
\ 0.031636489531544396\n },\n \"leaderboard_bbh_logical_deduction_seven_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_seven_objects\"\
,\n \"acc_norm,none\": 0.468,\n \"acc_norm_stderr,none\":\
\ 0.03162125257572558\n },\n \"leaderboard_bbh_logical_deduction_three_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_three_objects\"\
,\n \"acc_norm,none\": 0.784,\n \"acc_norm_stderr,none\":\
\ 0.02607865766373279\n },\n \"leaderboard_bbh_movie_recommendation\"\
: {\n \"alias\": \" - leaderboard_bbh_movie_recommendation\",\n \
\ \"acc_norm,none\": 0.632,\n \"acc_norm_stderr,none\": 0.03056207062099311\n\
\ },\n \"leaderboard_bbh_navigate\": {\n \"alias\": \"\
\ - leaderboard_bbh_navigate\",\n \"acc_norm,none\": 0.7,\n \
\ \"acc_norm_stderr,none\": 0.029040893477575786\n },\n \"leaderboard_bbh_object_counting\"\
: {\n \"alias\": \" - leaderboard_bbh_object_counting\",\n \
\ \"acc_norm,none\": 0.36,\n \"acc_norm_stderr,none\": 0.03041876402517494\n\
\ },\n \"leaderboard_bbh_penguins_in_a_table\": {\n \"\
alias\": \" - leaderboard_bbh_penguins_in_a_table\",\n \"acc_norm,none\"\
: 0.5958904109589042,\n \"acc_norm_stderr,none\": 0.0407519857003932\n\
\ },\n \"leaderboard_bbh_reasoning_about_colored_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_reasoning_about_colored_objects\",\n\
\ \"acc_norm,none\": 0.628,\n \"acc_norm_stderr,none\": 0.03063032594455827\n\
\ },\n \"leaderboard_bbh_ruin_names\": {\n \"alias\": \"\
\ - leaderboard_bbh_ruin_names\",\n \"acc_norm,none\": 0.58,\n \
\ \"acc_norm_stderr,none\": 0.03127799950463661\n },\n \"leaderboard_bbh_salient_translation_error_detection\"\
: {\n \"alias\": \" - leaderboard_bbh_salient_translation_error_detection\"\
,\n \"acc_norm,none\": 0.536,\n \"acc_norm_stderr,none\":\
\ 0.031603975145223735\n },\n \"leaderboard_bbh_snarks\": {\n \
\ \"alias\": \" - leaderboard_bbh_snarks\",\n \"acc_norm,none\"\
: 0.6966292134831461,\n \"acc_norm_stderr,none\": 0.03455421944400101\n\
\ },\n \"leaderboard_bbh_sports_understanding\": {\n \"\
alias\": \" - leaderboard_bbh_sports_understanding\",\n \"acc_norm,none\"\
: 0.74,\n \"acc_norm_stderr,none\": 0.027797315752644335\n },\n\
\ \"leaderboard_bbh_temporal_sequences\": {\n \"alias\": \" -\
\ leaderboard_bbh_temporal_sequences\",\n \"acc_norm,none\": 0.548,\n\
\ \"acc_norm_stderr,none\": 0.03153986449255664\n },\n \
\ \"leaderboard_bbh_tracking_shuffled_objects_five_objects\": {\n \"\
alias\": \" - leaderboard_bbh_tracking_shuffled_objects_five_objects\",\n \
\ \"acc_norm,none\": 0.212,\n \"acc_norm_stderr,none\": 0.025901884690541117\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
,\n \"acc_norm,none\": 0.168,\n \"acc_norm_stderr,none\":\
\ 0.023692813205492536\n },\n \"leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
,\n \"acc_norm,none\": 0.24,\n \"acc_norm_stderr,none\": 0.027065293652238982\n\
\ },\n \"leaderboard_bbh_web_of_lies\": {\n \"alias\":\
\ \" - leaderboard_bbh_web_of_lies\",\n \"acc_norm,none\": 0.548,\n\
\ \"acc_norm_stderr,none\": 0.03153986449255664\n },\n \
\ \"leaderboard_gpqa\": {\n \"acc_norm,none\": 0.29949664429530204,\n\
\ \"acc_norm_stderr,none\": 0.013278959534799928,\n \"alias\"\
: \" - leaderboard_gpqa\"\n },\n \"leaderboard_gpqa_diamond\": {\n\
\ \"alias\": \" - leaderboard_gpqa_diamond\",\n \"acc_norm,none\"\
: 0.2878787878787879,\n \"acc_norm_stderr,none\": 0.03225883512300998\n\
\ },\n \"leaderboard_gpqa_extended\": {\n \"alias\": \"\
\ - leaderboard_gpqa_extended\",\n \"acc_norm,none\": 0.29120879120879123,\n\
\ \"acc_norm_stderr,none\": 0.019460910297288078\n },\n \
\ \"leaderboard_gpqa_main\": {\n \"alias\": \" - leaderboard_gpqa_main\"\
,\n \"acc_norm,none\": 0.31473214285714285,\n \"acc_norm_stderr,none\"\
: 0.021965797142222607\n },\n \"leaderboard_ifeval\": {\n \
\ \"alias\": \" - leaderboard_ifeval\",\n \"prompt_level_strict_acc,none\"\
: 0.7504621072088724,\n \"prompt_level_strict_acc_stderr,none\": 0.018622404509805804,\n\
\ \"inst_level_strict_acc,none\": 0.8165467625899281,\n \"\
inst_level_strict_acc_stderr,none\": \"N/A\",\n \"prompt_level_loose_acc,none\"\
: 0.7634011090573013,\n \"prompt_level_loose_acc_stderr,none\": 0.018288827582625598,\n\
\ \"inst_level_loose_acc,none\": 0.8285371702637889,\n \"\
inst_level_loose_acc_stderr,none\": \"N/A\"\n },\n \"leaderboard_math_hard\"\
: {\n \"exact_match,none\": 0.32326283987915405,\n \"exact_match_stderr,none\"\
: 0.011761711608666757,\n \"alias\": \" - leaderboard_math_hard\"\n \
\ },\n \"leaderboard_math_algebra_hard\": {\n \"alias\"\
: \" - leaderboard_math_algebra_hard\",\n \"exact_match,none\": 0.6091205211726385,\n\
\ \"exact_match_stderr,none\": 0.027894098976471507\n },\n \
\ \"leaderboard_math_counting_and_prob_hard\": {\n \"alias\": \"\
\ - leaderboard_math_counting_and_prob_hard\",\n \"exact_match,none\"\
: 0.2032520325203252,\n \"exact_match_stderr,none\": 0.03643325851749072\n\
\ },\n \"leaderboard_math_geometry_hard\": {\n \"alias\"\
: \" - leaderboard_math_geometry_hard\",\n \"exact_match,none\": 0.20454545454545456,\n\
\ \"exact_match_stderr,none\": 0.03524251981380333\n },\n \
\ \"leaderboard_math_intermediate_algebra_hard\": {\n \"alias\": \"\
\ - leaderboard_math_intermediate_algebra_hard\",\n \"exact_match,none\"\
: 0.1392857142857143,\n \"exact_match_stderr,none\": 0.02072911170255923\n\
\ },\n \"leaderboard_math_num_theory_hard\": {\n \"alias\"\
: \" - leaderboard_math_num_theory_hard\",\n \"exact_match,none\": 0.3051948051948052,\n\
\ \"exact_match_stderr,none\": 0.0372284008596668\n },\n \
\ \"leaderboard_math_prealgebra_hard\": {\n \"alias\": \" - leaderboard_math_prealgebra_hard\"\
,\n \"exact_match,none\": 0.46113989637305697,\n \"exact_match_stderr,none\"\
: 0.03597524411734576\n },\n \"leaderboard_math_precalculus_hard\"\
: {\n \"alias\": \" - leaderboard_math_precalculus_hard\",\n \
\ \"exact_match,none\": 0.1037037037037037,\n \"exact_match_stderr,none\"\
: 0.02633725661744443\n },\n \"leaderboard_mmlu_pro\": {\n \
\ \"alias\": \" - leaderboard_mmlu_pro\",\n \"acc,none\": 0.4447307180851064,\n\
\ \"acc_stderr,none\": 0.004530535363926052\n },\n \"leaderboard_musr\"\
: {\n \"acc_norm,none\": 0.43386243386243384,\n \"acc_norm_stderr,none\"\
: 0.01762618265060195,\n \"alias\": \" - leaderboard_musr\"\n \
\ },\n \"leaderboard_musr_murder_mysteries\": {\n \"alias\": \"\
\ - leaderboard_musr_murder_mysteries\",\n \"acc_norm,none\": 0.56,\n\
\ \"acc_norm_stderr,none\": 0.03145724452223569\n },\n \
\ \"leaderboard_musr_object_placements\": {\n \"alias\": \" - leaderboard_musr_object_placements\"\
,\n \"acc_norm,none\": 0.296875,\n \"acc_norm_stderr,none\"\
: 0.028610997088737832\n },\n \"leaderboard_musr_team_allocation\"\
: {\n \"alias\": \" - leaderboard_musr_team_allocation\",\n \
\ \"acc_norm,none\": 0.448,\n \"acc_norm_stderr,none\": 0.03151438761115349\n\
\ }\n },\n \"leaderboard\": {\n \"acc,none\": 0.4447307180851064,\n\
\ \"acc_stderr,none\": 0.004530535363926051,\n \"inst_level_loose_acc,none\"\
: 0.8285371702637889,\n \"inst_level_loose_acc_stderr,none\": \"N/A\",\n\
\ \"inst_level_strict_acc,none\": 0.8165467625899281,\n \"inst_level_strict_acc_stderr,none\"\
: \"N/A\",\n \"exact_match,none\": 0.32326283987915405,\n \"exact_match_stderr,none\"\
: 0.011761711608666757,\n \"prompt_level_loose_acc,none\": 0.7634011090573013,\n\
\ \"prompt_level_loose_acc_stderr,none\": 0.018288827582625598,\n \
\ \"acc_norm,none\": 0.5014917628745622,\n \"acc_norm_stderr,none\": 0.005340969872084893,\n\
\ \"prompt_level_strict_acc,none\": 0.7504621072088724,\n \"prompt_level_strict_acc_stderr,none\"\
: 0.018622404509805804,\n \"alias\": \"leaderboard\"\n },\n \"leaderboard_bbh\"\
: {\n \"acc_norm,none\": 0.5521610831452872,\n \"acc_norm_stderr,none\"\
: 0.006179016832046109,\n \"alias\": \" - leaderboard_bbh\"\n },\n \
\ \"leaderboard_bbh_boolean_expressions\": {\n \"alias\": \" - leaderboard_bbh_boolean_expressions\"\
,\n \"acc_norm,none\": 0.86,\n \"acc_norm_stderr,none\": 0.021989409645240245\n\
\ },\n \"leaderboard_bbh_causal_judgement\": {\n \"alias\": \" - leaderboard_bbh_causal_judgement\"\
,\n \"acc_norm,none\": 0.5668449197860963,\n \"acc_norm_stderr,none\"\
: 0.03633267411102591\n },\n \"leaderboard_bbh_date_understanding\": {\n \
\ \"alias\": \" - leaderboard_bbh_date_understanding\",\n \"acc_norm,none\"\
: 0.588,\n \"acc_norm_stderr,none\": 0.031191596026022818\n },\n \"\
leaderboard_bbh_disambiguation_qa\": {\n \"alias\": \" - leaderboard_bbh_disambiguation_qa\"\
,\n \"acc_norm,none\": 0.632,\n \"acc_norm_stderr,none\": 0.03056207062099311\n\
\ },\n \"leaderboard_bbh_formal_fallacies\": {\n \"alias\": \" - leaderboard_bbh_formal_fallacies\"\
,\n \"acc_norm,none\": 0.604,\n \"acc_norm_stderr,none\": 0.030993197854577898\n\
\ },\n \"leaderboard_bbh_geometric_shapes\": {\n \"alias\": \" - leaderboard_bbh_geometric_shapes\"\
,\n \"acc_norm,none\": 0.544,\n \"acc_norm_stderr,none\": 0.031563285061213475\n\
\ },\n \"leaderboard_bbh_hyperbaton\": {\n \"alias\": \" - leaderboard_bbh_hyperbaton\"\
,\n \"acc_norm,none\": 0.556,\n \"acc_norm_stderr,none\": 0.03148684942554571\n\
\ },\n \"leaderboard_bbh_logical_deduction_five_objects\": {\n \"alias\"\
: \" - leaderboard_bbh_logical_deduction_five_objects\",\n \"acc_norm,none\"\
: 0.528,\n \"acc_norm_stderr,none\": 0.031636489531544396\n },\n \"\
leaderboard_bbh_logical_deduction_seven_objects\": {\n \"alias\": \" - leaderboard_bbh_logical_deduction_seven_objects\"\
,\n \"acc_norm,none\": 0.468,\n \"acc_norm_stderr,none\": 0.03162125257572558\n\
\ },\n \"leaderboard_bbh_logical_deduction_three_objects\": {\n \"\
alias\": \" - leaderboard_bbh_logical_deduction_three_objects\",\n \"acc_norm,none\"\
: 0.784,\n \"acc_norm_stderr,none\": 0.02607865766373279\n },\n \"\
leaderboard_bbh_movie_recommendation\": {\n \"alias\": \" - leaderboard_bbh_movie_recommendation\"\
,\n \"acc_norm,none\": 0.632,\n \"acc_norm_stderr,none\": 0.03056207062099311\n\
\ },\n \"leaderboard_bbh_navigate\": {\n \"alias\": \" - leaderboard_bbh_navigate\"\
,\n \"acc_norm,none\": 0.7,\n \"acc_norm_stderr,none\": 0.029040893477575786\n\
\ },\n \"leaderboard_bbh_object_counting\": {\n \"alias\": \" - leaderboard_bbh_object_counting\"\
,\n \"acc_norm,none\": 0.36,\n \"acc_norm_stderr,none\": 0.03041876402517494\n\
\ },\n \"leaderboard_bbh_penguins_in_a_table\": {\n \"alias\": \" \
\ - leaderboard_bbh_penguins_in_a_table\",\n \"acc_norm,none\": 0.5958904109589042,\n\
\ \"acc_norm_stderr,none\": 0.0407519857003932\n },\n \"leaderboard_bbh_reasoning_about_colored_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_reasoning_about_colored_objects\"\
,\n \"acc_norm,none\": 0.628,\n \"acc_norm_stderr,none\": 0.03063032594455827\n\
\ },\n \"leaderboard_bbh_ruin_names\": {\n \"alias\": \" - leaderboard_bbh_ruin_names\"\
,\n \"acc_norm,none\": 0.58,\n \"acc_norm_stderr,none\": 0.03127799950463661\n\
\ },\n \"leaderboard_bbh_salient_translation_error_detection\": {\n \
\ \"alias\": \" - leaderboard_bbh_salient_translation_error_detection\",\n \
\ \"acc_norm,none\": 0.536,\n \"acc_norm_stderr,none\": 0.031603975145223735\n\
\ },\n \"leaderboard_bbh_snarks\": {\n \"alias\": \" - leaderboard_bbh_snarks\"\
,\n \"acc_norm,none\": 0.6966292134831461,\n \"acc_norm_stderr,none\"\
: 0.03455421944400101\n },\n \"leaderboard_bbh_sports_understanding\": {\n\
\ \"alias\": \" - leaderboard_bbh_sports_understanding\",\n \"acc_norm,none\"\
: 0.74,\n \"acc_norm_stderr,none\": 0.027797315752644335\n },\n \"\
leaderboard_bbh_temporal_sequences\": {\n \"alias\": \" - leaderboard_bbh_temporal_sequences\"\
,\n \"acc_norm,none\": 0.548,\n \"acc_norm_stderr,none\": 0.03153986449255664\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_five_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_five_objects\"\
,\n \"acc_norm,none\": 0.212,\n \"acc_norm_stderr,none\": 0.025901884690541117\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_seven_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
,\n \"acc_norm,none\": 0.168,\n \"acc_norm_stderr,none\": 0.023692813205492536\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_three_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
,\n \"acc_norm,none\": 0.24,\n \"acc_norm_stderr,none\": 0.027065293652238982\n\
\ },\n \"leaderboard_bbh_web_of_lies\": {\n \"alias\": \" - leaderboard_bbh_web_of_lies\"\
,\n \"acc_norm,none\": 0.548,\n \"acc_norm_stderr,none\": 0.03153986449255664\n\
\ },\n \"leaderboard_gpqa\": {\n \"acc_norm,none\": 0.29949664429530204,\n\
\ \"acc_norm_stderr,none\": 0.013278959534799928,\n \"alias\": \"\
\ - leaderboard_gpqa\"\n },\n \"leaderboard_gpqa_diamond\": {\n \"\
alias\": \" - leaderboard_gpqa_diamond\",\n \"acc_norm,none\": 0.2878787878787879,\n\
\ \"acc_norm_stderr,none\": 0.03225883512300998\n },\n \"leaderboard_gpqa_extended\"\
: {\n \"alias\": \" - leaderboard_gpqa_extended\",\n \"acc_norm,none\"\
: 0.29120879120879123,\n \"acc_norm_stderr,none\": 0.019460910297288078\n\
\ },\n \"leaderboard_gpqa_main\": {\n \"alias\": \" - leaderboard_gpqa_main\"\
,\n \"acc_norm,none\": 0.31473214285714285,\n \"acc_norm_stderr,none\"\
: 0.021965797142222607\n },\n \"leaderboard_ifeval\": {\n \"alias\"\
: \" - leaderboard_ifeval\",\n \"prompt_level_strict_acc,none\": 0.7504621072088724,\n\
\ \"prompt_level_strict_acc_stderr,none\": 0.018622404509805804,\n \
\ \"inst_level_strict_acc,none\": 0.8165467625899281,\n \"inst_level_strict_acc_stderr,none\"\
: \"N/A\",\n \"prompt_level_loose_acc,none\": 0.7634011090573013,\n \
\ \"prompt_level_loose_acc_stderr,none\": 0.018288827582625598,\n \"inst_level_loose_acc,none\"\
: 0.8285371702637889,\n \"inst_level_loose_acc_stderr,none\": \"N/A\"\n \
\ },\n \"leaderboard_math_hard\": {\n \"exact_match,none\": 0.32326283987915405,\n\
\ \"exact_match_stderr,none\": 0.011761711608666757,\n \"alias\":\
\ \" - leaderboard_math_hard\"\n },\n \"leaderboard_math_algebra_hard\": {\n\
\ \"alias\": \" - leaderboard_math_algebra_hard\",\n \"exact_match,none\"\
: 0.6091205211726385,\n \"exact_match_stderr,none\": 0.027894098976471507\n\
\ },\n \"leaderboard_math_counting_and_prob_hard\": {\n \"alias\":\
\ \" - leaderboard_math_counting_and_prob_hard\",\n \"exact_match,none\"\
: 0.2032520325203252,\n \"exact_match_stderr,none\": 0.03643325851749072\n\
\ },\n \"leaderboard_math_geometry_hard\": {\n \"alias\": \" - leaderboard_math_geometry_hard\"\
,\n \"exact_match,none\": 0.20454545454545456,\n \"exact_match_stderr,none\"\
: 0.03524251981380333\n },\n \"leaderboard_math_intermediate_algebra_hard\"\
: {\n \"alias\": \" - leaderboard_math_intermediate_algebra_hard\",\n \
\ \"exact_match,none\": 0.1392857142857143,\n \"exact_match_stderr,none\"\
: 0.02072911170255923\n },\n \"leaderboard_math_num_theory_hard\": {\n \
\ \"alias\": \" - leaderboard_math_num_theory_hard\",\n \"exact_match,none\"\
: 0.3051948051948052,\n \"exact_match_stderr,none\": 0.0372284008596668\n\
\ },\n \"leaderboard_math_prealgebra_hard\": {\n \"alias\": \" - leaderboard_math_prealgebra_hard\"\
,\n \"exact_match,none\": 0.46113989637305697,\n \"exact_match_stderr,none\"\
: 0.03597524411734576\n },\n \"leaderboard_math_precalculus_hard\": {\n \
\ \"alias\": \" - leaderboard_math_precalculus_hard\",\n \"exact_match,none\"\
: 0.1037037037037037,\n \"exact_match_stderr,none\": 0.02633725661744443\n\
\ },\n \"leaderboard_mmlu_pro\": {\n \"alias\": \" - leaderboard_mmlu_pro\"\
,\n \"acc,none\": 0.4447307180851064,\n \"acc_stderr,none\": 0.004530535363926052\n\
\ },\n \"leaderboard_musr\": {\n \"acc_norm,none\": 0.43386243386243384,\n\
\ \"acc_norm_stderr,none\": 0.01762618265060195,\n \"alias\": \" -\
\ leaderboard_musr\"\n },\n \"leaderboard_musr_murder_mysteries\": {\n \
\ \"alias\": \" - leaderboard_musr_murder_mysteries\",\n \"acc_norm,none\"\
: 0.56,\n \"acc_norm_stderr,none\": 0.03145724452223569\n },\n \"leaderboard_musr_object_placements\"\
: {\n \"alias\": \" - leaderboard_musr_object_placements\",\n \"\
acc_norm,none\": 0.296875,\n \"acc_norm_stderr,none\": 0.028610997088737832\n\
\ },\n \"leaderboard_musr_team_allocation\": {\n \"alias\": \" - leaderboard_musr_team_allocation\"\
,\n \"acc_norm,none\": 0.448,\n \"acc_norm_stderr,none\": 0.03151438761115349\n\
\ }\n}\n```"
repo_url: https://huggingface.co/ZeroXClem/Qwen2.5-7B-HomerCreative-Mix
leaderboard_url: ''
point_of_contact: ''
configs:
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_boolean_expressions
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_boolean_expressions_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_boolean_expressions_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_causal_judgement
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_causal_judgement_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_causal_judgement_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_date_understanding
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_date_understanding_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_date_understanding_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_disambiguation_qa
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_disambiguation_qa_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_disambiguation_qa_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_formal_fallacies
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_formal_fallacies_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_formal_fallacies_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_geometric_shapes
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_geometric_shapes_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_geometric_shapes_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_hyperbaton
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_hyperbaton_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_hyperbaton_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_logical_deduction_five_objects
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_logical_deduction_five_objects_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_five_objects_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_logical_deduction_seven_objects
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_logical_deduction_seven_objects_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_seven_objects_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_logical_deduction_three_objects
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_logical_deduction_three_objects_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_three_objects_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_movie_recommendation
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_movie_recommendation_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_movie_recommendation_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_navigate
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_navigate_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_navigate_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_object_counting
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_object_counting_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_object_counting_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_penguins_in_a_table
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_penguins_in_a_table_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_penguins_in_a_table_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_reasoning_about_colored_objects
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_reasoning_about_colored_objects_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_reasoning_about_colored_objects_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_ruin_names
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_ruin_names_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_ruin_names_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_salient_translation_error_detection
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_salient_translation_error_detection_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_salient_translation_error_detection_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_snarks
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_snarks_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_snarks_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_sports_understanding
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_sports_understanding_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_sports_understanding_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_temporal_sequences
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_temporal_sequences_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_temporal_sequences_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_tracking_shuffled_objects_five_objects
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_five_objects_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_five_objects_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_tracking_shuffled_objects_seven_objects
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_seven_objects_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_seven_objects_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_tracking_shuffled_objects_three_objects
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_three_objects_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_three_objects_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_web_of_lies
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_bbh_web_of_lies_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_web_of_lies_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_gpqa_diamond
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_gpqa_diamond_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_diamond_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_gpqa_extended
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_gpqa_extended_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_extended_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_gpqa_main
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_gpqa_main_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_main_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_ifeval
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_ifeval_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_ifeval_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_math_algebra_hard
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_math_algebra_hard_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_algebra_hard_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_math_counting_and_prob_hard
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_math_counting_and_prob_hard_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_counting_and_prob_hard_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_math_geometry_hard
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_math_geometry_hard_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_geometry_hard_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_math_intermediate_algebra_hard
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_math_intermediate_algebra_hard_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_intermediate_algebra_hard_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_math_num_theory_hard
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_math_num_theory_hard_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_num_theory_hard_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_math_prealgebra_hard
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_math_prealgebra_hard_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_prealgebra_hard_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_math_precalculus_hard
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_math_precalculus_hard_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_precalculus_hard_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_mmlu_pro
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_mmlu_pro_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_mmlu_pro_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_musr_murder_mysteries
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_musr_murder_mysteries_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_murder_mysteries_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_musr_object_placements
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_musr_object_placements_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_object_placements_2024-11-22T00-32-11.693490.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_musr_team_allocation
data_files:
- split: 2024_11_22T00_32_11.693490
path:
- '**/samples_leaderboard_musr_team_allocation_2024-11-22T00-32-11.693490.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_team_allocation_2024-11-22T00-32-11.693490.jsonl'
---
# Dataset Card for Evaluation run of ZeroXClem/Qwen2.5-7B-HomerCreative-Mix
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [ZeroXClem/Qwen2.5-7B-HomerCreative-Mix](https://huggingface.co/ZeroXClem/Qwen2.5-7B-HomerCreative-Mix)
The dataset is composed of 38 configurations, each one corresponding to one of the evaluated tasks.
The dataset has been created from 1 run. Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run.
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset(
"open-llm-leaderboard/ZeroXClem__Qwen2.5-7B-HomerCreative-Mix-details",
name="ZeroXClem__Qwen2.5-7B-HomerCreative-Mix__leaderboard_bbh_boolean_expressions",
split="latest"
)
```
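If you want to enumerate every available configuration of this details repository (one per evaluated task, plus the aggregated "results" configuration), a minimal sketch using the standard `datasets` helper `get_dataset_config_names` is shown below; the configuration names it returns are the ones declared in the YAML header above.
```python
from datasets import get_dataset_config_names

# Enumerate every configuration exposed by this details repository
# (one configuration per evaluated task, as listed in the YAML header above).
configs = get_dataset_config_names(
    "open-llm-leaderboard/ZeroXClem__Qwen2.5-7B-HomerCreative-Mix-details"
)
print(f"{len(configs)} configurations found")
for name in sorted(configs):
    print(name)
```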
## Latest results
These are the [latest results from run 2024-11-22T00-32-11.693490](https://huggingface.co/datasets/open-llm-leaderboard/ZeroXClem__Qwen2.5-7B-HomerCreative-Mix-details/blob/main/ZeroXClem__Qwen2.5-7B-HomerCreative-Mix/results_2024-11-22T00-32-11.693490.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each of them in the results files and in the "latest" split for each eval):
```python
{
"all": {
"leaderboard": {
"acc,none": 0.4447307180851064,
"acc_stderr,none": 0.004530535363926051,
"inst_level_loose_acc,none": 0.8285371702637889,
"inst_level_loose_acc_stderr,none": "N/A",
"inst_level_strict_acc,none": 0.8165467625899281,
"inst_level_strict_acc_stderr,none": "N/A",
"exact_match,none": 0.32326283987915405,
"exact_match_stderr,none": 0.011761711608666757,
"prompt_level_loose_acc,none": 0.7634011090573013,
"prompt_level_loose_acc_stderr,none": 0.018288827582625598,
"acc_norm,none": 0.5014917628745622,
"acc_norm_stderr,none": 0.005340969872084893,
"prompt_level_strict_acc,none": 0.7504621072088724,
"prompt_level_strict_acc_stderr,none": 0.018622404509805804,
"alias": "leaderboard"
},
"leaderboard_bbh": {
"acc_norm,none": 0.5521610831452872,
"acc_norm_stderr,none": 0.006179016832046109,
"alias": " - leaderboard_bbh"
},
"leaderboard_bbh_boolean_expressions": {
"alias": " - leaderboard_bbh_boolean_expressions",
"acc_norm,none": 0.86,
"acc_norm_stderr,none": 0.021989409645240245
},
"leaderboard_bbh_causal_judgement": {
"alias": " - leaderboard_bbh_causal_judgement",
"acc_norm,none": 0.5668449197860963,
"acc_norm_stderr,none": 0.03633267411102591
},
"leaderboard_bbh_date_understanding": {
"alias": " - leaderboard_bbh_date_understanding",
"acc_norm,none": 0.588,
"acc_norm_stderr,none": 0.031191596026022818
},
"leaderboard_bbh_disambiguation_qa": {
"alias": " - leaderboard_bbh_disambiguation_qa",
"acc_norm,none": 0.632,
"acc_norm_stderr,none": 0.03056207062099311
},
"leaderboard_bbh_formal_fallacies": {
"alias": " - leaderboard_bbh_formal_fallacies",
"acc_norm,none": 0.604,
"acc_norm_stderr,none": 0.030993197854577898
},
"leaderboard_bbh_geometric_shapes": {
"alias": " - leaderboard_bbh_geometric_shapes",
"acc_norm,none": 0.544,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_hyperbaton": {
"alias": " - leaderboard_bbh_hyperbaton",
"acc_norm,none": 0.556,
"acc_norm_stderr,none": 0.03148684942554571
},
"leaderboard_bbh_logical_deduction_five_objects": {
"alias": " - leaderboard_bbh_logical_deduction_five_objects",
"acc_norm,none": 0.528,
"acc_norm_stderr,none": 0.031636489531544396
},
"leaderboard_bbh_logical_deduction_seven_objects": {
"alias": " - leaderboard_bbh_logical_deduction_seven_objects",
"acc_norm,none": 0.468,
"acc_norm_stderr,none": 0.03162125257572558
},
"leaderboard_bbh_logical_deduction_three_objects": {
"alias": " - leaderboard_bbh_logical_deduction_three_objects",
"acc_norm,none": 0.784,
"acc_norm_stderr,none": 0.02607865766373279
},
"leaderboard_bbh_movie_recommendation": {
"alias": " - leaderboard_bbh_movie_recommendation",
"acc_norm,none": 0.632,
"acc_norm_stderr,none": 0.03056207062099311
},
"leaderboard_bbh_navigate": {
"alias": " - leaderboard_bbh_navigate",
"acc_norm,none": 0.7,
"acc_norm_stderr,none": 0.029040893477575786
},
"leaderboard_bbh_object_counting": {
"alias": " - leaderboard_bbh_object_counting",
"acc_norm,none": 0.36,
"acc_norm_stderr,none": 0.03041876402517494
},
"leaderboard_bbh_penguins_in_a_table": {
"alias": " - leaderboard_bbh_penguins_in_a_table",
"acc_norm,none": 0.5958904109589042,
"acc_norm_stderr,none": 0.0407519857003932
},
"leaderboard_bbh_reasoning_about_colored_objects": {
"alias": " - leaderboard_bbh_reasoning_about_colored_objects",
"acc_norm,none": 0.628,
"acc_norm_stderr,none": 0.03063032594455827
},
"leaderboard_bbh_ruin_names": {
"alias": " - leaderboard_bbh_ruin_names",
"acc_norm,none": 0.58,
"acc_norm_stderr,none": 0.03127799950463661
},
"leaderboard_bbh_salient_translation_error_detection": {
"alias": " - leaderboard_bbh_salient_translation_error_detection",
"acc_norm,none": 0.536,
"acc_norm_stderr,none": 0.031603975145223735
},
"leaderboard_bbh_snarks": {
"alias": " - leaderboard_bbh_snarks",
"acc_norm,none": 0.6966292134831461,
"acc_norm_stderr,none": 0.03455421944400101
},
"leaderboard_bbh_sports_understanding": {
"alias": " - leaderboard_bbh_sports_understanding",
"acc_norm,none": 0.74,
"acc_norm_stderr,none": 0.027797315752644335
},
"leaderboard_bbh_temporal_sequences": {
"alias": " - leaderboard_bbh_temporal_sequences",
"acc_norm,none": 0.548,
"acc_norm_stderr,none": 0.03153986449255664
},
"leaderboard_bbh_tracking_shuffled_objects_five_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_five_objects",
"acc_norm,none": 0.212,
"acc_norm_stderr,none": 0.025901884690541117
},
"leaderboard_bbh_tracking_shuffled_objects_seven_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_seven_objects",
"acc_norm,none": 0.168,
"acc_norm_stderr,none": 0.023692813205492536
},
"leaderboard_bbh_tracking_shuffled_objects_three_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_three_objects",
"acc_norm,none": 0.24,
"acc_norm_stderr,none": 0.027065293652238982
},
"leaderboard_bbh_web_of_lies": {
"alias": " - leaderboard_bbh_web_of_lies",
"acc_norm,none": 0.548,
"acc_norm_stderr,none": 0.03153986449255664
},
"leaderboard_gpqa": {
"acc_norm,none": 0.29949664429530204,
"acc_norm_stderr,none": 0.013278959534799928,
"alias": " - leaderboard_gpqa"
},
"leaderboard_gpqa_diamond": {
"alias": " - leaderboard_gpqa_diamond",
"acc_norm,none": 0.2878787878787879,
"acc_norm_stderr,none": 0.03225883512300998
},
"leaderboard_gpqa_extended": {
"alias": " - leaderboard_gpqa_extended",
"acc_norm,none": 0.29120879120879123,
"acc_norm_stderr,none": 0.019460910297288078
},
"leaderboard_gpqa_main": {
"alias": " - leaderboard_gpqa_main",
"acc_norm,none": 0.31473214285714285,
"acc_norm_stderr,none": 0.021965797142222607
},
"leaderboard_ifeval": {
"alias": " - leaderboard_ifeval",
"prompt_level_strict_acc,none": 0.7504621072088724,
"prompt_level_strict_acc_stderr,none": 0.018622404509805804,
"inst_level_strict_acc,none": 0.8165467625899281,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.7634011090573013,
"prompt_level_loose_acc_stderr,none": 0.018288827582625598,
"inst_level_loose_acc,none": 0.8285371702637889,
"inst_level_loose_acc_stderr,none": "N/A"
},
"leaderboard_math_hard": {
"exact_match,none": 0.32326283987915405,
"exact_match_stderr,none": 0.011761711608666757,
"alias": " - leaderboard_math_hard"
},
"leaderboard_math_algebra_hard": {
"alias": " - leaderboard_math_algebra_hard",
"exact_match,none": 0.6091205211726385,
"exact_match_stderr,none": 0.027894098976471507
},
"leaderboard_math_counting_and_prob_hard": {
"alias": " - leaderboard_math_counting_and_prob_hard",
"exact_match,none": 0.2032520325203252,
"exact_match_stderr,none": 0.03643325851749072
},
"leaderboard_math_geometry_hard": {
"alias": " - leaderboard_math_geometry_hard",
"exact_match,none": 0.20454545454545456,
"exact_match_stderr,none": 0.03524251981380333
},
"leaderboard_math_intermediate_algebra_hard": {
"alias": " - leaderboard_math_intermediate_algebra_hard",
"exact_match,none": 0.1392857142857143,
"exact_match_stderr,none": 0.02072911170255923
},
"leaderboard_math_num_theory_hard": {
"alias": " - leaderboard_math_num_theory_hard",
"exact_match,none": 0.3051948051948052,
"exact_match_stderr,none": 0.0372284008596668
},
"leaderboard_math_prealgebra_hard": {
"alias": " - leaderboard_math_prealgebra_hard",
"exact_match,none": 0.46113989637305697,
"exact_match_stderr,none": 0.03597524411734576
},
"leaderboard_math_precalculus_hard": {
"alias": " - leaderboard_math_precalculus_hard",
"exact_match,none": 0.1037037037037037,
"exact_match_stderr,none": 0.02633725661744443
},
"leaderboard_mmlu_pro": {
"alias": " - leaderboard_mmlu_pro",
"acc,none": 0.4447307180851064,
"acc_stderr,none": 0.004530535363926052
},
"leaderboard_musr": {
"acc_norm,none": 0.43386243386243384,
"acc_norm_stderr,none": 0.01762618265060195,
"alias": " - leaderboard_musr"
},
"leaderboard_musr_murder_mysteries": {
"alias": " - leaderboard_musr_murder_mysteries",
"acc_norm,none": 0.56,
"acc_norm_stderr,none": 0.03145724452223569
},
"leaderboard_musr_object_placements": {
"alias": " - leaderboard_musr_object_placements",
"acc_norm,none": 0.296875,
"acc_norm_stderr,none": 0.028610997088737832
},
"leaderboard_musr_team_allocation": {
"alias": " - leaderboard_musr_team_allocation",
"acc_norm,none": 0.448,
"acc_norm_stderr,none": 0.03151438761115349
}
},
"leaderboard": {
"acc,none": 0.4447307180851064,
"acc_stderr,none": 0.004530535363926051,
"inst_level_loose_acc,none": 0.8285371702637889,
"inst_level_loose_acc_stderr,none": "N/A",
"inst_level_strict_acc,none": 0.8165467625899281,
"inst_level_strict_acc_stderr,none": "N/A",
"exact_match,none": 0.32326283987915405,
"exact_match_stderr,none": 0.011761711608666757,
"prompt_level_loose_acc,none": 0.7634011090573013,
"prompt_level_loose_acc_stderr,none": 0.018288827582625598,
"acc_norm,none": 0.5014917628745622,
"acc_norm_stderr,none": 0.005340969872084893,
"prompt_level_strict_acc,none": 0.7504621072088724,
"prompt_level_strict_acc_stderr,none": 0.018622404509805804,
"alias": "leaderboard"
},
"leaderboard_bbh": {
"acc_norm,none": 0.5521610831452872,
"acc_norm_stderr,none": 0.006179016832046109,
"alias": " - leaderboard_bbh"
},
"leaderboard_bbh_boolean_expressions": {
"alias": " - leaderboard_bbh_boolean_expressions",
"acc_norm,none": 0.86,
"acc_norm_stderr,none": 0.021989409645240245
},
"leaderboard_bbh_causal_judgement": {
"alias": " - leaderboard_bbh_causal_judgement",
"acc_norm,none": 0.5668449197860963,
"acc_norm_stderr,none": 0.03633267411102591
},
"leaderboard_bbh_date_understanding": {
"alias": " - leaderboard_bbh_date_understanding",
"acc_norm,none": 0.588,
"acc_norm_stderr,none": 0.031191596026022818
},
"leaderboard_bbh_disambiguation_qa": {
"alias": " - leaderboard_bbh_disambiguation_qa",
"acc_norm,none": 0.632,
"acc_norm_stderr,none": 0.03056207062099311
},
"leaderboard_bbh_formal_fallacies": {
"alias": " - leaderboard_bbh_formal_fallacies",
"acc_norm,none": 0.604,
"acc_norm_stderr,none": 0.030993197854577898
},
"leaderboard_bbh_geometric_shapes": {
"alias": " - leaderboard_bbh_geometric_shapes",
"acc_norm,none": 0.544,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_hyperbaton": {
"alias": " - leaderboard_bbh_hyperbaton",
"acc_norm,none": 0.556,
"acc_norm_stderr,none": 0.03148684942554571
},
"leaderboard_bbh_logical_deduction_five_objects": {
"alias": " - leaderboard_bbh_logical_deduction_five_objects",
"acc_norm,none": 0.528,
"acc_norm_stderr,none": 0.031636489531544396
},
"leaderboard_bbh_logical_deduction_seven_objects": {
"alias": " - leaderboard_bbh_logical_deduction_seven_objects",
"acc_norm,none": 0.468,
"acc_norm_stderr,none": 0.03162125257572558
},
"leaderboard_bbh_logical_deduction_three_objects": {
"alias": " - leaderboard_bbh_logical_deduction_three_objects",
"acc_norm,none": 0.784,
"acc_norm_stderr,none": 0.02607865766373279
},
"leaderboard_bbh_movie_recommendation": {
"alias": " - leaderboard_bbh_movie_recommendation",
"acc_norm,none": 0.632,
"acc_norm_stderr,none": 0.03056207062099311
},
"leaderboard_bbh_navigate": {
"alias": " - leaderboard_bbh_navigate",
"acc_norm,none": 0.7,
"acc_norm_stderr,none": 0.029040893477575786
},
"leaderboard_bbh_object_counting": {
"alias": " - leaderboard_bbh_object_counting",
"acc_norm,none": 0.36,
"acc_norm_stderr,none": 0.03041876402517494
},
"leaderboard_bbh_penguins_in_a_table": {
"alias": " - leaderboard_bbh_penguins_in_a_table",
"acc_norm,none": 0.5958904109589042,
"acc_norm_stderr,none": 0.0407519857003932
},
"leaderboard_bbh_reasoning_about_colored_objects": {
"alias": " - leaderboard_bbh_reasoning_about_colored_objects",
"acc_norm,none": 0.628,
"acc_norm_stderr,none": 0.03063032594455827
},
"leaderboard_bbh_ruin_names": {
"alias": " - leaderboard_bbh_ruin_names",
"acc_norm,none": 0.58,
"acc_norm_stderr,none": 0.03127799950463661
},
"leaderboard_bbh_salient_translation_error_detection": {
"alias": " - leaderboard_bbh_salient_translation_error_detection",
"acc_norm,none": 0.536,
"acc_norm_stderr,none": 0.031603975145223735
},
"leaderboard_bbh_snarks": {
"alias": " - leaderboard_bbh_snarks",
"acc_norm,none": 0.6966292134831461,
"acc_norm_stderr,none": 0.03455421944400101
},
"leaderboard_bbh_sports_understanding": {
"alias": " - leaderboard_bbh_sports_understanding",
"acc_norm,none": 0.74,
"acc_norm_stderr,none": 0.027797315752644335
},
"leaderboard_bbh_temporal_sequences": {
"alias": " - leaderboard_bbh_temporal_sequences",
"acc_norm,none": 0.548,
"acc_norm_stderr,none": 0.03153986449255664
},
"leaderboard_bbh_tracking_shuffled_objects_five_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_five_objects",
"acc_norm,none": 0.212,
"acc_norm_stderr,none": 0.025901884690541117
},
"leaderboard_bbh_tracking_shuffled_objects_seven_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_seven_objects",
"acc_norm,none": 0.168,
"acc_norm_stderr,none": 0.023692813205492536
},
"leaderboard_bbh_tracking_shuffled_objects_three_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_three_objects",
"acc_norm,none": 0.24,
"acc_norm_stderr,none": 0.027065293652238982
},
"leaderboard_bbh_web_of_lies": {
"alias": " - leaderboard_bbh_web_of_lies",
"acc_norm,none": 0.548,
"acc_norm_stderr,none": 0.03153986449255664
},
"leaderboard_gpqa": {
"acc_norm,none": 0.29949664429530204,
"acc_norm_stderr,none": 0.013278959534799928,
"alias": " - leaderboard_gpqa"
},
"leaderboard_gpqa_diamond": {
"alias": " - leaderboard_gpqa_diamond",
"acc_norm,none": 0.2878787878787879,
"acc_norm_stderr,none": 0.03225883512300998
},
"leaderboard_gpqa_extended": {
"alias": " - leaderboard_gpqa_extended",
"acc_norm,none": 0.29120879120879123,
"acc_norm_stderr,none": 0.019460910297288078
},
"leaderboard_gpqa_main": {
"alias": " - leaderboard_gpqa_main",
"acc_norm,none": 0.31473214285714285,
"acc_norm_stderr,none": 0.021965797142222607
},
"leaderboard_ifeval": {
"alias": " - leaderboard_ifeval",
"prompt_level_strict_acc,none": 0.7504621072088724,
"prompt_level_strict_acc_stderr,none": 0.018622404509805804,
"inst_level_strict_acc,none": 0.8165467625899281,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.7634011090573013,
"prompt_level_loose_acc_stderr,none": 0.018288827582625598,
"inst_level_loose_acc,none": 0.8285371702637889,
"inst_level_loose_acc_stderr,none": "N/A"
},
"leaderboard_math_hard": {
"exact_match,none": 0.32326283987915405,
"exact_match_stderr,none": 0.011761711608666757,
"alias": " - leaderboard_math_hard"
},
"leaderboard_math_algebra_hard": {
"alias": " - leaderboard_math_algebra_hard",
"exact_match,none": 0.6091205211726385,
"exact_match_stderr,none": 0.027894098976471507
},
"leaderboard_math_counting_and_prob_hard": {
"alias": " - leaderboard_math_counting_and_prob_hard",
"exact_match,none": 0.2032520325203252,
"exact_match_stderr,none": 0.03643325851749072
},
"leaderboard_math_geometry_hard": {
"alias": " - leaderboard_math_geometry_hard",
"exact_match,none": 0.20454545454545456,
"exact_match_stderr,none": 0.03524251981380333
},
"leaderboard_math_intermediate_algebra_hard": {
"alias": " - leaderboard_math_intermediate_algebra_hard",
"exact_match,none": 0.1392857142857143,
"exact_match_stderr,none": 0.02072911170255923
},
"leaderboard_math_num_theory_hard": {
"alias": " - leaderboard_math_num_theory_hard",
"exact_match,none": 0.3051948051948052,
"exact_match_stderr,none": 0.0372284008596668
},
"leaderboard_math_prealgebra_hard": {
"alias": " - leaderboard_math_prealgebra_hard",
"exact_match,none": 0.46113989637305697,
"exact_match_stderr,none": 0.03597524411734576
},
"leaderboard_math_precalculus_hard": {
"alias": " - leaderboard_math_precalculus_hard",
"exact_match,none": 0.1037037037037037,
"exact_match_stderr,none": 0.02633725661744443
},
"leaderboard_mmlu_pro": {
"alias": " - leaderboard_mmlu_pro",
"acc,none": 0.4447307180851064,
"acc_stderr,none": 0.004530535363926052
},
"leaderboard_musr": {
"acc_norm,none": 0.43386243386243384,
"acc_norm_stderr,none": 0.01762618265060195,
"alias": " - leaderboard_musr"
},
"leaderboard_musr_murder_mysteries": {
"alias": " - leaderboard_musr_murder_mysteries",
"acc_norm,none": 0.56,
"acc_norm_stderr,none": 0.03145724452223569
},
"leaderboard_musr_object_placements": {
"alias": " - leaderboard_musr_object_placements",
"acc_norm,none": 0.296875,
"acc_norm_stderr,none": 0.028610997088737832
},
"leaderboard_musr_team_allocation": {
"alias": " - leaderboard_musr_team_allocation",
"acc_norm,none": 0.448,
"acc_norm_stderr,none": 0.03151438761115349
}
}
```
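If you prefer to work with the raw results file linked above rather than the excerpt printed here, a minimal sketch using `huggingface_hub` is shown below. The filename is taken directly from the "Latest results" link; the exact nesting of keys inside the JSON is not guaranteed, so the sketch only prints the top-level layout before drilling into any particular metric.
```python
import json

from huggingface_hub import hf_hub_download

# Fetch the aggregated-results file referenced in the "Latest results" link above.
path = hf_hub_download(
    repo_id="open-llm-leaderboard/ZeroXClem__Qwen2.5-7B-HomerCreative-Mix-details",
    filename="ZeroXClem__Qwen2.5-7B-HomerCreative-Mix/results_2024-11-22T00-32-11.693490.json",
    repo_type="dataset",
)

with open(path) as f:
    results = json.load(f)

# Inspect the top-level keys before assuming a particular layout.
print(list(results.keys()))
```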
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
open-llm-leaderboard/ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix-details | open-llm-leaderboard | "2024-11-22T00:37:56Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:json",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T00:34:21Z" | ---
pretty_name: Evaluation run of ZeroXClem/Qwen2.5-7B-HomerAnvita-NerdMix
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [ZeroXClem/Qwen2.5-7B-HomerAnvita-NerdMix](https://huggingface.co/ZeroXClem/Qwen2.5-7B-HomerAnvita-NerdMix)\n\
The dataset is composed of 38 configuration(s), each one corresponding to one of\
\ the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can\
\ be found as a specific split in each configuration, the split being named using\
\ the timestamp of the run.The \"train\" split is always pointing to the latest\
\ results.\n\nAn additional configuration \"results\" store all the aggregated results\
\ of the run.\n\nTo load the details from a run, you can for instance do the following:\n\
```python\nfrom datasets import load_dataset\ndata = load_dataset(\n\t\"open-llm-leaderboard/ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix-details\"\
,\n\tname=\"ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_boolean_expressions\"\
,\n\tsplit=\"latest\"\n)\n```\n\n## Latest results\n\nThese are the [latest results\
\ from run 2024-11-22T00-34-20.371295](https://huggingface.co/datasets/open-llm-leaderboard/ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix-details/blob/main/ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix/results_2024-11-22T00-34-20.371295.json)\
\ (note that there might be results for other tasks in the repos if successive evals\
\ didn't cover the same tasks. You find each in the results and the \"latest\" split\
\ for each eval):\n\n```python\n{\n \"all\": {\n \"leaderboard\": {\n\
\ \"prompt_level_loose_acc,none\": 0.7578558225508318,\n \"\
prompt_level_loose_acc_stderr,none\": 0.018434587800223168,\n \"acc,none\"\
: 0.4431515957446808,\n \"acc_stderr,none\": 0.00452891098809217,\n \
\ \"acc_norm,none\": 0.5046050071345181,\n \"acc_norm_stderr,none\"\
: 0.005356894928628325,\n \"inst_level_strict_acc,none\": 0.802158273381295,\n\
\ \"inst_level_strict_acc_stderr,none\": \"N/A\",\n \"prompt_level_strict_acc,none\"\
: 0.7393715341959335,\n \"prompt_level_strict_acc_stderr,none\": 0.018890584986760186,\n\
\ \"exact_match,none\": 0.29531722054380666,\n \"exact_match_stderr,none\"\
: 0.011453860732395094,\n \"inst_level_loose_acc,none\": 0.8201438848920863,\n\
\ \"inst_level_loose_acc_stderr,none\": \"N/A\",\n \"alias\"\
: \"leaderboard\"\n },\n \"leaderboard_bbh\": {\n \"acc_norm,none\"\
: 0.551640340218712,\n \"acc_norm_stderr,none\": 0.006182534734432989,\n\
\ \"alias\": \" - leaderboard_bbh\"\n },\n \"leaderboard_bbh_boolean_expressions\"\
: {\n \"alias\": \" - leaderboard_bbh_boolean_expressions\",\n \
\ \"acc_norm,none\": 0.852,\n \"acc_norm_stderr,none\": 0.022503547243806186\n\
\ },\n \"leaderboard_bbh_causal_judgement\": {\n \"alias\"\
: \" - leaderboard_bbh_causal_judgement\",\n \"acc_norm,none\": 0.5614973262032086,\n\
\ \"acc_norm_stderr,none\": 0.03638341809400991\n },\n \
\ \"leaderboard_bbh_date_understanding\": {\n \"alias\": \" - leaderboard_bbh_date_understanding\"\
,\n \"acc_norm,none\": 0.568,\n \"acc_norm_stderr,none\":\
\ 0.03139181076542941\n },\n \"leaderboard_bbh_disambiguation_qa\"\
: {\n \"alias\": \" - leaderboard_bbh_disambiguation_qa\",\n \
\ \"acc_norm,none\": 0.636,\n \"acc_norm_stderr,none\": 0.030491555220405475\n\
\ },\n \"leaderboard_bbh_formal_fallacies\": {\n \"alias\"\
: \" - leaderboard_bbh_formal_fallacies\",\n \"acc_norm,none\": 0.6,\n\
\ \"acc_norm_stderr,none\": 0.031046021028253316\n },\n \
\ \"leaderboard_bbh_geometric_shapes\": {\n \"alias\": \" - leaderboard_bbh_geometric_shapes\"\
,\n \"acc_norm,none\": 0.54,\n \"acc_norm_stderr,none\": 0.031584653891499004\n\
\ },\n \"leaderboard_bbh_hyperbaton\": {\n \"alias\": \"\
\ - leaderboard_bbh_hyperbaton\",\n \"acc_norm,none\": 0.552,\n \
\ \"acc_norm_stderr,none\": 0.03151438761115348\n },\n \"leaderboard_bbh_logical_deduction_five_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_five_objects\"\
,\n \"acc_norm,none\": 0.524,\n \"acc_norm_stderr,none\":\
\ 0.03164968895968774\n },\n \"leaderboard_bbh_logical_deduction_seven_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_seven_objects\"\
,\n \"acc_norm,none\": 0.484,\n \"acc_norm_stderr,none\":\
\ 0.03166998503010743\n },\n \"leaderboard_bbh_logical_deduction_three_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_three_objects\"\
,\n \"acc_norm,none\": 0.78,\n \"acc_norm_stderr,none\": 0.02625179282460579\n\
\ },\n \"leaderboard_bbh_movie_recommendation\": {\n \"\
alias\": \" - leaderboard_bbh_movie_recommendation\",\n \"acc_norm,none\"\
: 0.644,\n \"acc_norm_stderr,none\": 0.0303436806571532\n },\n\
\ \"leaderboard_bbh_navigate\": {\n \"alias\": \" - leaderboard_bbh_navigate\"\
,\n \"acc_norm,none\": 0.7,\n \"acc_norm_stderr,none\": 0.029040893477575786\n\
\ },\n \"leaderboard_bbh_object_counting\": {\n \"alias\"\
: \" - leaderboard_bbh_object_counting\",\n \"acc_norm,none\": 0.364,\n\
\ \"acc_norm_stderr,none\": 0.030491555220405475\n },\n \
\ \"leaderboard_bbh_penguins_in_a_table\": {\n \"alias\": \" - leaderboard_bbh_penguins_in_a_table\"\
,\n \"acc_norm,none\": 0.589041095890411,\n \"acc_norm_stderr,none\"\
: 0.04085902451640228\n },\n \"leaderboard_bbh_reasoning_about_colored_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_reasoning_about_colored_objects\"\
,\n \"acc_norm,none\": 0.636,\n \"acc_norm_stderr,none\":\
\ 0.030491555220405475\n },\n \"leaderboard_bbh_ruin_names\": {\n\
\ \"alias\": \" - leaderboard_bbh_ruin_names\",\n \"acc_norm,none\"\
: 0.58,\n \"acc_norm_stderr,none\": 0.03127799950463661\n },\n\
\ \"leaderboard_bbh_salient_translation_error_detection\": {\n \
\ \"alias\": \" - leaderboard_bbh_salient_translation_error_detection\",\n \
\ \"acc_norm,none\": 0.544,\n \"acc_norm_stderr,none\": 0.031563285061213475\n\
\ },\n \"leaderboard_bbh_snarks\": {\n \"alias\": \" -\
\ leaderboard_bbh_snarks\",\n \"acc_norm,none\": 0.6966292134831461,\n\
\ \"acc_norm_stderr,none\": 0.03455421944400101\n },\n \
\ \"leaderboard_bbh_sports_understanding\": {\n \"alias\": \" - leaderboard_bbh_sports_understanding\"\
,\n \"acc_norm,none\": 0.732,\n \"acc_norm_stderr,none\":\
\ 0.02806876238252672\n },\n \"leaderboard_bbh_temporal_sequences\"\
: {\n \"alias\": \" - leaderboard_bbh_temporal_sequences\",\n \
\ \"acc_norm,none\": 0.544,\n \"acc_norm_stderr,none\": 0.031563285061213475\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_five_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_five_objects\"\
,\n \"acc_norm,none\": 0.204,\n \"acc_norm_stderr,none\":\
\ 0.025537121574548162\n },\n \"leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
,\n \"acc_norm,none\": 0.16,\n \"acc_norm_stderr,none\": 0.023232714782060626\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
,\n \"acc_norm,none\": 0.248,\n \"acc_norm_stderr,none\":\
\ 0.027367497504863593\n },\n \"leaderboard_bbh_web_of_lies\": {\n\
\ \"alias\": \" - leaderboard_bbh_web_of_lies\",\n \"acc_norm,none\"\
: 0.56,\n \"acc_norm_stderr,none\": 0.03145724452223569\n },\n\
\ \"leaderboard_gpqa\": {\n \"acc_norm,none\": 0.3196308724832215,\n\
\ \"acc_norm_stderr,none\": 0.013522572199065146,\n \"alias\"\
: \" - leaderboard_gpqa\"\n },\n \"leaderboard_gpqa_diamond\": {\n\
\ \"alias\": \" - leaderboard_gpqa_diamond\",\n \"acc_norm,none\"\
: 0.3181818181818182,\n \"acc_norm_stderr,none\": 0.0331847733384533\n\
\ },\n \"leaderboard_gpqa_extended\": {\n \"alias\": \"\
\ - leaderboard_gpqa_extended\",\n \"acc_norm,none\": 0.3131868131868132,\n\
\ \"acc_norm_stderr,none\": 0.01986656558013767\n },\n \
\ \"leaderboard_gpqa_main\": {\n \"alias\": \" - leaderboard_gpqa_main\"\
,\n \"acc_norm,none\": 0.328125,\n \"acc_norm_stderr,none\"\
: 0.0222080353262888\n },\n \"leaderboard_ifeval\": {\n \
\ \"alias\": \" - leaderboard_ifeval\",\n \"prompt_level_strict_acc,none\"\
: 0.7393715341959335,\n \"prompt_level_strict_acc_stderr,none\": 0.018890584986760186,\n\
\ \"inst_level_strict_acc,none\": 0.802158273381295,\n \"\
inst_level_strict_acc_stderr,none\": \"N/A\",\n \"prompt_level_loose_acc,none\"\
: 0.7578558225508318,\n \"prompt_level_loose_acc_stderr,none\": 0.018434587800223168,\n\
\ \"inst_level_loose_acc,none\": 0.8201438848920863,\n \"\
inst_level_loose_acc_stderr,none\": \"N/A\"\n },\n \"leaderboard_math_hard\"\
: {\n \"exact_match,none\": 0.29531722054380666,\n \"exact_match_stderr,none\"\
: 0.011453860732395094,\n \"alias\": \" - leaderboard_math_hard\"\n \
\ },\n \"leaderboard_math_algebra_hard\": {\n \"alias\"\
: \" - leaderboard_math_algebra_hard\",\n \"exact_match,none\": 0.5635179153094463,\n\
\ \"exact_match_stderr,none\": 0.028351520946552713\n },\n \
\ \"leaderboard_math_counting_and_prob_hard\": {\n \"alias\": \"\
\ - leaderboard_math_counting_and_prob_hard\",\n \"exact_match,none\"\
: 0.17073170731707318,\n \"exact_match_stderr,none\": 0.034066279591320504\n\
\ },\n \"leaderboard_math_geometry_hard\": {\n \"alias\"\
: \" - leaderboard_math_geometry_hard\",\n \"exact_match,none\": 0.1590909090909091,\n\
\ \"exact_match_stderr,none\": 0.03195667292673137\n },\n \
\ \"leaderboard_math_intermediate_algebra_hard\": {\n \"alias\": \"\
\ - leaderboard_math_intermediate_algebra_hard\",\n \"exact_match,none\"\
: 0.11785714285714285,\n \"exact_match_stderr,none\": 0.019303911310421605\n\
\ },\n \"leaderboard_math_num_theory_hard\": {\n \"alias\"\
: \" - leaderboard_math_num_theory_hard\",\n \"exact_match,none\": 0.23376623376623376,\n\
\ \"exact_match_stderr,none\": 0.034215730598256215\n },\n \
\ \"leaderboard_math_prealgebra_hard\": {\n \"alias\": \" - leaderboard_math_prealgebra_hard\"\
,\n \"exact_match,none\": 0.47668393782383417,\n \"exact_match_stderr,none\"\
: 0.03604513672442202\n },\n \"leaderboard_math_precalculus_hard\"\
: {\n \"alias\": \" - leaderboard_math_precalculus_hard\",\n \
\ \"exact_match,none\": 0.1111111111111111,\n \"exact_match_stderr,none\"\
: 0.027148765412512273\n },\n \"leaderboard_mmlu_pro\": {\n \
\ \"alias\": \" - leaderboard_mmlu_pro\",\n \"acc,none\": 0.4431515957446808,\n\
\ \"acc_stderr,none\": 0.00452891098809217\n },\n \"leaderboard_musr\"\
: {\n \"acc_norm,none\": 0.43783068783068785,\n \"acc_norm_stderr,none\"\
: 0.017595964155130817,\n \"alias\": \" - leaderboard_musr\"\n \
\ },\n \"leaderboard_musr_murder_mysteries\": {\n \"alias\":\
\ \" - leaderboard_musr_murder_mysteries\",\n \"acc_norm,none\": 0.556,\n\
\ \"acc_norm_stderr,none\": 0.03148684942554571\n },\n \
\ \"leaderboard_musr_object_placements\": {\n \"alias\": \" - leaderboard_musr_object_placements\"\
,\n \"acc_norm,none\": 0.28515625,\n \"acc_norm_stderr,none\"\
: 0.028273327213286358\n },\n \"leaderboard_musr_team_allocation\"\
: {\n \"alias\": \" - leaderboard_musr_team_allocation\",\n \
\ \"acc_norm,none\": 0.476,\n \"acc_norm_stderr,none\": 0.03164968895968774\n\
\ }\n },\n \"leaderboard\": {\n \"prompt_level_loose_acc,none\"\
: 0.7578558225508318,\n \"prompt_level_loose_acc_stderr,none\": 0.018434587800223168,\n\
\ \"acc,none\": 0.4431515957446808,\n \"acc_stderr,none\": 0.00452891098809217,\n\
\ \"acc_norm,none\": 0.5046050071345181,\n \"acc_norm_stderr,none\"\
: 0.005356894928628325,\n \"inst_level_strict_acc,none\": 0.802158273381295,\n\
\ \"inst_level_strict_acc_stderr,none\": \"N/A\",\n \"prompt_level_strict_acc,none\"\
: 0.7393715341959335,\n \"prompt_level_strict_acc_stderr,none\": 0.018890584986760186,\n\
\ \"exact_match,none\": 0.29531722054380666,\n \"exact_match_stderr,none\"\
: 0.011453860732395094,\n \"inst_level_loose_acc,none\": 0.8201438848920863,\n\
\ \"inst_level_loose_acc_stderr,none\": \"N/A\",\n \"alias\": \"leaderboard\"\
\n },\n \"leaderboard_bbh\": {\n \"acc_norm,none\": 0.551640340218712,\n\
\ \"acc_norm_stderr,none\": 0.006182534734432989,\n \"alias\": \"\
\ - leaderboard_bbh\"\n },\n \"leaderboard_bbh_boolean_expressions\": {\n\
\ \"alias\": \" - leaderboard_bbh_boolean_expressions\",\n \"acc_norm,none\"\
: 0.852,\n \"acc_norm_stderr,none\": 0.022503547243806186\n },\n \"\
leaderboard_bbh_causal_judgement\": {\n \"alias\": \" - leaderboard_bbh_causal_judgement\"\
,\n \"acc_norm,none\": 0.5614973262032086,\n \"acc_norm_stderr,none\"\
: 0.03638341809400991\n },\n \"leaderboard_bbh_date_understanding\": {\n \
\ \"alias\": \" - leaderboard_bbh_date_understanding\",\n \"acc_norm,none\"\
: 0.568,\n \"acc_norm_stderr,none\": 0.03139181076542941\n },\n \"\
leaderboard_bbh_disambiguation_qa\": {\n \"alias\": \" - leaderboard_bbh_disambiguation_qa\"\
,\n \"acc_norm,none\": 0.636,\n \"acc_norm_stderr,none\": 0.030491555220405475\n\
\ },\n \"leaderboard_bbh_formal_fallacies\": {\n \"alias\": \" - leaderboard_bbh_formal_fallacies\"\
,\n \"acc_norm,none\": 0.6,\n \"acc_norm_stderr,none\": 0.031046021028253316\n\
\ },\n \"leaderboard_bbh_geometric_shapes\": {\n \"alias\": \" - leaderboard_bbh_geometric_shapes\"\
,\n \"acc_norm,none\": 0.54,\n \"acc_norm_stderr,none\": 0.031584653891499004\n\
\ },\n \"leaderboard_bbh_hyperbaton\": {\n \"alias\": \" - leaderboard_bbh_hyperbaton\"\
,\n \"acc_norm,none\": 0.552,\n \"acc_norm_stderr,none\": 0.03151438761115348\n\
\ },\n \"leaderboard_bbh_logical_deduction_five_objects\": {\n \"alias\"\
: \" - leaderboard_bbh_logical_deduction_five_objects\",\n \"acc_norm,none\"\
: 0.524,\n \"acc_norm_stderr,none\": 0.03164968895968774\n },\n \"\
leaderboard_bbh_logical_deduction_seven_objects\": {\n \"alias\": \" - leaderboard_bbh_logical_deduction_seven_objects\"\
,\n \"acc_norm,none\": 0.484,\n \"acc_norm_stderr,none\": 0.03166998503010743\n\
\ },\n \"leaderboard_bbh_logical_deduction_three_objects\": {\n \"\
alias\": \" - leaderboard_bbh_logical_deduction_three_objects\",\n \"acc_norm,none\"\
: 0.78,\n \"acc_norm_stderr,none\": 0.02625179282460579\n },\n \"leaderboard_bbh_movie_recommendation\"\
: {\n \"alias\": \" - leaderboard_bbh_movie_recommendation\",\n \"\
acc_norm,none\": 0.644,\n \"acc_norm_stderr,none\": 0.0303436806571532\n\
\ },\n \"leaderboard_bbh_navigate\": {\n \"alias\": \" - leaderboard_bbh_navigate\"\
,\n \"acc_norm,none\": 0.7,\n \"acc_norm_stderr,none\": 0.029040893477575786\n\
\ },\n \"leaderboard_bbh_object_counting\": {\n \"alias\": \" - leaderboard_bbh_object_counting\"\
,\n \"acc_norm,none\": 0.364,\n \"acc_norm_stderr,none\": 0.030491555220405475\n\
\ },\n \"leaderboard_bbh_penguins_in_a_table\": {\n \"alias\": \" \
\ - leaderboard_bbh_penguins_in_a_table\",\n \"acc_norm,none\": 0.589041095890411,\n\
\ \"acc_norm_stderr,none\": 0.04085902451640228\n },\n \"leaderboard_bbh_reasoning_about_colored_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_reasoning_about_colored_objects\"\
,\n \"acc_norm,none\": 0.636,\n \"acc_norm_stderr,none\": 0.030491555220405475\n\
\ },\n \"leaderboard_bbh_ruin_names\": {\n \"alias\": \" - leaderboard_bbh_ruin_names\"\
,\n \"acc_norm,none\": 0.58,\n \"acc_norm_stderr,none\": 0.03127799950463661\n\
\ },\n \"leaderboard_bbh_salient_translation_error_detection\": {\n \
\ \"alias\": \" - leaderboard_bbh_salient_translation_error_detection\",\n \
\ \"acc_norm,none\": 0.544,\n \"acc_norm_stderr,none\": 0.031563285061213475\n\
\ },\n \"leaderboard_bbh_snarks\": {\n \"alias\": \" - leaderboard_bbh_snarks\"\
,\n \"acc_norm,none\": 0.6966292134831461,\n \"acc_norm_stderr,none\"\
: 0.03455421944400101\n },\n \"leaderboard_bbh_sports_understanding\": {\n\
\ \"alias\": \" - leaderboard_bbh_sports_understanding\",\n \"acc_norm,none\"\
: 0.732,\n \"acc_norm_stderr,none\": 0.02806876238252672\n },\n \"\
leaderboard_bbh_temporal_sequences\": {\n \"alias\": \" - leaderboard_bbh_temporal_sequences\"\
,\n \"acc_norm,none\": 0.544,\n \"acc_norm_stderr,none\": 0.031563285061213475\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_five_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_five_objects\"\
,\n \"acc_norm,none\": 0.204,\n \"acc_norm_stderr,none\": 0.025537121574548162\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_seven_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
,\n \"acc_norm,none\": 0.16,\n \"acc_norm_stderr,none\": 0.023232714782060626\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_three_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
,\n \"acc_norm,none\": 0.248,\n \"acc_norm_stderr,none\": 0.027367497504863593\n\
\ },\n \"leaderboard_bbh_web_of_lies\": {\n \"alias\": \" - leaderboard_bbh_web_of_lies\"\
,\n \"acc_norm,none\": 0.56,\n \"acc_norm_stderr,none\": 0.03145724452223569\n\
\ },\n \"leaderboard_gpqa\": {\n \"acc_norm,none\": 0.3196308724832215,\n\
\ \"acc_norm_stderr,none\": 0.013522572199065146,\n \"alias\": \"\
\ - leaderboard_gpqa\"\n },\n \"leaderboard_gpqa_diamond\": {\n \"\
alias\": \" - leaderboard_gpqa_diamond\",\n \"acc_norm,none\": 0.3181818181818182,\n\
\ \"acc_norm_stderr,none\": 0.0331847733384533\n },\n \"leaderboard_gpqa_extended\"\
: {\n \"alias\": \" - leaderboard_gpqa_extended\",\n \"acc_norm,none\"\
: 0.3131868131868132,\n \"acc_norm_stderr,none\": 0.01986656558013767\n \
\ },\n \"leaderboard_gpqa_main\": {\n \"alias\": \" - leaderboard_gpqa_main\"\
,\n \"acc_norm,none\": 0.328125,\n \"acc_norm_stderr,none\": 0.0222080353262888\n\
\ },\n \"leaderboard_ifeval\": {\n \"alias\": \" - leaderboard_ifeval\"\
,\n \"prompt_level_strict_acc,none\": 0.7393715341959335,\n \"prompt_level_strict_acc_stderr,none\"\
: 0.018890584986760186,\n \"inst_level_strict_acc,none\": 0.802158273381295,\n\
\ \"inst_level_strict_acc_stderr,none\": \"N/A\",\n \"prompt_level_loose_acc,none\"\
: 0.7578558225508318,\n \"prompt_level_loose_acc_stderr,none\": 0.018434587800223168,\n\
\ \"inst_level_loose_acc,none\": 0.8201438848920863,\n \"inst_level_loose_acc_stderr,none\"\
: \"N/A\"\n },\n \"leaderboard_math_hard\": {\n \"exact_match,none\"\
: 0.29531722054380666,\n \"exact_match_stderr,none\": 0.011453860732395094,\n\
\ \"alias\": \" - leaderboard_math_hard\"\n },\n \"leaderboard_math_algebra_hard\"\
: {\n \"alias\": \" - leaderboard_math_algebra_hard\",\n \"exact_match,none\"\
: 0.5635179153094463,\n \"exact_match_stderr,none\": 0.028351520946552713\n\
\ },\n \"leaderboard_math_counting_and_prob_hard\": {\n \"alias\":\
\ \" - leaderboard_math_counting_and_prob_hard\",\n \"exact_match,none\"\
: 0.17073170731707318,\n \"exact_match_stderr,none\": 0.034066279591320504\n\
\ },\n \"leaderboard_math_geometry_hard\": {\n \"alias\": \" - leaderboard_math_geometry_hard\"\
,\n \"exact_match,none\": 0.1590909090909091,\n \"exact_match_stderr,none\"\
: 0.03195667292673137\n },\n \"leaderboard_math_intermediate_algebra_hard\"\
: {\n \"alias\": \" - leaderboard_math_intermediate_algebra_hard\",\n \
\ \"exact_match,none\": 0.11785714285714285,\n \"exact_match_stderr,none\"\
: 0.019303911310421605\n },\n \"leaderboard_math_num_theory_hard\": {\n \
\ \"alias\": \" - leaderboard_math_num_theory_hard\",\n \"exact_match,none\"\
: 0.23376623376623376,\n \"exact_match_stderr,none\": 0.034215730598256215\n\
\ },\n \"leaderboard_math_prealgebra_hard\": {\n \"alias\": \" - leaderboard_math_prealgebra_hard\"\
,\n \"exact_match,none\": 0.47668393782383417,\n \"exact_match_stderr,none\"\
: 0.03604513672442202\n },\n \"leaderboard_math_precalculus_hard\": {\n \
\ \"alias\": \" - leaderboard_math_precalculus_hard\",\n \"exact_match,none\"\
: 0.1111111111111111,\n \"exact_match_stderr,none\": 0.027148765412512273\n\
\ },\n \"leaderboard_mmlu_pro\": {\n \"alias\": \" - leaderboard_mmlu_pro\"\
,\n \"acc,none\": 0.4431515957446808,\n \"acc_stderr,none\": 0.00452891098809217\n\
\ },\n \"leaderboard_musr\": {\n \"acc_norm,none\": 0.43783068783068785,\n\
\ \"acc_norm_stderr,none\": 0.017595964155130817,\n \"alias\": \"\
\ - leaderboard_musr\"\n },\n \"leaderboard_musr_murder_mysteries\": {\n \
\ \"alias\": \" - leaderboard_musr_murder_mysteries\",\n \"acc_norm,none\"\
: 0.556,\n \"acc_norm_stderr,none\": 0.03148684942554571\n },\n \"\
leaderboard_musr_object_placements\": {\n \"alias\": \" - leaderboard_musr_object_placements\"\
,\n \"acc_norm,none\": 0.28515625,\n \"acc_norm_stderr,none\": 0.028273327213286358\n\
\ },\n \"leaderboard_musr_team_allocation\": {\n \"alias\": \" - leaderboard_musr_team_allocation\"\
,\n \"acc_norm,none\": 0.476,\n \"acc_norm_stderr,none\": 0.03164968895968774\n\
\ }\n}\n```"
repo_url: https://huggingface.co/ZeroXClem/Qwen2.5-7B-HomerAnvita-NerdMix
leaderboard_url: ''
point_of_contact: ''
configs:
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_boolean_expressions
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_boolean_expressions_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_boolean_expressions_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_causal_judgement
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_causal_judgement_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_causal_judgement_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_date_understanding
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_date_understanding_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_date_understanding_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_disambiguation_qa
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_disambiguation_qa_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_disambiguation_qa_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_formal_fallacies
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_formal_fallacies_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_formal_fallacies_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_geometric_shapes
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_geometric_shapes_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_geometric_shapes_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_hyperbaton
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_hyperbaton_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_hyperbaton_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_logical_deduction_five_objects
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_logical_deduction_five_objects_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_five_objects_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_logical_deduction_seven_objects
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_logical_deduction_seven_objects_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_seven_objects_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_logical_deduction_three_objects
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_logical_deduction_three_objects_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_three_objects_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_movie_recommendation
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_movie_recommendation_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_movie_recommendation_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_navigate
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_navigate_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_navigate_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_object_counting
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_object_counting_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_object_counting_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_penguins_in_a_table
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_penguins_in_a_table_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_penguins_in_a_table_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_reasoning_about_colored_objects
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_reasoning_about_colored_objects_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_reasoning_about_colored_objects_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_ruin_names
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_ruin_names_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_ruin_names_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_salient_translation_error_detection
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_salient_translation_error_detection_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_salient_translation_error_detection_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_snarks
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_snarks_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_snarks_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_sports_understanding
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_sports_understanding_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_sports_understanding_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_temporal_sequences
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_temporal_sequences_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_temporal_sequences_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_tracking_shuffled_objects_five_objects
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_five_objects_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_five_objects_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_tracking_shuffled_objects_seven_objects
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_seven_objects_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_seven_objects_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_tracking_shuffled_objects_three_objects
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_three_objects_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_three_objects_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_web_of_lies
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_bbh_web_of_lies_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_web_of_lies_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_gpqa_diamond
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_gpqa_diamond_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_diamond_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_gpqa_extended
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_gpqa_extended_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_extended_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_gpqa_main
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_gpqa_main_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_main_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_ifeval
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_ifeval_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_ifeval_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_math_algebra_hard
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_math_algebra_hard_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_algebra_hard_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_math_counting_and_prob_hard
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_math_counting_and_prob_hard_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_counting_and_prob_hard_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_math_geometry_hard
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_math_geometry_hard_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_geometry_hard_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_math_intermediate_algebra_hard
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_math_intermediate_algebra_hard_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_intermediate_algebra_hard_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_math_num_theory_hard
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_math_num_theory_hard_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_num_theory_hard_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_math_prealgebra_hard
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_math_prealgebra_hard_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_prealgebra_hard_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_math_precalculus_hard
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_math_precalculus_hard_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_precalculus_hard_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_mmlu_pro
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_mmlu_pro_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_mmlu_pro_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_musr_murder_mysteries
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_musr_murder_mysteries_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_murder_mysteries_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_musr_object_placements
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_musr_object_placements_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_object_placements_2024-11-22T00-34-20.371295.jsonl'
- config_name: ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_musr_team_allocation
data_files:
- split: 2024_11_22T00_34_20.371295
path:
- '**/samples_leaderboard_musr_team_allocation_2024-11-22T00-34-20.371295.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_team_allocation_2024-11-22T00-34-20.371295.jsonl'
---
# Dataset Card for Evaluation run of ZeroXClem/Qwen2.5-7B-HomerAnvita-NerdMix
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [ZeroXClem/Qwen2.5-7B-HomerAnvita-NerdMix](https://huggingface.co/ZeroXClem/Qwen2.5-7B-HomerAnvita-NerdMix)
The dataset is composed of 38 configurations, each one corresponding to one of the evaluated tasks.
The dataset has been created from 1 run. Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run.
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset(
"open-llm-leaderboard/ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix-details",
name="ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_boolean_expressions",
split="latest"
)
```
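Once loaded, the split is a regular `datasets.Dataset`, so it can be inspected or converted for further analysis. The sketch below is illustrative only; the exact per-sample columns depend on the task and are not assumed here.
```python
from datasets import load_dataset

# Load the per-sample details for one task (same call as above).
data = load_dataset(
    "open-llm-leaderboard/ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix-details",
    name="ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix__leaderboard_bbh_boolean_expressions",
    split="latest",
)

# Basic inspection: number of evaluated samples and the available fields.
print(len(data))
print(data.column_names)

# Optional: convert to a pandas DataFrame for ad-hoc analysis.
df = data.to_pandas()
print(df.head())
```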
## Latest results
These are the [latest results from run 2024-11-22T00-34-20.371295](https://huggingface.co/datasets/open-llm-leaderboard/ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix-details/blob/main/ZeroXClem__Qwen2.5-7B-HomerAnvita-NerdMix/results_2024-11-22T00-34-20.371295.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each one in the results and the "latest" split for each eval):
```python
{
"all": {
"leaderboard": {
"prompt_level_loose_acc,none": 0.7578558225508318,
"prompt_level_loose_acc_stderr,none": 0.018434587800223168,
"acc,none": 0.4431515957446808,
"acc_stderr,none": 0.00452891098809217,
"acc_norm,none": 0.5046050071345181,
"acc_norm_stderr,none": 0.005356894928628325,
"inst_level_strict_acc,none": 0.802158273381295,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_strict_acc,none": 0.7393715341959335,
"prompt_level_strict_acc_stderr,none": 0.018890584986760186,
"exact_match,none": 0.29531722054380666,
"exact_match_stderr,none": 0.011453860732395094,
"inst_level_loose_acc,none": 0.8201438848920863,
"inst_level_loose_acc_stderr,none": "N/A",
"alias": "leaderboard"
},
"leaderboard_bbh": {
"acc_norm,none": 0.551640340218712,
"acc_norm_stderr,none": 0.006182534734432989,
"alias": " - leaderboard_bbh"
},
"leaderboard_bbh_boolean_expressions": {
"alias": " - leaderboard_bbh_boolean_expressions",
"acc_norm,none": 0.852,
"acc_norm_stderr,none": 0.022503547243806186
},
"leaderboard_bbh_causal_judgement": {
"alias": " - leaderboard_bbh_causal_judgement",
"acc_norm,none": 0.5614973262032086,
"acc_norm_stderr,none": 0.03638341809400991
},
"leaderboard_bbh_date_understanding": {
"alias": " - leaderboard_bbh_date_understanding",
"acc_norm,none": 0.568,
"acc_norm_stderr,none": 0.03139181076542941
},
"leaderboard_bbh_disambiguation_qa": {
"alias": " - leaderboard_bbh_disambiguation_qa",
"acc_norm,none": 0.636,
"acc_norm_stderr,none": 0.030491555220405475
},
"leaderboard_bbh_formal_fallacies": {
"alias": " - leaderboard_bbh_formal_fallacies",
"acc_norm,none": 0.6,
"acc_norm_stderr,none": 0.031046021028253316
},
"leaderboard_bbh_geometric_shapes": {
"alias": " - leaderboard_bbh_geometric_shapes",
"acc_norm,none": 0.54,
"acc_norm_stderr,none": 0.031584653891499004
},
"leaderboard_bbh_hyperbaton": {
"alias": " - leaderboard_bbh_hyperbaton",
"acc_norm,none": 0.552,
"acc_norm_stderr,none": 0.03151438761115348
},
"leaderboard_bbh_logical_deduction_five_objects": {
"alias": " - leaderboard_bbh_logical_deduction_five_objects",
"acc_norm,none": 0.524,
"acc_norm_stderr,none": 0.03164968895968774
},
"leaderboard_bbh_logical_deduction_seven_objects": {
"alias": " - leaderboard_bbh_logical_deduction_seven_objects",
"acc_norm,none": 0.484,
"acc_norm_stderr,none": 0.03166998503010743
},
"leaderboard_bbh_logical_deduction_three_objects": {
"alias": " - leaderboard_bbh_logical_deduction_three_objects",
"acc_norm,none": 0.78,
"acc_norm_stderr,none": 0.02625179282460579
},
"leaderboard_bbh_movie_recommendation": {
"alias": " - leaderboard_bbh_movie_recommendation",
"acc_norm,none": 0.644,
"acc_norm_stderr,none": 0.0303436806571532
},
"leaderboard_bbh_navigate": {
"alias": " - leaderboard_bbh_navigate",
"acc_norm,none": 0.7,
"acc_norm_stderr,none": 0.029040893477575786
},
"leaderboard_bbh_object_counting": {
"alias": " - leaderboard_bbh_object_counting",
"acc_norm,none": 0.364,
"acc_norm_stderr,none": 0.030491555220405475
},
"leaderboard_bbh_penguins_in_a_table": {
"alias": " - leaderboard_bbh_penguins_in_a_table",
"acc_norm,none": 0.589041095890411,
"acc_norm_stderr,none": 0.04085902451640228
},
"leaderboard_bbh_reasoning_about_colored_objects": {
"alias": " - leaderboard_bbh_reasoning_about_colored_objects",
"acc_norm,none": 0.636,
"acc_norm_stderr,none": 0.030491555220405475
},
"leaderboard_bbh_ruin_names": {
"alias": " - leaderboard_bbh_ruin_names",
"acc_norm,none": 0.58,
"acc_norm_stderr,none": 0.03127799950463661
},
"leaderboard_bbh_salient_translation_error_detection": {
"alias": " - leaderboard_bbh_salient_translation_error_detection",
"acc_norm,none": 0.544,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_snarks": {
"alias": " - leaderboard_bbh_snarks",
"acc_norm,none": 0.6966292134831461,
"acc_norm_stderr,none": 0.03455421944400101
},
"leaderboard_bbh_sports_understanding": {
"alias": " - leaderboard_bbh_sports_understanding",
"acc_norm,none": 0.732,
"acc_norm_stderr,none": 0.02806876238252672
},
"leaderboard_bbh_temporal_sequences": {
"alias": " - leaderboard_bbh_temporal_sequences",
"acc_norm,none": 0.544,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_tracking_shuffled_objects_five_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_five_objects",
"acc_norm,none": 0.204,
"acc_norm_stderr,none": 0.025537121574548162
},
"leaderboard_bbh_tracking_shuffled_objects_seven_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_seven_objects",
"acc_norm,none": 0.16,
"acc_norm_stderr,none": 0.023232714782060626
},
"leaderboard_bbh_tracking_shuffled_objects_three_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_three_objects",
"acc_norm,none": 0.248,
"acc_norm_stderr,none": 0.027367497504863593
},
"leaderboard_bbh_web_of_lies": {
"alias": " - leaderboard_bbh_web_of_lies",
"acc_norm,none": 0.56,
"acc_norm_stderr,none": 0.03145724452223569
},
"leaderboard_gpqa": {
"acc_norm,none": 0.3196308724832215,
"acc_norm_stderr,none": 0.013522572199065146,
"alias": " - leaderboard_gpqa"
},
"leaderboard_gpqa_diamond": {
"alias": " - leaderboard_gpqa_diamond",
"acc_norm,none": 0.3181818181818182,
"acc_norm_stderr,none": 0.0331847733384533
},
"leaderboard_gpqa_extended": {
"alias": " - leaderboard_gpqa_extended",
"acc_norm,none": 0.3131868131868132,
"acc_norm_stderr,none": 0.01986656558013767
},
"leaderboard_gpqa_main": {
"alias": " - leaderboard_gpqa_main",
"acc_norm,none": 0.328125,
"acc_norm_stderr,none": 0.0222080353262888
},
"leaderboard_ifeval": {
"alias": " - leaderboard_ifeval",
"prompt_level_strict_acc,none": 0.7393715341959335,
"prompt_level_strict_acc_stderr,none": 0.018890584986760186,
"inst_level_strict_acc,none": 0.802158273381295,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.7578558225508318,
"prompt_level_loose_acc_stderr,none": 0.018434587800223168,
"inst_level_loose_acc,none": 0.8201438848920863,
"inst_level_loose_acc_stderr,none": "N/A"
},
"leaderboard_math_hard": {
"exact_match,none": 0.29531722054380666,
"exact_match_stderr,none": 0.011453860732395094,
"alias": " - leaderboard_math_hard"
},
"leaderboard_math_algebra_hard": {
"alias": " - leaderboard_math_algebra_hard",
"exact_match,none": 0.5635179153094463,
"exact_match_stderr,none": 0.028351520946552713
},
"leaderboard_math_counting_and_prob_hard": {
"alias": " - leaderboard_math_counting_and_prob_hard",
"exact_match,none": 0.17073170731707318,
"exact_match_stderr,none": 0.034066279591320504
},
"leaderboard_math_geometry_hard": {
"alias": " - leaderboard_math_geometry_hard",
"exact_match,none": 0.1590909090909091,
"exact_match_stderr,none": 0.03195667292673137
},
"leaderboard_math_intermediate_algebra_hard": {
"alias": " - leaderboard_math_intermediate_algebra_hard",
"exact_match,none": 0.11785714285714285,
"exact_match_stderr,none": 0.019303911310421605
},
"leaderboard_math_num_theory_hard": {
"alias": " - leaderboard_math_num_theory_hard",
"exact_match,none": 0.23376623376623376,
"exact_match_stderr,none": 0.034215730598256215
},
"leaderboard_math_prealgebra_hard": {
"alias": " - leaderboard_math_prealgebra_hard",
"exact_match,none": 0.47668393782383417,
"exact_match_stderr,none": 0.03604513672442202
},
"leaderboard_math_precalculus_hard": {
"alias": " - leaderboard_math_precalculus_hard",
"exact_match,none": 0.1111111111111111,
"exact_match_stderr,none": 0.027148765412512273
},
"leaderboard_mmlu_pro": {
"alias": " - leaderboard_mmlu_pro",
"acc,none": 0.4431515957446808,
"acc_stderr,none": 0.00452891098809217
},
"leaderboard_musr": {
"acc_norm,none": 0.43783068783068785,
"acc_norm_stderr,none": 0.017595964155130817,
"alias": " - leaderboard_musr"
},
"leaderboard_musr_murder_mysteries": {
"alias": " - leaderboard_musr_murder_mysteries",
"acc_norm,none": 0.556,
"acc_norm_stderr,none": 0.03148684942554571
},
"leaderboard_musr_object_placements": {
"alias": " - leaderboard_musr_object_placements",
"acc_norm,none": 0.28515625,
"acc_norm_stderr,none": 0.028273327213286358
},
"leaderboard_musr_team_allocation": {
"alias": " - leaderboard_musr_team_allocation",
"acc_norm,none": 0.476,
"acc_norm_stderr,none": 0.03164968895968774
}
},
"leaderboard": {
"prompt_level_loose_acc,none": 0.7578558225508318,
"prompt_level_loose_acc_stderr,none": 0.018434587800223168,
"acc,none": 0.4431515957446808,
"acc_stderr,none": 0.00452891098809217,
"acc_norm,none": 0.5046050071345181,
"acc_norm_stderr,none": 0.005356894928628325,
"inst_level_strict_acc,none": 0.802158273381295,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_strict_acc,none": 0.7393715341959335,
"prompt_level_strict_acc_stderr,none": 0.018890584986760186,
"exact_match,none": 0.29531722054380666,
"exact_match_stderr,none": 0.011453860732395094,
"inst_level_loose_acc,none": 0.8201438848920863,
"inst_level_loose_acc_stderr,none": "N/A",
"alias": "leaderboard"
},
"leaderboard_bbh": {
"acc_norm,none": 0.551640340218712,
"acc_norm_stderr,none": 0.006182534734432989,
"alias": " - leaderboard_bbh"
},
"leaderboard_bbh_boolean_expressions": {
"alias": " - leaderboard_bbh_boolean_expressions",
"acc_norm,none": 0.852,
"acc_norm_stderr,none": 0.022503547243806186
},
"leaderboard_bbh_causal_judgement": {
"alias": " - leaderboard_bbh_causal_judgement",
"acc_norm,none": 0.5614973262032086,
"acc_norm_stderr,none": 0.03638341809400991
},
"leaderboard_bbh_date_understanding": {
"alias": " - leaderboard_bbh_date_understanding",
"acc_norm,none": 0.568,
"acc_norm_stderr,none": 0.03139181076542941
},
"leaderboard_bbh_disambiguation_qa": {
"alias": " - leaderboard_bbh_disambiguation_qa",
"acc_norm,none": 0.636,
"acc_norm_stderr,none": 0.030491555220405475
},
"leaderboard_bbh_formal_fallacies": {
"alias": " - leaderboard_bbh_formal_fallacies",
"acc_norm,none": 0.6,
"acc_norm_stderr,none": 0.031046021028253316
},
"leaderboard_bbh_geometric_shapes": {
"alias": " - leaderboard_bbh_geometric_shapes",
"acc_norm,none": 0.54,
"acc_norm_stderr,none": 0.031584653891499004
},
"leaderboard_bbh_hyperbaton": {
"alias": " - leaderboard_bbh_hyperbaton",
"acc_norm,none": 0.552,
"acc_norm_stderr,none": 0.03151438761115348
},
"leaderboard_bbh_logical_deduction_five_objects": {
"alias": " - leaderboard_bbh_logical_deduction_five_objects",
"acc_norm,none": 0.524,
"acc_norm_stderr,none": 0.03164968895968774
},
"leaderboard_bbh_logical_deduction_seven_objects": {
"alias": " - leaderboard_bbh_logical_deduction_seven_objects",
"acc_norm,none": 0.484,
"acc_norm_stderr,none": 0.03166998503010743
},
"leaderboard_bbh_logical_deduction_three_objects": {
"alias": " - leaderboard_bbh_logical_deduction_three_objects",
"acc_norm,none": 0.78,
"acc_norm_stderr,none": 0.02625179282460579
},
"leaderboard_bbh_movie_recommendation": {
"alias": " - leaderboard_bbh_movie_recommendation",
"acc_norm,none": 0.644,
"acc_norm_stderr,none": 0.0303436806571532
},
"leaderboard_bbh_navigate": {
"alias": " - leaderboard_bbh_navigate",
"acc_norm,none": 0.7,
"acc_norm_stderr,none": 0.029040893477575786
},
"leaderboard_bbh_object_counting": {
"alias": " - leaderboard_bbh_object_counting",
"acc_norm,none": 0.364,
"acc_norm_stderr,none": 0.030491555220405475
},
"leaderboard_bbh_penguins_in_a_table": {
"alias": " - leaderboard_bbh_penguins_in_a_table",
"acc_norm,none": 0.589041095890411,
"acc_norm_stderr,none": 0.04085902451640228
},
"leaderboard_bbh_reasoning_about_colored_objects": {
"alias": " - leaderboard_bbh_reasoning_about_colored_objects",
"acc_norm,none": 0.636,
"acc_norm_stderr,none": 0.030491555220405475
},
"leaderboard_bbh_ruin_names": {
"alias": " - leaderboard_bbh_ruin_names",
"acc_norm,none": 0.58,
"acc_norm_stderr,none": 0.03127799950463661
},
"leaderboard_bbh_salient_translation_error_detection": {
"alias": " - leaderboard_bbh_salient_translation_error_detection",
"acc_norm,none": 0.544,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_snarks": {
"alias": " - leaderboard_bbh_snarks",
"acc_norm,none": 0.6966292134831461,
"acc_norm_stderr,none": 0.03455421944400101
},
"leaderboard_bbh_sports_understanding": {
"alias": " - leaderboard_bbh_sports_understanding",
"acc_norm,none": 0.732,
"acc_norm_stderr,none": 0.02806876238252672
},
"leaderboard_bbh_temporal_sequences": {
"alias": " - leaderboard_bbh_temporal_sequences",
"acc_norm,none": 0.544,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_tracking_shuffled_objects_five_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_five_objects",
"acc_norm,none": 0.204,
"acc_norm_stderr,none": 0.025537121574548162
},
"leaderboard_bbh_tracking_shuffled_objects_seven_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_seven_objects",
"acc_norm,none": 0.16,
"acc_norm_stderr,none": 0.023232714782060626
},
"leaderboard_bbh_tracking_shuffled_objects_three_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_three_objects",
"acc_norm,none": 0.248,
"acc_norm_stderr,none": 0.027367497504863593
},
"leaderboard_bbh_web_of_lies": {
"alias": " - leaderboard_bbh_web_of_lies",
"acc_norm,none": 0.56,
"acc_norm_stderr,none": 0.03145724452223569
},
"leaderboard_gpqa": {
"acc_norm,none": 0.3196308724832215,
"acc_norm_stderr,none": 0.013522572199065146,
"alias": " - leaderboard_gpqa"
},
"leaderboard_gpqa_diamond": {
"alias": " - leaderboard_gpqa_diamond",
"acc_norm,none": 0.3181818181818182,
"acc_norm_stderr,none": 0.0331847733384533
},
"leaderboard_gpqa_extended": {
"alias": " - leaderboard_gpqa_extended",
"acc_norm,none": 0.3131868131868132,
"acc_norm_stderr,none": 0.01986656558013767
},
"leaderboard_gpqa_main": {
"alias": " - leaderboard_gpqa_main",
"acc_norm,none": 0.328125,
"acc_norm_stderr,none": 0.0222080353262888
},
"leaderboard_ifeval": {
"alias": " - leaderboard_ifeval",
"prompt_level_strict_acc,none": 0.7393715341959335,
"prompt_level_strict_acc_stderr,none": 0.018890584986760186,
"inst_level_strict_acc,none": 0.802158273381295,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.7578558225508318,
"prompt_level_loose_acc_stderr,none": 0.018434587800223168,
"inst_level_loose_acc,none": 0.8201438848920863,
"inst_level_loose_acc_stderr,none": "N/A"
},
"leaderboard_math_hard": {
"exact_match,none": 0.29531722054380666,
"exact_match_stderr,none": 0.011453860732395094,
"alias": " - leaderboard_math_hard"
},
"leaderboard_math_algebra_hard": {
"alias": " - leaderboard_math_algebra_hard",
"exact_match,none": 0.5635179153094463,
"exact_match_stderr,none": 0.028351520946552713
},
"leaderboard_math_counting_and_prob_hard": {
"alias": " - leaderboard_math_counting_and_prob_hard",
"exact_match,none": 0.17073170731707318,
"exact_match_stderr,none": 0.034066279591320504
},
"leaderboard_math_geometry_hard": {
"alias": " - leaderboard_math_geometry_hard",
"exact_match,none": 0.1590909090909091,
"exact_match_stderr,none": 0.03195667292673137
},
"leaderboard_math_intermediate_algebra_hard": {
"alias": " - leaderboard_math_intermediate_algebra_hard",
"exact_match,none": 0.11785714285714285,
"exact_match_stderr,none": 0.019303911310421605
},
"leaderboard_math_num_theory_hard": {
"alias": " - leaderboard_math_num_theory_hard",
"exact_match,none": 0.23376623376623376,
"exact_match_stderr,none": 0.034215730598256215
},
"leaderboard_math_prealgebra_hard": {
"alias": " - leaderboard_math_prealgebra_hard",
"exact_match,none": 0.47668393782383417,
"exact_match_stderr,none": 0.03604513672442202
},
"leaderboard_math_precalculus_hard": {
"alias": " - leaderboard_math_precalculus_hard",
"exact_match,none": 0.1111111111111111,
"exact_match_stderr,none": 0.027148765412512273
},
"leaderboard_mmlu_pro": {
"alias": " - leaderboard_mmlu_pro",
"acc,none": 0.4431515957446808,
"acc_stderr,none": 0.00452891098809217
},
"leaderboard_musr": {
"acc_norm,none": 0.43783068783068785,
"acc_norm_stderr,none": 0.017595964155130817,
"alias": " - leaderboard_musr"
},
"leaderboard_musr_murder_mysteries": {
"alias": " - leaderboard_musr_murder_mysteries",
"acc_norm,none": 0.556,
"acc_norm_stderr,none": 0.03148684942554571
},
"leaderboard_musr_object_placements": {
"alias": " - leaderboard_musr_object_placements",
"acc_norm,none": 0.28515625,
"acc_norm_stderr,none": 0.028273327213286358
},
"leaderboard_musr_team_allocation": {
"alias": " - leaderboard_musr_team_allocation",
"acc_norm,none": 0.476,
"acc_norm_stderr,none": 0.03164968895968774
}
}
```
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases, and limitations of the dataset. More information is needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
neoneye/simon-arc-solve-color-v17 | neoneye | "2024-11-22T00:37:15Z" | 7 | 0 | [
"task_categories:image-to-text",
"task_categories:text-to-image",
"language:en",
"license:mit",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"image-to-text",
"text-to-image"
] | "2024-11-22T00:36:02Z" | ---
license: mit
task_categories:
- image-to-text
- text-to-image
language:
- en
pretty_name: simons ARC (abstraction & reasoning corpus) solve color version 17
size_categories:
- 1K<n<10K
configs:
- config_name: default
data_files:
- split: train
path: data.jsonl
---
# Version 1
ARC-AGI Tasks where the colors get manipulated.
Currently it's two-color images, where the transformation is to swap colors.
The image sizes are between 1 and 5 pixels.
Predict the number of rows in the output image.
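As a rough, hypothetical illustration of the swap-colors transformation described above (the dataset's actual pixel encoding and color values are not assumed here):
```python
import numpy as np

# Hypothetical two-color grid; the color values 3 and 7 are placeholders,
# not the encoding actually used by the dataset.
grid = np.array([
    [3, 7, 3],
    [7, 7, 3],
])

# Swap the two colors everywhere in the grid.
a, b = 3, 7
swapped = np.where(grid == a, b, np.where(grid == b, a, grid))
print(swapped)
# [[7 3 7]
#  [3 3 7]]
```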
# Version 2
Number of tests: 1-2. Previously it was always 1 test.
# Version 3
input image size: 1-3.
Number of tests: 1.
Identify most popular color, and least popular color. The output size is always 1x1.
# Version 4
input image size: 1-4.
Number of tests: 1.
Identify most popular color, and least popular color. The output size is always 1x1.
# Version 5
input image size: 1-5.
Number of tests: 1-2.
Identify the most popular color and the least popular color. The output size is always 1x1.
# Version 6
input image size: 1-5.
Number of tests: 1-2.
Identify the most popular color and the least popular color. Multiple output sizes: either 1x1 or the same size as the input.
Swap colors.
# Version 7
Focus on `generate_task_replace_color`.
image size: 3-6.
padding size: 1-5.
# Version 8
Focus on `generate_task_replace_color`.
image size: 3-8.
padding size: 1-10.
# Version 9
Focus on `generate_task_replace_color`.
image size: 3-10.
padding size: 1-20.
# Version 10
Enabled all the task generators.
# Version 11
Focus on `generate_task_replace_color_pairs_with_different_palettes`.
image size: 3-5.
padding size: 1-4.
# Version 12
Focus on `generate_task_replace_color_pairs_with_different_palettes`.
image size: 3-8.
padding size: 1-10.
# Version 13
Focus on `generate_task_replace_color_pairs_with_different_palettes`.
image size: 3-10.
padding size: 1-20.
# Version 14
Extended `generate_task_replace_color_pairs_with_different_palettes` with 2 new palette modes.
Enabled all transformations.
# Version 15
Earlier predictions added to some of the rows.
# Version 16
Added fields: `arc_task`, `test_index`, `earlier_output`.
# Version 17
Replaced RLE compressed response with raw pixel response.
image size: 1-7.
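
For readers who want to inspect the records described in these version notes, a minimal loading sketch with the Hugging Face `datasets` library might look like the following; the field names in the comment are assumptions taken from the version 16 note, not verified against the data.

```python
# Minimal sketch, assuming the standard Hugging Face `datasets` API.
# The commented field names (arc_task, test_index, earlier_output) are
# assumptions based on the version 16 note above.
from datasets import load_dataset

ds = load_dataset("neoneye/simon-arc-solve-color-v17", split="train")
row = ds[0]
print(sorted(row.keys()))  # expected to include arc_task, test_index, earlier_output
```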
|
neoneye/simon-arc-solve-skew-v6 | neoneye | "2024-11-22T08:02:31Z" | 7 | 0 | [
"task_categories:image-to-text",
"task_categories:text-to-image",
"language:en",
"license:mit",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"image-to-text",
"text-to-image"
] | "2024-11-22T01:17:49Z" | ---
license: mit
task_categories:
- image-to-text
- text-to-image
language:
- en
pretty_name: simons ARC (abstraction & reasoning corpus) solve skew version 6
size_categories:
- 1K<n<10K
configs:
- config_name: default
data_files:
- split: train
path: data.jsonl
---
# Version 1
ARC-AGI Tasks where the job is to apply skew/unskew in the directions up/down/left/right.
example count: 2-4.
test count: 1-2.
image size: 1-4.
# Version 2
image size: 1-7.
# Version 3
Earlier predictions added to some of the rows.
# Version 4
Added fields: `arc_task`, `test_index`, `earlier_output`.
# Version 5
Replaced RLE compressed response with raw pixel response.
# Version 6
image size: 1-9. |
magnifi/parser_user_v27h | magnifi | "2024-11-22T02:02:43Z" | 7 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T02:02:42Z" | ---
dataset_info:
features:
- name: Query_id
dtype: int64
- name: Query
dtype: string
- name: Elastic_search
dtype: string
- name: virtual_portfolios
dtype: string
- name: Parser_output
dtype: string
splits:
- name: train
num_bytes: 344199
num_examples: 1524
- name: validation
num_bytes: 24775
num_examples: 128
download_size: 137440
dataset_size: 368974
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
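
As a rough guide to the schema above, a minimal loading sketch could look like this; it assumes the standard `datasets` library, with split sizes and column names taken from the `dataset_info` block.

```python
# Minimal sketch, assuming the standard Hugging Face `datasets` API;
# split sizes and column names come from the dataset_info block above.
from datasets import load_dataset

train = load_dataset("magnifi/parser_user_v27h", split="train")
val = load_dataset("magnifi/parser_user_v27h", split="validation")
print(len(train), len(val))  # 1524 / 128 per the split metadata
print(train[0]["Query"], "->", train[0]["Parser_output"])
```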
|
ZixuanKe/fingpt_convfinqa_sup_sample_from_policy_v1.1_dpo_val_chunk_18 | ZixuanKe | "2024-11-22T02:32:53Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T02:32:52Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: rejected
dtype: string
- name: chosen
dtype: string
splits:
- name: train
num_bytes: 147908
num_examples: 24
download_size: 19032
dataset_size: 147908
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
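
The `prompt`/`rejected`/`chosen` columns follow the usual preference-pair layout used for DPO-style training. A minimal inspection sketch, assuming the standard `datasets` library, might look like this:

```python
# Minimal sketch, assuming the standard Hugging Face `datasets` API;
# the prompt/rejected/chosen columns are listed in the schema above.
from datasets import load_dataset

ds = load_dataset(
    "ZixuanKe/fingpt_convfinqa_sup_sample_from_policy_v1.1_dpo_val_chunk_18",
    split="train",
)
pair = ds[0]
print(pair["prompt"][:200])
print("chosen:  ", pair["chosen"][:100])
print("rejected:", pair["rejected"][:100])
```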
|
ZixuanKe/fingpt_convfinqa_sup_sample_from_policy_v1.1_dpo_val_chunk_23 | ZixuanKe | "2024-11-22T02:33:56Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T02:33:55Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: rejected
dtype: string
- name: chosen
dtype: string
splits:
- name: train
num_bytes: 167913
num_examples: 33
download_size: 26156
dataset_size: 167913
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mhdang/image_unseen-fewshot_sc_ours_withjpg_num500 | mhdang | "2024-11-22T02:34:37Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T02:34:10Z" | ---
dataset_info:
features:
- name: jpg_model_train
dtype: binary
- name: jpg_model_base
dtype: binary
- name: user_id
dtype: int64
- name: text
dtype: string
- name: emb
sequence:
sequence: float64
- name: preferred_image_uid_0
dtype: string
- name: dispreferred_image_uid_0
dtype: string
- name: caption_0
dtype: string
- name: preferred_image_uid_1
dtype: string
- name: dispreferred_image_uid_1
dtype: string
- name: caption_1
dtype: string
- name: preferred_image_uid_2
dtype: string
- name: dispreferred_image_uid_2
dtype: string
- name: caption_2
dtype: string
- name: preferred_image_uid_3
dtype: string
- name: dispreferred_image_uid_3
dtype: string
- name: caption_3
dtype: string
- name: class
dtype: int64
- name: __index_level_0__
dtype: int64
- name: user_description
dtype: string
- name: caption
dtype: string
- name: preferred_image_uid_0_jpg
dtype: binary
- name: preferred_image_uid_1_jpg
dtype: binary
- name: preferred_image_uid_2_jpg
dtype: binary
- name: preferred_image_uid_3_jpg
dtype: binary
- name: dispreferred_image_uid_0_jpg
dtype: binary
- name: dispreferred_image_uid_1_jpg
dtype: binary
- name: dispreferred_image_uid_2_jpg
dtype: binary
- name: dispreferred_image_uid_3_jpg
dtype: binary
splits:
- name: test
num_bytes: 1537893962
num_examples: 500
download_size: 1079067223
dataset_size: 1537893962
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
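
The `binary` columns appear to hold raw JPEG bytes (an assumption based on the `_jpg` suffixes in the schema above, not confirmed by the card). A minimal decoding sketch using `datasets` and Pillow could look like this:

```python
# Minimal sketch, assuming `datasets` and Pillow; treating the binary
# columns as raw JPEG bytes is an assumption based on the `_jpg` suffix.
import io

from datasets import load_dataset
from PIL import Image

ds = load_dataset("mhdang/image_unseen-fewshot_sc_ours_withjpg_num500", split="test")
row = ds[0]
img = Image.open(io.BytesIO(row["preferred_image_uid_0_jpg"]))
print(img.size)
```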
|
ZixuanKe/fingpt_convfinqa_sup_sample_from_policy_v1.1_dpo_val_chunk_28 | ZixuanKe | "2024-11-22T02:35:30Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T02:35:29Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: rejected
dtype: string
- name: chosen
dtype: string
splits:
- name: train
num_bytes: 216320
num_examples: 33
download_size: 37707
dataset_size: 216320
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ZixuanKe/fingpt_convfinqa_sup_sample_from_policy_v1.1_dpo_val_chunk_13 | ZixuanKe | "2024-11-22T02:36:46Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T02:36:45Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: rejected
dtype: string
- name: chosen
dtype: string
splits:
- name: train
num_bytes: 195300
num_examples: 35
download_size: 28241
dataset_size: 195300
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mhdang/image_seen_sc_ours_withjpg_num500 | mhdang | "2024-11-22T02:39:13Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T02:38:44Z" | ---
dataset_info:
features:
- name: jpg_model_train
dtype: binary
- name: jpg_model_base
dtype: binary
- name: user_id
dtype: int64
- name: text
dtype: string
- name: emb
sequence:
sequence: float64
- name: preferred_image_uid_0
dtype: string
- name: dispreferred_image_uid_0
dtype: string
- name: caption_0
dtype: string
- name: preferred_image_uid_1
dtype: string
- name: dispreferred_image_uid_1
dtype: string
- name: caption_1
dtype: string
- name: preferred_image_uid_2
dtype: string
- name: dispreferred_image_uid_2
dtype: string
- name: caption_2
dtype: string
- name: preferred_image_uid_3
dtype: string
- name: dispreferred_image_uid_3
dtype: string
- name: caption_3
dtype: string
- name: class
dtype: int64
- name: __index_level_0__
dtype: int64
- name: user_description
dtype: string
- name: caption
dtype: string
- name: preferred_image_uid_0_jpg
dtype: binary
- name: preferred_image_uid_1_jpg
dtype: binary
- name: preferred_image_uid_2_jpg
dtype: binary
- name: preferred_image_uid_3_jpg
dtype: binary
- name: dispreferred_image_uid_0_jpg
dtype: binary
- name: dispreferred_image_uid_1_jpg
dtype: binary
- name: dispreferred_image_uid_2_jpg
dtype: binary
- name: dispreferred_image_uid_3_jpg
dtype: binary
splits:
- name: test
num_bytes: 1468520892
num_examples: 500
download_size: 1143544216
dataset_size: 1468520892
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
ifrah1/your_dataset_name | ifrah1 | "2024-11-22T02:55:18Z" | 7 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T02:55:16Z" | ---
dataset_info:
features:
- name: English
dtype: string
- name: Urdu
dtype: string
splits:
- name: train
num_bytes: 33059917
num_examples: 107317
download_size: 16879027
dataset_size: 33059917
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
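
A minimal sketch for reading the parallel English/Urdu pairs, assuming the standard `datasets` library and the column names listed above:

```python
# Minimal sketch, assuming the standard Hugging Face `datasets` API;
# the English/Urdu column names are taken from the features above.
from datasets import load_dataset

ds = load_dataset("ifrah1/your_dataset_name", split="train")
pair = ds[0]
print(pair["English"])
print(pair["Urdu"])
```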
|
ZixuanKe/fingpt_convfinqa_sup_sample_from_policy_v1.1_dpo_val_chunk_32 | ZixuanKe | "2024-11-22T03:56:11Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T03:56:10Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: rejected
dtype: string
- name: chosen
dtype: string
splits:
- name: train
num_bytes: 285573
num_examples: 50
download_size: 24937
dataset_size: 285573
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
procit007/treated_0.5 | procit007 | "2024-11-22T04:08:29Z" | 7 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T04:06:14Z" | ---
dataset_info:
features:
- name: gender
dtype: string
- name: accent
dtype: string
- name: speaker_id
dtype: int64
- name: speaker_name
dtype: string
- name: text
dtype: string
- name: normalized_text
dtype: string
- name: audio
dtype: audio
- name: treated
dtype: bool
- name: metrics
struct:
- name: clipping_ratio
dtype: float64
- name: duration
dtype: float64
- name: is_valid
dtype: bool
- name: rms_energy
dtype: float64
- name: sample_rate
dtype: int64
- name: silence_ratio
dtype: float64
- name: snr
dtype: float64
splits:
- name: train
num_bytes: 3188358457.0
num_examples: 10000
download_size: 2987430472
dataset_size: 3188358457.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
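
The nested `metrics` struct makes it straightforward to filter clips by quality. A minimal sketch, assuming the standard `datasets` library; the 20 dB SNR threshold is an arbitrary illustration, not part of this card.

```python
# Minimal sketch, assuming the standard Hugging Face `datasets` API;
# the 20 dB SNR threshold is an arbitrary illustration.
from datasets import load_dataset

ds = load_dataset("procit007/treated_0.5", split="train")
clean = ds.filter(lambda r: r["metrics"]["is_valid"] and r["metrics"]["snr"] > 20)
print(f"{len(clean)} of {len(ds)} clips are valid with SNR above 20 dB")
```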
|
Asap7772/processed_image_unseen-fewshot_sc_ours_withjpg_num500 | Asap7772 | "2024-11-22T04:12:03Z" | 7 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T04:10:48Z" | ---
dataset_info:
features:
- name: user_id
dtype: int64
- name: caption
sequence: string
- name: split
dtype: string
- name: shot_id
dtype: int64
- name: preferred_image
sequence: binary
- name: dispreferred_image
sequence: binary
- name: preferred_image_uid
sequence: string
- name: dispreferred_image_uid
sequence: string
splits:
- name: test
num_bytes: 1119635843
num_examples: 500
download_size: 1100260023
dataset_size: 1119635843
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
joycewu/common_voice_16_1_zh_TW_pseudo_labelled_concat2 | joycewu | "2024-11-22T05:34:56Z" | 7 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-22T05:08:05Z" | ---
dataset_info:
config_name: zh-TW
features:
- name: path
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: condition_on_prev
sequence: int64
- name: whisper_transcript
dtype: string
splits:
- name: train
num_bytes: 746186952.0
num_examples: 857
- name: validation
num_bytes: 538995300.0
num_examples: 617
- name: test
num_bytes: 594574487.0
num_examples: 684
download_size: 1682750460
dataset_size: 1879756739.0
configs:
- config_name: zh-TW
data_files:
- split: train
path: zh-TW/train-*
- split: validation
path: zh-TW/validation-*
- split: test
path: zh-TW/test-*
---
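
Because the card defines a named `zh-TW` config, loading goes through that config name. A minimal sketch, assuming the standard `datasets` library and the column names above:

```python
# Minimal sketch, assuming the standard Hugging Face `datasets` API;
# the "zh-TW" config name and column names come from the YAML above.
from datasets import load_dataset

ds = load_dataset(
    "joycewu/common_voice_16_1_zh_TW_pseudo_labelled_concat2", "zh-TW", split="test"
)
row = ds[0]
print(row["sentence"])
print(row["whisper_transcript"])
print(row["audio"]["sampling_rate"])  # 16000 per the feature definition
```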
|