datasetId (string, length 5-121) | author (string, length 2-42) | last_modified (unknown) | downloads (int64, 0-2.54M) | likes (int64, 0-6.35k) | tags (sequence, length 1-7.92k) | task_categories (sequence, length 0-40, nullable ⌀) | createdAt (unknown) | card (string, length 19-1M) |
---|---|---|---|---|---|---|---|---|
haorandai/Nov_PGD_Mice_Orange_Epsilon0.05_1samples_with1constraints | haorandai | "2024-11-25T05:02:37Z" | 3 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T05:02:36Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: text
dtype: string
splits:
- name: train
num_bytes: 176766.0
num_examples: 2
download_size: 178562
dataset_size: 176766.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
haorandai/Nov_PGD_Banana_Orange_Epsilon0.05_5samples_with5constraints | haorandai | "2024-11-25T05:19:07Z" | 3 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T05:18:51Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: text
dtype: string
splits:
- name: train
num_bytes: 781257.0
num_examples: 10
download_size: 782923
dataset_size: 781257.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
prashuchavan/rp_ | prashuchavan | "2024-11-25T05:45:17Z" | 3 | 0 | [
"license:mit",
"size_categories:10K<n<100K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T05:40:30Z" | ---
license: mit
---
|
yguooo/summarize_from_feedback_oai_preprocessing_llama3_scene0 | yguooo | "2024-11-25T06:01:34Z" | 3 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T05:41:04Z" | ---
dataset_info:
features:
- name: info
struct:
- name: id
dtype: string
- name: post
dtype: string
- name: title
dtype: string
- name: subreddit
dtype: string
- name: site
dtype: string
- name: article
dtype: string
- name: summaries
list:
- name: text
dtype: string
- name: policy
dtype: string
- name: note
dtype: string
- name: choice
dtype: int32
- name: worker
dtype: string
- name: batch
dtype: string
- name: split
dtype: string
- name: extra
struct:
- name: confidence
dtype: int32
- name: query_token
sequence: int64
- name: query
dtype: string
- name: chosen
dtype: string
- name: chosen_token
sequence: int64
- name: chosen_token_len
dtype: int64
- name: rejected
dtype: string
- name: rejected_token
sequence: int64
- name: rejected_token_len
dtype: int64
- name: chosen_policy
dtype: string
- name: rejected_policy
dtype: string
- name: policies
dtype: string
- name: query_chosen
dtype: string
- name: query_chosen_token
sequence: int64
- name: query_chosen_token_len
dtype: int64
- name: query_rejected
dtype: string
- name: query_rejected_token
sequence: int64
- name: query_rejected_token_len
dtype: int64
- name: query_token_len
dtype: int64
- name: query_chosen_token_response_label
sequence: int64
- name: query_rejected_token_response_label
sequence: int64
splits:
- name: train
num_bytes: 3051559301
num_examples: 92858
- name: validation
num_bytes: 2761252383
num_examples: 83802
- name: validation_cnndm
num_bytes: 222962138
num_examples: 2284
download_size: 289869690
dataset_size: 6035773822
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: validation_cnndm
path: data/validation_cnndm-*
---
|
HamdanXI/libriTTS_dev_wav2vec2_latent_layer1_2sec_PERFECT_chunk_2 | HamdanXI | "2024-11-25T05:46:26Z" | 3 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T05:45:46Z" | ---
dataset_info:
features:
- name: audio_clip
sequence: float64
- name: layer0_prediction
sequence: float64
- name: predicted_text
dtype: string
- name: speaker_id
dtype: string
splits:
- name: train
num_bytes: 1335915573
num_examples: 100
download_size: 857001013
dataset_size: 1335915573
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "libriTTS_dev_wav2vec2_latent_layer1_2sec_PERFECT_chunk_2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
yguooo/summarize_from_feedback_tldr_3_filtered_oai_preprocessing_pythia_scene2 | yguooo | "2024-11-25T06:11:12Z" | 3 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T05:54:12Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: subreddit
dtype: string
- name: title
dtype: string
- name: post
dtype: string
- name: summary
dtype: string
- name: query_token
sequence: int64
- name: query
dtype: string
- name: reference_response
dtype: string
- name: reference_response_token
sequence: int64
- name: reference_response_token_len
dtype: int64
- name: query_reference_response
dtype: string
- name: query_reference_response_token
sequence: int64
- name: query_reference_response_token_response_label
sequence: int64
- name: query_reference_response_token_len
dtype: int64
splits:
- name: train
num_bytes: 2152790755
num_examples: 116722
- name: validation
num_bytes: 118940067
num_examples: 6447
- name: test
num_bytes: 120931386
num_examples: 6553
download_size: 565129816
dataset_size: 2392662208
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
# TL;DR SFT Dataset for OpenAI's [Summarize from Feedback](https://openai.com/blog/summarization/) task
The dataset is directly taken from https://github.com/openai/summarize-from-feedback/tree/700967448d10004279f138666442bf1497d0e705#reddit-tldr-dataset
These columns are taken directly from the aforementioned dataset:
* **id**: unique identifier for the post
* **subreddit**: subreddit the post was taken from
* **title**: title of the post
* **post**: body of the post
* **summary**: summary of the post
* **reference_response**: reference response for the post
These columns are added by this preprocessing script:
* **query**: length-limited query for summarization: OAI pre-processes the main text (title + subreddit + post), ensuring it has no more than 512 tokens; if the main text is too long, it is truncated at the last `\n`; if it is too short, the main text is padded ([summarize_from_feedback/tasks.py#L98-L165](https://github.com/openai/summarize-from-feedback/blob/700967448d10004279f138666442bf1497d0e705/summarize_from_feedback/tasks.py#L98-L165)). Padding is either a space or the `[PAD]` token (see Args below). A rough sketch of this logic is shown after this list.
* **query_token**: tokenized version of `query`
* **reference_response_token**: tokenized version of `reference_response`
* **reference_response_token_len**: length of `reference_response_token`
* **query_reference_response**: concatenation of `query.strip()` and `reference_response`
* **query_reference_response_token**: tokenized version of `query_reference_response`, up to `max_sft_query_response_length` tokens
* **query_reference_response_token_len**: length of `query_reference_response_token`
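The sketch below illustrates the truncate-and-pad logic described for **query**. It is a simplified approximation, not the original OAI code: the tokenizer loading, the helper names (`render`, `build_query_token`), and the use of literal newlines in the template are assumptions here; see the linked `tasks.py` for the authoritative implementation.
```python
# Hypothetical sketch of the query preprocessing; the real logic lives in
# summarize_from_feedback/tasks.py (TaskQueryHParams).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-1b")  # base_model from Args

def render(title: str, subreddit: str, post: str) -> str:
    # Approximation of the tldr_params format_str from the Args section.
    return (
        f"TITLE: {title}\n\nPOST: {post}\n\n"
        f"Write a short and concise summary of the given titled post from the "
        f"{subreddit} subreddit, ensuring the purpose and main ideas are "
        f"represented clearly.\n\nTL;DR:"
    )

def build_query_token(title, subreddit, post, length=512,
                      pad_token_id=50277, pad_side="left"):
    tokens = tokenizer.encode(render(title, subreddit, post))
    # Too long: truncate the post at its last newline and re-tokenize
    # (over-length posts without any newline are not handled in this sketch).
    while len(tokens) > length and "\n" in post:
        post = post[: post.rindex("\n")]
        tokens = tokenizer.encode(render(title, subreddit, post))
    # Too short: pad to exactly `length` tokens with the pad token.
    pad = [pad_token_id] * (length - len(tokens))
    return pad + tokens if pad_side == "left" else tokens + pad
```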
# Args
```python
{'base_model': 'EleutherAI/pythia-1b',
'check_length_correctness': True,
'cnndm_params': TaskQueryHParams(length=1919,
format_str='Article:\n{article}\n\nTL;DR:\n',
truncate_field='article',
truncate_text='\n',
padding='pad_token',
pad_token=[50277],
pad_side='left',
max_sft_response_length=None,
max_sft_query_response_length=None,
max_rm_response_length=155,
max_rm_query_response_length=2021),
'debug': False,
'ds_name': 'pythia_scene2',
'hf_entity': 'yguooo',
'push_to_hub': True,
'scenario': 0,
'tldr_params': TaskQueryHParams(length=512,
format_str='TITLE: {title}\\n\\nPOST: '
'{post}\\n\\nWrite a short and '
'concise summary of the given '
'titled post from the {subreddit} '
'subreddit, ensuring the purpose '
'and main ideas are represented '
'clearly.\\n\\nTL;DR:',
truncate_field='post',
truncate_text='\n',
padding='pad_token',
pad_token=[50277],
pad_side='left',
max_sft_response_length=53,
max_sft_query_response_length=562,
max_rm_response_length=169,
max_rm_query_response_length=651)}
```
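As a usage note (not part of the original card), the splits described above can be loaded with the `datasets` library:
```python
from datasets import load_dataset

ds = load_dataset("yguooo/summarize_from_feedback_tldr_3_filtered_oai_preprocessing_pythia_scene2")
example = ds["train"][0]
print(example["query"])               # length-limited prompt
print(example["reference_response"])  # reference TL;DR summary
```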
|
yguooo/summarize_from_feedback_oai_preprocessing_pythia_scene3 | yguooo | "2024-11-25T06:16:15Z" | 3 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T06:01:22Z" | ---
dataset_info:
features:
- name: info
struct:
- name: id
dtype: string
- name: post
dtype: string
- name: title
dtype: string
- name: subreddit
dtype: string
- name: site
dtype: string
- name: article
dtype: string
- name: summaries
list:
- name: text
dtype: string
- name: policy
dtype: string
- name: note
dtype: string
- name: choice
dtype: int32
- name: worker
dtype: string
- name: batch
dtype: string
- name: split
dtype: string
- name: extra
struct:
- name: confidence
dtype: int32
- name: query_token
sequence: int64
- name: query
dtype: string
- name: chosen
dtype: string
- name: chosen_token
sequence: int64
- name: chosen_token_len
dtype: int64
- name: rejected
dtype: string
- name: rejected_token
sequence: int64
- name: rejected_token_len
dtype: int64
- name: chosen_policy
dtype: string
- name: rejected_policy
dtype: string
- name: policies
dtype: string
- name: query_chosen
dtype: string
- name: query_chosen_token
sequence: int64
- name: query_chosen_token_len
dtype: int64
- name: query_rejected
dtype: string
- name: query_rejected_token
sequence: int64
- name: query_rejected_token_len
dtype: int64
- name: query_token_len
dtype: int64
- name: query_chosen_token_response_label
sequence: int64
- name: query_rejected_token_response_label
sequence: int64
splits:
- name: train
num_bytes: 3243393797
num_examples: 92858
- name: validation
num_bytes: 2934997329
num_examples: 83802
- name: validation_cnndm
num_bytes: 225359437
num_examples: 2284
download_size: 294596372
dataset_size: 6403750563
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: validation_cnndm
path: data/validation_cnndm-*
---
|
yguooo/summarize_from_feedback_oai_preprocessing_pythia_scene4 | yguooo | "2024-11-25T06:33:01Z" | 3 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T06:01:54Z" | ---
dataset_info:
features:
- name: info
struct:
- name: id
dtype: string
- name: post
dtype: string
- name: title
dtype: string
- name: subreddit
dtype: string
- name: site
dtype: string
- name: article
dtype: string
- name: summaries
list:
- name: text
dtype: string
- name: policy
dtype: string
- name: note
dtype: string
- name: choice
dtype: int32
- name: worker
dtype: string
- name: batch
dtype: string
- name: split
dtype: string
- name: extra
struct:
- name: confidence
dtype: int32
- name: query_token
sequence: int64
- name: query
dtype: string
- name: chosen
dtype: string
- name: chosen_token
sequence: int64
- name: chosen_token_len
dtype: int64
- name: rejected
dtype: string
- name: rejected_token
sequence: int64
- name: rejected_token_len
dtype: int64
- name: chosen_policy
dtype: string
- name: rejected_policy
dtype: string
- name: policies
dtype: string
- name: query_chosen
dtype: string
- name: query_chosen_token
sequence: int64
- name: query_chosen_token_len
dtype: int64
- name: query_rejected
dtype: string
- name: query_rejected_token
sequence: int64
- name: query_rejected_token_len
dtype: int64
- name: query_token_len
dtype: int64
- name: query_chosen_token_response_label
sequence: int64
- name: query_rejected_token_response_label
sequence: int64
splits:
- name: train
num_bytes: 3268349348
num_examples: 92858
- name: validation
num_bytes: 2957694051
num_examples: 83802
- name: validation_cnndm
num_bytes: 225359437
num_examples: 2284
download_size: 296397691
dataset_size: 6451402836
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: validation_cnndm
path: data/validation_cnndm-*
---
|
anggiatm/botanisquare-tenant-descriptive-v2 | anggiatm | "2024-11-25T06:31:42Z" | 3 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T06:31:30Z" | ---
dataset_info:
features:
- name: desc
dtype: string
splits:
- name: train
num_bytes: 167366
num_examples: 520
download_size: 81593
dataset_size: 167366
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ashwiniai/anatomy-corpus | ashwiniai | "2024-11-25T14:59:44Z" | 3 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T06:38:59Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: page_idx
dtype: int64
- name: document_name
dtype: string
- name: file_path
dtype: string
- name: file_url
dtype: string
- name: loader_name
dtype: string
splits:
- name: train
num_bytes: 12681904
num_examples: 2219
- name: pdfplumbertextloader
num_bytes: 12424290
num_examples: 2219
- name: pymupdf4llmtextloader
num_bytes: 12163384
num_examples: 2219
download_size: 19216773
dataset_size: 37269578
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: pdfplumbertextloader
path: data/pdfplumbertextloader-*
- split: pymupdf4llmtextloader
path: data/pymupdf4llmtextloader-*
---
|
stacklok/insecure-code | stacklok | "2024-11-25T21:21:17Z" | 3 | 0 | [
"task_categories:text-classification",
"license:apache-2.0",
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"text-classification"
] | "2024-11-25T06:54:23Z" | ---
license: apache-2.0
task_categories:
- text-classification
pretty_name: insecure-code
size_categories:
- 1K<n<10K
---
# Insecure Code Dataset
A dataset of insecure coding patterns, generated with [Promptwright](https://github.com/StacklokLabs/promptwright) version 1.3.1 |
ZixuanKe/flare_finqa_sup_sample_from_policy_v1.1_dpo_train_chunk_16 | ZixuanKe | "2024-11-25T07:00:47Z" | 3 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T07:00:43Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: rejected
dtype: string
- name: chosen
dtype: string
splits:
- name: train
num_bytes: 5398693
num_examples: 1098
download_size: 587519
dataset_size: 5398693
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
habanoz/c4_tr_400k | habanoz | "2024-11-25T07:39:37Z" | 3 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T07:34:15Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: timestamp
dtype: timestamp[s]
- name: url
dtype: string
splits:
- name: train
num_bytes: 1255259337
num_examples: 400000
download_size: 741872979
dataset_size: 1255259337
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
hoonikoo/new_iio | hoonikoo | "2024-11-25T07:35:14Z" | 3 | 0 | [
"license:apache-2.0",
"size_categories:n<1K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T07:34:43Z" | ---
license: apache-2.0
---
|
neoneye/simon-arc-combine-v182 | neoneye | "2024-11-25T07:36:09Z" | 3 | 0 | [
"task_categories:image-to-text",
"task_categories:text-to-image",
"language:en",
"license:mit",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"image-to-text",
"text-to-image"
] | "2024-11-25T07:34:46Z" | ---
license: mit
task_categories:
- image-to-text
- text-to-image
language:
- en
pretty_name: simons ARC (abstraction & reasoning corpus) combined datasets version 182
size_categories:
- 1K<n<10K
configs:
- config_name: default
data_files:
- split: train
path: data.jsonl
---
# Version 1
A combination of multiple datasets.
Datasets: `dataset_solve_color.jsonl`, `dataset_solve_rotate.jsonl`, `dataset_solve_translate.jsonl`.
# Version 2
Datasets: `dataset_solve_color.jsonl`, `dataset_solve_rotate.jsonl`, `dataset_solve_translate.jsonl`.
# Version 3
Datasets: `dataset_solve_color.jsonl`, `dataset_solve_rotate.jsonl`, `dataset_solve_translate.jsonl`.
# Version 4
Added a shared dataset name for all these datasets: `SIMON-SOLVE-V1`. There may be higher version numbers in the future.
My hypothesis: with a version number in the dataset name, it may be easier to unlearn incorrect training data.
Datasets: `dataset_solve_color.jsonl`, `dataset_solve_rotate.jsonl`, `dataset_solve_translate.jsonl`.
# Version 5
Different random seed.
# Version 6
Using `SIMON-SOLVE-V1` everywhere. Removed `SIMON-SOLVE-COLOR`, `SIMON-SOLVE-ROTATE`, and `SIMON-SOLVE-TRANSLATE`.
# Version 7
Using `SIMON-SOLVE-V1` everywhere.
# Version 8
Same settings. Different seed as usual.
# Version 9
Switching from context length 256 to context length 512.
Increasing the image sizes so the prompt length stays below 512.
`dataset_solve_color`, image size: 1-13.
`dataset_solve_rotate`, image size: 1-9.
`dataset_solve_translate`, image size: 3-9.
# Version 10
Same settings. Different seed as usual.
# Version 11
Same settings. Different seed as usual.
# Version 12
Added one more pair to the examples: now 2-4 examples, previously 2-3.
# Version 13
Same settings. Different seed as usual.
# Version 14
Same settings. Different seed as usual.
# Version 15
Same settings. Different seed as usual.
# Version 16
Added `Predict the output image.`
Disabled prediction of rows.
Disabled prediction of height.
# Version 17
Same settings. Different seed as usual.
Using the `DatasetGenerator` and the `DatasetItemListBuilder`.
# Version 18
Added datasets.
Datasets:
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_cellular_automaton.jsonl` - added.
- `dataset_shape.jsonl` - added.
# Version 19
Added dataset.
Datasets:
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_cellular_automaton.jsonl`
- `dataset_shape.jsonl`
- `dataset_image.jsonl` - added.
# Version 20
Bigger images.
# Version 21
Added dataset. Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_shape.jsonl`
- `dataset_mass.jsonl` - added.
# Version 22
Added dataset.
Datasets:
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_cellular_automaton.jsonl`
- `dataset_shape.jsonl`
- `dataset_image.jsonl`
- `dataset_mass.jsonl`
- `dataset_histogram.jsonl` - added.
Bigger image sizes.
Number of rows=200k. Was previously 100k rows.
# Version 23
`dataset_mass.jsonl`: increased to `max_mass=5`.
# Version 24
`dataset_mass.jsonl`: increased to `max_mass=6`.
# Version 25
Different seed.
# Version 26
`dataset_mass.jsonl`: increased to `max_mass=25`.
Different seed.
# Version 27
Different seed.
# Version 28
Different seed.
# Version 29
Different seed.
# Version 30
Different seed.
# Version 31
Different seed.
# Version 32
Different seed.
# Version 33
Disabled some datasets.
Datasets:
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_mass.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_rotate.jsonl` - disabled
- `dataset_solve_translate.jsonl` - disabled
- `dataset_cellular_automaton.jsonl`
# Version 34
Enabled all datasets.
# Version 35
Regenerated all datasets with new random seeds.
# Version 36
Added dataset `dataset_scale.jsonl`.
Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
# Version 37
Enabled all datasets
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
# Version 38
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl` - added
# Version 39
Regenerated all datasets with new random seeds.
# Version 40
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl` - added
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl`
# Version 41
Regenerated all datasets with new random seeds.
# Version 42
Added dataset. Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl` - added
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 43
Enabled all datasets.
# Version 44
Regenerated all datasets with new random seeds.
# Version 45
Extended the `dataset_shape.jsonl` with these new `PixelConnectivity` types: `CORNER4`, `LR2`, `TB2`, `TLBR2`, `TRBL2`.
Hopefully this makes the model better at making sense of diagonal structures, which it is currently terrible at.
# Version 46
Regenerated all datasets with new random seeds.
# Version 47
Added dataset. Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl` - added
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl`
# Version 48
Enabled all datasets.
# Version 49
Increased `max_mass` from 6 to 8.
# Version 50
Added dataset.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl` - added
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl`
# Version 51
Regenerated all datasets with new random seeds.
# Version 52
Regenerated all datasets with new random seeds.
# Version 53
Regenerated all datasets with new random seeds.
# Version 54
Added dataset.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_erotion.jsonl` - added
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl`
# Version 55
Added dataset. Disabled most datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl` - added
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_compress.jsonl` - disabled
- `dataset_solve_rotate.jsonl` - disabled
- `dataset_solve_scale.jsonl` - disabled
- `dataset_solve_symmetry.jsonl` - disabled
- `dataset_solve_translate.jsonl` - disabled
- `dataset_symmetry.jsonl` - disabled
# Version 56
Enabled all datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl`
# Version 57
Regenerated all datasets with new random seeds.
# Version 58
Disabled most datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 59
Added new datasets.
Disabled most datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl` - added
- `dataset_solve_fractal.jsonl` - added
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 60
Incremented random seed
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 61
Enabled all datasets.
More padding inside the `dataset_solve_fractal.jsonl`.
# Version 62
All datasets still enabled.
Turning up the parameters for `dataset_solve_fractal.jsonl`:
- `scale_input`: from 3 to 4.
- `scale_output`: from 3 to 4.
- `max_image_size`: from 3 to 4.
- `max_pad_count`: from 4 to 5.
# Version 63
Disabled several datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl`
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_compress.jsonl` - disabled
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_rotate.jsonl` - disabled
- `dataset_solve_scale.jsonl` - disabled
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl` - disabled
- `dataset_symmetry.jsonl` - disabled
# Version 64
Added dataset.
Increased the number of rows in the jsonl file from 200k to 300k.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_outline.jsonl` - added
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl`
# Version 65
Different random seed.
# Version 66
Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl`
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_compress.jsonl` - disabled
- `dataset_solve_erosion.jsonl` - disabled
- `dataset_solve_fractal.jsonl` - disabled
- `dataset_solve_outline.jsonl` - disabled
- `dataset_solve_rotate.jsonl` - disabled
- `dataset_solve_scale.jsonl` - disabled
- `dataset_solve_symmetry.jsonl` - disabled
- `dataset_solve_translate.jsonl` - disabled
- `dataset_symmetry.jsonl` - disabled
# Version 67
Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl` - enabled
- `dataset_solve_compress.jsonl` - enabled
- `dataset_solve_erosion.jsonl` - enabled
- `dataset_solve_fractal.jsonl` - enabled
- `dataset_solve_outline.jsonl` - enabled
- `dataset_solve_rotate.jsonl` - enabled
- `dataset_solve_scale.jsonl` - enabled
- `dataset_solve_symmetry.jsonl` - enabled
- `dataset_solve_translate.jsonl` - enabled
- `dataset_symmetry.jsonl`
# Version 68
Enabled all datasets.
# Version 69
Different random seed.
# Version 70
Different random seed.
# Version 71
Different random seed.
# Version 72
Different random seed.
# Version 73
Different random seed.
# Version 74
Major update to `dataset_solve_symmetry.jsonl`.
# Version 75
Different random seed.
# Version 76
Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 77
Enabled all datasets.
# Version 78
Major update to `dataset_solve_symmetry.jsonl`.
# Version 79
Different random seed.
# Version 80
Different random seed.
# Version 81
Different random seed.
# Version 82
Added dataset.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl` - added
- `dataset_symmetry.jsonl`
# Version 83
Disabled datasets that don't solve ARC puzzles.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 84
Added dataset `dataset_solve_grid.jsonl`.
Disabled datasets that don't solve ARC puzzles.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl` - added
- `dataset_solve_outline.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 85
Disabled datasets that don't solve ARC puzzles.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 86
Enabled all datasets.
# Version 87
Disabled datasets that don't solve ARC puzzles.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 88
Added dataset `dataset_solve_probecolor.jsonl` with all directions enabled.
Disabled datasets that don't solve ARC puzzles.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 89
Enabled all datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 90
Disabled some of the datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl` - disabled
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl` - disabled
- `dataset_solve_outline.jsonl` - disabled
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl` - disabled
- `dataset_solve_zindex.jsonl` - disabled
- `dataset_symmetry.jsonl` - disabled
# Version 91
Added dataset.
Enabled all datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_mass.jsonl` - added
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 92
Different random seed.
# Version 93
Added dataset.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl` - added
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 94
Added dataset.
Disabled datasets that don't solve ARC tasks.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_bool.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl` - added
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 95
Added dataset.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl` - added
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 96
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl` - major update.
- `dataset_symmetry.jsonl`
# Version 97
Disabled the first half of the datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_bool.jsonl` - disabled
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_compress.jsonl` - disabled
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 98
Disabled the last half of the datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl` - disabled
- `dataset_solve_erosion.jsonl` - disabled
- `dataset_solve_fractal.jsonl` - disabled
- `dataset_solve_grid.jsonl` - disabled
- `dataset_solve_half.jsonl` - disabled
- `dataset_solve_mass.jsonl` - disabled
- `dataset_solve_outline.jsonl` - disabled
- `dataset_solve_probecolor.jsonl` - disabled
- `dataset_solve_rotate.jsonl` - disabled
- `dataset_solve_scale.jsonl` - disabled
- `dataset_solve_symmetry.jsonl` - disabled
- `dataset_solve_translate.jsonl` - disabled
- `dataset_solve_zindex.jsonl` - disabled
- `dataset_symmetry.jsonl` - disabled
# Version 99
Disabled all but a quarter of the datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_bool.jsonl` - disabled
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_compress.jsonl` - disabled
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl` - disabled
- `dataset_solve_rotate.jsonl` - disabled
- `dataset_solve_scale.jsonl` - disabled
- `dataset_solve_symmetry.jsonl` - disabled
- `dataset_solve_translate.jsonl` - disabled
- `dataset_solve_zindex.jsonl` - disabled
- `dataset_symmetry.jsonl` - disabled
# Version 100
Added dataset.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl` - added
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 101
Disabled the non-solving datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_bool.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 102
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl` - added
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 103
Different random seed.
# Version 104
Disabled the non-solving datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 105
Major update to `dataset_solve_scale.jsonl` with scaling down noisy images.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl` - scale down noisy images
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 106
Different random seed.
# Version 107
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_ray.jsonl` - added
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 108
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_flip.jsonl` - added
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_ray.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 109
Different random seed.
# Version 110
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_flip.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_halfplane.jsonl` - added
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_ray.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 111
Different random seed.
# Version 112
Different random seed.
# Version 113
Different random seed.
# Version 114
Major update to `dataset_solve_mass.jsonl`: it now includes `mass_compare_adjacent_rows` and `mass_compare_adjacent_columns`.
# Version 115
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_flip.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_gravity.jsonl` - added
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_halfplane.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_ray.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 116
Hypothesis: will the model converge faster if I train with a smaller dataset?
Reduced the number of rows in this dataset from 300k rows to 10k rows.
# Version 117
Interestingly, 10k rows seem to work fine for model training.
Picked new random rows.
# Version 118
Still going with 10k rows.
Picked new random rows.
# Version 119
Still going with 10k rows.
Picked new random rows.
# Version 120
Switched to 20k rows.
# Version 121
Still going with 20k rows.
Picked new random rows.
# Version 122
20k rows.
Added `dataset_solve_reverse.jsonl`.
# Version 123
Doubled the number of rows to 40k rows.
# Version 124
Set row count to 100k rows.
Major update to `dataset_solve_gravity.jsonl`.
# Version 125
Row count: 100k rows.
# Version 126
Row count: 20k rows.
Only these datasets are enabled:
```txt
dataset_solve_bool.jsonl
dataset_solve_boundingbox.jsonl
dataset_solve_color.jsonl
dataset_solve_compress.jsonl
dataset_solve_edge.jsonl
dataset_solve_erosion.jsonl
dataset_solve_flip.jsonl
dataset_solve_fractal.jsonl
dataset_solve_gravity.jsonl
dataset_solve_grid.jsonl
dataset_solve_half.jsonl
dataset_solve_halfplane.jsonl
dataset_solve_mask.jsonl
dataset_solve_mass.jsonl
dataset_solve_outline.jsonl
dataset_solve_probecolor.jsonl
dataset_solve_ray.jsonl
dataset_solve_reverse.jsonl
dataset_solve_rotate.jsonl
dataset_solve_scale.jsonl
dataset_solve_symmetry.jsonl
dataset_solve_translate.jsonl
dataset_solve_zindex.jsonl
```
# Version 127
Row count: 20k rows.
Only these datasets are enabled:
```txt
dataset_solve_scale.jsonl
dataset_solve_symmetry.jsonl
dataset_solve_translate.jsonl
dataset_solve_zindex.jsonl
```
# Version 128
Row count: 20k rows.
Only these datasets are enabled:
```txt
dataset_solve_probecolor.jsonl
dataset_solve_ray.jsonl
dataset_solve_reverse.jsonl
dataset_solve_rotate.jsonl
```
# Version 129
Row count: 20k rows.
Only these datasets are enabled:
```txt
dataset_solve_gravity.jsonl
dataset_solve_grid.jsonl
dataset_solve_half.jsonl
dataset_solve_halfplane.jsonl
dataset_solve_mask.jsonl
dataset_solve_mass.jsonl
dataset_solve_outline.jsonl
```
# Version 130
Row count: 20k rows.
Only these datasets are enabled:
```txt
dataset_solve_bool.jsonl
dataset_solve_boundingbox.jsonl
dataset_solve_color.jsonl
dataset_solve_compress.jsonl
dataset_solve_edge.jsonl
dataset_solve_erosion.jsonl
dataset_solve_flip.jsonl
dataset_solve_fractal.jsonl
```
# Version 131
Switched back to 300k rows.
Enabled all the datasets.
# Version 132
Random seed.
# Version 133
Removed the rows that are longer than what can be fitted inside a 512 context length.
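A minimal sketch of this kind of length filter, assuming a hypothetical `tokenize` callable and a `rows` list of serialized examples (neither the tokenizer nor the row format of the actual dataset is specified here):
```python
# Hypothetical filter: keep only rows whose serialized text fits in the context window.
# `tokenize` stands in for whatever tokenizer the training run actually uses.
MAX_CONTEXT = 512

def fits_context(row_text: str, tokenize) -> bool:
    return len(tokenize(row_text)) <= MAX_CONTEXT

def filter_rows(rows: list[str], tokenize) -> list[str]:
    return [row for row in rows if fits_context(row, tokenize)]
```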
# Version 134
Random seed.
# Version 135
Random seed.
# Version 136
Major update to the `dataset_solve_gravity.jsonl` file.
# Version 137
Added dataset `dataset_solve_skew.jsonl`.
# Version 138
Disabled several datasets.
```txt
# 'dataset_cellular_automaton.jsonl',
# 'dataset_dilation.jsonl',
# 'dataset_erosion.jsonl',
# 'dataset_histogram.jsonl',
# 'dataset_image.jsonl',
# 'dataset_image_pair.jsonl',
# 'dataset_mass.jsonl',
# 'dataset_scale.jsonl',
# 'dataset_shape.jsonl',
# 'dataset_solve_bool.jsonl',
'dataset_solve_boundingbox.jsonl',
'dataset_solve_color.jsonl',
'dataset_solve_compress.jsonl',
'dataset_solve_edge.jsonl',
'dataset_solve_erosion.jsonl',
'dataset_solve_flip.jsonl',
'dataset_solve_fractal.jsonl',
'dataset_solve_gravity.jsonl',
'dataset_solve_grid.jsonl',
'dataset_solve_half.jsonl',
# 'dataset_solve_halfplane.jsonl',
'dataset_solve_mask.jsonl',
'dataset_solve_mass.jsonl',
'dataset_solve_outline.jsonl',
'dataset_solve_probecolor.jsonl',
# 'dataset_solve_ray.jsonl',
# 'dataset_solve_reverse.jsonl',
'dataset_solve_rotate.jsonl',
'dataset_solve_scale.jsonl',
# 'dataset_solve_skew.jsonl',
'dataset_solve_symmetry.jsonl',
'dataset_solve_translate.jsonl',
'dataset_solve_zindex.jsonl',
# 'dataset_symmetry.jsonl',
```
# Version 139
Disabled several datasets.
```txt
'dataset_cellular_automaton.jsonl',
'dataset_dilation.jsonl',
'dataset_erosion.jsonl',
'dataset_histogram.jsonl',
'dataset_image.jsonl',
'dataset_image_pair.jsonl',
'dataset_mass.jsonl',
'dataset_scale.jsonl',
'dataset_shape.jsonl',
'dataset_solve_bool.jsonl',
# 'dataset_solve_boundingbox.jsonl',
# 'dataset_solve_color.jsonl',
# 'dataset_solve_compress.jsonl',
# 'dataset_solve_edge.jsonl',
# 'dataset_solve_erosion.jsonl',
# 'dataset_solve_flip.jsonl',
# 'dataset_solve_fractal.jsonl',
# 'dataset_solve_gravity.jsonl',
# 'dataset_solve_grid.jsonl',
# 'dataset_solve_half.jsonl',
'dataset_solve_halfplane.jsonl',
# 'dataset_solve_mask.jsonl',
# 'dataset_solve_mass.jsonl',
# 'dataset_solve_outline.jsonl',
# 'dataset_solve_probecolor.jsonl',
'dataset_solve_ray.jsonl',
'dataset_solve_reverse.jsonl',
# 'dataset_solve_rotate.jsonl',
# 'dataset_solve_scale.jsonl',
'dataset_solve_skew.jsonl',
# 'dataset_solve_symmetry.jsonl',
# 'dataset_solve_translate.jsonl',
# 'dataset_solve_zindex.jsonl',
'dataset_symmetry.jsonl',
```
# Version 140
Enabled all datasets.
Added new dataset: `dataset_solve_cross.jsonl`.
# Version 141
Switched to 30k rows.
Disabled several datasets.
```txt
# 'dataset_cellular_automaton.jsonl',
# 'dataset_dilation.jsonl',
# 'dataset_erosion.jsonl',
# 'dataset_histogram.jsonl',
# 'dataset_image.jsonl',
# 'dataset_image_pair.jsonl',
# 'dataset_mass.jsonl',
# 'dataset_scale.jsonl',
# 'dataset_shape.jsonl',
# 'dataset_solve_bool.jsonl',
'dataset_solve_boundingbox.jsonl',
'dataset_solve_color.jsonl',
'dataset_solve_compress.jsonl',
# 'dataset_solve_cross.jsonl',
'dataset_solve_edge.jsonl',
'dataset_solve_erosion.jsonl',
'dataset_solve_flip.jsonl',
'dataset_solve_fractal.jsonl',
# 'dataset_solve_gravity.jsonl',
'dataset_solve_grid.jsonl',
'dataset_solve_half.jsonl',
# 'dataset_solve_halfplane.jsonl',
'dataset_solve_mask.jsonl',
'dataset_solve_mass.jsonl',
'dataset_solve_outline.jsonl',
'dataset_solve_probecolor.jsonl',
'dataset_solve_ray.jsonl',
# 'dataset_solve_reverse.jsonl',
'dataset_solve_rotate.jsonl',
'dataset_solve_scale.jsonl',
'dataset_solve_skew.jsonl',
'dataset_solve_symmetry.jsonl',
'dataset_solve_translate.jsonl',
# 'dataset_solve_zindex.jsonl',
# 'dataset_symmetry.jsonl',
```
# Version 142
Switched to 300k rows.
Enabled all datasets.
Switched from 512 context to 1024 context.
# Version 143
Bigger images in `dataset_solve_cross.jsonl` and in `dataset_solve_mass.jsonl`.
# Version 144
Major update to `dataset_solve_symmetry.jsonl`.
# Version 145
Added `dataset_solve_span.jsonl`.
# Version 146
Extended `dataset_solve_span.jsonl` with `generate_task_with_template_lines`.
# Version 147
Extended `dataset_solve_span.jsonl` with `generate_task_with_alternate`.
# Version 148
Added `dataset_solve_count.jsonl`.
# Version 149
Randomized.
# Version 150
Upgraded context length for several datasets from 512 to 1024.
# Version 151
Randomized.
# Version 152
Randomized.
# Version 153
Extended `dataset_solve_mask.jsonl` with `generate_task_repair_rectangle_and_crop`.
# Version 154
Extended `dataset_solve_color.jsonl` with `generate_task_replace_color`.
# Version 155
Major update to datasets in the range from `dataset_solve_axxx.jsonl` to `dataset_solve_mask.jsonl`.
Now there is an earlier prediction for the output that is to be predicted. It may contain a hint, or it may be garbage that is to be ignored.
# Version 156
Only 2000 rows.
Only these datasets.
- 'dataset_cellular_automaton.jsonl',
- 'dataset_dilation.jsonl',
- 'dataset_erosion.jsonl',
- 'dataset_histogram.jsonl',
- 'dataset_image.jsonl',
- 'dataset_image_pair.jsonl',
- 'dataset_mass.jsonl',
- 'dataset_scale.jsonl',
- 'dataset_shape.jsonl',
- 'dataset_symmetry.jsonl',
# Version 157
Only these datasets.
- 'dataset_solve_bool.jsonl',
- 'dataset_solve_boundingbox.jsonl',
- 'dataset_solve_color.jsonl',
- 'dataset_solve_compress.jsonl',
- 'dataset_solve_count.jsonl',
- 'dataset_solve_cross.jsonl',
- 'dataset_solve_edge.jsonl',
- 'dataset_solve_erosion.jsonl',
- 'dataset_solve_flip.jsonl',
- 'dataset_solve_fractal.jsonl',
- 'dataset_solve_gravity.jsonl',
- 'dataset_solve_grid.jsonl',
- 'dataset_solve_half.jsonl',
- 'dataset_solve_halfplane.jsonl',
- 'dataset_solve_mask.jsonl',
- 'dataset_solve_mass.jsonl',
- 'dataset_solve_outline.jsonl',
- 'dataset_solve_probecolor.jsonl',
- 'dataset_solve_ray.jsonl',
- 'dataset_solve_reverse.jsonl',
- 'dataset_solve_rotate.jsonl',
- 'dataset_solve_scale.jsonl',
- 'dataset_solve_span.jsonl',
- 'dataset_solve_skew.jsonl',
- 'dataset_solve_symmetry.jsonl',
- 'dataset_solve_translate.jsonl',
- 'dataset_solve_zindex.jsonl',
# Version 158
Only these datasets.
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_rectangle.jsonl`
# Version 159
Enabled all the `_solve_` datasets.
# Version 160
Regenerated all the `_solve_` datasets with new seed.
# Version 161
Regenerated all the `_solve_` datasets with new seed.
# Version 162
Replaced RLE compressed response with raw pixel response.
# Version 163
Added more generators:
- DatasetSolveCount
- DatasetSolveCross
- DatasetSolveEdge
- DatasetSolveErosion
- DatasetSolveFlip
- DatasetSolveFractal
# Version 164
Increased row count from 1000 to 2000.
# Version 165
Added more generators.
# Version 166
Added more generators.
# Version 167
Added more generators.
# Version 168
Added more generators.
# Version 169
Generated data.
# Version 170
Generated data.
# Version 171
Generated data.
Increased output context length from 256 to 512.
# Version 172
Generated data.
# Version 173
Generated data.
# Version 174
Generated data.
# Version 175
Generated data.
# Version 176
Generated data.
# Version 177
Increased the number of rows from 2000 to 4000.
Generated data.
# Version 178
Generated data.
# Version 179
Generated data.
# Version 180
Generated data.
# Version 181
Generated data.
# Version 182
Generated data.
|
Shoot4r/lab10 | Shoot4r | "2024-11-25T07:58:26Z" | 3 | 0 | [
"task_categories:token-classification",
"language:en",
"license:openrail",
"size_categories:n<1K",
"format:imagefolder",
"modality:image",
"library:datasets",
"library:mlcroissant",
"region:us"
] | [
"token-classification"
] | "2024-11-25T07:39:03Z" | ---
license: openrail
task_categories:
- token-classification
language:
- en
--- |
ZhangShenao/gc_binarized_ultrafeedback | ZhangShenao | "2024-07-08T16:45:10Z" | 2 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-07-08T16:40:01Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: prompt_id
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: score_chosen
dtype: float64
- name: score_rejected
dtype: float64
splits:
- name: train_prefs
num_bytes: 767187610
num_examples: 107496
- name: test_prefs
num_bytes: 13161585
num_examples: 2000
download_size: 403766881
dataset_size: 780349195
configs:
- config_name: default
data_files:
- split: train_prefs
path: data/train_prefs-*
- split: test_prefs
path: data/test_prefs-*
---
|
un-Loc/dataset-warsaw-palace | un-Loc | "2024-08-30T14:01:50Z" | 2 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-08-30T14:01:35Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: text
dtype: string
splits:
- name: train
num_bytes: 68221298.0
num_examples: 34
download_size: 68077529
dataset_size: 68221298.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ArcIndustry/Aarambh | ArcIndustry | "2024-09-25T05:53:59Z" | 2 | 0 | [
"license:mit",
"size_categories:n<1K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-09-25T05:53:25Z" | ---
license: mit
---
|
UserID004/asheley_dataset | UserID004 | "2024-10-13T05:17:53Z" | 2 | 0 | [
"license:apache-2.0",
"size_categories:n<1K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-09-27T12:26:19Z" | ---
license: apache-2.0
---
|
mlfoundations-dev/camel_math_gpt-4o-2024-08-06 | mlfoundations-dev | "2024-10-04T09:14:03Z" | 2 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-10-04T08:55:06Z" | ---
dataset_info:
features:
- name: role_1
dtype: string
- name: topic
dtype: string
- name: sub_topic
dtype: string
- name: message_1
dtype: string
- name: message_2
dtype: string
splits:
- name: train
num_bytes: 129508297
num_examples: 50000
download_size: 29478413
dataset_size: 129508297
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/camel_biology_gpt-4o-2024-08-06 | mlfoundations-dev | "2024-10-06T04:44:03Z" | 2 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-10-06T04:43:53Z" | ---
dataset_info:
features:
- name: role_1
dtype: string
- name: topic
dtype: string
- name: sub_topic
dtype: string
- name: message_1
dtype: string
- name: message_2
dtype: string
splits:
- name: train
num_bytes: 70428125
num_examples: 20000
download_size: 24489850
dataset_size: 70428125
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/camel_physics_gpt-4o-mini | mlfoundations-dev | "2024-10-17T18:52:16Z" | 2 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-10-17T18:52:13Z" | ---
dataset_info:
features:
- name: role_1
dtype: string
- name: topic
dtype: string
- name: sub_topic
dtype: string
- name: message_1
dtype: string
- name: message_2
dtype: string
splits:
- name: train
num_bytes: 50720194
num_examples: 20416
download_size: 12017624
dataset_size: 50720194
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
underctrl/single-block_multi-color_pick-up_50 | underctrl | "2024-11-11T05:25:48Z" | 2 | 1 | [
"task_categories:robotics",
"region:us",
"LeRobot",
"tutorial"
] | [
"robotics"
] | "2024-11-09T16:13:58Z" | ---
task_categories:
- robotics
tags:
- LeRobot
- tutorial
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
|
FrancophonIA/Vocabulaire_panlatin_du_surf | FrancophonIA | "2024-11-16T19:45:27Z" | 2 | 0 | [
"task_categories:translation",
"language:en",
"language:it",
"language:ca",
"language:es",
"language:fr",
"language:gl",
"language:pt",
"language:ro",
"region:us",
"es_ES",
"fr_FR",
"fr_QC",
"pt_BR"
] | [
"translation"
] | "2024-11-16T19:37:02Z" | ---
language:
- en
- it
- ca
- es
- fr
- gl
- pt
- ro
tags:
- es_ES
- fr_FR
- fr_QC
- pt_BR
multilinguality:
- multilingual
task_categories:
- translation
viewer: false
---
> [!NOTE]
> Dataset origin: https://www.culture.gouv.fr/Thematiques/langue-francaise-et-langues-de-france/Agir-pour-les-langues/Moderniser-et-enrichir-la-langue-francaise/Nos-publications/vocabulaires-panlatins-du-sport/vocabulaire-panlatin-du-surf
## Description
This lexicon is the result of a collaboration between the Délégation générale à la langue française et aux langues de France, the pan-Latin terminology network REALITER, and the Université Paul Valéry in Montpellier.
Produced with a view to the 2024 Olympic Games, it presents surfing terms in Catalan, Spanish (Spain), French (France and Quebec), Galician, Italian, Brazilian Portuguese, Romanian, and English. |
FrancophonIA/Vocabulaire_panlatin_nanotechnologie_2 | FrancophonIA | "2024-11-16T20:52:30Z" | 2 | 0 | [
"task_categories:translation",
"language:it",
"language:ca",
"language:es",
"language:fr",
"language:gl",
"language:pt",
"language:ro",
"language:en",
"region:us",
"pt_BR",
"pt_PT",
"es_AR",
"es_ES",
"fr_QC"
] | [
"translation"
] | "2024-11-16T20:51:02Z" | ---
language:
- it
- ca
- es
- fr
- gl
- pt
- ro
- en
tags:
- pt_BR
- pt_PT
- es_AR
- es_ES
- fr_QC
multilinguality:
- multilingual
task_categories:
- translation
viewer: false
---
> [!NOTE]
> Dataset origin: https://www.realiter.net/fr/lessici-realiter
## Description
Development of a lexicon of 160 concepts relating to nanotechnology, a multidisciplinary field. Nanotechnology is mainly concerned with the fabrication of molecular structures that have at least one dimension measuring between 1 and 100 nanometres. Some of the terms covered in the lexicon therefore designate the techniques, instruments, and units of measurement used to study and fabricate nanometre-scale entities. More generally, the terms in the nomenclature presented relate to physics, chemistry, biology, electronics, and computer science. |
FrancophonIA/Vocabulaire_panlatin_velo | FrancophonIA | "2024-11-16T21:34:25Z" | 2 | 0 | [
"task_categories:translation",
"language:it",
"language:ca",
"language:es",
"language:fr",
"language:gl",
"language:pt",
"language:ro",
"language:en",
"region:us",
"es_ES",
"es_MX",
"pt_BR",
"pt_PT",
"fr_QC"
] | [
"translation"
] | "2024-11-16T21:33:57Z" | ---
language:
- it
- ca
- es
- fr
- gl
- pt
- ro
- en
tags:
- es_ES
- es_MX
- pt_BR
- pt_PT
- fr_QC
multilinguality:
- multilingual
task_categories:
- translation
viewer: false
---
> [!NOTE]
> Dataset origin: https://www.realiter.net/fr/lessici-realiter |
samahadhoud/MicroTikZ | samahadhoud | "2024-11-18T02:04:05Z" | 2 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-18T01:46:36Z" | ---
dataset_info:
features:
- name: example_number
dtype: int32
- name: combination_number
dtype: int32
- name: image_score
dtype: float32
- name: code_score
dtype: float32
- name: combined_score
dtype: float32
- name: rank
dtype: float32
- name: original_image
dtype: image
- name: generated_image
dtype: image
- name: original_code
dtype: string
- name: generated_code
dtype: string
splits:
- name: train
num_bytes: 1637301531.36
num_examples: 85520
download_size: 1607765742
dataset_size: 1637301531.36
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# TikZ Generation Curriculum Learning Dataset
## Dataset Description
### Overview
This dataset is specifically designed and decomposed for curriculum learning applications in image-to-tikzcode generation tasks. It contains evaluation metrics and comparisons between original TikZ diagrams and their machine-generated counterparts using the `nllg/detikzify-ds-1.3b` model, arranged in order of generation difficulty.
### Purpose
The primary purpose of this dataset is to facilitate curriculum learning strategies in training image-to-tikzcode generation models. By providing a difficulty-ranked dataset, it enables:
- Progressive learning from simple to complex examples
- Difficulty-aware training strategies
- Structured learning path development
- Performance evaluation across difficulty levels
### Evaluation Metrics and Ranking
The dataset includes three dissimilarity metrics (where 0 = identical, 1 = most dissimilar):
1. **Image Dissimilarity** (70% weight):
- Measures visual differences between original and generated images
- Range: 0 to 1 (0 = identical images, 1 = completely different)
- Considers structural differences, edge detection, and complexity
2. **Code Dissimilarity** (30% weight):
- Measures differences between original and generated TikZ code
- Range: 0 to 1 (0 = identical code, 1 = completely different)
- Based on code structure and content comparison
3. **Combined Score**:
- Weighted average: 0.7 * image_dissimilarity + 0.3 * code_dissimilarity
- Range: 0 to 1 (0 = perfect match, 1 = maximum difference)
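The weighted combination above translates directly into a few lines of code. The following is a minimal sketch, not the authors' pipeline: it assumes plain Python floats and a simple position-based ranking, since the exact procedure used to produce the `rank` column is not spelled out in this card.
```python
def combined_score(image_dissimilarity: float, code_dissimilarity: float) -> float:
    """Weighted average described above: 70% image, 30% code dissimilarity."""
    return 0.7 * image_dissimilarity + 0.3 * code_dissimilarity

def normalized_ranks(scores: list[float]) -> list[float]:
    """Map each example to a rank in [0, 1], where 0 = easiest and 1 = hardest."""
    order = sorted(range(len(scores)), key=lambda i: scores[i])
    ranks = [0.0] * len(scores)
    denom = max(len(scores) - 1, 1)
    for position, idx in enumerate(order):
        ranks[idx] = position / denom
    return ranks

# Plugging in the reported averages reproduces the average combined score:
print(combined_score(0.3003, 0.6285))  # ~0.3988
```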
### Dataset Statistics
- Total number of samples: 85,520
- Average image dissimilarity: 0.3003
- Average code dissimilarity: 0.6285
- Average combined dissimilarity: 0.3988
- Dissimilarity range: 0.0274 to 0.9255
### Features
- **example_number**: Unique identifier for each example
- **combination_number**: Specific combination identifier within each example
- **image_score**: Dissimilarity score between original and generated images (0-1)
- **code_score**: Dissimilarity score between original and generated TikZ code (0-1)
- **combined_score**: Weighted combination of dissimilarity metrics
- **rank**: Normalized difficulty rank (0=easiest to 1=hardest)
- **original_image**: Original diagram in PNG format
- **generated_image**: Model-generated diagram in PNG format, if one was produced
- **original_code**: Original TikZ code
- **generated_code**: Model-generated TikZ code
## Usage
### Loading the Dataset
```python
from datasets import load_dataset
dataset = load_dataset("samahadhoud/decomposed-tikz-dataset-with-difficulty-0-10")
```
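For curriculum-style training, the `rank` column can be used to schedule examples from easy to hard. The following is a minimal sketch that continues from the `dataset` object loaded above; the 0.33/0.66 phase cut-offs are illustrative assumptions, not part of the dataset.
```python
# `dataset` is the DatasetDict returned by load_dataset above.
train = dataset["train"]

# Illustrative three-phase curriculum based on the normalized `rank` column
# (0 = easiest, 1 = hardest). Filtering decodes images, so this can be slow.
easy = train.filter(lambda ex: ex["rank"] < 0.33)
medium = train.filter(lambda ex: 0.33 <= ex["rank"] < 0.66)
hard = train.filter(lambda ex: ex["rank"] >= 0.66)

for name, phase in [("easy", easy), ("medium", medium), ("hard", hard)]:
    print(name, len(phase))
```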
|
underctrl/single-block_blue-color_pick-up_80 | underctrl | "2024-11-18T04:49:28Z" | 2 | 0 | [
"task_categories:robotics",
"region:us",
"LeRobot",
"tutorial"
] | [
"robotics"
] | "2024-11-18T03:33:14Z" | ---
task_categories:
- robotics
tags:
- LeRobot
- tutorial
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
|
MarcMill/biobertv1 | MarcMill | "2024-11-20T06:58:04Z" | 2 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-11-20T06:56:57Z" | ---
license: apache-2.0
---
|
aminv/wordpress_qa | aminv | "2024-11-20T15:56:45Z" | 2 | 0 | [
"license:mit",
"size_categories:n<1K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-20T15:46:33Z" | ---
license: mit
---
|
aminv/wordpress-qa-llama3 | aminv | "2024-11-20T17:51:35Z" | 2 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-20T17:51:31Z" | ---
dataset_info:
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 35245
num_examples: 50
download_size: 16747
dataset_size: 35245
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
malaysia-ai/crawl-youtube-malaysian-cartoons | malaysia-ai | "2024-11-26T01:25:22Z" | 2 | 0 | [
"language:ms",
"language:en",
"region:us"
] | null | "2024-11-21T05:41:30Z" | ---
language:
- ms
- en
---
# Crawl Youtube Malaysian Cartoons
**The current size on local storage is 812 GB; we are clearing some space so it can be compressed.**
Source code at https://github.com/mesolitica/malaysian-dataset/tree/master/speech/malaysia-cartoon-youtube |
FrancophonIA/Belgian_government_bilingual_parallel_corpus | FrancophonIA | "2024-11-21T14:21:51Z" | 2 | 0 | [
"task_categories:translation",
"language:nl",
"language:fr",
"region:us"
] | [
"translation"
] | "2024-11-21T14:21:11Z" | ---
language:
- nl
- fr
multilinguality:
- multilingual
task_categories:
- translation
viewer: false
---
> [!NOTE]
> Dataset origin: https://live.european-language-grid.eu/catalogue/corpus/18952
## Description
Aligned texts from the Belgian government in French and Dutch (aligned with SDL Trados Studio)
## Citation
```
Belgian government bilingual parallel corpus (2022). Version 1.0. [Dataset (Text corpus)]. Source: European Language Grid. https://live.european-language-grid.eu/catalogue/corpus/18952
``` |
FrancophonIA/Translations_Hungarian_public_websites | FrancophonIA | "2024-11-21T14:36:03Z" | 2 | 0 | [
"task_categories:translation",
"language:fr",
"language:pl",
"language:cs",
"language:sv",
"language:fi",
"language:de",
"language:it",
"language:en",
"language:sl",
"region:us"
] | [
"translation"
] | "2024-11-21T14:32:50Z" | ---
language:
- fr
- pl
- cs
- sv
- fi
- de
- it
- en
- sl
multilinguality:
- multilingual
task_categories:
- translation
viewer: false
---
> [!NOTE]
> Dataset origin: https://live.european-language-grid.eu/catalogue/corpus/18982
## Description
A webcrawl of 14 different websites covering parallel corpora of Hungarian with Polish, Czech, Swedish, Finnish, French, German, Italian, English and Slovenian
## Citation
```
Translations of Hungarian from public websites (2022). Version 1.0. [Dataset (Text corpus)]. Source: European Language Grid. https://live.european-language-grid.eu/catalogue/corpus/18982
``` |
FrancophonIA/Luxembourg_website | FrancophonIA | "2024-11-21T14:38:56Z" | 2 | 0 | [
"task_categories:translation",
"language:en",
"language:de",
"language:fr",
"region:us"
] | [
"translation"
] | "2024-11-21T14:37:48Z" | ---
language:
- en
- de
- fr
multilinguality:
- multilingual
task_categories:
- translation
viewer: false
---
> [!NOTE]
> Dataset origin: https://live.european-language-grid.eu/catalogue/corpus/19053
## Description
Parallel (de-en-fr) corpus. Contains partially cleaned parallel sentences from the original data set (#157), which was delivered as a TMX file.
## Citation
```
Translation of the Luxembourg.lu web site (Processed) (2022). Version 2.0. [Dataset (Text corpus)]. Source: European Language Grid. https://live.european-language-grid.eu/catalogue/corpus/19053
``` |
FrancophonIA/Charter_values_citizenship_integration | FrancophonIA | "2024-11-21T14:44:51Z" | 2 | 0 | [
"task_categories:translation",
"language:de",
"language:es",
"language:en",
"language:it",
"language:fr",
"region:us"
] | [
"translation"
] | "2024-11-21T14:41:40Z" | ---
language:
- de
- es
- en
- it
- fr
multilinguality:
- multilingual
task_categories:
- translation
viewer: false
---
> [!NOTE]
> Dataset origin: https://live.european-language-grid.eu/catalogue/corpus/19058
## Description
The integration agreement form, prepared for signing the pact between the foreign national and the state, sets out the foreigner's commitments and also records the person's declaration to adhere to the Charter of the values of citizenship and integration (decree of the Minister of 23 April 2007), pledging to respect its principles. The Charter of the values of citizenship and integration, adopted in 2007, summarizes the fundamental principles of the Italian legal system governing collective life, for citizens and immigrants alike. Drawn up according to the principles of the Italian Constitution, the major European Charters and international human rights instruments, the Charter focuses especially on the issues that multiculturalism poses to Western societies. - Corpora Multilingual - Provided by Flavia Vecchione. - MINISTERO DELL’INTERNO website
## Citation
```
CHARTER OF VALUES OF CITIZENSHIP AND INTEGRATION (Processed) (2018, October 04). Version 2.0. [Dataset (Text corpus)]. Source: European Language Grid. https://live.european-language-grid.eu/catalogue/corpus/19058
``` |
Thermostatic/Biblia-Antiguo-Testamento-Nahuatl-Huasteca-Oriental | Thermostatic | "2024-11-23T02:18:50Z" | 2 | 0 | [
"license:mit",
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T02:18:36Z" | ---
license: mit
---
|
miguelsolis/some_name_random_8_2024_11_23_03_03_20 | miguelsolis | "2024-11-23T03:05:11Z" | 2 | 0 | [
"task_categories:robotics",
"region:us",
"LeRobot",
"tutorial"
] | [
"robotics"
] | "2024-11-23T03:05:03Z" | ---
task_categories:
- robotics
tags:
- LeRobot
- tutorial
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
|
reflection-gen/ds_coder6.7b_pos_reflct_rmsprop_iter1_sppo_hard_new_cn_mining_oj_iter1-binarized | reflection-gen | "2024-11-23T03:06:33Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T03:06:32Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: chosen_probs
dtype: float64
- name: chosen_probs_win
dtype: float64
- name: chosen_probs_lose
dtype: float64
splits:
- name: train
num_bytes: 12055246
num_examples: 4029
download_size: 5209484
dataset_size: 12055246
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder6.7b_pos_reflct_rmsprop_iter1_sppo_hard_new_cn_mining_oj_iter1-binarized"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
reflection-gen/ds_coder6.7b_pos_reflct_rmsprop_iter1_sppo_hard_new_cn_mining_oj_iter1-full_response_traceback | reflection-gen | "2024-11-23T03:06:34Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T03:06:33Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: test
dtype: string
- name: tag
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: text_prompt
dtype: string
- name: text_chosen
dtype: string
- name: text_rejected
dtype: string
- name: generate_0
dtype: string
- name: generate_0_score
dtype: int64
- name: traceback_0
dtype: string
- name: generate_1
dtype: string
- name: generate_1_score
dtype: int64
- name: traceback_1
dtype: string
- name: generate_2
dtype: string
- name: generate_2_score
dtype: int64
- name: traceback_2
dtype: string
- name: generate_3
dtype: string
- name: generate_3_score
dtype: int64
- name: traceback_3
dtype: string
- name: probability
sequence:
sequence: float64
- name: rm_scores
sequence: int64
splits:
- name: train
num_bytes: 30289538
num_examples: 4029
download_size: 11136511
dataset_size: 30289538
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder6.7b_pos_reflct_rmsprop_iter1_sppo_hard_new_cn_mining_oj_iter1-full_response_traceback"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
reflection-gen/ds_coder6.7b_pos_reflct_rmsprop_iter1_sppo_hard_new_cn_mining_oj_iter1-binarized_all_pairs | reflection-gen | "2024-11-23T03:06:36Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T03:06:35Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: test
dtype: string
splits:
- name: train
num_bytes: 20470836
num_examples: 6720
download_size: 6709944
dataset_size: 20470836
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder6.7b_pos_reflct_rmsprop_iter1_sppo_hard_new_cn_mining_oj_iter1-binarized_all_pairs"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
reflection-gen/ds_coder6.7b_pos_reflct_rmsprop_iter1_sppo_hard_new_cn_mining_oj_iter1-full_resp_trace | reflection-gen | "2024-11-23T06:03:10Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T06:03:09Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: test
dtype: string
- name: tag
dtype: string
- name: generate_0
dtype: string
- name: generate_0_score
dtype: int64
- name: traceback_0
dtype: string
- name: generate_1
dtype: string
- name: generate_1_score
dtype: int64
- name: traceback_1
dtype: string
- name: generate_2
dtype: string
- name: generate_2_score
dtype: int64
- name: traceback_2
dtype: string
- name: generate_3
dtype: string
- name: generate_3_score
dtype: int64
- name: traceback_3
dtype: string
- name: probability
sequence:
sequence: float64
- name: rm_scores
sequence: int64
splits:
- name: train
num_bytes: 24553127
num_examples: 4029
download_size: 10188652
dataset_size: 24553127
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder6.7b_pos_reflct_rmsprop_iter1_sppo_hard_new_cn_mining_oj_iter1-full_resp_trace"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
reflection-gen/ds_coder6.7b_pos_reflct_rmsprop_iter2_sppo_hard_new_cn_mining_oj_iter2-full_resp_trace | reflection-gen | "2024-11-23T08:44:04Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T08:44:03Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: test
dtype: string
- name: tag
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: text_prompt
dtype: string
- name: text_chosen
dtype: string
- name: text_rejected
dtype: string
- name: generate_0
dtype: string
- name: generate_0_score
dtype: int64
- name: traceback_0
dtype: string
- name: generate_1
dtype: string
- name: generate_1_score
dtype: int64
- name: traceback_1
dtype: string
- name: generate_2
dtype: string
- name: generate_2_score
dtype: int64
- name: traceback_2
dtype: string
- name: generate_3
dtype: string
- name: generate_3_score
dtype: int64
- name: traceback_3
dtype: string
- name: probability
sequence:
sequence: float64
- name: rm_scores
sequence: int64
splits:
- name: train
num_bytes: 20761749
num_examples: 2762
download_size: 7647105
dataset_size: 20761749
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder6.7b_pos_reflct_rmsprop_iter2_sppo_hard_new_cn_mining_oj_iter2-full_resp_trace"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
reflection-gen/ds_coder6.7b_pos_reflct_rmsprop_iter3_sppo_hard_new_cn_mining_oj_iter3-full_resp_trace | reflection-gen | "2024-11-23T11:09:41Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T11:09:40Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: test
dtype: string
- name: tag
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: text_prompt
dtype: string
- name: text_chosen
dtype: string
- name: text_rejected
dtype: string
- name: generate_0
dtype: string
- name: generate_0_score
dtype: int64
- name: traceback_0
dtype: string
- name: generate_1
dtype: string
- name: generate_1_score
dtype: int64
- name: traceback_1
dtype: string
- name: generate_2
dtype: string
- name: generate_2_score
dtype: int64
- name: traceback_2
dtype: string
- name: generate_3
dtype: string
- name: generate_3_score
dtype: int64
- name: traceback_3
dtype: string
- name: probability
sequence:
sequence: float64
- name: rm_scores
sequence: int64
splits:
- name: train
num_bytes: 18281442
num_examples: 2478
download_size: 6723407
dataset_size: 18281442
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder6.7b_pos_reflct_rmsprop_iter3_sppo_hard_new_cn_mining_oj_iter3-full_resp_trace"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
reflection-gen/ds_coder_pos_reflct_rmsprop_iter2_sppo_hard_new_cn_mining_oj_iter2-bin | reflection-gen | "2024-11-23T11:31:27Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T11:31:26Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: chosen_probs
dtype: float64
- name: chosen_probs_win
dtype: float64
- name: chosen_probs_lose
dtype: float64
splits:
- name: train
num_bytes: 7929162
num_examples: 2093
download_size: 3240246
dataset_size: 7929162
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder_pos_reflct_rmsprop_iter2_sppo_hard_new_cn_mining_oj_iter2-bin"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
reflection-gen/ds_coder_pos_reflct_rmsprop_iter2_sppo_hard_new_cn_mining_oj_iter2-full_resp_trace | reflection-gen | "2024-11-23T11:31:28Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T11:31:27Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: test
dtype: string
- name: tag
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: text_prompt
dtype: string
- name: text_chosen
dtype: string
- name: text_rejected
dtype: string
- name: generate_0
dtype: string
- name: generate_0_score
dtype: int64
- name: traceback_0
dtype: string
- name: generate_1
dtype: string
- name: generate_1_score
dtype: int64
- name: traceback_1
dtype: string
- name: generate_2
dtype: string
- name: generate_2_score
dtype: int64
- name: traceback_2
dtype: string
- name: generate_3
dtype: string
- name: generate_3_score
dtype: int64
- name: traceback_3
dtype: string
- name: probability
sequence:
sequence: float64
- name: rm_scores
sequence: int64
splits:
- name: train
num_bytes: 19124550
num_examples: 2093
download_size: 6858574
dataset_size: 19124550
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder_pos_reflct_rmsprop_iter2_sppo_hard_new_cn_mining_oj_iter2-full_resp_trace"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
reflection-gen/ds_coder_pos_reflct_rmsprop_iter2_sppo_hard_new_cn_mining_oj_iter2-bin_all_pairs | reflection-gen | "2024-11-23T11:31:30Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T11:31:29Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: test
dtype: string
splits:
- name: train
num_bytes: 14934685
num_examples: 3896
download_size: 4375034
dataset_size: 14934685
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder_pos_reflct_rmsprop_iter2_sppo_hard_new_cn_mining_oj_iter2-bin_all_pairs"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
reflection-gen/ds_chat_pos_reflct_rmsprop_iter3_sigmoid_cn_mining_oj_iter3-bin | reflection-gen | "2024-11-23T11:56:53Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T11:56:52Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: chosen_probs
dtype: float64
- name: chosen_probs_win
dtype: float64
- name: chosen_probs_lose
dtype: float64
splits:
- name: train
num_bytes: 6287041
num_examples: 2623
download_size: 2543936
dataset_size: 6287041
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_chat_pos_reflct_rmsprop_iter3_sigmoid_cn_mining_oj_iter3-bin"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
reflection-gen/ds_chat_pos_reflct_rmsprop_iter3_sigmoid_cn_mining_oj_iter3-full_resp_trace | reflection-gen | "2024-11-23T11:56:55Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T11:56:53Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: test
dtype: string
- name: tag
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: text_prompt
dtype: string
- name: text_chosen
dtype: string
- name: text_rejected
dtype: string
- name: generate_0
dtype: string
- name: generate_0_score
dtype: int64
- name: traceback_0
dtype: string
- name: generate_1
dtype: string
- name: generate_1_score
dtype: int64
- name: traceback_1
dtype: string
- name: generate_2
dtype: string
- name: generate_2_score
dtype: int64
- name: traceback_2
dtype: string
- name: generate_3
dtype: string
- name: generate_3_score
dtype: int64
- name: traceback_3
dtype: string
- name: probability
sequence:
sequence: float64
- name: rm_scores
sequence: int64
splits:
- name: train
num_bytes: 15125907
num_examples: 2623
download_size: 5537307
dataset_size: 15125907
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_chat_pos_reflct_rmsprop_iter3_sigmoid_cn_mining_oj_iter3-full_resp_trace"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
reflection-gen/ds_chat_pos_reflct_rmsprop_iter3_sigmoid_cn_mining_oj_iter3-bin_all_pairs | reflection-gen | "2024-11-23T11:56:56Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T11:56:55Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: test
dtype: string
splits:
- name: train
num_bytes: 12800514
num_examples: 5159
download_size: 3640059
dataset_size: 12800514
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_chat_pos_reflct_rmsprop_iter3_sigmoid_cn_mining_oj_iter3-bin_all_pairs"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
k-arthik-r/sys-logs-L0-to-L4-12.6k | k-arthik-r | "2024-11-23T13:18:19Z" | 2 | 0 | [
"license:llama3.2",
"size_categories:10K<n<100K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T13:17:38Z" | ---
license: llama3.2
---
|
reflection-gen/ds_coder6.7b_pos_reflct_rmsprop_iter4_sppo_hard_new_cn_mining_oj_iter4-full_resp_trace | reflection-gen | "2024-11-23T13:24:23Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T13:24:22Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: test
dtype: string
- name: tag
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: text_prompt
dtype: string
- name: text_chosen
dtype: string
- name: text_rejected
dtype: string
- name: generate_0
dtype: string
- name: generate_0_score
dtype: int64
- name: traceback_0
dtype: string
- name: generate_1
dtype: string
- name: generate_1_score
dtype: int64
- name: traceback_1
dtype: string
- name: generate_2
dtype: string
- name: generate_2_score
dtype: int64
- name: traceback_2
dtype: string
- name: generate_3
dtype: string
- name: generate_3_score
dtype: int64
- name: traceback_3
dtype: string
- name: probability
sequence:
sequence: float64
- name: rm_scores
sequence: int64
splits:
- name: train
num_bytes: 16381302
num_examples: 2259
download_size: 6043747
dataset_size: 16381302
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder6.7b_pos_reflct_rmsprop_iter4_sppo_hard_new_cn_mining_oj_iter4-full_resp_trace"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
junnystateofmind/testing_refuel_5_turns_only_ckp_0 | junnystateofmind | "2024-11-23T14:40:17Z" | 2 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T14:40:15Z" | ---
dataset_info:
features:
- name: combined_data
struct:
- name: narrative
dtype: string
- name: trajectory
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 19176
num_examples: 5
download_size: 17625
dataset_size: 19176
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
junnystateofmind/testing_refuel_5_turns_only_ckp_1 | junnystateofmind | "2024-11-23T14:42:02Z" | 2 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T14:42:00Z" | ---
dataset_info:
features:
- name: combined_data
struct:
- name: narrative
dtype: string
- name: trajectory
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 37130
num_examples: 5
download_size: 20861
dataset_size: 37130
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
junnystateofmind/testing_ultrainteract_5_turns_only | junnystateofmind | "2024-11-23T15:19:00Z" | 2 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T15:18:57Z" | ---
dataset_info:
features:
- name: trajectory
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 37380
num_examples: 1
download_size: 5237
dataset_size: 37380
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
junnystateofmind/testing_ultrainteract_sampled_h_from_sampled_len_ckp_0 | junnystateofmind | "2024-11-23T15:19:48Z" | 2 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T15:19:41Z" | ---
dataset_info:
features:
- name: trajectory_sampled_h_from_sampled_len
list:
- name: content
dtype: string
- name: role
dtype: string
- name: sampled_len_from_5
dtype: int64
- name: sampled_h_from_sampled_len
dtype: int64
splits:
- name: train
num_bytes: 2657
num_examples: 1
download_size: 8617
dataset_size: 2657
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
thefernandolourenco/silviosantoscantormarc | thefernandolourenco | "2024-11-23T18:27:14Z" | 2 | 0 | [
"license:openrail",
"region:us"
] | null | "2024-11-23T18:27:14Z" | ---
license: openrail
---
|
ZixuanKe/fingpt_convfinqa_sup_sample_from_policy_v1.1_dpo_val_chunk_3 | ZixuanKe | "2024-11-23T19:54:30Z" | 2 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-23T19:54:29Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: rejected
dtype: string
- name: chosen
dtype: string
splits:
- name: train
num_bytes: 264338
num_examples: 25
download_size: 27277
dataset_size: 264338
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ahmedheakl/ar_geochat_instruct | ahmedheakl | "2024-11-24T02:10:16Z" | 2 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-24T01:52:53Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: image_path
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: image
dtype: image
splits:
- name: train
num_bytes: 23412879912.0
num_examples: 20000
download_size: 23373751401
dataset_size: 23412879912.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
dgambettaphd/D_gen10_run0_llama2-7b_wiki_doc1000_real96_synt32 | dgambettaphd | "2024-11-24T02:33:34Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-24T02:33:31Z" | ---
dataset_info:
features:
- name: id
dtype: int64
- name: doc
dtype: string
splits:
- name: train
num_bytes: 643629
num_examples: 1000
download_size: 408917
dataset_size: 643629
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
camel-bench/arabic_examsv | camel-bench | "2024-11-24T03:20:21Z" | 2 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-24T03:20:06Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 370775474.0
num_examples: 823
download_size: 355304182
dataset_size: 370775474.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
nikkoyudha/dynasty_warriors_characters | nikkoyudha | "2024-11-24T04:57:19Z" | 2 | 0 | [
"license:cc-by-nd-4.0",
"size_categories:n<1K",
"format:imagefolder",
"modality:image",
"modality:text",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-11-24T04:56:49Z" | ---
license: cc-by-nd-4.0
---
|
Aya168/project_from_PIPE2 | Aya168 | "2024-11-24T11:53:39Z" | 2 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-24T10:09:55Z" | ---
dataset_info:
features:
- name: img_id
dtype: string
- name: original_image
dtype: image
- name: target_image
dtype: image
- name: object_image
dtype: image
splits:
- name: train
num_bytes: 4092990035.329
num_examples: 42437
download_size: 4072045955
dataset_size: 4092990035.329
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
czm05/test03 | czm05 | "2024-11-24T10:54:22Z" | 2 | 0 | [
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-24T10:54:07Z" | ---
license: apache-2.0
---
|
iqwiki-kor/wDPO-ko | iqwiki-kor | "2024-11-24T13:14:13Z" | 2 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-24T13:14:08Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: chosen_score
dtype: float64
- name: rejected_score
dtype: float64
splits:
- name: train
num_bytes: 45459123
num_examples: 10000
download_size: 21162726
dataset_size: 45459123
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
gaydmi/alpaca-tat | gaydmi | "2024-11-25T01:14:35Z" | 2 | 0 | [
"size_categories:n<1K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-24T16:31:31Z" | ---
configs:
- config_name: default
data_files:
- split: train
path: ru_alpaca_seed_tasks.csv
--- |
mlfoundations-dev/oh_v1.2_sin_airoboros_diversity | mlfoundations-dev | "2024-11-24T18:12:07Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-24T17:34:37Z" | ---
dataset_info:
features:
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: shard_id
dtype: string
- name: output
dtype: string
- name: ngram_3_uniqueness
dtype: float64
- name: entropy
dtype: float64
- name: gini_index
dtype: float64
- name: self_bleu
dtype: float64
- name: embeddings
sequence: float64
- name: kmeans_inertia_embeddings
dtype: float64
- name: new_conversations
list:
- name: content
dtype: string
- name: role
dtype: string
- name: gradients
sequence: float64
- name: kmeans_inertia_gradients
dtype: float64
splits:
- name: train
num_bytes: 58694197
num_examples: 4665
download_size: 45143196
dataset_size: 58694197
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
DatPySci/weak_gpt2-large_tldr_synthetic | DatPySci | "2024-11-24T17:36:27Z" | 2 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-24T17:36:06Z" | ---
dataset_info:
features:
- name: target
sequence: string
- name: reference_response
dtype: string
- name: ctx
dtype: string
splits:
- name: train
num_bytes: 239653716
num_examples: 114674
download_size: 140121221
dataset_size: 239653716
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
lukehinds/testdataset | lukehinds | "2024-11-24T18:21:22Z" | 2 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-24T18:21:20Z" | ---
dataset_info:
features:
- name: messages
list:
- name: role
dtype: string
- name: content
dtype: string
splits:
- name: train
num_bytes: 711
num_examples: 5
download_size: 1757
dataset_size: 711
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
braindao/solidity-base-sft-v3 | braindao | "2024-11-24T18:36:52Z" | 2 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-24T18:36:29Z" | ---
dataset_info:
features:
- name: id
dtype: int64
- name: input
dtype: string
- name: output
dtype: string
splits:
- name: train
num_bytes: 736357422.2074078
num_examples: 38495
download_size: 134425079
dataset_size: 736357422.2074078
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Leyo/moss_test_r8 | Leyo | "2024-11-24T18:45:09Z" | 2 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot"
] | [
"robotics"
] | "2024-11-24T18:44:56Z" | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.0",
"robot_type": "moss",
"total_episodes": 10,
"total_frames": 4415,
"total_tasks": 1,
"total_videos": 20,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:10"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"next.reward": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.laptop": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.phone": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
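Because the card exposes the episode data as parquet files (`data/*/*.parquet` in the `configs` section), the tabular part of the structure above can be inspected with the `datasets` library alone; this is a minimal sketch under that assumption, and it does not load the MP4 videos referenced under `videos/`.
```python
from datasets import load_dataset

# Loads the parquet episode data declared in the card's `configs` section.
ds = load_dataset("Leyo/moss_test_r8", split="train")

print(ds.features)       # action / observation.state vectors, timestamps, indices, reward
print(ds[0]["action"])   # 6-dim float32 action vector, one entry per joint named above
```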
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
open-llm-leaderboard/ehristoforu__RQwen-v0.1-details | open-llm-leaderboard | "2024-11-24T18:57:52Z" | 2 | 0 | [
"size_categories:10K<n<100K",
"format:json",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-24T18:54:32Z" | ---
pretty_name: Evaluation run of ehristoforu/RQwen-v0.1
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [ehristoforu/RQwen-v0.1](https://huggingface.co/ehristoforu/RQwen-v0.1)\nThe dataset\
\ is composed of 38 configuration(s), each one corresponding to one of the evaluated\
\ task.\n\nThe dataset has been created from 1 run(s). Each run can be found as\
\ a specific split in each configuration, the split being named using the timestamp\
\ of the run.The \"train\" split is always pointing to the latest results.\n\nAn\
\ additional configuration \"results\" store all the aggregated results of the run.\n\
\nTo load the details from a run, you can for instance do the following:\n```python\n\
from datasets import load_dataset\ndata = load_dataset(\n\t\"open-llm-leaderboard/ehristoforu__RQwen-v0.1-details\"\
,\n\tname=\"ehristoforu__RQwen-v0.1__leaderboard_bbh_boolean_expressions\",\n\t\
split=\"latest\"\n)\n```\n\n## Latest results\n\nThese are the [latest results from\
\ run 2024-11-24T18-54-31.650276](https://huggingface.co/datasets/open-llm-leaderboard/ehristoforu__RQwen-v0.1-details/blob/main/ehristoforu__RQwen-v0.1/results_2024-11-24T18-54-31.650276.json)\
\ (note that there might be results for other tasks in the repos if successive evals\
\ didn't cover the same tasks. You find each in the results and the \"latest\" split\
\ for each eval):\n\n```python\n{\n \"all\": {\n \"leaderboard\": {\n\
\ \"acc,none\": 0.5201961436170213,\n \"acc_stderr,none\"\
: 0.004554750245067938,\n \"prompt_level_strict_acc,none\": 0.7264325323475046,\n\
\ \"prompt_level_strict_acc_stderr,none\": 0.019183727107392846,\n \
\ \"inst_level_strict_acc,none\": 0.7985611510791367,\n \"inst_level_strict_acc_stderr,none\"\
: \"N/A\",\n \"prompt_level_loose_acc,none\": 0.7615526802218114,\n \
\ \"prompt_level_loose_acc_stderr,none\": 0.01833788809424391,\n \
\ \"inst_level_loose_acc,none\": 0.8249400479616307,\n \"inst_level_loose_acc_stderr,none\"\
: \"N/A\",\n \"acc_norm,none\": 0.5702425736152549,\n \"acc_norm_stderr,none\"\
: 0.005124139231525546,\n \"exact_match,none\": 0.02945619335347432,\n\
\ \"exact_match_stderr,none\": 0.0046364753008244705,\n \"\
alias\": \"leaderboard\"\n },\n \"leaderboard_bbh\": {\n \
\ \"acc_norm,none\": 0.6415552855407047,\n \"acc_norm_stderr,none\"\
: 0.005818997061406109,\n \"alias\": \" - leaderboard_bbh\"\n \
\ },\n \"leaderboard_bbh_boolean_expressions\": {\n \"alias\"\
: \" - leaderboard_bbh_boolean_expressions\",\n \"acc_norm,none\": 0.892,\n\
\ \"acc_norm_stderr,none\": 0.019669559381568776\n },\n \
\ \"leaderboard_bbh_causal_judgement\": {\n \"alias\": \" - leaderboard_bbh_causal_judgement\"\
,\n \"acc_norm,none\": 0.6256684491978609,\n \"acc_norm_stderr,none\"\
: 0.0354849234134303\n },\n \"leaderboard_bbh_date_understanding\"\
: {\n \"alias\": \" - leaderboard_bbh_date_understanding\",\n \
\ \"acc_norm,none\": 0.7,\n \"acc_norm_stderr,none\": 0.029040893477575786\n\
\ },\n \"leaderboard_bbh_disambiguation_qa\": {\n \"alias\"\
: \" - leaderboard_bbh_disambiguation_qa\",\n \"acc_norm,none\": 0.664,\n\
\ \"acc_norm_stderr,none\": 0.029933259094191533\n },\n \
\ \"leaderboard_bbh_formal_fallacies\": {\n \"alias\": \" - leaderboard_bbh_formal_fallacies\"\
,\n \"acc_norm,none\": 0.644,\n \"acc_norm_stderr,none\":\
\ 0.0303436806571532\n },\n \"leaderboard_bbh_geometric_shapes\":\
\ {\n \"alias\": \" - leaderboard_bbh_geometric_shapes\",\n \
\ \"acc_norm,none\": 0.532,\n \"acc_norm_stderr,none\": 0.031621252575725574\n\
\ },\n \"leaderboard_bbh_hyperbaton\": {\n \"alias\": \"\
\ - leaderboard_bbh_hyperbaton\",\n \"acc_norm,none\": 0.82,\n \
\ \"acc_norm_stderr,none\": 0.02434689065029351\n },\n \"leaderboard_bbh_logical_deduction_five_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_five_objects\"\
,\n \"acc_norm,none\": 0.632,\n \"acc_norm_stderr,none\":\
\ 0.03056207062099311\n },\n \"leaderboard_bbh_logical_deduction_seven_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_seven_objects\"\
,\n \"acc_norm,none\": 0.596,\n \"acc_norm_stderr,none\":\
\ 0.03109668818482536\n },\n \"leaderboard_bbh_logical_deduction_three_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_three_objects\"\
,\n \"acc_norm,none\": 0.928,\n \"acc_norm_stderr,none\":\
\ 0.016381005750490122\n },\n \"leaderboard_bbh_movie_recommendation\"\
: {\n \"alias\": \" - leaderboard_bbh_movie_recommendation\",\n \
\ \"acc_norm,none\": 0.768,\n \"acc_norm_stderr,none\": 0.026750070374865202\n\
\ },\n \"leaderboard_bbh_navigate\": {\n \"alias\": \"\
\ - leaderboard_bbh_navigate\",\n \"acc_norm,none\": 0.668,\n \
\ \"acc_norm_stderr,none\": 0.029844039047465857\n },\n \"leaderboard_bbh_object_counting\"\
: {\n \"alias\": \" - leaderboard_bbh_object_counting\",\n \
\ \"acc_norm,none\": 0.456,\n \"acc_norm_stderr,none\": 0.031563285061213475\n\
\ },\n \"leaderboard_bbh_penguins_in_a_table\": {\n \"\
alias\": \" - leaderboard_bbh_penguins_in_a_table\",\n \"acc_norm,none\"\
: 0.7328767123287672,\n \"acc_norm_stderr,none\": 0.03674407640319397\n\
\ },\n \"leaderboard_bbh_reasoning_about_colored_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_reasoning_about_colored_objects\",\n\
\ \"acc_norm,none\": 0.788,\n \"acc_norm_stderr,none\": 0.025901884690541117\n\
\ },\n \"leaderboard_bbh_ruin_names\": {\n \"alias\": \"\
\ - leaderboard_bbh_ruin_names\",\n \"acc_norm,none\": 0.828,\n \
\ \"acc_norm_stderr,none\": 0.02391551394448624\n },\n \"leaderboard_bbh_salient_translation_error_detection\"\
: {\n \"alias\": \" - leaderboard_bbh_salient_translation_error_detection\"\
,\n \"acc_norm,none\": 0.616,\n \"acc_norm_stderr,none\":\
\ 0.030821679117375447\n },\n \"leaderboard_bbh_snarks\": {\n \
\ \"alias\": \" - leaderboard_bbh_snarks\",\n \"acc_norm,none\"\
: 0.7808988764044944,\n \"acc_norm_stderr,none\": 0.031090883837921395\n\
\ },\n \"leaderboard_bbh_sports_understanding\": {\n \"\
alias\": \" - leaderboard_bbh_sports_understanding\",\n \"acc_norm,none\"\
: 0.624,\n \"acc_norm_stderr,none\": 0.03069633626739458\n },\n\
\ \"leaderboard_bbh_temporal_sequences\": {\n \"alias\": \" -\
\ leaderboard_bbh_temporal_sequences\",\n \"acc_norm,none\": 0.852,\n\
\ \"acc_norm_stderr,none\": 0.022503547243806186\n },\n \
\ \"leaderboard_bbh_tracking_shuffled_objects_five_objects\": {\n \"\
alias\": \" - leaderboard_bbh_tracking_shuffled_objects_five_objects\",\n \
\ \"acc_norm,none\": 0.244,\n \"acc_norm_stderr,none\": 0.02721799546455311\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
,\n \"acc_norm,none\": 0.204,\n \"acc_norm_stderr,none\":\
\ 0.025537121574548162\n },\n \"leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
,\n \"acc_norm,none\": 0.328,\n \"acc_norm_stderr,none\":\
\ 0.029752391824475363\n },\n \"leaderboard_bbh_web_of_lies\": {\n\
\ \"alias\": \" - leaderboard_bbh_web_of_lies\",\n \"acc_norm,none\"\
: 0.548,\n \"acc_norm_stderr,none\": 0.03153986449255664\n },\n\
\ \"leaderboard_gpqa\": {\n \"acc_norm,none\": 0.32550335570469796,\n\
\ \"acc_norm_stderr,none\": 0.013587913744347518,\n \"alias\"\
: \" - leaderboard_gpqa\"\n },\n \"leaderboard_gpqa_diamond\": {\n\
\ \"alias\": \" - leaderboard_gpqa_diamond\",\n \"acc_norm,none\"\
: 0.3282828282828283,\n \"acc_norm_stderr,none\": 0.03345678422756777\n\
\ },\n \"leaderboard_gpqa_extended\": {\n \"alias\": \"\
\ - leaderboard_gpqa_extended\",\n \"acc_norm,none\": 0.32967032967032966,\n\
\ \"acc_norm_stderr,none\": 0.0201365887896455\n },\n \"\
leaderboard_gpqa_main\": {\n \"alias\": \" - leaderboard_gpqa_main\"\
,\n \"acc_norm,none\": 0.31919642857142855,\n \"acc_norm_stderr,none\"\
: 0.02204886116457606\n },\n \"leaderboard_ifeval\": {\n \
\ \"alias\": \" - leaderboard_ifeval\",\n \"prompt_level_strict_acc,none\"\
: 0.7264325323475046,\n \"prompt_level_strict_acc_stderr,none\": 0.019183727107392846,\n\
\ \"inst_level_strict_acc,none\": 0.7985611510791367,\n \"\
inst_level_strict_acc_stderr,none\": \"N/A\",\n \"prompt_level_loose_acc,none\"\
: 0.7615526802218114,\n \"prompt_level_loose_acc_stderr,none\": 0.01833788809424391,\n\
\ \"inst_level_loose_acc,none\": 0.8249400479616307,\n \"\
inst_level_loose_acc_stderr,none\": \"N/A\"\n },\n \"leaderboard_math_hard\"\
: {\n \"exact_match,none\": 0.02945619335347432,\n \"exact_match_stderr,none\"\
: 0.0046364753008244705,\n \"alias\": \" - leaderboard_math_hard\"\n\
\ },\n \"leaderboard_math_algebra_hard\": {\n \"alias\"\
: \" - leaderboard_math_algebra_hard\",\n \"exact_match,none\": 0.04234527687296417,\n\
\ \"exact_match_stderr,none\": 0.011511879967693189\n },\n \
\ \"leaderboard_math_counting_and_prob_hard\": {\n \"alias\": \"\
\ - leaderboard_math_counting_and_prob_hard\",\n \"exact_match,none\"\
: 0.032520325203252036,\n \"exact_match_stderr,none\": 0.016058998205879745\n\
\ },\n \"leaderboard_math_geometry_hard\": {\n \"alias\"\
: \" - leaderboard_math_geometry_hard\",\n \"exact_match,none\": 0.007575757575757576,\n\
\ \"exact_match_stderr,none\": 0.007575757575757577\n },\n \
\ \"leaderboard_math_intermediate_algebra_hard\": {\n \"alias\":\
\ \" - leaderboard_math_intermediate_algebra_hard\",\n \"exact_match,none\"\
: 0.017857142857142856,\n \"exact_match_stderr,none\": 0.007928503387888855\n\
\ },\n \"leaderboard_math_num_theory_hard\": {\n \"alias\"\
: \" - leaderboard_math_num_theory_hard\",\n \"exact_match,none\": 0.025974025974025976,\n\
\ \"exact_match_stderr,none\": 0.012859058999697068\n },\n \
\ \"leaderboard_math_prealgebra_hard\": {\n \"alias\": \" - leaderboard_math_prealgebra_hard\"\
,\n \"exact_match,none\": 0.05699481865284974,\n \"exact_match_stderr,none\"\
: 0.01673108529360757\n },\n \"leaderboard_math_precalculus_hard\"\
: {\n \"alias\": \" - leaderboard_math_precalculus_hard\",\n \
\ \"exact_match,none\": 0.007407407407407408,\n \"exact_match_stderr,none\"\
: 0.007407407407407408\n },\n \"leaderboard_mmlu_pro\": {\n \
\ \"alias\": \" - leaderboard_mmlu_pro\",\n \"acc,none\": 0.5201961436170213,\n\
\ \"acc_stderr,none\": 0.004554750245067938\n },\n \"leaderboard_musr\"\
: {\n \"acc_norm,none\": 0.4126984126984127,\n \"acc_norm_stderr,none\"\
: 0.01745952627984168,\n \"alias\": \" - leaderboard_musr\"\n \
\ },\n \"leaderboard_musr_murder_mysteries\": {\n \"alias\": \"\
\ - leaderboard_musr_murder_mysteries\",\n \"acc_norm,none\": 0.532,\n\
\ \"acc_norm_stderr,none\": 0.031621252575725574\n },\n \
\ \"leaderboard_musr_object_placements\": {\n \"alias\": \" - leaderboard_musr_object_placements\"\
,\n \"acc_norm,none\": 0.26171875,\n \"acc_norm_stderr,none\"\
: 0.027526959754524398\n },\n \"leaderboard_musr_team_allocation\"\
: {\n \"alias\": \" - leaderboard_musr_team_allocation\",\n \
\ \"acc_norm,none\": 0.448,\n \"acc_norm_stderr,none\": 0.03151438761115349\n\
\ }\n },\n \"leaderboard\": {\n \"acc,none\": 0.5201961436170213,\n\
\ \"acc_stderr,none\": 0.004554750245067938,\n \"prompt_level_strict_acc,none\"\
: 0.7264325323475046,\n \"prompt_level_strict_acc_stderr,none\": 0.019183727107392846,\n\
\ \"inst_level_strict_acc,none\": 0.7985611510791367,\n \"inst_level_strict_acc_stderr,none\"\
: \"N/A\",\n \"prompt_level_loose_acc,none\": 0.7615526802218114,\n \
\ \"prompt_level_loose_acc_stderr,none\": 0.01833788809424391,\n \"inst_level_loose_acc,none\"\
: 0.8249400479616307,\n \"inst_level_loose_acc_stderr,none\": \"N/A\",\n\
\ \"acc_norm,none\": 0.5702425736152549,\n \"acc_norm_stderr,none\"\
: 0.005124139231525546,\n \"exact_match,none\": 0.02945619335347432,\n \
\ \"exact_match_stderr,none\": 0.0046364753008244705,\n \"alias\": \"\
leaderboard\"\n },\n \"leaderboard_bbh\": {\n \"acc_norm,none\": 0.6415552855407047,\n\
\ \"acc_norm_stderr,none\": 0.005818997061406109,\n \"alias\": \"\
\ - leaderboard_bbh\"\n },\n \"leaderboard_bbh_boolean_expressions\": {\n\
\ \"alias\": \" - leaderboard_bbh_boolean_expressions\",\n \"acc_norm,none\"\
: 0.892,\n \"acc_norm_stderr,none\": 0.019669559381568776\n },\n \"\
leaderboard_bbh_causal_judgement\": {\n \"alias\": \" - leaderboard_bbh_causal_judgement\"\
,\n \"acc_norm,none\": 0.6256684491978609,\n \"acc_norm_stderr,none\"\
: 0.0354849234134303\n },\n \"leaderboard_bbh_date_understanding\": {\n \
\ \"alias\": \" - leaderboard_bbh_date_understanding\",\n \"acc_norm,none\"\
: 0.7,\n \"acc_norm_stderr,none\": 0.029040893477575786\n },\n \"leaderboard_bbh_disambiguation_qa\"\
: {\n \"alias\": \" - leaderboard_bbh_disambiguation_qa\",\n \"acc_norm,none\"\
: 0.664,\n \"acc_norm_stderr,none\": 0.029933259094191533\n },\n \"\
leaderboard_bbh_formal_fallacies\": {\n \"alias\": \" - leaderboard_bbh_formal_fallacies\"\
,\n \"acc_norm,none\": 0.644,\n \"acc_norm_stderr,none\": 0.0303436806571532\n\
\ },\n \"leaderboard_bbh_geometric_shapes\": {\n \"alias\": \" - leaderboard_bbh_geometric_shapes\"\
,\n \"acc_norm,none\": 0.532,\n \"acc_norm_stderr,none\": 0.031621252575725574\n\
\ },\n \"leaderboard_bbh_hyperbaton\": {\n \"alias\": \" - leaderboard_bbh_hyperbaton\"\
,\n \"acc_norm,none\": 0.82,\n \"acc_norm_stderr,none\": 0.02434689065029351\n\
\ },\n \"leaderboard_bbh_logical_deduction_five_objects\": {\n \"alias\"\
: \" - leaderboard_bbh_logical_deduction_five_objects\",\n \"acc_norm,none\"\
: 0.632,\n \"acc_norm_stderr,none\": 0.03056207062099311\n },\n \"\
leaderboard_bbh_logical_deduction_seven_objects\": {\n \"alias\": \" - leaderboard_bbh_logical_deduction_seven_objects\"\
,\n \"acc_norm,none\": 0.596,\n \"acc_norm_stderr,none\": 0.03109668818482536\n\
\ },\n \"leaderboard_bbh_logical_deduction_three_objects\": {\n \"\
alias\": \" - leaderboard_bbh_logical_deduction_three_objects\",\n \"acc_norm,none\"\
: 0.928,\n \"acc_norm_stderr,none\": 0.016381005750490122\n },\n \"\
leaderboard_bbh_movie_recommendation\": {\n \"alias\": \" - leaderboard_bbh_movie_recommendation\"\
,\n \"acc_norm,none\": 0.768,\n \"acc_norm_stderr,none\": 0.026750070374865202\n\
\ },\n \"leaderboard_bbh_navigate\": {\n \"alias\": \" - leaderboard_bbh_navigate\"\
,\n \"acc_norm,none\": 0.668,\n \"acc_norm_stderr,none\": 0.029844039047465857\n\
\ },\n \"leaderboard_bbh_object_counting\": {\n \"alias\": \" - leaderboard_bbh_object_counting\"\
,\n \"acc_norm,none\": 0.456,\n \"acc_norm_stderr,none\": 0.031563285061213475\n\
\ },\n \"leaderboard_bbh_penguins_in_a_table\": {\n \"alias\": \" \
\ - leaderboard_bbh_penguins_in_a_table\",\n \"acc_norm,none\": 0.7328767123287672,\n\
\ \"acc_norm_stderr,none\": 0.03674407640319397\n },\n \"leaderboard_bbh_reasoning_about_colored_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_reasoning_about_colored_objects\"\
,\n \"acc_norm,none\": 0.788,\n \"acc_norm_stderr,none\": 0.025901884690541117\n\
\ },\n \"leaderboard_bbh_ruin_names\": {\n \"alias\": \" - leaderboard_bbh_ruin_names\"\
,\n \"acc_norm,none\": 0.828,\n \"acc_norm_stderr,none\": 0.02391551394448624\n\
\ },\n \"leaderboard_bbh_salient_translation_error_detection\": {\n \
\ \"alias\": \" - leaderboard_bbh_salient_translation_error_detection\",\n \
\ \"acc_norm,none\": 0.616,\n \"acc_norm_stderr,none\": 0.030821679117375447\n\
\ },\n \"leaderboard_bbh_snarks\": {\n \"alias\": \" - leaderboard_bbh_snarks\"\
,\n \"acc_norm,none\": 0.7808988764044944,\n \"acc_norm_stderr,none\"\
: 0.031090883837921395\n },\n \"leaderboard_bbh_sports_understanding\": {\n\
\ \"alias\": \" - leaderboard_bbh_sports_understanding\",\n \"acc_norm,none\"\
: 0.624,\n \"acc_norm_stderr,none\": 0.03069633626739458\n },\n \"\
leaderboard_bbh_temporal_sequences\": {\n \"alias\": \" - leaderboard_bbh_temporal_sequences\"\
,\n \"acc_norm,none\": 0.852,\n \"acc_norm_stderr,none\": 0.022503547243806186\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_five_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_five_objects\"\
,\n \"acc_norm,none\": 0.244,\n \"acc_norm_stderr,none\": 0.02721799546455311\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_seven_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
,\n \"acc_norm,none\": 0.204,\n \"acc_norm_stderr,none\": 0.025537121574548162\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_three_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
,\n \"acc_norm,none\": 0.328,\n \"acc_norm_stderr,none\": 0.029752391824475363\n\
\ },\n \"leaderboard_bbh_web_of_lies\": {\n \"alias\": \" - leaderboard_bbh_web_of_lies\"\
,\n \"acc_norm,none\": 0.548,\n \"acc_norm_stderr,none\": 0.03153986449255664\n\
\ },\n \"leaderboard_gpqa\": {\n \"acc_norm,none\": 0.32550335570469796,\n\
\ \"acc_norm_stderr,none\": 0.013587913744347518,\n \"alias\": \"\
\ - leaderboard_gpqa\"\n },\n \"leaderboard_gpqa_diamond\": {\n \"\
alias\": \" - leaderboard_gpqa_diamond\",\n \"acc_norm,none\": 0.3282828282828283,\n\
\ \"acc_norm_stderr,none\": 0.03345678422756777\n },\n \"leaderboard_gpqa_extended\"\
: {\n \"alias\": \" - leaderboard_gpqa_extended\",\n \"acc_norm,none\"\
: 0.32967032967032966,\n \"acc_norm_stderr,none\": 0.0201365887896455\n \
\ },\n \"leaderboard_gpqa_main\": {\n \"alias\": \" - leaderboard_gpqa_main\"\
,\n \"acc_norm,none\": 0.31919642857142855,\n \"acc_norm_stderr,none\"\
: 0.02204886116457606\n },\n \"leaderboard_ifeval\": {\n \"alias\"\
: \" - leaderboard_ifeval\",\n \"prompt_level_strict_acc,none\": 0.7264325323475046,\n\
\ \"prompt_level_strict_acc_stderr,none\": 0.019183727107392846,\n \
\ \"inst_level_strict_acc,none\": 0.7985611510791367,\n \"inst_level_strict_acc_stderr,none\"\
: \"N/A\",\n \"prompt_level_loose_acc,none\": 0.7615526802218114,\n \
\ \"prompt_level_loose_acc_stderr,none\": 0.01833788809424391,\n \"inst_level_loose_acc,none\"\
: 0.8249400479616307,\n \"inst_level_loose_acc_stderr,none\": \"N/A\"\n \
\ },\n \"leaderboard_math_hard\": {\n \"exact_match,none\": 0.02945619335347432,\n\
\ \"exact_match_stderr,none\": 0.0046364753008244705,\n \"alias\"\
: \" - leaderboard_math_hard\"\n },\n \"leaderboard_math_algebra_hard\": {\n\
\ \"alias\": \" - leaderboard_math_algebra_hard\",\n \"exact_match,none\"\
: 0.04234527687296417,\n \"exact_match_stderr,none\": 0.011511879967693189\n\
\ },\n \"leaderboard_math_counting_and_prob_hard\": {\n \"alias\":\
\ \" - leaderboard_math_counting_and_prob_hard\",\n \"exact_match,none\"\
: 0.032520325203252036,\n \"exact_match_stderr,none\": 0.016058998205879745\n\
\ },\n \"leaderboard_math_geometry_hard\": {\n \"alias\": \" - leaderboard_math_geometry_hard\"\
,\n \"exact_match,none\": 0.007575757575757576,\n \"exact_match_stderr,none\"\
: 0.007575757575757577\n },\n \"leaderboard_math_intermediate_algebra_hard\"\
: {\n \"alias\": \" - leaderboard_math_intermediate_algebra_hard\",\n \
\ \"exact_match,none\": 0.017857142857142856,\n \"exact_match_stderr,none\"\
: 0.007928503387888855\n },\n \"leaderboard_math_num_theory_hard\": {\n \
\ \"alias\": \" - leaderboard_math_num_theory_hard\",\n \"exact_match,none\"\
: 0.025974025974025976,\n \"exact_match_stderr,none\": 0.012859058999697068\n\
\ },\n \"leaderboard_math_prealgebra_hard\": {\n \"alias\": \" - leaderboard_math_prealgebra_hard\"\
,\n \"exact_match,none\": 0.05699481865284974,\n \"exact_match_stderr,none\"\
: 0.01673108529360757\n },\n \"leaderboard_math_precalculus_hard\": {\n \
\ \"alias\": \" - leaderboard_math_precalculus_hard\",\n \"exact_match,none\"\
: 0.007407407407407408,\n \"exact_match_stderr,none\": 0.007407407407407408\n\
\ },\n \"leaderboard_mmlu_pro\": {\n \"alias\": \" - leaderboard_mmlu_pro\"\
,\n \"acc,none\": 0.5201961436170213,\n \"acc_stderr,none\": 0.004554750245067938\n\
\ },\n \"leaderboard_musr\": {\n \"acc_norm,none\": 0.4126984126984127,\n\
\ \"acc_norm_stderr,none\": 0.01745952627984168,\n \"alias\": \" -\
\ leaderboard_musr\"\n },\n \"leaderboard_musr_murder_mysteries\": {\n \
\ \"alias\": \" - leaderboard_musr_murder_mysteries\",\n \"acc_norm,none\"\
: 0.532,\n \"acc_norm_stderr,none\": 0.031621252575725574\n },\n \"\
leaderboard_musr_object_placements\": {\n \"alias\": \" - leaderboard_musr_object_placements\"\
,\n \"acc_norm,none\": 0.26171875,\n \"acc_norm_stderr,none\": 0.027526959754524398\n\
\ },\n \"leaderboard_musr_team_allocation\": {\n \"alias\": \" - leaderboard_musr_team_allocation\"\
,\n \"acc_norm,none\": 0.448,\n \"acc_norm_stderr,none\": 0.03151438761115349\n\
\ }\n}\n```"
repo_url: https://huggingface.co/ehristoforu/RQwen-v0.1
leaderboard_url: ''
point_of_contact: ''
configs:
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_boolean_expressions
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_boolean_expressions_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_boolean_expressions_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_causal_judgement
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_causal_judgement_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_causal_judgement_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_date_understanding
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_date_understanding_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_date_understanding_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_disambiguation_qa
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_disambiguation_qa_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_disambiguation_qa_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_formal_fallacies
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_formal_fallacies_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_formal_fallacies_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_geometric_shapes
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_geometric_shapes_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_geometric_shapes_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_hyperbaton
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_hyperbaton_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_hyperbaton_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_logical_deduction_five_objects
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_logical_deduction_five_objects_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_five_objects_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_logical_deduction_seven_objects
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_logical_deduction_seven_objects_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_seven_objects_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_logical_deduction_three_objects
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_logical_deduction_three_objects_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_three_objects_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_movie_recommendation
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_movie_recommendation_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_movie_recommendation_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_navigate
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_navigate_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_navigate_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_object_counting
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_object_counting_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_object_counting_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_penguins_in_a_table
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_penguins_in_a_table_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_penguins_in_a_table_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_reasoning_about_colored_objects
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_reasoning_about_colored_objects_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_reasoning_about_colored_objects_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_ruin_names
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_ruin_names_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_ruin_names_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_salient_translation_error_detection
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_salient_translation_error_detection_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_salient_translation_error_detection_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_snarks
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_snarks_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_snarks_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_sports_understanding
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_sports_understanding_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_sports_understanding_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_temporal_sequences
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_temporal_sequences_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_temporal_sequences_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_tracking_shuffled_objects_five_objects
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_five_objects_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_five_objects_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_tracking_shuffled_objects_seven_objects
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_seven_objects_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_seven_objects_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_tracking_shuffled_objects_three_objects
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_three_objects_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_three_objects_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_bbh_web_of_lies
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_bbh_web_of_lies_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_web_of_lies_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_gpqa_diamond
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_gpqa_diamond_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_diamond_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_gpqa_extended
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_gpqa_extended_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_extended_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_gpqa_main
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_gpqa_main_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_main_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_ifeval
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_ifeval_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_ifeval_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_math_algebra_hard
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_math_algebra_hard_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_algebra_hard_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_math_counting_and_prob_hard
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_math_counting_and_prob_hard_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_counting_and_prob_hard_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_math_geometry_hard
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_math_geometry_hard_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_geometry_hard_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_math_intermediate_algebra_hard
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_math_intermediate_algebra_hard_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_intermediate_algebra_hard_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_math_num_theory_hard
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_math_num_theory_hard_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_num_theory_hard_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_math_prealgebra_hard
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_math_prealgebra_hard_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_prealgebra_hard_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_math_precalculus_hard
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_math_precalculus_hard_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_precalculus_hard_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_mmlu_pro
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_mmlu_pro_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_mmlu_pro_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_musr_murder_mysteries
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_musr_murder_mysteries_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_murder_mysteries_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_musr_object_placements
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_musr_object_placements_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_object_placements_2024-11-24T18-54-31.650276.jsonl'
- config_name: ehristoforu__RQwen-v0.1__leaderboard_musr_team_allocation
data_files:
- split: 2024_11_24T18_54_31.650276
path:
- '**/samples_leaderboard_musr_team_allocation_2024-11-24T18-54-31.650276.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_team_allocation_2024-11-24T18-54-31.650276.jsonl'
---
# Dataset Card for Evaluation run of ehristoforu/RQwen-v0.1
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [ehristoforu/RQwen-v0.1](https://huggingface.co/ehristoforu/RQwen-v0.1)
The dataset is composed of 38 configuration(s), each one corresponding to one of the evaluated tasks.
The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run.
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset(
"open-llm-leaderboard/ehristoforu__RQwen-v0.1-details",
name="ehristoforu__RQwen-v0.1__leaderboard_bbh_boolean_expressions",
split="latest"
)
```
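Because every evaluated task is stored as its own configuration, the full list of configuration names can be retrieved programmatically before choosing which one to load. The snippet below is an illustrative sketch using the standard `datasets` helpers; it assumes network access to the Hugging Face Hub.
```python
from datasets import get_dataset_config_names, load_dataset

repo = "open-llm-leaderboard/ehristoforu__RQwen-v0.1-details"

# One configuration per evaluated task (plus the aggregated "results" configuration).
configs = get_dataset_config_names(repo)
print(len(configs))
print(configs[:5])

# Load the latest per-sample details for one task, e.g. IFEval.
ifeval = load_dataset(repo, name="ehristoforu__RQwen-v0.1__leaderboard_ifeval", split="latest")
print(ifeval)
```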
## Latest results
These are the [latest results from run 2024-11-24T18-54-31.650276](https://huggingface.co/datasets/open-llm-leaderboard/ehristoforu__RQwen-v0.1-details/blob/main/ehristoforu__RQwen-v0.1/results_2024-11-24T18-54-31.650276.json) (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks; you can find each in the results and the "latest" split for each eval):
```python
{
"all": {
"leaderboard": {
"acc,none": 0.5201961436170213,
"acc_stderr,none": 0.004554750245067938,
"prompt_level_strict_acc,none": 0.7264325323475046,
"prompt_level_strict_acc_stderr,none": 0.019183727107392846,
"inst_level_strict_acc,none": 0.7985611510791367,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.7615526802218114,
"prompt_level_loose_acc_stderr,none": 0.01833788809424391,
"inst_level_loose_acc,none": 0.8249400479616307,
"inst_level_loose_acc_stderr,none": "N/A",
"acc_norm,none": 0.5702425736152549,
"acc_norm_stderr,none": 0.005124139231525546,
"exact_match,none": 0.02945619335347432,
"exact_match_stderr,none": 0.0046364753008244705,
"alias": "leaderboard"
},
"leaderboard_bbh": {
"acc_norm,none": 0.6415552855407047,
"acc_norm_stderr,none": 0.005818997061406109,
"alias": " - leaderboard_bbh"
},
"leaderboard_bbh_boolean_expressions": {
"alias": " - leaderboard_bbh_boolean_expressions",
"acc_norm,none": 0.892,
"acc_norm_stderr,none": 0.019669559381568776
},
"leaderboard_bbh_causal_judgement": {
"alias": " - leaderboard_bbh_causal_judgement",
"acc_norm,none": 0.6256684491978609,
"acc_norm_stderr,none": 0.0354849234134303
},
"leaderboard_bbh_date_understanding": {
"alias": " - leaderboard_bbh_date_understanding",
"acc_norm,none": 0.7,
"acc_norm_stderr,none": 0.029040893477575786
},
"leaderboard_bbh_disambiguation_qa": {
"alias": " - leaderboard_bbh_disambiguation_qa",
"acc_norm,none": 0.664,
"acc_norm_stderr,none": 0.029933259094191533
},
"leaderboard_bbh_formal_fallacies": {
"alias": " - leaderboard_bbh_formal_fallacies",
"acc_norm,none": 0.644,
"acc_norm_stderr,none": 0.0303436806571532
},
"leaderboard_bbh_geometric_shapes": {
"alias": " - leaderboard_bbh_geometric_shapes",
"acc_norm,none": 0.532,
"acc_norm_stderr,none": 0.031621252575725574
},
"leaderboard_bbh_hyperbaton": {
"alias": " - leaderboard_bbh_hyperbaton",
"acc_norm,none": 0.82,
"acc_norm_stderr,none": 0.02434689065029351
},
"leaderboard_bbh_logical_deduction_five_objects": {
"alias": " - leaderboard_bbh_logical_deduction_five_objects",
"acc_norm,none": 0.632,
"acc_norm_stderr,none": 0.03056207062099311
},
"leaderboard_bbh_logical_deduction_seven_objects": {
"alias": " - leaderboard_bbh_logical_deduction_seven_objects",
"acc_norm,none": 0.596,
"acc_norm_stderr,none": 0.03109668818482536
},
"leaderboard_bbh_logical_deduction_three_objects": {
"alias": " - leaderboard_bbh_logical_deduction_three_objects",
"acc_norm,none": 0.928,
"acc_norm_stderr,none": 0.016381005750490122
},
"leaderboard_bbh_movie_recommendation": {
"alias": " - leaderboard_bbh_movie_recommendation",
"acc_norm,none": 0.768,
"acc_norm_stderr,none": 0.026750070374865202
},
"leaderboard_bbh_navigate": {
"alias": " - leaderboard_bbh_navigate",
"acc_norm,none": 0.668,
"acc_norm_stderr,none": 0.029844039047465857
},
"leaderboard_bbh_object_counting": {
"alias": " - leaderboard_bbh_object_counting",
"acc_norm,none": 0.456,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_penguins_in_a_table": {
"alias": " - leaderboard_bbh_penguins_in_a_table",
"acc_norm,none": 0.7328767123287672,
"acc_norm_stderr,none": 0.03674407640319397
},
"leaderboard_bbh_reasoning_about_colored_objects": {
"alias": " - leaderboard_bbh_reasoning_about_colored_objects",
"acc_norm,none": 0.788,
"acc_norm_stderr,none": 0.025901884690541117
},
"leaderboard_bbh_ruin_names": {
"alias": " - leaderboard_bbh_ruin_names",
"acc_norm,none": 0.828,
"acc_norm_stderr,none": 0.02391551394448624
},
"leaderboard_bbh_salient_translation_error_detection": {
"alias": " - leaderboard_bbh_salient_translation_error_detection",
"acc_norm,none": 0.616,
"acc_norm_stderr,none": 0.030821679117375447
},
"leaderboard_bbh_snarks": {
"alias": " - leaderboard_bbh_snarks",
"acc_norm,none": 0.7808988764044944,
"acc_norm_stderr,none": 0.031090883837921395
},
"leaderboard_bbh_sports_understanding": {
"alias": " - leaderboard_bbh_sports_understanding",
"acc_norm,none": 0.624,
"acc_norm_stderr,none": 0.03069633626739458
},
"leaderboard_bbh_temporal_sequences": {
"alias": " - leaderboard_bbh_temporal_sequences",
"acc_norm,none": 0.852,
"acc_norm_stderr,none": 0.022503547243806186
},
"leaderboard_bbh_tracking_shuffled_objects_five_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_five_objects",
"acc_norm,none": 0.244,
"acc_norm_stderr,none": 0.02721799546455311
},
"leaderboard_bbh_tracking_shuffled_objects_seven_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_seven_objects",
"acc_norm,none": 0.204,
"acc_norm_stderr,none": 0.025537121574548162
},
"leaderboard_bbh_tracking_shuffled_objects_three_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_three_objects",
"acc_norm,none": 0.328,
"acc_norm_stderr,none": 0.029752391824475363
},
"leaderboard_bbh_web_of_lies": {
"alias": " - leaderboard_bbh_web_of_lies",
"acc_norm,none": 0.548,
"acc_norm_stderr,none": 0.03153986449255664
},
"leaderboard_gpqa": {
"acc_norm,none": 0.32550335570469796,
"acc_norm_stderr,none": 0.013587913744347518,
"alias": " - leaderboard_gpqa"
},
"leaderboard_gpqa_diamond": {
"alias": " - leaderboard_gpqa_diamond",
"acc_norm,none": 0.3282828282828283,
"acc_norm_stderr,none": 0.03345678422756777
},
"leaderboard_gpqa_extended": {
"alias": " - leaderboard_gpqa_extended",
"acc_norm,none": 0.32967032967032966,
"acc_norm_stderr,none": 0.0201365887896455
},
"leaderboard_gpqa_main": {
"alias": " - leaderboard_gpqa_main",
"acc_norm,none": 0.31919642857142855,
"acc_norm_stderr,none": 0.02204886116457606
},
"leaderboard_ifeval": {
"alias": " - leaderboard_ifeval",
"prompt_level_strict_acc,none": 0.7264325323475046,
"prompt_level_strict_acc_stderr,none": 0.019183727107392846,
"inst_level_strict_acc,none": 0.7985611510791367,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.7615526802218114,
"prompt_level_loose_acc_stderr,none": 0.01833788809424391,
"inst_level_loose_acc,none": 0.8249400479616307,
"inst_level_loose_acc_stderr,none": "N/A"
},
"leaderboard_math_hard": {
"exact_match,none": 0.02945619335347432,
"exact_match_stderr,none": 0.0046364753008244705,
"alias": " - leaderboard_math_hard"
},
"leaderboard_math_algebra_hard": {
"alias": " - leaderboard_math_algebra_hard",
"exact_match,none": 0.04234527687296417,
"exact_match_stderr,none": 0.011511879967693189
},
"leaderboard_math_counting_and_prob_hard": {
"alias": " - leaderboard_math_counting_and_prob_hard",
"exact_match,none": 0.032520325203252036,
"exact_match_stderr,none": 0.016058998205879745
},
"leaderboard_math_geometry_hard": {
"alias": " - leaderboard_math_geometry_hard",
"exact_match,none": 0.007575757575757576,
"exact_match_stderr,none": 0.007575757575757577
},
"leaderboard_math_intermediate_algebra_hard": {
"alias": " - leaderboard_math_intermediate_algebra_hard",
"exact_match,none": 0.017857142857142856,
"exact_match_stderr,none": 0.007928503387888855
},
"leaderboard_math_num_theory_hard": {
"alias": " - leaderboard_math_num_theory_hard",
"exact_match,none": 0.025974025974025976,
"exact_match_stderr,none": 0.012859058999697068
},
"leaderboard_math_prealgebra_hard": {
"alias": " - leaderboard_math_prealgebra_hard",
"exact_match,none": 0.05699481865284974,
"exact_match_stderr,none": 0.01673108529360757
},
"leaderboard_math_precalculus_hard": {
"alias": " - leaderboard_math_precalculus_hard",
"exact_match,none": 0.007407407407407408,
"exact_match_stderr,none": 0.007407407407407408
},
"leaderboard_mmlu_pro": {
"alias": " - leaderboard_mmlu_pro",
"acc,none": 0.5201961436170213,
"acc_stderr,none": 0.004554750245067938
},
"leaderboard_musr": {
"acc_norm,none": 0.4126984126984127,
"acc_norm_stderr,none": 0.01745952627984168,
"alias": " - leaderboard_musr"
},
"leaderboard_musr_murder_mysteries": {
"alias": " - leaderboard_musr_murder_mysteries",
"acc_norm,none": 0.532,
"acc_norm_stderr,none": 0.031621252575725574
},
"leaderboard_musr_object_placements": {
"alias": " - leaderboard_musr_object_placements",
"acc_norm,none": 0.26171875,
"acc_norm_stderr,none": 0.027526959754524398
},
"leaderboard_musr_team_allocation": {
"alias": " - leaderboard_musr_team_allocation",
"acc_norm,none": 0.448,
"acc_norm_stderr,none": 0.03151438761115349
}
},
"leaderboard": {
"acc,none": 0.5201961436170213,
"acc_stderr,none": 0.004554750245067938,
"prompt_level_strict_acc,none": 0.7264325323475046,
"prompt_level_strict_acc_stderr,none": 0.019183727107392846,
"inst_level_strict_acc,none": 0.7985611510791367,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.7615526802218114,
"prompt_level_loose_acc_stderr,none": 0.01833788809424391,
"inst_level_loose_acc,none": 0.8249400479616307,
"inst_level_loose_acc_stderr,none": "N/A",
"acc_norm,none": 0.5702425736152549,
"acc_norm_stderr,none": 0.005124139231525546,
"exact_match,none": 0.02945619335347432,
"exact_match_stderr,none": 0.0046364753008244705,
"alias": "leaderboard"
},
"leaderboard_bbh": {
"acc_norm,none": 0.6415552855407047,
"acc_norm_stderr,none": 0.005818997061406109,
"alias": " - leaderboard_bbh"
},
"leaderboard_bbh_boolean_expressions": {
"alias": " - leaderboard_bbh_boolean_expressions",
"acc_norm,none": 0.892,
"acc_norm_stderr,none": 0.019669559381568776
},
"leaderboard_bbh_causal_judgement": {
"alias": " - leaderboard_bbh_causal_judgement",
"acc_norm,none": 0.6256684491978609,
"acc_norm_stderr,none": 0.0354849234134303
},
"leaderboard_bbh_date_understanding": {
"alias": " - leaderboard_bbh_date_understanding",
"acc_norm,none": 0.7,
"acc_norm_stderr,none": 0.029040893477575786
},
"leaderboard_bbh_disambiguation_qa": {
"alias": " - leaderboard_bbh_disambiguation_qa",
"acc_norm,none": 0.664,
"acc_norm_stderr,none": 0.029933259094191533
},
"leaderboard_bbh_formal_fallacies": {
"alias": " - leaderboard_bbh_formal_fallacies",
"acc_norm,none": 0.644,
"acc_norm_stderr,none": 0.0303436806571532
},
"leaderboard_bbh_geometric_shapes": {
"alias": " - leaderboard_bbh_geometric_shapes",
"acc_norm,none": 0.532,
"acc_norm_stderr,none": 0.031621252575725574
},
"leaderboard_bbh_hyperbaton": {
"alias": " - leaderboard_bbh_hyperbaton",
"acc_norm,none": 0.82,
"acc_norm_stderr,none": 0.02434689065029351
},
"leaderboard_bbh_logical_deduction_five_objects": {
"alias": " - leaderboard_bbh_logical_deduction_five_objects",
"acc_norm,none": 0.632,
"acc_norm_stderr,none": 0.03056207062099311
},
"leaderboard_bbh_logical_deduction_seven_objects": {
"alias": " - leaderboard_bbh_logical_deduction_seven_objects",
"acc_norm,none": 0.596,
"acc_norm_stderr,none": 0.03109668818482536
},
"leaderboard_bbh_logical_deduction_three_objects": {
"alias": " - leaderboard_bbh_logical_deduction_three_objects",
"acc_norm,none": 0.928,
"acc_norm_stderr,none": 0.016381005750490122
},
"leaderboard_bbh_movie_recommendation": {
"alias": " - leaderboard_bbh_movie_recommendation",
"acc_norm,none": 0.768,
"acc_norm_stderr,none": 0.026750070374865202
},
"leaderboard_bbh_navigate": {
"alias": " - leaderboard_bbh_navigate",
"acc_norm,none": 0.668,
"acc_norm_stderr,none": 0.029844039047465857
},
"leaderboard_bbh_object_counting": {
"alias": " - leaderboard_bbh_object_counting",
"acc_norm,none": 0.456,
"acc_norm_stderr,none": 0.031563285061213475
},
"leaderboard_bbh_penguins_in_a_table": {
"alias": " - leaderboard_bbh_penguins_in_a_table",
"acc_norm,none": 0.7328767123287672,
"acc_norm_stderr,none": 0.03674407640319397
},
"leaderboard_bbh_reasoning_about_colored_objects": {
"alias": " - leaderboard_bbh_reasoning_about_colored_objects",
"acc_norm,none": 0.788,
"acc_norm_stderr,none": 0.025901884690541117
},
"leaderboard_bbh_ruin_names": {
"alias": " - leaderboard_bbh_ruin_names",
"acc_norm,none": 0.828,
"acc_norm_stderr,none": 0.02391551394448624
},
"leaderboard_bbh_salient_translation_error_detection": {
"alias": " - leaderboard_bbh_salient_translation_error_detection",
"acc_norm,none": 0.616,
"acc_norm_stderr,none": 0.030821679117375447
},
"leaderboard_bbh_snarks": {
"alias": " - leaderboard_bbh_snarks",
"acc_norm,none": 0.7808988764044944,
"acc_norm_stderr,none": 0.031090883837921395
},
"leaderboard_bbh_sports_understanding": {
"alias": " - leaderboard_bbh_sports_understanding",
"acc_norm,none": 0.624,
"acc_norm_stderr,none": 0.03069633626739458
},
"leaderboard_bbh_temporal_sequences": {
"alias": " - leaderboard_bbh_temporal_sequences",
"acc_norm,none": 0.852,
"acc_norm_stderr,none": 0.022503547243806186
},
"leaderboard_bbh_tracking_shuffled_objects_five_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_five_objects",
"acc_norm,none": 0.244,
"acc_norm_stderr,none": 0.02721799546455311
},
"leaderboard_bbh_tracking_shuffled_objects_seven_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_seven_objects",
"acc_norm,none": 0.204,
"acc_norm_stderr,none": 0.025537121574548162
},
"leaderboard_bbh_tracking_shuffled_objects_three_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_three_objects",
"acc_norm,none": 0.328,
"acc_norm_stderr,none": 0.029752391824475363
},
"leaderboard_bbh_web_of_lies": {
"alias": " - leaderboard_bbh_web_of_lies",
"acc_norm,none": 0.548,
"acc_norm_stderr,none": 0.03153986449255664
},
"leaderboard_gpqa": {
"acc_norm,none": 0.32550335570469796,
"acc_norm_stderr,none": 0.013587913744347518,
"alias": " - leaderboard_gpqa"
},
"leaderboard_gpqa_diamond": {
"alias": " - leaderboard_gpqa_diamond",
"acc_norm,none": 0.3282828282828283,
"acc_norm_stderr,none": 0.03345678422756777
},
"leaderboard_gpqa_extended": {
"alias": " - leaderboard_gpqa_extended",
"acc_norm,none": 0.32967032967032966,
"acc_norm_stderr,none": 0.0201365887896455
},
"leaderboard_gpqa_main": {
"alias": " - leaderboard_gpqa_main",
"acc_norm,none": 0.31919642857142855,
"acc_norm_stderr,none": 0.02204886116457606
},
"leaderboard_ifeval": {
"alias": " - leaderboard_ifeval",
"prompt_level_strict_acc,none": 0.7264325323475046,
"prompt_level_strict_acc_stderr,none": 0.019183727107392846,
"inst_level_strict_acc,none": 0.7985611510791367,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.7615526802218114,
"prompt_level_loose_acc_stderr,none": 0.01833788809424391,
"inst_level_loose_acc,none": 0.8249400479616307,
"inst_level_loose_acc_stderr,none": "N/A"
},
"leaderboard_math_hard": {
"exact_match,none": 0.02945619335347432,
"exact_match_stderr,none": 0.0046364753008244705,
"alias": " - leaderboard_math_hard"
},
"leaderboard_math_algebra_hard": {
"alias": " - leaderboard_math_algebra_hard",
"exact_match,none": 0.04234527687296417,
"exact_match_stderr,none": 0.011511879967693189
},
"leaderboard_math_counting_and_prob_hard": {
"alias": " - leaderboard_math_counting_and_prob_hard",
"exact_match,none": 0.032520325203252036,
"exact_match_stderr,none": 0.016058998205879745
},
"leaderboard_math_geometry_hard": {
"alias": " - leaderboard_math_geometry_hard",
"exact_match,none": 0.007575757575757576,
"exact_match_stderr,none": 0.007575757575757577
},
"leaderboard_math_intermediate_algebra_hard": {
"alias": " - leaderboard_math_intermediate_algebra_hard",
"exact_match,none": 0.017857142857142856,
"exact_match_stderr,none": 0.007928503387888855
},
"leaderboard_math_num_theory_hard": {
"alias": " - leaderboard_math_num_theory_hard",
"exact_match,none": 0.025974025974025976,
"exact_match_stderr,none": 0.012859058999697068
},
"leaderboard_math_prealgebra_hard": {
"alias": " - leaderboard_math_prealgebra_hard",
"exact_match,none": 0.05699481865284974,
"exact_match_stderr,none": 0.01673108529360757
},
"leaderboard_math_precalculus_hard": {
"alias": " - leaderboard_math_precalculus_hard",
"exact_match,none": 0.007407407407407408,
"exact_match_stderr,none": 0.007407407407407408
},
"leaderboard_mmlu_pro": {
"alias": " - leaderboard_mmlu_pro",
"acc,none": 0.5201961436170213,
"acc_stderr,none": 0.004554750245067938
},
"leaderboard_musr": {
"acc_norm,none": 0.4126984126984127,
"acc_norm_stderr,none": 0.01745952627984168,
"alias": " - leaderboard_musr"
},
"leaderboard_musr_murder_mysteries": {
"alias": " - leaderboard_musr_murder_mysteries",
"acc_norm,none": 0.532,
"acc_norm_stderr,none": 0.031621252575725574
},
"leaderboard_musr_object_placements": {
"alias": " - leaderboard_musr_object_placements",
"acc_norm,none": 0.26171875,
"acc_norm_stderr,none": 0.027526959754524398
},
"leaderboard_musr_team_allocation": {
"alias": " - leaderboard_musr_team_allocation",
"acc_norm,none": 0.448,
"acc_norm_stderr,none": 0.03151438761115349
}
}
```
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
jamesdaizs/chiblings | jamesdaizs | "2024-11-24T19:38:18Z" | 2 | 0 | [
"task_categories:text-to-image",
"language:en",
"size_categories:n<1K",
"format:imagefolder",
"modality:image",
"library:datasets",
"library:mlcroissant",
"region:us",
"art"
] | [
"text-to-image"
] | "2024-11-24T19:25:44Z" | ---
task_categories:
- text-to-image
language:
- en
tags:
- art
size_categories:
- 1K<n<10K
--- |
Obscure-Entropy/Flickr8k-Augmented | Obscure-Entropy | "2024-11-25T11:51:04Z" | 2 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-24T20:02:05Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: caption
dtype: string
splits:
- name: train
num_bytes: 10108227134.688
num_examples: 48288
download_size: 10106362012
dataset_size: 10108227134.688
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
HanxuHU/gemma-2-9b-it-ultrafeedback-annotate-truth-judge | HanxuHU | "2024-11-24T22:22:20Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-24T22:22:15Z" | ---
dataset_info:
features:
- name: prompt_id
dtype: string
- name: prompt
dtype: string
- name: all_generated_responses
sequence: string
- name: scores
sequence: float64
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: test
num_bytes: 28633857
num_examples: 1962
download_size: 13281487
dataset_size: 28633857
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
marcov/super_glue_record_promptsource | marcov | "2024-11-24T23:58:20Z" | 2 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-24T23:46:39Z" | ---
dataset_info:
features:
- name: passage
dtype: string
- name: query
dtype: string
- name: entities
sequence: string
- name: entity_spans
sequence:
- name: text
dtype: string
- name: start
dtype: int32
- name: end
dtype: int32
- name: answers
sequence: string
- name: idx
struct:
- name: passage
dtype: int32
- name: query
dtype: int32
- name: template_name
dtype: string
- name: template
dtype: string
- name: rendered_input
dtype: string
- name: rendered_output
dtype: string
splits:
- name: train
num_bytes: 6602841845.0
num_examples: 2014600
- name: validation
num_bytes: 646456677.0
num_examples: 200000
- name: test
num_bytes: 625849870.0
num_examples: 200000
download_size: 3154589657
dataset_size: 7875148392.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
neoneye/simon-arc-combine-v181 | neoneye | "2024-11-24T23:56:03Z" | 2 | 0 | [
"task_categories:image-to-text",
"task_categories:text-to-image",
"language:en",
"license:mit",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"image-to-text",
"text-to-image"
] | "2024-11-24T23:55:17Z" | ---
license: mit
task_categories:
- image-to-text
- text-to-image
language:
- en
pretty_name: simons ARC (abstraction & reasoning corpus) combined datasets version 181
size_categories:
- 1K<n<10K
configs:
- config_name: default
data_files:
- split: train
path: data.jsonl
---
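The data lives in a single `data.jsonl` file exposed as the `train` split (see the config above). A minimal loading sketch, assuming the standard `datasets` library:

```python
from datasets import load_dataset

# Loads the train split declared in the YAML config above.
dataset = load_dataset("neoneye/simon-arc-combine-v181", split="train")
print(dataset[0])
```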
# Version 1
A combination of multiple datasets.
Datasets: `dataset_solve_color.jsonl`, `dataset_solve_rotate.jsonl`, `dataset_solve_translate.jsonl`.
# Version 2
Datasets: `dataset_solve_color.jsonl`, `dataset_solve_rotate.jsonl`, `dataset_solve_translate.jsonl`.
# Version 3
Datasets: `dataset_solve_color.jsonl`, `dataset_solve_rotate.jsonl`, `dataset_solve_translate.jsonl`.
# Version 4
Added a shared dataset name for all these datasets: `SIMON-SOLVE-V1`. There may be higher version numbers in the future.
My hypothesis: with a version number in the dataset name, it may be easier to unlearn incorrect training data.
Datasets: `dataset_solve_color.jsonl`, `dataset_solve_rotate.jsonl`, `dataset_solve_translate.jsonl`.
# Version 5
Different random seed.
# Version 6
Using `SIMON-SOLVE-V1` everywhere. Remove the `SIMON-SOLVE-COLOR`, `SIMON-SOLVE-ROTATE`, `SIMON-SOLVE-TRANSLATE`.
# Version 7
Using `SIMON-SOLVE-V1` everywhere.
# Version 8
Same settings. Different seed as usual.
# Version 9
Switching from context length 256 to context length 512.
Increasing the image sizes so the prompt length stays below 512.
`dataset_solve_color`, image size: 1-13.
`dataset_solve_rotate`, image size: 1-9.
`dataset_solve_translate`, image size: 3-9.
# Version 10
Same settings. Different seed as usual.
# Version 11
Same settings. Different seed as usual.
# Version 12
Added 1 more pair to the examples. Now it's 2-4 examples. Previously it was 2-3 examples.
# Version 13
Same settings. Different seed as usual.
# Version 14
Same settings. Different seed as usual.
# Version 15
Same settings. Different seed as usual.
# Version 16
Added `Predict the output image.`
Disabled prediction of rows.
Disabled prediction of height.
# Version 17
Same settings. Different seed as usual.
Using the `DatasetGenerator` and the `DatasetItemListBuilder`.
# Version 18
Added datasets.
Datasets:
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_cellular_automaton.jsonl` - added.
- `dataset_shape.jsonl` - added.
# Version 19
Added dataset.
Datasets:
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_cellular_automaton.jsonl`
- `dataset_shape.jsonl`
- `dataset_image.jsonl` - added.
# Version 20
Bigger images.
# Version 21
Added dataset. Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_shape.jsonl`
- `dataset_mass.jsonl` - added.
# Version 22
Added dataset.
Datasets:
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_cellular_automaton.jsonl`
- `dataset_shape.jsonl`
- `dataset_image.jsonl`
- `dataset_mass.jsonl`
- `dataset_histogram.jsonl` - added.
Bigger image sizes.
Number of rows=200k. Was previously 100k rows.
# Version 23
`dataset_mass.jsonl`: increased to `max_mass=5`.
# Version 24
`dataset_mass.jsonl`: increased to `max_mass=6`.
# Version 25
Different seed.
# Version 26
`dataset_mass.jsonl`: increased to `max_mass=25`.
Different seed.
# Version 27
Different seed.
# Version 28
Different seed.
# Version 29
Different seed.
# Version 30
Different seed.
# Version 31
Different seed.
# Version 32
Different seed.
# Version 33
Disabled some dataset.
Datasets:
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_mass.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_rotate.jsonl` - disabled
- `dataset_solve_translate.jsonl` - disabled
- `dataset_cellular_automaton.jsonl`
# Version 34
Enabled all datasets.
# Version 35
Regenerated all datasets with new random seeds.
# Version 36
Added dataset `dataset_scale.jsonl`.
Disabled some dataset.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
# Version 37
Enabled all datasets
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
# Version 38
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl` - added
# Version 39
Regenerated all datasets with new random seeds.
# Version 40
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl` - added
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl`
# Version 41
Regenerated all datasets with new random seeds.
# Version 42
Added dataset. Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl` - added
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 43
Enabled all datasets.
# Version 44
Regenerated all datasets with new random seeds.
# Version 45
Extended the `dataset_shape.jsonl` with these new `PixelConnectivity` types: `CORNER4`, `LR2`, `TB2`, `TLBR2`, `TRBL2`.
Hopefully this makes the model better at making sense of diagonal structures, which the model is currently terrible at.
# Version 46
Regenerated all datasets with new random seeds.
# Version 47
Added dataset. Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl` - added
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl`
# Version 48
Enabled all datasets.
# Version 49
Bigger `max_mass`. From 6 to 8.
# Version 50
Added dataset.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl` - added
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl`
# Version 51
Regenerated all datasets with new random seeds.
# Version 52
Regenerated all datasets with new random seeds.
# Version 53
Regenerated all datasets with new random seeds.
# Version 54
Added dataset.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_erotion.jsonl` - added
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl`
# Version 55
Added dataset. Disabled most datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl` - added
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_compress.jsonl` - disabled
- `dataset_solve_rotate.jsonl` - disabled
- `dataset_solve_scale.jsonl` - disabled
- `dataset_solve_symmetry.jsonl` - disabled
- `dataset_solve_translate.jsonl` - disabled
- `dataset_symmetry.jsonl` - disabled
# Version 56
Enabled all datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl`
# Version 57
Regenerated all datasets with new random seeds.
# Version 58
Disabled most datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 59
Added new datasets.
Disabled most datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl` - added
- `dataset_solve_fractal.jsonl` - added
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 60
Incremented random seed.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 61
Enabled all datasets.
More padding inside the `dataset_solve_fractal.jsonl`.
# Version 62
All datasets still enabled.
Turning up the parameters for `dataset_solve_fractal.jsonl`:
- `scale_input` from 3 to 4.
- `scale_output` from 3 to 4.
- `max_image_size` from 3 to 4.
- `max_pad_count` from 4 to 5.
# Version 63
Disabled several datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl`
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_compress.jsonl` - disabled
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_rotate.jsonl` - disabled
- `dataset_solve_scale.jsonl` - disabled
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl` - disabled
- `dataset_symmetry.jsonl` - disabled
# Version 64
Added dataset.
Increased the number of rows in the jsonl file from 200k to 300k.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_outline.jsonl` - added
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl`
# Version 65
Random seed.
# Version 66
Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl`
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_compress.jsonl` - disabled
- `dataset_solve_erosion.jsonl` - disabled
- `dataset_solve_fractal.jsonl` - disabled
- `dataset_solve_outline.jsonl` - disabled
- `dataset_solve_rotate.jsonl` - disabled
- `dataset_solve_scale.jsonl` - disabled
- `dataset_solve_symmetry.jsonl` - disabled
- `dataset_solve_translate.jsonl` - disabled
- `dataset_symmetry.jsonl` - disabled
# Version 67
Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl` - enabled
- `dataset_solve_compress.jsonl` - enabled
- `dataset_solve_erosion.jsonl` - enabled
- `dataset_solve_fractal.jsonl` - enabled
- `dataset_solve_outline.jsonl` - enabled
- `dataset_solve_rotate.jsonl` - enabled
- `dataset_solve_scale.jsonl` - enabled
- `dataset_solve_symmetry.jsonl` - enabled
- `dataset_solve_translate.jsonl` - enabled
- `dataset_symmetry.jsonl`
# Version 68
Enabled all datasets.
# Version 69
Different random seed.
# Version 70
Different random seed.
# Version 71
Different random seed.
# Version 72
Different random seed.
# Version 73
Different random seed.
# Version 74
Major update to `dataset_solve_symmetry.jsonl`.
# Version 75
Different random seed.
# Version 76
Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 77
Enabled all datasets.
# Version 78
Major update to `dataset_solve_symmetry.jsonl`.
# Version 79
Different random seed.
# Version 80
Different random seed.
# Version 81
Different random seed.
# Version 82
Added dataset.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl` - added
- `dataset_symmetry.jsonl`
# Version 83
Disabled datasets that don't solve ARC puzzles.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 84
Added dataset `dataset_solve_grid.jsonl`.
Disabled datasets that don't solve ARC puzzles.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl` - added
- `dataset_solve_outline.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 85
Disabled datasets that don't solve ARC puzzles.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 86
Enabled all datasets.
# Version 87
Disabled datasets that don't solve ARC puzzles.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 88
Added dataset `dataset_solve_probecolor.jsonl` with all directions enabled.
Disabled datasets that don't solve ARC puzzles.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 89
Enabled all datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 90
Disabled some of the datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl` - disabled
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl` - disabled
- `dataset_solve_outline.jsonl` - disabled
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl` - disabled
- `dataset_solve_zindex.jsonl` - disabled
- `dataset_symmetry.jsonl` - disabled
# Version 91
Added dataset.
Enabled all datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_mass.jsonl` - added
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 92
Different random seed.
# Version 93
Added dataset.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl` - added
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 94
Added dataset.
Disabled datasets that don't solve ARC tasks.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_bool.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl` - added
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 95
Added dataset.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl` - added
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 96
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl` - major update.
- `dataset_symmetry.jsonl`
# Version 97
Disabled the first half of the datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_bool.jsonl` - disabled
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_compress.jsonl` - disabled
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 98
Disabled the last half of the datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl` - disabled
- `dataset_solve_erosion.jsonl` - disabled
- `dataset_solve_fractal.jsonl` - disabled
- `dataset_solve_grid.jsonl` - disabled
- `dataset_solve_half.jsonl` - disabled
- `dataset_solve_mass.jsonl` - disabled
- `dataset_solve_outline.jsonl` - disabled
- `dataset_solve_probecolor.jsonl` - disabled
- `dataset_solve_rotate.jsonl` - disabled
- `dataset_solve_scale.jsonl` - disabled
- `dataset_solve_symmetry.jsonl` - disabled
- `dataset_solve_translate.jsonl` - disabled
- `dataset_solve_zindex.jsonl` - disabled
- `dataset_symmetry.jsonl` - disabled
# Version 99
Disabled a quarter of the datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_bool.jsonl` - disabled
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_compress.jsonl` - disabled
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl` - disabled
- `dataset_solve_rotate.jsonl` - disabled
- `dataset_solve_scale.jsonl` - disabled
- `dataset_solve_symmetry.jsonl` - disabled
- `dataset_solve_translate.jsonl` - disabled
- `dataset_solve_zindex.jsonl` - disabled
- `dataset_symmetry.jsonl` - disabled
# Version 100
Added dataset.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl` - added
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 101
Disabled the non-solving datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_bool.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 102
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl` - added
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 103
Different random seed.
# Version 104
Disabled the non-solving datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 105
Major update to `dataset_solve_scale.jsonl` with scaling down noisy images.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl` - scale down noisy images
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 106
Different random seed.
# Version 107
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_ray.jsonl` - added
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 108
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_flip.jsonl` - added
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_ray.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 109
Different random seed.
# Version 110
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_flip.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_halfplane.jsonl` - added
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_ray.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 111
Different random seed.
# Version 112
Different random seed.
# Version 113
Different random seed.
# Version 114
Major update to `dataset_solve_mass.jsonl`; it now includes `mass_compare_adjacent_rows` and `mass_compare_adjacent_columns`.
# Version 115
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_flip.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_gravity.jsonl` - added
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_halfplane.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_ray.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 116
Hypothesis: if I train with a smaller dataset, will it converge faster?
Reduced the number of rows in this dataset from 300k rows to 10k rows.
# Version 117
Interesting, 10k rows seems to work fine with the model training.
Picked new random rows.
# Version 118
Still going with 10k rows.
Picked new random rows.
# Version 119
Still going with 10k rows.
Picked new random rows.
# Version 120
Switched to 20k rows.
# Version 121
Still going with 20k rows.
Picked new random rows.
# Version 122
20k rows.
Added `dataset_solve_reverse.jsonl`.
# Version 123
Doubled the number of rows to 40k rows.
# Version 124
Set row count to 100k rows.
Major update to `dataset_solve_gravity.jsonl`.
# Version 125
Row count: 100k rows.
# Version 126
Row count: 20k rows.
Only these datasets are enabled:
```txt
dataset_solve_bool.jsonl
dataset_solve_boundingbox.jsonl
dataset_solve_color.jsonl
dataset_solve_compress.jsonl
dataset_solve_edge.jsonl
dataset_solve_erosion.jsonl
dataset_solve_flip.jsonl
dataset_solve_fractal.jsonl
dataset_solve_gravity.jsonl
dataset_solve_grid.jsonl
dataset_solve_half.jsonl
dataset_solve_halfplane.jsonl
dataset_solve_mask.jsonl
dataset_solve_mass.jsonl
dataset_solve_outline.jsonl
dataset_solve_probecolor.jsonl
dataset_solve_ray.jsonl
dataset_solve_reverse.jsonl
dataset_solve_rotate.jsonl
dataset_solve_scale.jsonl
dataset_solve_symmetry.jsonl
dataset_solve_translate.jsonl
dataset_solve_zindex.jsonl
```
# Version 127
Row count: 20k rows.
Only these datasets are enabled:
```txt
dataset_solve_scale.jsonl
dataset_solve_symmetry.jsonl
dataset_solve_translate.jsonl
dataset_solve_zindex.jsonl
```
# Version 128
Row count: 20k rows.
Only these datasets are enabled:
```txt
dataset_solve_probecolor.jsonl
dataset_solve_ray.jsonl
dataset_solve_reverse.jsonl
dataset_solve_rotate.jsonl
```
# Version 129
Row count: 20k rows.
Only these datasets are enabled:
```txt
dataset_solve_gravity.jsonl
dataset_solve_grid.jsonl
dataset_solve_half.jsonl
dataset_solve_halfplane.jsonl
dataset_solve_mask.jsonl
dataset_solve_mass.jsonl
dataset_solve_outline.jsonl
```
# Version 130
Row count: 20k rows.
Only these datasets are enabled:
```txt
dataset_solve_bool.jsonl
dataset_solve_boundingbox.jsonl
dataset_solve_color.jsonl
dataset_solve_compress.jsonl
dataset_solve_edge.jsonl
dataset_solve_erosion.jsonl
dataset_solve_flip.jsonl
dataset_solve_fractal.jsonl
```
# Version 131
Switched back to 300k rows.
Enabled all the datasets.
# Version 132
Random seed.
# Version 133
Removed the rows that are too long to fit inside a 512 token context length.
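The filtering script itself isn't part of this card; a minimal sketch of the idea, assuming a hypothetical `text` field per row and a generic tokenizer (the actual tokenizer and field names are not stated here):

```python
import json
from transformers import AutoTokenizer

# Hypothetical tokenizer choice; the model actually trained on this data is not named in this card.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
MAX_CONTEXT = 512

with open("data.jsonl") as src, open("data_filtered.jsonl", "w") as dst:
    for line in src:
        row = json.loads(line)
        # Keep only rows whose serialized text fits inside the context window.
        if len(tokenizer(row["text"])["input_ids"]) <= MAX_CONTEXT:
            dst.write(json.dumps(row) + "\n")
```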
# Version 134
Random seed.
# Version 135
Random seed.
# Version 136
Major update to the `dataset_solve_gravity.jsonl` file.
# Version 137
Added dataset `dataset_solve_skew.jsonl`.
# Version 138
Disabled several datasets.
```txt
# 'dataset_cellular_automaton.jsonl',
# 'dataset_dilation.jsonl',
# 'dataset_erosion.jsonl',
# 'dataset_histogram.jsonl',
# 'dataset_image.jsonl',
# 'dataset_image_pair.jsonl',
# 'dataset_mass.jsonl',
# 'dataset_scale.jsonl',
# 'dataset_shape.jsonl',
# 'dataset_solve_bool.jsonl',
'dataset_solve_boundingbox.jsonl',
'dataset_solve_color.jsonl',
'dataset_solve_compress.jsonl',
'dataset_solve_edge.jsonl',
'dataset_solve_erosion.jsonl',
'dataset_solve_flip.jsonl',
'dataset_solve_fractal.jsonl',
'dataset_solve_gravity.jsonl',
'dataset_solve_grid.jsonl',
'dataset_solve_half.jsonl',
# 'dataset_solve_halfplane.jsonl',
'dataset_solve_mask.jsonl',
'dataset_solve_mass.jsonl',
'dataset_solve_outline.jsonl',
'dataset_solve_probecolor.jsonl',
# 'dataset_solve_ray.jsonl',
# 'dataset_solve_reverse.jsonl',
'dataset_solve_rotate.jsonl',
'dataset_solve_scale.jsonl',
# 'dataset_solve_skew.jsonl',
'dataset_solve_symmetry.jsonl',
'dataset_solve_translate.jsonl',
'dataset_solve_zindex.jsonl',
# 'dataset_symmetry.jsonl',
```
# Version 139
Disabled several datasets.
```txt
'dataset_cellular_automaton.jsonl',
'dataset_dilation.jsonl',
'dataset_erosion.jsonl',
'dataset_histogram.jsonl',
'dataset_image.jsonl',
'dataset_image_pair.jsonl',
'dataset_mass.jsonl',
'dataset_scale.jsonl',
'dataset_shape.jsonl',
'dataset_solve_bool.jsonl',
# 'dataset_solve_boundingbox.jsonl',
# 'dataset_solve_color.jsonl',
# 'dataset_solve_compress.jsonl',
# 'dataset_solve_edge.jsonl',
# 'dataset_solve_erosion.jsonl',
# 'dataset_solve_flip.jsonl',
# 'dataset_solve_fractal.jsonl',
# 'dataset_solve_gravity.jsonl',
# 'dataset_solve_grid.jsonl',
# 'dataset_solve_half.jsonl',
'dataset_solve_halfplane.jsonl',
# 'dataset_solve_mask.jsonl',
# 'dataset_solve_mass.jsonl',
# 'dataset_solve_outline.jsonl',
# 'dataset_solve_probecolor.jsonl',
'dataset_solve_ray.jsonl',
'dataset_solve_reverse.jsonl',
# 'dataset_solve_rotate.jsonl',
# 'dataset_solve_scale.jsonl',
'dataset_solve_skew.jsonl',
# 'dataset_solve_symmetry.jsonl',
# 'dataset_solve_translate.jsonl',
# 'dataset_solve_zindex.jsonl',
'dataset_symmetry.jsonl',
```
# Version 140
Enabled all datasets.
Added new dataset: `dataset_solve_cross.jsonl`.
# Version 141
Switched to 30k rows.
Disabled several datasets.
```txt
# 'dataset_cellular_automaton.jsonl',
# 'dataset_dilation.jsonl',
# 'dataset_erosion.jsonl',
# 'dataset_histogram.jsonl',
# 'dataset_image.jsonl',
# 'dataset_image_pair.jsonl',
# 'dataset_mass.jsonl',
# 'dataset_scale.jsonl',
# 'dataset_shape.jsonl',
# 'dataset_solve_bool.jsonl',
'dataset_solve_boundingbox.jsonl',
'dataset_solve_color.jsonl',
'dataset_solve_compress.jsonl',
# 'dataset_solve_cross.jsonl',
'dataset_solve_edge.jsonl',
'dataset_solve_erosion.jsonl',
'dataset_solve_flip.jsonl',
'dataset_solve_fractal.jsonl',
# 'dataset_solve_gravity.jsonl',
'dataset_solve_grid.jsonl',
'dataset_solve_half.jsonl',
# 'dataset_solve_halfplane.jsonl',
'dataset_solve_mask.jsonl',
'dataset_solve_mass.jsonl',
'dataset_solve_outline.jsonl',
'dataset_solve_probecolor.jsonl',
'dataset_solve_ray.jsonl',
# 'dataset_solve_reverse.jsonl',
'dataset_solve_rotate.jsonl',
'dataset_solve_scale.jsonl',
'dataset_solve_skew.jsonl',
'dataset_solve_symmetry.jsonl',
'dataset_solve_translate.jsonl',
# 'dataset_solve_zindex.jsonl',
# 'dataset_symmetry.jsonl',
```
# Version 142
Switched to 300k rows.
Enabled all datasets.
Switched from 512 context to 1024 context.
# Version 143
Bigger images in `dataset_solve_cross.jsonl` and in `dataset_solve_mass.jsonl`.
# Version 144
Major update to `dataset_solve_symmetry.jsonl`.
# Version 145
Added `dataset_solve_span.jsonl`.
# Version 146
Extended `dataset_solve_span.jsonl` with `generate_task_with_template_lines`.
# Version 147
Extended `dataset_solve_span.jsonl` with `generate_task_with_alternate`.
# Version 148
Added `dataset_solve_count.jsonl`.
# Version 149
Randomized.
# Version 150
Upgraded context length for several datasets from 512 to 1024.
# Version 151
Randomized.
# Version 152
Randomized.
# Version 153
Extended `dataset_solve_mask.jsonl` with `generate_task_repair_rectangle_and_crop`.
# Version 154
Extended `dataset_solve_color.jsonl` with `generate_task_replace_color`.
# Version 155
Major update to datasets in the range from `dataset_solve_axxx.jsonl` to `dataset_solve_mask.jsonl`.
Now there is an earlier prediction for the output that is to be predicted. It may contain a hint, or it may be garbage that is to be ignored.
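The exact prompt layout isn't reproduced in this card; a hypothetical sketch of the idea (an earlier candidate output is embedded in the prompt, and it is either a useful hint or noise the model must learn to ignore):

```python
import random

def build_example(input_grid: str, correct_output: str, wrong_output: str) -> dict:
    # Hypothetical layout: roughly half the time the embedded prediction is the
    # correct answer (a hint), otherwise it is garbage to be ignored.
    earlier = correct_output if random.random() < 0.5 else wrong_output
    prompt = (
        f"input: {input_grid}\n"
        f"earlier prediction (may be wrong): {earlier}\n"
        "predict the output:"
    )
    return {"prompt": prompt, "response": correct_output}
```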
# Version 156
Only 2000 rows.
Only these datasets.
```txt
'dataset_cellular_automaton.jsonl',
'dataset_dilation.jsonl',
'dataset_erosion.jsonl',
'dataset_histogram.jsonl',
'dataset_image.jsonl',
'dataset_image_pair.jsonl',
'dataset_mass.jsonl',
'dataset_scale.jsonl',
'dataset_shape.jsonl',
'dataset_symmetry.jsonl',
```
# Version 157
Only these datasets.
- 'dataset_solve_bool.jsonl',
- 'dataset_solve_boundingbox.jsonl',
- 'dataset_solve_color.jsonl',
- 'dataset_solve_compress.jsonl',
- 'dataset_solve_count.jsonl',
- 'dataset_solve_cross.jsonl',
- 'dataset_solve_edge.jsonl',
- 'dataset_solve_erosion.jsonl',
- 'dataset_solve_flip.jsonl',
- 'dataset_solve_fractal.jsonl',
- 'dataset_solve_gravity.jsonl',
- 'dataset_solve_grid.jsonl',
- 'dataset_solve_half.jsonl',
- 'dataset_solve_halfplane.jsonl',
- 'dataset_solve_mask.jsonl',
- 'dataset_solve_mass.jsonl',
- 'dataset_solve_outline.jsonl',
- 'dataset_solve_probecolor.jsonl',
- 'dataset_solve_ray.jsonl',
- 'dataset_solve_reverse.jsonl',
- 'dataset_solve_rotate.jsonl',
- 'dataset_solve_scale.jsonl',
- 'dataset_solve_span.jsonl',
- 'dataset_solve_skew.jsonl',
- 'dataset_solve_symmetry.jsonl',
- 'dataset_solve_translate.jsonl',
- 'dataset_solve_zindex.jsonl',
# Version 158
Only these datasets.
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_rectangle.jsonl`
# Version 159
Enabled all the `_solve_` datasets.
# Version 160
Regenerated all the `_solve_` datasets with new seed.
# Version 161
Regenerated all the `_solve_` datasets with new seed.
# Version 162
Replaced RLE compressed response with raw pixel response.
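The project's actual RLE format isn't documented in this card; as an illustration of the difference between the two response styles, a hypothetical grid could be serialized either way:

```python
# Hypothetical serializations; the generators' real formats may differ.
def raw_pixels(grid):
    # One digit per pixel, rows separated by commas, e.g. [[1, 1, 0]] -> "110"
    return ",".join("".join(str(p) for p in row) for row in grid)

def simple_rle(grid):
    # Naive run-length encoding per row: value x count, e.g. [1, 1, 0] -> "1x2 0x1"
    rows = []
    for row in grid:
        runs = []
        value, count = row[0], 1
        for p in row[1:]:
            if p == value:
                count += 1
            else:
                runs.append(f"{value}x{count}")
                value, count = p, 1
        runs.append(f"{value}x{count}")
        rows.append(" ".join(runs))
    return ",".join(rows)

grid = [[1, 1, 0], [0, 0, 0]]
print(raw_pixels(grid))  # 110,000
print(simple_rle(grid))  # 1x2 0x1,0x3
```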
# Version 163
Added more generators.
- DatasetSolveCount
- DatasetSolveCross
- DatasetSolveEdge
- DatasetSolveErosion
- DatasetSolveFlip
- DatasetSolveFractal
# Version 164
Increased row count from 1000 to 2000.
# Version 165
Added more generators.
# Version 166
Added more generators.
# Version 167
Added more generators.
# Version 168
Added more generators.
# Version 169
Generated data.
# Version 170
Generated data.
# Version 171
Generated data.
Increased output context length from 256 to 512.
# Version 172
Generated data.
# Version 173
Generated data.
# Version 174
Generated data.
# Version 175
Generated data.
# Version 176
Generated data.
# Version 177
Increased the number of rows from 2000 to 4000.
Generated data.
# Version 178
Generated data.
# Version 179
Generated data.
# Version 180
Generated data.
# Version 181
Generated data.
|
open-llm-leaderboard/dwikitheduck__gen-inst-1-details | open-llm-leaderboard | "2024-11-25T00:18:36Z" | 2 | 0 | [
"size_categories:10K<n<100K",
"format:json",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T00:14:38Z" | ---
pretty_name: Evaluation run of dwikitheduck/gen-inst-1
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [dwikitheduck/gen-inst-1](https://huggingface.co/dwikitheduck/gen-inst-1)\nThe\
\ dataset is composed of 38 configuration(s), each one corresponding to one of the\
\ evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be\
\ found as a specific split in each configuration, the split being named using the\
\ timestamp of the run.The \"train\" split is always pointing to the latest results.\n\
\nAn additional configuration \"results\" store all the aggregated results of the\
\ run.\n\nTo load the details from a run, you can for instance do the following:\n\
```python\nfrom datasets import load_dataset\ndata = load_dataset(\n\t\"open-llm-leaderboard/dwikitheduck__gen-inst-1-details\"\
,\n\tname=\"dwikitheduck__gen-inst-1__leaderboard_bbh_boolean_expressions\",\n\t\
split=\"latest\"\n)\n```\n\n## Latest results\n\nThese are the [latest results from\
\ run 2024-11-25T00-14-37.470143](https://huggingface.co/datasets/open-llm-leaderboard/dwikitheduck__gen-inst-1-details/blob/main/dwikitheduck__gen-inst-1/results_2024-11-25T00-14-37.470143.json)\
\ (note that there might be results for other tasks in the repos if successive evals\
\ didn't cover the same tasks. You find each in the results and the \"latest\" split\
\ for each eval):\n\n```python\n{\n \"all\": {\n \"leaderboard\": {\n\
\ \"acc,none\": 0.5088929521276596,\n \"acc_stderr,none\"\
: 0.004557749352736335,\n \"acc_norm,none\": 0.5772473732001556,\n \
\ \"acc_norm_stderr,none\": 0.0051629491888934955,\n \"exact_match,none\"\
: 0.0445619335347432,\n \"exact_match_stderr,none\": 0.005666316247127577,\n\
\ \"inst_level_loose_acc,none\": 0.8309352517985612,\n \"\
inst_level_loose_acc_stderr,none\": \"N/A\",\n \"prompt_level_loose_acc,none\"\
: 0.7707948243992606,\n \"prompt_level_loose_acc_stderr,none\": 0.018087757424955286,\n\
\ \"inst_level_strict_acc,none\": 0.8069544364508393,\n \"\
inst_level_strict_acc_stderr,none\": \"N/A\",\n \"prompt_level_strict_acc,none\"\
: 0.7430683918669131,\n \"prompt_level_strict_acc_stderr,none\": 0.01880296257563689,\n\
\ \"alias\": \"leaderboard\"\n },\n \"leaderboard_bbh\"\
: {\n \"acc_norm,none\": 0.6405137996875543,\n \"acc_norm_stderr,none\"\
: 0.005843282173574642,\n \"alias\": \" - leaderboard_bbh\"\n \
\ },\n \"leaderboard_bbh_boolean_expressions\": {\n \"alias\"\
: \" - leaderboard_bbh_boolean_expressions\",\n \"acc_norm,none\": 0.892,\n\
\ \"acc_norm_stderr,none\": 0.019669559381568776\n },\n \
\ \"leaderboard_bbh_causal_judgement\": {\n \"alias\": \" - leaderboard_bbh_causal_judgement\"\
,\n \"acc_norm,none\": 0.6203208556149733,\n \"acc_norm_stderr,none\"\
: 0.03558443628801667\n },\n \"leaderboard_bbh_date_understanding\"\
: {\n \"alias\": \" - leaderboard_bbh_date_understanding\",\n \
\ \"acc_norm,none\": 0.692,\n \"acc_norm_stderr,none\": 0.02925692860650181\n\
\ },\n \"leaderboard_bbh_disambiguation_qa\": {\n \"alias\"\
: \" - leaderboard_bbh_disambiguation_qa\",\n \"acc_norm,none\": 0.716,\n\
\ \"acc_norm_stderr,none\": 0.028576958730437443\n },\n \
\ \"leaderboard_bbh_formal_fallacies\": {\n \"alias\": \" - leaderboard_bbh_formal_fallacies\"\
,\n \"acc_norm,none\": 0.656,\n \"acc_norm_stderr,none\":\
\ 0.03010450339231644\n },\n \"leaderboard_bbh_geometric_shapes\"\
: {\n \"alias\": \" - leaderboard_bbh_geometric_shapes\",\n \
\ \"acc_norm,none\": 0.608,\n \"acc_norm_stderr,none\": 0.030938207620401222\n\
\ },\n \"leaderboard_bbh_hyperbaton\": {\n \"alias\": \"\
\ - leaderboard_bbh_hyperbaton\",\n \"acc_norm,none\": 0.76,\n \
\ \"acc_norm_stderr,none\": 0.027065293652238982\n },\n \"leaderboard_bbh_logical_deduction_five_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_five_objects\"\
,\n \"acc_norm,none\": 0.632,\n \"acc_norm_stderr,none\":\
\ 0.03056207062099311\n },\n \"leaderboard_bbh_logical_deduction_seven_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_seven_objects\"\
,\n \"acc_norm,none\": 0.612,\n \"acc_norm_stderr,none\":\
\ 0.030881038748993974\n },\n \"leaderboard_bbh_logical_deduction_three_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_logical_deduction_three_objects\"\
,\n \"acc_norm,none\": 0.94,\n \"acc_norm_stderr,none\": 0.015050117079158739\n\
\ },\n \"leaderboard_bbh_movie_recommendation\": {\n \"\
alias\": \" - leaderboard_bbh_movie_recommendation\",\n \"acc_norm,none\"\
: 0.76,\n \"acc_norm_stderr,none\": 0.027065293652238982\n },\n\
\ \"leaderboard_bbh_navigate\": {\n \"alias\": \" - leaderboard_bbh_navigate\"\
,\n \"acc_norm,none\": 0.672,\n \"acc_norm_stderr,none\":\
\ 0.029752391824475363\n },\n \"leaderboard_bbh_object_counting\"\
: {\n \"alias\": \" - leaderboard_bbh_object_counting\",\n \
\ \"acc_norm,none\": 0.46,\n \"acc_norm_stderr,none\": 0.031584653891499004\n\
\ },\n \"leaderboard_bbh_penguins_in_a_table\": {\n \"\
alias\": \" - leaderboard_bbh_penguins_in_a_table\",\n \"acc_norm,none\"\
: 0.636986301369863,\n \"acc_norm_stderr,none\": 0.03993397596179569\n\
\ },\n \"leaderboard_bbh_reasoning_about_colored_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_reasoning_about_colored_objects\",\n\
\ \"acc_norm,none\": 0.816,\n \"acc_norm_stderr,none\": 0.02455581299422255\n\
\ },\n \"leaderboard_bbh_ruin_names\": {\n \"alias\": \"\
\ - leaderboard_bbh_ruin_names\",\n \"acc_norm,none\": 0.816,\n \
\ \"acc_norm_stderr,none\": 0.02455581299422255\n },\n \"leaderboard_bbh_salient_translation_error_detection\"\
: {\n \"alias\": \" - leaderboard_bbh_salient_translation_error_detection\"\
,\n \"acc_norm,none\": 0.636,\n \"acc_norm_stderr,none\":\
\ 0.030491555220405475\n },\n \"leaderboard_bbh_snarks\": {\n \
\ \"alias\": \" - leaderboard_bbh_snarks\",\n \"acc_norm,none\"\
: 0.7865168539325843,\n \"acc_norm_stderr,none\": 0.030799891078809365\n\
\ },\n \"leaderboard_bbh_sports_understanding\": {\n \"\
alias\": \" - leaderboard_bbh_sports_understanding\",\n \"acc_norm,none\"\
: 0.74,\n \"acc_norm_stderr,none\": 0.027797315752644335\n },\n\
\ \"leaderboard_bbh_temporal_sequences\": {\n \"alias\": \" -\
\ leaderboard_bbh_temporal_sequences\",\n \"acc_norm,none\": 0.652,\n\
\ \"acc_norm_stderr,none\": 0.030186568464511673\n },\n \
\ \"leaderboard_bbh_tracking_shuffled_objects_five_objects\": {\n \"\
alias\": \" - leaderboard_bbh_tracking_shuffled_objects_five_objects\",\n \
\ \"acc_norm,none\": 0.224,\n \"acc_norm_stderr,none\": 0.026421361687347884\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
,\n \"acc_norm,none\": 0.184,\n \"acc_norm_stderr,none\":\
\ 0.02455581299422255\n },\n \"leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
,\n \"acc_norm,none\": 0.34,\n \"acc_norm_stderr,none\": 0.030020073605457873\n\
\ },\n \"leaderboard_bbh_web_of_lies\": {\n \"alias\":\
\ \" - leaderboard_bbh_web_of_lies\",\n \"acc_norm,none\": 0.556,\n\
\ \"acc_norm_stderr,none\": 0.03148684942554571\n },\n \
\ \"leaderboard_gpqa\": {\n \"acc_norm,none\": 0.3716442953020134,\n\
\ \"acc_norm_stderr,none\": 0.014011490289268045,\n \"alias\"\
: \" - leaderboard_gpqa\"\n },\n \"leaderboard_gpqa_diamond\": {\n\
\ \"alias\": \" - leaderboard_gpqa_diamond\",\n \"acc_norm,none\"\
: 0.35858585858585856,\n \"acc_norm_stderr,none\": 0.034169036403915276\n\
\ },\n \"leaderboard_gpqa_extended\": {\n \"alias\": \"\
\ - leaderboard_gpqa_extended\",\n \"acc_norm,none\": 0.3663003663003663,\n\
\ \"acc_norm_stderr,none\": 0.020637740788656753\n },\n \
\ \"leaderboard_gpqa_main\": {\n \"alias\": \" - leaderboard_gpqa_main\"\
,\n \"acc_norm,none\": 0.38392857142857145,\n \"acc_norm_stderr,none\"\
: 0.02300313291907409\n },\n \"leaderboard_ifeval\": {\n \
\ \"alias\": \" - leaderboard_ifeval\",\n \"prompt_level_strict_acc,none\"\
: 0.7430683918669131,\n \"prompt_level_strict_acc_stderr,none\": 0.01880296257563689,\n\
\ \"inst_level_strict_acc,none\": 0.8069544364508393,\n \"\
inst_level_strict_acc_stderr,none\": \"N/A\",\n \"prompt_level_loose_acc,none\"\
: 0.7707948243992606,\n \"prompt_level_loose_acc_stderr,none\": 0.018087757424955286,\n\
\ \"inst_level_loose_acc,none\": 0.8309352517985612,\n \"\
inst_level_loose_acc_stderr,none\": \"N/A\"\n },\n \"leaderboard_math_hard\"\
: {\n \"exact_match,none\": 0.0445619335347432,\n \"exact_match_stderr,none\"\
: 0.005666316247127577,\n \"alias\": \" - leaderboard_math_hard\"\n \
\ },\n \"leaderboard_math_algebra_hard\": {\n \"alias\"\
: \" - leaderboard_math_algebra_hard\",\n \"exact_match,none\": 0.06188925081433225,\n\
\ \"exact_match_stderr,none\": 0.013774440126929627\n },\n \
\ \"leaderboard_math_counting_and_prob_hard\": {\n \"alias\": \"\
\ - leaderboard_math_counting_and_prob_hard\",\n \"exact_match,none\"\
: 0.04065040650406504,\n \"exact_match_stderr,none\": 0.017878907564437465\n\
\ },\n \"leaderboard_math_geometry_hard\": {\n \"alias\"\
: \" - leaderboard_math_geometry_hard\",\n \"exact_match,none\": 0.022727272727272728,\n\
\ \"exact_match_stderr,none\": 0.0130210469090637\n },\n \
\ \"leaderboard_math_intermediate_algebra_hard\": {\n \"alias\": \"\
\ - leaderboard_math_intermediate_algebra_hard\",\n \"exact_match,none\"\
: 0.039285714285714285,\n \"exact_match_stderr,none\": 0.011630873964205717\n\
\ },\n \"leaderboard_math_num_theory_hard\": {\n \"alias\"\
: \" - leaderboard_math_num_theory_hard\",\n \"exact_match,none\": 0.012987012987012988,\n\
\ \"exact_match_stderr,none\": 0.009153145279150204\n },\n \
\ \"leaderboard_math_prealgebra_hard\": {\n \"alias\": \" - leaderboard_math_prealgebra_hard\"\
,\n \"exact_match,none\": 0.06217616580310881,\n \"exact_match_stderr,none\"\
: 0.01742697415424056\n },\n \"leaderboard_math_precalculus_hard\"\
: {\n \"alias\": \" - leaderboard_math_precalculus_hard\",\n \
\ \"exact_match,none\": 0.05185185185185185,\n \"exact_match_stderr,none\"\
: 0.019154368449050496\n },\n \"leaderboard_mmlu_pro\": {\n \
\ \"alias\": \" - leaderboard_mmlu_pro\",\n \"acc,none\": 0.5088929521276596,\n\
\ \"acc_stderr,none\": 0.004557749352736335\n },\n \"leaderboard_musr\"\
: {\n \"acc_norm,none\": 0.4193121693121693,\n \"acc_norm_stderr,none\"\
: 0.017343672073569773,\n \"alias\": \" - leaderboard_musr\"\n \
\ },\n \"leaderboard_musr_murder_mysteries\": {\n \"alias\":\
\ \" - leaderboard_musr_murder_mysteries\",\n \"acc_norm,none\": 0.584,\n\
\ \"acc_norm_stderr,none\": 0.031235856237014505\n },\n \
\ \"leaderboard_musr_object_placements\": {\n \"alias\": \" - leaderboard_musr_object_placements\"\
,\n \"acc_norm,none\": 0.265625,\n \"acc_norm_stderr,none\"\
: 0.027658162598649488\n },\n \"leaderboard_musr_team_allocation\"\
: {\n \"alias\": \" - leaderboard_musr_team_allocation\",\n \
\ \"acc_norm,none\": 0.412,\n \"acc_norm_stderr,none\": 0.03119159602602282\n\
\ }\n },\n \"leaderboard\": {\n \"acc,none\": 0.5088929521276596,\n\
\ \"acc_stderr,none\": 0.004557749352736335,\n \"acc_norm,none\":\
\ 0.5772473732001556,\n \"acc_norm_stderr,none\": 0.0051629491888934955,\n\
\ \"exact_match,none\": 0.0445619335347432,\n \"exact_match_stderr,none\"\
: 0.005666316247127577,\n \"inst_level_loose_acc,none\": 0.8309352517985612,\n\
\ \"inst_level_loose_acc_stderr,none\": \"N/A\",\n \"prompt_level_loose_acc,none\"\
: 0.7707948243992606,\n \"prompt_level_loose_acc_stderr,none\": 0.018087757424955286,\n\
\ \"inst_level_strict_acc,none\": 0.8069544364508393,\n \"inst_level_strict_acc_stderr,none\"\
: \"N/A\",\n \"prompt_level_strict_acc,none\": 0.7430683918669131,\n \
\ \"prompt_level_strict_acc_stderr,none\": 0.01880296257563689,\n \"alias\"\
: \"leaderboard\"\n },\n \"leaderboard_bbh\": {\n \"acc_norm,none\"\
: 0.6405137996875543,\n \"acc_norm_stderr,none\": 0.005843282173574642,\n\
\ \"alias\": \" - leaderboard_bbh\"\n },\n \"leaderboard_bbh_boolean_expressions\"\
: {\n \"alias\": \" - leaderboard_bbh_boolean_expressions\",\n \"\
acc_norm,none\": 0.892,\n \"acc_norm_stderr,none\": 0.019669559381568776\n\
\ },\n \"leaderboard_bbh_causal_judgement\": {\n \"alias\": \" - leaderboard_bbh_causal_judgement\"\
,\n \"acc_norm,none\": 0.6203208556149733,\n \"acc_norm_stderr,none\"\
: 0.03558443628801667\n },\n \"leaderboard_bbh_date_understanding\": {\n \
\ \"alias\": \" - leaderboard_bbh_date_understanding\",\n \"acc_norm,none\"\
: 0.692,\n \"acc_norm_stderr,none\": 0.02925692860650181\n },\n \"\
leaderboard_bbh_disambiguation_qa\": {\n \"alias\": \" - leaderboard_bbh_disambiguation_qa\"\
,\n \"acc_norm,none\": 0.716,\n \"acc_norm_stderr,none\": 0.028576958730437443\n\
\ },\n \"leaderboard_bbh_formal_fallacies\": {\n \"alias\": \" - leaderboard_bbh_formal_fallacies\"\
,\n \"acc_norm,none\": 0.656,\n \"acc_norm_stderr,none\": 0.03010450339231644\n\
\ },\n \"leaderboard_bbh_geometric_shapes\": {\n \"alias\": \" - leaderboard_bbh_geometric_shapes\"\
,\n \"acc_norm,none\": 0.608,\n \"acc_norm_stderr,none\": 0.030938207620401222\n\
\ },\n \"leaderboard_bbh_hyperbaton\": {\n \"alias\": \" - leaderboard_bbh_hyperbaton\"\
,\n \"acc_norm,none\": 0.76,\n \"acc_norm_stderr,none\": 0.027065293652238982\n\
\ },\n \"leaderboard_bbh_logical_deduction_five_objects\": {\n \"alias\"\
: \" - leaderboard_bbh_logical_deduction_five_objects\",\n \"acc_norm,none\"\
: 0.632,\n \"acc_norm_stderr,none\": 0.03056207062099311\n },\n \"\
leaderboard_bbh_logical_deduction_seven_objects\": {\n \"alias\": \" - leaderboard_bbh_logical_deduction_seven_objects\"\
,\n \"acc_norm,none\": 0.612,\n \"acc_norm_stderr,none\": 0.030881038748993974\n\
\ },\n \"leaderboard_bbh_logical_deduction_three_objects\": {\n \"\
alias\": \" - leaderboard_bbh_logical_deduction_three_objects\",\n \"acc_norm,none\"\
: 0.94,\n \"acc_norm_stderr,none\": 0.015050117079158739\n },\n \"\
leaderboard_bbh_movie_recommendation\": {\n \"alias\": \" - leaderboard_bbh_movie_recommendation\"\
,\n \"acc_norm,none\": 0.76,\n \"acc_norm_stderr,none\": 0.027065293652238982\n\
\ },\n \"leaderboard_bbh_navigate\": {\n \"alias\": \" - leaderboard_bbh_navigate\"\
,\n \"acc_norm,none\": 0.672,\n \"acc_norm_stderr,none\": 0.029752391824475363\n\
\ },\n \"leaderboard_bbh_object_counting\": {\n \"alias\": \" - leaderboard_bbh_object_counting\"\
,\n \"acc_norm,none\": 0.46,\n \"acc_norm_stderr,none\": 0.031584653891499004\n\
\ },\n \"leaderboard_bbh_penguins_in_a_table\": {\n \"alias\": \" \
\ - leaderboard_bbh_penguins_in_a_table\",\n \"acc_norm,none\": 0.636986301369863,\n\
\ \"acc_norm_stderr,none\": 0.03993397596179569\n },\n \"leaderboard_bbh_reasoning_about_colored_objects\"\
: {\n \"alias\": \" - leaderboard_bbh_reasoning_about_colored_objects\"\
,\n \"acc_norm,none\": 0.816,\n \"acc_norm_stderr,none\": 0.02455581299422255\n\
\ },\n \"leaderboard_bbh_ruin_names\": {\n \"alias\": \" - leaderboard_bbh_ruin_names\"\
,\n \"acc_norm,none\": 0.816,\n \"acc_norm_stderr,none\": 0.02455581299422255\n\
\ },\n \"leaderboard_bbh_salient_translation_error_detection\": {\n \
\ \"alias\": \" - leaderboard_bbh_salient_translation_error_detection\",\n \
\ \"acc_norm,none\": 0.636,\n \"acc_norm_stderr,none\": 0.030491555220405475\n\
\ },\n \"leaderboard_bbh_snarks\": {\n \"alias\": \" - leaderboard_bbh_snarks\"\
,\n \"acc_norm,none\": 0.7865168539325843,\n \"acc_norm_stderr,none\"\
: 0.030799891078809365\n },\n \"leaderboard_bbh_sports_understanding\": {\n\
\ \"alias\": \" - leaderboard_bbh_sports_understanding\",\n \"acc_norm,none\"\
: 0.74,\n \"acc_norm_stderr,none\": 0.027797315752644335\n },\n \"\
leaderboard_bbh_temporal_sequences\": {\n \"alias\": \" - leaderboard_bbh_temporal_sequences\"\
,\n \"acc_norm,none\": 0.652,\n \"acc_norm_stderr,none\": 0.030186568464511673\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_five_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_five_objects\"\
,\n \"acc_norm,none\": 0.224,\n \"acc_norm_stderr,none\": 0.026421361687347884\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_seven_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_seven_objects\"\
,\n \"acc_norm,none\": 0.184,\n \"acc_norm_stderr,none\": 0.02455581299422255\n\
\ },\n \"leaderboard_bbh_tracking_shuffled_objects_three_objects\": {\n \
\ \"alias\": \" - leaderboard_bbh_tracking_shuffled_objects_three_objects\"\
,\n \"acc_norm,none\": 0.34,\n \"acc_norm_stderr,none\": 0.030020073605457873\n\
\ },\n \"leaderboard_bbh_web_of_lies\": {\n \"alias\": \" - leaderboard_bbh_web_of_lies\"\
,\n \"acc_norm,none\": 0.556,\n \"acc_norm_stderr,none\": 0.03148684942554571\n\
\ },\n \"leaderboard_gpqa\": {\n \"acc_norm,none\": 0.3716442953020134,\n\
\ \"acc_norm_stderr,none\": 0.014011490289268045,\n \"alias\": \"\
\ - leaderboard_gpqa\"\n },\n \"leaderboard_gpqa_diamond\": {\n \"\
alias\": \" - leaderboard_gpqa_diamond\",\n \"acc_norm,none\": 0.35858585858585856,\n\
\ \"acc_norm_stderr,none\": 0.034169036403915276\n },\n \"leaderboard_gpqa_extended\"\
: {\n \"alias\": \" - leaderboard_gpqa_extended\",\n \"acc_norm,none\"\
: 0.3663003663003663,\n \"acc_norm_stderr,none\": 0.020637740788656753\n\
\ },\n \"leaderboard_gpqa_main\": {\n \"alias\": \" - leaderboard_gpqa_main\"\
,\n \"acc_norm,none\": 0.38392857142857145,\n \"acc_norm_stderr,none\"\
: 0.02300313291907409\n },\n \"leaderboard_ifeval\": {\n \"alias\"\
: \" - leaderboard_ifeval\",\n \"prompt_level_strict_acc,none\": 0.7430683918669131,\n\
\ \"prompt_level_strict_acc_stderr,none\": 0.01880296257563689,\n \
\ \"inst_level_strict_acc,none\": 0.8069544364508393,\n \"inst_level_strict_acc_stderr,none\"\
: \"N/A\",\n \"prompt_level_loose_acc,none\": 0.7707948243992606,\n \
\ \"prompt_level_loose_acc_stderr,none\": 0.018087757424955286,\n \"inst_level_loose_acc,none\"\
: 0.8309352517985612,\n \"inst_level_loose_acc_stderr,none\": \"N/A\"\n \
\ },\n \"leaderboard_math_hard\": {\n \"exact_match,none\": 0.0445619335347432,\n\
\ \"exact_match_stderr,none\": 0.005666316247127577,\n \"alias\":\
\ \" - leaderboard_math_hard\"\n },\n \"leaderboard_math_algebra_hard\": {\n\
\ \"alias\": \" - leaderboard_math_algebra_hard\",\n \"exact_match,none\"\
: 0.06188925081433225,\n \"exact_match_stderr,none\": 0.013774440126929627\n\
\ },\n \"leaderboard_math_counting_and_prob_hard\": {\n \"alias\":\
\ \" - leaderboard_math_counting_and_prob_hard\",\n \"exact_match,none\"\
: 0.04065040650406504,\n \"exact_match_stderr,none\": 0.017878907564437465\n\
\ },\n \"leaderboard_math_geometry_hard\": {\n \"alias\": \" - leaderboard_math_geometry_hard\"\
,\n \"exact_match,none\": 0.022727272727272728,\n \"exact_match_stderr,none\"\
: 0.0130210469090637\n },\n \"leaderboard_math_intermediate_algebra_hard\"\
: {\n \"alias\": \" - leaderboard_math_intermediate_algebra_hard\",\n \
\ \"exact_match,none\": 0.039285714285714285,\n \"exact_match_stderr,none\"\
: 0.011630873964205717\n },\n \"leaderboard_math_num_theory_hard\": {\n \
\ \"alias\": \" - leaderboard_math_num_theory_hard\",\n \"exact_match,none\"\
: 0.012987012987012988,\n \"exact_match_stderr,none\": 0.009153145279150204\n\
\ },\n \"leaderboard_math_prealgebra_hard\": {\n \"alias\": \" - leaderboard_math_prealgebra_hard\"\
,\n \"exact_match,none\": 0.06217616580310881,\n \"exact_match_stderr,none\"\
: 0.01742697415424056\n },\n \"leaderboard_math_precalculus_hard\": {\n \
\ \"alias\": \" - leaderboard_math_precalculus_hard\",\n \"exact_match,none\"\
: 0.05185185185185185,\n \"exact_match_stderr,none\": 0.019154368449050496\n\
\ },\n \"leaderboard_mmlu_pro\": {\n \"alias\": \" - leaderboard_mmlu_pro\"\
,\n \"acc,none\": 0.5088929521276596,\n \"acc_stderr,none\": 0.004557749352736335\n\
\ },\n \"leaderboard_musr\": {\n \"acc_norm,none\": 0.4193121693121693,\n\
\ \"acc_norm_stderr,none\": 0.017343672073569773,\n \"alias\": \"\
\ - leaderboard_musr\"\n },\n \"leaderboard_musr_murder_mysteries\": {\n \
\ \"alias\": \" - leaderboard_musr_murder_mysteries\",\n \"acc_norm,none\"\
: 0.584,\n \"acc_norm_stderr,none\": 0.031235856237014505\n },\n \"\
leaderboard_musr_object_placements\": {\n \"alias\": \" - leaderboard_musr_object_placements\"\
,\n \"acc_norm,none\": 0.265625,\n \"acc_norm_stderr,none\": 0.027658162598649488\n\
\ },\n \"leaderboard_musr_team_allocation\": {\n \"alias\": \" - leaderboard_musr_team_allocation\"\
,\n \"acc_norm,none\": 0.412,\n \"acc_norm_stderr,none\": 0.03119159602602282\n\
\ }\n}\n```"
repo_url: https://huggingface.co/dwikitheduck/gen-inst-1
leaderboard_url: ''
point_of_contact: ''
configs:
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_boolean_expressions
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_boolean_expressions_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_boolean_expressions_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_causal_judgement
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_causal_judgement_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_causal_judgement_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_date_understanding
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_date_understanding_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_date_understanding_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_disambiguation_qa
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_disambiguation_qa_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_disambiguation_qa_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_formal_fallacies
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_formal_fallacies_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_formal_fallacies_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_geometric_shapes
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_geometric_shapes_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_geometric_shapes_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_hyperbaton
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_hyperbaton_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_hyperbaton_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_logical_deduction_five_objects
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_logical_deduction_five_objects_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_five_objects_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_logical_deduction_seven_objects
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_logical_deduction_seven_objects_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_seven_objects_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_logical_deduction_three_objects
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_logical_deduction_three_objects_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_logical_deduction_three_objects_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_movie_recommendation
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_movie_recommendation_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_movie_recommendation_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_navigate
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_navigate_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_navigate_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_object_counting
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_object_counting_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_object_counting_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_penguins_in_a_table
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_penguins_in_a_table_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_penguins_in_a_table_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_reasoning_about_colored_objects
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_reasoning_about_colored_objects_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_reasoning_about_colored_objects_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_ruin_names
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_ruin_names_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_ruin_names_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_salient_translation_error_detection
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_salient_translation_error_detection_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_salient_translation_error_detection_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_snarks
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_snarks_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_snarks_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_sports_understanding
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_sports_understanding_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_sports_understanding_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_temporal_sequences
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_temporal_sequences_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_temporal_sequences_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_tracking_shuffled_objects_five_objects
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_five_objects_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_five_objects_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_tracking_shuffled_objects_seven_objects
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_seven_objects_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_seven_objects_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_tracking_shuffled_objects_three_objects
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_three_objects_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_tracking_shuffled_objects_three_objects_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_bbh_web_of_lies
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_bbh_web_of_lies_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_bbh_web_of_lies_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_gpqa_diamond
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_gpqa_diamond_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_diamond_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_gpqa_extended
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_gpqa_extended_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_extended_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_gpqa_main
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_gpqa_main_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_gpqa_main_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_ifeval
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_ifeval_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_ifeval_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_math_algebra_hard
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_math_algebra_hard_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_algebra_hard_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_math_counting_and_prob_hard
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_math_counting_and_prob_hard_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_counting_and_prob_hard_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_math_geometry_hard
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_math_geometry_hard_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_geometry_hard_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_math_intermediate_algebra_hard
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_math_intermediate_algebra_hard_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_intermediate_algebra_hard_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_math_num_theory_hard
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_math_num_theory_hard_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_num_theory_hard_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_math_prealgebra_hard
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_math_prealgebra_hard_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_prealgebra_hard_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_math_precalculus_hard
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_math_precalculus_hard_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_math_precalculus_hard_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_mmlu_pro
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_mmlu_pro_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_mmlu_pro_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_musr_murder_mysteries
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_musr_murder_mysteries_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_murder_mysteries_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_musr_object_placements
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_musr_object_placements_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_object_placements_2024-11-25T00-14-37.470143.jsonl'
- config_name: dwikitheduck__gen-inst-1__leaderboard_musr_team_allocation
data_files:
- split: 2024_11_25T00_14_37.470143
path:
- '**/samples_leaderboard_musr_team_allocation_2024-11-25T00-14-37.470143.jsonl'
- split: latest
path:
- '**/samples_leaderboard_musr_team_allocation_2024-11-25T00-14-37.470143.jsonl'
---
# Dataset Card for Evaluation run of dwikitheduck/gen-inst-1
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [dwikitheduck/gen-inst-1](https://huggingface.co/dwikitheduck/gen-inst-1).
The dataset is composed of 38 configuration(s), each one corresponding to one of the evaluated tasks.
The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "latest" split always points to the most recent results.
An additional configuration "results" stores all the aggregated results of the run.
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset(
"open-llm-leaderboard/dwikitheduck__gen-inst-1-details",
name="dwikitheduck__gen-inst-1__leaderboard_bbh_boolean_expressions",
split="latest"
)
```
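Each run also remains available under its own timestamped split (the split names are listed in the `configs` section of this card), so a specific evaluation can be pinned instead of following `latest`. A minimal variant of the call above, reusing one of the config and split names listed in this card:
```python
from datasets import load_dataset

# Pin a specific run by its timestamped split rather than following "latest".
# The config and split names below are taken from the configs listed in this card.
data = load_dataset(
    "open-llm-leaderboard/dwikitheduck__gen-inst-1-details",
    name="dwikitheduck__gen-inst-1__leaderboard_ifeval",
    split="2024_11_25T00_14_37.470143",
)
print(data)
```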
## Latest results
These are the [latest results from run 2024-11-25T00-14-37.470143](https://huggingface.co/datasets/open-llm-leaderboard/dwikitheduck__gen-inst-1-details/blob/main/dwikitheduck__gen-inst-1/results_2024-11-25T00-14-37.470143.json) (note that there might be results for other tasks in the repository if successive evals didn't cover the same tasks; you can find each of them in the results and in the "latest" split of each eval):
```json
{
"all": {
"leaderboard": {
"acc,none": 0.5088929521276596,
"acc_stderr,none": 0.004557749352736335,
"acc_norm,none": 0.5772473732001556,
"acc_norm_stderr,none": 0.0051629491888934955,
"exact_match,none": 0.0445619335347432,
"exact_match_stderr,none": 0.005666316247127577,
"inst_level_loose_acc,none": 0.8309352517985612,
"inst_level_loose_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.7707948243992606,
"prompt_level_loose_acc_stderr,none": 0.018087757424955286,
"inst_level_strict_acc,none": 0.8069544364508393,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_strict_acc,none": 0.7430683918669131,
"prompt_level_strict_acc_stderr,none": 0.01880296257563689,
"alias": "leaderboard"
},
"leaderboard_bbh": {
"acc_norm,none": 0.6405137996875543,
"acc_norm_stderr,none": 0.005843282173574642,
"alias": " - leaderboard_bbh"
},
"leaderboard_bbh_boolean_expressions": {
"alias": " - leaderboard_bbh_boolean_expressions",
"acc_norm,none": 0.892,
"acc_norm_stderr,none": 0.019669559381568776
},
"leaderboard_bbh_causal_judgement": {
"alias": " - leaderboard_bbh_causal_judgement",
"acc_norm,none": 0.6203208556149733,
"acc_norm_stderr,none": 0.03558443628801667
},
"leaderboard_bbh_date_understanding": {
"alias": " - leaderboard_bbh_date_understanding",
"acc_norm,none": 0.692,
"acc_norm_stderr,none": 0.02925692860650181
},
"leaderboard_bbh_disambiguation_qa": {
"alias": " - leaderboard_bbh_disambiguation_qa",
"acc_norm,none": 0.716,
"acc_norm_stderr,none": 0.028576958730437443
},
"leaderboard_bbh_formal_fallacies": {
"alias": " - leaderboard_bbh_formal_fallacies",
"acc_norm,none": 0.656,
"acc_norm_stderr,none": 0.03010450339231644
},
"leaderboard_bbh_geometric_shapes": {
"alias": " - leaderboard_bbh_geometric_shapes",
"acc_norm,none": 0.608,
"acc_norm_stderr,none": 0.030938207620401222
},
"leaderboard_bbh_hyperbaton": {
"alias": " - leaderboard_bbh_hyperbaton",
"acc_norm,none": 0.76,
"acc_norm_stderr,none": 0.027065293652238982
},
"leaderboard_bbh_logical_deduction_five_objects": {
"alias": " - leaderboard_bbh_logical_deduction_five_objects",
"acc_norm,none": 0.632,
"acc_norm_stderr,none": 0.03056207062099311
},
"leaderboard_bbh_logical_deduction_seven_objects": {
"alias": " - leaderboard_bbh_logical_deduction_seven_objects",
"acc_norm,none": 0.612,
"acc_norm_stderr,none": 0.030881038748993974
},
"leaderboard_bbh_logical_deduction_three_objects": {
"alias": " - leaderboard_bbh_logical_deduction_three_objects",
"acc_norm,none": 0.94,
"acc_norm_stderr,none": 0.015050117079158739
},
"leaderboard_bbh_movie_recommendation": {
"alias": " - leaderboard_bbh_movie_recommendation",
"acc_norm,none": 0.76,
"acc_norm_stderr,none": 0.027065293652238982
},
"leaderboard_bbh_navigate": {
"alias": " - leaderboard_bbh_navigate",
"acc_norm,none": 0.672,
"acc_norm_stderr,none": 0.029752391824475363
},
"leaderboard_bbh_object_counting": {
"alias": " - leaderboard_bbh_object_counting",
"acc_norm,none": 0.46,
"acc_norm_stderr,none": 0.031584653891499004
},
"leaderboard_bbh_penguins_in_a_table": {
"alias": " - leaderboard_bbh_penguins_in_a_table",
"acc_norm,none": 0.636986301369863,
"acc_norm_stderr,none": 0.03993397596179569
},
"leaderboard_bbh_reasoning_about_colored_objects": {
"alias": " - leaderboard_bbh_reasoning_about_colored_objects",
"acc_norm,none": 0.816,
"acc_norm_stderr,none": 0.02455581299422255
},
"leaderboard_bbh_ruin_names": {
"alias": " - leaderboard_bbh_ruin_names",
"acc_norm,none": 0.816,
"acc_norm_stderr,none": 0.02455581299422255
},
"leaderboard_bbh_salient_translation_error_detection": {
"alias": " - leaderboard_bbh_salient_translation_error_detection",
"acc_norm,none": 0.636,
"acc_norm_stderr,none": 0.030491555220405475
},
"leaderboard_bbh_snarks": {
"alias": " - leaderboard_bbh_snarks",
"acc_norm,none": 0.7865168539325843,
"acc_norm_stderr,none": 0.030799891078809365
},
"leaderboard_bbh_sports_understanding": {
"alias": " - leaderboard_bbh_sports_understanding",
"acc_norm,none": 0.74,
"acc_norm_stderr,none": 0.027797315752644335
},
"leaderboard_bbh_temporal_sequences": {
"alias": " - leaderboard_bbh_temporal_sequences",
"acc_norm,none": 0.652,
"acc_norm_stderr,none": 0.030186568464511673
},
"leaderboard_bbh_tracking_shuffled_objects_five_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_five_objects",
"acc_norm,none": 0.224,
"acc_norm_stderr,none": 0.026421361687347884
},
"leaderboard_bbh_tracking_shuffled_objects_seven_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_seven_objects",
"acc_norm,none": 0.184,
"acc_norm_stderr,none": 0.02455581299422255
},
"leaderboard_bbh_tracking_shuffled_objects_three_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_three_objects",
"acc_norm,none": 0.34,
"acc_norm_stderr,none": 0.030020073605457873
},
"leaderboard_bbh_web_of_lies": {
"alias": " - leaderboard_bbh_web_of_lies",
"acc_norm,none": 0.556,
"acc_norm_stderr,none": 0.03148684942554571
},
"leaderboard_gpqa": {
"acc_norm,none": 0.3716442953020134,
"acc_norm_stderr,none": 0.014011490289268045,
"alias": " - leaderboard_gpqa"
},
"leaderboard_gpqa_diamond": {
"alias": " - leaderboard_gpqa_diamond",
"acc_norm,none": 0.35858585858585856,
"acc_norm_stderr,none": 0.034169036403915276
},
"leaderboard_gpqa_extended": {
"alias": " - leaderboard_gpqa_extended",
"acc_norm,none": 0.3663003663003663,
"acc_norm_stderr,none": 0.020637740788656753
},
"leaderboard_gpqa_main": {
"alias": " - leaderboard_gpqa_main",
"acc_norm,none": 0.38392857142857145,
"acc_norm_stderr,none": 0.02300313291907409
},
"leaderboard_ifeval": {
"alias": " - leaderboard_ifeval",
"prompt_level_strict_acc,none": 0.7430683918669131,
"prompt_level_strict_acc_stderr,none": 0.01880296257563689,
"inst_level_strict_acc,none": 0.8069544364508393,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.7707948243992606,
"prompt_level_loose_acc_stderr,none": 0.018087757424955286,
"inst_level_loose_acc,none": 0.8309352517985612,
"inst_level_loose_acc_stderr,none": "N/A"
},
"leaderboard_math_hard": {
"exact_match,none": 0.0445619335347432,
"exact_match_stderr,none": 0.005666316247127577,
"alias": " - leaderboard_math_hard"
},
"leaderboard_math_algebra_hard": {
"alias": " - leaderboard_math_algebra_hard",
"exact_match,none": 0.06188925081433225,
"exact_match_stderr,none": 0.013774440126929627
},
"leaderboard_math_counting_and_prob_hard": {
"alias": " - leaderboard_math_counting_and_prob_hard",
"exact_match,none": 0.04065040650406504,
"exact_match_stderr,none": 0.017878907564437465
},
"leaderboard_math_geometry_hard": {
"alias": " - leaderboard_math_geometry_hard",
"exact_match,none": 0.022727272727272728,
"exact_match_stderr,none": 0.0130210469090637
},
"leaderboard_math_intermediate_algebra_hard": {
"alias": " - leaderboard_math_intermediate_algebra_hard",
"exact_match,none": 0.039285714285714285,
"exact_match_stderr,none": 0.011630873964205717
},
"leaderboard_math_num_theory_hard": {
"alias": " - leaderboard_math_num_theory_hard",
"exact_match,none": 0.012987012987012988,
"exact_match_stderr,none": 0.009153145279150204
},
"leaderboard_math_prealgebra_hard": {
"alias": " - leaderboard_math_prealgebra_hard",
"exact_match,none": 0.06217616580310881,
"exact_match_stderr,none": 0.01742697415424056
},
"leaderboard_math_precalculus_hard": {
"alias": " - leaderboard_math_precalculus_hard",
"exact_match,none": 0.05185185185185185,
"exact_match_stderr,none": 0.019154368449050496
},
"leaderboard_mmlu_pro": {
"alias": " - leaderboard_mmlu_pro",
"acc,none": 0.5088929521276596,
"acc_stderr,none": 0.004557749352736335
},
"leaderboard_musr": {
"acc_norm,none": 0.4193121693121693,
"acc_norm_stderr,none": 0.017343672073569773,
"alias": " - leaderboard_musr"
},
"leaderboard_musr_murder_mysteries": {
"alias": " - leaderboard_musr_murder_mysteries",
"acc_norm,none": 0.584,
"acc_norm_stderr,none": 0.031235856237014505
},
"leaderboard_musr_object_placements": {
"alias": " - leaderboard_musr_object_placements",
"acc_norm,none": 0.265625,
"acc_norm_stderr,none": 0.027658162598649488
},
"leaderboard_musr_team_allocation": {
"alias": " - leaderboard_musr_team_allocation",
"acc_norm,none": 0.412,
"acc_norm_stderr,none": 0.03119159602602282
}
},
"leaderboard": {
"acc,none": 0.5088929521276596,
"acc_stderr,none": 0.004557749352736335,
"acc_norm,none": 0.5772473732001556,
"acc_norm_stderr,none": 0.0051629491888934955,
"exact_match,none": 0.0445619335347432,
"exact_match_stderr,none": 0.005666316247127577,
"inst_level_loose_acc,none": 0.8309352517985612,
"inst_level_loose_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.7707948243992606,
"prompt_level_loose_acc_stderr,none": 0.018087757424955286,
"inst_level_strict_acc,none": 0.8069544364508393,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_strict_acc,none": 0.7430683918669131,
"prompt_level_strict_acc_stderr,none": 0.01880296257563689,
"alias": "leaderboard"
},
"leaderboard_bbh": {
"acc_norm,none": 0.6405137996875543,
"acc_norm_stderr,none": 0.005843282173574642,
"alias": " - leaderboard_bbh"
},
"leaderboard_bbh_boolean_expressions": {
"alias": " - leaderboard_bbh_boolean_expressions",
"acc_norm,none": 0.892,
"acc_norm_stderr,none": 0.019669559381568776
},
"leaderboard_bbh_causal_judgement": {
"alias": " - leaderboard_bbh_causal_judgement",
"acc_norm,none": 0.6203208556149733,
"acc_norm_stderr,none": 0.03558443628801667
},
"leaderboard_bbh_date_understanding": {
"alias": " - leaderboard_bbh_date_understanding",
"acc_norm,none": 0.692,
"acc_norm_stderr,none": 0.02925692860650181
},
"leaderboard_bbh_disambiguation_qa": {
"alias": " - leaderboard_bbh_disambiguation_qa",
"acc_norm,none": 0.716,
"acc_norm_stderr,none": 0.028576958730437443
},
"leaderboard_bbh_formal_fallacies": {
"alias": " - leaderboard_bbh_formal_fallacies",
"acc_norm,none": 0.656,
"acc_norm_stderr,none": 0.03010450339231644
},
"leaderboard_bbh_geometric_shapes": {
"alias": " - leaderboard_bbh_geometric_shapes",
"acc_norm,none": 0.608,
"acc_norm_stderr,none": 0.030938207620401222
},
"leaderboard_bbh_hyperbaton": {
"alias": " - leaderboard_bbh_hyperbaton",
"acc_norm,none": 0.76,
"acc_norm_stderr,none": 0.027065293652238982
},
"leaderboard_bbh_logical_deduction_five_objects": {
"alias": " - leaderboard_bbh_logical_deduction_five_objects",
"acc_norm,none": 0.632,
"acc_norm_stderr,none": 0.03056207062099311
},
"leaderboard_bbh_logical_deduction_seven_objects": {
"alias": " - leaderboard_bbh_logical_deduction_seven_objects",
"acc_norm,none": 0.612,
"acc_norm_stderr,none": 0.030881038748993974
},
"leaderboard_bbh_logical_deduction_three_objects": {
"alias": " - leaderboard_bbh_logical_deduction_three_objects",
"acc_norm,none": 0.94,
"acc_norm_stderr,none": 0.015050117079158739
},
"leaderboard_bbh_movie_recommendation": {
"alias": " - leaderboard_bbh_movie_recommendation",
"acc_norm,none": 0.76,
"acc_norm_stderr,none": 0.027065293652238982
},
"leaderboard_bbh_navigate": {
"alias": " - leaderboard_bbh_navigate",
"acc_norm,none": 0.672,
"acc_norm_stderr,none": 0.029752391824475363
},
"leaderboard_bbh_object_counting": {
"alias": " - leaderboard_bbh_object_counting",
"acc_norm,none": 0.46,
"acc_norm_stderr,none": 0.031584653891499004
},
"leaderboard_bbh_penguins_in_a_table": {
"alias": " - leaderboard_bbh_penguins_in_a_table",
"acc_norm,none": 0.636986301369863,
"acc_norm_stderr,none": 0.03993397596179569
},
"leaderboard_bbh_reasoning_about_colored_objects": {
"alias": " - leaderboard_bbh_reasoning_about_colored_objects",
"acc_norm,none": 0.816,
"acc_norm_stderr,none": 0.02455581299422255
},
"leaderboard_bbh_ruin_names": {
"alias": " - leaderboard_bbh_ruin_names",
"acc_norm,none": 0.816,
"acc_norm_stderr,none": 0.02455581299422255
},
"leaderboard_bbh_salient_translation_error_detection": {
"alias": " - leaderboard_bbh_salient_translation_error_detection",
"acc_norm,none": 0.636,
"acc_norm_stderr,none": 0.030491555220405475
},
"leaderboard_bbh_snarks": {
"alias": " - leaderboard_bbh_snarks",
"acc_norm,none": 0.7865168539325843,
"acc_norm_stderr,none": 0.030799891078809365
},
"leaderboard_bbh_sports_understanding": {
"alias": " - leaderboard_bbh_sports_understanding",
"acc_norm,none": 0.74,
"acc_norm_stderr,none": 0.027797315752644335
},
"leaderboard_bbh_temporal_sequences": {
"alias": " - leaderboard_bbh_temporal_sequences",
"acc_norm,none": 0.652,
"acc_norm_stderr,none": 0.030186568464511673
},
"leaderboard_bbh_tracking_shuffled_objects_five_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_five_objects",
"acc_norm,none": 0.224,
"acc_norm_stderr,none": 0.026421361687347884
},
"leaderboard_bbh_tracking_shuffled_objects_seven_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_seven_objects",
"acc_norm,none": 0.184,
"acc_norm_stderr,none": 0.02455581299422255
},
"leaderboard_bbh_tracking_shuffled_objects_three_objects": {
"alias": " - leaderboard_bbh_tracking_shuffled_objects_three_objects",
"acc_norm,none": 0.34,
"acc_norm_stderr,none": 0.030020073605457873
},
"leaderboard_bbh_web_of_lies": {
"alias": " - leaderboard_bbh_web_of_lies",
"acc_norm,none": 0.556,
"acc_norm_stderr,none": 0.03148684942554571
},
"leaderboard_gpqa": {
"acc_norm,none": 0.3716442953020134,
"acc_norm_stderr,none": 0.014011490289268045,
"alias": " - leaderboard_gpqa"
},
"leaderboard_gpqa_diamond": {
"alias": " - leaderboard_gpqa_diamond",
"acc_norm,none": 0.35858585858585856,
"acc_norm_stderr,none": 0.034169036403915276
},
"leaderboard_gpqa_extended": {
"alias": " - leaderboard_gpqa_extended",
"acc_norm,none": 0.3663003663003663,
"acc_norm_stderr,none": 0.020637740788656753
},
"leaderboard_gpqa_main": {
"alias": " - leaderboard_gpqa_main",
"acc_norm,none": 0.38392857142857145,
"acc_norm_stderr,none": 0.02300313291907409
},
"leaderboard_ifeval": {
"alias": " - leaderboard_ifeval",
"prompt_level_strict_acc,none": 0.7430683918669131,
"prompt_level_strict_acc_stderr,none": 0.01880296257563689,
"inst_level_strict_acc,none": 0.8069544364508393,
"inst_level_strict_acc_stderr,none": "N/A",
"prompt_level_loose_acc,none": 0.7707948243992606,
"prompt_level_loose_acc_stderr,none": 0.018087757424955286,
"inst_level_loose_acc,none": 0.8309352517985612,
"inst_level_loose_acc_stderr,none": "N/A"
},
"leaderboard_math_hard": {
"exact_match,none": 0.0445619335347432,
"exact_match_stderr,none": 0.005666316247127577,
"alias": " - leaderboard_math_hard"
},
"leaderboard_math_algebra_hard": {
"alias": " - leaderboard_math_algebra_hard",
"exact_match,none": 0.06188925081433225,
"exact_match_stderr,none": 0.013774440126929627
},
"leaderboard_math_counting_and_prob_hard": {
"alias": " - leaderboard_math_counting_and_prob_hard",
"exact_match,none": 0.04065040650406504,
"exact_match_stderr,none": 0.017878907564437465
},
"leaderboard_math_geometry_hard": {
"alias": " - leaderboard_math_geometry_hard",
"exact_match,none": 0.022727272727272728,
"exact_match_stderr,none": 0.0130210469090637
},
"leaderboard_math_intermediate_algebra_hard": {
"alias": " - leaderboard_math_intermediate_algebra_hard",
"exact_match,none": 0.039285714285714285,
"exact_match_stderr,none": 0.011630873964205717
},
"leaderboard_math_num_theory_hard": {
"alias": " - leaderboard_math_num_theory_hard",
"exact_match,none": 0.012987012987012988,
"exact_match_stderr,none": 0.009153145279150204
},
"leaderboard_math_prealgebra_hard": {
"alias": " - leaderboard_math_prealgebra_hard",
"exact_match,none": 0.06217616580310881,
"exact_match_stderr,none": 0.01742697415424056
},
"leaderboard_math_precalculus_hard": {
"alias": " - leaderboard_math_precalculus_hard",
"exact_match,none": 0.05185185185185185,
"exact_match_stderr,none": 0.019154368449050496
},
"leaderboard_mmlu_pro": {
"alias": " - leaderboard_mmlu_pro",
"acc,none": 0.5088929521276596,
"acc_stderr,none": 0.004557749352736335
},
"leaderboard_musr": {
"acc_norm,none": 0.4193121693121693,
"acc_norm_stderr,none": 0.017343672073569773,
"alias": " - leaderboard_musr"
},
"leaderboard_musr_murder_mysteries": {
"alias": " - leaderboard_musr_murder_mysteries",
"acc_norm,none": 0.584,
"acc_norm_stderr,none": 0.031235856237014505
},
"leaderboard_musr_object_placements": {
"alias": " - leaderboard_musr_object_placements",
"acc_norm,none": 0.265625,
"acc_norm_stderr,none": 0.027658162598649488
},
"leaderboard_musr_team_allocation": {
"alias": " - leaderboard_musr_team_allocation",
"acc_norm,none": 0.412,
"acc_norm_stderr,none": 0.03119159602602282
}
}
```
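For quick programmatic inspection, the aggregated results file linked above can also be read directly. The sketch below assumes `huggingface_hub` is installed and that the top-level key layout matches the excerpt above (the actual file may nest these entries under additional keys):
```python
import json
from huggingface_hub import hf_hub_download

# Download the results JSON linked in the "Latest results" section above.
path = hf_hub_download(
    repo_id="open-llm-leaderboard/dwikitheduck__gen-inst-1-details",
    filename="dwikitheduck__gen-inst-1/results_2024-11-25T00-14-37.470143.json",
    repo_type="dataset",
)
with open(path) as f:
    results = json.load(f)

# Aggregated metrics, following the key layout shown in the excerpt above.
print(results["leaderboard"]["acc_norm,none"])      # overall normalized accuracy
print(results["leaderboard_bbh"]["acc_norm,none"])  # BBH subset
```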
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
reflection-gen/ds_chat_reflct_rmsprop_iter1_sppo_hard_new_cn_mining_oj_iter1-bin | reflection-gen | "2024-11-25T00:17:08Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T00:17:06Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: chosen_probs
dtype: float64
- name: chosen_probs_win
dtype: float64
- name: chosen_probs_lose
dtype: float64
splits:
- name: train
num_bytes: 7749203
num_examples: 3150
download_size: 3108272
dataset_size: 7749203
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_chat_reflct_rmsprop_iter1_sppo_hard_new_cn_mining_oj_iter1-bin"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
reflection-gen/ds_chat_reflct_rmsprop_iter1_sppo_hard_new_cn_mining_oj_iter1-full_resp_trace | reflection-gen | "2024-11-25T00:17:10Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T00:17:09Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: test
dtype: string
- name: tag
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: text_prompt
dtype: string
- name: text_chosen
dtype: string
- name: text_rejected
dtype: string
- name: generate_0
dtype: string
- name: generate_0_score
dtype: int64
- name: traceback_0
dtype: string
- name: generate_1
dtype: string
- name: generate_1_score
dtype: int64
- name: traceback_1
dtype: string
- name: generate_2
dtype: string
- name: generate_2_score
dtype: int64
- name: traceback_2
dtype: string
- name: generate_3
dtype: string
- name: generate_3_score
dtype: int64
- name: traceback_3
dtype: string
- name: probability
sequence:
sequence: float64
- name: rm_scores
sequence: int64
splits:
- name: train
num_bytes: 18508857
num_examples: 3150
download_size: 6739450
dataset_size: 18508857
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_chat_reflct_rmsprop_iter1_sppo_hard_new_cn_mining_oj_iter1-full_resp_trace"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
reflection-gen/ds_chat_reflct_rmsprop_iter1_sppo_hard_new_cn_mining_oj_iter1-bin_all_pairs | reflection-gen | "2024-11-25T00:17:12Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T00:17:10Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: test
dtype: string
splits:
- name: train
num_bytes: 15299160
num_examples: 6016
download_size: 4388091
dataset_size: 15299160
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_chat_reflct_rmsprop_iter1_sppo_hard_new_cn_mining_oj_iter1-bin_all_pairs"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
RylanSchaeffer/collapse_gemma-2-9b_hs2_accumulate_iter5_sftsd0_temp1_max_seq_len512 | RylanSchaeffer | "2024-11-25T01:50:29Z" | 2 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T01:50:28Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 14698155
num_examples: 12531
download_size: 754424
dataset_size: 14698155
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
reflection-gen/ds_chat_reflct_rmsprop_iter2_sppo_hard_new_cn_mining_oj_iter2-bin | reflection-gen | "2024-11-25T02:03:07Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T02:03:06Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: chosen_probs
dtype: float64
- name: chosen_probs_win
dtype: float64
- name: chosen_probs_lose
dtype: float64
splits:
- name: train
num_bytes: 7882102
num_examples: 3167
download_size: 3155722
dataset_size: 7882102
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_chat_reflct_rmsprop_iter2_sppo_hard_new_cn_mining_oj_iter2-bin"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
reflection-gen/ds_chat_reflct_rmsprop_iter2_sppo_hard_new_cn_mining_oj_iter2-full_resp_trace | reflection-gen | "2024-11-25T02:03:09Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T02:03:08Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: test
dtype: string
- name: tag
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: text_prompt
dtype: string
- name: text_chosen
dtype: string
- name: text_rejected
dtype: string
- name: generate_0
dtype: string
- name: generate_0_score
dtype: int64
- name: traceback_0
dtype: string
- name: generate_1
dtype: string
- name: generate_1_score
dtype: int64
- name: traceback_1
dtype: string
- name: generate_2
dtype: string
- name: generate_2_score
dtype: int64
- name: traceback_2
dtype: string
- name: generate_3
dtype: string
- name: generate_3_score
dtype: int64
- name: traceback_3
dtype: string
- name: probability
sequence:
sequence: float64
- name: rm_scores
sequence: int64
splits:
- name: train
num_bytes: 19175927
num_examples: 3167
download_size: 6871419
dataset_size: 19175927
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_chat_reflct_rmsprop_iter2_sppo_hard_new_cn_mining_oj_iter2-full_resp_trace"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
reflection-gen/ds_chat_reflct_rmsprop_iter2_sppo_hard_new_cn_mining_oj_iter2-bin_all_pairs | reflection-gen | "2024-11-25T02:03:10Z" | 2 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T02:03:09Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: test
dtype: string
splits:
- name: train
num_bytes: 16827316
num_examples: 6418
download_size: 4630489
dataset_size: 16827316
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_chat_reflct_rmsprop_iter2_sppo_hard_new_cn_mining_oj_iter2-bin_all_pairs"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
RylanSchaeffer/collapse_gemma-2-9b_hs2_accumulate_iter5_sftsd1_temp1_max_seq_len512 | RylanSchaeffer | "2024-11-25T02:12:36Z" | 2 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T02:12:34Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 14946574
num_examples: 12531
download_size: 784226
dataset_size: 14946574
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
RylanSchaeffer/collapse_gemma-2-9b_hs2_accumulate_iter5_sftsd2_temp1_max_seq_len512 | RylanSchaeffer | "2024-11-25T02:34:52Z" | 2 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T02:34:51Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 14764204
num_examples: 12531
download_size: 721047
dataset_size: 14764204
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
casl0605/cartoon-example | casl0605 | "2024-11-25T02:44:39Z" | 2 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T02:44:37Z" | ---
dataset_info:
features:
- name: image
dtype: image
splits:
- name: train
num_bytes: 1097021.0
num_examples: 25
download_size: 1091088
dataset_size: 1097021.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
DoctoraojoyAojoy/UniFood | DoctoraojoyAojoy | "2024-11-25T02:47:43Z" | 2 | 0 | [
"license:cc-by-nc-4.0",
"region:us"
] | null | "2024-11-25T02:47:43Z" | ---
license: cc-by-nc-4.0
---
|
lianghsun/patent-zh_tw-en-translation-dpo | lianghsun | "2024-11-25T03:02:21Z" | 2 | 0 | [
"task_categories:translation",
"language:zh",
"license:cc-by-nc-sa-4.0",
"size_categories:1K<n<10K",
"region:us",
"patent",
"zh-tw",
"ROC",
"Taiwan"
] | [
"translation"
] | "2024-11-25T03:01:24Z" | ---
license: cc-by-nc-sa-4.0
task_categories:
- translation
language:
- zh
tags:
- patent
- zh-tw
- ROC
- Taiwan
size_categories:
- 1K<n<10K
---
# Dataset Card for lianghsun/patent-zh_tw-en-translation-dpo
<!-- Provide a quick summary of the dataset. -->
(WIP)
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
marcov/freebase_qa_promptsource | marcov | "2024-11-25T03:37:18Z" | 2 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T03:37:01Z" | ---
dataset_info:
features:
- name: Question-ID
dtype: string
- name: RawQuestion
dtype: string
- name: ProcessedQuestion
dtype: string
- name: Parses
sequence:
- name: Parse-Id
dtype: string
- name: PotentialTopicEntityMention
dtype: string
- name: TopicEntityName
dtype: string
- name: TopicEntityMid
dtype: string
- name: InferentialChain
dtype: string
- name: Answers
sequence:
- name: AnswersMid
dtype: string
- name: AnswersName
sequence: string
- name: template_name
dtype: string
- name: template
dtype: string
- name: rendered_input
dtype: string
- name: rendered_output
dtype: string
splits:
- name: train
num_bytes: 105253003.0
num_examples: 101790
- name: test
num_bytes: 20561808.0
num_examples: 19980
- name: validation
num_bytes: 20473054.0
num_examples: 19970
download_size: 38241015
dataset_size: 146287865.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
- split: validation
path: data/validation-*
---
|
marcov/paws-x_en_promptsource | marcov | "2024-11-25T03:47:54Z" | 2 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T03:47:01Z" | ---
dataset_info:
features:
- name: id
dtype: int32
- name: sentence1
dtype: string
- name: sentence2
dtype: string
- name: label
dtype:
class_label:
names:
'0': '0'
'1': '1'
- name: template_name
dtype: string
- name: template
dtype: string
- name: rendered_input
dtype: string
- name: rendered_output
dtype: string
splits:
- name: train
num_bytes: 389130111.49153525
num_examples: 565240
- name: test
num_bytes: 15778306.285041668
num_examples: 22907
- name: validation
num_bytes: 15683472.145875
num_examples: 22863
download_size: 179437577
dataset_size: 420591889.9224519
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
- split: validation
path: data/validation-*
---
|
RichardWang0803/TestData | RichardWang0803 | "2024-11-25T03:48:26Z" | 2 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-11-25T03:48:25Z" | ---
license: apache-2.0
---
|
haorandai/Nov_PGD_Banana_UF_Epsilon0.05_1samples_with1constraints | haorandai | "2024-11-25T04:51:36Z" | 2 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T04:51:34Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: text
dtype: string
splits:
- name: train
num_bytes: 215305.0
num_examples: 2
download_size: 217121
dataset_size: 215305.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ekatosha/twitter-financial-cor-labels | ekatosha | "2024-11-25T04:58:22Z" | 2 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T04:58:18Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype: int64
splits:
- name: train
num_bytes: 939352
num_examples: 9543
- name: validation
num_bytes: 237530
num_examples: 2388
download_size: 712538
dataset_size: 1176882
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
haorandai/Nov_PGD_Banana_UF_Epsilon0.05_5samples_with5constraints | haorandai | "2024-11-25T05:09:34Z" | 2 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T05:09:32Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: text
dtype: string
splits:
- name: train
num_bytes: 1023566.0
num_examples: 10
download_size: 1025246
dataset_size: 1023566.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
haorandai/Nov_PGD_Bicycle_UF_Epsilon0.05_5samples_with5constraints | haorandai | "2024-11-25T05:12:35Z" | 2 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T05:12:33Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: text
dtype: string
splits:
- name: train
num_bytes: 1073104.0
num_examples: 10
download_size: 1074818
dataset_size: 1073104.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
haorandai/Nov_PGD_Mice_UF_Epsilon0.05_5samples_with5constraints | haorandai | "2024-11-25T05:15:10Z" | 2 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T05:15:09Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: text
dtype: string
splits:
- name: train
num_bytes: 999155.0
num_examples: 10
download_size: 1000833
dataset_size: 999155.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
haorandai/Nov_Random_Banana_UF_Epsilon0.05_5samples_with5constraints | haorandai | "2024-11-25T05:38:29Z" | 2 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-25T05:38:28Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: text
dtype: string
splits:
- name: train
num_bytes: 1001975.0
num_examples: 10
download_size: 1003569
dataset_size: 1001975.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|