datasetId
stringlengths 5
121
| author
stringlengths 2
42
| last_modified
unknown | downloads
int64 0
3.25M
| likes
int64 0
7.54k
| tags
sequencelengths 1
7.92k
| task_categories
sequencelengths 0
47
⌀ | createdAt
unknown | card
stringlengths 15
1.02M
|
---|---|---|---|---|---|---|---|---|
violetxi/NUMINA-V2-Clean-Blocks-10000_10500-0_10 | violetxi | "2024-11-26T03:33:59Z" | 23 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T03:14:06Z" | ---
dataset_info:
features:
- name: problem
dtype: string
- name: source
dtype: string
- name: is_correct
dtype: bool
- name: target_answer
dtype: string
- name: solution
dtype: string
- name: solution_steps
dtype: string
- name: attempts
dtype: string
- name: model_answer
dtype: string
splits:
- name: train
num_bytes: 201278847
num_examples: 32432
download_size: 21030168
dataset_size: 201278847
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Shannnh/baseline-dataset-5 | Shannnh | "2024-11-26T03:17:14Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T03:16:58Z" | ---
dataset_info:
features:
- name: document
dtype: string
- name: question
dtype: string
- name: short_answers
dtype: string
- name: predicted_answer
dtype: string
splits:
- name: validation
num_bytes: 170508582
num_examples: 4289
download_size: 88082731
dataset_size: 170508582
configs:
- config_name: default
data_files:
- split: validation
path: data/validation-*
---
|
rrvaswin/_5_turns_only_ckp_1 | rrvaswin | "2024-11-26T03:55:23Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T03:55:12Z" | ---
dataset_info:
features:
- name: trajectory
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 4260
num_examples: 1
download_size: 6123
dataset_size: 4260
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
marcov/narrativeqa_promptsource | marcov | "2024-11-26T10:08:06Z" | 23 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T04:21:25Z" | ---
dataset_info:
features:
- name: document
struct:
- name: id
dtype: string
- name: kind
dtype: string
- name: url
dtype: string
- name: file_size
dtype: int32
- name: word_count
dtype: int32
- name: start
dtype: string
- name: end
dtype: string
- name: summary
struct:
- name: text
dtype: string
- name: tokens
sequence: string
- name: url
dtype: string
- name: title
dtype: string
- name: text
dtype: string
- name: question
struct:
- name: text
dtype: string
- name: tokens
sequence: string
- name: answers
list:
- name: text
dtype: string
- name: tokens
sequence: string
- name: template_name
dtype: string
- name: template
dtype: string
- name: rendered_input
dtype: string
- name: rendered_output
dtype: string
splits:
- name: train
num_bytes: 160551425136.863
num_examples: 261796
- name: test
num_bytes: 49308994394.0
num_examples: 84456
- name: validation
num_bytes: 16711465478.905663
num_examples: 27508
download_size: 117855548227
dataset_size: 226571885009.76868
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
- split: validation
path: data/validation-*
---
|
ZixuanKe/flare_finqa_sup_sample_from_policy_v1.1_stepwise_dpo_chunk_6 | ZixuanKe | "2024-11-26T04:37:40Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T04:37:26Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: rejected
dtype: string
- name: chosen
dtype: string
- name: justification
dtype: string
splits:
- name: train
num_bytes: 8755033
num_examples: 1642
download_size: 1746862
dataset_size: 8755033
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ZixuanKe/flare_finqa_sup_sample_from_policy_v1.1_stepwise_dpo_chunk_20 | ZixuanKe | "2024-11-26T04:47:59Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T04:47:58Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: rejected
dtype: string
- name: chosen
dtype: string
- name: justification
dtype: string
splits:
- name: train
num_bytes: 11380
num_examples: 3
download_size: 28475
dataset_size: 11380
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ZixuanKe/flare_finqa_sup_sample_from_policy_v1.1_stepwise_dpo_chunk_7 | ZixuanKe | "2024-11-26T04:57:50Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T04:57:48Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: rejected
dtype: string
- name: chosen
dtype: string
- name: justification
dtype: string
splits:
- name: train
num_bytes: 9228105
num_examples: 1642
download_size: 1806388
dataset_size: 9228105
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
violetxi/NUMINA-V2-Clean-Blocks-1100_1200-0_45 | violetxi | "2024-11-26T07:38:23Z" | 23 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T04:57:50Z" | ---
dataset_info:
features:
- name: problem
dtype: string
- name: source
dtype: string
- name: is_correct
dtype: bool
- name: target_answer
dtype: string
- name: solution
dtype: string
- name: solution_steps
dtype: string
- name: attempts
dtype: string
- name: model_answer
dtype: string
splits:
- name: train
num_bytes: 1321241837
num_examples: 164256
download_size: 136222829
dataset_size: 1321241837
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
hlm22/lerobot_test | hlm22 | "2024-11-26T05:23:58Z" | 23 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-11-26T05:01:29Z" | ---
license: apache-2.0
dataset_info:
features:
- name: observation.images.camera1
dtype: video_frame
- name: observation.images.camera2
dtype: video_frame
- name: observation.state
sequence: float32
length: 4
- name: observation.effort
sequence: float32
length: 4
- name: action
sequence: float32
length: 3
- name: episode_index
dtype: int64
- name: frame_index
dtype: int64
- name: timestamp
dtype: float32
- name: next.done
dtype: bool
- name: index
dtype: int64
splits:
- name: train
num_bytes: 4804
num_examples: 25
download_size: 9274
dataset_size: 4804
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
sajadh/SAJADH-llama2-1k | sajadh | "2024-11-26T05:03:10Z" | 23 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T05:03:02Z" | ---
dataset_info:
features:
- name: message_id
dtype: string
- name: parent_id
dtype: string
- name: user_id
dtype: string
- name: created_date
dtype: string
- name: text
dtype: string
- name: role
dtype: string
- name: lang
dtype: string
- name: review_count
dtype: float64
- name: review_result
dtype: bool
- name: deleted
dtype: bool
- name: rank
dtype: float64
- name: synthetic
dtype: bool
- name: model_name
dtype: 'null'
- name: detoxify
struct:
- name: identity_attack
dtype: float64
- name: insult
dtype: float64
- name: obscene
dtype: float64
- name: severe_toxicity
dtype: float64
- name: sexual_explicit
dtype: float64
- name: threat
dtype: float64
- name: toxicity
dtype: float64
- name: message_tree_id
dtype: string
- name: tree_state
dtype: string
- name: emojis
struct:
- name: count
sequence: int64
- name: name
sequence: string
- name: labels
struct:
- name: count
sequence: int64
- name: name
sequence: string
- name: value
sequence: float64
splits:
- name: train
num_bytes: 107144003
num_examples: 85443
download_size: 40516221
dataset_size: 107144003
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
haorandai/Nov_Clean_Banana_Orange_5samples_with5constraints | haorandai | "2024-11-26T05:05:12Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T05:05:11Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: text
dtype: string
splits:
- name: train
num_bytes: 377100.0
num_examples: 10
download_size: 378005
dataset_size: 377100.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
junnystateofmind/conversational_ai_turn_0_checkpoint | junnystateofmind | "2024-11-26T05:05:33Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T05:05:29Z" | ---
dataset_info:
features:
- name: trajectory
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 31682
num_examples: 20
download_size: 8130
dataset_size: 31682
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
haorandai/Nov_Clean_Mice_Orange_5samples_with5constraints | haorandai | "2024-11-26T05:06:52Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T05:06:50Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: text
dtype: string
splits:
- name: train
num_bytes: 484845.0
num_examples: 10
download_size: 485751
dataset_size: 484845.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
junnystateofmind/conversational_ai_turn_1_checkpoint | junnystateofmind | "2024-11-26T05:06:56Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T05:06:54Z" | ---
dataset_info:
features:
- name: trajectory
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 163897
num_examples: 20
download_size: 21136
dataset_size: 163897
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
haorandai/Nov_Clean_Bicycle_Orange_5samples_with5constraints | haorandai | "2024-11-26T05:08:21Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T05:08:19Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: text
dtype: string
splits:
- name: train
num_bytes: 474223.0
num_examples: 10
download_size: 475138
dataset_size: 474223.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
junnystateofmind/conversational_ai_turn_3_checkpoint | junnystateofmind | "2024-11-26T05:09:38Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T05:09:36Z" | ---
dataset_info:
features:
- name: trajectory
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 474254
num_examples: 20
download_size: 30485
dataset_size: 474254
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
junnystateofmind/conversational_ai_turn_4_checkpoint | junnystateofmind | "2024-11-26T05:11:16Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T05:11:14Z" | ---
dataset_info:
features:
- name: trajectory
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 644474
num_examples: 20
download_size: 35059
dataset_size: 644474
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
junnystateofmind/conversational_ai | junnystateofmind | "2024-11-26T05:11:19Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T05:11:17Z" | ---
dataset_info:
features:
- name: trajectory
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 644474
num_examples: 20
download_size: 35059
dataset_size: 644474
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
junnystateofmind/fast_test_turn_0_ckp | junnystateofmind | "2024-11-26T05:56:19Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T05:34:40Z" | ---
dataset_info:
features:
- name: trajectory
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 26929
num_examples: 10
download_size: 12127
dataset_size: 26929
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ZixuanKe/flare_finqa_sup_sample_from_policy_v1.1_stepwise_dpo_chunk_15 | ZixuanKe | "2024-11-26T05:35:53Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T05:35:51Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: rejected
dtype: string
- name: chosen
dtype: string
- name: justification
dtype: string
splits:
- name: train
num_bytes: 8353113
num_examples: 1642
download_size: 1652029
dataset_size: 8353113
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
junnystateofmind/fast_test_turn_3_ckp | junnystateofmind | "2024-11-26T05:58:06Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T05:36:36Z" | ---
dataset_info:
features:
- name: trajectory
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 38414
num_examples: 10
download_size: 20009
dataset_size: 38414
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
junnystateofmind/fast_test_turn_4_ckp | junnystateofmind | "2024-11-26T05:58:53Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T05:37:23Z" | ---
dataset_info:
features:
- name: trajectory
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 41427
num_examples: 10
download_size: 20173
dataset_size: 41427
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
junnystateofmind/fast_test | junnystateofmind | "2024-11-26T05:58:55Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T05:37:25Z" | ---
dataset_info:
features:
- name: trajectory
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 41427
num_examples: 10
download_size: 20173
dataset_size: 41427
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ZixuanKe/flare_finqa_sup_sample_from_policy_v1.1_stepwise_dpo_chunk_9 | ZixuanKe | "2024-11-26T05:38:42Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T05:38:40Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: rejected
dtype: string
- name: chosen
dtype: string
- name: justification
dtype: string
splits:
- name: train
num_bytes: 8474899
num_examples: 1642
download_size: 1765624
dataset_size: 8474899
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Tippawan/train1 | Tippawan | "2024-11-26T05:47:20Z" | 23 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T05:47:17Z" | ---
dataset_info:
features:
- name: incorrect
dtype: string
- name: correct
dtype: string
- name: __index_level_0__
dtype: int64
splits:
- name: train
num_bytes: 18622956
num_examples: 92337
download_size: 8482017
dataset_size: 18622956
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ZixuanKe/flare_finqa_sup_sample_from_policy_v1.1_stepwise_dpo_chunk_12 | ZixuanKe | "2024-11-26T06:04:29Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T06:04:27Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: rejected
dtype: string
- name: chosen
dtype: string
- name: justification
dtype: string
splits:
- name: train
num_bytes: 8981631
num_examples: 1642
download_size: 1731185
dataset_size: 8981631
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
vanshp1232/Vansh3333 | vanshp1232 | "2024-12-19T12:48:00Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T06:09:12Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: status
dtype: string
- name: _server_id
dtype: string
- name: instruction
dtype: string
- name: conv_id
dtype: string
- name: turn
dtype: int64
splits:
- name: train
num_bytes: 170796
num_examples: 87
download_size: 72034
dataset_size: 170796
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
vanshp1232/Mohit3999 | vanshp1232 | "2024-12-20T07:23:42Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T06:09:23Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: status
dtype: string
- name: _server_id
dtype: string
- name: instruction
dtype: string
- name: conv_id
dtype: string
- name: turn
dtype: int64
- name: completeness_score.responses.users
sequence: string
- name: actionable_score.responses.status
sequence: string
- name: clarity_score.responses.status
sequence: string
- name: feedback.responses
sequence: string
- name: accuracy_score.responses.status
sequence: string
- name: relevance_score.responses.users
sequence: string
- name: relevance_score.responses.status
sequence: string
- name: engagement_score.responses.users
sequence: string
- name: accuracy_score.responses
sequence: int64
- name: actionable_score.responses
sequence: int64
- name: completeness_score.responses.status
sequence: string
- name: accuracy_score.responses.users
sequence: string
- name: actionable_score.responses.users
sequence: string
- name: engagement_score.responses
sequence: int64
- name: clarity_score.responses
sequence: int64
- name: feedback.responses.users
sequence: string
- name: feedback.responses.status
sequence: string
- name: engagement_score.responses.status
sequence: string
- name: clarity_score.responses.users
sequence: string
- name: completeness_score.responses
sequence: int64
- name: relevance_score.responses
sequence: int64
splits:
- name: train
num_bytes: 1656817
num_examples: 402
download_size: 507922
dataset_size: 1656817
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
vanshp1232/dyvanshu132 | vanshp1232 | "2024-11-26T06:09:35Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T06:09:33Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: status
dtype: string
- name: _server_id
dtype: string
- name: instruction
dtype: string
- name: conv_id
dtype: string
- name: turn
dtype: int64
splits:
- name: train
num_bytes: 177586
num_examples: 55
download_size: 48279
dataset_size: 177586
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
vanshp1232/Nithin138 | vanshp1232 | "2024-11-27T06:08:41Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T06:09:52Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: status
dtype: string
- name: _server_id
dtype: string
- name: instruction
dtype: string
- name: conv_id
dtype: string
- name: turn
dtype: int64
splits:
- name: train
num_bytes: 18613
num_examples: 5
download_size: 26184
dataset_size: 18613
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
vanshp1232/Amita2012 | vanshp1232 | "2024-12-19T12:49:02Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T06:10:04Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: status
dtype: string
- name: _server_id
dtype: string
- name: instruction
dtype: string
- name: conv_id
dtype: string
- name: turn
dtype: int64
splits:
- name: train
num_bytes: 175408
num_examples: 22
download_size: 65416
dataset_size: 175408
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
vanshp1232/Trisha | vanshp1232 | "2024-12-19T12:49:09Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T06:10:08Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: status
dtype: string
- name: _server_id
dtype: string
- name: instruction
dtype: string
- name: conv_id
dtype: string
- name: turn
dtype: int64
- name: relevance_score.responses
sequence: int64
- name: relevance_score.responses.users
sequence: string
- name: relevance_score.responses.status
sequence: string
- name: accuracy_score.responses
sequence: int64
- name: accuracy_score.responses.users
sequence: string
- name: accuracy_score.responses.status
sequence: string
- name: clarity_score.responses
sequence: int64
- name: clarity_score.responses.users
sequence: string
- name: clarity_score.responses.status
sequence: string
- name: actionable_score.responses
sequence: int64
- name: actionable_score.responses.users
sequence: string
- name: actionable_score.responses.status
sequence: string
- name: engagement_score.responses
sequence: int64
- name: engagement_score.responses.users
sequence: string
- name: engagement_score.responses.status
sequence: string
- name: completeness_score.responses
sequence: int64
- name: completeness_score.responses.users
sequence: string
- name: completeness_score.responses.status
sequence: string
- name: feedback.responses
sequence: string
- name: feedback.responses.users
sequence: string
- name: feedback.responses.status
sequence: string
splits:
- name: train
num_bytes: 1445444
num_examples: 119
download_size: 457191
dataset_size: 1445444
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
vanshp1232/Pranav2533 | vanshp1232 | "2024-12-19T12:49:32Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T06:10:24Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: status
dtype: string
- name: _server_id
dtype: string
- name: instruction
dtype: string
- name: conv_id
dtype: string
- name: turn
dtype: int64
splits:
- name: train
num_bytes: 453634
num_examples: 56
download_size: 175363
dataset_size: 453634
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
vanshp1232/Gaurav12321 | vanshp1232 | "2024-12-19T12:50:20Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T06:10:55Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: status
dtype: string
- name: _server_id
dtype: string
- name: instruction
dtype: string
- name: conv_id
dtype: string
- name: turn
dtype: int64
splits:
- name: train
num_bytes: 66125
num_examples: 10
download_size: 35985
dataset_size: 66125
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
vanshp1232/Abhishek111222 | vanshp1232 | "2024-12-19T12:50:32Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T06:11:04Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: status
dtype: string
- name: _server_id
dtype: string
- name: instruction
dtype: string
- name: conv_id
dtype: string
- name: turn
dtype: int64
- name: relevance_score.responses
sequence: int64
- name: relevance_score.responses.users
sequence: string
- name: relevance_score.responses.status
sequence: string
- name: accuracy_score.responses
sequence: int64
- name: accuracy_score.responses.users
sequence: string
- name: accuracy_score.responses.status
sequence: string
- name: clarity_score.responses
sequence: int64
- name: clarity_score.responses.users
sequence: string
- name: clarity_score.responses.status
sequence: string
- name: actionable_score.responses
sequence: int64
- name: actionable_score.responses.users
sequence: string
- name: actionable_score.responses.status
sequence: string
- name: engagement_score.responses
sequence: int64
- name: engagement_score.responses.users
sequence: string
- name: engagement_score.responses.status
sequence: string
- name: completeness_score.responses
sequence: int64
- name: completeness_score.responses.users
sequence: string
- name: completeness_score.responses.status
sequence: string
- name: feedback.responses
sequence: string
- name: feedback.responses.users
sequence: string
- name: feedback.responses.status
sequence: string
splits:
- name: train
num_bytes: 436906
num_examples: 55
download_size: 140414
dataset_size: 436906
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
vanshp1232/AmitPathak123 | vanshp1232 | "2024-12-19T12:50:45Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T06:11:13Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: status
dtype: string
- name: _server_id
dtype: string
- name: instruction
dtype: string
- name: conv_id
dtype: string
- name: turn
dtype: int64
- name: relevance_score.responses
sequence: int64
- name: relevance_score.responses.users
sequence: string
- name: relevance_score.responses.status
sequence: string
- name: accuracy_score.responses
sequence: int64
- name: accuracy_score.responses.users
sequence: string
- name: accuracy_score.responses.status
sequence: string
- name: clarity_score.responses
sequence: int64
- name: clarity_score.responses.users
sequence: string
- name: clarity_score.responses.status
sequence: string
- name: actionable_score.responses
sequence: int64
- name: actionable_score.responses.users
sequence: string
- name: actionable_score.responses.status
sequence: string
- name: engagement_score.responses
sequence: int64
- name: engagement_score.responses.users
sequence: string
- name: engagement_score.responses.status
sequence: string
- name: completeness_score.responses
sequence: int64
- name: completeness_score.responses.users
sequence: string
- name: completeness_score.responses.status
sequence: string
- name: feedback.responses
sequence: string
- name: feedback.responses.users
sequence: string
- name: feedback.responses.status
sequence: string
splits:
- name: train
num_bytes: 115919
num_examples: 15
download_size: 66183
dataset_size: 115919
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
vanshp1232/Prithvi1232 | vanshp1232 | "2024-12-19T12:50:57Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T06:11:22Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: status
dtype: string
- name: _server_id
dtype: string
- name: instruction
dtype: string
- name: conv_id
dtype: string
- name: turn
dtype: int64
splits:
- name: train
num_bytes: 99408
num_examples: 9
download_size: 43599
dataset_size: 99408
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Noveled/minhwa_dataset_1126_01 | Noveled | "2024-11-26T06:44:58Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T06:44:48Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: text
dtype: string
splits:
- name: train
num_bytes: 29247593.0
num_examples: 186
download_size: 28406548
dataset_size: 29247593.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
violetxi/NUMINA-V2-Clean-Blocks-1400_1600-1_4 | violetxi | "2024-11-26T07:29:05Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T06:45:22Z" | ---
dataset_info:
features:
- name: problem
dtype: string
- name: source
dtype: string
- name: is_correct
dtype: bool
- name: target_answer
dtype: string
- name: solution
dtype: string
- name: solution_steps
dtype: string
- name: attempts
dtype: string
- name: model_answer
dtype: string
splits:
- name: train
num_bytes: 47746408
num_examples: 7936
download_size: 5337265
dataset_size: 47746408
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
cc0de/Pile_arXiv | cc0de | "2024-11-26T07:13:04Z" | 23 | 0 | [
"license:mit",
"region:us"
] | null | "2024-11-26T07:13:04Z" | ---
license: mit
---
|
mlfoundations-dev/oh_v3.1_wo_camel_ai_biology | mlfoundations-dev | "2024-11-26T07:23:12Z" | 23 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T07:22:20Z" | ---
dataset_info:
features:
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: source_label_exact
sequence: string
splits:
- name: train
num_bytes: 1950265226
num_examples: 988199
download_size: 1025294707
dataset_size: 1950265226
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/oh_v1.2_sin_slim_orca_diversity | mlfoundations-dev | "2024-12-01T02:32:58Z" | 23 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T08:00:33Z" | ---
dataset_info:
features:
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: shard_id
dtype: string
- name: output
dtype: string
- name: ngram_3_uniqueness
dtype: float64
- name: entropy
dtype: float64
- name: gini_index
dtype: float64
- name: self_bleu
dtype: float64
- name: embeddings
dtype: string
- name: kmeans_inertia_embeddings
dtype: float64
- name: kmeans_inertia_embeddings_normalized
dtype: float64
- name: projected_gradients_embeddings
dtype: float64
- name: new_conversations
dtype: string
- name: projected_gradients
dtype: string
- name: projected_gradients_vendi
dtype: float64
- name: projected_gradients_log_det
dtype: float64
- name: projected_embeddings_log_det
dtype: float64
- name: kmeans_inertia_gradients
dtype: float64
- name: kmeans_inertia_gradients_normalized
dtype: float64
splits:
- name: train
num_bytes: 5853259315
num_examples: 365464
download_size: 3370112412
dataset_size: 5853259315
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
naytin/turkish-sentiment-analysis-dataset_ENG | naytin | "2024-12-05T07:37:03Z" | 23 | 0 | [
"language:en",
"license:cc-by-nc-nd-4.0",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T08:40:17Z" | ---
language:
- en
license: cc-by-nc-nd-4.0
---
|
liangzid/robench-eval-Time3-s | liangzid | "2024-11-26T08:47:19Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T08:47:16Z" | ---
dataset_info:
features:
- name: shuffled_text
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: label
dtype: string
splits:
- name: train
num_bytes: 2098318
num_examples: 2930
download_size: 1175759
dataset_size: 2098318
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
JJuny/llama2_DYD_train | JJuny | "2024-11-26T08:57:05Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T08:56:56Z" | ---
dataset_info:
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 966783
num_examples: 219
download_size: 186893
dataset_size: 966783
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
JJuny/llama2_DYD_eval | JJuny | "2024-11-26T08:57:14Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T08:57:12Z" | ---
dataset_info:
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 22259
num_examples: 5
download_size: 6857
dataset_size: 22259
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
liangzid/robench-eval-Time4-s | liangzid | "2024-11-26T08:58:23Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T08:58:19Z" | ---
dataset_info:
features:
- name: shuffled_text
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: label
dtype: string
splits:
- name: train
num_bytes: 2068681
num_examples: 2931
download_size: 1155052
dataset_size: 2068681
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
junnystateofmind/fast_test_2_turn_4_ckp | junnystateofmind | "2024-11-26T09:21:07Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T09:21:05Z" | ---
dataset_info:
features:
- name: trajectory
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 26142
num_examples: 10
download_size: 16524
dataset_size: 26142
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
junnystateofmind/fast_test_2 | junnystateofmind | "2024-11-26T09:21:10Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T09:21:08Z" | ---
dataset_info:
features:
- name: trajectory
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 26142
num_examples: 10
download_size: 16524
dataset_size: 26142
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
AnsenH/cartoon_lora | AnsenH | "2024-11-26T09:27:46Z" | 23 | 0 | [
"license:apache-2.0",
"size_categories:n<1K",
"format:imagefolder",
"modality:image",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-11-26T09:27:09Z" | ---
license: apache-2.0
---
|
liangzid/robench-eval-Time7-s | liangzid | "2024-11-26T09:30:22Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T09:30:19Z" | ---
dataset_info:
features:
- name: shuffled_text
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: label
dtype: string
splits:
- name: train
num_bytes: 2081816
num_examples: 2931
download_size: 1162897
dataset_size: 2081816
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
liangzid/robench-eval-Time10-s | liangzid | "2024-11-26T10:03:56Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T10:03:53Z" | ---
dataset_info:
features:
- name: shuffled_text
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: label
dtype: string
splits:
- name: train
num_bytes: 2080323
num_examples: 2931
download_size: 1160407
dataset_size: 2080323
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
marcov/ambig_qa_light_promptsource | marcov | "2024-11-26T10:09:51Z" | 23 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T10:09:44Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: question
dtype: string
- name: annotations
sequence:
- name: type
dtype: string
- name: answer
sequence: string
- name: qaPairs
sequence:
- name: question
dtype: string
- name: answer
sequence: string
- name: template_name
dtype: string
- name: template
dtype: string
- name: rendered_input
dtype: string
- name: rendered_output
dtype: string
splits:
- name: train
num_bytes: 75255907.0
num_examples: 50180
- name: validation
num_bytes: 16331293.0
num_examples: 10010
download_size: 14453361
dataset_size: 91587200.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
MoonKih/final2 | MoonKih | "2024-11-26T10:18:55Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T10:18:53Z" | ---
dataset_info:
features:
- name: image
dtype: string
- name: text
dtype: string
splits:
- name: train
num_bytes: 1346616
num_examples: 1190
download_size: 172567
dataset_size: 1346616
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
plaguss/test_math_shepherd_prm_4 | plaguss | "2024-11-26T10:20:30Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"library:distilabel",
"arxiv:2312.08935",
"region:us",
"synthetic",
"distilabel",
"rlaif"
] | null | "2024-11-26T10:20:24Z" | ---
size_categories: n<1K
dataset_info:
features:
- name: instruction
dtype: string
- name: answer
dtype: string
- name: golden_solution
sequence: string
- name: model_name
dtype: string
- name: solutions
sequence: string
- name: distilabel_metadata
struct:
- name: statistics_completer
struct:
- name: input_tokens
sequence: int64
- name: output_tokens
sequence: int64
- name: input
dtype: string
- name: label
dtype: string
splits:
- name: train
num_bytes: 29324
num_examples: 11
download_size: 26974
dataset_size: 29324
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
tags:
- synthetic
- distilabel
- rlaif
---
<p align="left">
<a href="https://github.com/argilla-io/distilabel">
<img src="https://raw.githubusercontent.com/argilla-io/distilabel/main/docs/assets/distilabel-badge-light.png" alt="Built with Distilabel" width="200" height="32"/>
</a>
</p>
# Dataset Card for test_math_shepherd_prm_4
This dataset has been created with [distilabel](https://distilabel.argilla.io/).
## Dataset Summary
This dataset contains a `pipeline.yaml` which can be used to reproduce the pipeline that generated it in distilabel using the `distilabel` CLI:
```console
distilabel pipeline run --config "https://huggingface.co/datasets/plaguss/test_math_shepherd_prm_4/raw/main/pipeline.yaml"
```
or explore the configuration:
```console
distilabel pipeline info --config "https://huggingface.co/datasets/plaguss/test_math_shepherd_prm_4/raw/main/pipeline.yaml"
```
## Dataset structure
The examples have the following structure per configuration:
<details><summary> Configuration: default </summary><hr>
```json
{
"answer": "Janet sells 16 - 3 - 4 = \u003c\u003c16-3-4=9\u003e\u003e9 duck eggs a day.\nShe makes 9 * 2 = $\u003c\u003c9*2=18\u003e\u003e18 every day at the farmer\u2019s market.\n#### 18",
"distilabel_metadata": {
"statistics_completer": {
"input_tokens": [
714
],
"output_tokens": [
352
]
}
},
"golden_solution": [
"Step 1: Calculate the total number of eggs laid by the ducks per day: 16 eggs",
"Step 2: Calculate the number of eggs Janet uses for breakfast and baking: 3 (for breakfast) + 4 (for baking) = 7 eggs",
"Step 3: Calculate the number of eggs left to sell: 16 - 7 = \u003c\u003c16-7=9\u003e\u003e9 eggs",
"Step 4: Calculate the total amount Janet makes from selling eggs: 9 eggs * $2 per egg = \u003c\u003c9*2=18\u003e\u003e18 dollars",
"The answer is: 18"
],
"input": "Janet\u2019s ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers\u0027 market daily for $2 per fresh duck egg. How much in dollars does she make every day at the farmers\u0027 market? Step 1: Calculate the number of eggs laid per day: 1\u043a\u0438\nStep 2: Calculate the number of eggs Janet eats for breakfast: 3 \u043a\u0438\nStep 3: Calculate the number of eggs Janet bakes for her friends: 4 \u043a\u0438\nStep 4: Calculate the number of eggs Janet has left to sell: 16 - (3 + 4) = 16 - 7 = \u003c\u003c16-7=9\u003e\u003e9 eggs \u043a\u0438\nStep 5: Calculate the amount Janet makes at the farmers\u0027 market: 9 * $2 = \u003c\u003c9*2=18\u003e\u003e18 dollars The answer is: 18\n\n## Solution 2\n\nStep 1: Calculate the number of eggs laid per day: 16\nStep 2: Calculate the total number of eggs used (eaten and baked): 3 + 4 = \u003c\u003c3+4=7\u003e\u003e7\nStep 3: Calculate the number of eggs Janet has left to sell: 16 - 7 = \u003c\u003c16-7=9\u003e\u003e9 eggs\nStep 4: Calculate the amount Janet makes at the farmers\u0027 market: 9 * $2 = \u003c\u003c9*2=18\u003e\u003e18 dollars\nThe answer is: 18\n\n## Solution 3\n\nStep 1: Calculate the number of eggs Janet has left to sell: 16 - (3 + 4) = 16 - 7 = \u003c\u003c16-7=9\u003e\u003e9 eggs\nStep 2: Calculate the amount Janet makes at the farmers\u0027 market: 9 * $2 = \u003c\u003c9*2=18\u003e\u003e18 dollars\nThe answer is: 18\n\n## Solution 4\n\nStep 1: Calculate the number of eggs Janet eats for breakfast: 3\nStep 2: Calculate the number of eggs Janet bakes for her friends: 4\nStep 3: Calculate the number of eggs Janet has left to sell: 16 - (3 + 4) = 16 - 7 = \u003c\u003c16-7=9\u003e\u003e9 eggs\nStep 4: Calculate the amount Janet makes at the farmers\u0027 market: 9 * $2 = \u003c\u003c9*2=18\u003e\u003e18 dollars\nThe answer is: 18\n\n## Solution 5\n\nStep 1: Calculate the total number of eggs 
used (eaten and baked): 3 + 4 = \u003c\u003c3+4=7\u003e\u003e7\nStep 2: Calculate the number of eggs Janet has left to sell: 16 - 7 = \u003c\u003c16-7=9\u003e\u003e9 eggs\nStep 3: Calculate the amount Janet makes at the farmers\u0027 market: 9 * $2 = \u003c\u003c9*2=18\u003e\u003e18 dollars\nThe answer is: 18 \u043a\u0438",
"instruction": "Janet\u2019s ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers\u0027 market daily for $2 per fresh duck egg. How much in dollars does she make every day at the farmers\u0027 market?",
"label": "Janet\u2019s ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers\u0027 market daily for $2 per fresh duck egg. How much in dollars does she make every day at the farmers\u0027 market? Step 1: Calculate the number of eggs laid per day: 16\nStep 2: Calculate the number of eggs Janet eats for breakfast: 3 +\nStep 3: Calculate the number of eggs Janet bakes for her friends: 4 +\nStep 4: Calculate the number of eggs Janet has left to sell: 16 - (3 + 4) = 16 - 7 = \u003c\u003c16-7=9\u003e\u003e9 eggs +\nStep 5: Calculate the amount Janet makes at the farmers\u0027 market: 9 * $2 = \u003c\u003c9*2=18\u003e\u003e18 dollars The answer is: 18\n\n## Solution 2\n\nStep 1: Calculate the number of eggs laid per day: 16\nStep 2: Calculate the total number of eggs used (eaten and baked): 3 + 4 = \u003c\u003c3+4=7\u003e\u003e7\nStep 3: Calculate the number of eggs Janet has left to sell: 16 - 7 = \u003c\u003c16-7=9\u003e\u003e9 eggs\nStep 4: Calculate the amount Janet makes at the farmers\u0027 market: 9 * $2 = \u003c\u003c9*2=18\u003e\u003e18 dollars\nThe answer is: 18\n\n## Solution 3\n\nStep 1: Calculate the number of eggs Janet has left to sell: 16 - (3 + 4) = 16 - 7 = \u003c\u003c16-7=9\u003e\u003e9 eggs\nStep 2: Calculate the amount Janet makes at the farmers\u0027 market: 9 * $2 = \u003c\u003c9*2=18\u003e\u003e18 dollars\nThe answer is: 18\n\n## Solution 4\n\nStep 1: Calculate the number of eggs Janet eats for breakfast: 3\nStep 2: Calculate the number of eggs Janet bakes for her friends: 4\nStep 3: Calculate the number of eggs Janet has left to sell: 16 - (3 + 4) = 16 - 7 = \u003c\u003c16-7=9\u003e\u003e9 eggs\nStep 4: Calculate the amount Janet makes at the farmers\u0027 market: 9 * $2 = \u003c\u003c9*2=18\u003e\u003e18 dollars\nThe answer is: 18\n\n## Solution 5\n\nStep 1: Calculate the total number of eggs used (eaten and baked): 3 + 4 = 
\u003c\u003c3+4=7\u003e\u003e7\nStep 2: Calculate the number of eggs Janet has left to sell: 16 - 7 = \u003c\u003c16-7=9\u003e\u003e9 eggs\nStep 3: Calculate the amount Janet makes at the farmers\u0027 market: 9 * $2 = \u003c\u003c9*2=18\u003e\u003e18 dollars\nThe answer is: 18 -",
"model_name": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"solutions": [
"Step 1: Calculate the number of eggs laid per day: 16",
"Step 2: Calculate the number of eggs Janet eats for breakfast: 3 +",
"Step 3: Calculate the number of eggs Janet bakes for her friends: 4 +",
"Step 4: Calculate the number of eggs Janet has left to sell: 16 - (3 + 4) = 16 - 7 = \u003c\u003c16-7=9\u003e\u003e9 eggs +",
"Step 5: Calculate the amount Janet makes at the farmers\u0027 market: 9 * $2 = \u003c\u003c9*2=18\u003e\u003e18 dollars The answer is: 18\n\n## Solution 2\n\nStep 1: Calculate the number of eggs laid per day: 16\nStep 2: Calculate the total number of eggs used (eaten and baked): 3 + 4 = \u003c\u003c3+4=7\u003e\u003e7\nStep 3: Calculate the number of eggs Janet has left to sell: 16 - 7 = \u003c\u003c16-7=9\u003e\u003e9 eggs\nStep 4: Calculate the amount Janet makes at the farmers\u0027 market: 9 * $2 = \u003c\u003c9*2=18\u003e\u003e18 dollars\nThe answer is: 18\n\n## Solution 3\n\nStep 1: Calculate the number of eggs Janet has left to sell: 16 - (3 + 4) = 16 - 7 = \u003c\u003c16-7=9\u003e\u003e9 eggs\nStep 2: Calculate the amount Janet makes at the farmers\u0027 market: 9 * $2 = \u003c\u003c9*2=18\u003e\u003e18 dollars\nThe answer is: 18\n\n## Solution 4\n\nStep 1: Calculate the number of eggs Janet eats for breakfast: 3\nStep 2: Calculate the number of eggs Janet bakes for her friends: 4\nStep 3: Calculate the number of eggs Janet has left to sell: 16 - (3 + 4) = 16 - 7 = \u003c\u003c16-7=9\u003e\u003e9 eggs\nStep 4: Calculate the amount Janet makes at the farmers\u0027 market: 9 * $2 = \u003c\u003c9*2=18\u003e\u003e18 dollars\nThe answer is: 18\n\n## Solution 5\n\nStep 1: Calculate the total number of eggs used (eaten and baked): 3 + 4 = \u003c\u003c3+4=7\u003e\u003e7\nStep 2: Calculate the number of eggs Janet has left to sell: 16 - 7 = \u003c\u003c16-7=9\u003e\u003e9 eggs\nStep 3: Calculate the amount Janet makes at the farmers\u0027 market: 9 * $2 = \u003c\u003c9*2=18\u003e\u003e18 dollars\nThe answer is: 18 -"
]
}
```
This subset can be loaded as:
```python
from datasets import load_dataset
ds = load_dataset("plaguss/test_math_shepherd_prm_4", "default")
```
Or simply as it follows, since there's only one configuration and is named `default`:
```python
from datasets import load_dataset
ds = load_dataset("plaguss/test_math_shepherd_prm_4")
```
</details>
## References
```
@misc{2312.08935,
Author = {Peiyi Wang and Lei Li and Zhihong Shao and R. X. Xu and Damai Dai and Yifei Li and Deli Chen and Y. Wu and Zhifang Sui},
Title = {Math-Shepherd: Verify and Reinforce LLMs Step-by-step without Human Annotations},
Year = {2023},
Eprint = {arXiv:2312.08935},
}
```
```
@misc{2312.08935,
Author = {Peiyi Wang and Lei Li and Zhihong Shao and R. X. Xu and Damai Dai and Yifei Li and Deli Chen and Y. Wu and Zhifang Sui},
Title = {Math-Shepherd: Verify and Reinforce LLMs Step-by-step without Human Annotations},
Year = {2023},
Eprint = {arXiv:2312.08935},
}
```
```
@misc{wang2024mathshepherdverifyreinforcellms,
title={Math-Shepherd: Verify and Reinforce LLMs Step-by-step without Human Annotations},
author={Peiyi Wang and Lei Li and Zhihong Shao and R. X. Xu and Damai Dai and Yifei Li and Deli Chen and Y. Wu and Zhifang Sui},
year={2024},
eprint={2312.08935},
archivePrefix={arXiv},
primaryClass={cs.AI},
url={https://arxiv.org/abs/2312.08935},
}
```
```
@misc{wang2024mathshepherdverifyreinforcellms,
title={Math-Shepherd: Verify and Reinforce LLMs Step-by-step without Human Annotations},
author={Peiyi Wang and Lei Li and Zhihong Shao and R. X. Xu and Damai Dai and Yifei Li and Deli Chen and Y. Wu and Zhifang Sui},
year={2024},
eprint={2312.08935},
archivePrefix={arXiv},
primaryClass={cs.AI},
url={https://arxiv.org/abs/2312.08935},
}
```
|
jeongseokoh/MATH-SHEPHERD-CLS | jeongseokoh | "2024-11-26T11:09:21Z" | 23 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T11:07:05Z" | ---
dataset_info:
features:
- name: input
dtype: string
- name: labels
sequence: int64
splits:
- name: train
num_bytes: 402145574
num_examples: 444655
download_size: 170385595
dataset_size: 402145574
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
xodhks/crawling-emotions-in-google-train | xodhks | "2024-11-26T11:30:55Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T11:30:26Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: emotion
dtype: string
- name: label
dtype: int32
- name: image_id
dtype: string
splits:
- name: train
num_bytes: 138798551.373
num_examples: 2827
download_size: 172574494
dataset_size: 138798551.373
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
BDRC/LayoutSegmentation_Dataset | BDRC | "2024-11-26T11:37:06Z" | 23 | 0 | [
"license:mit",
"modality:image",
"region:us"
] | null | "2024-11-26T11:30:35Z" | ---
license: mit
---
|
abluva/NSL-KDD-V2 | abluva | "2024-11-26T11:48:47Z" | 23 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-11-26T11:32:47Z" | ---
license: apache-2.0
---
The NSL-KDD V2 is an extended version of NSL-KDD original dataset. The dataset is normalised and 1 additional class is synthesised by mixing multiple non-benign classes.
To cite the dataset, please reference the original paper with DOI: 10.1109/SmartNets61466.2024.10577645. The paper is published in IEEE SmartNets and can be accessed https://www.researchgate.net/publication/382034618_Blender-GAN_Multi-Target_Conditional_Generative_Adversarial_Network_for_Novel_Class_Synthetic_Data_Generation.
Citation info:
Madhubalan, Akshayraj & Gautam, Amit & Tiwary, Priya. (2024). Blender-GAN: Multi-Target Conditional Generative Adversarial Network for Novel Class Synthetic Data Generation. 1-7. 10.1109/SmartNets61466.2024.10577645.
This dataset was made by Abluva Inc, a Palo Alto based, research-driven Data Protection firm. Our data protection platform empowers customers to secure data through advanced security mechanisms such as Fine Grained Access control and sophisticated depersonalization algorithms (e.g. Pseudonymization, Anonymization and Randomization). Abluva's Data Protection solutions facilitate data democratization within and outside the organizations, mitigating the concerns related to theft and compliance. The innovative intrusion detection algorithm by Abluva employs patented technologies for an intricately balanced approach that excludes normal access deviations, ensuring intrusion detection without disrupting the business operations. Abluva’s Solution enables organizations to extract further value from their data by enabling secure Knowledge Graphs and deploying Secure Data as a Service among other novel uses of data. Committed to providing a safe and secure environment, Abluva empowers organizations to unlock the full potential of their data. |
dgambettaphd/D_gen3_run1_llama2-7b_wiki_doc1000_real32_synt96 | dgambettaphd | "2024-11-26T11:40:29Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T11:40:26Z" | ---
dataset_info:
features:
- name: id
dtype: int64
- name: doc
dtype: string
splits:
- name: train
num_bytes: 527114
num_examples: 1000
download_size: 291319
dataset_size: 527114
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
abluva/CIC-IDS-2017-V2 | abluva | "2024-11-26T12:22:06Z" | 23 | 0 | [
"license:apache-2.0",
"size_categories:1M<n<10M",
"format:csv",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T12:14:15Z" | ---
license: apache-2.0
---
The CIC-IDS-V2 is an extended version of the original CIC-IDS 2017 dataset. The dataset is normalised and 1 new class called "Comb" is added which is a combination of synthesised data of multiple non-benign classes.
To cite the dataset, please reference the original paper with DOI: 10.1109/SmartNets61466.2024.10577645. The paper is published in IEEE SmartNets and can be accessed here: https://www.researchgate.net/publication/382034618_Blender-GAN_Multi-Target_Conditional_Generative_Adversarial_Network_for_Novel_Class_Synthetic_Data_Generation.
Citation info:
Madhubalan, Akshayraj & Gautam, Amit & Tiwary, Priya. (2024). Blender-GAN: Multi-Target Conditional Generative Adversarial Network for Novel Class Synthetic Data Generation. 1-7. 10.1109/SmartNets61466.2024.10577645.
This dataset was made by Abluva Inc, a Palo Alto based, research-driven Data Protection firm. Our data protection platform empowers customers to secure data through advanced security mechanisms such as Fine Grained Access control and sophisticated depersonalization algorithms (e.g. Pseudonymization, Anonymization and Randomization). Abluva's Data Protection solutions facilitate data democratization within and outside the organizations, mitigating the concerns related to theft and compliance. The innovative intrusion detection algorithm by Abluva employs patented technologies for an intricately balanced approach that excludes normal access deviations, ensuring intrusion detection without disrupting the business operations. Abluva’s Solution enables organizations to extract further value from their data by enabling secure Knowledge Graphs and deploying Secure Data as a Service among other novel uses of data. Committed to providing a safe and secure environment, Abluva empowers organizations to unlock the full potential of their data. |
paolordls/crosslg-contaminated-benchmark-en-og-sm-0 | paolordls | "2024-11-26T12:41:25Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T12:41:22Z" | ---
dataset_info:
features:
- name: fake_news
dtype: string
- name: scenario_id
dtype: int64
- name: real_news
dtype: string
- name: fake_keyword
dtype: string
- name: real_question
dtype: string
- name: fake_question
dtype: string
- name: real_answer
dtype: string
- name: fake_answer
dtype: string
splits:
- name: train
num_bytes: 78984
num_examples: 10
download_size: 98638
dataset_size: 78984
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_2f4195a9-8b19-4ab6-9b99-6e163e60251b | argilla-internal-testing | "2024-11-26T12:46:24Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T12:46:23Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_12d13e90-cc21-4654-abc9-20351dcf8c30 | argilla-internal-testing | "2024-11-26T12:46:28Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T12:46:27Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
dgambettaphd/D_gen4_run1_llama2-7b_wiki_doc1000_real32_synt96 | dgambettaphd | "2024-11-26T13:11:11Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T13:11:08Z" | ---
dataset_info:
features:
- name: id
dtype: int64
- name: doc
dtype: string
splits:
- name: train
num_bytes: 527236
num_examples: 1000
download_size: 291220
dataset_size: 527236
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
liangzid/robench-eval-Time29-s | liangzid | "2024-11-26T13:36:16Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T13:36:13Z" | ---
dataset_info:
features:
- name: shuffled_text
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: label
dtype: string
splits:
- name: train
num_bytes: 2099085
num_examples: 2931
download_size: 1171607
dataset_size: 2099085
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
VladislavKaryukin/AmericanEnglishTTS | VladislavKaryukin | "2024-11-26T13:45:17Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T13:39:09Z" | ---
dataset_info:
features:
- name: audio
dtype: audio
- name: script
dtype: string
- name: source
dtype: string
- name: info
dtype: string
splits:
- name: train
num_bytes: 435903544.24
num_examples: 3837
download_size: 478889042
dataset_size: 435903544.24
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
This is an English Speech to Text Corpora
Identifier: SLR45
Summary: A free American English corpus by Surfingtech (www.surfing.ai), containing utterances from 10 speakers, Each speaker has about 350 utterances;
Category: Speech
License: Creative Common BY-NC-ND 4.0 (Attribution-NonCommercial-NoDerivatives 4.0 International) |
neoneye/simon-arc-combine-v197 | neoneye | "2024-11-26T13:41:24Z" | 23 | 0 | [
"task_categories:image-to-text",
"task_categories:text-to-image",
"language:en",
"license:mit",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"image-to-text",
"text-to-image"
] | "2024-11-26T13:40:03Z" | ---
license: mit
task_categories:
- image-to-text
- text-to-image
language:
- en
pretty_name: simons ARC (abstraction & reasoning corpus) combined datasets version 197
size_categories:
- 1K<n<10K
configs:
- config_name: default
data_files:
- split: train
path: data.jsonl
---
# Version 1
A combination of multiple datasets.
Datasets: `dataset_solve_color.jsonl`, `dataset_solve_rotate.jsonl`, `dataset_solve_translate.jsonl`.
# Version 2
Datasets: `dataset_solve_color.jsonl`, `dataset_solve_rotate.jsonl`, `dataset_solve_translate.jsonl`.
# Version 3
Datasets: `dataset_solve_color.jsonl`, `dataset_solve_rotate.jsonl`, `dataset_solve_translate.jsonl`.
# Version 4
Added a shared dataset name for all these datasets: `SIMON-SOLVE-V1`. There may be higher version numbers in the future.
My hypothesis: Having a version number in the dataset name, it may be easier to unlearn incorrect training data.
Datasets: `dataset_solve_color.jsonl`, `dataset_solve_rotate.jsonl`, `dataset_solve_translate.jsonl`.
# Version 5
Different random seed.
# Version 6
Using `SIMON-SOLVE-V1` everywhere. Remove the `SIMON-SOLVE-COLOR`, `SIMON-SOLVE-ROTATE`, `SIMON-SOLVE-TRANSLATE`.
# Version 7
Using `SIMON-SOLVE-V1` everywhere.
# Version 8
Same settings. Different seed as usual.
# Version 9
Switching from context length 256 to context length 512.
Increasing the image sizes so the prompt length stays below 512.
`dataset_solve_color`, image size: 1-13.
`dataset_solve_rotate`, image size: 1-9.
`dataset_solve_translate`, image size: 3-9.
# Version 10
Same settings. Different seed as usual.
# Version 11
Same settings. Different seed as usual.
# Version 12
Added 1 more pair to the examples. Now it's 2-4 examples. Previously it was 2-3 examples.
# Version 13
Same settings. Different seed as usual.
# Version 14
Same settings. Different seed as usual.
# Version 15
Same settings. Different seed as usual.
# Version 16
Added `Predict the output image.`
Disabled prediction of rows.
Disabled prediction of height.
# Version 17
Same settings. Different seed as usual.
Using the `DatasetGenerator` and the `DatasetItemListBuilder`.
# Version 18
Added datasets.
Datasets:
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_cellular_automaton.jsonl` - added.
- `dataset_shape.jsonl` - added.
# Version 19
Added dataset.
Datasets:
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_cellular_automaton.jsonl`
- `dataset_shape.jsonl`
- `dataset_image.jsonl` - added.
# Version 20
Bigger images.
# Version 21
Added dataset. Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_shape.jsonl`
- `dataset_mass.jsonl` - added.
# Version 22
Added dataset.
Datasets:
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_cellular_automaton.jsonl`
- `dataset_shape.jsonl`
- `dataset_image.jsonl`
- `dataset_mass.jsonl`
- `dataset_histogram.jsonl` - added.
Bigger image sizes.
Number of rows=200k. Was previously 100k rows.
# Version 23
`dataset_mass.jsonl`: increased to `max_mass=5`.
# Version 24
`dataset_mass.jsonl`: increased to `max_mass=6`.
# Version 25
Different seed.
# Version 26
`dataset_mass.jsonl`: increased to `max_mass=25`.
Different seed.
# Version 27
Different seed.
# Version 28
Different seed.
# Version 29
Different seed.
# Version 30
Different seed.
# Version 31
Different seed.
# Version 32
Different seed.
# Version 33
Disabled some dataset.
Datasets:
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_mass.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_rotate.jsonl` - disabled
- `dataset_solve_translate.jsonl` - disabled
- `dataset_cellular_automaton.jsonl`
# Version 34
Enabled all datasets.
# Version 35
Regenerated all datasets with new random seeds.
# Version 36
Added dataset `dataset_scale.jsonl`.
Disabled some dataset.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
# Version 37
Enabled all datasets
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
# Version 38
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl` - added
# Version 39
Regenerated all datasets with new random seeds.
# Version 40
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl` - added
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl`
# Version 41
Regenerated all datasets with new random seeds.
# Version 42
Added dataset. Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl` - added
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 43
Enabled all datasets.
# Version 44
Regenerated all datasets with new random seeds.
# Version 45
Extended the `dataset_shape.jsonl` with these new `PixelConnectivity` types: `CORNER4`, `LR2`, `TB2`, `TLBR2`, `TRBL2`.
Hopefully this makes the model better at making sense of diagonal structures, which it currently struggles with.
# Version 46
Regenerated all datasets with new random seeds.
# Version 47
Added dataset. Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl` - added
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl`
# Version 48
Enabled all datasets.
# Version 49
Bigger `max_mass`. From 6 to 8.
# Version 50
Added dataset.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl` - added
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl`
# Version 51
Regenerated all datasets with new random seeds.
# Version 52
Regenerated all datasets with new random seeds.
# Version 53
Regenerated all datasets with new random seeds.
# Version 54
Added dataset.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_erotion.jsonl` - added
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl`
# Version 55
Added dataset. Disabled most datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl` - added
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_compress.jsonl` - disabled
- `dataset_solve_rotate.jsonl` - disabled
- `dataset_solve_scale.jsonl` - disabled
- `dataset_solve_symmetry.jsonl` - disabled
- `dataset_solve_translate.jsonl` - disabled
- `dataset_symmetry.jsonl` - disabled
# Version 56
Enabled all datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl`
# Version 57
Regenerated all datasets with new random seeds.
# Version 58
Disabled most datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 59
Added new datasets.
Disabled most datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl` - added
- `dataset_solve_fractal.jsonl` - added
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 60
Incremented random seed
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 61
Enabled all datasets.
More padding inside the `dataset_solve_fractal.jsonl`.
# Version 62
All datasets still enabled.
Turning up the parameter for `dataset_solve_fractal.jsonl`.
scale_input from 3 to 4.
scale_output from 3 to 4.
max_image_size from 3 to 4.
max_pad_count from 4 to 5.
# Version 63
Disabled several datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl`
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_compress.jsonl` - disabled
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_rotate.jsonl` - disabled
- `dataset_solve_scale.jsonl` - disabled
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl` - disabled
- `dataset_symmetry.jsonl` - disabled
# Version 64
Added dataset.
Increased the number of rows in the jsonl file from 200k to 300k.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_outline.jsonl` - added
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl`
# Version 65
random seed.
# Version 66
Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl`
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_compress.jsonl` - disabled
- `dataset_solve_erosion.jsonl` - disabled
- `dataset_solve_fractal.jsonl` - disabled
- `dataset_solve_outline.jsonl` - disabled
- `dataset_solve_rotate.jsonl` - disabled
- `dataset_solve_scale.jsonl` - disabled
- `dataset_solve_symmetry.jsonl` - disabled
- `dataset_solve_translate.jsonl` - disabled
- `dataset_symmetry.jsonl` - disabled
# Version 67
Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl` - enabled
- `dataset_solve_compress.jsonl` - enabled
- `dataset_solve_erosion.jsonl` - enabled
- `dataset_solve_fractal.jsonl` - enabled
- `dataset_solve_outline.jsonl` - enabled
- `dataset_solve_rotate.jsonl` - enabled
- `dataset_solve_scale.jsonl` - enabled
- `dataset_solve_symmetry.jsonl` - enabled
- `dataset_solve_translate.jsonl` - enabled
- `dataset_symmetry.jsonl`
# Version 68
Enabled all datasets.
# Version 69
Different random seed.
# Version 70
Different random seed.
# Version 71
Different random seed.
# Version 72
Different random seed.
# Version 73
Different random seed.
# Version 74
Major update to `dataset_solve_symmetry.jsonl`.
# Version 75
Different random seed.
# Version 76
Disabled some datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 77
Enabled all datasets.
# Version 78
Major update to `dataset_solve_symmetry.jsonl`.
# Version 79
Different random seed.
# Version 80
Different random seed.
# Version 81
Different random seed.
# Version 82
Added dataset.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl` - added
- `dataset_symmetry.jsonl`
# Version 83
Disabled datasets that don't solve ARC puzzles.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 84
Added dataset `dataset_solve_grid.jsonl`.
Disabled datasets that don't solve ARC puzzles.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl` - added
- `dataset_solve_outline.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 85
Disabled datasets that don't solve ARC puzzles.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 86
Enabled all datasets.
# Version 87
Disabled datasets that don't solve ARC puzzles.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 88
Added dataset `dataset_solve_probecolor.jsonl` with all directions enabled.
Disabled datasets that don't solve ARC puzzles.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 89
Enabled all datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 90
Disabled some of the datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl` - disabled
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl` - disabled
- `dataset_solve_outline.jsonl` - disabled
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl` - disabled
- `dataset_solve_zindex.jsonl` - disabled
- `dataset_symmetry.jsonl` - disabled
# Version 91
Added dataset.
Enabled all datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_mass.jsonl` - added
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 92
Different random seed.
# Version 93
Added dataset.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl` - added
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 94
Added dataset.
Disabled datasets that don't solve ARC tasks.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_bool.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl` - added
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 95
Added dataset.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl` - added
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 96
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl` - major update.
- `dataset_symmetry.jsonl`
# Version 97
Disabled the first half of the datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_bool.jsonl` - disabled
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_compress.jsonl` - disabled
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 98
Disabled the second half of the datasets.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl` - disabled
- `dataset_solve_erosion.jsonl` - disabled
- `dataset_solve_fractal.jsonl` - disabled
- `dataset_solve_grid.jsonl` - disabled
- `dataset_solve_half.jsonl` - disabled
- `dataset_solve_mass.jsonl` - disabled
- `dataset_solve_outline.jsonl` - disabled
- `dataset_solve_probecolor.jsonl` - disabled
- `dataset_solve_rotate.jsonl` - disabled
- `dataset_solve_scale.jsonl` - disabled
- `dataset_solve_symmetry.jsonl` - disabled
- `dataset_solve_translate.jsonl` - disabled
- `dataset_solve_zindex.jsonl` - disabled
- `dataset_symmetry.jsonl` - disabled
# Version 99
Disabled a quarter of the datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_bool.jsonl` - disabled
- `dataset_solve_color.jsonl` - disabled
- `dataset_solve_compress.jsonl` - disabled
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl` - disabled
- `dataset_solve_rotate.jsonl` - disabled
- `dataset_solve_scale.jsonl` - disabled
- `dataset_solve_symmetry.jsonl` - disabled
- `dataset_solve_translate.jsonl` - disabled
- `dataset_solve_zindex.jsonl` - disabled
- `dataset_symmetry.jsonl` - disabled
# Version 100
Added dataset.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl` - added
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 101
Disabled the non-solving datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_bool.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 102
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl` - added
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 103
Different random seed.
# Version 104
Disabled the non-solving datasets.
Datasets:
- `dataset_cellular_automaton.jsonl` - disabled
- `dataset_dilation.jsonl` - disabled
- `dataset_erotion.jsonl` - disabled
- `dataset_histogram.jsonl` - disabled
- `dataset_image.jsonl` - disabled
- `dataset_image_pair.jsonl` - disabled
- `dataset_mass.jsonl` - disabled
- `dataset_scale.jsonl` - disabled
- `dataset_shape.jsonl` - disabled
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl` - disabled
# Version 105
Major update to `dataset_solve_scale.jsonl` with scaling down noisy images.
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl` - scale down noisy images
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 106
Different random seed.
# Version 107
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_ray.jsonl` - added
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 108
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_flip.jsonl` - added
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_ray.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 109
Different random seed.
# Version 110
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_flip.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_halfplane.jsonl` - added
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_ray.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 111
Different random seed.
# Version 112
Different random seed.
# Version 113
Different random seed.
# Version 114
Major update to the `dataset_solve_mass.jsonl`, so it now includes `mass_compare_adjacent_rows` and `mass_compare_adjacent_columns`.
# Version 115
Added dataset
Datasets:
- `dataset_cellular_automaton.jsonl`
- `dataset_dilation.jsonl`
- `dataset_erotion.jsonl`
- `dataset_histogram.jsonl`
- `dataset_image.jsonl`
- `dataset_image_pair.jsonl`
- `dataset_mass.jsonl`
- `dataset_scale.jsonl`
- `dataset_shape.jsonl`
- `dataset_solve_bool.jsonl`
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_color.jsonl`
- `dataset_solve_compress.jsonl`
- `dataset_solve_edge.jsonl`
- `dataset_solve_erosion.jsonl`
- `dataset_solve_flip.jsonl`
- `dataset_solve_fractal.jsonl`
- `dataset_solve_gravity.jsonl` - added
- `dataset_solve_grid.jsonl`
- `dataset_solve_half.jsonl`
- `dataset_solve_halfplane.jsonl`
- `dataset_solve_mask.jsonl`
- `dataset_solve_mass.jsonl`
- `dataset_solve_outline.jsonl`
- `dataset_solve_probecolor.jsonl`
- `dataset_solve_ray.jsonl`
- `dataset_solve_rotate.jsonl`
- `dataset_solve_scale.jsonl`
- `dataset_solve_symmetry.jsonl`
- `dataset_solve_translate.jsonl`
- `dataset_solve_zindex.jsonl`
- `dataset_symmetry.jsonl`
# Version 116
Hypothesis: if I train with a smaller dataset, will it converge faster?
Reduced the number of rows in this dataset from 300k rows to 10k rows.
# Version 117
Interesting, 10k rows seems to work fine with the model training.
Picked new random rows.
# Version 118
Still going with 10k rows.
Picked new random rows.
# Version 119
Still going with 10k rows.
Picked new random rows.
# Version 120
Switched to 20k rows.
# Version 121
Still going with 20k rows.
Picked new random rows.
# Version 122
20k rows.
Added `dataset_solve_reverse.jsonl`.
# Version 123
Doubled the number of rows to 40k rows.
# Version 124
Set row count to 100k rows.
Major update to `dataset_solve_gravity.jsonl`.
# Version 125
Row count: 100k rows.
# Version 126
Row count: 20k rows.
Only these datasets are enabled:
```txt
dataset_solve_bool.jsonl
dataset_solve_boundingbox.jsonl
dataset_solve_color.jsonl
dataset_solve_compress.jsonl
dataset_solve_edge.jsonl
dataset_solve_erosion.jsonl
dataset_solve_flip.jsonl
dataset_solve_fractal.jsonl
dataset_solve_gravity.jsonl
dataset_solve_grid.jsonl
dataset_solve_half.jsonl
dataset_solve_halfplane.jsonl
dataset_solve_mask.jsonl
dataset_solve_mass.jsonl
dataset_solve_outline.jsonl
dataset_solve_probecolor.jsonl
dataset_solve_ray.jsonl
dataset_solve_reverse.jsonl
dataset_solve_rotate.jsonl
dataset_solve_scale.jsonl
dataset_solve_symmetry.jsonl
dataset_solve_translate.jsonl
dataset_solve_zindex.jsonl
```
# Version 127
Row count: 20k rows.
Only these datasets are enabled:
```txt
dataset_solve_scale.jsonl
dataset_solve_symmetry.jsonl
dataset_solve_translate.jsonl
dataset_solve_zindex.jsonl
```
# Version 128
Row count: 20k rows.
Only these datasets are enabled:
```txt
dataset_solve_probecolor.jsonl
dataset_solve_ray.jsonl
dataset_solve_reverse.jsonl
dataset_solve_rotate.jsonl
```
# Version 129
Row count: 20k rows.
Only these datasets are enabled:
```txt
dataset_solve_gravity.jsonl
dataset_solve_grid.jsonl
dataset_solve_half.jsonl
dataset_solve_halfplane.jsonl
dataset_solve_mask.jsonl
dataset_solve_mass.jsonl
dataset_solve_outline.jsonl
```
# Version 130
Row count: 20k rows.
Only these datasets are enabled:
```txt
dataset_solve_bool.jsonl
dataset_solve_boundingbox.jsonl
dataset_solve_color.jsonl
dataset_solve_compress.jsonl
dataset_solve_edge.jsonl
dataset_solve_erosion.jsonl
dataset_solve_flip.jsonl
dataset_solve_fractal.jsonl
```
# Version 131
Switched back to 300k rows.
Enabled all the datasets.
# Version 132
Random seed.
# Version 133
Removed the rows that are longer than what can be fitted inside a 512 context length.
# Version 134
Random seed.
# Version 135
Random seed.
# Version 136
Major update to the `dataset_solve_gravity.jsonl` file.
# Version 137
Added dataset `dataset_solve_skew.jsonl`.
# Version 138
Disabled several datasets.
```txt
# 'dataset_cellular_automaton.jsonl',
# 'dataset_dilation.jsonl',
# 'dataset_erosion.jsonl',
# 'dataset_histogram.jsonl',
# 'dataset_image.jsonl',
# 'dataset_image_pair.jsonl',
# 'dataset_mass.jsonl',
# 'dataset_scale.jsonl',
# 'dataset_shape.jsonl',
# 'dataset_solve_bool.jsonl',
'dataset_solve_boundingbox.jsonl',
'dataset_solve_color.jsonl',
'dataset_solve_compress.jsonl',
'dataset_solve_edge.jsonl',
'dataset_solve_erosion.jsonl',
'dataset_solve_flip.jsonl',
'dataset_solve_fractal.jsonl',
'dataset_solve_gravity.jsonl',
'dataset_solve_grid.jsonl',
'dataset_solve_half.jsonl',
# 'dataset_solve_halfplane.jsonl',
'dataset_solve_mask.jsonl',
'dataset_solve_mass.jsonl',
'dataset_solve_outline.jsonl',
'dataset_solve_probecolor.jsonl',
# 'dataset_solve_ray.jsonl',
# 'dataset_solve_reverse.jsonl',
'dataset_solve_rotate.jsonl',
'dataset_solve_scale.jsonl',
# 'dataset_solve_skew.jsonl',
'dataset_solve_symmetry.jsonl',
'dataset_solve_translate.jsonl',
'dataset_solve_zindex.jsonl',
# 'dataset_symmetry.jsonl',
```
# Version 139
Disabled several datasets.
```txt
'dataset_cellular_automaton.jsonl',
'dataset_dilation.jsonl',
'dataset_erosion.jsonl',
'dataset_histogram.jsonl',
'dataset_image.jsonl',
'dataset_image_pair.jsonl',
'dataset_mass.jsonl',
'dataset_scale.jsonl',
'dataset_shape.jsonl',
'dataset_solve_bool.jsonl',
# 'dataset_solve_boundingbox.jsonl',
# 'dataset_solve_color.jsonl',
# 'dataset_solve_compress.jsonl',
# 'dataset_solve_edge.jsonl',
# 'dataset_solve_erosion.jsonl',
# 'dataset_solve_flip.jsonl',
# 'dataset_solve_fractal.jsonl',
# 'dataset_solve_gravity.jsonl',
# 'dataset_solve_grid.jsonl',
# 'dataset_solve_half.jsonl',
'dataset_solve_halfplane.jsonl',
# 'dataset_solve_mask.jsonl',
# 'dataset_solve_mass.jsonl',
# 'dataset_solve_outline.jsonl',
# 'dataset_solve_probecolor.jsonl',
'dataset_solve_ray.jsonl',
'dataset_solve_reverse.jsonl',
# 'dataset_solve_rotate.jsonl',
# 'dataset_solve_scale.jsonl',
'dataset_solve_skew.jsonl',
# 'dataset_solve_symmetry.jsonl',
# 'dataset_solve_translate.jsonl',
# 'dataset_solve_zindex.jsonl',
'dataset_symmetry.jsonl',
```
# Version 140
Enabled all datasets.
Added new dataset: `dataset_solve_cross.jsonl`.
# Version 141
Switched to 30k rows.
Disabled several datasets.
```txt
# 'dataset_cellular_automaton.jsonl',
# 'dataset_dilation.jsonl',
# 'dataset_erosion.jsonl',
# 'dataset_histogram.jsonl',
# 'dataset_image.jsonl',
# 'dataset_image_pair.jsonl',
# 'dataset_mass.jsonl',
# 'dataset_scale.jsonl',
# 'dataset_shape.jsonl',
# 'dataset_solve_bool.jsonl',
'dataset_solve_boundingbox.jsonl',
'dataset_solve_color.jsonl',
'dataset_solve_compress.jsonl',
# 'dataset_solve_cross.jsonl',
'dataset_solve_edge.jsonl',
'dataset_solve_erosion.jsonl',
'dataset_solve_flip.jsonl',
'dataset_solve_fractal.jsonl',
# 'dataset_solve_gravity.jsonl',
'dataset_solve_grid.jsonl',
'dataset_solve_half.jsonl',
# 'dataset_solve_halfplane.jsonl',
'dataset_solve_mask.jsonl',
'dataset_solve_mass.jsonl',
'dataset_solve_outline.jsonl',
'dataset_solve_probecolor.jsonl',
'dataset_solve_ray.jsonl',
# 'dataset_solve_reverse.jsonl',
'dataset_solve_rotate.jsonl',
'dataset_solve_scale.jsonl',
'dataset_solve_skew.jsonl',
'dataset_solve_symmetry.jsonl',
'dataset_solve_translate.jsonl',
# 'dataset_solve_zindex.jsonl',
# 'dataset_symmetry.jsonl',
```
# Version 142
Switched to 300k rows.
Enabled all datasets.
Switched from 512 context to 1024 context.
# Version 143
Bigger images in `dataset_solve_cross.jsonl` and in `dataset_solve_mass.jsonl`.
# Version 144
Major update to `dataset_solve_symmetry.jsonl`.
# Version 145
Added `dataset_solve_span.jsonl`.
# Version 146
Extended `dataset_solve_span.jsonl` with `generate_task_with_template_lines`.
# Version 147
Extended `dataset_solve_span.jsonl` with `generate_task_with_alternate`.
# Version 148
Added `dataset_solve_count.jsonl`.
# Version 149
Randomized.
# Version 150
Upgraded context length for several datasets from 512 to 1024.
# Version 151
Randomized.
# Version 152
Randomized.
# Version 153
Extended `dataset_solve_mask.jsonl` with `generate_task_repair_rectangle_and_crop`.
# Version 154
Extended `dataset_solve_color.jsonl` with `generate_task_replace_color`.
# Version 155
Major update to datasets in the range from `dataset_solve_axxx.jsonl` to `dataset_solve_mask.jsonl`.
Now there is an earlier prediction for the output that is to be predicted. It may contain a hint, or it may be garbage that is to be ignored.
# Version 156
Only 2000 rows.
Only these datasets.
'dataset_cellular_automaton.jsonl',
'dataset_dilation.jsonl',
'dataset_erosion.jsonl',
'dataset_histogram.jsonl',
'dataset_image.jsonl',
'dataset_image_pair.jsonl',
'dataset_mass.jsonl',
'dataset_scale.jsonl',
'dataset_shape.jsonl',
'dataset_symmetry.jsonl',
# Version 157
Only these datasets.
- 'dataset_solve_bool.jsonl',
- 'dataset_solve_boundingbox.jsonl',
- 'dataset_solve_color.jsonl',
- 'dataset_solve_compress.jsonl',
- 'dataset_solve_count.jsonl',
- 'dataset_solve_cross.jsonl',
- 'dataset_solve_edge.jsonl',
- 'dataset_solve_erosion.jsonl',
- 'dataset_solve_flip.jsonl',
- 'dataset_solve_fractal.jsonl',
- 'dataset_solve_gravity.jsonl',
- 'dataset_solve_grid.jsonl',
- 'dataset_solve_half.jsonl',
- 'dataset_solve_halfplane.jsonl',
- 'dataset_solve_mask.jsonl',
- 'dataset_solve_mass.jsonl',
- 'dataset_solve_outline.jsonl',
- 'dataset_solve_probecolor.jsonl',
- 'dataset_solve_ray.jsonl',
- 'dataset_solve_reverse.jsonl',
- 'dataset_solve_rotate.jsonl',
- 'dataset_solve_scale.jsonl',
- 'dataset_solve_span.jsonl',
- 'dataset_solve_skew.jsonl',
- 'dataset_solve_symmetry.jsonl',
- 'dataset_solve_translate.jsonl',
- 'dataset_solve_zindex.jsonl',
# Version 158
Only these datasets.
- `dataset_solve_boundingbox.jsonl`
- `dataset_solve_rectangle.jsonl`
# Version 159
Enabled all the `_solve_` datasets.
# Version 160
Regenerated all the `_solve_` datasets with new seed.
# Version 161
Regenerated all the `_solve_` datasets with new seed.
# Version 162
Replaced RLE compressed response with raw pixel response.
# Version 163
Added more generators
- DatasetSolveCount
- DatasetSolveCross
- DatasetSolveEdge
- DatasetSolveErosion
- DatasetSolveFlip
- DatasetSolveFractal
# Version 164
Increased row count from 1000 to 2000.
# Version 165
Added more generators.
# Version 166
Added more generators.
# Version 167
Added more generators.
# Version 168
Added more generators.
# Version 169
Generated data.
# Version 170
Generated data.
# Version 171
Generated data.
Increased output context length from 256 to 512.
# Version 172
Generated data.
# Version 173
Generated data.
# Version 174
Generated data.
# Version 175
Generated data.
# Version 176
Generated data.
# Version 177
Increased the number of rows from 2000 to 4000.
Generated data.
# Version 178
Generated data.
# Version 179
Generated data.
# Version 180
Generated data.
# Version 181
Generated data.
# Version 182
Generated data.
# Version 183
Generated data.
# Version 184
Generated data.
# Version 185
Generated data.
# Version 186
Generated data.
# Version 187
Generated data.
# Version 188
Generated data.
# Version 189
Added `DatasetSolveDeform` dataset generator.
# Version 190
Generated data.
# Version 191
Generated data.
# Version 192
Generated data.
# Version 193
Generated data.
# Version 194
Generated data.
# Version 195
Generated data.
# Version 196
Increased the number of rows from 4000 to 8000.
Generated data.
# Version 197
Generated data.
|
Mateusz1017/company_reports_features_combined_full | Mateusz1017 | "2024-11-26T15:03:02Z" | 23 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T13:41:03Z" | ---
dataset_info:
features:
- name: __index_level_0__
dtype: float64
- name: features
sequence:
sequence: float64
- name: cik
dtype: string
- name: year
dtype: string
- name: section_1
dtype: string
- name: company_name
dtype: string
- name: sic_code
dtype: string
- name: input_ids
sequence: int64
- name: ticker
sequence: string
- name: returns
dtype: float64
- name: logged_monthly_returns_matrix
sequence: float64
- name: input_ids_length
dtype: float64
splits:
- name: train
num_bytes: 28037048967
num_examples: 24752
download_size: 13150027544
dataset_size: 28037048967
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_90a4eb58-04d7-41b3-828d-c5bd2dd28d49 | argilla-internal-testing | "2024-11-26T13:47:59Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T13:47:58Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_57612550-2c18-468b-b26a-510ceb8ea742 | argilla-internal-testing | "2024-11-26T13:48:00Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T13:48:00Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_d8fa9656-52c8-4edd-a477-efa116e2f8ac | argilla-internal-testing | "2024-11-26T14:32:49Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T14:32:48Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
dgambettaphd/D_gen5_run1_llama2-7b_wiki_doc1000_real32_synt96 | dgambettaphd | "2024-11-26T14:44:16Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T14:44:14Z" | ---
dataset_info:
features:
- name: id
dtype: int64
- name: doc
dtype: string
splits:
- name: train
num_bytes: 527240
num_examples: 1000
download_size: 291142
dataset_size: 527240
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
jfcalvo/export-testing-262 | jfcalvo | "2024-11-26T14:44:55Z" | 23 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T14:44:51Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: status
dtype:
class_label:
names:
'0': pending
'1': completed
- name: _server_id
dtype: string
- name: text
dtype: string
- name: ranking_0.responses
sequence:
sequence:
- name: value
dtype: string
- name: rank
dtype: int64
- name: ranking_0.responses.users
sequence: string
- name: ranking_0.responses.status
sequence:
class_label:
names:
'0': draft
'1': submitted
'2': discarded
splits:
- name: train
num_bytes: 13864425
num_examples: 10000
download_size: 8799674
dataset_size: 13864425
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
jfcalvo/export-testing-264 | jfcalvo | "2024-11-26T14:56:30Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T14:56:22Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: status
dtype:
class_label:
names:
'0': pending
'1': completed
- name: _server_id
dtype: string
- name: messages
sequence:
- name: role
dtype: string
- name: content
dtype: string
- name: label_0.responses
sequence:
class_label:
names:
'0': positive
'1': negative
'2': neutral
- name: label_0.responses.users
sequence: string
- name: label_0.responses.status
sequence:
class_label:
names:
'0': draft
'1': submitted
'2': discarded
splits:
- name: train
num_bytes: 51332770
num_examples: 8800
download_size: 25909462
dataset_size: 51332770
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_8177bd4c-c0d3-46d8-a735-e076514d5911 | argilla-internal-testing | "2024-11-26T15:01:16Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T15:01:15Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_5435b4b3-8356-4f6a-a420-665063ee83ce | argilla-internal-testing | "2024-11-26T15:01:22Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T15:01:21Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_2f8ec016-33c8-4a80-87cb-840712159229 | argilla-internal-testing | "2024-11-26T15:01:42Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T15:01:40Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
liangzid/robench-eval-Time5-c | liangzid | "2024-12-06T13:26:43Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T15:06:24Z" | ---
dataset_info:
features:
- name: text_with_holes
dtype: string
- name: text_candidates
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: label
dtype: string
splits:
- name: train
num_bytes: 2583613
num_examples: 2276
download_size: 1453317
dataset_size: 2583613
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
AnonymousLLMer/finance-wiki-gold | AnonymousLLMer | "2024-11-26T15:29:30Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T15:26:22Z" | ---
dataset_info:
features:
- name: title
dtype: string
- name: text
dtype: string
- name: from
dtype: string
splits:
- name: train
num_bytes: 2060316
num_examples: 393
download_size: 1124573
dataset_size: 2060316
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
timothy0503/saycan-plan | timothy0503 | "2024-11-26T15:40:00Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T15:34:23Z" | ---
dataset_info:
features:
- name: QUESTION
dtype: string
- name: ANSWER
dtype: string
splits:
- name: train
num_bytes: 14865
num_examples: 99
download_size: 4465
dataset_size: 14865
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
bobbyDiggs/my-face-images | bobbyDiggs | "2024-11-26T15:59:23Z" | 23 | 0 | [
"license:apache-2.0",
"size_categories:n<1K",
"format:imagefolder",
"modality:image",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-11-26T15:49:30Z" | ---
license: apache-2.0
---
|
liangzid/robench-eval-Time10-c | liangzid | "2024-12-06T16:18:12Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T16:01:02Z" | ---
dataset_info:
features:
- name: text_with_holes
dtype: string
- name: text_candidates
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: label
dtype: string
splits:
- name: train
num_bytes: 2569162
num_examples: 2275
download_size: 1452169
dataset_size: 2569162
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
AnonymousLLMer/finance-corpus-krx | AnonymousLLMer | "2024-11-26T16:02:49Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T16:02:46Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: source
dtype: string
splits:
- name: train
num_bytes: 1243980
num_examples: 315
download_size: 439813
dataset_size: 1243980
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
simonycl/ultrafeedback_binarized_gold | simonycl | "2024-11-26T16:11:11Z" | 23 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T16:08:11Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: prompt_id
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: all_generated_responses
sequence: string
- name: score_chosen
dtype: float64
- name: score_rejected
dtype: float64
splits:
- name: train
num_bytes: 571020642
num_examples: 61135
- name: test
num_bytes: 18658914
num_examples: 2000
download_size: 313197607
dataset_size: 589679556
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
violetxi/NUMINA-V2-Clean-Blocks-1800_2000-0_5 | violetxi | "2024-11-26T16:10:37Z" | 23 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T16:10:35Z" | ---
dataset_info:
features:
- name: problem
dtype: string
- name: source
dtype: string
- name: is_correct
dtype: bool
- name: target_answer
dtype: string
- name: solution
dtype: string
- name: solution_steps
dtype: string
- name: attempts
dtype: string
- name: model_answer
dtype: string
splits:
- name: train
num_bytes: 104951167
num_examples: 16016
download_size: 12412171
dataset_size: 104951167
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
neuronbit/uaspeech_train_casted | neuronbit | "2024-11-26T17:30:01Z" | 23 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T17:28:14Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: target
dtype: string
- name: path
dtype: string
- name: dataset
dtype: string
- name: severity
dtype: string
- name: speech
dtype:
audio:
sampling_rate: 16000
splits:
- name: train
num_bytes: 4155002694.0
num_examples: 38656
download_size: 3970809447
dataset_size: 4155002694.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
neuronbit/uaspeech_test_casted | neuronbit | "2024-11-26T17:30:49Z" | 23 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T17:30:02Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: target
dtype: string
- name: path
dtype: string
- name: dataset
dtype: string
- name: severity
dtype: string
- name: speech
dtype:
audio:
sampling_rate: 16000
splits:
- name: train
num_bytes: 1870051557.5
num_examples: 18740
download_size: 1750628281
dataset_size: 1870051557.5
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
liangzid/robench-eval-Time18-c | liangzid | "2024-12-06T20:53:53Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T17:31:18Z" | ---
dataset_info:
features:
- name: text_with_holes
dtype: string
- name: text_candidates
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: label
dtype: string
splits:
- name: train
num_bytes: 2576973
num_examples: 2276
download_size: 1456240
dataset_size: 2576973
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
theonlyamos/ghanaian_languages_to_english_translation_and_transcription_dataset | theonlyamos | "2024-11-26T19:43:06Z" | 23 | 0 | [
"license:apache-2.0",
"size_categories:n<1K",
"format:audiofolder",
"modality:audio",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-11-26T17:32:41Z" | ---
license: apache-2.0
---
|
dgambettaphd/D_gen7_run1_llama2-7b_wiki_doc1000_real32_synt96 | dgambettaphd | "2024-11-26T18:14:51Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T18:14:48Z" | ---
dataset_info:
features:
- name: id
dtype: int64
- name: doc
dtype: string
splits:
- name: train
num_bytes: 527238
num_examples: 1000
download_size: 291221
dataset_size: 527238
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Noggerone/finetuning_demo | Noggerone | "2024-11-26T18:19:53Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T18:19:50Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
splits:
- name: train
num_bytes: 1272100
num_examples: 2058
download_size: 574230
dataset_size: 1272100
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
SeppeV/joke_gen_of_mistral_ft_double_dpo_10pc | SeppeV | "2024-11-26T18:20:11Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T18:20:10Z" | ---
dataset_info:
features:
- name: jokeText
dtype: string
- name: userId
dtype: int64
splits:
- name: train
num_bytes: 210123
num_examples: 130
download_size: 103350
dataset_size: 210123
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
amuvarma/combined-shuffled-text-orca-ds | amuvarma | "2024-11-26T18:30:54Z" | 23 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T18:29:25Z" | ---
dataset_info:
features:
- name: messages
dtype: string
splits:
- name: train
num_bytes: 4812681874.0
num_examples: 1046410
download_size: 2271308074
dataset_size: 4812681874.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
amuvarma/combined-shuffled-dev-text-orca-ds | amuvarma | "2024-11-26T18:31:19Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T18:31:17Z" | ---
dataset_info:
features:
- name: messages
dtype: string
splits:
- name: train
num_bytes: 13797694.614921493
num_examples: 3000
download_size: 6352893
dataset_size: 13797694.614921493
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
LongThan/Hallucination_MLLMs_hallucinated_ques | LongThan | "2024-11-26T18:40:01Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T18:36:46Z" | ---
dataset_info:
features:
- name: question
dtype: string
- name: type
dtype: string
- name: category
dtype: string
- name: correct_ans
dtype: string
- name: source
dtype: string
- name: best_ans
dtype: string
- name: KOSMOS2_ans
dtype: string
splits:
- name: train
num_bytes: 58018
num_examples: 114
download_size: 38394
dataset_size: 58018
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
liangzid/robench-eval-Time24-c | liangzid | "2024-12-07T00:17:37Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T18:38:29Z" | ---
dataset_info:
features:
- name: text_with_holes
dtype: string
- name: text_candidates
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: label
dtype: string
splits:
- name: train
num_bytes: 2578018
num_examples: 2275
download_size: 1454764
dataset_size: 2578018
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
amuvarma/text-dev-0-processed-1 | amuvarma | "2024-11-26T19:17:27Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T19:14:25Z" | ---
dataset_info:
features:
- name: messages
dtype: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
splits:
- name: train
num_bytes: 333146914
num_examples: 3000
download_size: 17209671
dataset_size: 333146914
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
IanAndJohn/imgs_with_metadata | IanAndJohn | "2024-11-26T19:43:31Z" | 23 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T19:42:06Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: Latitude
dtype: float64
- name: Longitude
dtype: float64
splits:
- name: train
num_bytes: 7287735.0
num_examples: 492
download_size: 7282292
dataset_size: 7287735.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
deivism/meddocan_labels21 | deivism | "2024-11-26T19:58:39Z" | 23 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-11-26T19:58:35Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: document_id
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence:
class_label:
names:
'0': O
'1': B-DAT
'2': I-DAT
'3': B-ORG
'4': I-ORG
'5': B-PER
'6': I-PER
'7': B-LOC
'8': I-LOC
'9': B-IDE
'10': I-IDE
'11': B-SEX
'12': I-SEX
'13': B-MAI
'14': I-MAI
'15': B-TEL
'16': I-TEL
'17': B-PRO
'18': I-PRO
'19': B-MISC
'20': I-MISC
splits:
- name: train
num_bytes: 9141826
num_examples: 4731
- name: validation
num_bytes: 4826850
num_examples: 2469
- name: test
num_bytes: 4586544
num_examples: 2374
download_size: 1868649
dataset_size: 18555220
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|