datasetId
stringlengths 5
121
| author
stringlengths 2
42
| last_modified
unknown | downloads
int64 0
2.63M
| likes
int64 0
6.49k
| tags
sequencelengths 1
7.92k
| task_categories
sequencelengths 0
47
⌀ | createdAt
unknown | card
stringlengths 15
1M
|
---|---|---|---|---|---|---|---|---|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_32_64_0.01_64_BestF1_pl | ferrazzipietro | "2024-12-02T18:19:54Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:19:51Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: offsets
sequence: int64
- name: text
dtype: string
- name: type
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 157591
num_examples: 101
- name: test
num_bytes: 1105280
num_examples: 654
download_size: 273569
dataset_size: 1262871
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_16_64_0.01_64_BestF1_gr | ferrazzipietro | "2024-12-02T18:20:38Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:20:34Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: offsets
sequence: int64
- name: text
dtype: string
- name: type
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 172478
num_examples: 94
- name: test
num_bytes: 1556265
num_examples: 738
download_size: 314142
dataset_size: 1728743
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_32_64_0.05_64_BestF1_gr | ferrazzipietro | "2024-12-02T18:21:10Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:21:07Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: offsets
sequence: int64
- name: text
dtype: string
- name: type
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 172478
num_examples: 94
- name: test
num_bytes: 1556265
num_examples: 738
download_size: 313532
dataset_size: 1728743
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_16_16_0.05_64_BestF1_gr | ferrazzipietro | "2024-12-02T18:21:42Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:21:39Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: offsets
sequence: int64
- name: text
dtype: string
- name: type
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 172478
num_examples: 94
- name: test
num_bytes: 1556265
num_examples: 738
download_size: 313929
dataset_size: 1728743
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
Carlosgg14/Yuta | Carlosgg14 | "2024-12-02T18:22:57Z" | 9 | 0 | [
"license:openrail",
"region:us"
] | null | "2024-12-02T18:21:52Z" | ---
license: openrail
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_32_32_0.01_64_BestF1_gr | ferrazzipietro | "2024-12-02T18:22:13Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:22:11Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: offsets
sequence: int64
- name: text
dtype: string
- name: type
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 172478
num_examples: 94
- name: test
num_bytes: 1556265
num_examples: 738
download_size: 314229
dataset_size: 1728743
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_64_64_0.05_64_BestF1_gr | ferrazzipietro | "2024-12-02T18:22:44Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:22:42Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: offsets
sequence: int64
- name: text
dtype: string
- name: type
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 172478
num_examples: 94
- name: test
num_bytes: 1556265
num_examples: 738
download_size: 314194
dataset_size: 1728743
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_64_32_0.05_64_BestF1_gr | ferrazzipietro | "2024-12-02T18:23:15Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:23:13Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: offsets
sequence: int64
- name: text
dtype: string
- name: type
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 172478
num_examples: 94
- name: test
num_bytes: 1556265
num_examples: 738
download_size: 316033
dataset_size: 1728743
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_32_16_0.01_64_BestF1_gr | ferrazzipietro | "2024-12-02T18:23:45Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:23:43Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: offsets
sequence: int64
- name: text
dtype: string
- name: type
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 172478
num_examples: 94
- name: test
num_bytes: 1556265
num_examples: 738
download_size: 314842
dataset_size: 1728743
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_64_32_0.01_64_BestF1_gr | ferrazzipietro | "2024-12-02T18:24:16Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:24:14Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: offsets
sequence: int64
- name: text
dtype: string
- name: type
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 172478
num_examples: 94
- name: test
num_bytes: 1556265
num_examples: 738
download_size: 313240
dataset_size: 1728743
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_64_16_0.05_64_BestF1_gr | ferrazzipietro | "2024-12-02T18:24:47Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:24:45Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: offsets
sequence: int64
- name: text
dtype: string
- name: type
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 172478
num_examples: 94
- name: test
num_bytes: 1556265
num_examples: 738
download_size: 314776
dataset_size: 1728743
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_16_32_0.01_64_BestF1_gr | ferrazzipietro | "2024-12-02T18:25:18Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:25:16Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: offsets
sequence: int64
- name: text
dtype: string
- name: type
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 172478
num_examples: 94
- name: test
num_bytes: 1556265
num_examples: 738
download_size: 313629
dataset_size: 1728743
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_64_16_0.01_64_BestF1_gr | ferrazzipietro | "2024-12-02T18:27:52Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:27:50Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: offsets
sequence: int64
- name: text
dtype: string
- name: type
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 172478
num_examples: 94
- name: test
num_bytes: 1556265
num_examples: 738
download_size: 317783
dataset_size: 1728743
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_16_16_0.05_64_BestF1_en | ferrazzipietro | "2024-12-02T18:35:16Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:35:13Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: id
dtype: string
- name: offsets
sequence: int64
- name: role
dtype: string
- name: semantic_type_id
dtype: string
- name: text
dtype: string
- name: type
dtype: string
- name: original_text
dtype: string
- name: original_id
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 418249
num_examples: 106
- name: test
num_bytes: 2472788
num_examples: 666
download_size: 292646
dataset_size: 2891037
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
marco-schouten/exp1 | marco-schouten | "2024-12-02T18:35:58Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:35:56Z" | ---
dataset_info:
features:
- name: input_image
dtype: image
- name: edit_prompt
dtype: string
- name: edited_image
dtype: image
splits:
- name: train
num_bytes: 116903.0
num_examples: 28
download_size: 56123
dataset_size: 116903.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
marco-schouten/exp2 | marco-schouten | "2024-12-02T18:36:02Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:35:59Z" | ---
dataset_info:
features:
- name: input_image
dtype: image
- name: edit_prompt
dtype: string
- name: edited_image
dtype: image
splits:
- name: train
num_bytes: 532978.0
num_examples: 111
download_size: 282927
dataset_size: 532978.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
marco-schouten/exp3 | marco-schouten | "2024-12-02T18:36:05Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:36:03Z" | ---
dataset_info:
features:
- name: input_image
dtype: image
- name: edit_prompt
dtype: string
- name: edited_image
dtype: image
splits:
- name: train
num_bytes: 321937.0
num_examples: 64
download_size: 178481
dataset_size: 321937.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
marco-schouten/exp4 | marco-schouten | "2024-12-02T18:36:08Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:36:06Z" | ---
dataset_info:
features:
- name: input_image
dtype: image
- name: edit_prompt
dtype: string
- name: edited_image
dtype: image
splits:
- name: train
num_bytes: 658792.0
num_examples: 172
download_size: 237697
dataset_size: 658792.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
marco-schouten/exp5 | marco-schouten | "2024-12-02T18:36:12Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:36:10Z" | ---
dataset_info:
features:
- name: input_image
dtype: image
- name: edit_prompt
dtype: string
- name: edited_image
dtype: image
splits:
- name: train
num_bytes: 1279936.0
num_examples: 291
download_size: 583043
dataset_size: 1279936.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
marco-schouten/exp6 | marco-schouten | "2024-12-02T18:36:16Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:36:13Z" | ---
dataset_info:
features:
- name: input_image
dtype: image
- name: edit_prompt
dtype: string
- name: edited_image
dtype: image
splits:
- name: train
num_bytes: 765821.0
num_examples: 224
download_size: 241138
dataset_size: 765821.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
marco-schouten/exp7 | marco-schouten | "2024-12-02T18:36:21Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:36:18Z" | ---
dataset_info:
features:
- name: input_image
dtype: image
- name: edit_prompt
dtype: string
- name: edited_image
dtype: image
splits:
- name: train
num_bytes: 3291157.0
num_examples: 888
download_size: 1280848
dataset_size: 3291157.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
marco-schouten/exp8 | marco-schouten | "2024-12-02T18:36:24Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:36:22Z" | ---
dataset_info:
features:
- name: input_image
dtype: image
- name: edit_prompt
dtype: string
- name: edited_image
dtype: image
splits:
- name: train
num_bytes: 1994325.0
num_examples: 512
download_size: 804259
dataset_size: 1994325.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
marco-schouten/exp9 | marco-schouten | "2024-12-02T18:36:29Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:36:26Z" | ---
dataset_info:
features:
- name: input_image
dtype: image
- name: edit_prompt
dtype: string
- name: edited_image
dtype: image
splits:
- name: train
num_bytes: 4025641.728
num_examples: 1376
download_size: 913827
dataset_size: 4025641.728
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
marco-schouten/exp10 | marco-schouten | "2024-12-02T18:36:37Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:36:33Z" | ---
dataset_info:
features:
- name: input_image
dtype: image
- name: edit_prompt
dtype: string
- name: edited_image
dtype: image
splits:
- name: train
num_bytes: 8311728.384
num_examples: 2328
download_size: 2712417
dataset_size: 8311728.384
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Caua261/datasetTcc | Caua261 | "2024-12-02T18:39:12Z" | 9 | 0 | [
"license:other",
"region:us"
] | null | "2024-12-02T18:39:12Z" | ---
license: other
license_name: test
license_link: LICENSE
---
|
all-oj-gen/ds_chat_reflct_rmsprop_iter2_sppo_hard_new_all_oj_iter2-bin_all_pairs | all-oj-gen | "2024-12-02T19:44:05Z" | 9 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T19:44:04Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: test
dtype: string
splits:
- name: train
num_bytes: 32328416
num_examples: 10785
download_size: 9136605
dataset_size: 32328416
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_chat_reflct_rmsprop_iter2_sppo_hard_new_all_oj_iter2-bin_all_pairs"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
richmondsin/truthfulqa_it_mc2_results | richmondsin | "2024-12-02T21:10:33Z" | 9 | 0 | [
"region:us"
] | null | "2024-12-02T21:10:20Z" | ---
pretty_name: Evaluation run of google/gemma-2-2b
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b)\nThe dataset is\
\ composed of 0 configuration(s), each one corresponding to one of the evaluated\
\ task.\n\nThe dataset has been created from 2 run(s). Each run can be found as\
\ a specific split in each configuration, the split being named using the timestamp\
\ of the run.The \"train\" split is always pointing to the latest results.\n\nAn\
\ additional configuration \"results\" store all the aggregated results of the run.\n\
\nTo load the details from a run, you can for instance do the following:\n```python\n\
from datasets import load_dataset\ndata = load_dataset(\n\t\"richmondsin/truthfulqa_it_mc2_results\"\
,\n\tname=\"google__gemma-2-2b__truthfulqa_it_mc2\",\n\tsplit=\"latest\"\n)\n```\n\
\n## Latest results\n\nThese are the [latest results from run 2024-12-02T16-10-20.246364](https://huggingface.co/datasets/richmondsin/truthfulqa_it_mc2_results/blob/main/google/gemma-2-2b/results_2024-12-02T16-10-20.246364.json)\
\ (note that there might be results for other tasks in the repos if successive evals\
\ didn't cover the same tasks. You find each in the results and the \"latest\" split\
\ for each eval):\n\n```python\n{\n \"all\": {\n \"truthfulqa_it_mc2\"\
: {\n \"alias\": \"truthfulqa_it_mc2\",\n \"acc,none\": 0.4362238821542554,\n\
\ \"acc_stderr,none\": 0.01598467768604646\n }\n },\n \"\
truthfulqa_it_mc2\": {\n \"alias\": \"truthfulqa_it_mc2\",\n \"acc,none\"\
: 0.4362238821542554,\n \"acc_stderr,none\": 0.01598467768604646\n }\n\
}\n```"
repo_url: https://huggingface.co/google/gemma-2-2b
leaderboard_url: ''
point_of_contact: ''
configs:
- config_name: google__gemma-2-2b__truthfulqa_it_mc2
data_files:
- split: 2024_12_02T16_10_20.246364
path:
- '**/samples_truthfulqa_it_mc2_2024-12-02T16-10-20.246364.jsonl'
- split: latest
path:
- '**/samples_truthfulqa_it_mc2_2024-12-02T16-10-20.246364.jsonl'
---
# Dataset Card for Evaluation run of google/gemma-2-2b
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b)
The dataset is composed of 0 configuration(s), each one corresponding to one of the evaluated task.
The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results.
An additional configuration "results" store all the aggregated results of the run.
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset(
"richmondsin/truthfulqa_it_mc2_results",
name="google__gemma-2-2b__truthfulqa_it_mc2",
split="latest"
)
```
## Latest results
These are the [latest results from run 2024-12-02T16-10-20.246364](https://huggingface.co/datasets/richmondsin/truthfulqa_it_mc2_results/blob/main/google/gemma-2-2b/results_2024-12-02T16-10-20.246364.json) (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval):
```python
{
"all": {
"truthfulqa_it_mc2": {
"alias": "truthfulqa_it_mc2",
"acc,none": 0.4362238821542554,
"acc_stderr,none": 0.01598467768604646
}
},
"truthfulqa_it_mc2": {
"alias": "truthfulqa_it_mc2",
"acc,none": 0.4362238821542554,
"acc_stderr,none": 0.01598467768604646
}
}
```
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
lingvenvist/animacy-zh-nogroups-xtr-synthetic-filtered | lingvenvist | "2024-12-02T21:34:32Z" | 9 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T21:34:18Z" | ---
dataset_info:
features:
- name: sentences
dtype: string
- name: tokens
sequence: string
- name: anim_tags
sequence:
class_label:
names:
'0': N
'1': A
'2': H
- name: target-indexes
sequence: int64
- name: source
dtype: string
splits:
- name: train
num_bytes: 7744341
num_examples: 22923
- name: test
num_bytes: 1534376
num_examples: 4577
- name: validation
num_bytes: 1036151
num_examples: 3072
download_size: 5067570
dataset_size: 10314868
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
- split: validation
path: data/validation-*
---
|
mathreward/data_collection_8b_math_1 | mathreward | "2024-12-02T21:59:30Z" | 9 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T21:58:54Z" | ---
dataset_info:
features:
- name: idx
dtype: int64
- name: gt
dtype: string
- name: my_solu
dtype: string
splits:
- name: train
num_bytes: 2281260230
num_examples: 562500
download_size: 859608808
dataset_size: 2281260230
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mathreward/data_collection_8b_math_4 | mathreward | "2024-12-02T22:09:50Z" | 9 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:08:42Z" | ---
dataset_info:
features:
- name: idx
dtype: int64
- name: gt
dtype: string
- name: my_solu
dtype: string
splits:
- name: train
num_bytes: 3715831133
num_examples: 705000
download_size: 1513598540
dataset_size: 3715831133
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mathreward/data_collection_8b_math_3 | mathreward | "2024-12-02T22:09:51Z" | 9 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:08:43Z" | ---
dataset_info:
features:
- name: idx
dtype: int64
- name: gt
dtype: string
- name: my_solu
dtype: string
splits:
- name: train
num_bytes: 4018723852
num_examples: 757500
download_size: 1633212244
dataset_size: 4018723852
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
taufiqsyed/salami_cleaned_sampled_trial_trunc_enriched | taufiqsyed | "2024-12-02T22:24:41Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:24:03Z" | ---
dataset_info:
features:
- name: audio
dtype: audio
- name: song_id
dtype: string
- name: structure
dtype: string
- name: start_time
dtype: float64
- name: end_time
dtype: float64
- name: tempos
dtype: string
- name: keys
dtype: string
- name: instruments
dtype: string
- name: genres
dtype: string
- name: moods
dtype: string
- name: metadata
dtype: string
splits:
- name: train
num_bytes: 529245167.0
num_examples: 200
- name: eval
num_bytes: 84679315.0
num_examples: 32
download_size: 598466538
dataset_size: 613924482.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: eval
path: data/eval-*
---
|
richmondsin/truthfulqa_id_mc1_results | richmondsin | "2024-12-02T22:32:12Z" | 9 | 0 | [
"region:us"
] | null | "2024-12-02T22:32:01Z" | ---
pretty_name: Evaluation run of google/gemma-2-2b
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b)\nThe dataset is\
\ composed of 0 configuration(s), each one corresponding to one of the evaluated\
\ task.\n\nThe dataset has been created from 2 run(s). Each run can be found as\
\ a specific split in each configuration, the split being named using the timestamp\
\ of the run.The \"train\" split is always pointing to the latest results.\n\nAn\
\ additional configuration \"results\" store all the aggregated results of the run.\n\
\nTo load the details from a run, you can for instance do the following:\n```python\n\
from datasets import load_dataset\ndata = load_dataset(\n\t\"richmondsin/truthfulqa_id_mc1_results\"\
,\n\tname=\"google__gemma-2-2b__truthfulqa_id_mc1\",\n\tsplit=\"latest\"\n)\n```\n\
\n## Latest results\n\nThese are the [latest results from run 2024-12-02T17-32-01.349991](https://huggingface.co/datasets/richmondsin/truthfulqa_id_mc1_results/blob/main/google/gemma-2-2b/results_2024-12-02T17-32-01.349991.json)\
\ (note that there might be results for other tasks in the repos if successive evals\
\ didn't cover the same tasks. You find each in the results and the \"latest\" split\
\ for each eval):\n\n```python\n{\n \"all\": {\n \"truthfulqa_id_mc1\"\
: {\n \"alias\": \"truthfulqa_id_mc1\",\n \"acc,none\": 0.2953890489913545,\n\
\ \"acc_stderr,none\": 0.017330267741201465,\n \"acc_norm,none\"\
: 0.29971181556195964,\n \"acc_norm_stderr,none\": 0.01740298373741313\n\
\ }\n },\n \"truthfulqa_id_mc1\": {\n \"alias\": \"truthfulqa_id_mc1\"\
,\n \"acc,none\": 0.2953890489913545,\n \"acc_stderr,none\": 0.017330267741201465,\n\
\ \"acc_norm,none\": 0.29971181556195964,\n \"acc_norm_stderr,none\"\
: 0.01740298373741313\n }\n}\n```"
repo_url: https://huggingface.co/google/gemma-2-2b
leaderboard_url: ''
point_of_contact: ''
configs:
- config_name: google__gemma-2-2b__truthfulqa_id_mc1
data_files:
- split: 2024_12_02T17_32_01.349991
path:
- '**/samples_truthfulqa_id_mc1_2024-12-02T17-32-01.349991.jsonl'
- split: latest
path:
- '**/samples_truthfulqa_id_mc1_2024-12-02T17-32-01.349991.jsonl'
---
# Dataset Card for Evaluation run of google/gemma-2-2b
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b)
The dataset is composed of 0 configuration(s), each one corresponding to one of the evaluated tasks.
The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results.
An additional configuration "results" stores all the aggregated results of the run.
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset(
"richmondsin/truthfulqa_id_mc1_results",
name="google__gemma-2-2b__truthfulqa_id_mc1",
split="latest"
)
```
## Latest results
These are the [latest results from run 2024-12-02T17-32-01.349991](https://huggingface.co/datasets/richmondsin/truthfulqa_id_mc1_results/blob/main/google/gemma-2-2b/results_2024-12-02T17-32-01.349991.json) (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval):
```python
{
"all": {
"truthfulqa_id_mc1": {
"alias": "truthfulqa_id_mc1",
"acc,none": 0.2953890489913545,
"acc_stderr,none": 0.017330267741201465,
"acc_norm,none": 0.29971181556195964,
"acc_norm_stderr,none": 0.01740298373741313
}
},
"truthfulqa_id_mc1": {
"alias": "truthfulqa_id_mc1",
"acc,none": 0.2953890489913545,
"acc_stderr,none": 0.017330267741201465,
"acc_norm,none": 0.29971181556195964,
"acc_norm_stderr,none": 0.01740298373741313
}
}
```
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
pclucas14/nqa-RAG-256_14_24 | pclucas14 | "2024-12-02T22:36:55Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:36:51Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 25733272
num_examples: 65
download_size: 10060126
dataset_size: 25733272
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_16_24 | pclucas14 | "2024-12-02T22:42:11Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:42:10Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 26754022
num_examples: 65
download_size: 11080276
dataset_size: 26754022
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_15_24 | pclucas14 | "2024-12-02T22:42:37Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:42:36Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 26439230
num_examples: 65
download_size: 10829894
dataset_size: 26439230
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_10_24 | pclucas14 | "2024-12-02T22:43:06Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:43:05Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 25975615
num_examples: 66
download_size: 10487988
dataset_size: 25975615
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_13_24 | pclucas14 | "2024-12-02T22:45:42Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:45:40Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 26682014
num_examples: 65
download_size: 10384710
dataset_size: 26682014
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_19_24 | pclucas14 | "2024-12-02T22:50:09Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:50:07Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 26696045
num_examples: 65
download_size: 10996107
dataset_size: 26696045
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_9_24 | pclucas14 | "2024-12-02T22:50:46Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:50:44Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 26396960
num_examples: 66
download_size: 11485151
dataset_size: 26396960
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
taufiqsyed/salami_truncsplit_legit1__enriched | taufiqsyed | "2024-12-02T23:12:30Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:53:43Z" | ---
dataset_info:
features:
- name: audio
dtype: audio
- name: song_id
dtype: string
- name: structure
dtype: string
- name: start_time
dtype: float64
- name: end_time
dtype: float64
- name: tempos
dtype: string
- name: keys
dtype: string
- name: instruments
dtype: string
- name: genres
dtype: string
- name: moods
dtype: string
- name: metadata
dtype: string
splits:
- name: train
num_bytes: 2199008170.0
num_examples: 831
- name: eval
num_bytes: 84679315.0
num_examples: 32
download_size: 2235466097
dataset_size: 2283687485.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: eval
path: data/eval-*
---
|
pclucas14/nqa-RAG-256_12_24 | pclucas14 | "2024-12-02T22:54:17Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:54:15Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 26495607
num_examples: 65
download_size: 10695044
dataset_size: 26495607
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_8_24 | pclucas14 | "2024-12-02T22:56:05Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:56:03Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 25864745
num_examples: 66
download_size: 9692932
dataset_size: 25864745
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_3_24 | pclucas14 | "2024-12-02T22:56:50Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:56:49Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 26650607
num_examples: 66
download_size: 10586949
dataset_size: 26650607
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_17_24 | pclucas14 | "2024-12-02T22:58:12Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:58:09Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 25745301
num_examples: 65
download_size: 10301059
dataset_size: 25745301
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_18_24 | pclucas14 | "2024-12-02T22:58:42Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:58:40Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 26217561
num_examples: 65
download_size: 10286908
dataset_size: 26217561
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_6_24 | pclucas14 | "2024-12-02T22:59:14Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:59:13Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 27197480
num_examples: 66
download_size: 11257143
dataset_size: 27197480
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_2_24 | pclucas14 | "2024-12-02T22:59:37Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:59:36Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 27569614
num_examples: 66
download_size: 10931973
dataset_size: 27569614
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_0_24 | pclucas14 | "2024-12-02T23:00:23Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T23:00:21Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 27966150
num_examples: 66
download_size: 11309961
dataset_size: 27966150
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_11_24 | pclucas14 | "2024-12-02T23:00:47Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T23:00:45Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 26278970
num_examples: 66
download_size: 10248590
dataset_size: 26278970
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_1_24 | pclucas14 | "2024-12-02T23:01:01Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T23:00:59Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 27289971
num_examples: 66
download_size: 10874793
dataset_size: 27289971
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_7_24 | pclucas14 | "2024-12-02T23:03:28Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T23:03:27Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 26787005
num_examples: 66
download_size: 10503985
dataset_size: 26787005
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_4_24 | pclucas14 | "2024-12-02T23:05:52Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T23:05:50Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 26619269
num_examples: 66
download_size: 11036439
dataset_size: 26619269
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
doejn771/code_x_glue_ct_code_to_text_java_python | doejn771 | "2024-12-03T00:19:02Z" | 9 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T00:12:46Z" | ---
dataset_info:
features:
- name: id
dtype: int32
- name: repo
dtype: string
- name: path
dtype: string
- name: func_name
dtype: string
- name: original_string
dtype: string
- name: language
dtype: string
- name: code
dtype: string
- name: code_tokens
sequence: string
- name: docstring
dtype: string
- name: docstring_tokens
sequence: string
- name: sha
dtype: string
- name: url
dtype: string
splits:
- name: train
num_bytes: 1266216983
num_examples: 416743
- name: validation
num_bytes: 60254908
num_examples: 19097
- name: test
num_bytes: 79740441
num_examples: 25873
download_size: 480195417
dataset_size: 1406212332
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
richmondsin/truthfulqa_id_mc2_results | richmondsin | "2024-12-03T00:28:14Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:json",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T00:28:03Z" | ---
pretty_name: Evaluation run of google/gemma-2-2b
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b)\nThe dataset is\
\ composed of 0 configuration(s), each one corresponding to one of the evaluated\
\ task.\n\nThe dataset has been created from 2 run(s). Each run can be found as\
\ a specific split in each configuration, the split being named using the timestamp\
\ of the run.The \"train\" split is always pointing to the latest results.\n\nAn\
\ additional configuration \"results\" store all the aggregated results of the run.\n\
\nTo load the details from a run, you can for instance do the following:\n```python\n\
from datasets import load_dataset\ndata = load_dataset(\n\t\"richmondsin/truthfulqa_id_mc2_results\"\
,\n\tname=\"google__gemma-2-2b__truthfulqa_id_mc2\",\n\tsplit=\"latest\"\n)\n```\n\
\n## Latest results\n\nThese are the [latest results from run 2024-12-02T19-28-03.715223](https://huggingface.co/datasets/richmondsin/truthfulqa_id_mc2_results/blob/main/google/gemma-2-2b/results_2024-12-02T19-28-03.715223.json)\
\ (note that there might be results for other tasks in the repos if successive evals\
\ didn't cover the same tasks. You find each in the results and the \"latest\" split\
\ for each eval):\n\n```python\n{\n \"all\": {\n \"truthfulqa_id_mc2\"\
: {\n \"alias\": \"truthfulqa_id_mc2\",\n \"acc,none\": 0.4366475601155338,\n\
\ \"acc_stderr,none\": 0.016426278376888724\n }\n },\n \"\
truthfulqa_id_mc2\": {\n \"alias\": \"truthfulqa_id_mc2\",\n \"acc,none\"\
: 0.4366475601155338,\n \"acc_stderr,none\": 0.016426278376888724\n }\n\
}\n```"
repo_url: https://huggingface.co/google/gemma-2-2b
leaderboard_url: ''
point_of_contact: ''
configs:
- config_name: google__gemma-2-2b__truthfulqa_id_mc2
data_files:
- split: 2024_12_02T19_28_03.715223
path:
- '**/samples_truthfulqa_id_mc2_2024-12-02T19-28-03.715223.jsonl'
- split: latest
path:
- '**/samples_truthfulqa_id_mc2_2024-12-02T19-28-03.715223.jsonl'
---
# Dataset Card for Evaluation run of google/gemma-2-2b
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b)
The dataset is composed of 0 configuration(s), each one corresponding to one of the evaluated tasks.
The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results.
An additional configuration "results" stores all the aggregated results of the run.
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset(
"richmondsin/truthfulqa_id_mc2_results",
name="google__gemma-2-2b__truthfulqa_id_mc2",
split="latest"
)
```
## Latest results
These are the [latest results from run 2024-12-02T19-28-03.715223](https://huggingface.co/datasets/richmondsin/truthfulqa_id_mc2_results/blob/main/google/gemma-2-2b/results_2024-12-02T19-28-03.715223.json) (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval):
```python
{
"all": {
"truthfulqa_id_mc2": {
"alias": "truthfulqa_id_mc2",
"acc,none": 0.4366475601155338,
"acc_stderr,none": 0.016426278376888724
}
},
"truthfulqa_id_mc2": {
"alias": "truthfulqa_id_mc2",
"acc,none": 0.4366475601155338,
"acc_stderr,none": 0.016426278376888724
}
}
```
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
julia-se/tracka_mistral_fewshot_anger | julia-se | "2024-12-03T00:44:17Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T00:44:15Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: text
dtype: string
- name: Anger
dtype: int64
- name: Disgust
dtype: int64
- name: Fear
dtype: int64
- name: Joy
dtype: int64
- name: Sadness
dtype: int64
- name: Surprise
dtype: int64
- name: predicted_is_anger
dtype: int64
- name: y_anger
dtype: int64
splits:
- name: train
num_bytes: 472807
num_examples: 2226
download_size: 217016
dataset_size: 472807
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
juliadollis/stf_regex_ner_2_fuzzycosseno_80 | juliadollis | "2024-12-03T04:30:55Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T04:30:43Z" | ---
dataset_info:
features:
- name: inteiro_teor
dtype: string
- name: url_download
dtype: string
- name: dataDecisao
dtype: timestamp[ns]
- name: dataPublicacao
dtype: timestamp[ns]
- name: decisao
dtype: string
- name: descricaoClasse
dtype: string
- name: ementa
dtype: string
- name: id
dtype: string
- name: jurisprudenciaCitada
dtype: string
- name: ministroRelator
dtype: string
- name: nomeOrgaoJulgador
dtype: string
- name: numeroProcesso
dtype: string
- name: referenciasLegislativas
sequence: string
- name: siglaClasse
dtype: string
- name: tipoDeDecisao
dtype: string
- name: titulo
dtype: string
- name: acordaosSimilares
sequence: string
- name: partes_lista_texto
dtype: string
- name: temaProcs
sequence: string
- name: inteiro_teor_regex
dtype: string
- name: NER
struct:
- name: JURISPRUDENCIA
sequence: string
- name: LEGISLACAO
sequence: string
- name: LOCAL
sequence: string
- name: ORGANIZACAO
sequence: string
- name: PESSOA
sequence: string
- name: TEMPO
sequence: string
- name: desambiguacao
list:
- name: class
dtype: string
- name: count
dtype: int64
- name: elements
sequence: string
- name: entity
dtype: string
splits:
- name: train
num_bytes: 160888933
num_examples: 1000
download_size: 44243977
dataset_size: 160888933
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
infinite-dataset-hub/AnimalAlliesAlgorithm | infinite-dataset-hub | "2024-12-03T06:38:39Z" | 9 | 0 | [
"license:mit",
"size_categories:n<1K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"infinite-dataset-hub",
"synthetic"
] | null | "2024-12-03T06:38:38Z" | ---
license: mit
tags:
- infinite-dataset-hub
- synthetic
---
# AnimalAlliesAlgorithm
tags: vegan advocacy, efficacy prediction, animal rights
_Note: This is an AI-generated dataset so its content may be inaccurate or false_
**Dataset Description:**
The 'AnimalAlliesAlgorithm' dataset is a curated collection of textual data aimed at understanding the various dimensions of vegan advocacy. It encompasses a range of content, including advocacy campaigns, personal testimonies, scientific studies, and policy discussions related to animal rights and veganism. Each entry has been carefully labeled to reflect its primary focus within the realm of vegan advocacy, from awareness-raising to promoting policy change.
**CSV Content Preview:**
```
id,text,label
001,"The Vegan Society's recent campaign highlighted the environmental benefits of a plant-based diet.",Awareness
002,"An interview with a renowned vegan chef discussing the moral imperative of veganism.",PersonalTestimony
003,"A scientific study on the impact of factory farming on local wildlife populations.",ImpactStudy
004,"A blog post from a legislator advocating for stricter animal welfare laws.",PolicyChange
005,"Vegan bloggers collaborating to share recipes and tips for a cruelty-free lifestyle.",CommunitySupport
```
**Source of the data:**
The dataset was generated using the [Infinite Dataset Hub](https://huggingface.co/spaces/infinite-dataset-hub/infinite-dataset-hub) and microsoft/Phi-3-mini-4k-instruct using the query 'vegan advocacy':
- **Dataset Generation Page**: https://huggingface.co/spaces/infinite-dataset-hub/infinite-dataset-hub?q=vegan+advocacy&dataset=AnimalAlliesAlgorithm&tags=vegan+advocacy,+efficacy+prediction,+animal+rights
- **Model**: https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
- **More Datasets**: https://huggingface.co/datasets?other=infinite-dataset-hub
|
seachen/stable-1 | seachen | "2024-12-03T07:05:19Z" | 9 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-03T07:01:16Z" | ---
license: apache-2.0
---
|
r1v3r/bitflags-filterbyLLM-verified | r1v3r | "2024-12-04T03:42:39Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T08:28:15Z" | ---
dataset_info:
features:
- name: repo
dtype: string
- name: problem_statement
dtype: string
- name: hints_text
dtype: string
- name: instance_id
dtype: string
- name: issue_numbers
sequence: string
- name: base_commit
dtype: string
- name: test_patch
dtype: string
- name: version
dtype: string
- name: pull_number
dtype: int64
- name: created_at
dtype: string
- name: patch
dtype: string
- name: environment_setup_commit
dtype: string
- name: FAIL_TO_PASS
sequence: string
- name: PASS_TO_PASS
sequence: string
- name: FAIL_TO_FAIL
sequence: string
- name: PASS_TO_FAIL
sequence: 'null'
splits:
- name: train
num_bytes: 164014
num_examples: 9
download_size: 58139
dataset_size: 164014
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ELVISIO/incorrect_triplet | ELVISIO | "2024-12-03T08:48:48Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T08:48:47Z" | ---
dataset_info:
features:
- name: doc_regexclean01
dtype: string
- name: anchor
dtype: string
- name: positive
dtype: string
- name: negative
dtype: string
splits:
- name: train
num_bytes: 832690
num_examples: 558
download_size: 499310
dataset_size: 832690
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
HappyPablo/dec3_1 | HappyPablo | "2024-12-03T09:41:51Z" | 9 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot"
] | [
"robotics"
] | "2024-12-03T09:41:46Z" | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.0",
"robot_type": "so100",
"total_episodes": 1,
"total_frames": 794,
"total_tasks": 1,
"total_videos": 0,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:1"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
vantral/selkup_me_pl | vantral | "2024-12-03T10:32:15Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T10:32:12Z" | ---
dataset_info:
features:
- name: all
struct:
- name: interlinear-text
list:
- name: item
struct:
- name: source
dtype: string
- name: paragraph
list:
- name: item
struct:
- name: speaker
dtype: string
- name: phrase
list:
- name: item
struct:
- name: ft
dtype: string
- name: id
dtype: string
- name: participant
dtype: string
- name: timestamp
sequence: string
- name: word
list:
list:
- name: item
struct:
- name: grammar_tags
sequence: string
- name: translation
sequence: string
- name: txt
dtype: string
- name: morph
list:
- name: item
struct:
- name: gls
dtype: string
- name: id
dtype: string
- name: txt
dtype: string
- name: item
dtype: 'null'
splits:
- name: train
num_bytes: 29025
num_examples: 1
download_size: 23213
dataset_size: 29025
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
babs/podcast-12 | babs | "2024-12-03T10:35:49Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T10:35:44Z" | ---
dataset_info:
features:
- name: audio
dtype: audio
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 206176216.0
num_examples: 339
download_size: 194924855
dataset_size: 206176216.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
shreyasgite/so100_test | shreyasgite | "2024-12-03T10:48:11Z" | 9 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"so100",
"tutorial"
] | [
"robotics"
] | "2024-12-03T10:48:01Z" | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so100
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.0",
"robot_type": "so100",
"total_episodes": 2,
"total_frames": 2390,
"total_tasks": 1,
"total_videos": 4,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:2"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.laptop": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.phone": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
babs/podcast-16 | babs | "2024-12-03T10:48:21Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T10:48:16Z" | ---
dataset_info:
features:
- name: audio
dtype: audio
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 199714333.0
num_examples: 233
download_size: 190909606
dataset_size: 199714333.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
juliadollis/stf_regex_ner_2_fuzzyover_70 | juliadollis | "2024-12-03T13:30:26Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T13:29:39Z" | ---
dataset_info:
features:
- name: inteiro_teor
dtype: string
- name: url_download
dtype: string
- name: dataDecisao
dtype: timestamp[ns]
- name: dataPublicacao
dtype: timestamp[ns]
- name: decisao
dtype: string
- name: descricaoClasse
dtype: string
- name: ementa
dtype: string
- name: id
dtype: string
- name: jurisprudenciaCitada
dtype: string
- name: ministroRelator
dtype: string
- name: nomeOrgaoJulgador
dtype: string
- name: numeroProcesso
dtype: string
- name: referenciasLegislativas
sequence: string
- name: siglaClasse
dtype: string
- name: tipoDeDecisao
dtype: string
- name: titulo
dtype: string
- name: acordaosSimilares
sequence: string
- name: partes_lista_texto
dtype: string
- name: temaProcs
sequence: string
- name: inteiro_teor_regex
dtype: string
- name: NER
struct:
- name: JURISPRUDENCIA
sequence: string
- name: LEGISLACAO
sequence: string
- name: LOCAL
sequence: string
- name: ORGANIZACAO
sequence: string
- name: PESSOA
sequence: string
- name: TEMPO
sequence: string
- name: desambiguacao
list:
- name: class
dtype: string
- name: count
dtype: int64
- name: elements
sequence: string
- name: entity
dtype: string
splits:
- name: train
num_bytes: 157934422
num_examples: 1000
download_size: 43794395
dataset_size: 157934422
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
DT4LM/t5v1-1base_sst2_pair_leap | DT4LM | "2024-12-03T14:15:15Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T14:10:54Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype: int32
splits:
- name: train
num_bytes: 48613
num_examples: 637
download_size: 34535
dataset_size: 48613
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/opengpt_gpt-4o-mini_scale_x.25 | mlfoundations-dev | "2024-12-03T21:28:25Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T14:35:03Z" | ---
dataset_info:
features:
- name: language
dtype: string
- name: quantity
dtype: int64
- name: task
dtype: string
- name: instruction
dtype: string
- name: input
dtype: string
- name: output
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 3767638
num_examples: 1479
download_size: 1912183
dataset_size: 3767638
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Alwaly/parler_tts-descriptions-tags_bis_wom_test | Alwaly | "2024-12-03T15:29:01Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T15:28:59Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: utterance_pitch_mean
dtype: float32
- name: utterance_pitch_std
dtype: float32
- name: snr
dtype: float64
- name: c50
dtype: float64
- name: speaking_rate
dtype: string
- name: phonemes
dtype: string
- name: stoi
dtype: float64
- name: si-sdr
dtype: float64
- name: pesq
dtype: float64
- name: noise
dtype: string
- name: reverberation
dtype: string
- name: speech_monotony
dtype: string
- name: sdr_noise
dtype: string
- name: pesq_speech_quality
dtype: string
- name: text_description
dtype: string
splits:
- name: test
num_bytes: 770344
num_examples: 1995
download_size: 301291
dataset_size: 770344
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
Sprutz/TrollBlend-50 | Sprutz | "2024-12-03T17:02:49Z" | 9 | 0 | [
"language:en",
"size_categories:100K<n<1M",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"Troll",
"Misinformation"
] | null | "2024-12-03T16:53:51Z" | ---
language:
- en
tags:
- Troll
- Misinformation
size_categories:
- 100K<n<1M
---
This dataset is a collection of troll tweets and non troll tweets. The troll tweets are gathered from the Russian Troll dataset and the non troll tweets are a colelction of serious tweets from political tweets and tweets from the #metoo movement.
The tweets present in this dataset are very old and thus might not be a good standalone dataset to train a model on. Add more recent tweets for a model that generalizes better |
mlfoundations-dev/unnatural_instructions_gpt-4o-mini_scale_x.25 | mlfoundations-dev | "2024-12-03T21:28:28Z" | 9 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T17:19:58Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: constraints
dtype: string
- name: output
dtype: string
- name: alternative_formulation
dtype: string
- name: alternative_formulation_inlined
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 27774548
num_examples: 17504
download_size: 10360530
dataset_size: 27774548
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ashercn97/reasoning-v2-yay | ashercn97 | "2024-12-03T17:26:32Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T17:26:27Z" | ---
dataset_info:
features:
- name: text_id
dtype: string
- name: text
dtype: string
- name: label
sequence: string
- name: split_text
sequence: string
splits:
- name: train
num_bytes: 4677252
num_examples: 3000
download_size: 2658627
dataset_size: 4677252
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/unnatural_instructions_gpt-4o-mini_scale_x.5 | mlfoundations-dev | "2024-12-03T21:28:30Z" | 9 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T17:40:41Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: constraints
dtype: string
- name: output
dtype: string
- name: alternative_formulation
dtype: string
- name: alternative_formulation_inlined
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 52683197
num_examples: 33253
download_size: 19622868
dataset_size: 52683197
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/alpaca_scale_x.5 | mlfoundations-dev | "2024-12-03T21:28:56Z" | 9 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T18:26:52Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: output
dtype: string
- name: filtered_reason
dtype: 'null'
- name: filtered_decision
dtype: bool
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 57105095
num_examples: 72773
download_size: 34690806
dataset_size: 57105095
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
RyanYr/self-reflect_mini8Bit-t0_mistlarge-t12_om2-460k_binlabel_correction | RyanYr | "2024-12-03T18:58:46Z" | 9 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T18:58:31Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: response
dtype: string
- name: response@0_correctness
dtype: bool
- name: response@2_correctness
dtype: bool
splits:
- name: train
num_bytes: 1110721389
num_examples: 247730
download_size: 409878232
dataset_size: 1110721389
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
haydenbspence/distilabel-example | haydenbspence | "2024-12-03T19:33:30Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T19:33:29Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: completion
dtype: string
- name: meta
struct:
- name: category
dtype: string
- name: completion
dtype: string
- name: id
dtype: int64
- name: input
dtype: 'null'
- name: motivation_app
dtype: 'null'
- name: prompt
dtype: string
- name: source
dtype: string
- name: subcategory
dtype: string
- name: generation
dtype: 'null'
- name: model_name
dtype: 'null'
- name: distilabel_metadata
struct:
- name: raw_input_text_generation_0
dtype: 'null'
- name: raw_output_text_generation_0
dtype: 'null'
splits:
- name: train
num_bytes: 16613
num_examples: 10
download_size: 19461
dataset_size: 16613
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Shifaur/sri_lanka_constitutional_law_qa | Shifaur | "2024-12-03T19:41:03Z" | 9 | 0 | [
"task_categories:question-answering",
"language:en",
"license:openrail",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2306.02707",
"doi:10.57967/hf/3708",
"region:us",
"legal"
] | [
"question-answering"
] | "2024-12-03T19:35:50Z" | ---
license: openrail
task_categories:
- question-answering
language:
- en
tags:
- legal
size_categories:
- 1K<n<10K
---
# Sri Lankan Constitutional Law QA Dataset
## **Overview**
This dataset is the **first publicly accessible question-answer dataset** focused specifically on **Sri Lankan Constitutional Law**. It was developed to facilitate research, education, and application in areas related to legal studies, constitutional understanding, and NLP (Natural Language Processing). The dataset contains **1,697 question-answer pairs**, each verified to ensure accuracy.
### **Dataset Creation Process**
This dataset was created through an innovative two-step approach:
1. **AI Generation**:
- The questions were generated synthetically using **GPT-4**. The generative process utilized advanced prompt engineering to extract detailed, contextually accurate questions and answers about Sri Lankan constitutional law.
- The process followed principles inspired by the **Orca paper** (*Mukherjee et al., 2023: Orca: Progressive learning from complex explanation traces of GPT-4*). In this process, **complex reasoning patterns of GPT-4** were leveraged to ensure the quality and diversity of generated questions and answers.
2. **Expert Verification**:
- Every entry generated by GPT-4 was **thoroughly verified by legal professionals**. This rigorous review aimed to ensure **accuracy**, **legal correctness**, and **completeness** of the information provided.
### **Content**
The dataset comprises question-answer pairs regarding various aspects of the **Sri Lankan Constitution**, including, but not limited to:
- The rights and duties of citizens.
- Roles and responsibilities of government branches.
- Legal definitions and judicial interpretations.
- Amendments, legal provisions, and exceptions.
### **Significance**
This dataset is the **first of its kind** in terms of public accessibility for Sri Lankan Constitutional Law, providing an important educational and research resource. It can be used in:
- **Legal Research**: Assisting in training NLP models for legal understanding.
- **Education**: Providing law students with resources to understand the constitution more deeply.
- **AI Research**: Training, fine-tuning, or benchmarking language models for complex legal reasoning.
## **Data Format**
The dataset is structured as JSON, with each entry having the following fields:
- **"question"**: A string containing the legal question.
- **"answer"**: A string containing the corresponding answer, verified for accuracy.
### **Example Entry**
```json
{
"question": "What is the short title of the Penal Code Ordinance?",
"answer": "The short title of the Penal Code Ordinance is 'Penal Code'."
}
```
## **Usage Instructions**
This dataset can be loaded and processed easily for NLP applications, legal text analysis, or educational purposes. Tools like the Hugging Face `datasets` library are recommended for accessing and processing the dataset.
## **Licensing**
This dataset is made available under the **OpenRAIL license**. The OpenRAIL license allows for:
- **Royalty-free access** to the dataset.
- **Flexible downstream use and re-distribution**.
- **Distribution of any derivatives** created from the dataset.
For the full terms and conditions of the OpenRAIL license, please refer to [OpenRAIL License Link](https://www.licenses.ai/source-code-license).
## **Disclaimers**
- **Not Legal Advice**: This dataset is intended solely for **informational and educational purposes**. It does not constitute legal advice, nor should it be treated as such. Users must consult qualified legal professionals for advice on any legal matters.
- **No Liability for Use**: The creators and contributors of this dataset explicitly disclaim all warranties of accuracy, completeness, or applicability for specific use cases. While every effort has been made to ensure the accuracy and validity of the content, **no guarantees** are made regarding its fitness for use, and the creators will not be held responsible for any losses, damages, or liabilities that may arise from its use.
- **No Endorsement**: The dataset and its contents do not reflect the official views of any government, legal entity, or institution. The dataset was developed independently, and **no endorsement** by any official body is implied.
## **Citation**
If you use this dataset in your work, please cite it as follows:
```
@misc {shifaur_rahman_2024,
author = { {Shifaur Rahman} },
title = { sri_lanka_constitutional_law_qa (Revision b4079d1) },
year = 2024,
url = { https://huggingface.co/datasets/Shifaur/sri_lanka_constitutional_law_qa },
doi = { 10.57967/hf/3708 },
publisher = { Hugging Face }
}
@article{mukherjee2023orca,
title={Orca: Progressive Learning from Complex Explanation Traces of GPT-4},
author={Mukherjee, Subhojit and Mitra, Arindam and Jawahar, Ganesh and Agarwal, Siddhartha and Palangi, Hamid and Awadallah, Ahmed},
journal={arXiv preprint arXiv:2306.02707},
year={2023}
}
```
## **Acknowledgements**
The development of this dataset involved:
- The **GPT-4** model for question generation.
- **Legal experts** who verified every entry for accuracy and legal validity.
- The dataset was inspired by the methodology detailed in the **Orca paper** by Mukherjee et al. (2023).
## **Contact Information**
For any questions or issues regarding this dataset, please reach out to the contributors through the [Hugging Face dataset page](https://huggingface.com/datasets/Shifaur/sri_lanka_constitutional_law_qa). |
mlgawd/final_dpo_nemo_v8 | mlgawd | "2024-12-03T20:12:00Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T20:11:57Z" | ---
dataset_info:
features:
- name: questions
dtype: string
- name: accepted
dtype: string
- name: rejected
dtype: string
splits:
- name: train
num_bytes: 28069223
num_examples: 5877
download_size: 15919715
dataset_size: 28069223
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
LuanaMARD/historiav01 | LuanaMARD | "2024-12-03T20:49:36Z" | 9 | 0 | [
"license:mit",
"size_categories:n<1K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T20:31:38Z" | ---
license: mit
---
|
mlgawd/final_dpo_nemo_v10 | mlgawd | "2024-12-03T20:32:04Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T20:32:02Z" | ---
dataset_info:
features:
- name: questions
dtype: string
- name: accepted
dtype: string
- name: rejected
dtype: string
splits:
- name: train
num_bytes: 28102869
num_examples: 5877
download_size: 15924726
dataset_size: 28102869
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlgawd/final_dpo_nemo_v12 | mlgawd | "2024-12-03T20:45:50Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T20:45:47Z" | ---
dataset_info:
features:
- name: questions
dtype: string
- name: accepted
dtype: string
- name: rejected
dtype: string
splits:
- name: train
num_bytes: 28031447
num_examples: 5864
download_size: 15892999
dataset_size: 28031447
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/airoboros_stage_3_roleplay_none_response_gpt-4o-inst_gpt_4o-mini_resp | mlfoundations-dev | "2024-12-03T21:03:42Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T21:03:34Z" | ---
dataset_info:
features:
- name: min_docsearch_score
dtype: float64
- name: airoboros_subset
dtype: string
- name: instruction
dtype: string
- name: embedding
sequence: float64
- name: too_similar
dtype: bool
- name: similar_text
dtype: string
- name: similar_text_distance
dtype: float64
splits:
- name: train
num_bytes: 192199
num_examples: 20
download_size: 148062
dataset_size: 192199
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
dgambettaphd/D_gen0_run2_llama2-7b_wiki_doc1000_real64_synt64 | dgambettaphd | "2024-12-03T22:36:26Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T22:36:23Z" | ---
dataset_info:
features:
- name: id
dtype: int64
- name: doc
dtype: string
splits:
- name: train
num_bytes: 578372
num_examples: 1000
download_size: 361575
dataset_size: 578372
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
CambioMoney/ami-speaker-analysis_full_run_5_validation | CambioMoney | "2024-12-04T00:21:20Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T00:15:51Z" | ---
dataset_info:
features:
- name: meeting_id
dtype: string
- name: audio_id
dtype: string
- name: text
dtype: string
- name: audio
struct:
- name: array
sequence: float64
- name: path
dtype: string
- name: sampling_rate
dtype: int64
- name: begin_time
dtype: float64
- name: end_time
dtype: float64
- name: microphone_id
dtype: string
- name: speaker_id
dtype: string
- name: is_complete
dtype: bool
- name: original_segment
dtype: bool
splits:
- name: train
num_bytes: 413360707
num_examples: 1084
download_size: 91497853
dataset_size: 413360707
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
CambioMoney/ami-speaker-analysis_deepgram_run_train | CambioMoney | "2024-12-04T00:30:31Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T00:30:29Z" | ---
dataset_info:
features:
- name: meeting_id
dtype: string
- name: audio_id
dtype: string
- name: text
dtype: string
- name: audio
struct:
- name: array
sequence: float64
- name: path
dtype: string
- name: sampling_rate
dtype: int64
- name: begin_time
dtype: float64
- name: end_time
dtype: float64
- name: microphone_id
dtype: string
- name: speaker_id
dtype: string
- name: is_complete
dtype: bool
- name: original_segment
dtype: bool
- name: confidence
dtype: float64
splits:
- name: train
num_bytes: 18406083
num_examples: 36
download_size: 3608850
dataset_size: 18406083
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
siqi00/llama3_gsm8k_question_gsmlike_unhelpful_0.6_0.9_50_256 | siqi00 | "2024-12-04T01:44:44Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T01:21:47Z" | ---
dataset_info:
features:
- name: real
list:
- name: content
dtype: string
- name: role
dtype: string
- name: generated_0
list:
- name: content
dtype: string
- name: role
dtype: string
- name: generated_1
list:
- name: content
dtype: string
- name: role
dtype: string
- name: generated_2
list:
- name: content
dtype: string
- name: role
dtype: string
- name: generated_3
list:
- name: content
dtype: string
- name: role
dtype: string
- name: generated_4
list:
- name: content
dtype: string
- name: role
dtype: string
- name: generated_5
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 36559895
num_examples: 7473
download_size: 14555121
dataset_size: 36559895
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
gswamy/pythia-1.4B-tldr-vllm-pair-iter-2 | gswamy | "2024-12-04T01:54:43Z" | 9 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T01:54:22Z" | ---
dataset_info:
features:
- name: info
struct:
- name: id
dtype: string
- name: post
dtype: string
- name: title
dtype: string
- name: subreddit
dtype: string
- name: site
dtype: string
- name: article
dtype: string
- name: summaries
list:
- name: text
dtype: string
- name: policy
dtype: string
- name: note
dtype: string
- name: choice
dtype: int32
- name: worker
dtype: string
- name: batch
dtype: string
- name: split
dtype: string
- name: extra
struct:
- name: confidence
dtype: int32
- name: query_token
sequence: int64
- name: query
dtype: string
- name: response0
dtype: string
- name: response0_token
sequence: int64
- name: response0_token_len
dtype: int64
- name: response0_policy
dtype: string
- name: query_response0
dtype: string
- name: query_response0_token
sequence: int64
- name: query_response0_token_len
dtype: int64
- name: query_response0_token_response_label
sequence: int64
- name: response1
dtype: string
- name: response1_token
sequence: int64
- name: response1_token_len
dtype: int64
- name: response1_policy
dtype: string
- name: query_response1
dtype: string
- name: query_response1_token
sequence: int64
- name: query_response1_token_len
dtype: int64
- name: query_response1_token_response_label
sequence: int64
- name: query_token_len
dtype: int64
- name: policies
dtype: string
- name: iter_2_best_query_response
sequence: int64
- name: iter_2_worst_query_response
sequence: int64
- name: iter_2_best_mask
sequence: int64
- name: iter_2_worst_mask
sequence: int64
- name: iter_2_best_reward
dtype: float64
- name: iter_2_worst_reward
dtype: float64
splits:
- name: train
num_bytes: 4841788931
num_examples: 92858
download_size: 182255447
dataset_size: 4841788931
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
julia-se/tracka_mistral_multilabel | julia-se | "2024-12-04T02:35:48Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T02:35:46Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: text
dtype: string
- name: Anger
dtype: int64
- name: Disgust
dtype: int64
- name: Fear
dtype: int64
- name: Joy
dtype: int64
- name: Sadness
dtype: int64
- name: Surprise
dtype: int64
- name: Raiva
dtype: int64
- name: Nojo
dtype: int64
- name: Medo
dtype: int64
- name: Alegria
dtype: int64
- name: Tristeza
dtype: int64
- name: Surpresa
dtype: int64
splits:
- name: train
num_bytes: 544039
num_examples: 2226
download_size: 221435
dataset_size: 544039
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
julia-se/tracka_qwen_zeroshot_fear | julia-se | "2024-12-04T03:01:31Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T03:01:28Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: text
dtype: string
- name: Anger
dtype: int64
- name: Disgust
dtype: int64
- name: Fear
dtype: int64
- name: Joy
dtype: int64
- name: Sadness
dtype: int64
- name: Surprise
dtype: int64
- name: predicted_is_fear
dtype: int64
- name: y_fear
dtype: int64
splits:
- name: train
num_bytes: 472807
num_examples: 2226
download_size: 220654
dataset_size: 472807
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
r1v3r/asterinas_llm_versions | r1v3r | "2024-12-04T04:43:06Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T04:43:02Z" | ---
dataset_info:
features:
- name: version
dtype: string
- name: pull_number
dtype: int64
- name: problem_statement
dtype: string
- name: test_patch
dtype: string
- name: instance_id
dtype: string
- name: created_at
dtype: string
- name: base_commit
dtype: string
- name: repo
dtype: string
- name: issue_numbers
sequence: string
- name: patch
dtype: string
- name: hints_text
dtype: string
- name: environment_setup_commit
dtype: string
splits:
- name: train
num_bytes: 95500
num_examples: 3
download_size: 73934
dataset_size: 95500
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ininini/final2_QA-Dataset | ininini | "2024-12-04T05:43:46Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T05:09:13Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: output
dtype: string
splits:
- name: train
num_bytes: 23107
num_examples: 187
download_size: 10468
dataset_size: 23107
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
presencesw/data_remove_v0_preprocessed_1000 | presencesw | "2024-12-04T07:23:11Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T05:10:14Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: mask
dtype: image
- name: masked_image
dtype: image
- name: mae_embedding
dtype:
array3_d:
shape:
- 1
- 196
- 768
dtype: float32
splits:
- name: train
num_bytes: 13629084978.0
num_examples: 1000
download_size: 13640186121
dataset_size: 13629084978.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
HappyPablo/eval_dec4_data1 | HappyPablo | "2024-12-04T08:21:22Z" | 9 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"so100_2_eval"
] | [
"robotics"
] | "2024-12-04T08:21:06Z" | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so100_2_eval
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.0",
"robot_type": "so100",
"total_episodes": 10,
"total_frames": 8602,
"total_tasks": 1,
"total_videos": 20,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:10"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.laptop": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.phone": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
Nachiket-S/LLaMa_3B_IsCoT_DebiasingInstruction | Nachiket-S | "2024-12-04T08:32:45Z" | 9 | 0 | [
"format:parquet",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T08:32:44Z" | ---
dataset_info:
features:
- name: file_name
dtype: 'null'
- name: paragraph
dtype: 'null'
splits:
- name: inference
num_bytes: 0
num_examples: 0
download_size: 756
dataset_size: 0
configs:
- config_name: default
data_files:
- split: inference
path: data/inference-*
---
|
Nachiket-S/LLaMa_3B_NoCoT_DebiasingInstruction | Nachiket-S | "2024-12-04T08:33:45Z" | 9 | 0 | [
"format:parquet",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T08:33:43Z" | ---
dataset_info:
features:
- name: file_name
dtype: 'null'
- name: paragraph
dtype: 'null'
splits:
- name: inference
num_bytes: 0
num_examples: 0
download_size: 756
dataset_size: 0
configs:
- config_name: default
data_files:
- split: inference
path: data/inference-*
---
|
mlgawd/final_dpo_nemo_v18 | mlgawd | "2024-12-04T09:14:32Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T09:14:29Z" | ---
dataset_info:
features:
- name: questions
dtype: string
- name: accepted
dtype: string
- name: rejected
dtype: string
splits:
- name: train
num_bytes: 27178086
num_examples: 5845
download_size: 15360486
dataset_size: 27178086
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlgawd/final_dpo_nemo_v20 | mlgawd | "2024-12-04T09:48:47Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T09:36:40Z" | ---
dataset_info:
features:
- name: questions
dtype: string
- name: accepted
dtype: string
- name: rejected
dtype: string
splits:
- name: train
num_bytes: 27177632
num_examples: 5845
download_size: 15358940
dataset_size: 27177632
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
kowndinya23/flan2022-zeroshot-instr-inpt-outp-800000 | kowndinya23 | "2024-12-04T09:58:36Z" | 9 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T09:58:20Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 789086601
num_examples: 800000
- name: validation
num_bytes: 6604084
num_examples: 7407
download_size: 322527226
dataset_size: 795690685
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
kowndinya23/flan2022-zeroshot-instr-inpt-outp-1600000 | kowndinya23 | "2024-12-04T09:59:23Z" | 9 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T09:58:51Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 1522586846
num_examples: 1600000
- name: validation
num_bytes: 12982473
num_examples: 13716
download_size: 626208029
dataset_size: 1535569319
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|