datasetId | author | last_modified | downloads | likes | tags | task_categories | createdAt | card
---|---|---|---|---|---|---|---|---|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_64_32_0.01_64_BestF1_en | ferrazzipietro | "2024-12-02T18:31:59Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:31:56Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: id
dtype: string
- name: offsets
sequence: int64
- name: role
dtype: string
- name: semantic_type_id
dtype: string
- name: text
dtype: string
- name: type
dtype: string
- name: original_text
dtype: string
- name: original_id
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 418249
num_examples: 106
- name: test
num_bytes: 2472788
num_examples: 666
download_size: 292641
dataset_size: 2891037
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_32_32_0.05_64_BestF1_en | ferrazzipietro | "2024-12-02T18:32:33Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:32:31Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: id
dtype: string
- name: offsets
sequence: int64
- name: role
dtype: string
- name: semantic_type_id
dtype: string
- name: text
dtype: string
- name: type
dtype: string
- name: original_text
dtype: string
- name: original_id
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 418249
num_examples: 106
- name: test
num_bytes: 2472788
num_examples: 666
download_size: 292535
dataset_size: 2891037
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_16_32_0.01_64_BestF1_en | ferrazzipietro | "2024-12-02T18:32:50Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:32:47Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: id
dtype: string
- name: offsets
sequence: int64
- name: role
dtype: string
- name: semantic_type_id
dtype: string
- name: text
dtype: string
- name: type
dtype: string
- name: original_text
dtype: string
- name: original_id
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 418249
num_examples: 106
- name: test
num_bytes: 2472788
num_examples: 666
download_size: 292434
dataset_size: 2891037
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_64_64_0.05_64_BestF1_en | ferrazzipietro | "2024-12-02T18:33:10Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:33:05Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: id
dtype: string
- name: offsets
sequence: int64
- name: role
dtype: string
- name: semantic_type_id
dtype: string
- name: text
dtype: string
- name: type
dtype: string
- name: original_text
dtype: string
- name: original_id
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 418249
num_examples: 106
- name: test
num_bytes: 2472788
num_examples: 666
download_size: 292636
dataset_size: 2891037
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_32_64_0.05_64_BestF1_en | ferrazzipietro | "2024-12-02T18:33:30Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:33:27Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: id
dtype: string
- name: offsets
sequence: int64
- name: role
dtype: string
- name: semantic_type_id
dtype: string
- name: text
dtype: string
- name: type
dtype: string
- name: original_text
dtype: string
- name: original_id
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 418249
num_examples: 106
- name: test
num_bytes: 2472788
num_examples: 666
download_size: 292687
dataset_size: 2891037
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_16_16_0.01_64_BestF1_en | ferrazzipietro | "2024-12-02T18:33:48Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:33:45Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: id
dtype: string
- name: offsets
sequence: int64
- name: role
dtype: string
- name: semantic_type_id
dtype: string
- name: text
dtype: string
- name: type
dtype: string
- name: original_text
dtype: string
- name: original_id
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 418249
num_examples: 106
- name: test
num_bytes: 2472788
num_examples: 666
download_size: 293274
dataset_size: 2891037
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_64_16_0.01_64_BestF1_en | ferrazzipietro | "2024-12-02T18:34:06Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:34:03Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: id
dtype: string
- name: offsets
sequence: int64
- name: role
dtype: string
- name: semantic_type_id
dtype: string
- name: text
dtype: string
- name: type
dtype: string
- name: original_text
dtype: string
- name: original_id
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 418249
num_examples: 106
- name: test
num_bytes: 2472788
num_examples: 666
download_size: 292885
dataset_size: 2891037
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_16_32_0.05_64_BestF1_en | ferrazzipietro | "2024-12-02T18:34:40Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:34:37Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: id
dtype: string
- name: offsets
sequence: int64
- name: role
dtype: string
- name: semantic_type_id
dtype: string
- name: text
dtype: string
- name: type
dtype: string
- name: original_text
dtype: string
- name: original_id
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 418249
num_examples: 106
- name: test
num_bytes: 2472788
num_examples: 666
download_size: 292523
dataset_size: 2891037
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
ferrazzipietro/LS_Llama-3.1-8B_e3c-sentences-sk-unrevised_NoQuant_64_16_0.05_64_BestF1_en | ferrazzipietro | "2024-12-02T18:34:58Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T18:34:55Z" | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: entities
list:
- name: id
dtype: string
- name: offsets
sequence: int64
- name: role
dtype: string
- name: semantic_type_id
dtype: string
- name: text
dtype: string
- name: type
dtype: string
- name: original_text
dtype: string
- name: original_id
dtype: string
- name: tokens
sequence: string
- name: ner_tags
sequence: int64
- name: ground_truth_word_level
sequence: string
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: predictions
sequence: string
- name: ground_truth_labels
sequence: string
splits:
- name: all_validation
num_bytes: 418249
num_examples: 106
- name: test
num_bytes: 2472788
num_examples: 666
download_size: 292738
dataset_size: 2891037
configs:
- config_name: default
data_files:
- split: all_validation
path: data/all_validation-*
- split: test
path: data/test-*
---
|
sartifyllc/sft_question_answer_gemma_test | sartifyllc | "2024-12-02T19:12:56Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T19:12:55Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 12356
num_examples: 50
download_size: 9559
dataset_size: 12356
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
juliadollis/stf_regex_ner_completo_80 | juliadollis | "2024-12-02T20:21:32Z" | 9 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T20:12:01Z" | ---
dataset_info:
features:
- name: inteiro_teor
dtype: string
- name: url_download
dtype: string
- name: dataDecisao
dtype: timestamp[ns]
- name: dataPublicacao
dtype: timestamp[ns]
- name: decisao
dtype: string
- name: descricaoClasse
dtype: string
- name: ementa
dtype: string
- name: id
dtype: string
- name: jurisprudenciaCitada
dtype: string
- name: ministroRelator
dtype: string
- name: nomeOrgaoJulgador
dtype: string
- name: numeroProcesso
dtype: string
- name: referenciasLegislativas
sequence: string
- name: siglaClasse
dtype: string
- name: tipoDeDecisao
dtype: string
- name: titulo
dtype: string
- name: acordaosSimilares
sequence: string
- name: partes_lista_texto
dtype: string
- name: temaProcs
sequence: string
- name: inteiro_teor_regex
dtype: string
- name: NER
struct:
- name: JURISPRUDENCIA
sequence: string
- name: LEGISLACAO
sequence: string
- name: LOCAL
sequence: string
- name: ORGANIZACAO
sequence: string
- name: PESSOA
sequence: string
- name: TEMPO
sequence: string
- name: desambiguacao
list:
- name: class
dtype: string
- name: count
dtype: int64
- name: elements
sequence: string
- name: entity
dtype: string
splits:
- name: train
num_bytes: 9037862346
num_examples: 78477
download_size: 2459050799
dataset_size: 9037862346
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/airoboros_stage_3_coding_none_response_gpt-4o-inst_gpt_4o-mini_resp_test | mlfoundations-dev | "2024-12-02T21:47:56Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T21:10:28Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: response
dtype: string
- name: airoboros_subset
dtype: string
splits:
- name: train
num_bytes: 3754298
num_examples: 1200
download_size: 1765579
dataset_size: 3754298
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mathreward/data_collection_8b_math_2 | mathreward | "2024-12-02T22:01:44Z" | 9 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:00:53Z" | ---
dataset_info:
features:
- name: idx
dtype: int64
- name: gt
dtype: string
- name: my_solu
dtype: string
splits:
- name: train
num_bytes: 3283996207
num_examples: 607500
download_size: 1342872050
dataset_size: 3283996207
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
makcedward/openai-moderation | makcedward | "2024-12-04T14:45:36Z" | 9 | 0 | [
"task_categories:text-classification",
"language:en",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2208.03274",
"region:us",
"prompt_guard",
"prmopt",
"LlamaGuard"
] | [
"text-classification"
] | "2024-12-02T22:33:34Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: S
dtype: float64
- name: H
dtype: float64
- name: V
dtype: float64
- name: HR
dtype: float64
- name: SH
dtype: float64
- name: S3
dtype: float64
- name: H2
dtype: float64
- name: V2
dtype: float64
splits:
- name: test
num_bytes: 1222579
num_examples: 1680
download_size: 746347
dataset_size: 1222579
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
task_categories:
- text-classification
language:
- en
tags:
- prompt_guard
- prompt
- LlamaGuard
---
# Dataset
Homepage: https://github.com/openai/moderation-api-release
Description: A Holistic Approach to Undesired Content Detection
Citation:
```
@article{openai2022moderation,
title={A Holistic Approach to Undesired Content Detection},
author={Todor Markov and Chong Zhang and Sandhini Agarwal and Tyna Eloundou and Teddy Lee and Steven Adler and Angela Jiang and Lilian Weng},
journal={arXiv preprint arXiv:2208.03274},
year={2022}
}
```
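Given the Parquet config above, the split can be loaded directly with the `datasets` library. A minimal sketch, assuming you want to filter prompts by a category score; the 0.5 threshold is an illustrative assumption, not part of the release:
```python
from datasets import load_dataset

# Load the single "test" split declared in the card's config
ds = load_dataset("makcedward/openai-moderation", split="test")

# Each row holds a prompt plus float scores for the moderation
# categories listed above (S, H, V, HR, SH, S3, H2, V2)
print(ds[0]["prompt"][:80])

# Illustrative filter: keep prompts whose S score exceeds an
# assumed 0.5 threshold (scores may be missing for some rows)
flagged = ds.filter(lambda row: row["S"] is not None and row["S"] > 0.5)
print(f"{len(flagged)} prompts exceed the assumed threshold")
```
|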
JimmieJom/boofu | JimmieJom | "2024-12-02T22:37:26Z" | 9 | 0 | [
"license:apache-2.0",
"size_categories:n<1K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T22:37:03Z" | ---
license: apache-2.0
---
|
pclucas14/nqa-RAG-256_22_24 | pclucas14 | "2024-12-02T23:01:16Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T23:01:14Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 26388567
num_examples: 65
download_size: 10775611
dataset_size: 26388567
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
bustamiyusoef/TransTigriya-English | bustamiyusoef | "2024-12-02T23:18:37Z" | 9 | 0 | [
"task_categories:translation",
"language:ti",
"language:en",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"translation"
] | "2024-12-02T23:02:40Z" | ---
task_categories:
- translation
language:
- ti
- en
---
The original data is from [HornMT](https://github.com/asmelashteka/HornMT/tree/main). |
pclucas14/nqa-RAG-256_5_24 | pclucas14 | "2024-12-02T23:03:35Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T23:03:33Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 26742364
num_examples: 66
download_size: 10694928
dataset_size: 26742364
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_21_24 | pclucas14 | "2024-12-02T23:05:09Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T23:05:07Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 25139427
num_examples: 65
download_size: 10600443
dataset_size: 25139427
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_23_24 | pclucas14 | "2024-12-02T23:07:47Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T23:07:45Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 26073886
num_examples: 65
download_size: 11091963
dataset_size: 26073886
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_20_24 | pclucas14 | "2024-12-02T23:12:17Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T23:12:15Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 26066335
num_examples: 65
download_size: 11251136
dataset_size: 26066335
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
bellomuiz78/knowledgebase | bellomuiz78 | "2024-12-04T00:54:45Z" | 9 | 0 | [
"license:mit",
"region:us"
] | null | "2024-12-02T23:46:46Z" | ---
license: mit
---
|
ashercn97/reasoning-v1-worked | ashercn97 | "2024-12-02T23:55:07Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T23:55:05Z" | ---
dataset_info:
features:
- name: text_id
dtype: string
- name: text
dtype: string
- name: label
sequence: string
- name: split_text
sequence: string
splits:
- name: train
num_bytes: 143957
num_examples: 100
download_size: 90504
dataset_size: 143957
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
dgambettaphd/D_gen0_run2_llama2-7b_wiki_doc1000_real32_synt96 | dgambettaphd | "2024-12-03T00:26:32Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T00:26:30Z" | ---
dataset_info:
features:
- name: id
dtype: int64
- name: doc
dtype: string
splits:
- name: train
num_bytes: 511648
num_examples: 1000
download_size: 301252
dataset_size: 511648
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
julia-se/tracka_mistral_fewshot_disgust | julia-se | "2024-12-03T01:03:16Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T00:29:17Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: text
dtype: string
- name: Anger
dtype: int64
- name: Disgust
dtype: int64
- name: Fear
dtype: int64
- name: Joy
dtype: int64
- name: Sadness
dtype: int64
- name: Surprise
dtype: int64
- name: predicted_is_disgust
dtype: int64
- name: y_disgust
dtype: int64
splits:
- name: train
num_bytes: 472807
num_examples: 2226
download_size: 216953
dataset_size: 472807
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ashnaz/refined_symptoms_doctors | ashnaz | "2024-12-03T01:30:22Z" | 9 | 0 | [
"license:afl-3.0",
"size_categories:n<1K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T01:21:13Z" | ---
license: afl-3.0
---
|
Taylor658/myelography-imaging | Taylor658 | "2024-12-03T03:29:44Z" | 9 | 0 | [
"task_categories:text-classification",
"task_ids:named-entity-recognition",
"task_ids:news-articles-summarization",
"annotations_creators:synthetic",
"multilinguality:monolingual",
"source_datasets:original",
"language:en",
"license:apache-2.0",
"size_categories:n<1K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"text-classification"
] | "2024-12-03T03:28:14Z" | ---
annotations_creators:
- synthetic
language:
- en
license: apache-2.0
multilinguality:
- monolingual
pretty_name: Myelography Imaging
size_categories:
- 1K<n<10K
source_datasets:
- original
task_categories:
- text-classification
task_ids:
- named-entity-recognition
- news-articles-summarization
---
# Myelography Imaging
## Dataset Description
This dataset consists of **750 synthetic myelography examination records** representing a wide spectrum of spinal pathologies and patient experiences. Each record includes:
- **Patient demographics**: Age and sex.
- **Clinical symptoms prompting the procedure**: Detailed and verbose descriptions.
- **Procedural details**: Contrast medium type, injection site, and imaging modality used.
- **Verbose findings**: Observations such as spinal cord compression, herniated discs, tumors, and spinal stenosis.
- **Complications encountered**: Any issues arising during or after the procedure.
- **Follow-up recommendations**: Suggested next steps, including surgical consultation, physical therapy, or additional imaging.
### Example Data
| Age | Sex | Clinical Symptoms | Contrast Medium Type | Injection Site | Imaging Modality | Findings | Complications | Follow-up Recommendations |
|-----|-------|---------------------------------------------------------|----------------------|----------------|------------------|-------------------------------------------------|------------------------------------------|--------------------------------------------------|
| 45 | Male | Chronic lower back pain with radiating leg pain | Iodinated contrast | Lumbar spine | X-ray | Large herniated disc at L4-L5 | No complications | Referral to neurosurgery for evaluation |
| 60 | Female| Acute onset lower limb weakness post-trauma | Gadolinium-based contrast| Cervical spine | MRI | Severe spinal cord compression | Localized discomfort at injection site | Follow-up imaging with enhanced MRI |
## Intended Use
This dataset is intended for educational, research, and development purposes, including:
- Training and benchmarking in **natural language processing** (NLP) tasks.
- Developing tools for medical image analysis and clinical decision support systems.
- Conducting exploratory data analysis in synthetic medical datasets.
## Limitations
This dataset is entirely synthetic and does not contain real patient data. It should not be used for diagnostic purposes. The findings and follow-up recommendations are simulated and may not encompass the full complexity of real-world scenarios.
## License
This dataset is distributed under the **Apache 2.0 License**.
## Citation
---
### Acknowledgments
|
RussRobin/VDD | RussRobin | "2024-12-03T05:04:58Z" | 9 | 0 | [
"license:cc-by-4.0",
"size_categories:n<1K",
"format:imagefolder",
"modality:image",
"library:datasets",
"library:mlcroissant",
"arxiv:2305.13608",
"region:us"
] | null | "2024-12-03T04:34:17Z" | ---
license: cc-by-4.0
---
VDD: Varied Drone Dataset for Semantic Segmentation
Paper: https://arxiv.org/abs/2305.13608
GitHub Repo: https://github.com/RussRobin/VDD
This HF repo contains the VDD source images and annotations.
Please refer to our GitHub repo if you want to download our annotations of UDD and UAVid.
|
infinite-dataset-hub/EthicalEatingEmotions | infinite-dataset-hub | "2024-12-03T06:32:40Z" | 9 | 0 | [
"license:mit",
"size_categories:n<1K",
"format:csv",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"infinite-dataset-hub",
"synthetic"
] | null | "2024-12-03T06:32:39Z" | ---
license: mit
tags:
- infinite-dataset-hub
- synthetic
---
# EthicalEatingEmotions
tags: vegan, psychology, dietary choices
_Note: This is an AI-generated dataset so its content may be inaccurate or false_
**Dataset Description:**
The 'EthicalEatingEmotions' dataset contains anonymized user-generated content from various platforms discussing the emotional aspects of adopting a vegan diet. The data is gathered from social media posts, blog comments, and forum discussions. Each entry includes the original text, a sentiment analysis score, and a label reflecting the user's emotional stance towards veganism (e.g., positive, neutral, negative).
**CSV Content Preview:**
```
text,sentiment_score,labels
"I've been vegan for 5 years now and I feel healthier than ever!",0.9,"positive"
"Trying to be vegan has been challenging but worth it for the planet.",0.7,"positive"
"The taste of vegan food can sometimes be off-putting, but I'm learning.",0.6,"neutral"
"I'm disappointed by the lack of vegan options at my favorite restaurant.",0.3,"negative"
"Veganism isn't for everyone, and that's okay. I respect people's choices.",0.5,"neutral"
```
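For quick inspection, the preview above can be parsed directly with pandas; a minimal sketch using the preview text as a stand-in for the real CSV file:
```python
import pandas as pd
from io import StringIO

# First rows of the CSV preview above, used here in place of the file
preview = '''text,sentiment_score,labels
"I've been vegan for 5 years now and I feel healthier than ever!",0.9,"positive"
"The taste of vegan food can sometimes be off-putting, but I'm learning.",0.6,"neutral"
'''

df = pd.read_csv(StringIO(preview))
print(df[["sentiment_score", "labels"]])
```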
**Source of the data:**
The dataset was generated using the [Infinite Dataset Hub](https://huggingface.co/spaces/infinite-dataset-hub/infinite-dataset-hub) and microsoft/Phi-3-mini-4k-instruct with the query 'vegan':
- **Dataset Generation Page**: https://huggingface.co/spaces/infinite-dataset-hub/infinite-dataset-hub?q=vegan&dataset=EthicalEatingEmotions&tags=vegan,+psychology,+dietary+choices
- **Model**: https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
- **More Datasets**: https://huggingface.co/datasets?other=infinite-dataset-hub
|
denkCF/UsersCodeforcesSubmissionsEnd2024 | denkCF | "2024-12-03T08:30:52Z" | 9 | 0 | [
"language:en",
"license:cc-by-4.0",
"size_categories:10M<n<100M",
"format:csv",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"codeforces",
"submissions"
] | null | "2024-12-03T07:52:37Z" | ---
datasets:
- name: usersCodeforcesSubmissionsEnd2024
size: 200MB
task_categories:
- other
languages:
- en
licenses:
- cc-by-4.0
tags:
- codeforces
- competitive-programming
- submissions
pretty_name: Codeforces Users Submissions (End of 2024)
description: >
This dataset contains anonymized submission data of ≈15,000 Codeforces
users, spanning from the inception of Codeforces to the end of November
2024.
download_size: 100MB
dataset_size: 1.2GB
license: cc-by-4.0
language:
- en
tags:
- codeforces
- submissions
size_categories:
- 10M<n<100M
---
# Codeforces Users Submissions Dataset (End of 2024)
This project provides the `usersCodeforcesSubmissionsEnd2024.csv` file, containing anonymized submission data of approximately 15,000 active Codeforces users. The dataset includes all submissions from the inception of Codeforces up to the end of November 2024 and is designed to support AI and data-driven projects. (All data was collected using the open Codeforces API.)
## Dataset Overview
The file contains **17,607,999 rows** with the following columns:
- **`handle`**: An anonymized and shuffled user nickname (e.g., `user{i}`).
- **`rating_at_submission`**: User's rating at the time of submission.
- **`problem_rating`**: Problem difficulty rating.
- **`id_of_submission_task`**: Unique problem identifier on Codeforces.
- **`verdict`**: Result of the submission (e.g., `OK`, `WRONG_ANSWER`).
- **`time`**: Time of submission (in seconds since the Unix epoch).
## Purpose of the Dataset
1. **AI Development**: This dataset can be used to create intelligent systems to enhance user learning on Codeforces.
_(Example: The author of this dataset is currently working on the project "Codeforces User Analysis System for Generating Individual Training Recommendations," which aims to recommend tasks to users based on their weaknesses.)_
2. **Time Saving**: Collecting such data manually can be time-consuming (it took ≈7 hours for this dataset). By providing it in a ready-to-use format, we aim to save your time and effort.
3. **Reduce Server Load**: This dataset minimizes repetitive data scraping, thereby reducing the load on Codeforces servers.
## License
This dataset is shared under the [CC BY 4.0 License](https://creativecommons.org/licenses/by/4.0/). You are free to use it for your projects with proper attribution.
## How to Use
1. Download the `usersCodeforcesSubmissionsEnd2024.zip` file.
2. Unzip the file to access the `usersCodeforcesSubmissionsEnd2024.csv` dataset:
- On Linux/macOS: Use the `unzip` command in the terminal.
- On Windows: Right-click the file and select "Extract All."
3. Load the CSV file into your favorite data analysis tool:
```python
import pandas as pd
df = pd.read_csv("usersCodeforcesSubmissionsEnd2024.csv")
   # Good luck with your projects :)
   ```
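4. (Optional) Sanity-check the load with a quick analysis. A minimal sketch using the columns described above; the acceptance-rate grouping is only an illustration:
```python
import pandas as pd

df = pd.read_csv("usersCodeforcesSubmissionsEnd2024.csv")

# Fraction of accepted submissions per problem difficulty rating
df["accepted"] = df["verdict"] == "OK"
print(df.groupby("problem_rating")["accepted"].mean().head())

# Convert Unix-epoch seconds to datetimes for time-based analysis
df["time"] = pd.to_datetime(df["time"], unit="s")
```
|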
aniruddha007/example-retrieval-reranking-dataset | aniruddha007 | "2024-12-03T08:42:58Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"library:distilabel",
"region:us",
"synthetic",
"distilabel",
"rlaif"
] | null | "2024-12-03T08:42:55Z" | ---
size_categories: n<1K
dataset_info:
- config_name: generate_reranking_pairs
features:
- name: filename
dtype: string
- name: anchor
dtype: string
- name: repo_name
dtype: string
- name: positive
dtype: string
- name: negative
dtype: string
- name: distilabel_metadata
struct:
- name: raw_input_generate_reranking_pairs
list:
- name: content
dtype: string
- name: role
dtype: string
- name: raw_output_generate_reranking_pairs
dtype: string
- name: model_name
dtype: string
splits:
- name: train
num_bytes: 39508
num_examples: 15
download_size: 36985
dataset_size: 39508
- config_name: generate_retrieval_pairs
features:
- name: filename
dtype: string
- name: anchor
dtype: string
- name: repo_name
dtype: string
- name: positive
dtype: string
- name: negative
dtype: string
- name: distilabel_metadata
struct:
- name: raw_input_generate_retrieval_pairs
list:
- name: content
dtype: string
- name: role
dtype: string
- name: raw_output_generate_retrieval_pairs
dtype: string
- name: model_name
dtype: string
splits:
- name: train
num_bytes: 38355
num_examples: 15
download_size: 30713
dataset_size: 38355
configs:
- config_name: generate_reranking_pairs
data_files:
- split: train
path: generate_reranking_pairs/train-*
- config_name: generate_retrieval_pairs
data_files:
- split: train
path: generate_retrieval_pairs/train-*
tags:
- synthetic
- distilabel
- rlaif
---
<p align="left">
<a href="https://github.com/argilla-io/distilabel">
<img src="https://raw.githubusercontent.com/argilla-io/distilabel/main/docs/assets/distilabel-badge-light.png" alt="Built with Distilabel" width="200" height="32"/>
</a>
</p>
# Dataset Card for example-retrieval-reranking-dataset
This dataset has been created with [distilabel](https://distilabel.argilla.io/).
## Dataset Summary
This dataset contains a `pipeline.yaml` which can be used to reproduce the pipeline that generated it in distilabel using the `distilabel` CLI:
```console
distilabel pipeline run --config "https://huggingface.co/datasets/aniruddha007/example-retrieval-reranking-dataset/raw/main/pipeline.yaml"
```
or explore the configuration:
```console
distilabel pipeline info --config "https://huggingface.co/datasets/aniruddha007/example-retrieval-reranking-dataset/raw/main/pipeline.yaml"
```
## Dataset structure
The examples have the following structure per configuration:
<details><summary> Configuration: generate_retrieval_pairs </summary><hr>
```json
{
"anchor": "description: Argilla is a collaboration platform for AI engineers and domain experts that require high-quality outputs, full data ownership, and overall efficiency.\nhide: navigation\n\nWelcome to Argilla\n\nArgilla is a collaboration platform for AI engineers and domain experts that require high-quality outputs, full data ownership, and overall efficiency.",
"distilabel_metadata": {
"raw_input_generate_retrieval_pairs": [
{
"content": "Your task is to generate a positive and a negative sentence given an anchor sentence. Take into account the context given. The positive sentence has to be a query for the anchor sentence, while the negative sentence is a \u0027hard negative\u0027 that meets the following criteria:\n- Uses similar keywords or phrases as the anchor sentence\n- Has a similar grammatical structure or syntax\n- Is not related to the anchor sentence, but could be mistaken for it\nTry to create a negative sentence that would be challenging for a model to distinguish from the positive sentence. You must output only two new sections: `## Positive` and `## Negative`.",
"role": "system"
},
{
"content": "## Context\n\n\nThe text is a chunk from technical Python SDK documentation of Argilla.\nArgilla is a collaboration tool for AI engineers and domain experts to build high-quality datasets.\nAlong with prose explanations, the text chunk may include code snippets and Python references.\n\n\n## Anchor\n\ndescription: Argilla is a collaboration platform for AI engineers and domain experts that require high-quality outputs, full data ownership, and overall efficiency.\nhide: navigation\n\nWelcome to Argilla\n\nArgilla is a collaboration platform for AI engineers and domain experts that require high-quality outputs, full data ownership, and overall efficiency.\n",
"role": "user"
}
],
"raw_output_generate_retrieval_pairs": "## Positive\nWhat is Argilla, the collaboration tool for AI engineers and domain experts, that offers high-quality outputs, full data ownership, and overall efficiency?\n\n## Negative\nArgilla is a collaboration platform for AI engineers and domain experts that demand low-quality outputs, limited data ownership, and overall inefficiency."
},
"filename": "argilla-python/docs/index.md",
"model_name": "mistralai/Mistral-7B-Instruct-v0.3",
"negative": "Argilla is a collaboration platform for AI engineers and domain experts that demand low-quality outputs, limited data ownership, and overall inefficiency.",
"positive": "What is Argilla, the collaboration tool for AI engineers and domain experts, that offers high-quality outputs, full data ownership, and overall efficiency?",
"repo_name": "argilla-io/argilla-python"
}
```
This subset can be loaded as:
```python
from datasets import load_dataset
ds = load_dataset("aniruddha007/example-retrieval-reranking-dataset", "generate_retrieval_pairs")
```
</details>
<details><summary> Configuration: generate_reranking_pairs </summary><hr>
```json
{
"anchor": "description: Argilla is a collaboration platform for AI engineers and domain experts that require high-quality outputs, full data ownership, and overall efficiency.\nhide: navigation\n\nWelcome to Argilla\n\nArgilla is a collaboration platform for AI engineers and domain experts that require high-quality outputs, full data ownership, and overall efficiency.",
"distilabel_metadata": {
"raw_input_generate_reranking_pairs": [
{
"content": "Your task is to generate a positive and a negative sentence given an anchor sentence. Take into account the context given. The positive sentence has to be semantically similar to the anchor sentence, while the negative sentence can use similar words but must not be related to the anchor sentence. You must output only two new sections: `## Positive` and `## Negative`.",
"role": "system"
},
{
"content": "## Context\n\n\nThe text is a chunk from technical Python SDK documentation of Argilla.\nArgilla is a collaboration tool for AI engineers and domain experts to build high-quality datasets.\nAlong with prose explanations, the text chunk may include code snippets and Python references.\n\n\n## Anchor\n\ndescription: Argilla is a collaboration platform for AI engineers and domain experts that require high-quality outputs, full data ownership, and overall efficiency.\nhide: navigation\n\nWelcome to Argilla\n\nArgilla is a collaboration platform for AI engineers and domain experts that require high-quality outputs, full data ownership, and overall efficiency.\n",
"role": "user"
}
],
"raw_output_generate_reranking_pairs": "## Positive\nArgilla offers an efficient and collaborative environment for AI engineers and domain experts to produce superior quality datasets while maintaining full control over their data.\n\n## Negative\nUnfortunately, Argilla does not support real-time data streaming or AI model training, making it less suitable for certain fast-paced AI project requirements."
},
"filename": "argilla-python/docs/index.md",
"model_name": "mistralai/Mistral-7B-Instruct-v0.3",
"negative": "Unfortunately, Argilla does not support real-time data streaming or AI model training, making it less suitable for certain fast-paced AI project requirements.",
"positive": "Argilla offers an efficient and collaborative environment for AI engineers and domain experts to produce superior quality datasets while maintaining full control over their data.",
"repo_name": "argilla-io/argilla-python"
}
```
This subset can be loaded as:
```python
from datasets import load_dataset
ds = load_dataset("aniruddha007/example-retrieval-reranking-dataset", "generate_reranking_pairs")
```
</details>
|
yspark0519/iemocap_add_features | yspark0519 | "2024-12-03T09:46:36Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T09:11:00Z" | ---
dataset_info:
features:
- name: messages
list:
- name: content
list:
- name: text
dtype: string
- name: type
dtype: string
- name: role
dtype: string
- name: images
sequence: string
- name: __index_level_0__
dtype: int64
splits:
- name: train
num_bytes: 1880275
num_examples: 3548
- name: test
num_bytes: 493491
num_examples: 942
download_size: 551240
dataset_size: 2373766
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
babs/podcast-9 | babs | "2024-12-03T09:56:46Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T09:56:41Z" | ---
dataset_info:
features:
- name: audio
dtype: audio
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 193557411.0
num_examples: 275
download_size: 181264413
dataset_size: 193557411.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
viktoriatilevska/train_group3_1M | viktoriatilevska | "2024-12-03T12:51:20Z" | 9 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T12:51:10Z" | ---
dataset_info:
features:
- name: context
dtype: string
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 384600002
num_examples: 1000000
download_size: 76733429
dataset_size: 384600002
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Alwaly/parler_tts_wom | Alwaly | "2024-12-03T13:40:39Z" | 9 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T13:40:36Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: utterance_pitch_mean
dtype: float32
- name: utterance_pitch_std
dtype: float32
- name: snr
dtype: float64
- name: c50
dtype: float64
- name: speaking_rate
dtype: float64
- name: phonemes
dtype: string
- name: stoi
dtype: float64
- name: si-sdr
dtype: float64
- name: pesq
dtype: float64
splits:
- name: train
num_bytes: 1925479
num_examples: 17952
- name: test
num_bytes: 215369
num_examples: 1995
download_size: 1812661
dataset_size: 2140848
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
juliadollis/stf_regex_ner_2_fuzzyover_90 | juliadollis | "2024-12-03T14:05:13Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T14:05:07Z" | ---
dataset_info:
features:
- name: inteiro_teor
dtype: string
- name: url_download
dtype: string
- name: dataDecisao
dtype: timestamp[ns]
- name: dataPublicacao
dtype: timestamp[ns]
- name: decisao
dtype: string
- name: descricaoClasse
dtype: string
- name: ementa
dtype: string
- name: id
dtype: string
- name: jurisprudenciaCitada
dtype: string
- name: ministroRelator
dtype: string
- name: nomeOrgaoJulgador
dtype: string
- name: numeroProcesso
dtype: string
- name: referenciasLegislativas
sequence: string
- name: siglaClasse
dtype: string
- name: tipoDeDecisao
dtype: string
- name: titulo
dtype: string
- name: acordaosSimilares
sequence: string
- name: partes_lista_texto
dtype: string
- name: temaProcs
sequence: string
- name: inteiro_teor_regex
dtype: string
- name: NER
struct:
- name: JURISPRUDENCIA
sequence: string
- name: LEGISLACAO
sequence: string
- name: LOCAL
sequence: string
- name: ORGANIZACAO
sequence: string
- name: PESSOA
sequence: string
- name: TEMPO
sequence: string
- name: desambiguacao
list:
- name: class
dtype: string
- name: count
dtype: int64
- name: elements
sequence: string
- name: entity
dtype: string
splits:
- name: train
num_bytes: 159167208
num_examples: 1000
download_size: 44085104
dataset_size: 159167208
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/opengpt_gpt-4o-mini_scale_x.125 | mlfoundations-dev | "2024-12-03T21:32:39Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T14:33:05Z" | ---
dataset_info:
features:
- name: language
dtype: string
- name: quantity
dtype: int64
- name: task
dtype: string
- name: instruction
dtype: string
- name: input
dtype: string
- name: output
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 1749694
num_examples: 498
download_size: 854812
dataset_size: 1749694
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/opengpt_gpt-4o-mini_scale_x.5 | mlfoundations-dev | "2024-12-03T21:28:25Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T14:33:29Z" | ---
dataset_info:
features:
- name: language
dtype: string
- name: quantity
dtype: int64
- name: task
dtype: string
- name: instruction
dtype: string
- name: input
dtype: string
- name: output
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 5171805
num_examples: 2466
download_size: 2656285
dataset_size: 5171805
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
DT4LM/t5v1-1base_sst2_leap | DT4LM | "2024-12-03T15:19:28Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T15:18:23Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype: int32
splits:
- name: train
num_bytes: 48903
num_examples: 662
download_size: 34435
dataset_size: 48903
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
all-oj-gen/ds_coder_reflct_rmsprop_iter4_sppo_hard_new_all_oj_iter4-bin_all_pairs | all-oj-gen | "2024-12-03T16:12:42Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T16:12:33Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: test
dtype: string
splits:
- name: train
num_bytes: 37426071
num_examples: 9188
download_size: 11275871
dataset_size: 37426071
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder_reflct_rmsprop_iter4_sppo_hard_new_all_oj_iter4-bin_all_pairs"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
aboriskin/adaptive_rag_hotpotqa | aboriskin | "2024-12-06T13:50:35Z" | 9 | 0 | [
"task_categories:question-answering",
"language:en",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"question-answering"
] | "2024-12-03T16:59:58Z" | ---
license: apache-2.0
task_categories:
- question-answering
language:
- en
size_categories:
- n<1K
---
In this collection you can find 4 of the 6 Adaptive RAG datasets, restricted to their `is_supporting=True` contexts.
Not all samples from TriviaQA and SQuAD have `is_supporting=True` contexts, which is why those two datasets are not included in this HF collection.
If a question has more than one `is_supporting=True` context, the contexts are concatenated.
The script that transforms the original Adaptive RAG format into this format can be found here:
https://github.com/sashaboriskin/rag_routing/blob/main/data/hf_adaptive_rag_supportive_context.py
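A minimal sketch of the concatenation step described above; the field names (`contexts`, `is_supporting`, `paragraph_text`) are assumptions about the original Adaptive RAG format, so treat the linked script as authoritative:
```python
def merge_supporting_contexts(sample: dict) -> str:
    """Concatenate every is_supporting=True context of one question."""
    supporting = [
        ctx["paragraph_text"]          # assumed field name
        for ctx in sample["contexts"]  # assumed field name
        if ctx.get("is_supporting")
    ]
    return "\n\n".join(supporting)
```
|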
all-oj-gen/ds_chat_pos_reflct_rmsprop_iter1_sppo_hard_new_all_oj_iter1-bin | all-oj-gen | "2024-12-03T17:18:41Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T17:18:20Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: chosen_probs
dtype: float64
- name: chosen_probs_win
dtype: float64
- name: chosen_probs_lose
dtype: float64
splits:
- name: train
num_bytes: 17187060
num_examples: 5909
download_size: 7065544
dataset_size: 17187060
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_chat_pos_reflct_rmsprop_iter1_sppo_hard_new_all_oj_iter1-bin"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
RyanYr/self-reflect_mini8Bit-t0_mistlarge-t12_om2-460k_binlabel_reflection | RyanYr | "2024-12-03T18:58:31Z" | 9 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T18:58:21Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: response
dtype: string
- name: response@0_correctness
dtype: bool
- name: response@2_correctness
dtype: bool
splits:
- name: train
num_bytes: 712727727
num_examples: 247730
download_size: 261524199
dataset_size: 712727727
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Alwaly/parler_tts-descriptions-tags_bis_wom | Alwaly | "2024-12-03T19:33:50Z" | 9 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T19:33:48Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: utterance_pitch_mean
dtype: float32
- name: utterance_pitch_std
dtype: float32
- name: snr
dtype: float64
- name: c50
dtype: float64
- name: speaking_rate
dtype: string
- name: phonemes
dtype: string
- name: stoi
dtype: float64
- name: si-sdr
dtype: float64
- name: pesq
dtype: float64
- name: noise
dtype: string
- name: reverberation
dtype: string
- name: speech_monotony
dtype: string
- name: sdr_noise
dtype: string
- name: pesq_speech_quality
dtype: string
- name: text_description
dtype: string
splits:
- name: train
num_bytes: 7000548
num_examples: 17952
- name: test
num_bytes: 784386
num_examples: 1995
download_size: 3004619
dataset_size: 7784934
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
asoria/crawl4ai_hf_page_md | asoria | "2024-12-03T19:39:27Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"crawl4ai",
"crawl"
] | null | "2024-12-03T19:39:24Z" | ---
tags:
- crawl4ai
- crawl
---
**Source of the data:**
The dataset was generated using the [Crawl4ai](https://crawl4ai.com/mkdocs/) library from https://huggingface.co/.
|
mlgawd/final_dpo_nemo_v9 | mlgawd | "2024-12-03T20:28:30Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T20:28:27Z" | ---
dataset_info:
features:
- name: questions
dtype: string
- name: accepted
dtype: string
- name: rejected
dtype: string
splits:
- name: train
num_bytes: 28102869
num_examples: 5877
download_size: 15924726
dataset_size: 28102869
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
collinear-ai/financial_cg_flex_customization | collinear-ai | "2024-12-03T21:39:02Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T20:32:03Z" | ---
dataset_info:
features:
- name: conv_prefix
list:
- name: content
dtype: string
- name: role
dtype: string
- name: response
struct:
- name: content
dtype: string
- name: role
dtype: string
- name: ground_truth
dtype: int64
splits:
- name: pku_safer_rlhf_economic_crime
num_bytes: 1267
num_examples: 2
download_size: 7552
dataset_size: 1267
configs:
- config_name: default
data_files:
- split: pku_safer_rlhf_economic_crime
path: data/pku_safer_rlhf_economic_crime-*
---
|
Honi086/voz_natanzinholima | Honi086 | "2024-12-03T21:38:26Z" | 9 | 0 | [
"license:openrail",
"size_categories:n<1K",
"format:audiofolder",
"modality:audio",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-12-03T20:52:41Z" | ---
license: openrail
---
|
mlgawd/final_dpo_nemo_v15 | mlgawd | "2024-12-03T22:11:02Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T22:10:59Z" | ---
dataset_info:
features:
- name: questions
dtype: string
- name: accepted
dtype: string
- name: rejected
dtype: string
splits:
- name: train
num_bytes: 28050268.76875957
num_examples: 5866
download_size: 15945218
dataset_size: 28050268.76875957
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
BarryFutureman/jenny-tts-text-tags-6h-v1 | BarryFutureman | "2024-12-03T22:27:23Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T22:27:21Z" | ---
dataset_info:
features:
- name: file_name
dtype: string
- name: text
dtype: string
- name: transcription_normalised
dtype: string
- name: utterance_pitch_mean
dtype: float32
- name: utterance_pitch_std
dtype: float32
- name: snr
dtype: float64
- name: c50
dtype: float64
- name: speaking_rate
dtype: string
- name: phonemes
dtype: string
- name: stoi
dtype: float64
- name: si-sdr
dtype: float64
- name: pesq
dtype: float64
- name: noise
dtype: string
- name: reverberation
dtype: string
- name: speech_monotony
dtype: string
- name: sdr_noise
dtype: string
- name: pesq_speech_quality
dtype: string
splits:
- name: train
num_bytes: 2063542
num_examples: 4000
download_size: 1025292
dataset_size: 2063542
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
HFXM/hh-rlhf-Rule7 | HFXM | "2024-12-03T22:57:36Z" | 9 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T22:57:30Z" | ---
dataset_info:
features:
- name: chosen
dtype: string
- name: rejected
dtype: string
splits:
- name: train
num_bytes: 325133436
num_examples: 169352
download_size: 183445975
dataset_size: 325133436
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
CambioMoney/ami-speaker-analysis_full_run_3 | CambioMoney | "2024-12-03T23:25:09Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T23:17:50Z" | ---
dataset_info:
features:
- name: meeting_id
dtype: string
- name: audio_id
dtype: string
- name: text
dtype: string
- name: audio
struct:
- name: array
sequence: float64
- name: path
dtype: string
- name: sampling_rate
dtype: int64
- name: begin_time
dtype: float64
- name: end_time
dtype: float64
- name: microphone_id
dtype: string
- name: speaker_id
dtype: string
- name: is_complete
dtype: bool
- name: original_segment
dtype: bool
splits:
- name: train
num_bytes: 204976583
num_examples: 459
- name: validation
num_bytes: 165869849
num_examples: 434
- name: test
num_bytes: 112767531
num_examples: 418
download_size: 102500074
dataset_size: 483613963
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
mathreward/new_8b_llama31_selfcorr_horizon2_tmp07 | mathreward | "2024-12-03T23:20:58Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T23:20:53Z" | ---
dataset_info:
features:
- name: idx
dtype: int64
- name: gt
dtype: string
- name: level
dtype: string
- name: type
dtype: string
- name: my_solu
dtype: string
- name: pred
sequence: string
splits:
- name: train
num_bytes: 22370291
num_examples: 5000
download_size: 6772341
dataset_size: 22370291
---
# Dataset Card for "new_8b_llama31_selfcorr_horizon2_tmp07"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
polymathic-ai/turbulent_radiative_layer_2D | polymathic-ai | "2024-12-03T23:31:34Z" | 9 | 1 | [
"task_categories:time-series-forecasting",
"task_categories:other",
"task_ids:multivariate-time-series-forecasting",
"language:en",
"license:cc-by-4.0",
"region:us",
"physics"
] | [
"time-series-forecasting",
"other"
] | "2024-12-03T23:29:30Z" | ---
language:
- en
license: cc-by-4.0
tags:
- physics
task_categories:
- time-series-forecasting
- other
task_ids:
- multivariate-time-series-forecasting
---
# How To Load from HuggingFace Hub
1. Be sure to have `the_well` installed (`pip install the_well`)
2. Use the `WellDataModule` to retrieve data as follows:
```python
from the_well.data import WellDataModule
# The following line may take a couple of minutes to instantiate the datamodule
datamodule = WellDataModule(
"hf://datasets/polymathic-ai/",
"turbulent_radiative_layer_2D",
)
train_dataloader = datamodule.train_dataloader()
for batch in train_dataloader:
# Process training batch
...
```
# Turbulent Radiative Layer - 2D
**One line description of the data:** Everywhere in astrophysical systems, hot gas moves relative to cold gas; this leads to mixing, and mixing populates intermediate-temperature gas that is highly reactive, in this case rapidly cooling.
**Longer description of the data:** In this simulation, there is cold, dense gas on the bottom and hot, dilute gas on the top, moving relative to each other at highly subsonic velocities. This setup is unstable to the Kelvin-Helmholtz instability, which is seeded with small-scale noise that is varied between the simulations. The hot gas and cold gas are both in thermal equilibrium, in the sense that heating and cooling are exactly balanced. However, once mixing occurs as a result of the turbulence induced by the Kelvin-Helmholtz instability, the intermediate temperatures become populated. This intermediate-temperature gas is not in thermal equilibrium, and cooling beats heating. This leads to a net mass flux from the hot phase to the cold phase. This process occurs in the interstellar medium, and in the circumgalactic medium when cold clouds move through the ambient hot medium. By understanding how the total cooling and mass transfer scale with the cooling rate, we are able to constrain how this process controls the overall phase structure, energetics, and dynamics of the gas in and around galaxies.
**Associated paper**: [Paper](https://iopscience.iop.org/article/10.3847/2041-8213/ab8d2c/pdf).
**Domain expert**: [Drummond Fielding](https://dfielding14.github.io/), CCA, Flatiron Institute & Cornell University.
**Code or software used to generate the data**: [Athena++](https://www.athena-astro.app/).
**Equation**:
$$
\begin{align*}
\frac{ \partial \rho}{\partial t} + \nabla \cdot \left( \rho \vec{v} \right) &= 0 \\
\frac{ \partial \rho \vec{v} }{\partial t} + \nabla \cdot \left( \rho \vec{v}\vec{v} + P \right) &= 0 \\
\frac{ \partial E }{\partial t} + \nabla \cdot \left( (E + P) \vec{v} \right) &= - \frac{E}{t_{\rm cool}} \\
E &= P / (\gamma - 1), \qquad \gamma = 5/3
\end{align*}
$$
with \\(\rho\\) the density, \\(\vec{v}\\) the 2D velocity, \\(P\\) the pressure, \\(E\\) the total energy, and \\(t_{\rm cool}\\) the cooling time.
![Gif](https://users.flatironinstitute.org/~polymathic/data/the_well/datasets/turbulent_radiative_layer_2D/gif/density_normalized.gif)
| Dataset | FNO | TFNO | Unet | CNextU-net |
|:-:|:-:|:-:|:-:|:-:|
| `turbulent_radiative_layer_2D` | 0.5001 | 0.5016 | 0.2418 | \\(\mathbf{0.1956}\\) |
Table: VRMSE metrics on test sets (lower is better). Best results are shown in bold. VRMSE is scaled such that predicting the mean value of the target field results in a score of 1.
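For reference, a natural formalization of this scaling (an assumption consistent with the description above, not the benchmark's verbatim definition) is
$$
\mathrm{VRMSE} = \sqrt{\frac{\langle (u - \hat{u})^2 \rangle}{\langle (u - \bar{u})^2 \rangle}}
$$
with \\(u\\) the target field, \\(\hat{u}\\) the prediction, \\(\bar{u}\\) the mean of the target field, and \\(\langle \cdot \rangle\\) an average over the domain, so that the constant prediction \\(\hat{u} = \bar{u}\\) scores exactly 1.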
## About the data
**Dimension of discretized data:** 101 timesteps of 384x128 images.
**Fields available in the data:** Density (scalar field), pressure (scalar field), velocity (vector field).
**Number of trajectories:** 90 (10 different seeds for each of the 9 \\(t_{cool}\\) values).
**Estimated size of the ensemble of all simulations:** 6.9 GB.
**Grid type:** uniform, cartesian coordinates.
**Initial conditions:** Analytic, described in the [paper](https://ui.adsabs.harvard.edu/abs/2020ApJ...894L..24F/abstract).
**Boundary conditions:** Periodic in the x-direction, zero-gradient for the y-direction.
**Simulation time-step ( \\(\Delta t\\)):** varies with \\(t_{cool}\\). The smallest \\(t_{cool}\\) has \\(\Delta t = 1.36\times10^{-2}\\) and the largest \\(t_{cool}\\) has \\(\Delta t = 1.74\times10^{-2}\\). Note that this is not in seconds; it is in dimensionless simulation time.
**Data are stored separated by ( \\(\delta t\\)):** 1.597033 in simulation time.
**Total time range ( \\(t_{min}\\) to \\(t_{max}\\)):** \\(t_{min} = 0\\), \\(t_{max} = 159.7033\\).
**Spatial domain size ( \\(L_x\\), \\(L_y\\), \\(L_z\\)):** \\(x \in [-0.5, 0.5]\\), \\(y \in [-1, 2]\\) giving \\(L_x = 1\\) and \\(L_y = 3\\).
**Set of coefficients or non-dimensional parameters evaluated:** \\(t_{cool} = \{0.03, 0.06, 0.1, 0.18, 0.32, 0.56, 1.00, 1.78, 3.16\}\\).
**Approximate time to generate the data:** 84 seconds using 48 cores for one simulation. 100 CPU hours for everything.
**Hardware used to generate the data:** 48 CPU cores.
## What is interesting and challenging about the data:
**What phenomena of physical interest are captured in the data:**
- The mass flux from hot to cold phase.
- The turbulent velocities.
- Amount of mass per temperature bin (where T = pressure/density).
**How to evaluate a new simulator operating in this space:** See whether it captures the right mass flux, the right turbulent velocities, and the right amount of mass per temperature bin.
Please cite the associated paper if you use this data in your research:
```
@article{fielding2020multiphase,
title={Multiphase gas and the fractal nature of radiative turbulent mixing layers},
author={Fielding, Drummond B and Ostriker, Eve C and Bryan, Greg L and Jermyn, Adam S},
journal={The Astrophysical Journal Letters},
volume={894},
number={2},
pages={L24},
year={2020},
publisher={IOP Publishing}
}
```
|
CambioMoney/ami-speaker-analysis_full_run_deepgram_4_train | CambioMoney | "2024-12-04T00:30:22Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T00:28:32Z" | ---
dataset_info:
features:
- name: meeting_id
dtype: string
- name: audio_id
dtype: string
- name: text
dtype: string
- name: audio
struct:
- name: array
sequence: float64
- name: path
dtype: string
- name: sampling_rate
dtype: int64
- name: begin_time
dtype: float64
- name: end_time
dtype: float64
- name: microphone_id
dtype: string
- name: speaker_id
dtype: string
- name: is_complete
dtype: bool
- name: original_segment
dtype: bool
- name: confidence
dtype: float64
splits:
- name: train
num_bytes: 64287453
num_examples: 100
download_size: 12880587
dataset_size: 64287453
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
RyanYr/self-reflect_mini8Bit-t0_sft-t1_om2-1 | RyanYr | "2024-12-04T01:05:24Z" | 9 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T00:35:49Z" | ---
dataset_info:
features:
- name: problem
dtype: string
- name: generated_solution
dtype: string
- name: answer
dtype: string
- name: problem_source
dtype: string
- name: response@0
sequence: string
- name: response@1
sequence: string
splits:
- name: train
num_bytes: 200430782
num_examples: 20000
download_size: 74218427
dataset_size: 200430782
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ziyu3141/rich_feedback_train_with_image | ziyu3141 | "2024-12-04T02:22:57Z" | 9 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T02:10:57Z" | ---
dataset_info:
features:
- name: Filename
dtype: string
- name: Aesthetics score
dtype: float64
- name: Artifact score
dtype: float64
- name: Misalignment score
dtype: float64
- name: Overall score
dtype: float64
- name: Artifact heatmap
sequence:
sequence:
sequence: int64
- name: Misalignment heatmap
sequence:
sequence:
sequence: int64
- name: Misalignment token label
dtype: string
- name: prompt
dtype: string
- name: image
dtype: binary
splits:
- name: train
num_bytes: 101068478704
num_examples: 15810
download_size: 1715550658
dataset_size: 101068478704
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
all-oj-gen/ds_coder_pos_reflct_rmsprop_iter1_sppo_hard_new_all_oj_iter1-bin | all-oj-gen | "2024-12-04T02:20:07Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T02:20:06Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: chosen_probs
dtype: float64
- name: chosen_probs_win
dtype: float64
- name: chosen_probs_lose
dtype: float64
splits:
- name: train
num_bytes: 22694827
num_examples: 5803
download_size: 9769195
dataset_size: 22694827
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder_pos_reflct_rmsprop_iter1_sppo_hard_new_all_oj_iter1-bin"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
all-oj-gen/ds_coder_pos_reflct_rmsprop_iter1_sppo_hard_new_all_oj_iter1-full_resp_trace | all-oj-gen | "2024-12-04T02:20:10Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T02:20:08Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: test
dtype: string
- name: tag
dtype: string
- name: id
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: text_prompt
dtype: string
- name: text_chosen
dtype: string
- name: text_rejected
dtype: string
- name: generate_0
dtype: string
- name: generate_0_score
dtype: int64
- name: traceback_0
dtype: string
- name: generate_1
dtype: string
- name: generate_1_score
dtype: int64
- name: traceback_1
dtype: string
- name: generate_2
dtype: string
- name: generate_2_score
dtype: int64
- name: traceback_2
dtype: string
- name: generate_3
dtype: string
- name: generate_3_score
dtype: int64
- name: traceback_3
dtype: string
- name: probability
sequence:
sequence: float64
- name: rm_scores
sequence: int64
splits:
- name: train
num_bytes: 58672389
num_examples: 5803
download_size: 22470758
dataset_size: 58672389
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder_pos_reflct_rmsprop_iter1_sppo_hard_new_all_oj_iter1-full_resp_trace"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
Senju2/Context-Aware-English-to-Arabic-Dataset | Senju2 | "2024-12-04T02:23:50Z" | 9 | 0 | [
"language:ar",
"language:en",
"license:artistic-2.0",
"size_categories:1M<n<10M",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T02:20:37Z" | ---
license: artistic-2.0
language:
- ar
- en
--- |
ShravaniCV/guanaco-llama2-1k | ShravaniCV | "2024-12-04T06:22:25Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T06:22:24Z" | ---
dataset_info:
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 1654448
num_examples: 1000
download_size: 966692
dataset_size: 1654448
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
all-oj-gen/ds_coder_pos_reflct_rmsprop_iter2_sppo_hard_new_all_oj_iter2-bin | all-oj-gen | "2024-12-04T07:11:57Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T07:11:56Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: chosen_probs
dtype: float64
- name: chosen_probs_win
dtype: float64
- name: chosen_probs_lose
dtype: float64
splits:
- name: train
num_bytes: 20501335
num_examples: 5330
download_size: 8790051
dataset_size: 20501335
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder_pos_reflct_rmsprop_iter2_sppo_hard_new_all_oj_iter2-bin"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
all-oj-gen/ds_coder_pos_reflct_rmsprop_iter2_sppo_hard_new_all_oj_iter2-full_resp_trace | all-oj-gen | "2024-12-04T07:11:59Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T07:11:58Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: test
dtype: string
- name: tag
dtype: string
- name: id
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: text_prompt
dtype: string
- name: text_chosen
dtype: string
- name: text_rejected
dtype: string
- name: generate_0
dtype: string
- name: generate_0_score
dtype: int64
- name: traceback_0
dtype: string
- name: generate_1
dtype: string
- name: generate_1_score
dtype: int64
- name: traceback_1
dtype: string
- name: generate_2
dtype: string
- name: generate_2_score
dtype: int64
- name: traceback_2
dtype: string
- name: generate_3
dtype: string
- name: generate_3_score
dtype: int64
- name: traceback_3
dtype: string
- name: probability
sequence:
sequence: float64
- name: rm_scores
sequence: int64
splits:
- name: train
num_bytes: 53264044
num_examples: 5330
download_size: 20304959
dataset_size: 53264044
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder_pos_reflct_rmsprop_iter2_sppo_hard_new_all_oj_iter2-full_resp_trace"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
likithguna/guanaco-llama2-1k | likithguna | "2024-12-04T07:31:54Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T07:31:53Z" | ---
dataset_info:
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 1654448
num_examples: 1000
download_size: 966692
dataset_size: 1654448
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
gswamy/pythia-1.4B-tldr-vllm-pair-iter-3 | gswamy | "2024-12-04T08:13:46Z" | 9 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T08:13:27Z" | ---
dataset_info:
features:
- name: info
struct:
- name: id
dtype: string
- name: post
dtype: string
- name: title
dtype: string
- name: subreddit
dtype: string
- name: site
dtype: string
- name: article
dtype: string
- name: summaries
list:
- name: text
dtype: string
- name: policy
dtype: string
- name: note
dtype: string
- name: choice
dtype: int32
- name: worker
dtype: string
- name: batch
dtype: string
- name: split
dtype: string
- name: extra
struct:
- name: confidence
dtype: int32
- name: query_token
sequence: int64
- name: query
dtype: string
- name: response0
dtype: string
- name: response0_token
sequence: int64
- name: response0_token_len
dtype: int64
- name: response0_policy
dtype: string
- name: query_response0
dtype: string
- name: query_response0_token
sequence: int64
- name: query_response0_token_len
dtype: int64
- name: query_response0_token_response_label
sequence: int64
- name: response1
dtype: string
- name: response1_token
sequence: int64
- name: response1_token_len
dtype: int64
- name: response1_policy
dtype: string
- name: query_response1
dtype: string
- name: query_response1_token
sequence: int64
- name: query_response1_token_len
dtype: int64
- name: query_response1_token_response_label
sequence: int64
- name: query_token_len
dtype: int64
- name: policies
dtype: string
- name: iter_3_best_query_response
sequence: int64
- name: iter_3_worst_query_response
sequence: int64
- name: iter_3_best_mask
sequence: int64
- name: iter_3_worst_mask
sequence: int64
- name: iter_3_best_reward
dtype: float64
- name: iter_3_worst_reward
dtype: float64
splits:
- name: train
num_bytes: 4841788931
num_examples: 92858
download_size: 186299631
dataset_size: 4841788931
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
utkarsh4430/pretraining | utkarsh4430 | "2024-12-04T08:43:18Z" | 9 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T08:28:59Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: instruction
dtype: string
- name: output
dtype: string
- name: dataset
dtype: string
- name: task
dtype: string
- name: input
dtype: string
- name: audio_feat
dtype: binary
- name: video_feat
dtype: binary
splits:
- name: train
num_bytes: 38631914922
num_examples: 485830
download_size: 36262540214
dataset_size: 38631914922
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Nachiket-S/LLaMa_1B_NoCoT_DebiasingInstruction | Nachiket-S | "2024-12-04T09:21:25Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T08:32:01Z" | ---
dataset_info:
features:
- name: file_name
dtype: string
- name: paragraph
dtype: string
- name: generated_text
dtype: string
splits:
- name: inference
num_bytes: 125517
num_examples: 80
download_size: 48320
dataset_size: 125517
configs:
- config_name: default
data_files:
- split: inference
path: data/inference-*
---
|
tdurbor/background-removal-arena-green | tdurbor | "2024-12-04T09:32:11Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T09:19:12Z" | ---
dataset_info:
features:
- name: original_image
dtype: image
- name: clipdrop_image
dtype: image
- name: bria_image
dtype: image
- name: photoroom_image
dtype: image
- name: removebg_image
dtype: image
- name: original_filename
dtype: string
splits:
- name: train
num_bytes: 147718672.0
num_examples: 77
download_size: 147674887
dataset_size: 147718672.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
jfcalvo/export-testing-different-split | jfcalvo | "2024-12-04T16:28:50Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"library:argilla",
"region:us",
"rlfh",
"argilla",
"human-feedback"
] | null | "2024-12-04T09:29:34Z" | ---
tags:
- rlfh
- argilla
- human-feedback
---
# Dataset Card for export-testing-different-split
This dataset has been created with [Argilla](https://github.com/argilla-io/argilla). As shown in the sections below, this dataset can be loaded into your Argilla server as explained in [Load with Argilla](#load-with-argilla), or used directly with the `datasets` library in [Load with `datasets`](#load-with-datasets).
## Using this dataset with Argilla
To load with Argilla, you'll just need to install Argilla as `pip install argilla --upgrade` and then use the following code:
```python
import argilla as rg
ds = rg.Dataset.from_hub("jfcalvo/export-testing-different-split", settings="auto")
```
This will load the settings and records from the dataset repository and push them to your Argilla server for exploration and annotation.
## Using this dataset with `datasets`
To load the records of this dataset with `datasets`, you'll just need to install `datasets` as `pip install datasets --upgrade` and then use the following code:
```python
from datasets import load_dataset
ds = load_dataset("jfcalvo/export-testing-different-split")
```
This will only load the records of the dataset, but not the Argilla settings.
## Dataset Structure
This dataset repo contains:
* Dataset records in a format compatible with HuggingFace `datasets`. These records will be loaded automatically when using `rg.Dataset.from_hub` and can be loaded independently using the `datasets` library via `load_dataset`.
* The [annotation guidelines](#annotation-guidelines) that have been used for building and curating the dataset, if they've been defined in Argilla.
* A dataset configuration folder conforming to the Argilla dataset format in `.argilla`.
The dataset is created in Argilla with: **fields**, **questions**, **suggestions**, **metadata**, **vectors**, and **guidelines**.
### Fields
The **fields** are the features or text of a dataset's records. For example, the 'text' column of a text classification dataset or the 'prompt' column of an instruction following dataset.
| Field Name | Title | Type | Required |
| ---------- | ----- | ---- | -------- |
| persona | persona | text | False |
| image | image | image | False |
### Questions
The **questions** are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label_selection, multi_label_selection, or ranking.
| Question Name | Title | Type | Required | Description | Values/Labels |
| ------------- | ----- | ---- | -------- | ----------- | ------------- |
| text_0 | text_0 | text | True | N/A | N/A |
| label_1 | label_1 | label_selection | True | N/A | [] |
| multi-label_2 | multi-label_2 | multi_label_selection | True | N/A | [] |
| rating_3 | rating_3 | rating | True | N/A | [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] |
| ranking_4 | ranking_4 | ranking | True | N/A | ['option1', 'option2'] |
| span_5 | span_5 | span | True | N/A | N/A |
### Data Splits
The dataset contains a single split, which is `train`.
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation guidelines
[More Information Needed]
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
[More Information Needed] |
VargheseP/test_dataset_area_asc | VargheseP | "2024-12-04T11:33:15Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T11:32:47Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: bbx
dtype: image
- name: dist
dtype: image
- name: ellipse
dtype: image
- name: basic
dtype: string
- name: artsy
dtype: string
- name: caption
dtype: string
- name: mask
dtype: image
splits:
- name: train
num_bytes: 84262821.0
num_examples: 931
download_size: 81324540
dataset_size: 84262821.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
cuijinye/so106_test | cuijinye | "2024-12-04T12:19:01Z" | 9 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"so100",
"tutorial"
] | [
"robotics"
] | "2024-12-04T12:18:02Z" | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so100
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.0",
"robot_type": "so100",
"total_episodes": 2,
"total_frames": 1016,
"total_tasks": 1,
"total_videos": 4,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:2"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.laptop": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.phone": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
all-oj-gen/ds_coder6.7b_pos_reflct_rmsprop_iter2_sppo_hard_new_all_oj_iter2-bin | all-oj-gen | "2024-12-04T12:20:13Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T12:20:12Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: chosen_probs
dtype: float64
- name: chosen_probs_win
dtype: float64
- name: chosen_probs_lose
dtype: float64
splits:
- name: train
num_bytes: 18544306
num_examples: 5387
download_size: 8159425
dataset_size: 18544306
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder6.7b_pos_reflct_rmsprop_iter2_sppo_hard_new_all_oj_iter2-bin"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
Sebastianycx/alpaca_train_cleaned | Sebastianycx | "2024-12-04T13:18:53Z" | 9 | 0 | [
"license:mit",
"region:us"
] | null | "2024-12-04T13:18:53Z" | ---
license: mit
---
|
AsmaaMahmoudSaeddd/testdataset7 | AsmaaMahmoudSaeddd | "2024-12-04T13:21:34Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T13:21:29Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: label
dtype: string
splits:
- name: train
num_bytes: 91277.0
num_examples: 3
download_size: 91579
dataset_size: 91277.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
RyanYr/self-reflect_mini8Bit-t0_sft-t1_om2-1_2 | RyanYr | "2024-12-04T13:27:44Z" | 9 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T13:27:33Z" | ---
dataset_info:
features:
- name: problem
dtype: string
- name: generated_solution
dtype: string
- name: answer
dtype: string
- name: problem_source
dtype: string
- name: response@0
sequence: string
- name: response@1
sequence: string
splits:
- name: train
num_bytes: 819468626
num_examples: 80000
download_size: 303055898
dataset_size: 819468626
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
marco-schouten/exp11 | marco-schouten | "2024-12-04T13:58:55Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T13:52:08Z" | ---
dataset_info:
features:
- name: input_image
dtype: image
- name: edit_prompt
dtype: string
- name: edited_image
dtype: image
splits:
- name: train
num_bytes: 1313588.0
num_examples: 311
download_size: 513217
dataset_size: 1313588.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Geraldine/Ead-Instruct-full-175k | Geraldine | "2024-12-04T15:15:13Z" | 9 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T14:27:39Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 62975299
num_examples: 175410
download_size: 8413170
dataset_size: 62975299
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# EAD/XML Dataset for Prompt-Completion Tasks
## Overview
This dataset is specifically designed for text generation and completion tasks involving Encoded Archival Description (EAD) files in XML format. The dataset provides four distinct types of prompt-completion pairs, each crafted to help train and evaluate language models in understanding hierarchical XML structures, element relationships, path-based predictions, and masked element generation.
## Dataset Summary
This dataset is designed for fine-tuning large language models to generate and complete XML files compliant with the EAD (Encoded Archival Description) standard. It emphasizes structural and semantic understanding of deeply nested XML elements, commonly used for archival metadata representation.
The dataset consists of:
- XML snippets and sections extracted from real-world EAD/XML files.
- Examples designed to balance structural integrity, attribute handling, and meaningful textual content.
- Diverse prompts, including partial, truncated, and complete XML sections.
## Dataset Creation
### Source Data
The source XML files were derived from the French `Inventaires du Catalogue général des manuscrits (CGM)` of the BnF collections.
These files adhere to the EAD schema and were processed programmatically to create structured training examples.
### Processing Pipeline
The dataset was generated using a Python pipeline leveraging the lxml and xml.etree.ElementTree libraries.
Example:
```python
import copy

from lxml import etree


def hierarchical_decomposition(root, tree):
    """Generates hierarchical decomposition prompts/completions."""
    for c in root.findall('.//'):
        # Get the immediate parent
        immediate_parent = c.getparent()
        # Create a copy of the immediate parent to avoid modifying the original tree
        parent_snippet_element = copy.deepcopy(immediate_parent)
        # Remove all children from the copied parent to keep only the parent tag
        # (iterate over a list so that removal does not skip any child)
        for child in list(parent_snippet_element):
            parent_snippet_element.remove(child)
        # Optionally, add the attributes of the immediate parent to the snippet
        for key, value in immediate_parent.attrib.items():
            parent_snippet_element.set(key, value)
        # Now convert this modified parent to a string
        prompt = etree.tostring(parent_snippet_element, pretty_print=True, encoding="UTF-8", xml_declaration=False).decode()
        # If c has children, only add the first child to the completion
        if len(c):
            # Create a copy of 'c' to avoid modifying the original
            c_copy = copy.deepcopy(c)
            # Remove all but the first child from the copy
            for i in range(len(c_copy) - 1, 0, -1):  # Iterate backwards to avoid index issues
                c_copy.remove(c_copy[i])
            parent_snippet_element.insert(0, c_copy)  # Add the modified 'c' with only the first child
        else:
            # Insert a copy: inserting 'c' itself would move it out of the original tree
            parent_snippet_element.insert(0, copy.deepcopy(c))
        completion = etree.tostring(parent_snippet_element, pretty_print=True, encoding="UTF-8", xml_declaration=False).decode()
        yield prompt, completion


# Example usage:
tree = etree.parse(<local_path_eadxml_file>)
root = tree.getroot()
for prompt, completion in hierarchical_decomposition(root, tree):  # Pass both root and tree
    print("Hierarchical content prediction:")
    print("Prompt:", prompt)
    print("Completion:", completion)
    print("---")
```
## Dataset Features
The dataset includes the following types of prompt-completion pairs:
1. **Hierarchical Decomposition**
- **Prompt**: An XML snippet representing a parent element.
- **Completion**: A valid child element that fits within the EAD structure.
- **Use Case**: Train models to generate child elements based on their parent context.
2. **Deep Hierarchical Decomposition**
- **Prompt**: An XML snippet representing a parent element.
- **Completion**: A complete section with deeply nested elements.
- **Use Case**: Enable models to predict the relationship between parent and child nodes in XML.
3. **Path-Based Prediction**
- **Prompt**: The XPath of a specific EAD/XML element.
- **Completion**: The text content of the referenced element.
   - **Use Case**: Develop models capable of navigating XML trees and retrieving element values (see the sketch after this list).
4. **Masked Element Prediction**
- **Prompt**: An XML snippet where a specific element's content is replaced with a mask `[MASK]`.
- **Completion**: The original value of the masked element.
- **Use Case**: Train models to reconstruct missing information in XML elements.
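To make the path-based type concrete, here is a minimal sketch of how such pairs can be derived with `lxml` (the function name `path_based_pairs` is illustrative, not the pipeline's actual code):
```python
from lxml import etree

def path_based_pairs(tree):
    """Yield (XPath prompt, text completion) pairs for elements with text."""
    for elem in tree.iter():
        if not isinstance(elem.tag, str):
            continue  # skip comments and processing instructions
        if elem.text and elem.text.strip():
            # getpath() returns the absolute XPath of the element within the tree
            yield tree.getpath(elem), elem.text.strip()
```
Each yielded XPath serves as the prompt and the element's text content as the completion.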
## Generation Process
The dataset was created using a set of EAD/XML files. The following steps were followed to generate prompt-completion pairs:
1. **Hierarchical Decomposition**
- Parsed the XML tree to isolate `<c>` components and their parents.
- Extracted parent elements as prompts and child elements as completions.
- Ensured all children were removed from the copied parent to retain context.
2. **Deep Hierarchical Decomposition**
- Iterated through focused key EAD/XML sections such as `<eadheader>` or `<archdesc>`.
- Recursively parsed the XML tree with a configurable depth (MAX_DEPTH) to control the size and complexity of generated examples
- Used the structured template specifying the section to be completed as the prompt and the corresponding extracted XML snippet as the completion.
3. **Path-Based Prediction**
- Generated XPath for each element in the XML tree.
- Used the XPath as the prompt and the corresponding element's text content as the completion.
4. **Masked Element Prediction**
- Masked a specific element's text content in a deep copy of its parent.
   - Used the masked parent as the prompt and the original text as the completion (sketched below).
Each generated pair was validated for non-empty completions and sanitized to ensure proper formatting and structure.
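As an illustration of step 4, here is a minimal sketch of how masked-element pairs can be generated with `lxml` (the function name and the exact masking policy are assumptions for illustration, not the pipeline's actual code):
```python
import copy

from lxml import etree

MASK = "[MASK]"  # mask token used in the prompts

def masked_element_pairs(root):
    """Yield (prompt, completion) pairs by masking one element's text at a time."""
    for elem in root.iter():
        if not isinstance(elem.tag, str):
            continue  # skip comments and processing instructions
        if elem.text is None or not elem.text.strip():
            continue  # skip elements without meaningful text content
        parent = elem.getparent()
        if parent is None:
            continue  # the root element has no parent snippet to serve as context
        # Mask inside a deep copy so the original tree stays untouched
        parent_copy = copy.deepcopy(parent)
        target = parent_copy[parent.index(elem)]
        completion = target.text
        target.text = MASK
        prompt = etree.tostring(parent_copy, pretty_print=True, encoding="unicode")
        yield prompt, completion
```
The masked parent snippet becomes the prompt and the original element text the completion, matching the prompt-completion format shown below.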
## Dataset Statistics
- **File Source**: EAD/XML files from the French `Inventaires du Catalogue général des manuscrits (CGM)` (BnF): [https://api.bnf.fr/fr/CCFr/CGM](https://api.bnf.fr/fr/CCFr/CGM).
- **Total Samples**: 175,410 examples spanning the four prompt-completion categories.
## Available Datasets
Three versions of the dataset are available:
* **Ead-Instruct-full-175k:** The complete dataset of 175,000 records.
* **[Ead-Instruct-50k](https://huggingface.co/datasets/Geraldine/Ead-Instruct-50k):** A subset of 50,000 records.
* **[Ead-Instruct-10k](https://huggingface.co/datasets/Geraldine/Ead-Instruct-10k):** A subset of 10,000 records
## How to Use
The dataset can be accessed and used for fine-tuning and evaluating generative models. Prompts and completions are stored as key-value pairs in JSON format. Each entry includes:
- `"prompt"`: The input text for the model.
- `"completion"`: The expected output from the model.
Example entry:
```json
{
"prompt": "Given this EAD/XML snippet representing a parent element, generate a valid child element that fits within the EAD structure. Snippet: <parent-element>...</parent-element>",
"completion": "<child-element>...</child-element>"
}
```
## Applications
- **Fine-Tuning**: Train large language models to understand structured XML data.
- **XML Autocompletion**: Build tools for EAD/XML editing and validation.
- **Information Retrieval**: Develop systems to extract meaningful content from XML archives.
- **Data Imputation**: Enhance the capability of models to recover missing or incomplete data.
## Citation
If you use this dataset in your research or development, please cite it as follows:
```
@dataset{ead_xml_prompt_completion,
title={EAD/XML Dataset for Prompt-Completion Tasks},
author={Géraldine Geoffroy},
year={2024},
publisher={Huggingface Datasets},
url={https://huggingface.co/datasets/Geraldine/Ead-Instruct-full-175k}
}
```
## Acknowledgments
This dataset was created using EAD/XML files sourced from the `Inventaires du Catalogue général des manuscrits (CGM)` (BnF) collection.
|
jasong03/summary-dataset | jasong03 | "2024-12-04T15:27:45Z" | 9 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T15:27:43Z" | ---
dataset_info:
features:
- name: summary
dtype: string
splits:
- name: train
num_bytes: 6672204
num_examples: 27360
download_size: 3553772
dataset_size: 6672204
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
vinesmsuic/SwissProtCLAP_500k_gpt4o | vinesmsuic | "2024-12-05T07:16:25Z" | 9 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T15:48:07Z" | ---
dataset_info:
features:
- name: UniProt ID
dtype: string
- name: Protein Sequence
dtype: string
- name: gt_desc
dtype: string
- name: structure_info
dtype: string
- name: functional_info
dtype: string
splits:
- name: train
num_bytes: 924217773
num_examples: 539563
download_size: 311565954
dataset_size: 924217773
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
geodevwalid23/palm_tree_dataset | geodevwalid23 | "2024-12-04T17:49:09Z" | 9 | 0 | [
"license:unknown",
"region:us"
] | null | "2024-12-04T17:09:03Z" | ---
license: unknown
---
|
jeongseokoh/GSM8K-Contrastive | jeongseokoh | "2024-12-05T07:41:27Z" | 9 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T17:57:52Z" | ---
dataset_info:
features:
- name: question
dtype: string
- name: past_steps
sequence: string
- name: answer
dtype: string
- name: original_question
dtype: string
- name: original_rp
dtype: string
- name: negative_steps
sequence: string
- name: positive_steps
dtype: string
- name: task
dtype: string
splits:
- name: train
num_bytes: 214179460
num_examples: 118058
download_size: 112247393
dataset_size: 214179460
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ashercn97/reasoning-v1-small | ashercn97 | "2024-12-04T18:59:07Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"library:distilabel",
"region:us",
"synthetic",
"distilabel",
"rlaif"
] | null | "2024-12-04T18:59:04Z" | ---
size_categories: n<1K
dataset_info:
features:
- name: anchor
dtype: string
- name: logical
dtype: string
- name: illogical
dtype: string
splits:
- name: train
num_bytes: 411452
num_examples: 1000
download_size: 248318
dataset_size: 411452
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
tags:
- synthetic
- distilabel
- rlaif
---
<p align="left">
<a href="https://github.com/argilla-io/distilabel">
<img src="https://raw.githubusercontent.com/argilla-io/distilabel/main/docs/assets/distilabel-badge-light.png" alt="Built with Distilabel" width="200" height="32"/>
</a>
</p>
# Dataset Card for reasoning-v1-small
This dataset has been created with [distilabel](https://distilabel.argilla.io/).
## Dataset Summary
This dataset contains a `pipeline.yaml` which can be used to reproduce the pipeline that generated it in distilabel using the `distilabel` CLI:
```console
distilabel pipeline run --config "https://huggingface.co/datasets/ashercn97/reasoning-v1-small/raw/main/pipeline.yaml"
```
or explore the configuration:
```console
distilabel pipeline info --config "https://huggingface.co/datasets/ashercn97/reasoning-v1-small/raw/main/pipeline.yaml"
```
## Dataset structure
The examples have the following structure per configuration:
<details><summary> Configuration: default </summary><hr>
```json
{
"anchor": "George wants to warm his hands quickly by rubbing them. Which skin surface will produce the most heat?",
"illogical": "Rubbing the back of his hands would be ineffective as it generates less warmth compared to other body parts.",
"logical": "The palms of George\u0027s hands will produce the most heat when he rubs them together because they have a higher concentration of blood vessels."
}
```
This subset can be loaded as:
```python
from datasets import load_dataset
ds = load_dataset("ashercn97/reasoning-v1-small", "default")
```
Or simply as follows, since there is only one configuration, named `default`:
```python
from datasets import load_dataset
ds = load_dataset("ashercn97/reasoning-v1-small")
```
</details>
|
udamaurizio/Google_TTS_Ita_v1 | udamaurizio | "2024-12-04T19:36:51Z" | 9 | 0 | [
"language:it",
"size_categories:n<1K",
"format:audiofolder",
"modality:audio",
"modality:text",
"library:datasets",
"library:mlcroissant",
"region:us",
"audio",
"tts",
"text",
"udanet"
] | null | "2024-12-04T19:17:41Z" | ---
language:
- it
tags:
- audio
- tts
- text
- udanet
--- |
gokulsrinivasagan/processed_book_corpus_cleaned | gokulsrinivasagan | "2024-12-04T19:45:04Z" | 9 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T19:40:41Z" | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: special_tokens_mask
sequence: int8
splits:
- name: train
num_bytes: 7023275388.332596
num_examples: 2277342
- name: validation
num_bytes: 372257304.0
num_examples: 120706
download_size: 2053364016
dataset_size: 7395532692.332596
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
ashercn97/reasoning-v2-small | ashercn97 | "2024-12-04T19:59:40Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"library:distilabel",
"region:us",
"synthetic",
"distilabel",
"rlaif"
] | null | "2024-12-04T19:59:37Z" | ---
size_categories: n<1K
dataset_info:
features:
- name: anchor
dtype: string
- name: logical
dtype: string
- name: illogical
dtype: string
splits:
- name: train
num_bytes: 562289
num_examples: 1000
download_size: 192324
dataset_size: 562289
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
tags:
- synthetic
- distilabel
- rlaif
---
<p align="left">
<a href="https://github.com/argilla-io/distilabel">
<img src="https://raw.githubusercontent.com/argilla-io/distilabel/main/docs/assets/distilabel-badge-light.png" alt="Built with Distilabel" width="200" height="32"/>
</a>
</p>
# Dataset Card for reasoning-v2-small
This dataset has been created with [distilabel](https://distilabel.argilla.io/).
## Dataset Summary
This dataset contains a `pipeline.yaml` which can be used to reproduce the pipeline that generated it in distilabel using the `distilabel` CLI:
```console
distilabel pipeline run --config "https://huggingface.co/datasets/ashercn97/reasoning-v2-small/raw/main/pipeline.yaml"
```
or explore the configuration:
```console
distilabel pipeline info --config "https://huggingface.co/datasets/ashercn97/reasoning-v2-small/raw/main/pipeline.yaml"
```
## Dataset structure
The examples have the following structure per configuration:
<details><summary> Configuration: default </summary><hr>
```json
{
"anchor": "If a poppy is boring and red, then the poppy is big.\nIf a poppy is fast, then the poppy is weak.\nIf a poppy is smart or hot, then the poppy is good.\nIf a poppy is purple, then the poppy is bad.\nIf a poppy is beautiful and strong, then the poppy is soft.\nFact:\nThe poppy is beautiful and hot.\nThe following can be determined about the poppy:",
"illogical": "If a poppy is boring and weak, then the poppy is small.",
"logical": "Given that the poppy is beautiful and hot, we can conclude that the poppy is good."
}
```
This subset can be loaded as:
```python
from datasets import load_dataset
ds = load_dataset("ashercn97/reasoning-v2-small", "default")
```
Or simply as follows, since there is only one configuration, named `default`:
```python
from datasets import load_dataset
ds = load_dataset("ashercn97/reasoning-v2-small")
```
</details>
|
Samoed/IndicCrosslingualSTS | Samoed | "2024-12-04T20:43:11Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T20:42:32Z" | ---
dataset_info:
- config_name: en-as
features:
- name: sentence1
dtype: string
- name: sentence2
dtype: string
- name: score
dtype: float64
splits:
- name: test
num_bytes: 60945
num_examples: 256
download_size: 35376
dataset_size: 60945
- config_name: en-bn
features:
- name: sentence1
dtype: string
- name: sentence2
dtype: string
- name: score
dtype: float64
splits:
- name: test
num_bytes: 67460
num_examples: 256
download_size: 38088
dataset_size: 67460
- config_name: en-gu
features:
- name: sentence1
dtype: string
- name: sentence2
dtype: string
- name: score
dtype: float64
splits:
- name: test
num_bytes: 64786
num_examples: 256
download_size: 37140
dataset_size: 64786
- config_name: en-hi
features:
- name: sentence1
dtype: string
- name: sentence2
dtype: string
- name: score
dtype: float64
splits:
- name: test
num_bytes: 92497
num_examples: 256
download_size: 51498
dataset_size: 92497
- config_name: en-kn
features:
- name: sentence1
dtype: string
- name: sentence2
dtype: string
- name: score
dtype: float64
splits:
- name: test
num_bytes: 77385
num_examples: 256
download_size: 42987
dataset_size: 77385
- config_name: en-ml
features:
- name: sentence1
dtype: string
- name: sentence2
dtype: string
- name: score
dtype: float64
splits:
- name: test
num_bytes: 79979
num_examples: 256
download_size: 44196
dataset_size: 79979
- config_name: en-mr
features:
- name: sentence1
dtype: string
- name: sentence2
dtype: string
- name: score
dtype: float64
splits:
- name: test
num_bytes: 75928
num_examples: 256
download_size: 43383
dataset_size: 75928
- config_name: en-or
features:
- name: sentence1
dtype: string
- name: sentence2
dtype: string
- name: score
dtype: float64
splits:
- name: test
num_bytes: 57794
num_examples: 256
download_size: 32315
dataset_size: 57794
- config_name: en-pa
features:
- name: sentence1
dtype: string
- name: sentence2
dtype: string
- name: score
dtype: float64
splits:
- name: test
num_bytes: 75532
num_examples: 256
download_size: 43175
dataset_size: 75532
- config_name: en-ta
features:
- name: sentence1
dtype: string
- name: sentence2
dtype: string
- name: score
dtype: float64
splits:
- name: test
num_bytes: 87284
num_examples: 256
download_size: 43472
dataset_size: 87284
- config_name: en-te
features:
- name: sentence1
dtype: string
- name: sentence2
dtype: string
- name: score
dtype: float64
splits:
- name: test
num_bytes: 79011
num_examples: 256
download_size: 43790
dataset_size: 79011
- config_name: en-ur
features:
- name: sentence1
dtype: string
- name: sentence2
dtype: string
- name: score
dtype: float64
splits:
- name: test
num_bytes: 72395
num_examples: 256
download_size: 46115
dataset_size: 72395
configs:
- config_name: en-as
data_files:
- split: test
path: en-as/test-*
- config_name: en-bn
data_files:
- split: test
path: en-bn/test-*
- config_name: en-gu
data_files:
- split: test
path: en-gu/test-*
- config_name: en-hi
data_files:
- split: test
path: en-hi/test-*
- config_name: en-kn
data_files:
- split: test
path: en-kn/test-*
- config_name: en-ml
data_files:
- split: test
path: en-ml/test-*
- config_name: en-mr
data_files:
- split: test
path: en-mr/test-*
- config_name: en-or
data_files:
- split: test
path: en-or/test-*
- config_name: en-pa
data_files:
- split: test
path: en-pa/test-*
- config_name: en-ta
data_files:
- split: test
path: en-ta/test-*
- config_name: en-te
data_files:
- split: test
path: en-te/test-*
- config_name: en-ur
data_files:
- split: test
path: en-ur/test-*
---
|
plaguss/math_shepherd_token | plaguss | "2024-12-04T21:23:51Z" | 9 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T21:23:35Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: completions
sequence: string
- name: labels
sequence: bool
splits:
- name: train
num_bytes: 368155117
num_examples: 422422
- name: test
num_bytes: 19423237
num_examples: 22233
download_size: 195393521
dataset_size: 387578354
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
kawsarahmd/english_bangla_nmt_datasets_bidirectional_v2 | kawsarahmd | "2024-12-04T21:49:30Z" | 9 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T21:49:17Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: news_id
dtype: string
- name: input_text
dtype: string
- name: output_text
dtype: string
splits:
- name: train
num_bytes: 326584767
num_examples: 105600
- name: validation
num_bytes: 28094694
num_examples: 9387
- name: test
num_bytes: 7209579
num_examples: 2347
download_size: 172182902
dataset_size: 361889040
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
jacobmorrison/tulu-3-sft-single-turn | jacobmorrison | "2024-12-04T21:58:11Z" | 9 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T21:57:27Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: source
dtype: string
splits:
- name: train
num_bytes: 2388461728
num_examples: 939343
download_size: 1152053521
dataset_size: 2388461728
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ayoubsa/Sign_Road_Detection_Dataset | ayoubsa | "2024-12-07T04:05:18Z" | 9 | 0 | [
"task_categories:object-detection",
"language:en",
"license:cc",
"size_categories:1K<n<10K",
"region:us"
] | [
"object-detection"
] | "2024-12-04T23:57:59Z" | ---
license: cc
task_categories:
- object-detection
language:
- en
size_categories:
- 1K<n<10K
---
# Dataset Description
The dataset contains images of road signs with annotations in YOLO format, which specify the class ID and the bounding box coordinates for each object.
There are 15 classes:
- Traffic Lights: Green Light, Red Light.
- Speed Limits: 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120.
- Stop Sign: Stop.
Each image can contain multiple road signs. The dataset simulates real-world driving conditions, including varying weather, lighting, and road environments.
# Dataset Usage
You can use this dataset to detect different road signs. You can submit your results to the CodaLab competition leaderboard: https://codalab.lisn.upsaclay.fr/competitions/21061#results
# Authors
Provided by a Roboflow user.
# License
CC BY 4.0.
|
nlv23/earlymodernspanishonchina | nlv23 | "2024-12-05T01:29:12Z" | 9 | 0 | [
"license:cc-by-nc-3.0",
"size_categories:n<1K",
"format:text",
"modality:text",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-12-05T01:28:34Z" | ---
license: cc-by-nc-3.0
---
|
ernestchu/emnist-digits | ernestchu | "2024-12-05T01:51:52Z" | 9 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:image",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T01:34:14Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: label
dtype: int64
splits:
- name: train
num_bytes: 102459463.0
num_examples: 240000
- name: test
num_bytes: 17074672.0
num_examples: 40000
download_size: 371539854
dataset_size: 119534135.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
mk43275/combined_diary | mk43275 | "2024-12-05T02:27:54Z" | 9 | 0 | [
"license:mit",
"size_categories:n<1K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T02:27:40Z" | ---
license: mit
---
|
kornwtp/en-stsbenchmark-sts | kornwtp | "2024-12-05T02:44:24Z" | 9 | 0 | [
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T02:39:20Z" | ---
license: apache-2.0
---
|
naufalso/owasp_top10_2017_2021 | naufalso | "2024-12-05T03:23:50Z" | 9 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T03:23:47Z" | ---
dataset_info:
features:
- name: title
dtype: string
- name: path
dtype: string
- name: text
dtype: string
- name: total_chars
dtype: int64
- name: file_size_mb
dtype: float64
- name: text_w_embed_image
dtype: string
splits:
- name: train
num_bytes: 2623829
num_examples: 39
download_size: 2487903
dataset_size: 2623829
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
linyongj/eval_so100_act | linyongj | "2024-12-05T03:36:05Z" | 9 | 0 | [
"task_categories:robotics",
"region:us",
"LeRobot",
"so100",
"tutorial"
] | [
"robotics"
] | "2024-12-05T03:35:08Z" | ---
task_categories:
- robotics
tags:
- LeRobot
- so100
- tutorial
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
|
udamaurizio/parler_Google_TTS_Ita_v1_prompted | udamaurizio | "2024-12-09T16:55:38Z" | 9 | 0 | [
"language:it",
"license:apache-2.0",
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"tts"
] | null | "2024-12-05T04:07:44Z" | ---
language:
- it
dataset_info:
features:
- name: text
dtype: string
- name: transcription_normalised
dtype: string
- name: utterance_pitch_mean
dtype: float32
- name: utterance_pitch_std
dtype: float32
- name: snr
dtype: float64
- name: c50
dtype: float64
- name: speaking_rate
dtype: string
- name: phonemes
dtype: string
- name: noise
dtype: string
- name: reverberation
dtype: string
- name: speech_monotony
dtype: string
- name: text_description
dtype: string
splits:
- name: train
num_bytes: 28361
num_examples: 68
download_size: 18871
dataset_size: 28361
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
license: apache-2.0
tags:
- tts
size_categories:
- n<1K
--- |
rmsdud/chat | rmsdud | "2024-12-05T05:01:08Z" | 9 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T05:00:45Z" | ---
dataset_info:
features:
- name: context
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 3753161
num_examples: 23263
download_size: 1554612
dataset_size: 3753161
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
SaltyCedar/Copus_pat2011_forSP | SaltyCedar | "2024-12-05T07:01:22Z" | 9 | 0 | [
"license:apache-2.0",
"size_categories:10M<n<100M",
"format:text",
"modality:text",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-12-05T06:39:42Z" | ---
license: apache-2.0
---
|
jkazdan/pku-safe-30k-test-Mistral-7B-Instruct-v0.2 | jkazdan | "2024-12-05T07:38:00Z" | 9 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T06:53:45Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 2641924
num_examples: 2816
download_size: 1362334
dataset_size: 2641924
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|