datasetId
stringlengths 5
121
| author
stringlengths 2
42
| last_modified
unknown | downloads
int64 0
2.66M
| likes
int64 0
6.48k
| tags
sequencelengths 1
7.92k
| task_categories
sequencelengths 0
47
⌀ | createdAt
unknown | card
stringlengths 15
1M
|
---|---|---|---|---|---|---|---|---|
pclucas14/nqa-RAG-256_0_24 | pclucas14 | "2024-12-02T23:00:23Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T23:00:21Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 27966150
num_examples: 66
download_size: 11309961
dataset_size: 27966150
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_11_24 | pclucas14 | "2024-12-02T23:00:47Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T23:00:45Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 26278970
num_examples: 66
download_size: 10248590
dataset_size: 26278970
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_1_24 | pclucas14 | "2024-12-02T23:01:01Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T23:00:59Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 27289971
num_examples: 66
download_size: 10874793
dataset_size: 27289971
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_7_24 | pclucas14 | "2024-12-02T23:03:28Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T23:03:27Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 26787005
num_examples: 66
download_size: 10503985
dataset_size: 26787005
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pclucas14/nqa-RAG-256_4_24 | pclucas14 | "2024-12-02T23:05:52Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-02T23:05:50Z" | ---
dataset_info:
features:
- name: text
sequence:
sequence: string
- name: questions
sequence: string
- name: answers
sequence:
sequence: string
- name: document_id
dtype: string
- name: split
dtype: string
splits:
- name: train
num_bytes: 26619269
num_examples: 66
download_size: 11036439
dataset_size: 26619269
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
doejn771/code_x_glue_ct_code_to_text_java_python | doejn771 | "2024-12-03T00:19:02Z" | 8 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T00:12:46Z" | ---
dataset_info:
features:
- name: id
dtype: int32
- name: repo
dtype: string
- name: path
dtype: string
- name: func_name
dtype: string
- name: original_string
dtype: string
- name: language
dtype: string
- name: code
dtype: string
- name: code_tokens
sequence: string
- name: docstring
dtype: string
- name: docstring_tokens
sequence: string
- name: sha
dtype: string
- name: url
dtype: string
splits:
- name: train
num_bytes: 1266216983
num_examples: 416743
- name: validation
num_bytes: 60254908
num_examples: 19097
- name: test
num_bytes: 79740441
num_examples: 25873
download_size: 480195417
dataset_size: 1406212332
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
richmondsin/truthfulqa_id_mc2_results | richmondsin | "2024-12-03T00:28:14Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:json",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T00:28:03Z" | ---
pretty_name: Evaluation run of google/gemma-2-2b
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b)\nThe dataset is\
\ composed of 0 configuration(s), each one corresponding to one of the evaluated\
\ task.\n\nThe dataset has been created from 2 run(s). Each run can be found as\
\ a specific split in each configuration, the split being named using the timestamp\
\ of the run.The \"train\" split is always pointing to the latest results.\n\nAn\
\ additional configuration \"results\" store all the aggregated results of the run.\n\
\nTo load the details from a run, you can for instance do the following:\n```python\n\
from datasets import load_dataset\ndata = load_dataset(\n\t\"richmondsin/truthfulqa_id_mc2_results\"\
,\n\tname=\"google__gemma-2-2b__truthfulqa_id_mc2\",\n\tsplit=\"latest\"\n)\n```\n\
\n## Latest results\n\nThese are the [latest results from run 2024-12-02T19-28-03.715223](https://huggingface.co/datasets/richmondsin/truthfulqa_id_mc2_results/blob/main/google/gemma-2-2b/results_2024-12-02T19-28-03.715223.json)\
\ (note that there might be results for other tasks in the repos if successive evals\
\ didn't cover the same tasks. You find each in the results and the \"latest\" split\
\ for each eval):\n\n```python\n{\n \"all\": {\n \"truthfulqa_id_mc2\"\
: {\n \"alias\": \"truthfulqa_id_mc2\",\n \"acc,none\": 0.4366475601155338,\n\
\ \"acc_stderr,none\": 0.016426278376888724\n }\n },\n \"\
truthfulqa_id_mc2\": {\n \"alias\": \"truthfulqa_id_mc2\",\n \"acc,none\"\
: 0.4366475601155338,\n \"acc_stderr,none\": 0.016426278376888724\n }\n\
}\n```"
repo_url: https://huggingface.co/google/gemma-2-2b
leaderboard_url: ''
point_of_contact: ''
configs:
- config_name: google__gemma-2-2b__truthfulqa_id_mc2
data_files:
- split: 2024_12_02T19_28_03.715223
path:
- '**/samples_truthfulqa_id_mc2_2024-12-02T19-28-03.715223.jsonl'
- split: latest
path:
- '**/samples_truthfulqa_id_mc2_2024-12-02T19-28-03.715223.jsonl'
---
# Dataset Card for Evaluation run of google/gemma-2-2b
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b)
The dataset is composed of 0 configuration(s), each one corresponding to one of the evaluated task.
The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results.
An additional configuration "results" store all the aggregated results of the run.
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset(
"richmondsin/truthfulqa_id_mc2_results",
name="google__gemma-2-2b__truthfulqa_id_mc2",
split="latest"
)
```
## Latest results
These are the [latest results from run 2024-12-02T19-28-03.715223](https://huggingface.co/datasets/richmondsin/truthfulqa_id_mc2_results/blob/main/google/gemma-2-2b/results_2024-12-02T19-28-03.715223.json) (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval):
```python
{
"all": {
"truthfulqa_id_mc2": {
"alias": "truthfulqa_id_mc2",
"acc,none": 0.4366475601155338,
"acc_stderr,none": 0.016426278376888724
}
},
"truthfulqa_id_mc2": {
"alias": "truthfulqa_id_mc2",
"acc,none": 0.4366475601155338,
"acc_stderr,none": 0.016426278376888724
}
}
```
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
julia-se/tracka_mistral_fewshot_anger | julia-se | "2024-12-03T00:44:17Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T00:44:15Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: text
dtype: string
- name: Anger
dtype: int64
- name: Disgust
dtype: int64
- name: Fear
dtype: int64
- name: Joy
dtype: int64
- name: Sadness
dtype: int64
- name: Surprise
dtype: int64
- name: predicted_is_anger
dtype: int64
- name: y_anger
dtype: int64
splits:
- name: train
num_bytes: 472807
num_examples: 2226
download_size: 217016
dataset_size: 472807
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
juliadollis/stf_regex_ner_2_fuzzycosseno_80 | juliadollis | "2024-12-03T04:30:55Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T04:30:43Z" | ---
dataset_info:
features:
- name: inteiro_teor
dtype: string
- name: url_download
dtype: string
- name: dataDecisao
dtype: timestamp[ns]
- name: dataPublicacao
dtype: timestamp[ns]
- name: decisao
dtype: string
- name: descricaoClasse
dtype: string
- name: ementa
dtype: string
- name: id
dtype: string
- name: jurisprudenciaCitada
dtype: string
- name: ministroRelator
dtype: string
- name: nomeOrgaoJulgador
dtype: string
- name: numeroProcesso
dtype: string
- name: referenciasLegislativas
sequence: string
- name: siglaClasse
dtype: string
- name: tipoDeDecisao
dtype: string
- name: titulo
dtype: string
- name: acordaosSimilares
sequence: string
- name: partes_lista_texto
dtype: string
- name: temaProcs
sequence: string
- name: inteiro_teor_regex
dtype: string
- name: NER
struct:
- name: JURISPRUDENCIA
sequence: string
- name: LEGISLACAO
sequence: string
- name: LOCAL
sequence: string
- name: ORGANIZACAO
sequence: string
- name: PESSOA
sequence: string
- name: TEMPO
sequence: string
- name: desambiguacao
list:
- name: class
dtype: string
- name: count
dtype: int64
- name: elements
sequence: string
- name: entity
dtype: string
splits:
- name: train
num_bytes: 160888933
num_examples: 1000
download_size: 44243977
dataset_size: 160888933
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
JoeLeelyf/OVBench | JoeLeelyf | "2024-12-03T06:47:20Z" | 8 | 0 | [
"license:cc-by-nc-sa-4.0",
"region:us"
] | null | "2024-12-03T06:47:20Z" | ---
license: cc-by-nc-sa-4.0
---
|
seachen/stable-1 | seachen | "2024-12-03T07:05:19Z" | 8 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2024-12-03T07:01:16Z" | ---
license: apache-2.0
---
|
r1v3r/bitflags-filterbyLLM-verified | r1v3r | "2024-12-04T03:42:39Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T08:28:15Z" | ---
dataset_info:
features:
- name: repo
dtype: string
- name: problem_statement
dtype: string
- name: hints_text
dtype: string
- name: instance_id
dtype: string
- name: issue_numbers
sequence: string
- name: base_commit
dtype: string
- name: test_patch
dtype: string
- name: version
dtype: string
- name: pull_number
dtype: int64
- name: created_at
dtype: string
- name: patch
dtype: string
- name: environment_setup_commit
dtype: string
- name: FAIL_TO_PASS
sequence: string
- name: PASS_TO_PASS
sequence: string
- name: FAIL_TO_FAIL
sequence: string
- name: PASS_TO_FAIL
sequence: 'null'
splits:
- name: train
num_bytes: 164014
num_examples: 9
download_size: 58139
dataset_size: 164014
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ELVISIO/incorrect_triplet | ELVISIO | "2024-12-03T08:48:48Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T08:48:47Z" | ---
dataset_info:
features:
- name: doc_regexclean01
dtype: string
- name: anchor
dtype: string
- name: positive
dtype: string
- name: negative
dtype: string
splits:
- name: train
num_bytes: 832690
num_examples: 558
download_size: 499310
dataset_size: 832690
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
vantral/selkup_me_pl | vantral | "2024-12-03T10:32:15Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T10:32:12Z" | ---
dataset_info:
features:
- name: all
struct:
- name: interlinear-text
list:
- name: item
struct:
- name: source
dtype: string
- name: paragraph
list:
- name: item
struct:
- name: speaker
dtype: string
- name: phrase
list:
- name: item
struct:
- name: ft
dtype: string
- name: id
dtype: string
- name: participant
dtype: string
- name: timestamp
sequence: string
- name: word
list:
list:
- name: item
struct:
- name: grammar_tags
sequence: string
- name: translation
sequence: string
- name: txt
dtype: string
- name: morph
list:
- name: item
struct:
- name: gls
dtype: string
- name: id
dtype: string
- name: txt
dtype: string
- name: item
dtype: 'null'
splits:
- name: train
num_bytes: 29025
num_examples: 1
download_size: 23213
dataset_size: 29025
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
babs/podcast-12 | babs | "2024-12-03T10:35:49Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T10:35:44Z" | ---
dataset_info:
features:
- name: audio
dtype: audio
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 206176216.0
num_examples: 339
download_size: 194924855
dataset_size: 206176216.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
shreyasgite/so100_test | shreyasgite | "2024-12-03T10:48:11Z" | 8 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"so100",
"tutorial"
] | [
"robotics"
] | "2024-12-03T10:48:01Z" | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so100
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.0",
"robot_type": "so100",
"total_episodes": 2,
"total_frames": 2390,
"total_tasks": 1,
"total_videos": 4,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:2"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.laptop": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.phone": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
babs/podcast-16 | babs | "2024-12-03T10:48:21Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T10:48:16Z" | ---
dataset_info:
features:
- name: audio
dtype: audio
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 199714333.0
num_examples: 233
download_size: 190909606
dataset_size: 199714333.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
all-oj-gen/ds_coder_reflct_rmsprop_iter3_sppo_hard_new_all_oj_iter3-bin | all-oj-gen | "2024-12-03T12:19:56Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T12:19:51Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: chosen_probs
dtype: float64
- name: chosen_probs_win
dtype: float64
- name: chosen_probs_lose
dtype: float64
splits:
- name: train
num_bytes: 19631270
num_examples: 4925
download_size: 8690052
dataset_size: 19631270
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder_reflct_rmsprop_iter3_sppo_hard_new_all_oj_iter3-bin"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
all-oj-gen/ds_coder_reflct_rmsprop_iter3_sppo_hard_new_all_oj_iter3-full_resp_trace | all-oj-gen | "2024-12-03T12:20:02Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T12:19:57Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: test
dtype: string
- name: tag
dtype: string
- name: id
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: text_prompt
dtype: string
- name: text_chosen
dtype: string
- name: text_rejected
dtype: string
- name: generate_0
dtype: string
- name: generate_0_score
dtype: int64
- name: traceback_0
dtype: string
- name: generate_1
dtype: string
- name: generate_1_score
dtype: int64
- name: traceback_1
dtype: string
- name: generate_2
dtype: string
- name: generate_2_score
dtype: int64
- name: traceback_2
dtype: string
- name: generate_3
dtype: string
- name: generate_3_score
dtype: int64
- name: traceback_3
dtype: string
- name: probability
sequence:
sequence: float64
- name: rm_scores
sequence: int64
splits:
- name: train
num_bytes: 50739368
num_examples: 4925
download_size: 19826846
dataset_size: 50739368
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder_reflct_rmsprop_iter3_sppo_hard_new_all_oj_iter3-full_resp_trace"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
all-oj-gen/ds_coder_reflct_rmsprop_iter3_sppo_hard_new_all_oj_iter3-bin_all_pairs | all-oj-gen | "2024-12-03T12:20:07Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T12:20:03Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: test
dtype: string
splits:
- name: train
num_bytes: 38571495
num_examples: 9504
download_size: 11589303
dataset_size: 38571495
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder_reflct_rmsprop_iter3_sppo_hard_new_all_oj_iter3-bin_all_pairs"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
juliadollis/stf_regex_ner_2_fuzzyover_70 | juliadollis | "2024-12-03T13:30:26Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T13:29:39Z" | ---
dataset_info:
features:
- name: inteiro_teor
dtype: string
- name: url_download
dtype: string
- name: dataDecisao
dtype: timestamp[ns]
- name: dataPublicacao
dtype: timestamp[ns]
- name: decisao
dtype: string
- name: descricaoClasse
dtype: string
- name: ementa
dtype: string
- name: id
dtype: string
- name: jurisprudenciaCitada
dtype: string
- name: ministroRelator
dtype: string
- name: nomeOrgaoJulgador
dtype: string
- name: numeroProcesso
dtype: string
- name: referenciasLegislativas
sequence: string
- name: siglaClasse
dtype: string
- name: tipoDeDecisao
dtype: string
- name: titulo
dtype: string
- name: acordaosSimilares
sequence: string
- name: partes_lista_texto
dtype: string
- name: temaProcs
sequence: string
- name: inteiro_teor_regex
dtype: string
- name: NER
struct:
- name: JURISPRUDENCIA
sequence: string
- name: LEGISLACAO
sequence: string
- name: LOCAL
sequence: string
- name: ORGANIZACAO
sequence: string
- name: PESSOA
sequence: string
- name: TEMPO
sequence: string
- name: desambiguacao
list:
- name: class
dtype: string
- name: count
dtype: int64
- name: elements
sequence: string
- name: entity
dtype: string
splits:
- name: train
num_bytes: 157934422
num_examples: 1000
download_size: 43794395
dataset_size: 157934422
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
DT4LM/t5v1-1base_sst2_pair_leap | DT4LM | "2024-12-03T14:15:15Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T14:10:54Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype: int32
splits:
- name: train
num_bytes: 48613
num_examples: 637
download_size: 34535
dataset_size: 48613
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/opengpt_gpt-4o-mini_scale_x.25 | mlfoundations-dev | "2024-12-03T21:28:25Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T14:35:03Z" | ---
dataset_info:
features:
- name: language
dtype: string
- name: quantity
dtype: int64
- name: task
dtype: string
- name: instruction
dtype: string
- name: input
dtype: string
- name: output
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 3767638
num_examples: 1479
download_size: 1912183
dataset_size: 3767638
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Alwaly/parler_tts-descriptions-tags_bis_wom_test | Alwaly | "2024-12-03T15:29:01Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T15:28:59Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: utterance_pitch_mean
dtype: float32
- name: utterance_pitch_std
dtype: float32
- name: snr
dtype: float64
- name: c50
dtype: float64
- name: speaking_rate
dtype: string
- name: phonemes
dtype: string
- name: stoi
dtype: float64
- name: si-sdr
dtype: float64
- name: pesq
dtype: float64
- name: noise
dtype: string
- name: reverberation
dtype: string
- name: speech_monotony
dtype: string
- name: sdr_noise
dtype: string
- name: pesq_speech_quality
dtype: string
- name: text_description
dtype: string
splits:
- name: test
num_bytes: 770344
num_examples: 1995
download_size: 301291
dataset_size: 770344
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
all-oj-gen/ds_coder_reflct_rmsprop_iter4_sppo_hard_new_all_oj_iter4-bin | all-oj-gen | "2024-12-03T16:12:19Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T16:12:15Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: chosen_probs
dtype: float64
- name: chosen_probs_win
dtype: float64
- name: chosen_probs_lose
dtype: float64
splits:
- name: train
num_bytes: 19207775
num_examples: 4786
download_size: 8513208
dataset_size: 19207775
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder_reflct_rmsprop_iter4_sppo_hard_new_all_oj_iter4-bin"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
all-oj-gen/ds_coder_reflct_rmsprop_iter4_sppo_hard_new_all_oj_iter4-full_resp_trace | all-oj-gen | "2024-12-03T16:12:29Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T16:12:20Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: test
dtype: string
- name: tag
dtype: string
- name: id
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: text_prompt
dtype: string
- name: text_chosen
dtype: string
- name: text_rejected
dtype: string
- name: generate_0
dtype: string
- name: generate_0_score
dtype: int64
- name: traceback_0
dtype: string
- name: generate_1
dtype: string
- name: generate_1_score
dtype: int64
- name: traceback_1
dtype: string
- name: generate_2
dtype: string
- name: generate_2_score
dtype: int64
- name: traceback_2
dtype: string
- name: generate_3
dtype: string
- name: generate_3_score
dtype: int64
- name: traceback_3
dtype: string
- name: probability
sequence:
sequence: float64
- name: rm_scores
sequence: int64
splits:
- name: train
num_bytes: 49550398
num_examples: 4786
download_size: 19394475
dataset_size: 49550398
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder_reflct_rmsprop_iter4_sppo_hard_new_all_oj_iter4-full_resp_trace"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
mlfoundations-dev/alpaca_scale_x.25 | mlfoundations-dev | "2024-12-03T21:28:39Z" | 8 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T17:06:56Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: output
dtype: string
- name: filtered_reason
dtype: 'null'
- name: filtered_decision
dtype: bool
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 35395595
num_examples: 45284
download_size: 21459095
dataset_size: 35395595
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/unnatural_instructions_gpt-4o-mini_scale_x.25 | mlfoundations-dev | "2024-12-03T21:28:28Z" | 8 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T17:19:58Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: constraints
dtype: string
- name: output
dtype: string
- name: alternative_formulation
dtype: string
- name: alternative_formulation_inlined
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 27774548
num_examples: 17504
download_size: 10360530
dataset_size: 27774548
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ashercn97/reasoning-v2-yay | ashercn97 | "2024-12-03T17:26:32Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T17:26:27Z" | ---
dataset_info:
features:
- name: text_id
dtype: string
- name: text
dtype: string
- name: label
sequence: string
- name: split_text
sequence: string
splits:
- name: train
num_bytes: 4677252
num_examples: 3000
download_size: 2658627
dataset_size: 4677252
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/unnatural_instructions_gpt-4o-mini_scale_x.5 | mlfoundations-dev | "2024-12-03T21:28:30Z" | 8 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T17:40:41Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: constraints
dtype: string
- name: output
dtype: string
- name: alternative_formulation
dtype: string
- name: alternative_formulation_inlined
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 52683197
num_examples: 33253
download_size: 19622868
dataset_size: 52683197
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
RyanYr/self-reflect_mini8Bit-t0_mistlarge-t12_om2-7 | RyanYr | "2024-12-03T17:43:34Z" | 8 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T17:43:28Z" | ---
dataset_info:
features:
- name: problem
dtype: string
- name: generated_solution
dtype: string
- name: answer
dtype: string
- name: problem_source
dtype: string
- name: response@0
sequence: string
- name: response@1
sequence: string
- name: response@2
sequence: string
splits:
- name: train
num_bytes: 303172122
num_examples: 50000
download_size: 130821502
dataset_size: 303172122
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
RyanYr/self-reflect_mini8Bit-t0_mistlarge-t12_om2-7_binlabel | RyanYr | "2024-12-03T17:52:46Z" | 8 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T17:52:39Z" | ---
dataset_info:
features:
- name: problem
dtype: string
- name: generated_solution
dtype: string
- name: answer
dtype: string
- name: problem_source
dtype: string
- name: response@0
sequence: string
- name: response@1
sequence: string
- name: response@2
sequence: string
- name: response@0_ans
sequence: string
- name: response@0_correctness
sequence: bool
- name: response@2_ans
sequence: string
- name: response@2_correctness
sequence: bool
splits:
- name: train
num_bytes: 305368625
num_examples: 50000
download_size: 131914170
dataset_size: 305368625
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Saqib772/fashion | Saqib772 | "2024-12-03T18:46:27Z" | 8 | 0 | [
"license:mit",
"size_categories:1K<n<10K",
"format:imagefolder",
"modality:image",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-12-03T18:14:36Z" | ---
license: mit
---
|
mlfoundations-dev/alpaca_scale_x.5 | mlfoundations-dev | "2024-12-03T21:28:56Z" | 8 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T18:26:52Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: output
dtype: string
- name: filtered_reason
dtype: 'null'
- name: filtered_decision
dtype: bool
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 57105095
num_examples: 72773
download_size: 34690806
dataset_size: 57105095
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
NMizu/moonloader | NMizu | "2024-12-03T19:09:39Z" | 8 | 0 | [
"language:ru",
"license:mit",
"region:us"
] | null | "2024-12-03T18:46:39Z" | ---
license: mit
language:
- ru
--- |
haydenbspence/distilabel-example | haydenbspence | "2024-12-03T19:33:30Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T19:33:29Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: completion
dtype: string
- name: meta
struct:
- name: category
dtype: string
- name: completion
dtype: string
- name: id
dtype: int64
- name: input
dtype: 'null'
- name: motivation_app
dtype: 'null'
- name: prompt
dtype: string
- name: source
dtype: string
- name: subcategory
dtype: string
- name: generation
dtype: 'null'
- name: model_name
dtype: 'null'
- name: distilabel_metadata
struct:
- name: raw_input_text_generation_0
dtype: 'null'
- name: raw_output_text_generation_0
dtype: 'null'
splits:
- name: train
num_bytes: 16613
num_examples: 10
download_size: 19461
dataset_size: 16613
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlgawd/final_dpo_nemo_v8 | mlgawd | "2024-12-03T20:12:00Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T20:11:57Z" | ---
dataset_info:
features:
- name: questions
dtype: string
- name: accepted
dtype: string
- name: rejected
dtype: string
splits:
- name: train
num_bytes: 28069223
num_examples: 5877
download_size: 15919715
dataset_size: 28069223
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
LuanaMARD/historiav01 | LuanaMARD | "2024-12-03T20:49:36Z" | 8 | 0 | [
"license:mit",
"size_categories:n<1K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T20:31:38Z" | ---
license: mit
---
|
mlgawd/final_dpo_nemo_v10 | mlgawd | "2024-12-03T20:32:04Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T20:32:02Z" | ---
dataset_info:
features:
- name: questions
dtype: string
- name: accepted
dtype: string
- name: rejected
dtype: string
splits:
- name: train
num_bytes: 28102869
num_examples: 5877
download_size: 15924726
dataset_size: 28102869
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Synthyra/homodimer_dataset | Synthyra | "2024-12-05T20:20:20Z" | 8 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T20:33:29Z" | ---
dataset_info:
features:
- name: seq
dtype: string
- name: label
dtype: int64
splits:
- name: train
num_bytes: 76793473
num_examples: 164336
download_size: 74972805
dataset_size: 76793473
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
Dataset of proteins that homodimerize (1) and randomly selected proteins from StringDB (0).
Homodimers are selected from [here](https://seq2fun.dcmb.med.umich.edu/HomodimerDB/download.cgi) vai the 80% identity clustering.
|
Nash-pAnDiTa/youssef-mohamed-damar4 | Nash-pAnDiTa | "2024-12-03T20:45:08Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T20:44:52Z" | ---
dataset_info:
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 291129184.0
num_examples: 27
download_size: 285538441
dataset_size: 291129184.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlgawd/final_dpo_nemo_v12 | mlgawd | "2024-12-03T20:45:50Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T20:45:47Z" | ---
dataset_info:
features:
- name: questions
dtype: string
- name: accepted
dtype: string
- name: rejected
dtype: string
splits:
- name: train
num_bytes: 28031447
num_examples: 5864
download_size: 15892999
dataset_size: 28031447
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlfoundations-dev/airoboros_stage_3_roleplay_none_response_gpt-4o-inst_gpt_4o-mini_resp | mlfoundations-dev | "2024-12-03T21:03:42Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T21:03:34Z" | ---
dataset_info:
features:
- name: min_docsearch_score
dtype: float64
- name: airoboros_subset
dtype: string
- name: instruction
dtype: string
- name: embedding
sequence: float64
- name: too_similar
dtype: bool
- name: similar_text
dtype: string
- name: similar_text_distance
dtype: float64
splits:
- name: train
num_bytes: 192199
num_examples: 20
download_size: 148062
dataset_size: 192199
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
dgambettaphd/D_gen0_run2_llama2-7b_wiki_doc1000_real64_synt64 | dgambettaphd | "2024-12-03T22:36:26Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-03T22:36:23Z" | ---
dataset_info:
features:
- name: id
dtype: int64
- name: doc
dtype: string
splits:
- name: train
num_bytes: 578372
num_examples: 1000
download_size: 361575
dataset_size: 578372
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
CambioMoney/ami-speaker-analysis_full_run_5_validation | CambioMoney | "2024-12-04T00:21:20Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T00:15:51Z" | ---
dataset_info:
features:
- name: meeting_id
dtype: string
- name: audio_id
dtype: string
- name: text
dtype: string
- name: audio
struct:
- name: array
sequence: float64
- name: path
dtype: string
- name: sampling_rate
dtype: int64
- name: begin_time
dtype: float64
- name: end_time
dtype: float64
- name: microphone_id
dtype: string
- name: speaker_id
dtype: string
- name: is_complete
dtype: bool
- name: original_segment
dtype: bool
splits:
- name: train
num_bytes: 413360707
num_examples: 1084
download_size: 91497853
dataset_size: 413360707
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Hieuman/en_HRS_11_22_24 | Hieuman | "2024-12-04T00:16:16Z" | 8 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T00:16:05Z" | ---
dataset_info:
features:
- name: authorIDs
dtype: string
- name: fullText
sequence:
sequence: string
- name: cluster
dtype: int64
- name: retrieval_idx
sequence: int64
splits:
- name: train
num_bytes: 272149905
num_examples: 11433
download_size: 158253792
dataset_size: 272149905
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Hieuman/ru_HRS_11_22_24 | Hieuman | "2024-12-04T00:16:55Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T00:16:52Z" | ---
dataset_info:
features:
- name: authorIDs
dtype: string
- name: fullText
sequence:
sequence: string
- name: cluster
dtype: int64
- name: retrieval_idx
sequence: int64
splits:
- name: train
num_bytes: 70363749
num_examples: 2468
download_size: 34801169
dataset_size: 70363749
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
CambioMoney/ami-speaker-analysis_deepgram_run_train | CambioMoney | "2024-12-04T00:30:31Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T00:30:29Z" | ---
dataset_info:
features:
- name: meeting_id
dtype: string
- name: audio_id
dtype: string
- name: text
dtype: string
- name: audio
struct:
- name: array
sequence: float64
- name: path
dtype: string
- name: sampling_rate
dtype: int64
- name: begin_time
dtype: float64
- name: end_time
dtype: float64
- name: microphone_id
dtype: string
- name: speaker_id
dtype: string
- name: is_complete
dtype: bool
- name: original_segment
dtype: bool
- name: confidence
dtype: float64
splits:
- name: train
num_bytes: 18406083
num_examples: 36
download_size: 3608850
dataset_size: 18406083
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
siqi00/llama3_gsm8k_question_gsmlike_unhelpful_0.6_0.9_50_256 | siqi00 | "2024-12-04T01:44:44Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T01:21:47Z" | ---
dataset_info:
features:
- name: real
list:
- name: content
dtype: string
- name: role
dtype: string
- name: generated_0
list:
- name: content
dtype: string
- name: role
dtype: string
- name: generated_1
list:
- name: content
dtype: string
- name: role
dtype: string
- name: generated_2
list:
- name: content
dtype: string
- name: role
dtype: string
- name: generated_3
list:
- name: content
dtype: string
- name: role
dtype: string
- name: generated_4
list:
- name: content
dtype: string
- name: role
dtype: string
- name: generated_5
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 36559895
num_examples: 7473
download_size: 14555121
dataset_size: 36559895
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
gswamy/pythia-1.4B-tldr-vllm-pair-iter-2 | gswamy | "2024-12-04T01:54:43Z" | 8 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T01:54:22Z" | ---
dataset_info:
features:
- name: info
struct:
- name: id
dtype: string
- name: post
dtype: string
- name: title
dtype: string
- name: subreddit
dtype: string
- name: site
dtype: string
- name: article
dtype: string
- name: summaries
list:
- name: text
dtype: string
- name: policy
dtype: string
- name: note
dtype: string
- name: choice
dtype: int32
- name: worker
dtype: string
- name: batch
dtype: string
- name: split
dtype: string
- name: extra
struct:
- name: confidence
dtype: int32
- name: query_token
sequence: int64
- name: query
dtype: string
- name: response0
dtype: string
- name: response0_token
sequence: int64
- name: response0_token_len
dtype: int64
- name: response0_policy
dtype: string
- name: query_response0
dtype: string
- name: query_response0_token
sequence: int64
- name: query_response0_token_len
dtype: int64
- name: query_response0_token_response_label
sequence: int64
- name: response1
dtype: string
- name: response1_token
sequence: int64
- name: response1_token_len
dtype: int64
- name: response1_policy
dtype: string
- name: query_response1
dtype: string
- name: query_response1_token
sequence: int64
- name: query_response1_token_len
dtype: int64
- name: query_response1_token_response_label
sequence: int64
- name: query_token_len
dtype: int64
- name: policies
dtype: string
- name: iter_2_best_query_response
sequence: int64
- name: iter_2_worst_query_response
sequence: int64
- name: iter_2_best_mask
sequence: int64
- name: iter_2_worst_mask
sequence: int64
- name: iter_2_best_reward
dtype: float64
- name: iter_2_worst_reward
dtype: float64
splits:
- name: train
num_bytes: 4841788931
num_examples: 92858
download_size: 182255447
dataset_size: 4841788931
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
julia-se/tracka_mistral_multilabel | julia-se | "2024-12-04T02:35:48Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T02:35:46Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: text
dtype: string
- name: Anger
dtype: int64
- name: Disgust
dtype: int64
- name: Fear
dtype: int64
- name: Joy
dtype: int64
- name: Sadness
dtype: int64
- name: Surprise
dtype: int64
- name: Raiva
dtype: int64
- name: Nojo
dtype: int64
- name: Medo
dtype: int64
- name: Alegria
dtype: int64
- name: Tristeza
dtype: int64
- name: Surpresa
dtype: int64
splits:
- name: train
num_bytes: 544039
num_examples: 2226
download_size: 221435
dataset_size: 544039
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
all-oj-gen/ds_chat_pos_reflct_rmsprop_iter4_sppo_hard_new_all_oj_iter4-bin | all-oj-gen | "2024-12-04T04:28:03Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T04:28:00Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: rejected_traceback
dtype: string
- name: chosen_probs
dtype: float64
- name: chosen_probs_win
dtype: float64
- name: chosen_probs_lose
dtype: float64
splits:
- name: train
num_bytes: 14458986
num_examples: 4898
download_size: 6165576
dataset_size: 14458986
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_chat_pos_reflct_rmsprop_iter4_sppo_hard_new_all_oj_iter4-bin"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
r1v3r/asterinas_llm_versions | r1v3r | "2024-12-04T04:43:06Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T04:43:02Z" | ---
dataset_info:
features:
- name: version
dtype: string
- name: pull_number
dtype: int64
- name: problem_statement
dtype: string
- name: test_patch
dtype: string
- name: instance_id
dtype: string
- name: created_at
dtype: string
- name: base_commit
dtype: string
- name: repo
dtype: string
- name: issue_numbers
sequence: string
- name: patch
dtype: string
- name: hints_text
dtype: string
- name: environment_setup_commit
dtype: string
splits:
- name: train
num_bytes: 95500
num_examples: 3
download_size: 73934
dataset_size: 95500
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ininini/final2_QA-Dataset | ininini | "2024-12-04T05:43:46Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T05:09:13Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: output
dtype: string
splits:
- name: train
num_bytes: 23107
num_examples: 187
download_size: 10468
dataset_size: 23107
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
presencesw/data_remove_v0_preprocessed_1000 | presencesw | "2024-12-04T07:23:11Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T05:10:14Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: mask
dtype: image
- name: masked_image
dtype: image
- name: mae_embedding
dtype:
array3_d:
shape:
- 1
- 196
- 768
dtype: float32
splits:
- name: train
num_bytes: 13629084978.0
num_examples: 1000
download_size: 13640186121
dataset_size: 13629084978.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Jithendra-k/EmpatheticDialogues_Results | Jithendra-k | "2024-12-04T05:24:59Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T05:24:55Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: conversation
sequence: string
- name: emotion
dtype: string
- name: emotion_cause
dtype: string
- name: cause_utterance_indices
sequence: int64
- name: conv_response
dtype: string
- name: generated_responses
dtype: string
splits:
- name: train
num_bytes: 1258636
num_examples: 2000
download_size: 712740
dataset_size: 1258636
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Nash-pAnDiTa/youssef-StayAtHomeLongTime | Nash-pAnDiTa | "2024-12-04T06:04:08Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T06:03:51Z" | ---
dataset_info:
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 153426511.0
num_examples: 15
download_size: 130983965
dataset_size: 153426511.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mlgawd/final_dpo_nemo_v20 | mlgawd | "2024-12-04T09:48:47Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T09:36:40Z" | ---
dataset_info:
features:
- name: questions
dtype: string
- name: accepted
dtype: string
- name: rejected
dtype: string
splits:
- name: train
num_bytes: 27177632
num_examples: 5845
download_size: 15358940
dataset_size: 27177632
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
kowndinya23/flan2022-zeroshot-instr-inpt-outp-800000 | kowndinya23 | "2024-12-04T09:58:36Z" | 8 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T09:58:20Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 789086601
num_examples: 800000
- name: validation
num_bytes: 6604084
num_examples: 7407
download_size: 322527226
dataset_size: 795690685
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
kowndinya23/flan2022-zeroshot-instr-inpt-outp-1600000 | kowndinya23 | "2024-12-04T09:59:23Z" | 8 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T09:58:51Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 1522586846
num_examples: 1600000
- name: validation
num_bytes: 12982473
num_examples: 13716
download_size: 626208029
dataset_size: 1535569319
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
kowndinya23/flan2022-zeroshot-wrong-instr-inpt-outp-800000 | kowndinya23 | "2024-12-04T10:01:06Z" | 8 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T10:00:49Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 794683937
num_examples: 800000
- name: validation
num_bytes: 6934497
num_examples: 7407
download_size: 428329653
dataset_size: 801618434
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
kowndinya23/flan2022-zeroshot-inpt-outp-800000 | kowndinya23 | "2024-12-04T10:05:51Z" | 8 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T10:05:37Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 552943332
num_examples: 800000
- name: validation
num_bytes: 4486336
num_examples: 7407
download_size: 298422800
dataset_size: 557429668
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
kowndinya23/flan2022-zeroshot-inpt-outp-1600000 | kowndinya23 | "2024-12-04T10:06:25Z" | 8 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T10:06:00Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 1055583489
num_examples: 1600000
- name: validation
num_bytes: 9000670
num_examples: 13716
download_size: 579954891
dataset_size: 1064584159
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
mlgawd/final_dpo_nemo_v24 | mlgawd | "2024-12-04T10:13:38Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T10:13:36Z" | ---
dataset_info:
features:
- name: questions
dtype: string
- name: accepted
dtype: string
- name: rejected
dtype: string
splits:
- name: train
num_bytes: 27177865
num_examples: 5845
download_size: 15359684
dataset_size: 27177865
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Ameeeee/my-distiset-8df6f341 | Ameeeee | "2024-12-04T12:06:51Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"library:distilabel",
"region:us",
"synthetic",
"distilabel",
"rlaif",
"datacraft"
] | null | "2024-12-04T11:58:26Z" | ---
size_categories: n<1K
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': dress
'1': outerwear
'2': top
'3': bottom
'4': shoes
'5': accessory
splits:
- name: train
num_bytes: 1887
num_examples: 10
download_size: 3623
dataset_size: 1887
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
tags:
- synthetic
- distilabel
- rlaif
- datacraft
---
<p align="left">
<a href="https://github.com/argilla-io/distilabel">
<img src="https://raw.githubusercontent.com/argilla-io/distilabel/main/docs/assets/distilabel-badge-light.png" alt="Built with Distilabel" width="200" height="32"/>
</a>
</p>
# Dataset Card for my-distiset-8df6f341
This dataset has been created with [distilabel](https://distilabel.argilla.io/).
## Dataset Summary
This dataset contains a `pipeline.yaml` which can be used to reproduce the pipeline that generated it in distilabel using the `distilabel` CLI:
```console
distilabel pipeline run --config "https://huggingface.co/datasets/Ameeeee/my-distiset-8df6f341/raw/main/pipeline.yaml"
```
or explore the configuration:
```console
distilabel pipeline info --config "https://huggingface.co/datasets/Ameeeee/my-distiset-8df6f341/raw/main/pipeline.yaml"
```
## Dataset structure
The examples have the following structure per configuration:
<details><summary> Configuration: default </summary><hr>
```json
{
"label": 2,
"text": "The intricately designed long sleeve with a high neckline, cinched at the waist, and made of lightweight yet durable fabric, perfect for both formal and semi-formal events, can be considered an appropriate addition to any woman\u0027s wardrobe, complementing dresses or tops alike."
}
```
This subset can be loaded as:
```python
from datasets import load_dataset
ds = load_dataset("Ameeeee/my-distiset-8df6f341", "default")
```
Or simply as it follows, since there's only one configuration and is named `default`:
```python
from datasets import load_dataset
ds = load_dataset("Ameeeee/my-distiset-8df6f341")
```
</details>
|
2Vasabi/LLaVa-ru-Instruct-mini | 2Vasabi | "2024-12-04T12:33:14Z" | 8 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T12:13:27Z" | ---
dataset_info:
features:
- name: conversations
dtype: string
- name: type
dtype: string
- name: id
dtype: int64
- name: image
dtype: string
splits:
- name: train
num_bytes: 71942597
num_examples: 40002
download_size: 28578667
dataset_size: 71942597
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
all-oj-gen/ds_coder6.7b_pos_reflct_rmsprop_iter2_sppo_hard_new_all_oj_iter2-full_resp_trace | all-oj-gen | "2024-12-04T12:20:15Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T12:20:13Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: test
dtype: string
- name: tag
dtype: string
- name: id
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: text_prompt
dtype: string
- name: text_chosen
dtype: string
- name: text_rejected
dtype: string
- name: generate_0
dtype: string
- name: generate_0_score
dtype: int64
- name: traceback_0
dtype: string
- name: generate_1
dtype: string
- name: generate_1_score
dtype: int64
- name: traceback_1
dtype: string
- name: generate_2
dtype: string
- name: generate_2_score
dtype: int64
- name: traceback_2
dtype: string
- name: generate_3
dtype: string
- name: generate_3_score
dtype: int64
- name: traceback_3
dtype: string
- name: probability
sequence:
sequence: float64
- name: rm_scores
sequence: int64
splits:
- name: train
num_bytes: 48942459
num_examples: 5387
download_size: 18740718
dataset_size: 48942459
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "ds_coder6.7b_pos_reflct_rmsprop_iter2_sppo_hard_new_all_oj_iter2-full_resp_trace"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
jfcalvo/argilla-hub-dataset-exporter-test | jfcalvo | "2024-12-04T14:33:39Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"library:argilla",
"region:us",
"rlfh",
"argilla",
"human-feedback"
] | null | "2024-12-04T14:13:35Z" | ---
tags:
- rlfh
- argilla
- human-feedback
---
# Dataset Card for argilla-hub-dataset-exporter-test
This dataset has been created with [Argilla](https://github.com/argilla-io/argilla). As shown in the sections below, this dataset can be loaded into your Argilla server as explained in [Load with Argilla](#load-with-argilla), or used directly with the `datasets` library in [Load with `datasets`](#load-with-datasets).
## Using this dataset with Argilla
To load with Argilla, you'll just need to install Argilla as `pip install argilla --upgrade` and then use the following code:
```python
import argilla as rg
ds = rg.Dataset.from_hub("jfcalvo/argilla-hub-dataset-exporter-test", settings="auto")
```
This will load the settings and records from the dataset repository and push them to your Argilla server for exploration and annotation.
## Using this dataset with `datasets`
To load the records of this dataset with `datasets`, you'll just need to install `datasets` as `pip install datasets --upgrade` and then use the following code:
```python
from datasets import load_dataset
ds = load_dataset("jfcalvo/argilla-hub-dataset-exporter-test")
```
This will only load the records of the dataset, but not the Argilla settings.
## Dataset Structure
This dataset repo contains:
* Dataset records in a format compatible with HuggingFace `datasets`. These records will be loaded automatically when using `rg.Dataset.from_hub` and can be loaded independently using the `datasets` library via `load_dataset`.
* The [annotation guidelines](#annotation-guidelines) that have been used for building and curating the dataset, if they've been defined in Argilla.
* A dataset configuration folder conforming to the Argilla dataset format in `.argilla`.
The dataset is created in Argilla with: **fields**, **questions**, **suggestions**, **metadata**, **vectors**, and **guidelines**.
### Fields
The **fields** are the features or text of a dataset's records. For example, the 'text' column of a text classification dataset or the 'prompt' column of an instruction-following dataset.
| Field Name | Title | Type | Required |
| ---------- | ----- | ---- | -------- |
| text | Field Title | text | False |
### Questions
The **questions** are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label_selection, multi_label_selection, or ranking.
| Question Name | Title | Type | Required | Description | Values/Labels |
| ------------- | ----- | ---- | -------- | ----------- | ------------- |
<!-- check length of metadata properties -->
### Data Splits
The dataset contains a single split, which is `train`.
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation guidelines
[More Information Needed]
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
[More Information Needed] |
NinaCalvi/prefcollection-sample-skyworm-rm | NinaCalvi | "2024-12-04T15:08:54Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T15:08:51Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: truth_result
dtype: string
- name: orig_instruction
dtype: string
- name: orig_response_A
dtype: string
- name: orig_response_B
dtype: string
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: score_A
dtype: float64
- name: score_B
dtype: float64
splits:
- name: train
num_bytes: 35399703
num_examples: 3000
download_size: 17097077
dataset_size: 35399703
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
NinaCalvi/prefcollection-full-armo-rm | NinaCalvi | "2024-12-04T16:31:25Z" | 8 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T16:31:13Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: truth_result
dtype: string
- name: orig_instruction
dtype: string
- name: orig_response_A
dtype: string
- name: orig_response_B
dtype: string
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: score_A
dtype: float64
- name: score_B
dtype: float64
splits:
- name: train
num_bytes: 855993261
num_examples: 72630
download_size: 244971161
dataset_size: 855993261
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
maniro-ai/2024-12-04-rod-nav-tape-straight-02 | maniro-ai | "2024-12-04T17:34:59Z" | 8 | 0 | [
"task_categories:robotics",
"region:us",
"LeRobot"
] | [
"robotics"
] | "2024-12-04T17:34:54Z" | ---
task_categories:
- robotics
tags:
- LeRobot
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
|
Andrewhg414/CS6613ProjectInstructDS | Andrewhg414 | "2024-12-05T02:29:58Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T18:20:46Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: output
dtype: string
splits:
- name: train
num_bytes: 796783
num_examples: 1480
- name: test
num_bytes: 87890
num_examples: 165
download_size: 427355
dataset_size: 884673
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
skdrx/python-dpo-dataset-varname-formatted-combined-NOSYSTEMPROMPT | skdrx | "2024-12-04T18:55:06Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T18:55:06Z" | ---
dataset_info:
features:
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: prompt
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 1095035
num_examples: 1000
download_size: 432249
dataset_size: 1095035
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
fedric95/umbra | fedric95 | "2024-12-09T00:53:16Z" | 8 | 0 | [
"license:other",
"region:us"
] | null | "2024-12-04T19:51:01Z" | ---
license: other
license_name: license
license_link: LICENSE
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
dataset_info:
features:
- name: 'Unnamed: 0'
dtype: int64
- name: id
dtype: string
- name: date
dtype: string
- name: bbox
dtype: string
- name: geometry
dtype: string
- name: satellite
dtype: string
- name: track
dtype: string
- name: direction
dtype: string
- name: mode
dtype: string
- name: band
dtype: string
- name: polarization
dtype: string
- name: azimuth_res
dtype: float64
- name: range_res
dtype: float64
- name: rows
dtype: int64
- name: cols
dtype: int64
- name: image_href
dtype: string
- name: label
dtype:
image:
decode: false
- name: image
dtype:
image:
decode: false
- name: __index_level_0__
dtype: int64
splits:
- name: train
num_bytes: 1934150670.0
num_examples: 8
download_size: 1854301422
dataset_size: 1934150670.0
---
**AGPL-3.0 License**: This OSI-approved open-source license is for research-only activities. See the LICENSE file for more details.
**Enterprise License**: Designed for commercial use, this license permits integration of AI models derived from this dataset into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding this dataset into a commercial offering, contact me. |
Honi086/Billie_Ellish | Honi086 | "2024-12-04T19:53:43Z" | 8 | 0 | [
"license:openrail",
"size_categories:n<1K",
"format:audiofolder",
"modality:audio",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2024-12-04T19:52:25Z" | ---
license: openrail
---
|
shuyuej/PodGPT-Demo-Data | shuyuej | "2024-12-04T21:17:32Z" | 8 | 0 | [
"license:apache-2.0",
"size_categories:10K<n<100K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-04T21:13:33Z" | ---
license: apache-2.0
---
|
BASF-AI/PubChemWikiJAPC | BASF-AI | "2024-12-05T20:29:37Z" | 8 | 0 | [
"task_categories:text-classification",
"language:en",
"language:ja",
"license:cc-by-nc-sa-4.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"chemistry",
"chemteb",
"wikipedia",
"pubchem"
] | [
"text-classification"
] | "2024-12-04T22:35:35Z" | ---
dataset_info:
features:
- name: sent1
dtype: string
- name: sent2
dtype: string
- name: labels
dtype: int64
splits:
- name: test
num_bytes: 1213666
num_examples: 1434
download_size: 640047
dataset_size: 1213666
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
license: cc-by-nc-sa-4.0
task_categories:
- text-classification
language:
- en
- ja
tags:
- chemistry
- chemteb
- wikipedia
- pubchem
size_categories:
- 1K<n<10K
pretty_name: PubChem & Wikipedia English-Japanese Paragraph Pair Classification
---
# PubChem & Wikipedia English-Japanese Paragraph Pair Classification
This dataset is a multilingual extension of the [PubChem & Wikipedia Paragraphs Pair Classification](https://huggingface.co/datasets/BASF-AI/PubChemWikiParagraphsPC) dataset. It includes pairs of paragraphs in English and Japanese (sent1 and sent2) with a binary labels column indicating whether the paragraphs describe the same entity (1) or different entities (0). |
KaiChen1998/nuscenes2d-time-weather-geodiffusion | KaiChen1998 | "2024-12-05T02:02:11Z" | 8 | 0 | [
"task_categories:text-to-image",
"license:apache-2.0",
"arxiv:2306.04607",
"region:us",
"layout-to-image"
] | [
"text-to-image"
] | "2024-12-05T01:47:28Z" | ---
license: apache-2.0
task_categories:
- text-to-image
tags:
- layout-to-image
---
# nuScenes-time-weather-GeoDiffusion Dataset Card
nuScenes-time-weather-GeoDiffusion is the official dataset annotation file used to train [GeoDiffusion](https://arxiv.org/abs/2306.04607) on the nuScenes dataset with time of day (i.e., daytime/night) and weather (i.e., sunny/rain).
Since the nuImages dataset is not equipped with those meta tags, we opt for the nuScenes dataset and generate the 2D bounding box annotations via inference with a [Mask R-CNN](https://github.com/open-mmlab/mmdetection3d/tree/main/configs/nuimages#instance-segmentation) pre-trained on the nuImages dataset, which is then saved in the standard COCO format.
Check detailed usage in our [Github repo](https://github.com/KaiChen1998/GeoDiffusion). |
Nachiket-S/LLaMa_1B_Debiasing_Instruction_NoCoT | Nachiket-S | "2024-12-05T03:47:12Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T03:47:11Z" | ---
dataset_info:
features:
- name: file_name
dtype: string
- name: paragraph
dtype: string
splits:
- name: CleanPrompts
num_bytes: 63287
num_examples: 90
download_size: 15182
dataset_size: 63287
configs:
- config_name: default
data_files:
- split: CleanPrompts
path: data/CleanPrompts-*
---
|
violetxi/MATH500-sft-prm800k-llama31-8b-steptok_temp0-0_15 | violetxi | "2024-12-05T04:40:48Z" | 8 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T04:05:57Z" | ---
dataset_info:
features:
- name: problem
dtype: string
- name: is_correct
dtype: bool
- name: target_answer
dtype: string
- name: solution
dtype: string
- name: solution_steps
dtype: string
- name: attempts
dtype: string
- name: model_answer
dtype: string
splits:
- name: train
num_bytes: 77533328
num_examples: 20736
download_size: 5766010
dataset_size: 77533328
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
jkazdan/pku-safe-llama-3.1-8B-Instruct-trial | jkazdan | "2024-12-05T05:17:53Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T04:54:17Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 7778
num_examples: 10
download_size: 10606
dataset_size: 7778
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
guodaosun/tale-frame | guodaosun | "2024-12-05T06:04:36Z" | 8 | 0 | [
"task_categories:text-generation",
"language:en",
"license:mit",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"text-generation"
] | "2024-12-05T05:22:15Z" | ---
license: mit
task_categories:
- text-generation
language:
- en
pretty_name: TaleFrame
size_categories:
- 1K<n<10K
---
# TinyStories Dataset README
## Overview
This dataset is based on TinyStories and includes structured JSON data with corresponding annotations, designed for research in controllable story generation and related tasks.
## Dataset Structure
Each data item contains the following fields:
### 1. `conversations`
- **Type**: List
- **Purpose**: Contains the JSON of the story
- **`from`**: Always set to `"human"`.
- **`value`**: Structured data containing entities, events, story structures and relationships described in JSON format.
### 2. `chosen`
- **Type**: Object
- **Purpose**: Contains the final selected story.
- **`from`**: Always set to `"gpt"`.
- **`value`**: The accepted story text.
### 3. `rejected`
- **Type**: Object
- **Purpose**: Contains a rejected version of the story.
- **`from`**: Always set to `"gpt"`.
- **`value`**: The rejected story text.
## Structure of `value` Field
## Units Attributes Definition
### Entity
| Attribute | Definition | Nullability |
|---------------------|-------------------------------------------------------------------------|-------------|
| `entity_id` | A unique identifier for the entity, e.g., "entity_1". | required |
| `entity_name` | The name of the entity. | required |
| `entity_identity` | The identity of the entity. | required |
| `entity_motivation` | The motivation or goal of the entity. | required |
| `personality_traits`| The list of personality traits of the entity. | required |
### Event
| Attribute | Definition | Nullability |
|---------------------|-------------------------------------------------------------------------|-------------|
| `event_id` | A unique identifier of the event, e.g., "event_1". | required |
| `event_time` | Time the event occurs. | nullable |
| `event_location` | Location where the event happens. | nullable |
| `event_details`     | A brief description of the event.                                       | required    |
| `event_importance` | Importance of the event (three levels: low, medium, high). | required |
| `earlier_event` | List of preceding events that caused this event, using event IDs. | required |
| `later_event` | List of subsequent events resulting from this event, using event IDs. | required |
### Relationship
| Attribute | Definition | Nullability |
|-------------------------|---------------------------------------------------------------------------|-------------|
| `relationship_id` | A unique identifier for the relationship, e.g., "rel_1". | required |
| `included_entities` | The list of entities involved in the relationship, referenced by their entity IDs. | required |
| `emotional_type` | The emotional category between the two entities in the relationship. | required |
| `action_type` | The type of behavior exhibited between the entities in the relationship. | required |
| `action_direction` | The direction of the action/behavior within the relationship. "Internal", "bidirectional", "from AGENT to PATIENT". | required |
| `relationship_strength` | The strength of the relationship, categorized as "low", "medium", or "high". | required |
| `relationship_evolution`| Description of how the relationship evolves over time, such as gradually building trust. | required |
### Story
| Attribute | Definition | Nullability |
|-----------------------|-------------------------------------------------------------------------|-------------|
| `title` | The title of the story. | required |
| `story_description` | A brief description of the story. | required |
| `story_structure` | The structure of the story, including "beginning", "middle", "climax", and "ending". | required |
| `beginning` | List of event IDs that occur at the beginning of the story. | required |
| `middle` | List of event IDs that occur in the middle of the story. | required |
| `climax` | List of event IDs that represent the climax of the story. | required |
| `ending` | List of event IDs that occur at the ending of the story. | required |
## Nullability
- **Required fields**: Must contain a valid value.
- **Nullable fields**: Can be left empty.
## Dataset Statistical Features
### Entities
- **Total entities**: 24,154
- **Unique entity names**: 4,560
- **Total entity traits**: 4,770
- **Entity trait distribution**:
- Negative
- Neutral
- Positive
### Stories
- **Total stories**: 9,851
- **Emotional distribution**:
- Negative: 1,243
- Neutral: 2,433
- Positive: 17,727
### Events
- **Total events**: 68,553
- **Emotional distribution**:
- Negative: 9,226
- Neutral: 2,213
- Positive: 12,715
### Relationships
- **Total relationships**: 69,481
- **Emotional distribution**:
- Negative: 12,213
- Neutral: 15,458
- Positive: 40,882
## Annotations
Stories are evaluated using models like GPT-4 and annotated as "Chosen" or "Rejected" based on quality. |
nit1607/access-levels-instruction-with-astriks | nit1607 | "2024-12-10T19:08:53Z" | 8 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T06:16:45Z" | ---
dataset_info:
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 26789819
num_examples: 40000
download_size: 8230717
dataset_size: 26789819
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
moonhpark/jenny-tts-tags-6h-v1 | moonhpark | "2024-12-05T06:20:18Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T06:20:15Z" | ---
dataset_info:
features:
- name: file_name
dtype: string
- name: text
dtype: string
- name: transcription_normalised
dtype: string
- name: utterance_pitch_mean
dtype: float32
- name: utterance_pitch_std
dtype: float32
- name: snr
dtype: float64
- name: c50
dtype: float64
- name: speaking_rate
dtype: float64
- name: phonemes
dtype: string
- name: stoi
dtype: float64
- name: si-sdr
dtype: float64
- name: pesq
dtype: float64
splits:
- name: train
num_bytes: 1640896
num_examples: 4000
download_size: 1041813
dataset_size: 1640896
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
moonhpark/jenny-tts-text-tags-6h-v1 | moonhpark | "2024-12-05T06:24:09Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T06:24:05Z" | ---
dataset_info:
features:
- name: file_name
dtype: string
- name: text
dtype: string
- name: transcription_normalised
dtype: string
- name: utterance_pitch_mean
dtype: float32
- name: utterance_pitch_std
dtype: float32
- name: snr
dtype: float64
- name: c50
dtype: float64
- name: speaking_rate
dtype: string
- name: phonemes
dtype: string
- name: stoi
dtype: float64
- name: si-sdr
dtype: float64
- name: pesq
dtype: float64
- name: noise
dtype: string
- name: reverberation
dtype: string
- name: speech_monotony
dtype: string
- name: sdr_noise
dtype: string
- name: pesq_speech_quality
dtype: string
splits:
- name: train
num_bytes: 2063542
num_examples: 4000
download_size: 1025292
dataset_size: 2063542
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
sarmass/a4_lima_pairrm_preference_data | sarmass | "2024-12-06T02:37:43Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T06:59:30Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: preference
dtype: string
splits:
- name: train
num_bytes: 499580
num_examples: 520
download_size: 63799
dataset_size: 499580
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
shimejii/databricks-dolly-15k-ja__resCount | shimejii | "2024-12-05T07:29:28Z" | 8 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T07:29:13Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: context
dtype: string
- name: response
dtype: string
- name: category
dtype: string
- name: response_char_count
dtype: int64
splits:
- name: train
num_bytes: 14946333
num_examples: 15011
download_size: 8713375
dataset_size: 14946333
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
cen001/1250ganqingceshi10 | cen001 | "2024-12-05T07:31:46Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T07:31:33Z" | ---
dataset_info:
features:
- name: conversation
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 3355
num_examples: 7
download_size: 3609
dataset_size: 3355
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
suraj164/lifeexp | suraj164 | "2024-12-05T07:57:50Z" | 8 | 0 | [
"license:apache-2.0",
"size_categories:n<1K",
"format:csv",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T07:57:16Z" | ---
license: apache-2.0
---
|
tororoin/onion-text | tororoin | "2024-12-05T08:33:29Z" | 8 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T08:33:25Z" | ---
dataset_info:
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 23517884
num_examples: 33880
download_size: 15755179
dataset_size: 23517884
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
anusha7/dataset | anusha7 | "2024-12-05T09:10:46Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T09:10:42Z" | ---
dataset_info:
features:
- name: Holdings
dtype: string
- name: Price
dtype: float64
- name: Portfolio Weight
dtype: float64
- name: First Bought
dtype: string
- name: Market Value as of Oct 31 2024
dtype: int64
- name: Cur
dtype: string
- name: Share Change %
dtype: float64
- name: 1-Year Return
dtype: float64
- name: 3-Year Return
dtype: float64
- name: 5-Year Return
dtype: float64
- name: Sector
dtype: string
- name: 'Unnamed: 11'
dtype: float64
splits:
- name: train
num_bytes: 3509
num_examples: 26
download_size: 6695
dataset_size: 3509
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
DT4LM/gpt2_sst2_leap | DT4LM | "2024-12-05T10:30:12Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T10:30:08Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype: int32
splits:
- name: train
num_bytes: 52433
num_examples: 704
download_size: 36548
dataset_size: 52433
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
gokulsrinivasagan/processed_book_corpus-ld-5 | gokulsrinivasagan | "2024-12-05T10:41:16Z" | 8 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T10:37:09Z" | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: special_tokens_mask
sequence: int8
- name: lda_lables
sequence: float64
splits:
- name: train
num_bytes: 7123525776
num_examples: 2277342
- name: validation
num_bytes: 377568368
num_examples: 120706
download_size: 2169784136
dataset_size: 7501094144
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
Krainmann/common_voice_01_test_de | Krainmann | "2024-12-05T11:02:56Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T10:49:58Z" | ---
dataset_info:
features:
- name: client_id
dtype: string
- name: path
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 48000
- name: sentence
dtype: string
- name: up_votes
dtype: int64
- name: down_votes
dtype: int64
- name: age
dtype: string
- name: gender
dtype: string
- name: accent
dtype: string
- name: locale
dtype: string
- name: segment
dtype: string
splits:
- name: test
num_bytes: 83607787.235
num_examples: 2269
download_size: 82982728
dataset_size: 83607787.235
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
badayvedat/LJSpeech-1.1 | badayvedat | "2024-12-05T12:12:53Z" | 8 | 0 | [
"size_categories:10K<n<100K",
"format:webdataset",
"modality:audio",
"modality:text",
"library:datasets",
"library:webdataset",
"library:mlcroissant",
"region:us",
"audio",
"text",
"text-to-speech"
] | null | "2024-12-05T11:14:51Z" | ---
tags:
- audio
- text
- text-to-speech
---
# The LJ Speech Dataset
Version 1.1
July 5, 2017
https://keithito.com/LJ-Speech-Dataset
## OVERVIEW
This is a public domain speech dataset consisting of 13,100 short audio clips
of a single speaker reading passages from 7 non-fiction books. A transcription
is provided for each clip. Clips vary in length from 1 to 10 seconds and have
a total length of approximately 24 hours.
The texts were published between 1884 and 1964, and are in the public domain.
The audio was recorded in 2016-17 by the LibriVox project and is also in the
public domain.
## FILE FORMAT
Metadata is provided in metadata.csv. This file consists of one record per
line, delimited by the pipe character (0x7c). The fields are:
1. ID: this is the name of the corresponding .wav file
2. Transcription: words spoken by the reader (UTF-8)
3. Normalized Transcription: transcription with numbers, ordinals, and
monetary units expanded into full words (UTF-8).
Each audio file is a single-channel 16-bit PCM WAV with a sample rate of
22050 Hz.
## STATISTICS
Total Clips 13,100
Total Words 225,715
Total Characters 1,308,674
Total Duration 23:55:17
Mean Clip Duration 6.57 sec
Min Clip Duration 1.11 sec
Max Clip Duration 10.10 sec
Mean Words per Clip 17.23
Distinct Words 13,821
## MISCELLANEOUS
The audio clips range in length from approximately 1 second to 10 seconds.
They were segmented automatically based on silences in the recording. Clip
boundaries generally align with sentence or clause boundaries, but not always.
The text was matched to the audio manually, and a QA pass was done to ensure
that the text accurately matched the words spoken in the audio.
The original LibriVox recordings were distributed as 128 kbps MP3 files. As a
result, they may contain artifacts introduced by the MP3 encoding.
The following abbreviations appear in the text. They may be expanded as
follows:
Abbreviation Expansion
--------------------------
Mr. Mister
Mrs. Misess (*)
Dr. Doctor
No. Number
St. Saint
Co. Company
Jr. Junior
Maj. Major
Gen. General
Drs. Doctors
Rev. Reverend
Lt. Lieutenant
Hon. Honorable
Sgt. Sergeant
Capt. Captain
Esq. Esquire
Ltd. Limited
Col. Colonel
Ft. Fort
* there's no standard expansion of "Mrs."
19 of the transcriptions contain non-ASCII characters (for example, LJ016-0257
contains "raison d'être").
For more information or to report errors, please email kito@kito.us.
## LICENSE
This dataset is in the public domain in the USA (and likely other countries as
well). There are no restrictions on its use. For more information, please see:
https://librivox.org/pages/public-domain.
## CHANGELOG
* 1.0 (July 8, 2017):
Initial release
* 1.1 (Feb 19, 2018):
Version 1.0 included 30 .wav files with no corresponding annotations in
metadata.csv. These have been removed in version 1.1. Thanks to Rafael Valle
for spotting this.
## CREDITS
This dataset consists of excerpts from the following works:
* Morris, William, et al. Arts and Crafts Essays. 1893.
* Griffiths, Arthur. The Chronicles of Newgate, Vol. 2. 1884.
* Roosevelt, Franklin D. The Fireside Chats of Franklin Delano Roosevelt.
1933-42.
* Harland, Marion. Marion Harland's Cookery for Beginners. 1893.
* Rolt-Wheeler, Francis. The Science - History of the Universe, Vol. 5:
Biology. 1910.
* Banks, Edgar J. The Seven Wonders of the Ancient World. 1916.
* President's Commission on the Assassination of President Kennedy. Report
of the President's Commission on the Assassination of President Kennedy. 1964.
Recordings by Linda Johnson. Alignment and annotation by Keith Ito. All text,
audio, and annotations are in the public domain.
There's no requirement to cite this work, but if you'd like to do so, you can
link to: https://keithito.com/LJ-Speech-Dataset
or use the following:
```bibtex
@misc{ljspeech17,
author = {Keith Ito},
title = {The LJ Speech Dataset},
howpublished = {\url{https://keithito.com/LJ-Speech-Dataset/}},
year = 2017
}
``` |
juliadollis/seu_dataset_de_previsoes | juliadollis | "2024-12-05T11:55:50Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T11:38:57Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: toxicity_human
dtype: float64
- name: true_label
dtype: int64
- name: predict_label
dtype: int64
splits:
- name: train
num_bytes: 13025
num_examples: 100
download_size: 10262
dataset_size: 13025
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Krainmann/common_voice_01_test_en | Krainmann | "2024-12-05T11:50:20Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T11:49:54Z" | ---
dataset_info:
features:
- name: client_id
dtype: string
- name: path
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 48000
- name: sentence
dtype: string
- name: up_votes
dtype: int64
- name: down_votes
dtype: int64
- name: age
dtype: string
- name: gender
dtype: string
- name: accent
dtype: string
- name: locale
dtype: string
- name: segment
dtype: string
splits:
- name: test
num_bytes: 443272447.056
num_examples: 7016
download_size: 299291294
dataset_size: 443272447.056
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
lingvenvist/animacy-ko-goldstandard-original-dataset-no-glosses | lingvenvist | "2024-12-05T12:49:30Z" | 8 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T12:49:25Z" | ---
dataset_info:
features:
- name: sentences
dtype: string
- name: tokens
sequence: string
- name: anim_tags
sequence:
class_label:
names:
'0': N
'1': A
'2': H
- name: target-indexes
sequence: int64
- name: source
dtype: string
splits:
- name: validation
num_bytes: 99347
num_examples: 912
- name: test
num_bytes: 400932
num_examples: 3628
download_size: 234437
dataset_size: 500279
configs:
- config_name: default
data_files:
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
syllasgiorgos/mini_commonVoice_27583_16Khz | syllasgiorgos | "2024-12-05T13:20:12Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T13:20:05Z" | ---
dataset_info:
features:
- name: audio
dtype: audio
- name: text
dtype: string
- name: speaker_id
dtype: int64
splits:
- name: train
num_bytes: 69324136.0
num_examples: 459
download_size: 57442486
dataset_size: 69324136.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Erland/NLP701_Assignment2_Subtask3 | Erland | "2024-12-05T13:51:03Z" | 8 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-12-05T13:22:49Z" | ---
dataset_info:
config_name: EN
features:
- name: text
dtype: string
- name: label
dtype: string
splits:
- name: train
num_bytes: 318635
num_examples: 88
- name: validation
num_bytes: 118287
num_examples: 30
download_size: 291390
dataset_size: 436922
configs:
- config_name: EN
data_files:
- split: train
path: EN/train-*
- split: validation
path: EN/validation-*
---
|