---
dataset_info:
- config_name: ar
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 12747954
num_examples: 22261
- name: test
num_bytes: 3347551
num_examples: 5780
download_size: 7199085
dataset_size: 16095505
- config_name: bo
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 14642058
num_examples: 25015
- name: test
num_bytes: 2767866
num_examples: 4707
download_size: 7735565
dataset_size: 17409924
- config_name: cl
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 12782903
num_examples: 22330
- name: test
num_bytes: 3200254
num_examples: 5564
download_size: 7094680
dataset_size: 15983157
- config_name: co
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 14871584
num_examples: 25589
- name: test
num_bytes: 3525180
num_examples: 5992
download_size: 8268092
dataset_size: 18396764
- config_name: cr
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 13709906
num_examples: 23605
- name: test
num_bytes: 3367106
num_examples: 5747
download_size: 7936763
dataset_size: 17077012
- config_name: cu
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 12577997
num_examples: 21805
- name: test
num_bytes: 2971491
num_examples: 5053
download_size: 7027626
dataset_size: 15549488
- config_name: do
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 16123501
num_examples: 27470
- name: test
num_bytes: 3406703
num_examples: 5719
download_size: 8891758
dataset_size: 19530204
- config_name: ec
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 16024358
num_examples: 27289
- name: test
num_bytes: 3895337
num_examples: 6384
download_size: 9186022
dataset_size: 19919695
- config_name: es
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 13320680
num_examples: 23115
- name: test
num_bytes: 4344109
num_examples: 7509
download_size: 8067432
dataset_size: 17664789
- config_name: general
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 328791283
num_examples: 571120
- name: test
num_bytes: 94360368
num_examples: 160099
download_size: 196043788
dataset_size: 423151651
- config_name: gt
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 13488775
num_examples: 23311
- name: test
num_bytes: 2522413
num_examples: 4290
download_size: 7119300
dataset_size: 16011188
- config_name: hn
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 15268575
num_examples: 26736
- name: test
num_bytes: 3356442
num_examples: 5690
download_size: 8599342
dataset_size: 18625017
- config_name: mx
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 23350619
num_examples: 40357
download_size: 11018405
dataset_size: 23350619
- config_name: ni
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 19435299
num_examples: 33966
download_size: 9240090
dataset_size: 19435299
- config_name: pa
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 18197383
num_examples: 31561
download_size: 8647392
dataset_size: 18197383
- config_name: pe
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 18055535
num_examples: 31335
download_size: 8080223
dataset_size: 18055535
- config_name: pr
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 19028895
num_examples: 32952
download_size: 9036484
dataset_size: 19028895
- config_name: py
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 17324120
num_examples: 30191
download_size: 8040419
dataset_size: 17324120
- config_name: sv
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 18162220
num_examples: 31741
download_size: 8587078
dataset_size: 18162220
- config_name: us
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 16045173
num_examples: 27732
download_size: 6870579
dataset_size: 16045173
- config_name: uy
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 14998545
num_examples: 26427
download_size: 7199302
dataset_size: 14998545
- config_name: ve
features:
- name: id
dtype: int64
- name: query
dtype: string
- name: docid
dtype: string
- name: docid_text
dtype: string
- name: query_date
dtype: date32
- name: answer_date
dtype: date32
- name: match_score
dtype: float32
- name: expanded_search
dtype: bool
- name: answer_type
dtype: string
splits:
- name: train
num_bytes: 19353758
num_examples: 32856
download_size: 8963688
dataset_size: 19353758
configs:
- config_name: ar
data_files:
- split: train
path: ar/train-*
- split: test
path: ar/test-*
- config_name: bo
data_files:
- split: train
path: bo/train-*
- split: test
path: bo/test-*
- config_name: cl
data_files:
- split: train
path: cl/train-*
- split: test
path: cl/test-*
- config_name: co
data_files:
- split: train
path: co/train-*
- split: test
path: co/test-*
- config_name: cr
data_files:
- split: train
path: cr/train-*
- split: test
path: cr/test-*
- config_name: cu
data_files:
- split: train
path: cu/train-*
- split: test
path: cu/test-*
- config_name: do
data_files:
- split: train
path: do/train-*
- split: test
path: do/test-*
- config_name: ec
data_files:
- split: train
path: ec/train-*
- split: test
path: ec/test-*
- config_name: es
data_files:
- split: train
path: es/train-*
- split: test
path: es/test-*
- config_name: general
data_files:
- split: train
path: general/train-*
- split: test
path: general/test-*
- config_name: gt
data_files:
- split: train
path: gt/train-*
- split: test
path: gt/test-*
- config_name: hn
data_files:
- split: train
path: hn/train-*
- split: test
path: hn/test-*
- config_name: mx
data_files:
- split: train
path: mx/train-*
- config_name: ni
data_files:
- split: train
path: ni/train-*
- config_name: pa
data_files:
- split: train
path: pa/train-*
- config_name: pe
data_files:
- split: train
path: pe/train-*
- config_name: pr
data_files:
- split: train
path: pr/train-*
- config_name: py
data_files:
- split: train
path: py/train-*
- config_name: sv
data_files:
- split: train
path: sv/train-*
- config_name: us
data_files:
- split: train
path: us/train-*
- config_name: uy
data_files:
- split: train
path: uy/train-*
- config_name: ve
data_files:
- split: train
path: ve/train-*
---
# Dataset Card for messirve
<!-- Provide a quick summary of the dataset. -->
This dataset card was generated from [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1); several sections still need to be filled in.
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** Spanish
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
### Data Instances
<!-- Provide an JSON-formatted example and brief description of a typical instance in the dataset. If available, provide a link to further examples.
```
{
'example_field': ...,
...
}
```
Provide any additional information that is not covered in the other sections about the data here. In particular describe any relationships between data points and if these relationships are made explicit. -->
[More Information Needed]
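Until a concrete instance is documented here, the sketch below shows how one example could be inspected with the 🤗 Datasets library. The hub repository id used in the snippet is an assumption and may need to be adjusted to the actual path of this dataset.

```python
from datasets import load_dataset

# Assumption: replace with the actual Hugging Face Hub repository id for this dataset.
REPO_ID = "spanish-ir/messirve"

# Load the "ar" configuration; other country codes or "general" work the same way.
ds = load_dataset(REPO_ID, "ar")

print(ds)              # DatasetDict with its available splits
print(ds["train"][0])  # one instance: id, query, docid, docid_text, dates, match_score, ...
```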
### Data Fields
- `id`: query id
- `query`: query text
- `docid`: relevant document id in the corpus
- `docid_text`: relevant document text
- `query_date`: date the query was extracted
- `answer_date`: date the answer was extracted
- `match_score`: length of the longest substring of the SERP answer that also appears in the matched document text, expressed as a fraction of the SERP answer's length (see the illustrative sketch below)
- `expanded_search`: whether the SERP returned a message indicating that the search was "expanded" with additional results ("se incluyen resultados de...")
- `answer_type`: type of answer extracted (`feat_snippet`, i.e. featured snippets, is the most important type)
<!-- Note that the descriptions can be initialized with the **Show Markdown Data Fields** output of the [Datasets Tagging app](https://huggingface.co/spaces/huggingface/datasets-tagging), you will then only need to refine the generated descriptions. -->
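To make the `match_score` definition concrete, here is a small, purely illustrative sketch of how such a score could be computed with Python's standard library. `approx_match_score` is a hypothetical helper written for this card, not the pipeline that produced the released values.

```python
from difflib import SequenceMatcher

def approx_match_score(serp_answer: str, doc_text: str) -> float:
    """Length of the longest common substring between the SERP answer and the
    document text, divided by the length of the SERP answer."""
    if not serp_answer:
        return 0.0
    matcher = SequenceMatcher(None, serp_answer, doc_text, autojunk=False)
    longest = matcher.find_longest_match(0, len(serp_answer), 0, len(doc_text))
    return longest.size / len(serp_answer)

# Toy usage: the whole answer appears verbatim in the document, so the score is 1.0.
print(approx_match_score("la paz", "La sede de gobierno de Bolivia es la paz."))
```

A value close to 1.0 means the SERP answer is (almost) entirely contained in the document text, so higher scores indicate a cleaner query-document match.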
### Data Splits
<!-- Describe and name the splits in the dataset if there are more than one.
Describe any criteria for splitting the data, if used. If there are differences between the splits (e.g. if the training annotations are machine-generated and the dev and test ones are created by humans, or if different numbers of annotators contributed to each example), describe them here.
Provide the sizes of each split. As appropriate, provide any descriptive statistics for the features, such as average length. For example:
| | train | validation | test |
|-------------------------|------:|-----------:|-----:|
| Input Sentences | | | |
| Average Sentence Length | | | |
-->
All configurations have a `train` split; the `ar`, `bo`, `cl`, `co`, `cr`, `cu`, `do`, `ec`, `es`, `gt`, `hn`, and `general` configurations also have a `test` split, while the remaining configurations (`mx`, `ni`, `pa`, `pe`, `pr`, `py`, `sv`, `us`, `uy`, `ve`) are train-only. Exact per-split example counts and sizes are listed in the YAML metadata above.
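To enumerate the available configurations and their splits programmatically (rather than reading the YAML metadata), a short sketch using the Datasets library is shown below; as above, the repository id is an assumption.

```python
from datasets import get_dataset_config_names, get_dataset_split_names

REPO_ID = "spanish-ir/messirve"  # assumption: adjust to the actual hub repository id

for config in get_dataset_config_names(REPO_ID):
    print(config, get_dataset_split_names(REPO_ID, config))
# e.g. "ar ['train', 'test']" ... "mx ['train']"
```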
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases, and limitations of the dataset. More information is needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed]