added Indic config
Browse files — bernice-pretrain-data.py (+24 −4)
bernice-pretrain-data.py
CHANGED
@@ -52,16 +52,37 @@ _LICENSE = ""
|
|
52 |
# you can just pass the relative paths to the files instead of URLs.
|
53 |
# Only train data, validation split not provided
|
54 |
_URLS = {
|
55 |
-
"…"  [deleted line truncated in extraction; original URL string not recoverable from this view]
|
|
|
56 |
}
|
57 |
|
58 |
|
59 |
# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
|
60 |
class BernicePretrainData(datasets.GeneratorBasedBuilder):
|
61 |
"""Tweet IDs for the 2.5 billion multilingual tweets used to train Bernice, a Twitter encoder."""
|
62 |
-
VERSION = datasets.Version("1.…")  [deleted line truncated in extraction; the replacement added line below reads "1.0.0"]
|
63 |
|
64 |
def _info(self):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
65 |
return datasets.DatasetInfo(
|
66 |
# This is the description that will appear on the datasets page.
|
67 |
description=_DESCRIPTION,
|
@@ -92,8 +113,7 @@ class BernicePretrainData(datasets.GeneratorBasedBuilder):
|
|
92 |
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
|
93 |
# It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
|
94 |
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
|
95 |
-
|
96 |
-
urls_to_download = [f"{dir_url}/{f}" for f in os.listdir(dir_url)]
|
97 |
downloaded_files = dl_manager.download_and_extract(urls_to_download)
|
98 |
return [
|
99 |
datasets.SplitGenerator(
|
|
|
52 |
# you can just pass the relative paths to the files instead of URLs.
|
53 |
# Only train data, validation split not provided
|
54 |
_URLS = {
|
55 |
+
"all": [f"data/{f}" for f in os.listdir("data")],
|
56 |
+
"indic": ["data/indic_tweet_ids.txt.gz"]
|
57 |
}
|
58 |
|
59 |
|
60 |
# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
|
61 |
class BernicePretrainData(datasets.GeneratorBasedBuilder):
|
62 |
"""Tweet IDs for the 2.5 billion multilingual tweets used to train Bernice, a Twitter encoder."""
|
63 |
+
VERSION = datasets.Version("1.0.0")
|
64 |
|
65 |
def _info(self):
|
66 |
+
# This is an example of a dataset with multiple configurations.
|
67 |
+
# If you don't want/need to define several sub-sets in your dataset,
|
68 |
+
# just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
|
69 |
+
|
70 |
+
# If you need to make complex sub-parts in the datasets with configurable options
|
71 |
+
# You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
|
72 |
+
# BUILDER_CONFIG_CLASS = MyBuilderConfig
|
73 |
+
|
74 |
+
# You will be able to load one or the other configurations in the following list with
|
75 |
+
# data = datasets.load_dataset('my_dataset', 'first_domain')
|
76 |
+
# data = datasets.load_dataset('my_dataset', 'second_domain')
|
77 |
+
BUILDER_CONFIGS = [
|
78 |
+
datasets.BuilderConfig(name="all", version=VERSION,
|
79 |
+
description="Includes all tweets"),
|
80 |
+
datasets.BuilderConfig(name="indic", version=VERSION,
|
81 |
+
description="Only the Indic languages, plus `undefined'"),
|
82 |
+
]
|
83 |
+
|
84 |
+
DEFAULT_CONFIG_NAME = "all" # It's not mandatory to have a default configuration. Just use one if it make sense.
|
85 |
+
|
86 |
return datasets.DatasetInfo(
|
87 |
# This is the description that will appear on the datasets page.
|
88 |
description=_DESCRIPTION,
|
|
|
113 |
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
|
114 |
# It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
|
115 |
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
|
116 |
+
urls_to_download = self._URLS[self.config.name]
|
|
|
117 |
downloaded_files = dl_manager.download_and_extract(urls_to_download)
|
118 |
return [
|
119 |
datasets.SplitGenerator(
|