pere committed on
Commit b6e11ee
1 Parent(s): d4f2a80

first submit

README.md ADDED
@@ -0,0 +1,8 @@
+ # Italian Tweets Test Dataset
+ This is a dataset of 10M Italian tweets. It still contains errors; please do not use it.
+
+ ## How to Use
+ ```python
+ from datasets import load_dataset
+ data = load_dataset("pere/italian_tweets_10M")
+ ```
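
For orientation, here is a minimal sketch of what the loaded object exposes. The split names and the `id`/`text` columns come from the loading script in this commit; the rest of the snippet is illustrative and not part of the repo.

```python
from datasets import load_dataset

# Loads both splits defined by the loading script: train and validation.
data = load_dataset("pere/italian_tweets_10M")

# Each example is a dict with an "id" and the raw tweet "text".
print(data)                      # DatasetDict with split names and sizes
print(data["train"][0]["text"])  # first tweet in the train split
```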
data/train-shard-0001-of-0001.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:466ea229e9ece22a3ac0482ee44571681c494cb203f59cfc03573e0ad973ca00
+ size 327228071
data/unlabelled_10M.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42c324f26340c63aca6e20580196b805a3aa125f6d3d9f795f46682458c3d021
+ size 343056388
data/validation-shard-0001-of-0001.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5be844bc2aa0be1d6669f60a34eebdb031dc1491e54dc79e7458872495fa27bf
+ size 15830436
italian_tweets_10M.py ADDED
@@ -0,0 +1,80 @@
+ """Italian Tweets 10M dataset loading script."""
+ import gzip
+ import json
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+ _DESCRIPTION = """Italian tweets."""
+ _DATA_URL = "https://huggingface.co/datasets/pere/italian_tweets_10M/resolve/main/data/{split_suffix}-shard-{index:04d}-of-{n_shards:04d}.json.gz"
+ _N_SHARDS_PER_SPLIT = {
+     "train": 1, "validation": 1
+ }
+
+
+ class italian_tweets_10MConfig(datasets.BuilderConfig):
+     """BuilderConfig for italian_tweets_10M."""
+
+     def __init__(self, *args, **kwargs):
+         """BuilderConfig for italian_tweets_10M.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(
+             *args,
+             name="italian_tweets_10M",
+             **kwargs,
+         )
+
+
+ class italian_tweets_10M(datasets.GeneratorBasedBuilder):
+     """Italian Tweets 10M dataset builder."""
+     BUILDER_CONFIGS = [italian_tweets_10MConfig()]
+     BUILDER_CONFIG_CLASS = italian_tweets_10MConfig
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Build one download URL per shard for each split.
+         data_urls = {}
+         for split in ["train", "validation"]:
+             data_urls[split] = [
+                 _DATA_URL.format(
+                     split_suffix=split,
+                     index=index,
+                     n_shards=_N_SHARDS_PER_SPLIT[split],
+                 )
+                 for index in range(1, _N_SHARDS_PER_SPLIT[split] + 1)
+             ]
+         # Download all shards up front and hand the local paths to the split generators.
+         train_downloaded_files = dl_manager.download(data_urls["train"])
+         validation_downloaded_files = dl_manager.download(data_urls["validation"])
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
+             ),
+         ]
+
+     def _generate_examples(self, filepaths):
+         """Yield examples in raw (text) form by iterating over all the files."""
+         id_ = 0
+         for filepath in filepaths:
+             logger.info("generating examples from = %s", filepath)
+             # Each shard is a gzipped JSON-lines file: one JSON object per line.
+             with gzip.open(filepath, "rt", encoding="utf-8") as f:
+                 for line in f:
+                     if line:
+                         example = json.loads(line)
+                         yield id_, example
+                         id_ += 1
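
The loading script can also be smoke-tested locally before pushing, since `load_dataset` accepts a path to a script file. A minimal sketch, assuming a local checkout of this repo; the shards themselves are still fetched from the Hub via `_DATA_URL`:

```python
from datasets import load_dataset

# Run the loader straight from the local checkout; the script still downloads
# the gzipped shards from the Hub URLs built in _split_generators.
data = load_dataset("./italian_tweets_10M.py")

assert set(data.keys()) == {"train", "validation"}
print(data["validation"].features)  # the "id" and "text" string columns
```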