aoxo committed
Commit 77c2cb3
1 Parent(s): e8a766a

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full changeset.
Files changed (50)
  1. optimized_processing.py +140 -0
  2. processed_dataset/batch_0/data-00000-of-00001.arrow +3 -0
  3. processed_dataset/batch_0/dataset_info.json +25 -0
  4. processed_dataset/batch_0/state.json +13 -0
  5. processed_dataset/batch_10/data-00000-of-00001.arrow +3 -0
  6. processed_dataset/batch_10/dataset_info.json +25 -0
  7. processed_dataset/batch_10/state.json +13 -0
  8. processed_dataset/batch_11/data-00000-of-00001.arrow +3 -0
  9. processed_dataset/batch_11/dataset_info.json +25 -0
  10. processed_dataset/batch_11/state.json +13 -0
  11. processed_dataset/batch_14/data-00000-of-00001.arrow +3 -0
  12. processed_dataset/batch_14/dataset_info.json +25 -0
  13. processed_dataset/batch_14/state.json +13 -0
  14. processed_dataset/batch_15/data-00000-of-00001.arrow +3 -0
  15. processed_dataset/batch_15/dataset_info.json +25 -0
  16. processed_dataset/batch_15/state.json +13 -0
  17. processed_dataset/batch_16/data-00000-of-00001.arrow +3 -0
  18. processed_dataset/batch_16/dataset_info.json +25 -0
  19. processed_dataset/batch_16/state.json +13 -0
  20. processed_dataset/batch_18/data-00000-of-00001.arrow +3 -0
  21. processed_dataset/batch_18/dataset_info.json +25 -0
  22. processed_dataset/batch_18/state.json +13 -0
  23. processed_dataset/batch_21/data-00000-of-00001.arrow +3 -0
  24. processed_dataset/batch_21/dataset_info.json +25 -0
  25. processed_dataset/batch_21/state.json +13 -0
  26. processed_dataset/batch_22/dataset_info.json +25 -0
  27. processed_dataset/batch_22/state.json +13 -0
  28. processed_dataset/batch_23/data-00000-of-00001.arrow +3 -0
  29. processed_dataset/batch_23/dataset_info.json +25 -0
  30. processed_dataset/batch_23/state.json +13 -0
  31. processed_dataset/batch_24/data-00000-of-00001.arrow +3 -0
  32. processed_dataset/batch_24/dataset_info.json +25 -0
  33. processed_dataset/batch_24/state.json +13 -0
  34. processed_dataset/batch_25/data-00000-of-00001.arrow +3 -0
  35. processed_dataset/batch_25/dataset_info.json +25 -0
  36. processed_dataset/batch_25/state.json +13 -0
  37. processed_dataset/batch_26/dataset_info.json +25 -0
  38. processed_dataset/batch_26/state.json +13 -0
  39. processed_dataset/batch_29/dataset_info.json +25 -0
  40. processed_dataset/batch_29/state.json +13 -0
  41. processed_dataset/batch_3/dataset_info.json +25 -0
  42. processed_dataset/batch_3/state.json +13 -0
  43. processed_dataset/batch_30/data-00000-of-00001.arrow +3 -0
  44. processed_dataset/batch_30/dataset_info.json +25 -0
  45. processed_dataset/batch_30/state.json +13 -0
  46. processed_dataset/batch_31/data-00000-of-00001.arrow +3 -0
  47. processed_dataset/batch_31/dataset_info.json +25 -0
  48. processed_dataset/batch_31/state.json +13 -0
  49. processed_dataset/batch_32/data-00000-of-00001.arrow +3 -0
  50. processed_dataset/batch_32/dataset_info.json +25 -0
optimized_processing.py ADDED
@@ -0,0 +1,140 @@
+ import gc  # for explicit garbage collection between batches
+ import os
+
+ import numpy as np
+ import pandas as pd
+ from datasets import Dataset, DatasetDict, concatenate_datasets, load_dataset
+ from datasets.features import Audio
+ from transformers import WhisperFeatureExtractor, WhisperProcessor, WhisperTokenizer
+
+ # Load the custom dataset: paired .wav audio and .txt transcriptions
+ def load_custom_dataset(data_dir):
+     data = {
+         "audio": [],
+         "text": []
+     }
+
+     wav_dir = os.path.join(data_dir, 'wav')
+     txt_dir = os.path.join(data_dir, 'transcription')
+
+     # Assumes filenames in 'wav' and 'transcription' match (foo.wav <-> foo.txt)
+     for wav_file in os.listdir(wav_dir):
+         if wav_file.endswith('.wav'):
+             txt_file = wav_file.replace('.wav', '.txt')
+             wav_path = os.path.join(wav_dir, wav_file)
+             txt_path = os.path.join(txt_dir, txt_file)
+
+             # Read the transcription text
+             with open(txt_path, 'r', encoding='utf-8') as f:
+                 transcription = f.read().strip()
+
+             data["audio"].append(wav_path)
+             data["text"].append(transcription)
+
+     # Build a pandas DataFrame and convert it to a Hugging Face Dataset
+     df = pd.DataFrame(data)
+     dataset = Dataset.from_pandas(df)
+
+     # Decode the .wav files as 16 kHz audio (the rate Whisper expects)
+     dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))
+
+     return dataset
+
+ # Load the custom train set
+ custom_train_dataset = load_custom_dataset("./")
+
+ # Load the Common Voice test set (Malayalam)
+ common_voice_test = load_dataset("mozilla-foundation/common_voice_11_0", "ml", split="test", trust_remote_code=True)
+ common_voice_test = common_voice_test.select_columns(["audio", "sentence"])
+
+ # Combine them into a DatasetDict: custom train set + Common Voice test set
+ dataset_dict = DatasetDict({
+     "train": custom_train_dataset,
+     "test": common_voice_test
+ })
+ print(dataset_dict)
+
+ feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-small")
+ tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")
+ processor = WhisperProcessor.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")
+
+ print(dataset_dict['train'][0])
+
+ def prepare_dataset(batch):
+     audio_arrays = [item["array"] for item in batch["audio"]]
+     sampling_rates = [item["sampling_rate"] for item in batch["audio"]]
+
+     # Compute log-mel input features for each audio sample; float16 halves
+     # the on-disk footprint of the saved features
+     features = []
+     for audio, sr in zip(audio_arrays, sampling_rates):
+         feature = feature_extractor(audio, sampling_rate=sr).input_features[0]
+         features.append(np.array(feature, dtype=np.float16))
+
+     # Drop the intermediate references and reclaim memory before tokenizing
+     del audio_arrays, sampling_rates
+     gc.collect()
+
+     batch["input_features"] = features
+
+     # Encode the target text to label ids ("longest" pads within this batch)
+     batch["labels"] = tokenizer(batch["text"], padding="longest", truncation=True).input_ids
+
+     return batch
+
+ # Process the dataset in chunks, saving each chunk to its own directory
+ def process_and_save_in_batches(dataset, batch_size=1000, save_path="processed_dataset"):
+     all_processed = []
+
+     for start_idx in range(0, len(dataset), batch_size):
+         # Slice out one chunk and wrap it back into a Dataset
+         batch = dataset[start_idx:start_idx + batch_size]
+         batch = Dataset.from_dict(batch)
+
+         # Apply the processing function to the chunk
+         processed_batch = batch.map(
+             prepare_dataset,
+             remove_columns=dataset.column_names,
+             batched=True,
+             batch_size=batch_size,
+             num_proc=None,
+         )
+         print(f"Batch {start_idx} done")
+         all_processed.append(processed_batch)
+
+         # Save the processed chunk to disk, e.g. processed_dataset/batch_0
+         processed_batch.save_to_disk(os.path.join(save_path, f"batch_{start_idx // batch_size}"))
+
+         # Drop local references and trigger garbage collection (note the
+         # processed batch is still referenced from all_processed)
+         del batch, processed_batch
+         gc.collect()
+
+     # Optionally merge all processed chunks into a single dataset at the end
+     # (not recommended for very large datasets)
+     final_dataset = concatenate_datasets(all_processed)
+     final_dataset.save_to_disk(save_path)
+
+ # Process and save the train split in batches of 1000 examples
+ process_and_save_in_batches(dataset_dict['train'], batch_size=1000, save_path="processed_dataset")
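
Note: each batch_N directory below is one Dataset.save_to_disk output (an Arrow shard plus dataset_info.json and state.json), and the data-*.arrow entries in this diff are Git LFS pointer files standing in for the ~484 MB shards. A minimal sketch of how the saved batches could be reloaded and merged later with the datasets API (save_path and the batch_* layout are assumptions carried over from the script above):

import os

from datasets import concatenate_datasets, load_from_disk

# Reload every saved batch directory and merge them back into one dataset.
# Assumes the batch_* layout written by process_and_save_in_batches above.
save_path = "processed_dataset"
batch_dirs = sorted(
    (d for d in os.listdir(save_path) if d.startswith("batch_")),
    key=lambda d: int(d.split("_")[1]),  # numeric order: batch_2 before batch_10
)
batches = [load_from_disk(os.path.join(save_path, d)) for d in batch_dirs]
merged = concatenate_datasets(batches)
print(merged)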
processed_dataset/batch_0/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42e975533137948b43d323738de666b43511cff7a7cbbfcda224e0668405a799
+ size 484488984
processed_dataset/batch_0/dataset_info.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "citation": "",
+   "description": "",
+   "features": {
+     "input_features": {
+       "feature": {
+         "feature": {
+           "dtype": "float16",
+           "_type": "Value"
+         },
+         "_type": "Sequence"
+       },
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     }
+   },
+   "homepage": "",
+   "license": ""
+ }
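
For reference, the nested Sequence-of-Sequence float16 feature above is the log-mel spectrogram matrix that WhisperFeatureExtractor emits: 80 mel bins by 3000 frames per 30-second window. A quick sanity check, assuming the batch_0 directory is available locally:

from datasets import load_from_disk

ds = load_from_disk("processed_dataset/batch_0")
print(ds.features)            # matches the dataset_info.json above
mel = ds[0]["input_features"]
print(len(mel), len(mel[0]))  # expected: 80 mel bins, 3000 frames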
processed_dataset/batch_0/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "88b2cb05b021e3cb",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": null
+ }
processed_dataset/batch_10/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de3b0d3bf7519fee122b8db4256c7a09ddb3f67801d13d7b09acbd75cccd084a
+ size 484488984
processed_dataset/batch_10/dataset_info.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "citation": "",
+   "description": "",
+   "features": {
+     "input_features": {
+       "feature": {
+         "feature": {
+           "dtype": "float16",
+           "_type": "Value"
+         },
+         "_type": "Sequence"
+       },
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     }
+   },
+   "homepage": "",
+   "license": ""
+ }
processed_dataset/batch_10/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "6e01f1623562ac29",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": null
+ }
processed_dataset/batch_11/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ec32e034253c3e93f2ca41fbacd32ba9b2463c7e5639613ae34946c2d2200cb
+ size 483672984
processed_dataset/batch_11/dataset_info.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "citation": "",
+   "description": "",
+   "features": {
+     "input_features": {
+       "feature": {
+         "feature": {
+           "dtype": "float16",
+           "_type": "Value"
+         },
+         "_type": "Sequence"
+       },
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     }
+   },
+   "homepage": "",
+   "license": ""
+ }
processed_dataset/batch_11/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "a0d93ffa81a156ad",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": null
+ }
processed_dataset/batch_14/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:350c74cd97b9f2cee382f556ec2c71ed0b8bc6132351a066710b61b6b61ce42c
+ size 483904984
processed_dataset/batch_14/dataset_info.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "citation": "",
+   "description": "",
+   "features": {
+     "input_features": {
+       "feature": {
+         "feature": {
+           "dtype": "float16",
+           "_type": "Value"
+         },
+         "_type": "Sequence"
+       },
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     }
+   },
+   "homepage": "",
+   "license": ""
+ }
processed_dataset/batch_14/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "45c4cd0ee6047a21",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": null
+ }
processed_dataset/batch_15/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9ebe9f2eec442d0abda92462a257d596e5668758b381388c73cf6d8daf5ee1c
+ size 484720984
processed_dataset/batch_15/dataset_info.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "citation": "",
+   "description": "",
+   "features": {
+     "input_features": {
+       "feature": {
+         "feature": {
+           "dtype": "float16",
+           "_type": "Value"
+         },
+         "_type": "Sequence"
+       },
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     }
+   },
+   "homepage": "",
+   "license": ""
+ }
processed_dataset/batch_15/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "5bbfbe86ad451863",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": null
+ }
processed_dataset/batch_16/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f57bbba876a2817efb76cf8f874eab285bf2072f613c1767df91d0baf8b11fc
+ size 484304984
processed_dataset/batch_16/dataset_info.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "citation": "",
+   "description": "",
+   "features": {
+     "input_features": {
+       "feature": {
+         "feature": {
+           "dtype": "float16",
+           "_type": "Value"
+         },
+         "_type": "Sequence"
+       },
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     }
+   },
+   "homepage": "",
+   "license": ""
+ }
processed_dataset/batch_16/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "e29d04abb11bd1e2",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": null
+ }
processed_dataset/batch_18/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcc35fbea913b63a712c7489c37a70d290dc3cd8f3ae0d5064fe2da6e2c56843
+ size 484040984
processed_dataset/batch_18/dataset_info.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "citation": "",
+   "description": "",
+   "features": {
+     "input_features": {
+       "feature": {
+         "feature": {
+           "dtype": "float16",
+           "_type": "Value"
+         },
+         "_type": "Sequence"
+       },
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     }
+   },
+   "homepage": "",
+   "license": ""
+ }
processed_dataset/batch_18/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "83905aa884cf11aa",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": null
+ }
processed_dataset/batch_21/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:54f599c57f75de54f2946a6e88852563e3da2e3cf347768e26cae9312b7ec3de
+ size 484384984
processed_dataset/batch_21/dataset_info.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "citation": "",
+   "description": "",
+   "features": {
+     "input_features": {
+       "feature": {
+         "feature": {
+           "dtype": "float16",
+           "_type": "Value"
+         },
+         "_type": "Sequence"
+       },
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     }
+   },
+   "homepage": "",
+   "license": ""
+ }
processed_dataset/batch_21/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "5a3e402db5ca7d65",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": null
+ }
processed_dataset/batch_22/dataset_info.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "citation": "",
+   "description": "",
+   "features": {
+     "input_features": {
+       "feature": {
+         "feature": {
+           "dtype": "float16",
+           "_type": "Value"
+         },
+         "_type": "Sequence"
+       },
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     }
+   },
+   "homepage": "",
+   "license": ""
+ }
processed_dataset/batch_22/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "8bb80671a77b5a01",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": null
+ }
processed_dataset/batch_23/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd8a757ee6b125731b780f4c995dbccc3c4195db40e854aee6a8d18f39e8bbd2
+ size 483872984
processed_dataset/batch_23/dataset_info.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "citation": "",
+   "description": "",
+   "features": {
+     "input_features": {
+       "feature": {
+         "feature": {
+           "dtype": "float16",
+           "_type": "Value"
+         },
+         "_type": "Sequence"
+       },
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     }
+   },
+   "homepage": "",
+   "license": ""
+ }
processed_dataset/batch_23/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "eea223df245851ee",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": null
+ }
processed_dataset/batch_24/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1249626363bce6854c8e4e128c1003e2e2ef8435550a49210026342ec47e5ebd
+ size 484392984
processed_dataset/batch_24/dataset_info.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "citation": "",
+   "description": "",
+   "features": {
+     "input_features": {
+       "feature": {
+         "feature": {
+           "dtype": "float16",
+           "_type": "Value"
+         },
+         "_type": "Sequence"
+       },
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     }
+   },
+   "homepage": "",
+   "license": ""
+ }
processed_dataset/batch_24/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "261c9b3c63994758",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": null
+ }
processed_dataset/batch_25/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f4206da0ee6434c3c0a7b39fc5fd0b74501343e28f3ccbf906ff82be8c88e31
+ size 485824984
processed_dataset/batch_25/dataset_info.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "citation": "",
+   "description": "",
+   "features": {
+     "input_features": {
+       "feature": {
+         "feature": {
+           "dtype": "float16",
+           "_type": "Value"
+         },
+         "_type": "Sequence"
+       },
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     }
+   },
+   "homepage": "",
+   "license": ""
+ }
processed_dataset/batch_25/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "4a9d9aa7fc103bb9",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": null
+ }
processed_dataset/batch_26/dataset_info.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "citation": "",
+   "description": "",
+   "features": {
+     "input_features": {
+       "feature": {
+         "feature": {
+           "dtype": "float16",
+           "_type": "Value"
+         },
+         "_type": "Sequence"
+       },
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     }
+   },
+   "homepage": "",
+   "license": ""
+ }
processed_dataset/batch_26/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "561ce14c1302f0a8",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": null
+ }
processed_dataset/batch_29/dataset_info.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "citation": "",
+   "description": "",
+   "features": {
+     "input_features": {
+       "feature": {
+         "feature": {
+           "dtype": "float16",
+           "_type": "Value"
+         },
+         "_type": "Sequence"
+       },
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     }
+   },
+   "homepage": "",
+   "license": ""
+ }
processed_dataset/batch_29/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "f8166704f32b33fd",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": null
+ }
processed_dataset/batch_3/dataset_info.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "citation": "",
+   "description": "",
+   "features": {
+     "input_features": {
+       "feature": {
+         "feature": {
+           "dtype": "float16",
+           "_type": "Value"
+         },
+         "_type": "Sequence"
+       },
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     }
+   },
+   "homepage": "",
+   "license": ""
+ }
processed_dataset/batch_3/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "536207594d494137",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": null
+ }
processed_dataset/batch_30/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c9c61e6a83f3f8abff93fa403d132a8923096191585aabc9513db77d6745a2d
+ size 484664984
processed_dataset/batch_30/dataset_info.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "citation": "",
+   "description": "",
+   "features": {
+     "input_features": {
+       "feature": {
+         "feature": {
+           "dtype": "float16",
+           "_type": "Value"
+         },
+         "_type": "Sequence"
+       },
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     }
+   },
+   "homepage": "",
+   "license": ""
+ }
processed_dataset/batch_30/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "bcb3cc73097e7a27",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": null
+ }
processed_dataset/batch_31/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7677fd0d3c344c6f4920e3a69d058adced9004c656e73b228035889a50fc1f7a
+ size 484264984
processed_dataset/batch_31/dataset_info.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "citation": "",
+   "description": "",
+   "features": {
+     "input_features": {
+       "feature": {
+         "feature": {
+           "dtype": "float16",
+           "_type": "Value"
+         },
+         "_type": "Sequence"
+       },
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     }
+   },
+   "homepage": "",
+   "license": ""
+ }
processed_dataset/batch_31/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "74ba518d7abf28af",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": null
+ }
processed_dataset/batch_32/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1ebb0206c867d8f9952d2447c650cbbd022f21c81de97fd88eaea633315f8a4
+ size 484248984
processed_dataset/batch_32/dataset_info.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "citation": "",
+   "description": "",
+   "features": {
+     "input_features": {
+       "feature": {
+         "feature": {
+           "dtype": "float16",
+           "_type": "Value"
+         },
+         "_type": "Sequence"
+       },
+       "_type": "Sequence"
+     },
+     "labels": {
+       "feature": {
+         "dtype": "int64",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     }
+   },
+   "homepage": "",
+   "license": ""
+ }