init
tokenize_dataset_s2s.py CHANGED (+4 -1)
@@ -1,6 +1,7 @@
 import os
 
 import torch
+import numpy as np
 from datasets import load_dataset, DatasetDict
 from encodec_audio_tokenizer import EncodecTokenizer
 
@@ -18,7 +19,9 @@ tokenizer = EncodecTokenizer.from_pretrained()
 
 def tokenize(batch):
     for side in sides:
-        wav = torch.
+        wav = torch.as_tensor(np.concatenate([i["array"] for i in batch[f"{side}.audio"]]))
+        print(wav.shape)
+
         sr = [i["sampling_rate"] for i in batch[f"{side}.audio"]]
         batch[f"{side}.audio.tokens"] = tokenizer.wav_to_tokens(wav=wav, sample_rate=sr).numpy().tolist()
     return batch
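
For context, `tokenize` is written as a batched transform, so it would normally be driven through `datasets.map(..., batched=True)`. Below is a minimal usage sketch assembled from the new version of the code in this commit; the dataset id and the `sides` list are illustrative assumptions (the commit does not show how `sides` is defined, only that the function iterates over it):

import numpy as np
import torch
from datasets import load_dataset
from encodec_audio_tokenizer import EncodecTokenizer

tokenizer = EncodecTokenizer.from_pretrained()
sides = ["source", "target"]  # assumed side names; not shown in this commit

def tokenize(batch):
    for side in sides:
        # Concatenate every waveform in the batch into one 1-D tensor (as in this commit)
        wav = torch.as_tensor(np.concatenate([i["array"] for i in batch[f"{side}.audio"]]))
        sr = [i["sampling_rate"] for i in batch[f"{side}.audio"]]
        batch[f"{side}.audio.tokens"] = tokenizer.wav_to_tokens(wav=wav, sample_rate=sr).numpy().tolist()
    return batch

dataset = load_dataset("user/s2s-dataset")  # hypothetical dataset id
dataset = dataset.map(tokenize, batched=True)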