init
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete file list.
- .gitattributes +84 -0
- experiment_cache/cluster/clap_general_se.jvnv-emotional-speech-corpus.speaker_id.csv +3 -0
- experiment_cache/cluster/clap_general_se.jvnv-emotional-speech-corpus.style.csv +3 -0
- experiment_cache/cluster/clap_se.jvnv-emotional-speech-corpus.speaker_id.csv +3 -0
- experiment_cache/cluster/clap_se.jvnv-emotional-speech-corpus.style.csv +3 -0
- experiment_cache/cluster/hubert_base_se.jvnv-emotional-speech-corpus.speaker_id.csv +3 -0
- experiment_cache/cluster/hubert_base_se.jvnv-emotional-speech-corpus.style.csv +3 -0
- experiment_cache/cluster/hubert_large_se.jvnv-emotional-speech-corpus.speaker_id.csv +3 -0
- experiment_cache/cluster/hubert_large_se.jvnv-emotional-speech-corpus.style.csv +3 -0
- experiment_cache/cluster/hubert_xl_se.jvnv-emotional-speech-corpus.speaker_id.csv +3 -0
- experiment_cache/cluster/hubert_xl_se.jvnv-emotional-speech-corpus.style.csv +3 -0
- experiment_cache/cluster/meta_voice_se.jvnv-emotional-speech-corpus.speaker_id.csv +3 -0
- experiment_cache/cluster/meta_voice_se.jvnv-emotional-speech-corpus.style.csv +3 -0
- experiment_cache/cluster/pyannote_se.jvnv-emotional-speech-corpus.speaker_id.csv +3 -0
- experiment_cache/cluster/pyannote_se.jvnv-emotional-speech-corpus.style.csv +3 -0
- experiment_cache/cluster/w2v_bert_se.jvnv-emotional-speech-corpus.speaker_id.csv +3 -0
- experiment_cache/cluster/w2v_bert_se.jvnv-emotional-speech-corpus.style.csv +3 -0
- experiment_cache/cluster/wav2vec_se.jvnv-emotional-speech-corpus.speaker_id.csv +3 -0
- experiment_cache/cluster/wav2vec_se.jvnv-emotional-speech-corpus.style.csv +3 -0
- experiment_cache/cluster/xlsr_1b_se.jvnv-emotional-speech-corpus.speaker_id.csv +3 -0
- experiment_cache/cluster/xlsr_1b_se.jvnv-emotional-speech-corpus.style.csv +3 -0
- experiment_cache/cluster/xlsr_2b_se.jvnv-emotional-speech-corpus.speaker_id.csv +3 -0
- experiment_cache/cluster/xlsr_2b_se.jvnv-emotional-speech-corpus.style.csv +3 -0
- experiment_cache/cluster/xlsr_300m_se.jvnv-emotional-speech-corpus.speaker_id.csv +3 -0
- experiment_cache/cluster/xlsr_300m_se.jvnv-emotional-speech-corpus.style.csv +3 -0
- experiment_cache/embeddings/clap_general_se.jvnv-emotional-speech-corpus.json +3 -0
- experiment_cache/embeddings/clap_se.jvnv-emotional-speech-corpus.json +3 -0
- experiment_cache/embeddings/hubert_base_se.jvnv-emotional-speech-corpus.json +3 -0
- experiment_cache/embeddings/hubert_large_se.jvnv-emotional-speech-corpus.json +3 -0
- experiment_cache/embeddings/hubert_xl_se.jvnv-emotional-speech-corpus.json +3 -0
- experiment_cache/embeddings/meta_voice_se.jvnv-emotional-speech-corpus.json +3 -0
- experiment_cache/embeddings/pyannote_se.jvnv-emotional-speech-corpus.json +3 -0
- experiment_cache/embeddings/w2v_bert_se.jvnv-emotional-speech-corpus.json +3 -0
- experiment_cache/embeddings/wav2vec_se.jvnv-emotional-speech-corpus.json +3 -0
- experiment_cache/embeddings/xlsr_1b_se.jvnv-emotional-speech-corpus.json +3 -0
- experiment_cache/embeddings/xlsr_2b_se.jvnv-emotional-speech-corpus.json +3 -0
- experiment_cache/embeddings/xlsr_300m_se.jvnv-emotional-speech-corpus.json +3 -0
- experiment_cache/figure/2d.latent_space.clap_general_se.jvnv-emotional-speech-corpus.speaker_id.png +3 -0
- experiment_cache/figure/2d.latent_space.clap_general_se.jvnv-emotional-speech-corpus.style.png +3 -0
- experiment_cache/figure/2d.latent_space.clap_se.jvnv-emotional-speech-corpus.speaker_id.png +3 -0
- experiment_cache/figure/2d.latent_space.clap_se.jvnv-emotional-speech-corpus.style.png +3 -0
- experiment_cache/figure/2d.latent_space.hubert_base_se.jvnv-emotional-speech-corpus.speaker_id.png +3 -0
- experiment_cache/figure/2d.latent_space.hubert_base_se.jvnv-emotional-speech-corpus.style.png +3 -0
- experiment_cache/figure/2d.latent_space.hubert_large_se.jvnv-emotional-speech-corpus.speaker_id.png +3 -0
- experiment_cache/figure/2d.latent_space.hubert_large_se.jvnv-emotional-speech-corpus.style.png +3 -0
- experiment_cache/figure/2d.latent_space.hubert_xl_se.jvnv-emotional-speech-corpus.speaker_id.png +3 -0
- experiment_cache/figure/2d.latent_space.hubert_xl_se.jvnv-emotional-speech-corpus.style.png +3 -0
- experiment_cache/figure/2d.latent_space.meta_voice_se.jvnv-emotional-speech-corpus.speaker_id.png +3 -0
- experiment_cache/figure/2d.latent_space.meta_voice_se.jvnv-emotional-speech-corpus.style.png +3 -0
- experiment_cache/figure/2d.latent_space.pyannote_se.jvnv-emotional-speech-corpus.speaker_id.png +3 -0
.gitattributes
CHANGED
@@ -234,3 +234,87 @@ experiment_cache/cluster/hubert_large_se.j-tube-speech.speaker_id.csv filter=lfs
 experiment_cache/cluster/xlsr_1b_se.j-tube-speech.speaker_id.csv filter=lfs diff=lfs merge=lfs -text
 experiment_cache/cluster/xlsr_300m_se.j-tube-speech.speaker_id.csv filter=lfs diff=lfs merge=lfs -text
 experiment_cache/embeddings/xlsr_2b_se.j-tube-speech.json filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/pyannote_se.jvnv-emotional-speech-corpus.speaker_id.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/embeddings/hubert_large_se.jvnv-emotional-speech-corpus.json filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.clap_se.jvnv-emotional-speech-corpus.speaker_id.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/xlsr_300m_se.jvnv-emotional-speech-corpus.style.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/hubert_base_se.jvnv-emotional-speech-corpus.style.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/w2v_bert_se.jvnv-emotional-speech-corpus.style.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.wav2vec_se.jvnv-emotional-speech-corpus.speaker_id.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/clap_se.jvnv-emotional-speech-corpus.speaker_id.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/embeddings/xlsr_2b_se.jvnv-emotional-speech-corpus.json filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.clap_se.jvnv-emotional-speech-corpus.style.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.w2v_bert_se.jvnv-emotional-speech-corpus.style.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.xlsr_2b_se.jvnv-emotional-speech-corpus.speaker_id.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/clap_general_se.jvnv-emotional-speech-corpus.speaker_id.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/clap_general_se.jvnv-emotional-speech-corpus.style.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/hubert_base_se.jvnv-emotional-speech-corpus.speaker_id.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/embeddings/clap_general_se.jvnv-emotional-speech-corpus.json filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/hubert_base_se.jvnv-emotional-speech-corpus.speaker_id.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/w2v_bert_se.jvnv-emotional-speech-corpus.speaker_id.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/meta_voice_se.jvnv-emotional-speech-corpus.speaker_id.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/embeddings/pyannote_se.jvnv-emotional-speech-corpus.json filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/clap_general_se.jvnv-emotional-speech-corpus.style.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/hubert_xl_se.jvnv-emotional-speech-corpus.style.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/pyannote_se.jvnv-emotional-speech-corpus.style.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/xlsr_300m_se.jvnv-emotional-speech-corpus.speaker_id.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/wav2vec_se.jvnv-emotional-speech-corpus.style.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.meta_voice_se.jvnv-emotional-speech-corpus.style.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.w2v_bert_se.jvnv-emotional-speech-corpus.speaker_id.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/clap_se.jvnv-emotional-speech-corpus.style.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/embeddings/hubert_base_se.jvnv-emotional-speech-corpus.json filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.xlsr_1b_se.jvnv-emotional-speech-corpus.speaker_id.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.xlsr_300m_se.jvnv-emotional-speech-corpus.style.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/clap_se.jvnv-emotional-speech-corpus.style.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/xlsr_2b_se.jvnv-emotional-speech-corpus.style.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/xlsr_300m_se.jvnv-emotional-speech-corpus.style.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/embeddings/clap_se.jvnv-emotional-speech-corpus.json filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.hubert_base_se.jvnv-emotional-speech-corpus.style.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.hubert_large_se.jvnv-emotional-speech-corpus.speaker_id.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.pyannote_se.jvnv-emotional-speech-corpus.style.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/xlsr_1b_se.jvnv-emotional-speech-corpus.style.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/hubert_xl_se.jvnv-emotional-speech-corpus.style.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/xlsr_1b_se.jvnv-emotional-speech-corpus.speaker_id.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/embeddings/hubert_xl_se.jvnv-emotional-speech-corpus.json filter=lfs diff=lfs merge=lfs -text
+experiment_cache/embeddings/wav2vec_se.jvnv-emotional-speech-corpus.json filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/xlsr_2b_se.jvnv-emotional-speech-corpus.style.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/hubert_base_se.jvnv-emotional-speech-corpus.style.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/embeddings/meta_voice_se.jvnv-emotional-speech-corpus.json filter=lfs diff=lfs merge=lfs -text
+experiment_cache/embeddings/xlsr_1b_se.jvnv-emotional-speech-corpus.json filter=lfs diff=lfs merge=lfs -text
+experiment_cache/embeddings/xlsr_300m_se.jvnv-emotional-speech-corpus.json filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.clap_general_se.jvnv-emotional-speech-corpus.speaker_id.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/hubert_large_se.jvnv-emotional-speech-corpus.style.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/w2v_bert_se.jvnv-emotional-speech-corpus.style.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/hubert_large_se.jvnv-emotional-speech-corpus.style.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/xlsr_1b_se.jvnv-emotional-speech-corpus.style.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.meta_voice_se.jvnv-emotional-speech-corpus.speaker_id.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/clap_general_se.jvnv-emotional-speech-corpus.speaker_id.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/wav2vec_se.jvnv-emotional-speech-corpus.speaker_id.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/meta_voice_se.jvnv-emotional-speech-corpus.style.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/pyannote_se.jvnv-emotional-speech-corpus.style.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/xlsr_2b_se.jvnv-emotional-speech-corpus.speaker_id.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/xlsr_300m_se.jvnv-emotional-speech-corpus.speaker_id.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/meta_voice_se.jvnv-emotional-speech-corpus.speaker_id.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/wav2vec_se.jvnv-emotional-speech-corpus.speaker_id.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.hubert_xl_se.jvnv-emotional-speech-corpus.style.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.xlsr_300m_se.jvnv-emotional-speech-corpus.speaker_id.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/pyannote_se.jvnv-emotional-speech-corpus.speaker_id.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/hubert_large_se.jvnv-emotional-speech-corpus.speaker_id.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/hubert_large_se.jvnv-emotional-speech-corpus.speaker_id.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/w2v_bert_se.jvnv-emotional-speech-corpus.speaker_id.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.hubert_large_se.jvnv-emotional-speech-corpus.style.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.wav2vec_se.jvnv-emotional-speech-corpus.style.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.pyannote_se.jvnv-emotional-speech-corpus.speaker_id.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.xlsr_1b_se.jvnv-emotional-speech-corpus.style.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/wav2vec_se.jvnv-emotional-speech-corpus.style.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/hubert_xl_se.jvnv-emotional-speech-corpus.speaker_id.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.clap_general_se.jvnv-emotional-speech-corpus.style.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.hubert_base_se.jvnv-emotional-speech-corpus.speaker_id.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.hubert_xl_se.jvnv-emotional-speech-corpus.speaker_id.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/meta_voice_se.jvnv-emotional-speech-corpus.style.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/embeddings/w2v_bert_se.jvnv-emotional-speech-corpus.json filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/hubert_xl_se.jvnv-emotional-speech-corpus.speaker_id.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/cluster/clap_se.jvnv-emotional-speech-corpus.speaker_id.csv filter=lfs diff=lfs merge=lfs -text
+experiment_cache/figure/2d.latent_space.xlsr_2b_se.jvnv-emotional-speech-corpus.style.png filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/xlsr_1b_se.jvnv-emotional-speech-corpus.speaker_id.npy filter=lfs diff=lfs merge=lfs -text
+experiment_cache/tsne/xlsr_2b_se.jvnv-emotional-speech-corpus.speaker_id.npy filter=lfs diff=lfs merge=lfs -text
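
Every rule added above follows the same pattern: a literal path followed by `filter=lfs diff=lfs merge=lfs -text`, which routes the file through the Git LFS clean/smudge filter and marks it as binary for diff and merge purposes. Such entries are normally written by `git lfs track`; purely as an illustration (the helper name and the idea of generating the rules in Python are assumptions, not this repository's actual tooling), equivalent lines could be produced like this:

```python
# Illustrative sketch only: emit .gitattributes rules in the same form as the
# lines added in this commit. Not the tooling actually used for this repository.

LFS_RULE = "{path} filter=lfs diff=lfs merge=lfs -text"

def append_lfs_rules(paths, gitattributes_path=".gitattributes"):
    """Append one Git LFS tracking rule per path (hypothetical helper)."""
    with open(gitattributes_path, "a", encoding="utf-8") as fh:
        for path in paths:
            fh.write(LFS_RULE.format(path=path) + "\n")

if __name__ == "__main__":
    # Paths taken from the hunk above.
    append_lfs_rules([
        "experiment_cache/cluster/pyannote_se.jvnv-emotional-speech-corpus.speaker_id.csv",
        "experiment_cache/embeddings/hubert_large_se.jvnv-emotional-speech-corpus.json",
    ])
```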
experiment_cache/cluster/clap_general_se.jvnv-emotional-speech-corpus.speaker_id.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca6f1877eea3414e9cc12c43f016564be2b4da7df922cdd3b9ad05b74070d2e4
+size 13266
experiment_cache/cluster/clap_general_se.jvnv-emotional-speech-corpus.style.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74a996e42b910d3b83925fc0517510b20e913d3c138e7c7a2b6bee379b92e6f2
+size 18128
experiment_cache/cluster/clap_se.jvnv-emotional-speech-corpus.speaker_id.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b555ae880a70e945270b8a3fd1d1414bb375d7edd197ed0fcba2162f90b247b
+size 13522
experiment_cache/cluster/clap_se.jvnv-emotional-speech-corpus.style.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b63eadc2b715841aa1cc3a87aaceac27a8e301a6ca5181c24f6271f540a41d73
+size 18512
experiment_cache/cluster/hubert_base_se.jvnv-emotional-speech-corpus.speaker_id.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d92dc7896e2e4e72a11d3828b82113ad4e728ae16e0047edd8b469ecfca3a45
+size 10048
experiment_cache/cluster/hubert_base_se.jvnv-emotional-speech-corpus.style.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:620dbef9523bd472f23c05191a3944a07d2023484bb72e75bd18d2ab32637acd
+size 13463
experiment_cache/cluster/hubert_large_se.jvnv-emotional-speech-corpus.speaker_id.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb94947abc349370d299b0537e9abb9171a20ad4e34fed2238cad67f753ca78b
+size 11457
experiment_cache/cluster/hubert_large_se.jvnv-emotional-speech-corpus.style.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f91563daadab191dc5a2d9b87b03471d8b0a9f7f8155f3ab20dfd75d99b46ea
+size 15461
experiment_cache/cluster/hubert_xl_se.jvnv-emotional-speech-corpus.speaker_id.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f91bd608083d4a240a1cbd285aa5b830b497114610ca26bbb66aa0aee4778d7
+size 12560
experiment_cache/cluster/hubert_xl_se.jvnv-emotional-speech-corpus.style.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:177700cc99e0ce052136e167c4db18bcc9cf7961ffc577988bf99509f0d56cfe
+size 17014
experiment_cache/cluster/meta_voice_se.jvnv-emotional-speech-corpus.speaker_id.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac3aa938455a3e41b9b1c885c59debd53a33f32b49e55d4c74a73cbb4635ec2a
+size 14705
experiment_cache/cluster/meta_voice_se.jvnv-emotional-speech-corpus.style.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e67b7228332a2347f2e0c0c81331ca85bf84242493c326c37d5b4ca55c665a1
+size 20043
experiment_cache/cluster/pyannote_se.jvnv-emotional-speech-corpus.speaker_id.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2c0f048ffa850af7826c239c8f7fc99ede7c18b1c0cbba8081cfca32e16ae98
+size 5045
experiment_cache/cluster/pyannote_se.jvnv-emotional-speech-corpus.style.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91f0ed9ae264ca7fb61e1534ee6ce37ae87a0d84ffcc1a5d90339de5830672f6
+size 6347
experiment_cache/cluster/w2v_bert_se.jvnv-emotional-speech-corpus.speaker_id.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cda9e6a57ce6960739cc65eda6e99e2df6ca9aaae64aed0e04e2216ae89c1721
+size 9230
experiment_cache/cluster/w2v_bert_se.jvnv-emotional-speech-corpus.style.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:49af1c147543db9a63912c9993ebfee094985983307dd7a5f7e1cec856a47162
+size 12471
experiment_cache/cluster/wav2vec_se.jvnv-emotional-speech-corpus.speaker_id.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:697480291958d93e643c532822fc053c23b1133538de8674e997691d0af0f4d9
+size 12401
experiment_cache/cluster/wav2vec_se.jvnv-emotional-speech-corpus.style.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:baf4fe4997e9b7becebd1cf6ceb77c547696429c19c0d83e67b02eaa9ea04518
+size 16868
experiment_cache/cluster/xlsr_1b_se.jvnv-emotional-speech-corpus.speaker_id.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1db2fdda33aca2b6679fcfdc1822c2e3c97bee4e95e8e2fe4caf870c433f7aca
+size 13070
experiment_cache/cluster/xlsr_1b_se.jvnv-emotional-speech-corpus.style.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:967263a5f17d73b1582c81e126963844d27c60771055ab5ff793fbd4f5fe3697
+size 17817
experiment_cache/cluster/xlsr_2b_se.jvnv-emotional-speech-corpus.speaker_id.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:494f36dbcdaf5246f3023154f15707ec8d2a56a8748d605dff7b60360b6e9c48
+size 7259
experiment_cache/cluster/xlsr_2b_se.jvnv-emotional-speech-corpus.style.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6ac19ad62a037b4e183b597ab3ddaec6076e88ddbb3ab3c82e287ae9b713d23
+size 9778
experiment_cache/cluster/xlsr_300m_se.jvnv-emotional-speech-corpus.speaker_id.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19d1e763a6eebaf739b0e38c582e230796baf0d9e1aed83fdf19d8a1d61ed5e2
+size 12291
experiment_cache/cluster/xlsr_300m_se.jvnv-emotional-speech-corpus.style.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82e64a747033a983944f40023400134583e2b1ecd5e7d380895e14825f1783a7
+size 16866
experiment_cache/embeddings/clap_general_se.jvnv-emotional-speech-corpus.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:498208b3756f626c88ed8ba8be431eee5ed0e63a340f59c6c7e3d418ab7c25a9
+size 18519541
experiment_cache/embeddings/clap_se.jvnv-emotional-speech-corpus.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:acbcdb95c4983736efeb49dc781215db122a7303a47fbdef8763e98c95511b51
+size 18517392
experiment_cache/embeddings/hubert_base_se.jvnv-emotional-speech-corpus.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f75b855af4d03e0aeb0ec26c0481f72c9f5562050ba01987bbcb2f9bbbd443a
+size 27288541
experiment_cache/embeddings/hubert_large_se.jvnv-emotional-speech-corpus.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18aa556c86239a95ded10ca9faf3d03ad7038bd5f54a0d8ab61277713b1efac4
+size 36365089
experiment_cache/embeddings/hubert_xl_se.jvnv-emotional-speech-corpus.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d5dc6e7883d98daa96f0fcb29b09bf70c7f48f641b4fe8e2d00b6347c521746
+size 45349107
experiment_cache/embeddings/meta_voice_se.jvnv-emotional-speech-corpus.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62b406266d4117b737edc2535e91c3f58929f4d3a121727bf693525a9ba8d5e3
+size 7204339
experiment_cache/embeddings/pyannote_se.jvnv-emotional-speech-corpus.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22398c3e0f77cf2e3c4de139605ef3a64df0ef603ad69b0535be1e0d57487e1e
+size 16831930
experiment_cache/embeddings/w2v_bert_se.jvnv-emotional-speech-corpus.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2770b70d34c261c06661f55c6f20a1202b9c10094e3ecbd9140e0b0192d28c83
+size 37284291
experiment_cache/embeddings/wav2vec_se.jvnv-emotional-speech-corpus.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3137849232a67478d5419da43510ed8a947c785e03d778f9f335fc6c745d3739
+size 36447409
experiment_cache/embeddings/xlsr_1b_se.jvnv-emotional-speech-corpus.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e1913061be28a7339e93b6cf3ea864368e690739206660943e59649ff975bb
+size 43785294
experiment_cache/embeddings/xlsr_2b_se.jvnv-emotional-speech-corpus.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a924451dd61b2de0f0e156142d6dc0877d121a754890291ce0a93e17a3499e12
+size 65211989
experiment_cache/embeddings/xlsr_300m_se.jvnv-emotional-speech-corpus.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0bd5ed3cbaf354ba66a9147d3d9b71db2037d0ebd6fcecb5d3053dd5649321e
+size 35960244
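
Each of the embedding and cluster files added above is committed as a three-line Git LFS pointer rather than the data itself: a spec `version` line, the `oid sha256:` of the stored object, and its `size` in bytes. A minimal parsing sketch (the function name is my own, not part of this repository):

```python
# Sketch: parse the three-line Git LFS pointer format shown in the hunks above.
# parse_lfs_pointer is a hypothetical helper, not code from this repository.

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "<key> <value>".
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "oid": fields["oid"].removeprefix("sha256:"),  # hex digest of the stored object
        "size_bytes": int(fields["size"]),
    }

# Pointer content copied from the first CSV added in this commit.
pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:ca6f1877eea3414e9cc12c43f016564be2b4da7df922cdd3b9ad05b74070d2e4\n"
    "size 13266\n"
)
print(parse_lfs_pointer(pointer))
```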
experiment_cache/figure/2d.latent_space.clap_general_se.jvnv-emotional-speech-corpus.speaker_id.png
ADDED
experiment_cache/figure/2d.latent_space.clap_general_se.jvnv-emotional-speech-corpus.style.png
ADDED
experiment_cache/figure/2d.latent_space.clap_se.jvnv-emotional-speech-corpus.speaker_id.png
ADDED
experiment_cache/figure/2d.latent_space.clap_se.jvnv-emotional-speech-corpus.style.png
ADDED
experiment_cache/figure/2d.latent_space.hubert_base_se.jvnv-emotional-speech-corpus.speaker_id.png
ADDED
experiment_cache/figure/2d.latent_space.hubert_base_se.jvnv-emotional-speech-corpus.style.png
ADDED
experiment_cache/figure/2d.latent_space.hubert_large_se.jvnv-emotional-speech-corpus.speaker_id.png
ADDED
experiment_cache/figure/2d.latent_space.hubert_large_se.jvnv-emotional-speech-corpus.style.png
ADDED
experiment_cache/figure/2d.latent_space.hubert_xl_se.jvnv-emotional-speech-corpus.speaker_id.png
ADDED
experiment_cache/figure/2d.latent_space.hubert_xl_se.jvnv-emotional-speech-corpus.style.png
ADDED
experiment_cache/figure/2d.latent_space.meta_voice_se.jvnv-emotional-speech-corpus.speaker_id.png
ADDED
experiment_cache/figure/2d.latent_space.meta_voice_se.jvnv-emotional-speech-corpus.style.png
ADDED
experiment_cache/figure/2d.latent_space.pyannote_se.jvnv-emotional-speech-corpus.speaker_id.png
ADDED
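
The page layout suggests this repository is hosted on the Hugging Face Hub. If so, the LFS-backed cache files can be fetched with `git lfs pull` after cloning, or selectively with `huggingface_hub` — a hedged sketch, with the repository id left as a placeholder because it is not shown in this view:

```python
# Assumption: the repository lives on the Hugging Face Hub and huggingface_hub is installed.
# "OWNER/REPO" is a placeholder; the real repository id is not visible on this page.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="OWNER/REPO",
    # repo_type="dataset",  # may be required if this is a dataset repository
    allow_patterns=["experiment_cache/embeddings/*.json"],  # fetch only the embedding caches
)
print("cached under:", local_dir)
```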