Guscerra committed
Commit e08c10a · 1 Parent(s): 84e9268
Files changed (3)
  1. pyproject.toml +20 -0
  2. voice_prints.pkl +3 -0
  3. vps_clustering_benchmark.py +99 -0
pyproject.toml ADDED
@@ -0,0 +1,20 @@
+ [tool.poetry]
+ name = "vps-clustering-benchmark"
+ version = "0.1.0"
+ description = ""
+ authors = ["RafalCer <rafal.cerniawski@gmail.com>"]
+ readme = "README.md"
+
+ [tool.poetry.dependencies]
+ python = "^3.10"
+ numpy = "^1.26.4"
+ torch = "^2.3.0"
+ nemo-toolkit = {extras = ["asr"], version = "1.23.0"}
+ youtokentome = { git = "https://github.com/conversy-ai/YouTokenToMe.git", branch = "master" }
+ pandas = "^2.2.2"
+ pydub = "^0.25.1"
+ protobuf = "^3.20.0"
+
+ [build-system]
+ requires = ["poetry-core"]
+ build-backend = "poetry.core.masonry.api"
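
A quick sanity check of the pinned environment after `poetry install` — a sketch, not part of the commit, assuming the dependency constraints above resolved successfully:

```python
# Quick environment check after `poetry install`; a sketch, assuming the
# dependency constraints declared in pyproject.toml resolved successfully.
import numpy
import torch
import pandas

print("numpy ", numpy.__version__)   # should satisfy ^1.26.4
print("torch ", torch.__version__)   # should satisfy ^2.3.0
print("pandas", pandas.__version__)  # should satisfy ^2.2.2
```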
voice_prints.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6cf4081989aa7ced1264898011be265b7567d78e9f456fc329fde435add0119
+ size 187836
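
The three lines above are only a Git LFS pointer; the actual ~188 KB pickle is fetched by `git lfs pull` or resolved automatically from the Hub. A hypothetical inspection snippet, assuming the file deserializes to the pandas DataFrame consumed by the loading script below:

```python
# Hypothetical inspection of the LFS-tracked pickle; assumes it deserializes
# to the pandas DataFrame that _generate_examples in the loading script reads.
import pandas as pd

df = pd.read_pickle("voice_prints.pkl")
print(df.columns.tolist())    # expected: segment_id, filename, speaker, duration, segment_clean, vp
print(len(df), "segments")
print(len(df.iloc[0]["vp"]))  # 192, per the feature spec in the script
```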
vps_clustering_benchmark.py ADDED
@@ -0,0 +1,89 @@
+ import datasets
+ import pandas as pd
+ import numpy as np
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _DESCRIPTION = """\
+ This dataset consists of a small sample of audio clips with annotated
+ speaker identities, their age and gender, diarization-based speech segments,
+ and transcriptions. The dataset is intended for benchmarking VBI-core.
+ """
+
+ _HF_REPO_PATH = "https://huggingface.co/datasets/conversy/vps_clustering_benchmark/resolve/main/voice_prints.pkl"
+
+
+ class VPClusteringBenchmarkConfig(datasets.BuilderConfig):
+     """BuilderConfig for the Conversy benchmark."""
+
+     def __init__(self, name, version, **kwargs):
+         """BuilderConfig for the Conversy benchmark.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         self.features = kwargs.pop("features", None)
+         self.description = kwargs.pop("description", None)
+         self.data_url = kwargs.pop("data_url", None)
+         self.nb_data_shards = kwargs.pop("nb_data_shards", None)
+         super().__init__(name=name, version=version, **kwargs)
+
+
+ class VPClusteringBenchmark(datasets.GeneratorBasedBuilder):
+     """Conversy benchmark."""
+
+     VERSION = datasets.Version("1.0.0")
+     BUILDER_CONFIGS = [
+         VPClusteringBenchmarkConfig(
+             name="VPClusteringBenchmark",
+             version=VERSION,
+             description="Conversy benchmark for ML model evaluation",
+             features=["segment_id", "filename", "speaker", "duration", "vp",
+                       "segment_clean"],
+             data_url=_HF_REPO_PATH,
+             nb_data_shards=1)
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "segment_id": datasets.Value("string"),
+                 "filename": datasets.Value("string"),
+                 "speaker": datasets.Value("string"),
+                 "duration": datasets.Value("float32"),
+                 "segment_clean": datasets.Value("bool"),
+                 # 192-dim voice print stored as a fixed-length 1-D sequence;
+                 # Array2D would require a two-dimensional shape.
+                 "vp": datasets.Sequence(datasets.Value("float32"), length=192),
+             })
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             version=self.config.version,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         data_url = self.config.data_url
+         downloaded_file = dl_manager.download_and_extract(data_url)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"file_path": downloaded_file},
+             ),
+         ]
+
+     def _generate_examples(self, file_path):
+         """Yields examples."""
+         df = pd.read_pickle(file_path)
+         for idx, row in df.iterrows():
+             yield idx, {
+                 "segment_id": row["segment_id"],
+                 "filename": row["filename"],
+                 "speaker": row["speaker"],
+                 "duration": row["duration"],
+                 "segment_clean": row["segment_clean"],
+                 # Ensure vp is a float32 NumPy array.
+                 "vp": np.array(row["vp"], dtype=np.float32),
+             }
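
A minimal usage sketch, assuming the script above is published as the loading script of `conversy/vps_clustering_benchmark` on the Hub (`trust_remote_code=True` is required for script-backed datasets in recent `datasets` releases):

```python
# Minimal usage sketch, assuming the loading script above backs the
# conversy/vps_clustering_benchmark repository on the Hub.
import numpy as np
from datasets import load_dataset

ds = load_dataset(
    "conversy/vps_clustering_benchmark",
    split="train",
    trust_remote_code=True,  # needed for datasets backed by a custom script
)

# Stack the 192-dim voice prints for a clustering experiment and keep the
# annotated speaker labels as ground truth.
X = np.stack([np.asarray(ex["vp"], dtype=np.float32) for ex in ds])
speakers = [ex["speaker"] for ex in ds]
print(X.shape, "voice prints,", len(set(speakers)), "speakers")
```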