Snap
- clustering_segments.py +107 -0
- dataset.pkl +3 -0
clustering_segments.py
ADDED
@@ -0,0 +1,107 @@
import datasets
import pandas as pd
import numpy as np

logger = datasets.logging.get_logger(__name__)

_DATA_PATH = "https://huggingface.co/datasets/conversy/clustering_files/resolve/main/dataset.pkl"


class ClusteringFilesConfig(datasets.BuilderConfig):
    """BuilderConfig for the Conversy benchmark."""

    def __init__(self, name, version, **kwargs):
        """BuilderConfig for the Conversy benchmark.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        self.name = name
        self.version = version
        self.features = kwargs.pop("features", None)
        self.description = kwargs.pop("description", None)
        self.data_url = kwargs.pop("data_url", None)
        self.nb_data_shards = kwargs.pop("nb_data_shards", None)

        super().__init__(name=name, version=version, **kwargs)


class ClusteringFiles(datasets.GeneratorBasedBuilder):
    """Conversy benchmark."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        ClusteringFilesConfig(
            name="ClusteringFiles",
            version=VERSION,
            description="Conversy Benchmark for ML models evaluation",
            features=["filename", "segments"],
            data_url=_DATA_PATH,
            nb_data_shards=1,
        )
    ]

    def _info(self):
        description = "Voice Print Clustering Benchmark"
        features = datasets.Features(
            {
                "filename": datasets.Value("string"),
                # Each example is one recording carrying a list of diarized segments.
                "segments": [
                    {
                        "segment_id": datasets.Value("int32"),
                        "speaker": datasets.Value("string"),
                        "duration": datasets.Value("float32"),
                        "segment_clean": datasets.Value("bool"),
                        "start": datasets.Value("float32"),
                        "end": datasets.Value("float32"),
                        "readable_start": datasets.Value("string"),
                        "readable_end": datasets.Value("string"),
                        # Voice-print embedding for the segment.
                        "vp": datasets.Sequence(datasets.Value("float32")),
                    }
                ],
            }
        )
        return datasets.DatasetInfo(
            description=description,
            features=features,
            supervised_keys=None,
            version=self.config.version,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_url = self.config.data_url
        downloaded_file = dl_manager.download_and_extract(data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": downloaded_file},
            ),
        ]

    def _generate_examples(self, file_path):
        """Yields examples."""
        df = pd.read_pickle(file_path)

        # Group the per-segment rows by source file so that each yielded
        # example bundles every segment of one recording.
        files = {}
        for _, row in df.iterrows():
            if row["filename"] not in files:
                files[row["filename"]] = {"filename": row["filename"], "segments": []}
            files[row["filename"]]["segments"].append(
                {
                    "segment_id": row["segment_id"],
                    "speaker": row["speaker"],
                    "duration": row["duration"],
                    "segment_clean": row["segment_clean"],
                    "start": row["start"],
                    "end": row["end"],
                    "readable_start": row["readable_start"],
                    "readable_end": row["readable_end"],
                    "vp": np.asarray(row["vp"], dtype=np.float32),
                }
            )

        for idx, file_data in enumerate(files.values()):
            yield idx, file_data
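For reference, a minimal usage sketch for the script above (not part of the commit). It assumes a `datasets` release that still executes dataset loading scripts; on recent versions this requires passing `trust_remote_code=True`. The local-path form is used here, though the Hub id `conversy/clustering_files` in `_DATA_PATH` suggests the script is meant to be loaded from that repository.

# Hedged usage sketch; assumes a `datasets` version that still runs
# dataset loading scripts (newer releases need trust_remote_code=True).
from datasets import load_dataset

ds = load_dataset("clustering_segments.py", trust_remote_code=True, split="train")

# Each example is one recording with all of its diarized segments.
example = ds[0]
print(example["filename"])
for seg in example["segments"]:
    print(seg["segment_id"], seg["speaker"], seg["start"], seg["end"], len(seg["vp"]))

Because the features declare "segments" as a plain list of structs rather than a datasets.Sequence of structs, each example's "segments" comes back as a list of dicts, which is why the per-segment iteration above works directly.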
dataset.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7be56f2d87b72d421e97b8158176eebf3317e0c4ac697e4a9fa671ad1eec949a
size 198842
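The dataset.pkl payload itself is stored through Git LFS, so only the pointer file (spec version, sha256 oid, byte size) appears in the diff. For reference, a hedged sketch of a DataFrame that would satisfy the column accesses in `_generate_examples`; every value below, including the embedding dimension, is an illustrative assumption rather than the real contents of dataset.pkl.

# Hedged sketch of a pickle compatible with _generate_examples;
# the column names come from the loader, the values are made up.
import numpy as np
import pandas as pd

df = pd.DataFrame([
    {
        "filename": "call_001.wav",            # hypothetical recording name
        "segment_id": 0,
        "speaker": "spk_0",
        "duration": 2.5,
        "segment_clean": True,
        "start": 0.0,
        "end": 2.5,
        "readable_start": "00:00:00",
        "readable_end": "00:00:02",
        "vp": np.random.rand(256).astype(np.float32),  # embedding dim is an assumption
    },
])
df.to_pickle("dataset.pkl")  # read back by the loader via pd.read_pickle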