init
- experiment_cache/.DS_Store +0 -0
- experiment_cache/cluster/xlsr_se.expresso.speaker_id.csv +0 -3
- experiment_cache/cluster/xlsr_se.expresso.style.csv +0 -3
- experiment_cache/cluster/xlsr_se.voxceleb1-test-split.speaker_id.csv +0 -3
- experiment_cache/embeddings/xlsr_se.expresso.json +0 -3
- experiment_cache/embeddings/xlsr_se.voxceleb1-test-split.json +0 -3
- experiment_cache/figure/2d.latent_space.xlsr_se.expresso.speaker_id.png +0 -3
- experiment_cache/figure/2d.latent_space.xlsr_se.expresso.style.png +0 -3
- experiment_cache/figure/2d.latent_space.xlsr_se.voxceleb1-test-split.speaker_id.png +0 -3
- experiment_cache/tsne/xlsr_se.expresso.speaker_id.npy +0 -3
- experiment_cache/tsne/xlsr_se.expresso.style.npy +0 -3
- experiment_cache/tsne/xlsr_se.voxceleb1-test-split.speaker_id.npy +0 -3
- experiment_speaker_verification.py +26 -11
- model_hubert.py +1 -2
- model_w2v_bert.py +3 -3
- model_xls.py +24 -6
- test.py +2 -2
experiment_cache/.DS_Store
CHANGED
Binary files a/experiment_cache/.DS_Store and b/experiment_cache/.DS_Store differ
experiment_cache/cluster/xlsr_se.expresso.speaker_id.csv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4874b093274ca6d59fd4989784891e2d594c7ef1c2099eaa93bf28fa769c86c8
-size 297681
experiment_cache/cluster/xlsr_se.expresso.style.csv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:01a55057f9b14bd51c5ac3645e17146ad139e1083b64e82545fadb6f48690cb4
-size 331093
experiment_cache/cluster/xlsr_se.voxceleb1-test-split.speaker_id.csv
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f30902626df185dfa6a9a630de702d5a191698fdc24a75c985693ebcacb6c0a0
-size 184308
experiment_cache/embeddings/xlsr_se.expresso.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:fc33ceb69103f5e23f96ab1e2d8beca4ba9e9964a8bbefb26b72b25975e5c196
-size 192054155
experiment_cache/embeddings/xlsr_se.voxceleb1-test-split.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:aa7137389f225ee791db9c46713487b8439afdf91e4c56ad2401ce05751db108
-size 80339549
experiment_cache/figure/2d.latent_space.xlsr_se.expresso.speaker_id.png
DELETED
Git LFS Details
experiment_cache/figure/2d.latent_space.xlsr_se.expresso.style.png
DELETED
Git LFS Details
experiment_cache/figure/2d.latent_space.xlsr_se.voxceleb1-test-split.speaker_id.png
DELETED
Git LFS Details
experiment_cache/tsne/xlsr_se.expresso.speaker_id.npy
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5ead246e98e244fa2522cdd7cac3d51da21f73e37cd244de08863e82d9b92a38
-size 93048
experiment_cache/tsne/xlsr_se.expresso.style.npy
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5ead246e98e244fa2522cdd7cac3d51da21f73e37cd244de08863e82d9b92a38
-size 93048
experiment_cache/tsne/xlsr_se.voxceleb1-test-split.speaker_id.npy
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2a50b70d103968b9cf1a44c5032ef9fc583828e60ace0e83f77d35186785e0e2
-size 39120
experiment_speaker_verification.py
CHANGED
@@ -18,7 +18,7 @@ from model_meta_voice import MetaVoiceEmbedding
 from model_pyannote_embedding import PyannoteEmbedding
 from model_w2v_bert import W2VBERTEmbedding
 from model_clap import CLAPEmbedding, CLAPGeneralEmbedding
-from model_xls import
+from model_xls import Wav2VecEmbedding, XLSR300MEmbedding, XLSR1BEmbedding, XLSR2BEmbedding
 from model_hubert import HuBERTBaseEmbedding, HuBERTLargeEmbedding, HuBERTXLEmbedding


@@ -121,50 +121,65 @@ if __name__ == '__main__':
     # get_embedding(W2VBERTEmbedding, "w2v_bert_se", "asahi417/voxceleb1-test-split", "test")
     # get_embedding(CLAPEmbedding, "clap_se", "asahi417/voxceleb1-test-split", "test")
     # get_embedding(CLAPGeneralEmbedding, "clap_general_se", "asahi417/voxceleb1-test-split", "test")
-    # get_embedding(
-    get_embedding(HuBERTBaseEmbedding, "hubert_base_se", "asahi417/voxceleb1-test-split", "test")
+    # get_embedding(HuBERTBaseEmbedding, "hubert_base_se", "asahi417/voxceleb1-test-split", "test")
     # get_embedding(HuBERTLargeEmbedding, "hubert_large_se", "asahi417/voxceleb1-test-split", "test")
     # get_embedding(HuBERTXLEmbedding, "hubert_xl_se", "asahi417/voxceleb1-test-split", "test")
+    get_embedding(Wav2VecEmbedding, "wav2vec_se", "asahi417/voxceleb1-test-split", "test")
+    get_embedding(XLSR300MEmbedding, "xlsr_300m_se", "asahi417/voxceleb1-test-split", "test")
+    get_embedding(XLSR1BEmbedding, "xlsr_1b_se", "asahi417/voxceleb1-test-split", "test")
+    get_embedding(XLSR2BEmbedding, "xlsr_2b_se", "asahi417/voxceleb1-test-split", "test")

     # get_embedding(MetaVoiceEmbedding, "meta_voice_se", "ylacombe/expresso", "train")
     # get_embedding(PyannoteEmbedding, "pyannote_se", "ylacombe/expresso", "train")
     # get_embedding(W2VBERTEmbedding, "w2v_bert_se", "ylacombe/expresso", "train")
     # get_embedding(CLAPEmbedding, "clap_se", "ylacombe/expresso", "train")
     # get_embedding(CLAPGeneralEmbedding, "clap_general_se", "ylacombe/expresso", "train")
-    # get_embedding(
-    get_embedding(HuBERTBaseEmbedding, "hubert_base_se", "ylacombe/expresso", "train")
+    # get_embedding(HuBERTBaseEmbedding, "hubert_base_se", "ylacombe/expresso", "train")
     # get_embedding(HuBERTLargeEmbedding, "hubert_large_se", "ylacombe/expresso", "train")
     # get_embedding(HuBERTXLEmbedding, "hubert_xl_se", "ylacombe/expresso", "train")
+    get_embedding(Wav2VecEmbedding, "wav2vec_se", "ylacombe/expresso", "train")
+    get_embedding(XLSR300MEmbedding, "xlsr_300m_se", "ylacombe/expresso", "train")
+    get_embedding(XLSR1BEmbedding, "xlsr_1b_se", "ylacombe/expresso", "train")
+    get_embedding(XLSR2BEmbedding, "xlsr_2b_se", "ylacombe/expresso", "train")

     # cluster_embedding("meta_voice_se", "asahi417/voxceleb1-test-split", "speaker_id")
     # cluster_embedding("pyannote_se", "asahi417/voxceleb1-test-split", "speaker_id")
     # cluster_embedding("w2v_bert_se", "asahi417/voxceleb1-test-split", "speaker_id")
     # cluster_embedding("clap_se", "asahi417/voxceleb1-test-split", "speaker_id")
     # cluster_embedding("clap_general_se", "asahi417/voxceleb1-test-split", "speaker_id")
-    # cluster_embedding("
-    cluster_embedding("hubert_base_se", "asahi417/voxceleb1-test-split", "speaker_id")
+    # cluster_embedding("hubert_base_se", "asahi417/voxceleb1-test-split", "speaker_id")
     # cluster_embedding("hubert_large_se", "asahi417/voxceleb1-test-split", "speaker_id")
     # cluster_embedding("hubert_xl_se", "asahi417/voxceleb1-test-split", "speaker_id")
+    cluster_embedding("wav2vec_se", "asahi417/voxceleb1-test-split", "speaker_id")
+    cluster_embedding("xlsr_300m_se", "asahi417/voxceleb1-test-split", "speaker_id")
+    cluster_embedding("xlsr_1b_se", "asahi417/voxceleb1-test-split", "speaker_id")
+    cluster_embedding("xlsr_2b_se", "asahi417/voxceleb1-test-split", "speaker_id")

     # cluster_embedding("meta_voice_se", "ylacombe/expresso", "speaker_id")
     # cluster_embedding("pyannote_se", "ylacombe/expresso", "speaker_id")
     # cluster_embedding("w2v_bert_se", "ylacombe/expresso", "speaker_id")
     # cluster_embedding("clap_se", "ylacombe/expresso", "speaker_id")
     # cluster_embedding("clap_general_se", "ylacombe/expresso", "speaker_id")
-    # cluster_embedding("
-    cluster_embedding("hubert_base_se", "ylacombe/expresso", "speaker_id")
+    # cluster_embedding("hubert_base_se", "ylacombe/expresso", "speaker_id")
     # cluster_embedding("hubert_large_se", "ylacombe/expresso", "speaker_id")
     # cluster_embedding("hubert_xl_se", "ylacombe/expresso", "speaker_id")
+    cluster_embedding("wav2vec_se", "ylacombe/expresso", "speaker_id")
+    cluster_embedding("xlsr_300m_se", "ylacombe/expresso", "speaker_id")
+    cluster_embedding("xlsr_1b_se", "ylacombe/expresso", "speaker_id")
+    cluster_embedding("xlsr_2b_se", "ylacombe/expresso", "speaker_id")

     # cluster_embedding("meta_voice_se", "ylacombe/expresso", "style")
     # cluster_embedding("pyannote_se", "ylacombe/expresso", "style")
     # cluster_embedding("w2v_bert_se", "ylacombe/expresso", "style")
     # cluster_embedding("clap_se", "ylacombe/expresso", "style")
     # cluster_embedding("clap_general_se", "ylacombe/expresso", "style")
-    # cluster_embedding("
-    cluster_embedding("hubert_base_se", "ylacombe/expresso", "style")
+    # cluster_embedding("hubert_base_se", "ylacombe/expresso", "style")
     # cluster_embedding("hubert_large_se", "ylacombe/expresso", "style")
     # cluster_embedding("hubert_xl_se", "ylacombe/expresso", "style")
+    cluster_embedding("wav2vec_se", "ylacombe/expresso", "style")
+    cluster_embedding("xlsr_300m_se", "ylacombe/expresso", "style")
+    cluster_embedding("xlsr_1b_se", "ylacombe/expresso", "style")
+    cluster_embedding("xlsr_2b_se", "ylacombe/expresso", "style")


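Note: get_embedding and cluster_embedding are helpers defined earlier in experiment_speaker_verification.py and are not part of this diff. As a rough, hedged sketch of what one of the new calls does end to end (the dataset name and model class are taken from the calls above; the speaker_id column and the JSON cache path are assumptions), a standalone extraction pass might look like:

import json

from datasets import load_dataset

from model_xls import XLSR300MEmbedding

# Load the evaluation split used above and embed each utterance.
model = XLSR300MEmbedding()
dataset = load_dataset("asahi417/voxceleb1-test-split", split="test")

records = []
for row in dataset:
    audio = row["audio"]  # HF datasets decodes audio into {"array": ..., "sampling_rate": ...}
    vector = model.get_speaker_embedding(audio["array"], audio["sampling_rate"])
    records.append({"speaker_id": row.get("speaker_id"), "embedding": vector.tolist()})

# Cache the embeddings as JSON, mirroring experiment_cache/embeddings/ (illustrative path).
with open("experiment_cache/embeddings/xlsr_300m_se.voxceleb1-test-split.json", "w") as f:
    json.dump(records, f)

cluster_embedding presumably reads such a cache, projects it (the t-SNE arrays under experiment_cache/tsne/), and scores the label column, but its body is not visible in this commit.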
model_hubert.py
CHANGED
@@ -26,8 +26,7 @@ class HuBERTXLEmbedding:
         inputs = self.processor(wav, sampling_rate=self.processor.sampling_rate, return_tensors="pt")
         with torch.no_grad():
             outputs = self.model(**{k: v.to(self.device) for k, v in inputs.items()})
-        return outputs
-        # return outputs.last_hidden_state.mean(1).cpu().numpy()[0]
+        return outputs.last_hidden_state.mean(1).cpu().numpy()[0]


 class HuBERTLargeEmbedding(HuBERTXLEmbedding):
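This change fixes the HuBERT embedding method, which previously returned the raw model output object instead of a pooled vector: the new line mean-pools last_hidden_state over the time axis so callers get one fixed-size numpy vector per utterance. A minimal sketch of the pooling step in isolation (the shapes below are illustrative stand-ins, not the actual HuBERT dimensions):

import torch

# last_hidden_state has shape (batch, frames, hidden_size); averaging over frames
# collapses variable-length audio into a single fixed-size embedding.
last_hidden_state = torch.randn(1, 212, 1024)   # stand-in for outputs.last_hidden_state
embedding = last_hidden_state.mean(1).cpu().numpy()[0]
print(embedding.shape)  # (1024,) regardless of audio length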
model_w2v_bert.py
CHANGED
@@ -11,9 +11,9 @@ from transformers import Wav2Vec2BertModel, AutoFeatureExtractor


 class W2VBERTEmbedding:
-    def __init__(self):
-        self.processor = AutoFeatureExtractor.from_pretrained(
-        self.model = Wav2Vec2BertModel.from_pretrained(
+    def __init__(self, ckpt: str = "facebook/w2v-bert-2.0"):
+        self.processor = AutoFeatureExtractor.from_pretrained(ckpt)
+        self.model = Wav2Vec2BertModel.from_pretrained(ckpt)
         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
         self.model.to(self.device)
         self.model.eval()
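The W2VBERTEmbedding change parameterizes the checkpoint: the constructor now takes a ckpt argument defaulting to facebook/w2v-bert-2.0, so the previously hard-coded from_pretrained calls are gone and other Wav2Vec2-BERT checkpoints can be loaded without editing the class. A small usage sketch (the second checkpoint id is hypothetical):

from model_w2v_bert import W2VBERTEmbedding

default_model = W2VBERTEmbedding()                                   # loads facebook/w2v-bert-2.0
custom_model = W2VBERTEmbedding(ckpt="your-org/your-w2v-bert-ckpt")  # hypothetical checkpoint id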
model_xls.py
CHANGED
@@ -1,7 +1,6 @@
 """Meta's XLS-R based speaker embedding.
 - feature dimension: 768
-- source: https://huggingface.co/
-  https://huggingface.co/docs/transformers/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput
+- source: https://huggingface.co/docs/transformers/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput
 """
 from typing import Optional

@@ -11,10 +10,11 @@ import numpy as np
 from transformers import AutoFeatureExtractor, AutoModelForPreTraining


-class
-
-
-        self.
+class Wav2VecEmbedding:
+
+    def __init__(self, ckpt: str = "facebook/wav2vec2-large-xlsr-53"):
+        self.processor = AutoFeatureExtractor.from_pretrained(ckpt)
+        self.model = AutoModelForPreTraining.from_pretrained(ckpt)
         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
         self.model.to(self.device)
         self.model.eval()
@@ -27,3 +27,21 @@ class XLSREmbedding:
         with torch.no_grad():
             outputs = self.model(**{k: v.to(self.device) for k, v in inputs.items()})
         return outputs.projected_states.mean(1).cpu().numpy()[0]
+
+
+class XLSR2BEmbedding(Wav2VecEmbedding):
+
+    def __init__(self):
+        super().__init__("facebook/wav2vec2-xls-r-2b")
+
+
+class XLSR1BEmbedding(Wav2VecEmbedding):
+
+    def __init__(self):
+        super().__init__("facebook/wav2vec2-xls-r-1b")
+
+
+class XLSR300MEmbedding(Wav2VecEmbedding):
+
+    def __init__(self):
+        super().__init__("facebook/wav2vec2-xls-r-300m")
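The hunks above only show fragments of the rewritten module: the constructor of the new Wav2VecEmbedding base class, the mean-pooled projected_states return, and the three XLS-R subclasses. A hedged reconstruction of how those pieces plausibly fit together (the get_speaker_embedding name follows the call in test.py; the resampling step, the type hints, and the exact method signature beyond (wav, sr) are assumptions):

from typing import Optional

import librosa
import numpy as np
import torch
from transformers import AutoFeatureExtractor, AutoModelForPreTraining


class Wav2VecEmbedding:

    def __init__(self, ckpt: str = "facebook/wav2vec2-large-xlsr-53"):
        self.processor = AutoFeatureExtractor.from_pretrained(ckpt)
        self.model = AutoModelForPreTraining.from_pretrained(ckpt)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)
        self.model.eval()

    def get_speaker_embedding(self, wav: np.ndarray, sampling_rate: Optional[int] = None) -> np.ndarray:
        # Assumed step: resample to the feature extractor's expected rate (16 kHz for wav2vec2).
        if sampling_rate is not None and sampling_rate != self.processor.sampling_rate:
            wav = librosa.resample(wav, orig_sr=sampling_rate, target_sr=self.processor.sampling_rate)
        inputs = self.processor(wav, sampling_rate=self.processor.sampling_rate, return_tensors="pt")
        with torch.no_grad():
            outputs = self.model(**{k: v.to(self.device) for k, v in inputs.items()})
        # Mean-pool the pre-training head's projected states over time: one vector per utterance.
        return outputs.projected_states.mean(1).cpu().numpy()[0]


class XLSR300MEmbedding(Wav2VecEmbedding):

    def __init__(self):
        super().__init__("facebook/wav2vec2-xls-r-300m")

The 1B and 2B variants in the diff follow the same pattern, only swapping in facebook/wav2vec2-xls-r-1b and facebook/wav2vec2-xls-r-2b.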
test.py
CHANGED
@@ -3,14 +3,14 @@ from model_clap import CLAPEmbedding
 from model_meta_voice import MetaVoiceEmbedding
 from model_pyannote_embedding import PyannoteEmbedding
 from model_w2v_bert import W2VBERTEmbedding
-from model_xls import
+from model_xls import XLSR300MEmbedding
 from model_hubert import HuBERTXLEmbedding


 def test():
     wav, sr = librosa.load("sample.wav")
     print("XLS-R")
-    model =
+    model = XLSR300MEmbedding()
     v = model.get_speaker_embedding(wav, sr)
     print(v.shape)
     print("CLAP")