# Copyright (c) Microsoft
#               2022 Chengdong Liang (liangchengdong@mail.nwpu.edu.cn)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
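
"""Gradio demo for target speaker extraction with WeSep.

Upload two enrollment utterances and a mixture, choose which enrolled
speaker to extract, and the demo returns that speaker's speech as a wav file.
"""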

import os

import gradio as gr
import soundfile
import torchaudio
import wesep

input_path = "./audios"
output_path = "./extracted"

# Create working directories for uploaded and extracted audio.
os.makedirs(input_path, exist_ok=True)
os.makedirs(output_path, exist_ok=True)


# Load the pretrained English separation model shipped with WeSep.
en_model = wesep.load_model("english")

def save_to_file(audio, filename, target_sr=16000):
    """Save a Gradio (sample_rate, data) audio tuple to disk, resampled to target_sr."""
    audio_path = os.path.join(input_path, filename)
    # Gradio passes audio as (sample_rate, numpy_array).
    soundfile.write(audio_path, audio[1], audio[0])
    # Reload and resample to the rate expected by the model.
    pcm, sample_rate = torchaudio.load(audio_path)
    transform = torchaudio.transforms.Resample(orig_freq=sample_rate,
                                               new_freq=target_sr)
    pcm = transform(pcm)
    torchaudio.save(audio_path, pcm, target_sr)

    return audio_path
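
# Illustrative usage (a sketch: `samples` is a hypothetical NumPy array recorded
# at 44.1 kHz, since Gradio delivers microphone input as a (sample_rate, data) tuple):
#     path = save_to_file((44100, samples), "enroll_1.wav", target_sr=16000)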


def speaker_extraction(audio1, audio2, mixture, select_speaker='#1'):
    """Extract the selected enrolled speaker from the mixture and return the wav path."""
    if audio1 is None or audio2 is None or mixture is None:
        gr.Warning("The audio file cannot be empty; please upload a valid audio file.")
        return None

    audio_path1 = save_to_file(audio1, "enroll_1.wav", 16000)
    audio_path2 = save_to_file(audio2, "enroll_2.wav", 16000)
    audio_mixture = save_to_file(mixture, "mixture.wav", 16000)

    model = en_model

    # Map the radio-button choice to the corresponding enrollment utterance.
    if select_speaker == '#1':
        select_speaker = audio_path1
    elif select_speaker == '#2':
        select_speaker = audio_path2

    speech = model.extract_speech(audio_mixture, select_speaker)
    audio_speech = os.path.join(output_path, "speech.wav")
    soundfile.write(audio_speech, speech[0], 16000)

    return audio_speech


inputs = [
    gr.Audio(show_download_button=True, label='Enroll Speaker #1'),
    gr.Audio(show_download_button=True, label='Enroll Speaker #2'),
    gr.Audio(show_download_button=True, label='Mixture'),
    gr.Radio(['#1', '#2'], label='Extract Speaker #'),
]

output = gr.Audio(type="filepath", label="Extracted Speech")

# Description shown at the top of the demo page.
description = ("<p>WeSep demo! Try it with your own voice! "
               "Note: we recommend audio longer than 5 seconds.</p>")

article = (
    "<p style='text-align: center'>"
    "<a href='https://github.com/wenet-e2e/wesep' target='_blank'>Github: Learn more about WeSep</a>"
    "</p>")

examples = [
    ['examples/enroll_1.wav', 'examples/enroll_2.wav', 'examples/mixture.wav', '#1'],
    ['examples/enroll1_zh.wav', 'examples/enroll2_zh.wav', 'examples/mixture_zh.wav', '#2'],
]

interface = gr.Interface(
    fn=speaker_extraction,
    inputs=inputs,
    outputs=output,
    title="Speaker Extraction in WeSep",
    description=description,
    article=article,
    examples=examples,
)

interface.launch()
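
# Optional: launch() also accepts sharing/server options, e.g. to expose the
# demo publicly (a sketch, assuming Gradio's standard launch parameters):
#     interface.launch(share=True, server_name="0.0.0.0", server_port=7860)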