zhzluke96 committed
Commit 37195a7
1 Parent(s): d2b7e94
modules/prompts/news_oral_prompt.txt CHANGED
@@ -2,7 +2,7 @@
 任务:新闻稿口播化
 
 你需要将一个新闻稿改写为口语化的口播文本,以提供给新闻主播在晚间新闻节目中播报
-同时,适当的添加一些 附语言 标签为文本增加多样性
+同时,适当的添加一些 副语言 标签为文本增加多样性
 
 目前可以使用的附语言标签如下:
 - `[laugh]`: 表示笑声
modules/webui/speaker/speaker_creator.py CHANGED
@@ -77,11 +77,12 @@ def create_spk_from_seed(
 
 @torch.inference_mode()
 @spaces.GPU(duration=120)
-def test_spk_voice(seed: int, text: str):
-    return tts_generate(
-        spk=seed,
-        text=text,
-    )
+def test_spk_voice(
+    seed: int,
+    text: str,
+    progress=gr.Progress(track_tqdm=True),
+):
+    return tts_generate(spk=seed, text=text, progress=progress)
 
 
 def random_speaker():
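Context on the pattern added here: a Gradio handler parameter whose default is a `gr.Progress` instance is injected by the framework rather than bound to a UI component, and `track_tqdm=True` mirrors any tqdm loop running inside the call (here, presumably inside `tts_generate`) as a progress bar in the browser. A minimal, self-contained sketch of that mechanism; the handler and component names below are illustrative, not from this repo:

```python
import time

import gradio as gr
from tqdm import tqdm


def demo_generate(text: str, progress=gr.Progress(track_tqdm=True)):
    # Because track_tqdm=True, this tqdm loop is mirrored as a progress bar
    # in the web UI. Gradio supplies `progress` itself, so it is not listed
    # in the .click(inputs=...) wiring below.
    for _ in tqdm(range(10), desc="synthesizing"):
        time.sleep(0.1)
    return f"done: {text}"


with gr.Blocks() as demo:
    text = gr.Textbox(label="text")
    result = gr.Textbox(label="result")
    gr.Button("test voice").click(demo_generate, inputs=text, outputs=result)

if __name__ == "__main__":
    demo.launch()
```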
modules/webui/speaker/speaker_editor.py CHANGED
@@ -11,14 +11,15 @@ from modules.webui.webui_utils import tts_generate
 
 @torch.inference_mode()
 @spaces.GPU(duration=120)
-def test_spk_voice(spk_file, text: str):
+def test_spk_voice(
+    spk_file,
+    text: str,
+    progress=gr.Progress(track_tqdm=True),
+):
     if spk_file == "" or spk_file is None:
         return None
     spk = Speaker.from_file(spk_file)
-    return tts_generate(
-        spk=spk,
-        text=text,
-    )
+    return tts_generate(spk=spk, text=text, progress=progress)
 
 
 def speaker_editor_ui():
modules/webui/speaker/speaker_merger.py CHANGED
@@ -77,7 +77,16 @@ def merge_spk(
 @torch.inference_mode()
 @spaces.GPU(duration=120)
 def merge_and_test_spk_voice(
-    spk_a, spk_a_w, spk_b, spk_b_w, spk_c, spk_c_w, spk_d, spk_d_w, test_text
+    spk_a,
+    spk_a_w,
+    spk_b,
+    spk_b_w,
+    spk_c,
+    spk_c_w,
+    spk_d,
+    spk_d_w,
+    test_text,
+    progress=gr.Progress(track_tqdm=True),
 ):
     merged_spk = merge_spk(
         spk_a,
@@ -89,10 +98,7 @@ def merge_and_test_spk_voice(
         spk_d,
         spk_d_w,
     )
-    return tts_generate(
-        spk=merged_spk,
-        text=test_text,
-    )
+    return tts_generate(spk=merged_spk, text=test_text, progress=progress)
 
 
 @torch.inference_mode()
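One detail this hunk illustrates: Gradio maps the components passed via `.click(..., inputs=[...])` to the handler's positional parameters in order, while a parameter defaulting to `gr.Progress(...)` is skipped and injected by the framework, so the new `progress` argument has to stay last. A hedged sketch of that wiring with placeholder components (not the repo's actual `speaker_merger_ui`):

```python
import gradio as gr


def merge_and_test(spk_a, spk_a_w, spk_b, spk_b_w, test_text,
                   progress=gr.Progress(track_tqdm=True)):
    # spk_a .. test_text are filled from the inputs list below, in order;
    # progress is supplied by Gradio and can also be driven manually.
    progress(0.3, desc="merging speakers")
    merged = f"{spk_a}*{spk_a_w} + {spk_b}*{spk_b_w}"
    progress(0.8, desc="generating audio")
    return f"[{merged}] {test_text}"


with gr.Blocks() as demo:
    spk_a = gr.Textbox(label="Speaker A")
    spk_a_w = gr.Slider(0, 1, value=0.5, label="Weight A")
    spk_b = gr.Textbox(label="Speaker B")
    spk_b_w = gr.Slider(0, 1, value=0.5, label="Weight B")
    test_text = gr.Textbox(label="Test text")
    result = gr.Textbox(label="Result")
    gr.Button("Merge & test").click(
        merge_and_test,
        inputs=[spk_a, spk_a_w, spk_b, spk_b_w, test_text],
        outputs=result,
    )
```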
webui.py CHANGED
@@ -2,12 +2,6 @@ import logging
 import os
 import sys
 
-from modules.api.api_setup import (
-    process_api_args,
-    process_model_args,
-    setup_api_args,
-    setup_model_args,
-)
 from modules.ffmpeg_env import setup_ffmpeg_path
 
 try:
@@ -22,6 +16,12 @@ except BaseException:
 import argparse
 
 from modules import config
+from modules.api.api_setup import (
+    process_api_args,
+    process_model_args,
+    setup_api_args,
+    setup_model_args,
+)
 from modules.api.app_config import app_description, app_title, app_version
 from modules.gradio_dcls_fix import dcls_patch
 from modules.utils.env import get_and_update_env
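These two hunks move the `modules.api.api_setup` imports from the top of webui.py to after the try/except block, so whatever that block prepares (the visible context suggests environment setup such as the ffmpeg path) runs before the heavier API/model modules and their import-time side effects. A rough sketch of the resulting ordering; the try-block body is a placeholder, not the repo's actual code:

```python
import logging
import os
import sys

from modules.ffmpeg_env import setup_ffmpeg_path


def _early_setup():
    """Placeholder for whatever webui.py prepares before heavy imports."""


try:
    _early_setup()
except BaseException:
    logging.exception("early setup failed")

import argparse

from modules import config
# Only imported after the environment is prepared, as in the diff above.
from modules.api.api_setup import (
    process_api_args,
    process_model_args,
    setup_api_args,
    setup_model_args,
)
```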