Allow advanced parameter instructions when transcribing audio to text
crazy_functional.py +1 -1
crazy_functions/总结音视频.py +20 -9
crazy_functional.py
CHANGED
@@ -252,7 +252,7 @@ def get_crazy_functions():
             "Color": "stop",
             "AsButton": False,
             "AdvancedArgs": True,
-            "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3",
+            "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。",
             "Function": HotReload(总结音视频)
         }
     })
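For context, a hedged sketch of how a plugin entry with AdvancedArgs is registered in crazy_functional.py. The button label, the function_plugins variable name and the import paths are illustrative assumptions; only the keys shown in the hunk above come from this commit.

from toolbox import HotReload                      # assumed import path
from crazy_functions.总结音视频 import 总结音视频

function_plugins = {
    "批量总结音视频": {                              # illustrative button label
        "Color": "stop",
        "AsButton": False,
        "AdvancedArgs": True,    # ask the UI to render an extra "advanced args" textbox
        "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。",
        "Function": HotReload(总结音视频),
    }
}

Whatever the user types into that textbox reaches the plugin as plugin_kwargs["advanced_arg"], which is what the second file below starts consuming.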
crazy_functions/总结音视频.py
CHANGED
@@ -1,4 +1,4 @@
-from toolbox import CatchException, report_execption, select_api_key, update_ui, write_results_to_file
+from toolbox import CatchException, report_execption, select_api_key, update_ui, write_results_to_file, get_conf
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 
 def split_audio_file(filename, split_duration=1000):
@@ -37,7 +37,7 @@ def split_audio_file(filename, split_duration=1000):
     audio.close()
     return filelist
 
-def AnalyAudio(file_manifest, llm_kwargs, chatbot, history):
+def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history):
     import os, requests
     from moviepy.editor import AudioFileClip
     from request_llm.bridge_all import model_info
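The hunk above only shows the tail of split_audio_file (audio.close(); return filelist). A minimal, hypothetical sketch of how such duration-based chunking can be done with moviepy's AudioFileClip; the helper name, the output naming scheme and the mp3 output format are assumptions, not the repository's exact implementation.

from moviepy.editor import AudioFileClip

def split_audio_sketch(filename, split_duration=1000):
    # Cut the source audio into split_duration-second chunks so that each
    # upload stays within the Whisper API file-size limit, and return the
    # list of chunk file paths (mirroring the `return filelist` above).
    audio = AudioFileClip(filename)
    filelist = []
    for start in range(0, int(audio.duration), split_duration):
        end = min(start + split_duration, audio.duration)
        chunk_path = f"{filename}.{start}.mp3"          # hypothetical naming
        audio.subclip(start, end).write_audiofile(chunk_path)
        filelist.append(chunk_path)
    audio.close()
    return filelist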
@@ -72,11 +72,20 @@ def AnalyAudio(file_manifest, llm_kwargs, chatbot, history):
             }
             data = {
                 "model": "whisper-1",
+                "prompt": parse_prompt,
                 'response_format': "text"
             }
-            response = requests.post(url, headers=headers, files=files, data=data).text
 
-            i_say = f'请对下面的音频片段做概述,音频内容是 ```{response}```'
+            chatbot.append([f"将 {i} 发送到openai音频解析终端 (whisper),当前参数:{parse_prompt}", "正在处理 ..."])
+            yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+            proxies, = get_conf('proxies')
+            response = requests.post(url, headers=headers, files=files, data=data, proxies=proxies).text
+
+            chatbot.append(["音频解析结果", response])
+            history.extend(["音频解析结果", response])
+            yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+
+            i_say = f'请对下面的音频片段做概述,音频内容是 ```{response}```'
             i_say_show_user = f'第{index + 1}段音频的第{j + 1} / {len(voice)}片段。'
             gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                 inputs=i_say,
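The heart of this hunk is the transcription request itself. Below is a standalone sketch of the same call against the standard OpenAI audio endpoint, showing where the new "prompt" field and the proxies mapping from get_conf('proxies') go; the helper name and the timeout are assumptions, and in the plugin the URL and API key actually come from model_info and select_api_key.

import requests

def transcribe_chunk(chunk_path, api_key, parse_prompt, proxies=None):
    # whisper-1 transcription over plain HTTP; "prompt" biases the decoding,
    # e.g. towards Simplified Chinese when parse_prompt is "将音频解析为简体中文".
    url = "https://api.openai.com/v1/audio/transcriptions"
    headers = {"Authorization": f"Bearer {api_key}"}
    with open(chunk_path, "rb") as f:
        files = {"file": f}
        data = {
            "model": "whisper-1",
            "prompt": parse_prompt,
            "response_format": "text",
        }
        # proxies is the same {"http": ..., "https": ...} mapping that
        # requests expects, so it can be passed straight through.
        resp = requests.post(url, headers=headers, files=files,
                             data=data, proxies=proxies, timeout=600)
    return resp.text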
@@ -84,17 +93,17 @@ def AnalyAudio(file_manifest, llm_kwargs, chatbot, history):
                 llm_kwargs=llm_kwargs,
                 chatbot=chatbot,
                 history=[],
-                sys_prompt="
+                sys_prompt=f"总结音频。音频文件名{fp}"
             )
 
             chatbot[-1] = (i_say_show_user, gpt_say)
             history.extend([i_say_show_user, gpt_say])
             audio_history.extend([i_say_show_user, gpt_say])
 
-        #
+        # 已经对该文章的所有片段总结完毕,如果文章被切分了
         result = "".join(audio_history)
         if len(audio_history) > 1:
-            i_say = f"
+            i_say = f"根据以上的对话,使用中文总结音频“{result}”的主要内容。"
             i_say_show_user = f'第{index + 1}段音频的主要内容:'
             gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                 inputs=i_say,
@@ -127,7 +136,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
     # 基本信息:功能、贡献者
     chatbot.append([
         "函数插件功能?",
-        "总结音视频内容,函数插件贡献者: dalvqw"])
+        "总结音视频内容,函数插件贡献者: dalvqw & BinaryHusky"])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
     try:
@@ -168,6 +177,8 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
         return
 
     # 开始正式执行任务
-    yield from AnalyAudio(file_manifest, llm_kwargs, chatbot, history)
+    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
+    parse_prompt = plugin_kwargs.get("advanced_arg", '将音频解析为简体中文')
+    yield from AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history)
 
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
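The three added lines at the end decide which prompt reaches AnalyAudio: an empty advanced-args textbox is treated as "not provided", so the default 将音频解析为简体中文 ("parse the audio as Simplified Chinese") is used. A self-contained restatement of that fallback, with a hypothetical helper name:

def resolve_parse_prompt(plugin_kwargs):
    # Mirror of the added logic: discard an empty advanced_arg, then fall
    # back to the default Simplified-Chinese parsing prompt.
    if plugin_kwargs.get("advanced_arg", None) == "":
        plugin_kwargs.pop("advanced_arg")
    return plugin_kwargs.get("advanced_arg", '将音频解析为简体中文')

assert resolve_parse_prompt({}) == '将音频解析为简体中文'
assert resolve_parse_prompt({"advanced_arg": ""}) == '将音频解析为简体中文'
assert resolve_parse_prompt({"advanced_arg": "transcribe to English"}) == "transcribe to English"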