glt3953 committed
Commit a409d9a
1 Parent(s): 12445d7

Upload 5 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ examples/demo_shejipuhui.mp4 filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,137 @@
+ from modelscope.pipelines import pipeline
+ from modelscope.utils.constant import Tasks
+ import gradio as gr
+ import datetime
+ import os
+
+ # Get the current Beijing time (UTC+8)
+ utc_dt = datetime.datetime.now(datetime.timezone.utc)
+ beijing_dt = utc_dt.astimezone(datetime.timezone(datetime.timedelta(hours=8)))
+ formatted = beijing_dt.strftime("%Y-%m-%d_%H")
+ print(f"北京时间: {beijing_dt.year}年{beijing_dt.month}月{beijing_dt.day}日 "
+       f"{beijing_dt.hour}时{beijing_dt.minute}分{beijing_dt.second}秒")
+ # Create the directory that holds generated transcripts
+ works_path = '../works_audio_video_transcribe/' + formatted
+ if not os.path.exists(works_path):
+     os.makedirs(works_path)
+ print('作品目录:' + works_path)
+
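+ # ModelScope ASR pipeline: Paraformer-large for 16 kHz Mandarin, with built-in VAD and punctuation restoration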
+ inference_pipeline = pipeline(
+     task=Tasks.auto_speech_recognition,
+     model='damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch')
+
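+ # Run ASR on an audio file, print the result, and persist it to text_file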
+ def transcript(audiofile, text_file):
+     rec_result = inference_pipeline(audio_in=audiofile)
+     print(rec_result['text'])
+
+     # UTF-8 so the Chinese transcript survives non-UTF-8 default locales
+     with open(text_file, "w", encoding="utf-8") as f:
+         f.write(rec_result['text'])
+
+     return rec_result['text']
+
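+ # Transcribe an uploaded audio file; returns the text and the saved .txt path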
+ def audio_recog(audiofile):
+     utc_dt = datetime.datetime.now(datetime.timezone.utc)
+     beijing_dt = utc_dt.astimezone(datetime.timezone(datetime.timedelta(hours=8)))
+     formatted = beijing_dt.strftime("%Y-%m-%d_%H-%M-%S")
+     print(f"开始时间: {beijing_dt.year}年{beijing_dt.month}月{beijing_dt.day}日 "
+           f"{beijing_dt.hour}时{beijing_dt.minute}分{beijing_dt.second}秒")
+
+     print("音频文件:" + audiofile)
+
+     filename = os.path.splitext(os.path.basename(audiofile))[0]
+     text_file = works_path + '/' + filename + '.txt'
+
+     text_output = transcript(audiofile, text_file)
+
+     utc_dt = datetime.datetime.now(datetime.timezone.utc)
+     beijing_dt = utc_dt.astimezone(datetime.timezone(datetime.timedelta(hours=8)))
+     formatted = beijing_dt.strftime("%Y-%m-%d_%H-%M-%S")
+     print(f"结束时间: {beijing_dt.year}年{beijing_dt.month}月{beijing_dt.day}日 "
+           f"{beijing_dt.hour}时{beijing_dt.minute}分{beijing_dt.second}秒")
+
+     return text_output, text_file
+
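+ # Transcribe a video by first extracting its audio track with ffmpeg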
+ def video_recog(filepath):
+     filename = os.path.splitext(os.path.basename(filepath))[0]
+     # Output path apparently reserved for the (currently hidden) subtitle feature
+     worksfile = works_path + '/works_' + filename + '.mp4'
+     print("视频文件:" + filepath)
+
+     utc_dt = datetime.datetime.now(datetime.timezone.utc)
+     beijing_dt = utc_dt.astimezone(datetime.timezone(datetime.timedelta(hours=8)))
+     formatted = beijing_dt.strftime("%Y-%m-%d_%H-%M-%S.%f")
+
+     # Extract the audio track as MP3 (quote paths so spaces don't break the command)
+     audiofile = works_path + '/' + formatted + '.mp3'
+     os.system(f'ffmpeg -i "{filepath}" -vn -c:a libmp3lame -q:a 4 "{audiofile}"')
+
+     # Recognize the extracted audio file
+     text_output, text_file = audio_recog(audiofile)
+
+     return text_output, text_file
+
+ css_style = "#fixed_size_img {height: 240px;} " \
+             "#overview {margin: auto;max-width: 400px; max-height: 400px;}"
+
+ title = "音视频识别 by宁侠"
+ description = "您只需要上传一段音频或视频文件,我们的服务会快速对其进行语音识别,然后生成相应的文字。这样,您就可以轻松地记录下重要的语音内容。现在就来试试我们的音视频识别服务吧,让您的生活和工作更加便捷!"
+
+ examples_path = 'examples/'
+ examples = [[examples_path + 'demo_shejipuhui.mp4']]
+
+ # Gradio interface
+ with gr.Blocks(title=title, css=css_style) as demo:
+     gr.HTML('''
+       <div style="text-align: center; max-width: 720px; margin: 0 auto;">
+         <div
+           style="
+             display: inline-flex;
+             align-items: center;
+             gap: 0.8rem;
+             font-size: 1.75rem;
+           "
+         >
+           <h1 style="font-family: PingFangSC; font-weight: 500; font-size: 36px; margin-bottom: 7px;">
+             音视频识别
+           </h1>
+           <h1 style="font-family: PingFangSC; font-weight: 500; line-height: 1.5em; font-size: 16px; margin-bottom: 7px;">
+             by宁侠
+           </h1>
+         </div>
+       </div>
+     ''')
+     gr.Markdown(description)
+
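+     # Audio tab: upload a clip, run ASR, show the text, and offer the .txt download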
+     with gr.Tab("🔊音频识别 Audio Transcribe"):
+         with gr.Row():
+             with gr.Column():
+                 audio_input = gr.Audio(label="🔊音频输入 Audio Input", type="filepath")
+                 gr.Examples(['examples/paddlespeech.asr-zh.wav', 'examples/demo_shejipuhui.mp3'], [audio_input])
+                 audio_recog_button = gr.Button("👂音频识别 Recognize")
+             with gr.Column():
+                 audio_text_output = gr.Textbox(label="✏️识别结果 Recognition Result", max_lines=5)
+                 audio_text_file = gr.File(label="✏️识别结果文件 Recognition Result File")
+                 audio_subtitles_button = gr.Button("添加字幕\nGenerate Subtitles", visible=False)
+                 audio_output = gr.Audio(label="🔊音频 Audio", visible=False)
+
+         audio_recog_button.click(audio_recog, inputs=[audio_input], outputs=[audio_text_output, audio_text_file])
+         # audio_subtitles_button.click(audio_subtitles, inputs=[audio_text_input], outputs=[audio_output])
+
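+     # Video tab: same flow, but the audio track is extracted with ffmpeg first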
+     with gr.Tab("🎥视频识别 Video Transcribe"):
+         with gr.Row():
+             with gr.Column():
+                 video_input = gr.Video(label="🎥视频输入 Video Input")
+                 gr.Examples(['examples/demo_shejipuhui.mp4'], [video_input], label='语音识别示例 ASR Demo')
+                 video_recog_button = gr.Button("👂视频识别 Recognize")
+                 video_output = gr.Video(label="🎥视频 Video", visible=False)
+             with gr.Column():
+                 video_text_output = gr.Textbox(label="✏️识别结果 Recognition Result", max_lines=5)
+                 video_text_file = gr.File(label="✏️识别结果文件 Recognition Result File")
+                 with gr.Row(visible=False):
+                     font_size = gr.Slider(minimum=10, maximum=100, value=32, step=2, label="🔠字幕字体大小 Subtitle Font Size")
+                     font_color = gr.Radio(["black", "white", "green", "red"], label="🌈字幕颜色 Subtitle Color", value='white')
+                 video_subtitles_button = gr.Button("添加字幕\nGenerate Subtitles", visible=False)
+
+         video_recog_button.click(video_recog, inputs=[video_input], outputs=[video_text_output, video_text_file])
+         # video_subtitles_button.click(video_subtitles, inputs=[video_text_input], outputs=[video_output])
+
+ # Start the Gradio service locally
+ demo.queue(api_open=False).launch(debug=True)
examples/demo_shejipuhui.mp3 ADDED
Binary file (430 kB)
 
examples/demo_shejipuhui.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8fa2612b7a25e94f8ec3fe96ac88fb01874b8bbed5b7bc10d07ef0555340bc6
+ size 4784476
examples/paddlespeech.asr-zh.wav ADDED
Binary file (160 kB)
 
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ funasr
+ torchaudio