glt3953 committed on
Commit
019e962
1 Parent(s): 4e65837

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -142
app.py DELETED
@@ -1,142 +0,0 @@
import os

# Runtime install of ModelScope's CV extras (hosted-Space workaround).
# NOTE(review): os.system is best-effort — a failed install is not detected.
os.system(
    'pip install "modelscope[cv]" -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html'
)

from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
import gradio as gr
import datetime

# Current Beijing time (UTC+8).
# Fix: the original used naive utcnow() + astimezone(timedelta(hours=16)),
# which interprets the naive value in the *host's* local timezone and used
# the wrong offset for Beijing; an aware UTC "now" converted to an explicit
# UTC+8 zone is correct on any host.
utc_dt = datetime.datetime.now(datetime.timezone.utc)
beijing_dt = utc_dt.astimezone(datetime.timezone(datetime.timedelta(hours=8)))
formatted = beijing_dt.strftime("%Y-%m-%d_%H")
print(f"北京时间: {beijing_dt.year}年{beijing_dt.month}月{beijing_dt.day}日 "
      f"{beijing_dt.hour}时{beijing_dt.minute}分{beijing_dt.second}秒")

# Per-hour output directory for generated transcripts/works.
works_path = '../works_audio_video_transcribe/' + formatted
os.makedirs(works_path, exist_ok=True)  # idiomatic replacement for exists()+makedirs()
print('作品目录:' + works_path)
23
-
# Paraformer-large Chinese ASR pipeline (16 kHz, with VAD and punctuation),
# loaded once at module import and reused by every request.
_ASR_MODEL_ID = 'damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch'
inference_pipeline = pipeline(task=Tasks.auto_speech_recognition, model=_ASR_MODEL_ID)
27
-
def transcript(audiofile, text_file):
    """Run ASR on *audiofile*, save the transcript to *text_file*, return the text.

    Parameters:
        audiofile: path of the audio file to recognize.
        text_file: path where the recognized text is written.

    Returns:
        The recognized text (str).
    """
    rec_result = inference_pipeline(audio_in=audiofile)
    text = rec_result['text']  # single lookup instead of three
    print(text)

    # Fix: explicit UTF-8 — the transcript is Chinese text, and the platform
    # default encoding may not be able to represent it.
    with open(text_file, "w", encoding="utf-8") as f:
        f.write(text)

    return text
36
-
def audio_recog(audiofile):
    """Recognize speech in *audiofile* and persist the transcript.

    Parameters:
        audiofile: path of the audio file to recognize.

    Returns:
        Tuple ``(text, text_file)`` — the recognized text and the path of
        the saved transcript file.
    """
    # Fix: aware "now" with an explicit UTC+8 zone. The original naive
    # utcnow() + astimezone(timedelta(hours=16)) depended on the host's
    # local timezone and used the wrong offset for Beijing time.
    beijing_dt = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=8)))
    print(f"开始时间: {beijing_dt.year}年{beijing_dt.month}月{beijing_dt.day}日 "
          f"{beijing_dt.hour}时{beijing_dt.minute}分{beijing_dt.second}秒")

    print("音频文件:" + audiofile)

    # Transcript file named after the input audio, stored under works_path.
    # (The unused `formatted` timestamp strings from the original were removed.)
    filename = os.path.splitext(os.path.basename(audiofile))[0]
    text_file = works_path + '/' + filename + '.txt'

    text_output = transcript(audiofile, text_file)

    beijing_dt = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=8)))
    print(f"结束时间: {beijing_dt.year}年{beijing_dt.month}月{beijing_dt.day}日 "
          f"{beijing_dt.hour}时{beijing_dt.minute}分{beijing_dt.second}秒")

    return text_output, text_file
58
-
def video_recog(filepath):
    """Extract the audio track of *filepath* and run speech recognition on it.

    Parameters:
        filepath: path of the uploaded video file.

    Returns:
        Tuple ``(text, text_file)`` forwarded from :func:`audio_recog`.
    """
    import subprocess  # local import: only needed for the ffmpeg call

    print("视频文件:" + filepath)

    # Microsecond-resolution timestamp so concurrent requests get distinct
    # audio filenames. Fix: aware UTC+8 "now" instead of the original naive
    # utcnow() + astimezone(hours=16), which was host-timezone dependent and
    # mis-offset for Beijing time. (Unused `worksfile` local removed.)
    beijing_dt = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=8)))
    formatted = beijing_dt.strftime("%Y-%m-%d_%H-%M-%S.%f")

    # Extract the audio track to MP3.
    # Fix: run ffmpeg with an argument list instead of a shell string built
    # from the user-supplied upload path — robust against spaces and shell
    # metacharacters in the filename. check=False mirrors the original
    # os.system() behavior of ignoring ffmpeg's exit status.
    audiofile = works_path + '/' + formatted + '.mp3'
    subprocess.run(
        ["ffmpeg", "-i", filepath, "-vn", "-c:a", "libmp3lame", "-q:a", "4", audiofile],
        check=False,
    )

    # Recognize the extracted audio.
    text_output, text_file = audio_recog(audiofile)

    return text_output, text_file
76
-
# Styling and copy for the Gradio UI.
css_style = (
    "#fixed_size_img {height: 240px;} "
    "#overview {margin: auto;max-width: 400px; max-height: 400px;}"
)

title = "音视频识别 by宁侠"
description = "您只需要上传一段音频或视频文件,我们的服务会快速对其进行语音识别,然后生成相应的文字。这样,您就可以轻松地记录下重要的语音内容。现在就来试试我们的音视频识别服务吧,让您的生活和工作更加便捷!"

# Bundled demo media used by the example widgets below.
examples_path = 'examples/'
examples = [[examples_path + 'demo_shejipuhui.mp4']]
85
-
# ---------------------------------------------------------------------------
# Gradio interface: a header plus two tabs — audio transcription and video
# transcription. Component-creation order defines the on-screen layout.
# NOTE(review): the HTML string's internal indentation was reconstructed from
# a diff rendering — confirm against the original file.
# ---------------------------------------------------------------------------
with gr.Blocks(title=title, css=css_style) as demo:
    # Page header (title + author), rendered as raw HTML.
    gr.HTML('''
      <div style="text-align: center; max-width: 720px; margin: 0 auto;">
        <div
          style="
            display: inline-flex;
            align-items: center;
            gap: 0.8rem;
            font-size: 1.75rem;
          "
        >
          <h1 style="font-family: PingFangSC; font-weight: 500; font-size: 36px; margin-bottom: 7px;">
            音视频识别
          </h1>
          <h1 style="font-family: PingFangSC; font-weight: 500; line-height: 1.5em; font-size: 16px; margin-bottom: 7px;">
            by宁侠
          </h1>
      ''')
    gr.Markdown(description)

    # --- Tab 1: audio transcription --------------------------------------
    with gr.Tab("🔊音频识别 Audio Transcribe"):
        with gr.Row():
            with gr.Column():
                # Input side: audio upload, example clips, and the trigger button.
                audio_input = gr.Audio(label="🔊音频输入 Audio Input", type="filepath")
                gr.Examples(['examples/paddlespeech.asr-zh.wav', 'examples/demo_shejipuhui.mp3'], [audio_input])
                audio_recog_button = gr.Button("👂音频识别 Recognize")
            with gr.Column():
                # Output side: recognized text plus downloadable transcript file.
                audio_text_output = gr.Textbox(label="✏️识别结果 Recognition Result", max_lines=5)
                audio_text_file = gr.File(label="✏️识别结果文件 Recognition Result File")
                # Subtitle generation is not implemented; components kept hidden.
                audio_subtitles_button = gr.Button("添加字幕\nGenerate Subtitles", visible=False)
                audio_output = gr.Audio(label="🔊音频 Audio", visible=False)

        audio_recog_button.click(audio_recog, inputs=[audio_input], outputs=[audio_text_output, audio_text_file])
        # audio_subtitles_button.click(audio_subtitles, inputs=[audio_text_input], outputs=[audio_output])

    # --- Tab 2: video transcription --------------------------------------
    with gr.Tab("🎥视频识别 Video Transcribe"):
        with gr.Row():
            with gr.Column():
                # Input side: video upload, demo example, and the trigger button.
                video_input = gr.Video(label="🎥视频输入 Video Input")
                gr.Examples(['examples/demo_shejipuhui.mp4'], [video_input], label='语音识别示例 ASR Demo')
                video_recog_button = gr.Button("👂视频识别 Recognize")
                video_output = gr.Video(label="🎥视频 Video", visible=False)
            with gr.Column():
                video_text_output = gr.Textbox(label="✏️识别结果 Recognition Result", max_lines=5)
                video_text_file = gr.File(label="✏️识别结果文件 Recognition Result File")
                # Hidden subtitle-styling controls for a future feature.
                with gr.Row(visible=False):
                    font_size = gr.Slider(minimum=10, maximum=100, value=32, step=2, label="🔠字幕字体大小 Subtitle Font Size")
                    font_color = gr.Radio(["black", "white", "green", "red"], label="🌈字幕颜色 Subtitle Color", value='white')
                    video_subtitles_button = gr.Button("添加字幕\nGenerate Subtitles", visible=False)

        video_recog_button.click(video_recog, inputs=[video_input], outputs=[video_text_output, video_text_file])
        # video_subtitles_button.click(video_subtitles, inputs=[video_text_input], outputs=[video_output])

# Start the Gradio service locally: queue requests, keep the HTTP API closed,
# and run with debug logging.
demo.queue(api_open=False).launch(debug=True)