Eason Lu committed
Commit: 61ca873 · 1 Parent(s): af0656a

OOP migrate

Former-commit-id: 795a3ffd76083f399bbb89f25ffbc63bd8ad822d
- pipeline.py +26 -19
- src/Pigeon.py +372 -0
- {srt_util → src}/__init__.py +0 -0
- src/srt_util/__init__.py +0 -0
- {srt_util → src/srt_util}/srt.py +1 -1
- {srt_util → src/srt_util}/srt2ass.py +27 -1
pipeline.py
CHANGED
@@ -2,11 +2,13 @@ import openai
 from pytube import YouTube
 import argparse
 import os
+from pathlib import Path
 from tqdm import tqdm
-from srt_util.srt import SrtScript
+from src.srt_util.srt import SrtScript
+from src.Pigeon import Pigeon
 import stable_whisper
 import whisper
-from srt_util
+from src.srt_util import srt2ass
 import logging
 from datetime import datetime
 import torch
@@ -50,7 +52,7 @@ def get_sources(args, download_path, result_path, video_name):
     video = None
     audio = None
     try:
-        yt = YouTube(video_link)
+        yt = YouTube(video_link, use_oauth=True, allow_oauth_cache=True)
         video = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first()
         if video:
             video.download(f'{download_path}/video')
@@ -93,7 +95,6 @@ def get_sources(args, download_path, result_path, video_name):
         audio_file = open(args.audio_file, "rb")
         audio_path = args.audio_file
         pass
-
     return audio_path, audio_file, video_path, video_name


@@ -117,6 +118,7 @@ def get_srt_class(srt_file_en, result_path, video_name, audio_path, audio_file=N
         model = whisper.load_model(whisper_model,
                                    device=devices)  # using base model in local machine (may use large model on our server)
         transcript = model.transcribe(audio_path)
+        srt = SRT_script(transcript['segments'])  # read segments to SRT class

     # use stable-whisper
     elif method == "stable":
@@ -133,6 +135,7 @@ def get_srt_class(srt_file_en, result_path, video_name, audio_path, audio_file=N
             .split_by_punctuation(['.', '。', '?'])
         )
         transcript = transcript.to_dict()
+        srt = SRT_script(transcript['segments'])  # read segments to SRT class
     else:
         raise ValueError("invalid speech to text method")

@@ -257,7 +260,7 @@ def translate(srt, script_arr, range_arr, model_name, video_name, video_link, at
             # if failure still happen, split into smaller tokens
             if attempts_count == 0:
                 single_sentences = sentence.split("\n\n")
-                logging.info(
+                logging.info("merge sentence issue found for range", range)
                 translate = ""
                 for i, single_sentence in enumerate(single_sentences):
                     if i == len(single_sentences) - 1:
@@ -277,26 +280,25 @@ def translate(srt, script_arr, range_arr, model_name, video_name, video_link, at
     srt.set_translation(translate, range, model_name, video_name, video_link)


-def
+def main_old():
     args = parse_args()

     # input check: input should be either video file or youtube video link.
     if args.link is None and args.video_file is None and args.srt_file is None and args.audio_file is None:
-
-        exit()
+        raise TypeError("need video source or srt file")

     # set up
     start_time = time.time()
     openai.api_key = os.getenv("OPENAI_API_KEY")
-    DOWNLOAD_PATH = args.download
-    if not
-
-
-
+    DOWNLOAD_PATH = Path(args.download)
+    if not DOWNLOAD_PATH.exists():
+        DOWNLOAD_PATH.mkdir(parents=False, exist_ok=False)
+        DOWNLOAD_PATH.joinpath('audio').mkdir(parents=False, exist_ok=False)
+        DOWNLOAD_PATH.joinpath('video').mkdir(parents=False, exist_ok=False)

-    RESULT_PATH = args.output_dir
-    if not
-
+    RESULT_PATH = Path(args.output_dir)
+    if not RESULT_PATH.exists():
+        RESULT_PATH.mkdir(parents=False, exist_ok=False)

     # set video name as the input file name if not specified
     if args.video_name == 'placeholder':
@@ -322,14 +324,14 @@ def main():
     logging.info("---------------------Video Info---------------------")
     logging.info("Video name: {}, translation model: {}, video link: {}".format(VIDEO_NAME, args.model_name, args.link))

-    srt_file_en, srt = get_srt_class(args.srt_file, RESULT_PATH, VIDEO_NAME, audio_path, audio_file)
+    srt_file_en, srt = get_srt_class(args.srt_file, RESULT_PATH, VIDEO_NAME, audio_path, audio_file, method="api")

     # SRT class preprocess
     logging.info("---------------------Start Preprocessing SRT class---------------------")
     srt.write_srt_file_src(srt_file_en)
     srt.form_whole_sentence()
-    srt.spell_check_term()
-    srt.correct_with_force_term()
+    # srt.spell_check_term()
+    # srt.correct_with_force_term()
     processed_srt_file_en = srt_file_en.split('.srt')[0] + '_processed.srt'
     srt.write_srt_file_src(processed_srt_file_en)
     script_input = srt.get_source_only()
@@ -372,5 +374,10 @@ def main():
         "Pipeline finished, time duration:{}".format(time.strftime("%H:%M:%S", time.gmtime(end_time - start_time))))


+def main():
+    pigeon = Pigeon()
+    pigeon.run()
+
+
 if __name__ == "__main__":
     main()
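The commit reduces pipeline.py's main() to a thin wrapper around the new Pigeon class. A minimal sketch of the intended invocation, assuming the CLI flags defined in Pigeon.parse() below (the YouTube link is a placeholder):

    # Run the refactored pipeline end to end; Pigeon.parse() consumes argv inside __init__:
    #   python pipeline.py --link "https://youtu.be/<video-id>" --model_name gpt-4 -v
    from src.Pigeon import Pigeon

    pigeon = Pigeon()  # parses args, prepares download/result/log dirs, fetches media
    pigeon.run()       # preprocess -> start_translation -> postprocess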
src/Pigeon.py
ADDED
@@ -0,0 +1,372 @@
import logging
import subprocess
from argparse import ArgumentParser
from os import getenv
from pathlib import Path
from time import time, strftime, gmtime, sleep
from tqdm import tqdm
from datetime import datetime

import openai
import stable_whisper
import torch
import whisper
from pytube import YouTube

from src.srt_util.srt import SrtScript
from src.srt_util.srt2ass import srt2ass


def split_script(script_in, chunk_size=1000):
    script_split = script_in.split('\n\n')
    script_arr = []
    range_arr = []
    start = 1
    end = 0
    script = ""
    for sentence in script_split:
        if len(script) + len(sentence) + 1 <= chunk_size:
            script += sentence + '\n\n'
            end += 1
        else:
            range_arr.append((start, end))
            start = end + 1
            end += 1
            script_arr.append(script.strip())
            script = sentence + '\n\n'
    if script.strip():
        script_arr.append(script.strip())
        range_arr.append((start, len(script_split) - 1))

    assert len(script_arr) == len(range_arr)
    return script_arr, range_arr


def get_response(model_name, sentence):
    """
    Generates a translated response for a given sentence using a specified OpenAI model.

    :param model_name: The name of the OpenAI model to be used for translation, either "gpt-3.5-turbo" or "gpt-4".
    :param sentence: The English sentence related to StarCraft 2 videos that needs to be translated into Chinese.

    :return: The translated Chinese sentence, maintaining the original format, meaning, and number of lines.
    """

    if model_name == "gpt-3.5-turbo" or model_name == "gpt-4":
        response = openai.ChatCompletion.create(
            model=model_name,
            messages=[
                # {"role": "system", "content": "You are a helpful assistant that translates English to Chinese and have decent background in starcraft2."},
                # {"role": "system", "content": "Your translation has to keep the orginal format and be as accurate as possible."},
                # {"role": "system", "content": "Your translation needs to be consistent with the number of sentences in the original."},
                # {"role": "system", "content": "There is no need for you to add any comments or notes."},
                # {"role": "user", "content": 'Translate the following English text to Chinese: "{}"'.format(sentence)}
                {"role": "system",
                 "content": "你是一个翻译助理,你的任务是翻译星际争霸视频,你会被提供一个按行分割的英文段落,你需要在保证句意和行数的情况下输出翻译后的文本。"},
                {"role": "user", "content": sentence}
            ],
            temperature=0.15
        )

        return response['choices'][0]['message']['content'].strip()


def check_translation(sentence, translation):
    """
    Check for the merge-sentence issue in an OpenAI translation: the translation
    must contain the same number of blank-line-separated blocks as the source.
    """
    sentence_count = sentence.count('\n\n') + 1
    translation_count = translation.count('\n\n') + 1

    if sentence_count != translation_count:
        # print("sentence length: ", len(sentence), sentence_count)
        # print("translation length: ", len(translation), translation_count)
        return False
    else:
        return True


# Translate and save
def translate(srt, script_arr, range_arr, model_name, video_name, video_link, attempts_count=5):
    """
    Translates the given script array into another language using ChatGPT and writes the result to the SRT file.

    This function takes a script array, a range array, a model name, a video name, and a video link as input. It iterates
    through the sentences and ranges in the script and range arrays. If the translation check fails five times, the function
    attempts to resolve merge-sentence issues by splitting the sentence into smaller chunks and translating them one by one.

    :param srt: An instance of the Subtitle class representing the SRT file.
    :param script_arr: A list of strings representing the original script sentences to be translated.
    :param range_arr: A list of tuples representing the start and end positions of sentences in the script.
    :param model_name: The name of the translation model to be used.
    :param video_name: The name of the video.
    :param video_link: The link to the video.
    :param attempts_count: Number of retry attempts allowed for unmatched sentences.
    """
    logging.info("Start translating...")
    previous_length = 0
    for sentence, range_ in tqdm(zip(script_arr, range_arr)):
        # update the range based on previous length
        range_ = (range_[0] + previous_length, range_[1] + previous_length)

        # using chatgpt model
        print(f"now translating sentences {range_}")
        logging.info(f"now translating sentences {range_}, time: {datetime.now()}")
        flag = True
        while flag:
            flag = False
            try:
                translate = get_response(model_name, sentence)
                # detect merge sentence issue and try to solve it up to five times:
                while not check_translation(sentence, translate) and attempts_count > 0:
                    translate = get_response(model_name, sentence)
                    attempts_count -= 1

                # if failure still happens, split into smaller chunks
                if attempts_count == 0:
                    single_sentences = sentence.split("\n\n")
                    # f-string instead of extra positional args, which logging would mis-handle
                    logging.info(f"merge sentence issue found for range {range_}")
                    translate = ""
                    for i, single_sentence in enumerate(single_sentences):
                        if i == len(single_sentences) - 1:
                            translate += get_response(model_name, single_sentence)
                        else:
                            translate += get_response(model_name, single_sentence) + "\n\n"
                        # print(single_sentence, translate.split("\n\n")[-2])
                    logging.info("solved by translating sentences individually!")

            except Exception as e:
                logging.debug(f"An error has occurred during translation: {e}")
                print("An error has occurred during translation:", e)
                print("Retrying... the script will continue after 30 seconds.")
                sleep(30)
                flag = True

        srt.set_translation(translate, range_, model_name, video_name, video_link)


class Pigeon(object):
    def __init__(self):
        openai.api_key = getenv("OPENAI_API_KEY")
        self.v = False
        self.dir_download = None
        self.dir_result = None
        self.dir_log = None
        self.srt_path = None
        self.srt_only = False
        self.srt = None
        self.video_name = None
        self.video_path = None
        self.audio_path = None

        self.video_link = None
        self.video_file = None

        self.model = None

        self.parse()

        self.t_s = None
        self.t_e = None

    def parse(self):
        parser = ArgumentParser()
        parser.add_argument("--link", help="youtube video link here", type=str)
        parser.add_argument("--video_file", help="local video path", type=str)
        parser.add_argument("--video_name", help="video name, auto-filled if not provided")
        parser.add_argument("--audio_file", help="local audio path")
        parser.add_argument("--srt_file", help="srt file input path here", type=str)  # New argument
        parser.add_argument("--download", help="download path", default='./downloads')
        parser.add_argument("--output_dir", help="translate result path", default='./results')
        # default changed to gpt-4
        parser.add_argument("--model_name", help="model name, only gpt-4 and gpt-3.5-turbo are supported", default="gpt-4")
        parser.add_argument("--log_dir", help="log path", default='./logs')
        parser.add_argument("-only_srt", help="set script output to only .srt file", action='store_true')
        parser.add_argument("-v", help="auto encode script with video", action='store_true')
        args = parser.parse_args()

        self.v = args.v
        self.model = args.model_name
        self.srt_path = args.srt_file
        self.srt_only = args.only_srt

        # Set download path
        self.dir_download = Path(args.download)
        if not self.dir_download.exists():
            self.dir_download.mkdir(parents=False, exist_ok=False)
            self.dir_download.joinpath('audio').mkdir(parents=False, exist_ok=False)
            self.dir_download.joinpath('video').mkdir(parents=False, exist_ok=False)

        # Set result path
        self.dir_result = Path(args.output_dir)
        if not self.dir_result.exists():
            self.dir_result.mkdir(parents=False, exist_ok=False)

        # TODO: change if-else logic
        # Next, prepare video & audio files
        # Set video related
        if args.link is not None and (args.video_file is not None or args.audio_file is not None):
            raise ValueError("Please provide either video link or video/audio file path, not both.")
        if args.link is not None:
            self.video_link = args.link
            # Download audio from YouTube
            try:
                yt = YouTube(self.video_link)
                video = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first()
                if video:
                    video.download(str(self.dir_download.joinpath("video")))
                    print(f'Video download completed to {self.dir_download.joinpath("video")}!')
                else:
                    raise FileNotFoundError(f"Video stream not found for link {self.video_link}")
                audio = yt.streams.filter(only_audio=True, file_extension='mp4').first()
                if audio:
                    audio.download(str(self.dir_download.joinpath("audio")))
                    print(f'Audio download completed to {self.dir_download.joinpath("audio")}!')
                else:
                    raise FileNotFoundError(f"Audio stream not found for link {self.video_link}")
            except Exception as e:
                print("Connection Error: ", end='')
                print(e)
                raise ConnectionError
            self.video_path = self.dir_download.joinpath("video").joinpath(video.default_filename)
            self.audio_path = self.dir_download.joinpath("audio").joinpath(audio.default_filename)
            if args.video_name is not None:
                self.video_name = args.video_name
            else:
                self.video_name = Path(video.default_filename).stem
        else:
            if args.video_file is not None:
                # Read from local video file
                self.video_path = args.video_file
                if args.video_name is not None:
                    self.video_name = args.video_name
                else:
                    self.video_name = Path(self.video_path).stem
                if args.audio_file is not None:
                    self.audio_path = args.audio_file
                else:
                    # extract the audio track with ffmpeg
                    audio_path_out = self.dir_download.joinpath("audio").joinpath(f"{self.video_name}.mp3")
                    subprocess.run(['ffmpeg', '-i', self.video_path, '-f', 'mp3', '-ab', '192000', '-vn', audio_path_out])
                    self.audio_path = audio_path_out
            else:
                raise NotImplementedError("Audio-file-only input is not supported yet")

        if not self.dir_result.joinpath(self.video_name).exists():
            self.dir_result.joinpath(self.video_name).mkdir(parents=False, exist_ok=False)

        # Log setup
        self.dir_log = Path(args.log_dir)
        if not Path(args.log_dir).exists():
            self.dir_log.mkdir(parents=False, exist_ok=False)
        logging.basicConfig(level=logging.INFO, handlers=[
            logging.FileHandler(
                "{}/{}_{}.log".format(self.dir_log, self.video_name, datetime.now().strftime("%m%d%Y_%H%M%S")),
                'w', encoding='utf-8')])
        logging.info("---------------------Video Info---------------------")
        logging.info(
            f"Video name: {self.video_name}, translation model: {self.model}, video link: {self.video_link}")
        return

    def get_srt_class(self, whisper_model='tiny', method="stable"):
        # Instead of using the script_en variable directly, we'll use script_input
        if self.srt_path is not None:
            srt = SrtScript.parse_from_srt_file(self.srt_path)
        else:
            # use whisper to perform speech-to-text and save it as <video name>_en.srt under the result path
            self.srt_path = Path(f"{self.dir_result}/{self.video_name}/{self.video_name}_en.srt")
            if not Path(self.srt_path).exists():
                device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
                # use OpenAI API for transcription
                if method == "api":
                    with open(self.audio_path, "rb") as audio_file:
                        transcript = openai.Audio.transcribe("whisper-1", audio_file)
                # use local whisper model
                elif method == "basic":
                    # using base model on the local machine (may use a large model on our server)
                    model = whisper.load_model(whisper_model, device=device)
                    transcript = model.transcribe(self.audio_path)
                # use stable-whisper
                elif method == "stable":
                    # use cuda if available
                    model = stable_whisper.load_model(whisper_model, device=device)
                    transcript = model.transcribe(str(self.audio_path), regroup=False,
                                                  initial_prompt="Hello, welcome to my lecture. Are you good my friend?")
                    (
                        transcript
                        .split_by_punctuation(['.', '。', '?'])
                        .merge_by_gap(.15, max_words=3)
                        .merge_by_punctuation([' '])
                        .split_by_punctuation(['.', '。', '?'])
                    )
                    transcript = transcript.to_dict()
                else:
                    raise ValueError("invalid speech to text method")

                srt = SrtScript(transcript['segments'])  # read segments to SRT class
            else:
                srt = SrtScript.parse_from_srt_file(self.srt_path)
        self.srt = srt
        return

    def preprocess(self):
        self.t_s = time()
        self.get_srt_class()
        # SRT class preprocess
        logging.info("--------------------Start Preprocessing SRT class--------------------")
        self.srt.write_srt_file_src(self.srt_path)
        self.srt.form_whole_sentence()
        self.srt.spell_check_term()
        self.srt.correct_with_force_term()
        processed_srt_file_en = str(Path(self.srt_path).with_suffix('')) + '_processed.srt'
        self.srt.write_srt_file_src(processed_srt_file_en)
        script_input = self.srt.get_source_only()

        # write ass
        if not self.srt_only:
            logging.info("write English .srt file to .ass")
            assSub_en = srt2ass(processed_srt_file_en, "default", "No", "Modest")
            logging.info('ASS subtitle saved as: ' + assSub_en)
        return script_input

    def start_translation(self, script_input):
        script_arr, range_arr = split_script(script_input)
        logging.info("---------------------Start Translation--------------------")
        translate(self.srt, script_arr, range_arr, self.model, self.video_name, self.video_link)

    def postprocess(self):
        # SRT post-processing
        logging.info("---------------------Start Post-processing SRT class---------------------")
        self.srt.check_len_and_split()
        self.srt.remove_trans_punctuation()

        base_path = Path(self.dir_result).joinpath(self.video_name).joinpath(self.video_name)

        self.srt.write_srt_file_translate(f"{base_path}_zh.srt")
        self.srt.write_srt_file_bilingual(f"{base_path}_bi.srt")

        # write ass
        if not self.srt_only:
            logging.info("write Chinese .srt file to .ass")
            assSub_zh = srt2ass(f"{base_path}_zh.srt", "default", "No", "Modest")
            logging.info('ASS subtitle saved as: ' + assSub_zh)

        # encode to .mp4 video file
        if self.v:
            logging.info("encoding video file")
            if self.srt_only:
                # a plain command string requires shell=True under subprocess.run
                subprocess.run(
                    f'ffmpeg -i {self.video_path} -vf "subtitles={base_path}_zh.srt" {base_path}.mp4', shell=True)
            else:
                subprocess.run(
                    f'ffmpeg -i {self.video_path} -vf "subtitles={base_path}_zh.ass" {base_path}.mp4', shell=True)

        self.t_e = time()
        logging.info(
            "Pipeline finished, time duration:{}".format(strftime("%H:%M:%S", gmtime(self.t_e - self.t_s))))

    def run(self):
        script_input = self.preprocess()
        self.start_translation(script_input)
        self.postprocess()
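split_script and check_translation are pure string helpers, so they can be exercised without an API key or media files. A small self-contained sketch (the sample dialogue is invented for illustration):

    from src.Pigeon import split_script, check_translation

    # Three blank-line-separated sentences, chunked with a deliberately small chunk_size.
    script = "First line of dialogue.\n\nSecond line.\n\nThird line."
    script_arr, range_arr = split_script(script, chunk_size=30)
    print(script_arr)  # ['First line of dialogue.', 'Second line.\n\nThird line.']
    print(range_arr)   # [(1, 1), (2, 2)]: 1-based (start, end) sentence indices

    # check_translation compares only the number of blank-line-separated blocks,
    # which is how the pipeline detects the merge-sentence issue.
    assert check_translation("a\n\nb", "甲\n\n乙")   # 2 blocks vs. 2 blocks
    assert not check_translation("a\n\nb", "甲乙")   # merged into 1 block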
{srt_util → src}/__init__.py
RENAMED
File without changes
src/srt_util/__init__.py
ADDED
File without changes
{srt_util → src/srt_util}/srt.py
RENAMED
@@ -1,5 +1,6 @@
 import os
 import re
+from pathlib import Path
 from copy import copy, deepcopy
 from csv import reader
 from datetime import timedelta
@@ -73,7 +74,6 @@ class SrtSegment(object):
         :param other: Another segment that is strictly next to added segment.
         :return: new segment of the two sub-segments
         """
-
         result = deepcopy(self)
         result.merge_seg(other)
         return result
{srt_util → src/srt_util}/srt2ass.py
RENAMED
@@ -78,6 +78,8 @@ def srt2ass(input_file,sub_style, is_split, split_method):
         dlgLines += 'Dialogue: 0,' + line + ',正文_1080P,,0,0,0,,'
     elif sub_style == 'asukaCN':
         dlgLines += 'Dialogue: 0,' + line + ',DEFAULT1,,0,0,0,,'
+    elif sub_style == 'starPigeon':
+        dlgLines += 'Dialogue: 0,' + line + ',Starcraft 2 下(一般字幕),,0,0,0,,'
     else:
         if lineCount < 2:
             dlg_string = line
@@ -128,6 +130,8 @@ def srt2ass(input_file,sub_style, is_split, split_method):
         head_name = 'head_str_taniguchi'
     elif sub_style == 'asukaCN':
         head_name = 'head_str_asuka'
+    elif sub_style == 'starPigeon':
+        head_name = 'head_str_pigeon'

     head_str = STYLE_DICT.get(head_name)
     output_str = utf8bom + head_str + '\n' + subLines
@@ -288,7 +292,29 @@ Style: 毕业曲MV 日文歌词,思源黑体 CN,58,&H0AFFFFFF,&H000000FF,&H0F000

 [Events]
 Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
-Dialogue: 0,0:00:00.00,0:00:05.00,Default,,0,0,0,,'''
+Dialogue: 0,0:00:00.00,0:00:05.00,Default,,0,0,0,,''',
+    'head_str_pigeon': '''[Script Info]
+; The script is generated by project-t owned by starpigeon
+; http://www.aegisub.org/
+Title: Default Aegisub file
+ScriptType: v4.00+
+WrapStyle: 0
+ScaledBorderAndShadow: yes
+YCbCr Matrix: None
+
+[Aegisub Project Garbage]
+
+[V4+ Styles]
+Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
+Style: Default,Arial,20,&H00FFFFFF,&H000000FF,&H00000000,&H00000000,0,0,0,0,100,100,0,0,1,2,2,2,10,10,10,1
+Style: Starcraft 2 下(一般字幕),思源黑体 CN Normal,62,&H00FFFFFF,&H000000FF,&H634C4B45,&H00000000,0,0,0,0,100,100,0,0,3,1,0,2,10,10,30,1
+Style: Starcraft 2 中(录像模式),思源黑体 CN Normal,62,&H00FFFFFF,&H000000FF,&H634C4B45,&H00000000,0,0,0,0,100,100,0,0,3,1,0,2,10,10,260,1
+Style: Starcraft 2 上(实战模式),思源黑体 CN Normal,62,&H00FFFFFF,&H000000FF,&H634C4B45,&H00000000,0,0,0,0,100,100,0,0,3,1,0,2,10,10,225,1
+Style: mianze,思源黑体 CN Normal,48,&H20FFFFFF,&HF0000000,&H20000000,&H000F0F0F,-1,0,0,0,100,100,0,0,1,0.8,0,9,8,24,8,1
+
+[Events]
+Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
+Dialogue: 0,0:00:00.00,0:00:03.00,mianze,,0,0,0,,{\fad(200,200)\an8}本字幕由星际鸽子字幕组制作'''
 # ADD MORE
 }
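The new 'starPigeon' style is selected like the existing ones. A hedged usage sketch (the input path is made up; the "No"/"Modest" arguments mirror what the pipeline itself passes to srt2ass):

    from src.srt_util.srt2ass import srt2ass

    # Convert a translated .srt into an .ass file with the pigeon header and styles;
    # srt2ass returns the path of the .ass file it wrote.
    ass_path = srt2ass("results/demo/demo_zh.srt", "starPigeon", "No", "Modest")
    print("ASS subtitle saved as:", ass_path)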