Added RabbitMQ & Celery Functionality
- api_functions.py +354 -0
- celery_config.py +3 -0
- helperfunctions.py +3 -2
- main.py +128 -288
- requirements.txt +41 -7
api_functions.py
ADDED
@@ -0,0 +1,354 @@
import os
import json
import uuid
import requests

from helperfunctions import *
from models import load_models
from languages import CODE2LANG
from s3_handler import S3Handler

from media_download import YoutubeDownloader
from transcription import StableWhisper
from translation import Translation
from summarizer import Extract_Summary, AudioBookNarration
from audiobook import AudioBook

from celery import Celery

celery = Celery('testing.py', broker='pyamqp://guest:guest@localhost//', backend='rpc://')

# Output Directory for Files Storage
output_folder = 'Output'

# S3 Handler
s3 = S3Handler()

# Create a context variable to store the contexts for each user
users_context = dict()

# ML models referenced by the transcription / translation tasks below
# (they must be available inside the Celery worker process)
MODELS = load_models()


@celery.task
def get_media_metadata_task(user_id: str, url: str):

    # User Folder Path
    user_folder_path = os.path.join(output_folder, user_id)

    # Getting User's Youtube Downloader
    youtube_downloader = YoutubeDownloader(url, user_folder_path)

    # Getting Youtube Media Info
    media_metadata = youtube_downloader.get_media_metadata()

    # Storing User's Media Metadata to Directory
    media_metadata_path = os.path.join(user_folder_path, 'media_metadata.json')
    with open(media_metadata_path, "w") as outfile:
        json.dump(media_metadata, outfile)

    # Storing User's Media Metadata to S3
    s3_path = s3.upload_file(user_id, 'media_metadata.json', media_metadata_path)

    # Getting Status
    status = 1 if media_metadata else 0

    if status:
        # Storing Info in the context for this user's session
        users_context[user_id] = dict()
        users_context[user_id]['downloader'] = youtube_downloader
        # users_context[user_id]['media_metadata'] = media_metadata
        users_context[user_id]['url'] = url

    return {'status': status,
            'user_id': user_id,
            'media_metadata': media_metadata,
            'media_metadata_path': s3_path}


@celery.task
def get_media_formats_task(user_id):

    # Getting Media Formats for User
    media_formats = users_context[user_id]['downloader'].get_media_formats()

    # User Folder Path
    user_folder_path = os.path.join(output_folder, user_id)

    # Storing User's Media Formats to Directory
    media_formats_path = os.path.join(user_folder_path, 'media_formats.json')
    with open(media_formats_path, "w") as outfile:
        json.dump(media_formats, outfile)

    # Storing User's Media Formats to S3
    s3_path = s3.upload_file(user_id, 'media_formats.json', media_formats_path)

    # Getting Status
    status = 1 if media_formats else 0

    if status:
        # Storing Media Info in the context for this user's session
        users_context[user_id]['media_formats'] = media_formats

    return {'status': status,
            'media_formats': media_formats,
            'media_formats_path': s3_path}


@celery.task
def download_media_task(user_id, media_type: str, media_format: str, media_quality: str):

    # Downloading Media for User
    media_path = users_context[user_id]['downloader'].download(media_type, media_format, media_quality)

    # Storing User's Downloaded Media to S3
    media_file = f"{media_type.lower()}_{media_quality.lower()}.{media_format.lower()}"
    s3_path = s3.upload_file(user_id, media_file, media_path)

    # Getting Status
    status = 1 if media_path else 0

    if status:
        # Storing Media Info in the context for this user's session
        users_context[user_id]['media_path'] = media_path
        users_context[user_id]['media_type'] = media_type

    return {'status': status, 'media_path': s3_path}


@celery.task
def get_transcript_task(user_id: str, subtitle_format: str = 'srt', word_level: bool = False):

    # If Video Already Downloaded
    if 'media_path' in users_context[user_id].keys():

        # Retrieving the media_path from the context for this user's session
        media_path = users_context[user_id]['media_path']

        # Checking if the media_type is Video, then extract its audio
        media_type = users_context[user_id]['media_type']
        if media_type == 'video':
            media_path = extract_audio(media_path)

    else:
        # Downloading Audio for Transcription
        media_path = users_context[user_id]['downloader'].download('audio', 'mp3', '128kbps')

    # Whisper based transcription
    user_folder_path = os.path.join(output_folder, user_id)
    stable_whisper_transcript = StableWhisper(model=MODELS['transcription'],
                                              media_path=media_path,
                                              output_path=user_folder_path,
                                              subtitle_format=subtitle_format,
                                              word_level=word_level)
    transcript = stable_whisper_transcript.generate_transcript()
    transcript_path = stable_whisper_transcript.save_transcript()
    subtitles_path = stable_whisper_transcript.save_subtitles()

    # Storing User's Transcripts to S3
    s3_transcript_path = s3.upload_file(user_id, 'transcript.txt', transcript_path)
    s3_subtitles_path = s3.upload_file(user_id, f'subtitles.{subtitle_format}', subtitles_path)

    # Getting Status
    status = 1 if transcript and s3_transcript_path and s3_subtitles_path else 0

    if status:
        # Storing Transcript Info in the context for this user's session
        users_context[user_id]['transcript'] = transcript
        users_context[user_id]['transcript_path'] = transcript_path
        users_context[user_id]['subtitles_path'] = subtitles_path

    return {'status': status,
            'transcript': transcript,
            'transcript_path': s3_transcript_path,
            'subtitles_path': s3_subtitles_path}


@celery.task
def get_translation_task(user_id: str, target_language: str = 'en'):

    # If Transcript Available
    if 'transcript' in users_context[user_id].keys():

        # Retrieving the transcript from the context for this user's session
        transcript = users_context[user_id]['transcript']

    else:
        return {'status': 0, 'message': 'Transcript not generated yet'}

    # NLLB based Translation
    user_folder_path = os.path.join(output_folder, user_id)
    nllb_translator = Translation(model=MODELS['translation'],
                                  transcript_dict=transcript,
                                  source_lang=transcript['language'],
                                  target_lang=target_language,
                                  output_path=user_folder_path)
    translated_transcript = nllb_translator.get_translated_transcript()
    translated_subtitles = nllb_translator.get_translated_subtitles()

    # Storing Translated Transcript as TXT file in UTF-8 format
    translated_transcript_path = os.path.join(user_folder_path, 'translated_transcript.txt')
    with open(translated_transcript_path, 'w', encoding='utf-8') as file:
        file.write(translated_transcript)

    # Storing Translated Transcript to S3
    s3_transcript_path = s3.upload_file(user_id, 'translated_transcript.txt', translated_transcript_path)

    # TODO: Write Translated Transcript as SRT, VTT, ASS files
    # Storing Translated Subtitles as JSON file (For Now)
    translated_subtitles_path = os.path.join(user_folder_path, 'translated_subtitles.json')
    with open(translated_subtitles_path, "w", encoding='utf-8') as file:
        json.dump(translated_subtitles, file)

    # Storing Translated Subtitles to S3
    s3_subtitles_path = s3.upload_file(user_id, 'translated_subtitles.json', translated_subtitles_path)

    # Getting Status
    status = 1 if translated_transcript and translated_subtitles else 0

    if status:
        # Storing Translated Transcript Info in the context for this user's session
        users_context[user_id]['translated_transcript'] = translated_transcript
        users_context[user_id]['translated_transcript_path'] = translated_transcript_path
        users_context[user_id]['translated_subtitles'] = translated_subtitles
        users_context[user_id]['translated_subtitles_path'] = translated_subtitles_path

    return {'status': status,
            'transcript': translated_transcript,
            'subtitles': translated_subtitles,
            'transcript_path': s3_transcript_path,
            'subtitles_path': s3_subtitles_path}


@celery.task
def get_summary_task(user_id: str, Summary_type: str, Summary_strategy: str, Target_Person_type: str,
                     Response_length: str, Writing_style: str):

    # If Transcript Available
    if 'transcript' in users_context[user_id].keys():

        # Retrieving the transcript from the context for this user's session
        text_input = users_context[user_id]['transcript']

    else:
        return {'status': 0, 'message': 'Transcript not generated yet'}

    # Extracting Summary
    summary_extractor = Extract_Summary(text_input=text_input)
    output = summary_extractor.define_chain(Summary_type=Summary_type,
                                            Summary_strategy=Summary_strategy,
                                            Target_Person_type=Target_Person_type,
                                            Response_length=Response_length,
                                            Writing_style=Writing_style,
                                            key_information=False)

    # Getting Status
    status = 1 if output else 0

    if status:
        # Storing Summary Info in the context for this user's session
        users_context[user_id]['summary'] = output

    return {'status': status, "summary": output}


@celery.task
def get_key_info_task(user_id: str, Summary_type: str, Summary_strategy: str, Target_Person_type: str,
                      Response_length: str, Writing_style: str):

    # If Transcript Available
    if 'transcript' in users_context[user_id].keys():

        # Retrieving the transcript from the context for this user's session
        text_input = users_context[user_id]['transcript']

    else:
        return {'status': 0, 'message': 'Transcript not generated yet'}

    # Extracting Key Value Info
    summary_extractor = Extract_Summary(text_input=text_input)
    output = summary_extractor.define_chain(Summary_type=Summary_type,
                                            Summary_strategy=Summary_strategy,
                                            Target_Person_type=Target_Person_type,
                                            Response_length=Response_length,
                                            Writing_style=Writing_style,
                                            key_information=True)

    # Getting Status
    status = 1 if output else 0

    if status:
        # Storing Key Info in the context for this user's session
        users_context[user_id]['key_info'] = output

    return {'status': status, "key_info": output}


@celery.task
def get_audiobook_task(user_id: str, narration_style: str, speaker: str = "male",
                       audio_format: str = "mp3", audio_quality: str = "128kbps"):

    # If Transcript Available
    if 'transcript' in users_context[user_id].keys():

        # Retrieving the transcript from the context for this user's session
        text_input = users_context[user_id]['transcript']

    else:
        return {'status': 0, 'message': 'Transcript not generated yet'}

    # Extracting Narration
    narrator = AudioBookNarration(text_input=text_input)
    output = narrator.define_chain(narration_style=narration_style)

    # Generating Audiobook
    audiobook = AudioBook(output_folder=output_folder)
    audio_path = audiobook.generate_audio_from_text(output, speaker=speaker, filename="output_audio")

    # Converting the Audio to Required Audio Parameters
    audio_path = convert_audio(audio_path, audio_format, audio_quality)

    # Storing User's Audiobook to S3
    media_file = f"audiobook_{audio_quality.lower()}.{audio_format.lower()}"
    s3_path = s3.upload_file(user_id, media_file, audio_path)

    # Getting Status
    status = 1 if audio_path else 0

    if status:
        # Storing Audiobook path in the context for this user's session
        users_context[user_id]['audiobook_path'] = audio_path

    return {'status': status, "audiobook_path": s3_path}


@celery.task
def get_rendered_video_task(user_id: str, video_format: str,
                            video_quality: str, subtitles_type: str = 'original'):

    # # Retrieving the media_path from the context for this user's session
    # media_path = users_context[user_id]['media_path']

    # Downloading Video with Required Video Parameters for User
    media_path = users_context[user_id]['downloader'].download('video', video_format, video_quality)

    # Getting Required Subtitles
    if 'original' in subtitles_type.lower():
        subtitles_path = users_context[user_id]['subtitles_path']

    elif 'translated' in subtitles_type.lower():

        # Getting Translated Subtitles from the context for this user's session
        translated_subtitles = users_context[user_id]['translated_subtitles_path']

        # Saving Translated Subtitles
        subtitles_path = save_translated_subtitles(translated_subtitles, media_path)

    # Burning Subtitles & Rendering Video
    rendered_video_path = burn_subtitles(media_path, subtitles_path)

    # Storing User's Rendered Video to S3
    media_file = f"subtitles_video_{video_quality.lower()}.{video_format.lower()}"
    s3_path = s3.upload_file(user_id, media_file, rendered_video_path)

    # Getting Status
    status = 1 if rendered_video_path else 0

    return {'status': status, "rendered_video_path": s3_path}
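Because these functions are registered as Celery tasks, they can also be exercised without going through FastAPI once RabbitMQ is running locally and a worker has been started from this module, e.g. `celery -A api_functions worker --pool=solo --loglevel=INFO` (a single-process pool, since the tasks share the in-memory users_context dict). The sketch below is illustrative only; the user id and video URL are placeholders.

from api_functions import get_media_metadata_task, get_media_formats_task

user_id = "demo-user"                                   # placeholder id
url = "https://www.youtube.com/watch?v=dQw4w9WgXcQ"     # placeholder video URL

# .delay() enqueues the task on the RabbitMQ broker and returns an AsyncResult
result = get_media_metadata_task.delay(user_id, url)
print(result.id, result.status)            # PENDING until a worker picks it up

# .get() blocks until the worker pushes the return value through the rpc:// backend
metadata = result.get(timeout=300)
print(metadata["status"], metadata["media_metadata_path"])

# Later tasks look up the downloader that the first task stored in users_context,
# which is why they need to land in the same worker process
formats = get_media_formats_task.delay(user_id).get(timeout=60)
print(formats["media_formats"])

Keeping per-user state in a module-level dict works only as long as a single worker process serves all of a user's tasks; a multi-process or multi-machine deployment would need that context moved to shared storage such as S3 or Redis.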
celery_config.py
ADDED
@@ -0,0 +1,3 @@
# Celery
broker_url = 'pyamqp://guest:guest@127.0.0.1:5672//'
result_backend = 'rpc://'
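main.py imports broker_url and result_backend from this module and also loads it via config_from_object('celery_config'), so these two names are the single place the broker settings live. If the guest credentials should not be hard-coded in the repo, a variant along the lines of the sketch below (my illustration, using hypothetical CELERY_BROKER_URL and CELERY_RESULT_BACKEND environment variables) keeps the same two settings but lets them be overridden per environment.

# celery_config.py (environment-driven sketch, not part of this commit)
import os

# Fall back to the local RabbitMQ instance used above when no override is set
broker_url = os.environ.get("CELERY_BROKER_URL", "pyamqp://guest:guest@127.0.0.1:5672//")
result_backend = os.environ.get("CELERY_RESULT_BACKEND", "rpc://")

Either way, the worker is started against the task module with `celery -A api_functions worker --loglevel=INFO`, and the flower pin added to requirements.txt can be used to monitor it with `celery -A api_functions flower`.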
helperfunctions.py
CHANGED
@@ -140,7 +140,8 @@ def convert_audio(input_file, audio_format, audio_quality):
             '-loglevel', 'quiet'
         ]
         subprocess.run(command)
+
+        return output_file

     except Exception as e:
         print(f"Error in convert_audio: {e}")
-        return output_file

(return output_file now runs after the ffmpeg subprocess completes instead of only inside the exception handler, so callers receive the converted file path on success.)
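A quick way to see the effect of the fix (illustrative only; the input path is a placeholder):

from helperfunctions import convert_audio

# Before this change the function only returned output_file from the except
# branch, so a successful conversion yielded None; now the converted path comes back.
converted_path = convert_audio("Output/demo-user/output_audio.wav", "mp3", "128kbps")
print(converted_path)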
main.py
CHANGED
The per-endpoint processing that previously lived in main.py (the downloader, transcription, translation, summarization, audiobook and rendering code, together with the module-level S3 handler and users_context) was removed; it now runs inside the Celery tasks defined in api_functions.py. Each handler below simply enqueues the corresponding task and returns its id, and two new routes expose task status over HTTP and WebSocket.

@@ -1,22 +1,32 @@
 import os
 import json
+import asyncio  # required by the WebSocket polling loop below
+import uuid
+import requests

 import uvicorn
+from fastapi import HTTPException, FastAPI, Request, WebSocket, Depends
 from fastapi.middleware.cors import CORSMiddleware
 from contextlib import asynccontextmanager

 from helperfunctions import *
+
+from celery import Celery
+from celery.result import AsyncResult
+from celery_config import broker_url, result_backend
+
+import api_functions
 from languages import CODE2LANG
-from s3_handler import S3Handler

+# Initialize Celery
+celery = Celery(
+    'tasks',
+    broker=broker_url,
+    backend=result_backend
+)

+# Load Celery Configuration from File
+celery.config_from_object('celery_config')
+celery.autodiscover_tasks(['api_functions'])


 ### API Configurations

@@ -32,7 +42,8 @@ async def lifespan(app: FastAPI):

     # For Storing Models
     global MODELS
+    MODELS = {}
+    # MODELS = load_models()

     print('ML Models Loaded!')

@@ -46,15 +57,6 @@ async def lifespan(app: FastAPI):

 # Initializing FastAPI App
 app = FastAPI(lifespan=lifespan)

-# Output Directory for Files Storage
-output_folder = 'Output'
-
-# S3 Handler
-s3 = S3Handler()
-
-# Create a context variable to store the contexts for each user
-users_context = dict()
-
 # CORS (Cross-Origin Resource Sharing)
 origins = [
     "http://localhost",

@@ -72,6 +74,37 @@ app.add_middleware(

 ### APIs

+# WebSocket route to monitor task status
+@app.websocket("/ws/task_status/{task_id}")
+async def websocket_endpoint(websocket: WebSocket, task_id: str):
+    await websocket.accept()
+    while True:
+        result = AsyncResult(task_id, app=celery)
+        await websocket.send_json({"status": result.status, "result": result.result})
+        await asyncio.sleep(1)
+
+# API for monitoring task status
+@app.get("/task_status/{task_id}")
+async def get_task_status(request: Request, task_id: str):
+    result = AsyncResult(task_id, app=celery)
+    return {"status": result.status, "result": result.result}
+
+
+# Function to create a unique task ID
+def create_task_id():
+    return f"task_{str(uuid.uuid4())[:8]}"
+
+# Function to run a background task and return task ID
+def run_background_task(task_function, *args):
+    task_id = create_task_id()
+    task = celery.send_task(
+        task_function,
+        args=args,
+        task_id=task_id
+    )
+    return task_id, task
+
+
 @app.get("/get_media_metadata")
 async def get_media_metadata(request: Request, url: str):

@@ -81,37 +114,16 @@ async def get_media_metadata(request: Request, url: str):

         user_ip = request.client.host
         user_id = generate_uuid(user_ip, url)

+        # Run background task for media metadata
+        task_id, _ = run_background_task("api_functions.get_media_metadata_task", user_id, url)
+
         # Getting Status
+        status = 1 if task_id else 0
+
+        return {'message': 'Task Started',
+                'task_id': task_id,
                 'user_id': user_id,
+                'status': status}

     except Exception as e:
         print(f"An unexpected error occurred:\n{str(e)}")

@@ -122,31 +134,16 @@ async def get_media_metadata(request: Request, url: str):

 async def get_media_formats(user_id: str):

     try:
+        # Run background task for media formats
+        task_id, _ = run_background_task("api_functions.get_media_formats_task", user_id)
+
         # Getting Status
+        status = 1 if task_id else 0

+        return {'message': 'Task Started',
+                'task_id': task_id,
+                'status': status}

     except Exception as e:
         print(f"An unexpected error occurred:\n{str(e)}")

@@ -157,23 +154,16 @@ async def get_media_formats(user_id: str):

 async def download_media(user_id: str, media_type: str, media_format: str, media_quality: str):

     try:
+        # Run background task for media download
+        task_id, _ = run_background_task("api_functions.download_media_task", user_id,
+                                         media_type, media_format, media_quality)
+
         # Getting Status
+        status = 1 if task_id else 0

+        return {'message': 'Task Started',
+                'task_id': task_id,
+                'status': status}

     except Exception as e:
         print(f"An unexpected error occurred:\n{str(e)}")

@@ -184,52 +174,17 @@ async def download_media(user_id: str, media_type: str, media_format: str, media_quality: str):

 async def get_transcript(user_id: str, subtitle_format: str = 'srt', word_level: bool = False):

     try:
+        # Run background task for transcription
+        task_id, _ = run_background_task("api_functions.get_transcript_task", user_id,
+                                         subtitle_format, word_level)
+
         # Getting Status
+        status = 1 if task_id else 0

+        return {'message': 'Task Started',
+                'task_id': task_id,
+                'status': status}
+
     except Exception as e:
         print(f"An unexpected error occurred:\n{str(e)}")
         raise HTTPException(status_code=500, detail=str(e))

@@ -244,59 +199,17 @@ async def get_translation(user_id: str):

 async def get_translation(user_id: str, target_language: str = 'en'):

     try:
+        # Run background task for translation
+        task_id, _ = run_background_task("api_functions.get_translation_task",
+                                         user_id, target_language)
+
         # Getting Status
+        status = 1 if task_id else 0

+        return {'message': 'Task Started',
+                'task_id': task_id,
+                'status': status}
+
     except Exception as e:
         print(f"An unexpected error occurred:\n{str(e)}")
         raise HTTPException(status_code=500, detail=str(e))

@@ -307,33 +220,20 @@ async def get_summary(user_id: str, Summary_type: str, Summary_strategy: str, Target_Person_type: str,
                       Response_length: str, Writing_style: str):

     try:
+        # Run background task for summarization
+        task_id, _ = run_background_task("api_functions.get_summary_task",
+                                         user_id, Summary_type, Summary_strategy,
+                                         Target_Person_type, Response_length,
+                                         Writing_style)

         # Getting Status
+        status = 1 if task_id else 0
+
+        return {'message': 'Task Started',
+                'task_id': task_id,
+                'status': status}

     except Exception as e:
         print(f"An unexpected error occurred:\n{str(e)}")

@@ -345,33 +245,20 @@ async def get_key_info(user_id: str, Summary_type: str, Summary_strategy: str, Target_Person_type: str,
                        Response_length: str, Writing_style: str):

     try:
+        # Run background task for key information extraction
+        task_id, _ = run_background_task("api_functions.get_key_info_task",
+                                         user_id, Summary_type, Summary_strategy,
+                                         Target_Person_type, Response_length,
+                                         Writing_style)

         # Getting Status
+        status = 1 if task_id else 0

+        return {'message': 'Task Started',
+                'task_id': task_id,
+                'status': status}

     except Exception as e:
         print(f"An unexpected error occurred:\n{str(e)}")

@@ -383,47 +270,19 @@ async def get_audiobook(user_id: str, narration_style: str, speaker: str = "male",
                         audio_format: str = "mp3", audio_quality: str = "128kbps"):

     try:
+        # Run background task for audiobook generation
+        task_id, _ = run_background_task("api_functions.get_audiobook_task",
+                                         user_id, narration_style, speaker,
+                                         audio_format, audio_quality)

+        # Getting Status
+        status = 1 if task_id else 0

+        return {'message': 'Task Started',
+                'task_id': task_id,
+                'status': status}
+
     except Exception as e:
         print(f"An unexpected error occurred:\n{str(e)}")
         raise HTTPException(status_code=500, detail=str(e))

@@ -433,36 +292,17 @@ async def get_audiobook(user_id: str, narration_style: str, speaker: str = "male",

 async def get_rendered_video(user_id: str, video_format: str, video_quality: str, subtitles_type: str = 'original'):

     try:
+        # Run background task for video rendering
+        task_id, _ = run_background_task("api_functions.get_rendered_video_task",
+                                         user_id, video_format, video_quality,
+                                         subtitles_type)

         # Getting Status
+        status = 1 if task_id else 0

+        return {'message': 'Task Started',
+                'task_id': task_id,
+                'status': status}

     except Exception as e:
         print(f"An unexpected error occurred:\n{str(e)}")
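With the handlers reduced to thin wrappers around run_background_task, a client now works in two steps: start a job, then poll /task_status/{task_id} (or subscribe to the WebSocket route) until the Celery worker finishes. A minimal sketch, assuming the app is served locally with uvicorn on port 8000 and using a placeholder video URL:

import time
import requests

BASE = "http://localhost:8000"   # wherever uvicorn is serving main:app

# 1. Kick off metadata extraction; the endpoint returns a task_id immediately
resp = requests.get(f"{BASE}/get_media_metadata",
                    params={"url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ"}).json()
task_id, user_id = resp["task_id"], resp["user_id"]

# 2. Poll the new status endpoint until the worker reports a final state
while True:
    state = requests.get(f"{BASE}/task_status/{task_id}").json()
    if state["status"] in ("SUCCESS", "FAILURE"):
        break
    time.sleep(1)

print(state["result"])   # the dict returned by api_functions.get_media_metadata_task

The same pattern applies to every endpoint above; the user_id from the first response is what the later calls (get_media_formats, get_transcript, and so on) expect as their user_id query parameter.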
requirements.txt
CHANGED
@@ -1,13 +1,19 @@
+aiohttp==3.9.1
 aiosignal==1.3.1
+aiostream==0.5.2
+amqp==5.2.0
 annotated-types==0.6.0
 anyio==3.7.1
+asttokens==2.4.1
 async-timeout==4.0.3
 attrs==23.1.0
 av==10.0.0
+beautifulsoup4==4.12.2
+billiard==4.2.0
 boto3==1.29.0
 botocore==1.32.0
 Brotli==1.1.0
+celery==5.3.5
 certifi==2023.7.22
 cffi==1.16.0
 charset-normalizer==3.3.2

@@ -30,6 +36,7 @@ faster-whisper==0.9.0
 ffmpeg-python==0.2.0
 filelock==3.13.1
 flatbuffers==23.5.26
+flower==2.0.1
 fonttools==4.44.1
 frozenlist==1.4.0
 fsspec==2023.10.0

@@ -38,7 +45,8 @@ future==0.18.3
 greenlet==3.0.1
 h11==0.14.0
 httpcore==1.0.2
+httptools==0.6.1
+httpx==0.25.2
 huggingface-hub==0.17.3
 humanfriendly==10.0
 idna==3.4

@@ -48,18 +56,22 @@ joblib==1.3.2
 jsonpatch==1.33
 jsonpointer==2.4
 kiwisolver==1.4.5
+kombu==5.3.4
 langchain==0.0.335
 langsmith==0.0.64
 lit==17.0.5
+llama-index==0.9.8
 llvmlite==0.41.1
 MarkupSafe==2.1.3
 marshmallow==3.20.1
+matplotlib-inline==0.1.6
 matplotlib==3.8.1
 more-itertools==10.1.0
 mpmath==1.3.0
 multidict==6.0.4
 mutagen==1.47.0
 mypy-extensions==1.0.0
+nest-asyncio==1.5.8
 networkx==3.2.1
 nltk==3.8.1
 numba==0.58.1

@@ -76,23 +88,36 @@ nvidia-cusparse-cu11==11.7.4.91
 nvidia-nccl-cu11==2.14.3
 nvidia-nvtx-cu11==11.7.91
 onnxruntime==1.16.2
-openai==0.28.0
 openai-whisper==20231106
+openai==0.28.0
 packaging==23.2
 pandas==2.1.3
+parso==0.8.3
+pexpect==4.9.0
 Pillow==10.1.0
 pip==23.3
+platformdirs==4.0.0
+prometheus-client==0.19.0
+prompt-toolkit==3.0.41
 protobuf==4.25.0
+psutil==5.9.6
+ptyprocess==0.7.0
+pure-eval==0.2.2
 pycparser==2.21
 pycryptodome==3.19.0
 pycryptodomex==3.19.0
+pydantic==2.5.1
 pydantic_core==2.14.1
+pydantic_core==2.14.3
+Pygments==2.17.2
 pyparsing==3.1.1
 python-dateutil==2.8.2
+python-dotenv==1.0.0
 pytube==15.0.0
 pytz==2023.3.post1
 PyYAML==6.0.1
+pyzmq==25.1.1
+redis==5.0.1
 regex==2023.10.3
 requests==2.31.0
 s3transfer==0.7.0

@@ -103,8 +128,10 @@ setuptools==68.0.0
 six==1.16.0
 sniffio==1.3.0
 soundfile==0.12.1
+soupsieve==2.5
 SQLAlchemy==2.0.23
 stable-ts==2.13.3
+stack-data==0.6.3
 starlette==0.27.0
 sympy==1.12
 tenacity==8.2.3

@@ -113,18 +140,25 @@ tiktoken==0.5.1
 tokenizers==0.14.1
 torch==2.0.1
 torchaudio==2.0.2
+tornado==6.3.3
 tqdm==4.66.1
+traitlets==5.13.0
 transformers==4.35.1
 triton==2.0.0
-typing_extensions==4.8.0
 typing-inspect==0.9.0
+typing_extensions==4.8.0
 tzdata==2023.3
+urllib3==2.1.0
 uvicorn==0.24.0.post1
+uvloop==0.19.0
+vine==5.1.0
+watchfiles==0.21.0
+wcwidth==0.2.11
 websockets==12.0
 wheel==0.41.2
 wordcloud==1.9.2
+wrapt==1.16.0
+yarl==1.9.3
 youtube-transcript-api==0.6.1
 yt-dlp==2023.10.13
 git+https://github.com/suno-ai/bark.git