Lakpriya Seneviratna committed on
Commit
8f5bdbe
·
1 Parent(s): 32007ab

chore: Add Dockerfile and FastAPI implementation

Browse files
Files changed (3) hide show
  1. Dockerfile +20 -0
  2. main_glitch.py → app.py +61 -16
  3. requirements.txt +3 -1
Dockerfile ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Use the official Python image from Docker Hub.
FROM python:3.9

# Run as a non-root user (uid 1000); required by Hugging Face Spaces and
# good practice generally.
RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"

# All subsequent paths are relative to /app.
WORKDIR /app

# Copy requirements.txt alone first so the dependency-install layer is
# cached until the requirements actually change.
COPY --chown=user ./requirements.txt requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt

# Copy the application code.
COPY --chown=user . /app

# Document the port the server listens on (matches the CMD below).
EXPOSE 7860

# Serve the FastAPI application with uvicorn.
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
main_glitch.py → app.py RENAMED
@@ -23,6 +23,8 @@ from google.auth.transport.requests import Request
23
  import nltk
24
  from tts import synthesiser, speaker_embedding
25
  import soundfile as sf
 
 
26
 
27
  # from tortoise_tts import TextToSpeech
28
 
@@ -499,28 +501,71 @@ def combine_audio_files(audio_files, output_file):
499
  api_key = 'VhLwkCKi3iu5Pf37LXfz-Lp7hTW69EV8uw_hkLAPkiA' # Replace with your Unsplash API key
500
  background_image = fetch_random_nature_image(api_key)
501
 
502
- if background_image:
503
- fetch_reddit_data('Glitch_in_the_Matrix')
504
- reddit_data = read_json('top_post.json')
505
- title = reddit_data.get('title')
506
- selftext = reddit_data.get('selftext')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
507
 
508
- # Split title into sentences
509
- sentences = nltk.sent_tokenize(selftext)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
510
 
511
- # Generate audio for each sentence and get durations
512
- audio_files, audio_durations = tts_per_sentence(sentences, 'audio')
513
 
514
- # Create and save the video
515
- create_video_from_title(title, sentences, background_image, "reddit_post_video_cv2.mp4", audio_durations)
516
 
517
- # Combine all audio files into one (if needed)
518
- combined_audio_file = combine_audio_files(audio_files, 'combined_voiceover.mp3') # Implement this function
519
 
520
- filename = "video_" + str(uuid.uuid4())
521
 
522
- # Combine the final video and audio
523
- combine_audio_video('reddit_post_video_cv2.mp4', combined_audio_file, filename + '.mp4')
524
 
525
  # if background_image:
526
  # # Example usage
 
23
  import nltk
24
  from tts import synthesiser, speaker_embedding
25
  import soundfile as sf
26
+ from fastapi import FastAPI
27
+ import uuid
28
 
29
  # from tortoise_tts import TextToSpeech
30
 
 
501
  api_key = 'VhLwkCKi3iu5Pf37LXfz-Lp7hTW69EV8uw_hkLAPkiA' # Replace with your Unsplash API key
502
  background_image = fetch_random_nature_image(api_key)
503
 
504
app = FastAPI()


@app.get("/generate-video")
def generate_video():
    """Generate a narrated video from the top post of r/Glitch_in_the_Matrix.

    Pipeline: fetch a random background image from Unsplash, pull the top
    Reddit post, split its body into sentences, synthesize per-sentence TTS
    audio, render a video timed to those durations, then mux video and the
    combined voiceover into a uniquely named MP4.

    Returns:
        dict: ``{"status": "success", "filename": <mp4 name>}`` on success,
        ``{"status": "failed", "message": ...}`` when no background image
        could be fetched, or ``{"status": "error", "message": ...}`` when
        any step raises.
    """
    import os  # local import: only needed for the env-var lookup below

    try:
        # SECURITY NOTE(review): this Unsplash key is committed in plain
        # text and should be rotated. Prefer setting UNSPLASH_API_KEY in
        # the environment; the literal is kept only as a backward-compatible
        # fallback.
        api_key = os.environ.get(
            "UNSPLASH_API_KEY",
            'VhLwkCKi3iu5Pf37LXfz-Lp7hTW69EV8uw_hkLAPkiA',
        )
        background_image = fetch_random_nature_image(api_key)

        # Guard clause: without a background image there is nothing to render.
        if not background_image:
            return {"status": "failed", "message": "Failed to fetch background image"}

        fetch_reddit_data('Glitch_in_the_Matrix')
        reddit_data = read_json('top_post.json')
        title = reddit_data.get('title')
        selftext = reddit_data.get('selftext')

        # Split the post body into sentences for per-sentence narration.
        sentences = nltk.sent_tokenize(selftext)

        # Generate audio for each sentence and collect per-clip durations.
        audio_files, audio_durations = tts_per_sentence(sentences, 'audio')

        # Render the (silent) video with frames timed to the audio durations.
        video_filename = "reddit_post_video_cv2.mp4"
        create_video_from_title(title, sentences, background_image, video_filename, audio_durations)

        # Stitch the per-sentence clips into a single voiceover track.
        combined_audio_file = combine_audio_files(audio_files, 'combined_voiceover.mp3')

        # Unique output name so concurrent/repeated requests don't clobber
        # each other's final file.
        final_filename = "video_" + str(uuid.uuid4())

        # Mux the silent video with the combined voiceover.
        combine_audio_video(video_filename, combined_audio_file, final_filename + '.mp4')

        return {"status": "success", "filename": final_filename + '.mp4'}
    except Exception as e:
        # Broad catch is deliberate at this endpoint boundary: surface a
        # structured error payload instead of an unhandled 500.
        return {"status": "error", "message": str(e)}


# Run the application using Uvicorn when executed directly.
# NOTE(review): this binds port 8000 while the Dockerfile CMD uses 7860.
# Inside the container the CMD wins, but align the ports if running locally.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
546
+
547
+ # if background_image:
548
+ # fetch_reddit_data('Glitch_in_the_Matrix')
549
+ # reddit_data = read_json('top_post.json')
550
+ # title = reddit_data.get('title')
551
+ # selftext = reddit_data.get('selftext')
552
+
553
+ # # Split title into sentences
554
+ # sentences = nltk.sent_tokenize(selftext)
555
 
556
+ # # Generate audio for each sentence and get durations
557
+ # audio_files, audio_durations = tts_per_sentence(sentences, 'audio')
558
 
559
+ # # Create and save the video
560
+ # create_video_from_title(title, sentences, background_image, "reddit_post_video_cv2.mp4", audio_durations)
561
 
562
+ # # Combine all audio files into one (if needed)
563
+ # combined_audio_file = combine_audio_files(audio_files, 'combined_voiceover.mp3') # Implement this function
564
 
565
+ # filename = "video_" + str(uuid.uuid4())
566
 
567
+ # # Combine the final video and audio
568
+ # combine_audio_video('reddit_post_video_cv2.mp4', combined_audio_file, filename + '.mp4')
569
 
570
  # if background_image:
571
  # # Example usage
requirements.txt CHANGED
@@ -13,4 +13,6 @@ nltk
13
  gradio
14
  soundfile
15
  transformers
16
- datasets[audio]
 
 
 
13
  gradio
14
  soundfile
15
  transformers
16
+ datasets[audio]
17
+ fastapi
18
+ uvicorn[standard]