spookyuser committed on
Commit 13a5199
1 Parent(s): 0acc319

Add tracy's code

.vscode/launch.json DELETED
@@ -1,16 +0,0 @@
-{
-    // Use IntelliSense to learn about possible attributes.
-    // Hover to view descriptions of existing attributes.
-    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
-    "version": "0.2.0",
-    "configurations": [
-        {
-            "name": "Python: Current File",
-            "type": "python",
-            "request": "launch",
-            "program": "${file}",
-            "console": "integratedTerminal",
-            "justMyCode": true
-        }
-    ]
-}
OpenCV/README.md DELETED
@@ -1,8 +0,0 @@
-1. Converts RGB to 2D Black and White image
-2. Animate line drawing as a path
-
-
-INSTALLATIONS
-moviepy
-opencv
-
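
The first README step is the standard OpenCV grayscale-plus-threshold pass; a minimal sketch of that conversion (file names hypothetical, not from this repo):

    import cv2

    # Reduce an RGB image to a 2D black-and-white array
    img = cv2.imread("input.png")                       # hypothetical input
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)        # drop to one channel
    _, bw = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
    cv2.imwrite("bw.png", bw)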
OpenCV/contourfinder.py DELETED
@@ -1,124 +0,0 @@
-import cv2
-import sys
-from cv2 import CONTOURS_MATCH_I2
-import numpy as np
-
-# Takes two inputs as images
-# Original 1 and 2 are dummy data
-
-original = cv2.imread(r"Animate\images\flag (1).png")
-original2 = cv2.imread(r"Animate\images\brain2.png")
-
-# get key positions at which frame needs to be generated
-def list_of_positions(num_contours, num_frames=100):
-    positions = []
-    for i in range(0, num_frames):
-        positions.append(int(num_contours / num_frames * i))
-    return positions
-
-
-def contourfinder(image1, image2, text=None, num_frames=100):
-    # Create two blank pages to write into
-    blank = np.zeros(image1.shape, dtype='uint8')
-    blank2 = np.zeros(image1.shape, dtype='uint8')
-
-    # Threshold and contours for image 1 and 2
-    threshold = cv2.Canny(image=image1, threshold1=100, threshold2=200)
-    contours, hierarchies = cv2.findContours(threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
-
-    threshold2 = cv2.Canny(image=original2, threshold1=100, threshold2=200)
-    contours2, hierarchies2 = cv2.findContours(threshold2, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
-
-    # Initialize three empty videos
-    vid1 = cv2.VideoWriter('vid1.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 24, threshold.shape)
-    vid2 = cv2.VideoWriter('vid2.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 24, threshold.shape)
-    text_vid = cv2.VideoWriter('text_vid.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 10, threshold.shape)
-
-    # Get positions
-    positions = list_of_positions(len(contours))
-    frames = []
-
-    # Loop over contours, adding them to the blank image then writing to video
-    for i in range(0, len(contours)):
-        cv2.drawContours(blank, contours=contours, contourIdx=i, color=(125, 200, 255), thickness=1)
-
-        if i in positions:
-            frames.append(blank)
-
-        # Compile to video
-        vid1.write(blank)
-    last_image1 = blank
-    vid1.release()
-
-    positions = list_of_positions(len(contours2))
-
-    for i in range(0, len(contours2)):
-        cv2.drawContours(blank2, contours=contours2, contourIdx=i, color=(125, 200, 255), thickness=1)
-        if i in positions:
-            frames.append(blank2)
-
-        vid2.write(blank2)
-    last_image2 = blank2
-    vid2.release()
-
-    # Next is the text vid
-
-    if text != None:
-        # Reading an image in default mode
-        image = np.zeros(original.shape, dtype='uint8')
-
-        # font
-        font = cv2.FONT_HERSHEY_COMPLEX
-
-        # org
-        org = (10, 400)
-
-        # fontScale
-        fontScale = 3
-
-        # Blue color in BGR
-        color = (186, 184, 108)
-
-        # Line thickness of 2 px
-        thickness = 4
-
-        def text_frames(text, image, org):
-            spacing = 55  # spacing between letters
-            blink = image
-            cv2.imwrite("blink.png", blink)
-            for i in range(0, len(text) - 1):
-                text_vid.write(blink)
-
-                # Using cv2.putText() method
-                image = cv2.putText(image, text[i], org, font,
-                                    fontScale, color, thickness, cv2.LINE_AA)
-
-                # Take care of org spacing
-                org = (org[0] + spacing, org[1])
-                if text[i].isupper():
-                    org = (org[0] + spacing + 1, org[1])
-                    print(f"Upper {text[i]}")
-                    print(org)
-
-                # Displaying the image
-                cv2.imwrite(f"text_im{i}.png", image)
-
-                # Compile to video
-                text_vid.write(image)
-            text_vid.release()
-
-        text_frames(text, image, org)
-    return last_image1, last_image2
-
-
-# contourfinder(original, original2, "spies gui Fly")
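
One detail worth flagging here, since it carries over into the rewritten animate.py later in this commit: cv2.VideoWriter takes its frame size as (width, height), while a numpy shape is (height, width), so passing threshold.shape straight through only works for square frames. A safer construction would be:

    h, w = threshold.shape[:2]
    vid1 = cv2.VideoWriter("vid1.mp4", cv2.VideoWriter_fourcc(*"mp4v"), 24, (w, h))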
OpenCV/png.py DELETED
The diff for this file is too large to render. See raw diff
 
OpenCV/requirements.txt DELETED
@@ -1,15 +0,0 @@
-tensorflow==2.6.2  # The latest should include tensorflow-gpu
-tensorflow-datasets==4.4.0
-tensorflow-addons==0.15.0
-absl-py==0.12.0
-gin-config==0.5.0
-parameterized==0.8.1
-mediapy==1.0.3
-scikit-image==0.19.1
-apache-beam==2.34.0
-google-cloud-bigquery-storage==1.1.0  # Suppresses a harmless error from beam
-natsort==8.1.0
-image-tools
-opencv-python==4.6.0.66
-moviepy==1.0.3
-ffmpeg
OpenCV/videoedit.py DELETED
@@ -1,17 +0,0 @@
-from gc import freeze
-from moviepy.editor import VideoFileClip as vfc
-from moviepy.editor import concatenate_videoclips, vfx, videotools, CompositeVideoClip, \
-    TextClip, ImageSequenceClip, transfx
-# from contoutfinder import original, original2
-
-
-clip1 = vfc("project.mp4").fx(vfx.fadeout, .5)
-# clip1to2 = vfc(original).fx(vfx.fadeout, .5).fx(vfx.freeze, 'end', 3)
-clip2 = vfc("next_vid.mp4").fx(transfx.crossfadein, 2).fx(vfx.fadeout, .5)
-clip3 = vfc("text_vid.mp4", True).fx(vfx.fadein, .5).fx(vfx.blink, 5, 10)
-
-
-combined = concatenate_videoclips([clip1, clip2, clip3])
-
-combined.write_videofile("combined.mp4")
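
Worth noting: transfx.crossfadein only shows when clips actually overlap, which concatenate_videoclips does not do by default; it needs method="compose" plus negative padding. A small sketch (file names hypothetical):

    from moviepy.editor import VideoFileClip, concatenate_videoclips, transfx

    clips = [
        VideoFileClip("a.mp4"),                             # hypothetical inputs
        VideoFileClip("b.mp4").fx(transfx.crossfadein, 2),
    ]
    # method="compose" lets clips overlap; padding=-2 overlaps them by the 2 s fade
    combined = concatenate_videoclips(clips, method="compose", padding=-2)
    combined.write_videofile("combined.mp4")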
animate.py ADDED
@@ -0,0 +1,215 @@
+import os
+import sys
+
+# My installs: clone the FILM repo first so its eval package imports below succeed
+os.system("git clone https://github.com/google-research/frame-interpolation")
+sys.path.append("frame-interpolation")
+
+import cv2
+import mediapy
+import numpy as np
+from eval import interpolator, util
+from huggingface_hub import snapshot_download
+from image_tools.sizes import resize_and_crop
+from moviepy.editor import CompositeVideoClip
+from moviepy.editor import VideoFileClip as vfc
+from PIL import Image
+
+
+# get key positions at which frame needs to be generated
+def list_of_positions(num_contours, num_frames=100):
+    positions = []
+    for i in range(0, num_frames):
+        positions.append(int(num_contours / num_frames * i))
+    return positions
+
+
+def contourfinder(image1, image2, text=None, num_frames=100):
+    # Create two blank pages to write into
+    # Hardcoded 2048x2048 as the size; ideally this should be image1.shape
+    blank = np.zeros((2048, 2048, 3), dtype="uint8")
+    blank2 = np.zeros((2048, 2048, 3), dtype="uint8")
+
+    # Threshold and contours for image 1 and 2
+    threshold = cv2.Canny(image=image1, threshold1=100, threshold2=200)
+    contours, hierarchies = cv2.findContours(
+        threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
+    )
+
+    threshold2 = cv2.Canny(image=image2, threshold1=100, threshold2=200)
+    contours2, hierarchies2 = cv2.findContours(
+        threshold2, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
+    )
+
+    # Initialize three empty videos
+    # NOTE: VideoWriter expects frameSize as (width, height); threshold.shape is
+    # (height, width), so this relies on the frames being square
+    vid1 = cv2.VideoWriter(
+        "vid1.mp4", cv2.VideoWriter_fourcc(*"mp4v"), 24, threshold.shape
+    )
+    vid2 = cv2.VideoWriter(
+        "vid2.mp4", cv2.VideoWriter_fourcc(*"mp4v"), 24, threshold.shape
+    )
+    text_vid = cv2.VideoWriter(
+        "text_vid.mp4", cv2.VideoWriter_fourcc(*"mp4v"), 10, threshold.shape
+    )
+
+    # Get positions
+    positions = list_of_positions(len(contours))
+    frames = []
+
+    # Loop over contours, adding them to the blank image then writing to video
+    for i in range(0, len(contours)):
+        cv2.drawContours(
+            blank, contours=contours, contourIdx=i, color=(125, 200, 255), thickness=1
+        )
+
+        if i in positions:
+            frames.append(blank)
+
+        # Compile to video
+        vid1.write(blank)
+
+    vid1.release()
+    clip1 = vfc("/home/user/app/vid1.mp4")
+
+    positions = list_of_positions(len(contours2))
+
+    for i in range(0, len(contours2)):
+        cv2.drawContours(
+            blank2, contours=contours2, contourIdx=i, color=(125, 200, 255), thickness=1
+        )
+        if i in positions:
+            frames.append(blank2)
+
+        vid2.write(blank2)
+
+    vid2.release()
+    clip3 = vfc("/home/user/app/vid2.mp4")
+
+    # Next is the text vid
+
+    if text is not None:
+        # Reading an image in default mode
+        image = np.zeros(image1.shape, dtype="uint8")
+
+        # font
+        font = cv2.FONT_HERSHEY_COMPLEX
+
+        # org
+        org = (10, 400)
+
+        # fontScale
+        fontScale = 3
+
+        # Blue color in BGR
+        color = (186, 184, 108)
+
+        # Line thickness of 2 px
+        thickness = 4
+
+        def text_frames(text, image, org):
+            spacing = 55  # spacing between letters
+            blink = image
+            cv2.imwrite("blink.png", blink)
+            for i in range(0, len(text) - 1):
+                text_vid.write(blink)
+
+                # Using cv2.putText() method
+                image = cv2.putText(
+                    image, text[i], org, font, fontScale, color, thickness, cv2.LINE_AA
+                )
+
+                # Take care of org spacing
+                org = (org[0] + spacing, org[1])
+                if text[i].isupper():
+                    org = (org[0] + spacing + 1, org[1])
+                    print(f"Upper {text[i]}")
+                    print(org)
+
+                # Displaying the image
+                cv2.imwrite(f"text_im{i}.png", image)
+
+                # Compile to video
+                text_vid.write(image)
+            text_vid.release()
+
+        text_frames(text, image, org)
+    return clip1, clip3
+
+
+def load_model(model_name):
+    model = interpolator.Interpolator(snapshot_download(repo_id=model_name), None)
+    return model
+
+
+model_names = [
+    "akhaliq/frame-interpolation-film-style",
+    "NimaBoscarino/frame-interpolation_film_l1",
+    "NimaBoscarino/frame_interpolation_film_vgg",
+]
+
+models = {model_name: load_model(model_name) for model_name in model_names}
+
+ffmpeg_path = util.get_ffmpeg_path()
+mediapy.set_ffmpeg(ffmpeg_path)
+
+
+def resize(width, img):
+    basewidth = width
+    img = Image.open(img)
+    wpercent = basewidth / float(img.size[0])
+    hsize = int((float(img.size[1]) * float(wpercent)))
+    img = img.resize((basewidth, hsize), Image.ANTIALIAS)
+    return img
+
+
+def resize_img(img1, img2):
+    img_target_size = Image.open(img1)
+    img_to_resize = resize_and_crop(
+        img2,
+        (
+            img_target_size.size[0],
+            img_target_size.size[1],
+        ),  # set width and height to match img1
+        crop_origin="middle",
+    )
+    img_to_resize.save("resized_img2.png")
+
+
+def predict(frame1, frame2, times_to_interpolate, model_name):
+    model = models[model_name]
+
+    img1 = cv2.imread(frame1)
+    img2 = cv2.imread(frame2)
+
+    frame1 = resize(256, frame1)
+    frame2 = resize(256, frame2)
+
+    frame1.save("test1.png")
+    frame2.save("test2.png")
+
+    resize_img("test1.png", "test2.png")
+    input_frames = ["test1.png", "resized_img2.png"]
+
+    frames = list(
+        util.interpolate_recursively_from_files(
+            input_frames, times_to_interpolate, model
+        )
+    )
+
+    mediapy.write_video("out.mp4", frames, fps=5)
+
+    print(f"TYPES....{type(img1)},{type(img2)} SHAPES{img1.shape} Img {img1}")
+    clip1, clip3 = contourfinder(img1, img2)  # has a third text option
+
+    # Use the OpenCV and moviepy code:
+    # move from OpenCV video 1, to out.mp4, to OpenCV video 2
+    clip2 = vfc("out.mp4").resize(8).set_start(clip1.duration - 0.5).crossfadein(2)
+    clip3 = clip3.set_start((clip1.duration - 0.5) + clip2.duration).crossfadein(2)
+
+    new_clip = CompositeVideoClip([clip1, clip2, clip3])
+    # Write to a new file so clip2, which still streams from out.mp4, is not
+    # overwritten while it is being read
+    new_clip.write_videofile("final_out.mp4")
+
+    return "final_out.mp4"
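
For reference, a hypothetical call into the new entry point (file names are placeholders; the model name is one of the model_names above; times_to_interpolate=t yields 2**t - 1 in-between frames per pair):

    out = predict(
        "frame_a.png",            # hypothetical first keyframe
        "frame_b.png",            # hypothetical second keyframe
        3,                        # times_to_interpolate
        "akhaliq/frame-interpolation-film-style",
    )
    print(out)                    # path to the composited video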
app.py CHANGED
@@ -104,6 +104,10 @@ def add_static_image_to_audio(image, audio_input) -> str:
     video_clip.write_videofile(path, audio_codec="aac")
     return path
 
+def add_openv_animation_to_audio(image_paths: list[str], audio_path: str) -> str:
+
+
+
 
 def get_stable_diffusion_image(prompt) -> str:
     stable_diffusion = gr.Blocks.load(name="spaces/stabilityai/stable-diffusion")
@@ -124,4 +128,4 @@ iface = gr.Interface(
     )
 
 
-iface.launch()
+iface.launch()
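
The new add_openv_animation_to_audio stub is committed empty; a hedged sketch of one way it could glue animate.predict to an audio track (everything beyond the stub's signature is hypothetical, not the author's implementation):

    from moviepy.editor import AudioFileClip, VideoFileClip

    from animate import predict


    def add_openv_animation_to_audio(image_paths: list[str], audio_path: str) -> str:
        # Interpolate between the first two images, then lay the audio underneath
        video_path = predict(
            image_paths[0], image_paths[1], 3, "akhaliq/frame-interpolation-film-style"
        )
        video = VideoFileClip(video_path)
        audio = AudioFileClip(audio_path).set_duration(video.duration)
        out_path = "animation_with_audio.mp4"
        video.set_audio(audio).write_videofile(out_path, audio_codec="aac")
        return out_path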
requirements.txt CHANGED
@@ -5,4 +5,18 @@ ftfy
 --extra-index-url https://download.pytorch.org/whl/cu113 torch
 moviepy
 gradio
-spodcast
+spodcast
+tensorflow==2.6.2  # The latest should include tensorflow-gpu
+tensorflow-datasets==4.4.0
+tensorflow-addons==0.15.0
+absl-py==0.12.0
+gin-config==0.5.0
+parameterized==0.8.1
+mediapy==1.0.3
+scikit-image==0.19.1
+apache-beam==2.34.0
+google-cloud-bigquery-storage==1.1.0  # Suppresses a harmless error from beam
+natsort==8.1.0
+image-tools
+opencv-python==4.6.0.66
+moviepy==1.0.3