Sanshruth committed
Commit b5732cc
1 Parent(s): ac138f5

Update app.py

Files changed (1)
  1. app.py +51 -77
app.py CHANGED
@@ -1,27 +1,5 @@
 import zipfile
-def unzip_content():
-    try:
-        # First try using Python's zipfile
-        print("Attempting to unzip content using Python...")
-        with zipfile.ZipFile('./content.zip', 'r') as zip_ref:
-            zip_ref.extractall('.')
-    except Exception as e:
-        print(f"Python unzip failed: {str(e)}")
-        try:
-            # Fallback to system unzip command
-            print("Attempting to unzip content using system command...")
-            subprocess.run(['unzip', '-o', './content.zip'], check=True)
-        except Exception as e:
-            print(f"System unzip failed: {str(e)}")
-            raise Exception("Failed to unzip content using both methods")
-    print("Content successfully unzipped!")
-
-# Try to unzip content at startup
-try:
-    unzip_content()
-except Exception as e:
-    print(f"Warning: Could not unzip content: {str(e)}")
-
+import subprocess
 import gradio as gr
 import numpy as np
 import torch
@@ -35,45 +13,59 @@ from omegaconf import OmegaConf
 from CLIP import clip
 import os
 import sys
+import tempfile
+import io
+from pathlib import Path
+from PIL import Image
+import cv2
+import imageio
 
-#os.chdir('./taming-transformers')
-#from taming.models.vqgan import VQModel
-#os.chdir('..')
+# Add taming transformers path
 taming_path = os.path.join(os.getcwd(), 'taming-transformers')
 sys.path.append(taming_path)
 from taming.models.vqgan import VQModel
 
-
-from PIL import Image
-import cv2
-import imageio
-
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-def create_video(image_folder='./generated', video_name='morphing_video.mp4'):
-    images = sorted([img for img in os.listdir(image_folder) if img.endswith(".png") or img.endswith(".jpg")])
-    if len(images) == 0:
-        print("No images found in the folder.")
-        return None
+def unzip_content():
+    try:
+        print("Attempting to unzip content using Python...")
+        with zipfile.ZipFile('./content.zip', 'r') as zip_ref:
+            zip_ref.extractall('.')
+    except Exception as e:
+        print(f"Python unzip failed: {str(e)}")
+        try:
+            print("Attempting to unzip content using system command...")
+            subprocess.run(['unzip', '-o', './content.zip'], check=True)
+        except Exception as e:
+            print(f"System unzip failed: {str(e)}")
+            raise Exception("Failed to unzip content using both methods")
+    print("Content successfully unzipped!")
 
-    frame = cv2.imread(os.path.join(image_folder, images[0]))
-    height, width, layers = frame.shape
-    video_writer = imageio.get_writer(video_name, fps=10)
+def create_video(images_list, video_name='morphing_video.mp4'):
+    """Create video from a list of image tensors"""
+    if not images_list:
+        print("No images provided.")
+        return None
 
-    for image in images:
-        img_path = os.path.join(image_folder, image)
-        img = imageio.imread(img_path)
-        video_writer.append_data(img)
+    with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as temp_video:
+        video_writer = imageio.get_writer(temp_video.name, fps=10)
+
+        for img_tensor in images_list:
+            # Convert tensor to numpy array
+            img = img_tensor.cpu().numpy().transpose((1, 2, 0))
+            img = (img * 255).astype('uint8')
+            video_writer.append_data(img)
 
-    video_writer.close()
-    return video_name
+        video_writer.close()
+        return temp_video.name
 
-def save_from_tensors(tensor, output_dir, filename):
+def save_from_tensors(tensor):
+    """Process tensor and return the processed version"""
     img = tensor.clone()
     img = img.mul(255).byte()
     img = img.cpu().numpy().transpose((1, 2, 0))
-    os.makedirs(output_dir, exist_ok=True)
-    Image.fromarray(img).save(os.path.join(output_dir, filename))
+    return img
 
 def norm_data(data):
     return (data.clip(-1, 1) + 1) / 2
@@ -216,35 +208,14 @@ def generate_art(include_text, exclude_text, extras_text, num_iterations):
         res_img, res_z = training_loop(params, optimizer, include_enc, exclude_enc, extras_enc,
                                        vqgan_model, clip_model, w1, w2, total_iter=num_iterations)
 
-        # Save results
-        output_dir = "generated"
-        # Create output directory if it doesn't exist
-        os.makedirs(output_dir, exist_ok=True)
-
-        # Clear any existing files in the output directory
-        for file in os.listdir(output_dir):
-            file_path = os.path.join(output_dir, file)
-            if os.path.isfile(file_path):
-                os.remove(file_path)
-
-        for i, img in enumerate(res_img):
-            save_from_tensors(img, output_dir, f"generated_image_{i:03d}.png")
-
-        # Create video
-        video_path = create_video()
-
-        # Delete the generated folder and its contents after creating the video
-        import shutil
-        shutil.rmtree(output_dir)
+        # Create video directly from tensors
+        video_path = create_video(res_img)
 
         return video_path
 
     except Exception as e:
-        # If there's an error, ensure the generated folder is cleaned up
-        if os.path.exists("generated"):
-            import shutil
-            shutil.rmtree("generated")
-        raise e  # Re-raise the exception to be handled by the calling function
+        raise e
+
 def gradio_interface(include_text, exclude_text, extras_text, num_iterations):
     try:
         video_path = generate_art(include_text, exclude_text, extras_text, int(num_iterations))
@@ -252,6 +223,12 @@ def gradio_interface(include_text, exclude_text, extras_text, num_iterations):
     except Exception as e:
        return f"An error occurred: {str(e)}"
 
+# Try to unzip content at startup
+try:
+    unzip_content()
+except Exception as e:
+    print(f"Warning: Could not unzip content: {str(e)}")
+
 # Define and launch the Gradio app
 iface = gr.Interface(
     fn=gradio_interface,
@@ -265,20 +242,17 @@ iface = gr.Interface(
         gr.Number(label="Number of Iterations",
                   value=200, minimum=1, maximum=1000)
     ],
-    outputs=gr.Video(label="Generated Morphing Video"),
+    outputs=gr.Video(label="Generated Morphing Video", format="mp4", autoplay=True),
    title="VQGAN-CLIP Art Generator",
    css="allow",
    allow_flagging="never",
-    #######
    description = """
 Generate artistic videos using VQGAN-CLIP. Enter your prompts separated by commas and adjust the number of iterations. The model will generate a morphing video based on your inputs.
-
 Note: This application requires GPU access. Please either:
 1. Use the Colab notebook available at https://github.com/SanshruthR/VQGAN-CLIP
 2. Clone this space and enable GPU in your personal copy.
 """)
 
-
 if __name__ == "__main__":
    print("Checking GPU availability:", "GPU AVAILABLE" if torch.cuda.is_available() else "NO GPU FOUND")
    iface.launch()
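
For reference, a minimal standalone sketch of the in-memory pipeline this commit switches to: frames are appended straight from tensors to an imageio writer backed by a temporary .mp4, instead of being written out as PNGs under a generated/ folder and deleted afterwards. The frames list and tensor shapes below are illustrative placeholders, not values taken from app.py:

# Sketch only: mirrors the new create_video() flow with made-up input tensors.
import tempfile

import imageio
import torch

# Placeholder CHW float tensors in [0, 1]; app.py would pass the tensors
# produced by its training loop instead.
frames = [torch.rand(3, 64, 64) for _ in range(10)]

with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as tmp:
    writer = imageio.get_writer(tmp.name, fps=10)
    for t in frames:
        img = t.cpu().numpy().transpose((1, 2, 0))       # CHW -> HWC
        writer.append_data((img * 255).astype('uint8'))  # float [0, 1] -> uint8
    writer.close()

print("Video written to", tmp.name)  # this path can be handed to gr.Video

Because the temporary file is created with delete=False, the path remains valid after the with block exits, which is what allows create_video() to return it to the Gradio output component.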