Update app.py
app.py
CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
 import torch
 from PIL import Image
 from diffusers import AutoPipelineForText2Image, DDIMScheduler
+from transformers import CLIPVisionModelWithProjection
 import numpy as np
 import spaces  # Make sure to import spaces
 
@@ -25,11 +26,14 @@ pipeline.load_ip_adapter(
 )
 pipeline.set_ip_adapter_scale([0.7, 0.5])
 
-#
+# Ensure the model and its components are moved to GPU
+pipeline.to("cuda")
+
+# Define the desired size
+desired_size = (1024, 1024)
+
 @spaces.GPU
 def transform_image(face_image):
-    # Move the pipeline to GPU inside the function
-    pipeline.to("cuda")
     generator = torch.Generator(device="cuda").manual_seed(0)
 
     # Process the input face image
@@ -40,16 +44,19 @@ def transform_image(face_image):
     else:
         raise ValueError("Unsupported image format")
 
-    #
-
-
+    # Resize the face image
+    processed_face_image = processed_face_image.resize(desired_size, Image.LANCZOS)
+
+    # Load and resize the style image from the local path
+    style_image_path = "examples/soyjak2.jpeg"
+    style_image = Image.open(style_image_path).resize(desired_size, Image.LANCZOS)
 
     # Perform the transformation using the configured pipeline
     image = pipeline(
         prompt="soyjak",
         ip_adapter_image=[style_image, processed_face_image],
         negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
-        num_inference_steps=
+        num_inference_steps=30,
         generator=generator,
     ).images[0]
 
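For context: the hunk headers reference a pipeline.load_ip_adapter(...) call and the new CLIPVisionModelWithProjection import, but the diff only shows fragments of that setup. Below is a minimal sketch of how a two-adapter IP-Adapter pipeline like this one is typically assembled with diffusers; the base checkpoint, adapter repo, and weight names are illustrative assumptions, not taken from this Space.

import torch
from diffusers import AutoPipelineForText2Image, DDIMScheduler
from transformers import CLIPVisionModelWithProjection

# Image encoder used by the "plus" IP-Adapter variants
# (repo and subfolder are assumptions based on the diffusers docs).
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    "h94/IP-Adapter",
    subfolder="models/image_encoder",
    torch_dtype=torch.float16,
)

# Base text-to-image pipeline; the SDXL checkpoint here is an assumption.
pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    image_encoder=image_encoder,
    torch_dtype=torch.float16,
)
pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)

# Two adapters, matching the two scales in set_ip_adapter_scale([0.7, 0.5])
# and the two images passed as ip_adapter_image=[style_image, processed_face_image].
pipeline.load_ip_adapter(
    "h94/IP-Adapter",
    subfolder="sdxl_models",
    weight_name=[
        "ip-adapter-plus_sdxl_vit-h.safetensors",       # style adapter (assumed)
        "ip-adapter-plus-face_sdxl_vit-h.safetensors",  # face adapter (assumed)
    ],
)
pipeline.set_ip_adapter_scale([0.7, 0.5])

Moving pipeline.to("cuda") from the function body to module scope also matches the pattern shown in the ZeroGPU documentation: the spaces package defers actual CUDA initialization until a @spaces.GPU-decorated function runs, so device placement can be declared once at startup instead of on every call.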