AP123 committed (verified)
Commit cc6fea7 · 1 Parent(s): 7ebe221

Update app.py

Files changed (1)
  1. app.py +10 -14
app.py CHANGED
@@ -6,7 +6,7 @@ from transformers import CLIPVisionModelWithProjection
 import numpy as np
 import spaces
 
-
+# Initialize the image encoder and pipeline outside the function
 image_encoder = CLIPVisionModelWithProjection.from_pretrained(
     "h94/IP-Adapter",
     subfolder="models/image_encoder",
@@ -19,30 +19,26 @@ pipeline = AutoPipelineForText2Image.from_pretrained(
     image_encoder=image_encoder,
 )
 pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
-
 pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name=["ip-adapter-plus_sdxl_vit-h.safetensors", "ip-adapter-plus-face_sdxl_vit-h.safetensors"])
-pipeline.set_ip_adapter_scale([soy_strength, anon_strength])
-
 pipeline.enable_model_cpu_offload()
 
 @spaces.GPU
-def transform_image(face_image):
+def transform_image(face_image, soy_strength, face_strength):
     generator = torch.Generator(device="cpu").manual_seed(0)
 
-    # Check if the input is already a PIL Image
     if isinstance(face_image, Image.Image):
        processed_face_image = face_image
-    # If the input is a NumPy array, convert it to a PIL Image
     elif isinstance(face_image, np.ndarray):
        processed_face_image = Image.fromarray(face_image)
     else:
        raise ValueError("Unsupported image format")
 
-    # Load the style image from the local path
     style_image_path = "examples/soyjak2.jpg"
     style_image = Image.open(style_image_path)
 
-    # Perform the transformation
+    # Set the IP adapter scale dynamically based on the sliders
+    pipeline.set_ip_adapter_scale([soy_strength, face_strength])
+
     image = pipeline(
        prompt="soyjak",
        ip_adapter_image=[style_image, processed_face_image],
@@ -53,18 +49,18 @@ def transform_image(face_image):
 
     return image
 
-# Gradio interface setup
+# Gradio interface setup with dynamic sliders
 demo = gr.Interface(
     fn=transform_image,
     inputs=[
        gr.Image(label="Upload your face image"),
        gr.Slider(minimum=0, maximum=1, step=0.05, value=0.7, label="Soy Strength"),
-       gr.Slider(minimum=0, maximum=1, step=0.05, value=0.5, label="Face Strength")
+       gr.Slider(minimum=0, maximum=1, step=0.05, value=0.5, label="Face Strength")  # Renamed to Face Strength
     ],
     outputs=gr.Image(label="Your Soyjak"),
     title="InstaSoyjak - turn anyone into a Soyjak",
-    description="All you need to do is upload an image. **Please use responsibly.** Please follow me on Twitter if you like this space: https://twitter.com/angrypenguinPNG. Idea from Yacine, please give him a follow: https://twitter.com/yacineMTB.",
+    description="All you need to do is upload an image and adjust the strengths. **Please use responsibly.**",
 )
 
-demo.queue(max_size=20)  # Configures the queue with a maximum size of 20
-demo.launch()
+demo.queue(max_size=20)
+demo.launch()
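
For reviewers who want to sanity-check the new wiring without downloading SDXL: the change passes the two slider values into `transform_image` and forwards them to `pipeline.set_ip_adapter_scale` before each generation, in the same order as `ip_adapter_image` (style image first, face image second). Below is a minimal sketch of that data flow; the `StubPipeline` class and the placeholder style image are hypothetical stand-ins for testing only, not part of the app.

```python
# Hypothetical, runnable sketch (no GPU or SDXL weights needed): the diffusers
# pipeline is replaced by a stub so only the slider -> set_ip_adapter_scale
# data flow introduced by this commit is exercised.
import gradio as gr
import numpy as np
from PIL import Image


class StubPipeline:
    """Stand-in for the real text-to-image pipeline; records the scales it receives."""

    def set_ip_adapter_scale(self, scales):
        # In the real app: [soy (style) strength, face strength], matching ip_adapter_image order.
        self.scales = scales

    def __call__(self, prompt, ip_adapter_image):
        # A real pipeline would generate an image; the stub just echoes the face image.
        return ip_adapter_image[1]


pipeline = StubPipeline()


def transform_image(face_image, soy_strength, face_strength):
    # gr.Image may hand back a PIL image or a NumPy array depending on its type setting.
    if isinstance(face_image, Image.Image):
        processed_face_image = face_image
    elif isinstance(face_image, np.ndarray):
        processed_face_image = Image.fromarray(face_image)
    else:
        raise ValueError("Unsupported image format")

    # Placeholder for examples/soyjak2.jpg in the real app.
    style_image = Image.new("RGB", processed_face_image.size)

    # The commit's key change: scales are set per request from the slider values.
    pipeline.set_ip_adapter_scale([soy_strength, face_strength])
    return pipeline(prompt="soyjak", ip_adapter_image=[style_image, processed_face_image])


demo = gr.Interface(
    fn=transform_image,
    inputs=[
        gr.Image(label="Upload your face image"),
        gr.Slider(minimum=0, maximum=1, step=0.05, value=0.7, label="Soy Strength"),
        gr.Slider(minimum=0, maximum=1, step=0.05, value=0.5, label="Face Strength"),
    ],
    outputs=gr.Image(label="Your Soyjak"),
)

if __name__ == "__main__":
    demo.queue(max_size=20)
    demo.launch()
```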