Update app.py
app.py CHANGED
@@ -5,7 +5,7 @@ from typing import List, Optional, Union
 import os
 import numpy as np
 import torch
-
+import banana_dev as banana
 import PIL
 from diffusers import AutoencoderKL, DDIMScheduler, DiffusionPipeline, PNDMScheduler, UNet2DConditionModel
 from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
@@ -13,41 +13,37 @@ from tqdm.auto import tqdm
 from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
 import gradio as gr
 import random
-import
-
-
-model_path = "CompVis/stable-diffusion-v1-4"
-
-StableDiffusionInpaintingPipeline = StableDiffusionInpaintingPipelineCustom.StableDiffusionInpaintingPipeline
-pipe = StableDiffusionInpaintingPipeline.from_pretrained(
-    model_path,
-    revision="fp16",
-    torch_dtype=torch.float16,
-    use_auth_token=os.environ.get("access_token")).to(device)
-
+import base64
+from io import BytesIO
+import os
 import gdown
+
 def download_gdrive_url():
     url = 'https://drive.google.com/u/0/uc?id=1PPO2MCttsmSqyB-vKh5C7SumwFKuhgyj&export=download'
     output = 'haarcascade_frontalface_default.xml'
     gdown.download(url, output, quiet=False)
 
-from torch import autocast
 def inpaint(p, init_image, mask_image=None, strength=0.75, guidance_scale=7.5, generator=None, num_samples=1, n_iter=1):
[15 removed lines: the previous inpaint body is collapsed in this rendering]
+    buffered_init_img = BytesIO()
+    buffered_inverted_img = BytesIO()
+    init_image.save(buffered_init_img,format="JPEG")
+    mask_image.save(buffered_inverted_img,format="JPEG")
+    encoded_init_image = base64.b64encode(buffered_init_img.getvalue()).decode('utf-8')
+    encoded_inverted_image = base64.b64encode(buffered_inverted_img.getvalue()).decode('utf-8')
+    model_inputs = {
+        "prompt": "4K UHD professional profile picture of a person wearing a suit for work and posing for a picture, fine details, realistic shaded.",
+        "init_image": encoded_init_image,
+        "mask_image": encoded_inverted_image,
+        "strength": 0.65,
+        "guidance_scale": 10,
+        "num_inference_steps": 100
+    }
+    out = banana.run(os.environ.get("API_KEY"), os.environ.get("MODEL_KEY"), model_inputs)
+    image_byte_string = out["modelOutputs"][0]["output_image_base64"]
+    image_encoded = image_byte_string.encode('utf-8')
+    image_bytes = BytesIO(base64.b64decode(image_encoded))
+    return_image = Image.open(image_bytes)
+    return return_image
 
 def identify_face(user_image):
     img = cv2.imread(user_image.name) # read the resized image in cv2
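
With this change, app.py no longer loads the Stable Diffusion inpainting pipeline locally: inpaint() now base64-encodes the init and mask images as JPEG, sends them to a hosted model via banana.run(), and decodes the returned base64 string back into an image. Below is a minimal usage sketch, not part of the commit, assuming the Space's API_KEY and MODEL_KEY secrets are set, that `from PIL import Image` is available elsewhere in app.py (the new body calls Image.open but only `import PIL` appears in the diff), and that the file names are hypothetical.

# Sketch only: exercises the updated inpaint() round-trip outside Gradio.
# Assumes API_KEY / MODEL_KEY environment variables (Space secrets) are set
# and that selfie.jpg / mask.jpg exist locally (hypothetical file names).
import os
from PIL import Image  # the new inpaint() body relies on Image.open

init = Image.open("selfie.jpg").convert("RGB")   # RGB so the JPEG save inside inpaint() works
mask = Image.open("mask.jpg").convert("RGB")     # inpainting mask passed through as-is

# The prompt argument `p` and the strength/guidance/steps keyword arguments are
# currently unused: the new body hard-codes the "professional profile picture"
# prompt, strength=0.65, guidance_scale=10 and num_inference_steps=100.
result = inpaint("ignored prompt", init, mask)
result.save("headshot_out.jpg")

Moving inference behind banana.run() keeps the diffusion model off the Space itself, which presumably is why the fp16 from_pretrained() setup and the access_token secret are dropped in favour of the API_KEY and MODEL_KEY secrets.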
|