Update app.py
app.py CHANGED
@@ -1,3 +1,4 @@
+from diffusers import AutoencoderKL, UNet2DConditionModel
 from diffusers import StableDiffusionPipeline
 from diffusers import StableDiffusionImg2ImgPipeline
 import gradio as gr
@@ -33,15 +34,17 @@ last_mode = "txt2img"
 current_model = models[1]
 current_model_path = current_model.path
 pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16)
+# pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16)
 if torch.cuda.is_available():
     pipe = pipe.to("cuda")
+    # pipe_i2i = pipe_i2i.to("cuda")
 
 device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
 
 def custom_model_changed(path):
     models[0].path = path
+    global current_model
     current_model = models[0]
-    return models[0].path
 
 def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""):
 
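Why the added `global current_model` line matters: in the previous version, `custom_model_changed` assigned `current_model = models[0]` inside the function, which only rebinds a function-local name and leaves the module-level `current_model` untouched. A minimal, standalone sketch of the difference, using a hypothetical `Model` dataclass and placeholder model entries as stand-ins for the app's model list:

from dataclasses import dataclass

@dataclass
class Model:
    name: str
    path: str

# Stand-ins for the app's model list; names and paths here are placeholders.
models = [Model("Custom model", ""), Model("Default model", "example/default-model")]
current_model = models[1]

def custom_model_changed_without_global(path):
    models[0].path = path
    current_model = models[0]   # rebinds a local name only; module-level state unchanged

def custom_model_changed_with_global(path):
    global current_model        # opt in to rebinding the module-level name
    models[0].path = path
    current_model = models[0]

custom_model_changed_without_global("user/custom-model")
print(current_model.name)       # -> "Default model" (unchanged)

custom_model_changed_with_global("user/custom-model")
print(current_model.name)       # -> "Custom model"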
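The commented-out `pipe_i2i` lines suggest the img2img pipeline may be re-enabled later. If so, one option (not what this commit does, and assuming a diffusers version that exposes `DiffusionPipeline.components`) is to build the img2img pipeline from the already loaded txt2img pipeline's components instead of calling `from_pretrained` a second time, so the model weights are only loaded once:

import torch
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline

model_path = "example/some-model"  # placeholder for current_model.path

# Load the txt2img pipeline once...
pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16)

# ...and reuse its components (vae, unet, text_encoder, scheduler, ...)
# for the img2img pipeline instead of loading them again.
pipe_i2i = StableDiffusionImg2ImgPipeline(**pipe.components)

if torch.cuda.is_available():
    pipe = pipe.to("cuda")
    pipe_i2i = pipe_i2i.to("cuda")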