stanley committed on
Commit
c2d0224
·
1 Parent(s): 7bb195c

gpu upgrade debug

Browse files
Files changed (1) hide show
  1. app.py +76 -34
app.py CHANGED
@@ -13,7 +13,7 @@ import diffusers
13
  import requests
14
 
15
 
16
- assert tuple(map(int,diffusers.__version__.split("."))) >= (0,9,0), "Please upgrade diffusers to 0.9.0"
17
 
18
  from diffusers.configuration_utils import FrozenDict
19
  from diffusers import (
@@ -41,13 +41,13 @@ from enum import Enum
41
  from utils import *
42
 
43
  # load environment variables from the .env file
44
- if os.path.exists(".env"):
45
- with open(".env") as f:
46
- for line in f:
47
- if line.startswith("#") or not line.strip():
48
- continue
49
- name, value = line.strip().split("=", 1)
50
- os.environ[name] = value
51
 
52
 
53
  # access_token = os.environ.get("HF_ACCESS_TOKEN")
@@ -221,10 +221,19 @@ parser.add_argument(
221
  "--local_model", type=str, help="use a model stored on your PC", default=""
222
  )
223
 
224
- if __name__ == "__main__":
 
 
 
 
 
 
 
 
 
225
  args = parser.parse_args()
226
  else:
227
- args = parser.parse_args(["--debug"])
228
  # args = parser.parse_args(["--debug"])
229
  if args.auth is not None:
230
  args.auth = tuple(args.auth)
@@ -447,10 +456,17 @@ class StableDiffusionInpaint:
447
  selected_scheduler = scheduler_dict.get(scheduler, scheduler_dict["PLMS"])
448
  for item in [inpaint]:
449
  item.scheduler = selected_scheduler
450
- if enable_safety or self.safety_checker is None:
451
  item.safety_checker = self.safety_checker
452
  else:
453
  item.safety_checker = lambda images, **kwargs: (images, False)
 
 
 
 
 
 
 
454
  width, height = image_pil.size
455
  sel_buffer = np.array(image_pil)
456
  img = sel_buffer[:, :, 0:3]
@@ -1088,16 +1104,56 @@ class StableDiffusion:
1088
  # )["images"]
1089
  # return images
1090
 
1091
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1092
  def get_model(token="", model_choice="", model_path=""):
1093
  if "model" not in model:
1094
  model_name = ""
1095
- if args.local_model:
1096
- print(f"Using local_model: {args.local_model}")
1097
- model_path = args.local_model
1098
- elif args.remote_model:
1099
- print(f"Using remote_model: {args.remote_model}")
1100
- model_name = args.remote_model
1101
  if model_choice == ModelChoice.INPAINTING.value:
1102
  if len(model_name) < 1:
1103
  model_name = "runwayml/stable-diffusion-inpainting"
@@ -1105,18 +1161,11 @@ def get_model(token="", model_choice="", model_path=""):
1105
  tmp = StableDiffusionInpaint(
1106
  token=token, model_name=model_name, model_path=model_path
1107
  )
1108
- elif model_choice == ModelChoice.INPAINTING2.value:
1109
- if len(model_name) < 1:
1110
- model_name = "stabilityai/stable-diffusion-2-inpainting"
1111
- print(f"Using [{model_name}] {model_path}")
1112
- tmp = StableDiffusionInpaint(
1113
- token=token, model_name=model_name, model_path=model_path
1114
- )
1115
  elif model_choice == ModelChoice.INPAINTING_IMG2IMG.value:
1116
  print(
1117
  f"Note that {ModelChoice.INPAINTING_IMG2IMG.value} only support remote model and requires larger vRAM"
1118
  )
1119
- tmp = StableDiffusion(token=token, inpainting_model=True)
1120
  else:
1121
  if len(model_name) < 1:
1122
  model_name = (
@@ -1124,19 +1173,12 @@ def get_model(token="", model_choice="", model_path=""):
1124
  if model_choice == ModelChoice.MODEL_1_5.value
1125
  else "CompVis/stable-diffusion-v1-4"
1126
  )
1127
- if model_choice == ModelChoice.MODEL_2_0.value:
1128
- model_name = "stabilityai/stable-diffusion-2-base"
1129
- elif model_choice == ModelChoice.MODEL_2_0_V.value:
1130
- model_name = "stabilityai/stable-diffusion-2"
1131
- elif model_choice == ModelChoice.MODEL_2_1.value:
1132
- model_name = "stabilityai/stable-diffusion-2-1-base"
1133
  tmp = StableDiffusion(
1134
  token=token, model_name=model_name, model_path=model_path
1135
  )
1136
  model["model"] = tmp
1137
  return model["model"]
1138
 
1139
-
1140
  def run_outpaint(
1141
  sel_buffer_str,
1142
  prompt_text,
@@ -1298,7 +1340,7 @@ with blocks as demo:
1298
  # model_choices_lst.insert(0, "local_model")
1299
  elif args.remote_model:
1300
  model_path_input_val = args.remote_model
1301
- model_choices_lst.insert(0, "remote_model")
1302
 
1303
  sd_prompt = gr.Textbox(
1304
  label="Prompt", placeholder="input your prompt here!", lines=2
 
13
  import requests
14
 
15
 
16
+ # assert tuple(map(int,diffusers.__version__.split("."))) >= (0,9,0), "Please upgrade diffusers to 0.9.0"
17
 
18
  from diffusers.configuration_utils import FrozenDict
19
  from diffusers import (
 
41
  from utils import *
42
 
43
  # load environment variables from the .env file
44
+ # if os.path.exists(".env"):
45
+ # with open(".env") as f:
46
+ # for line in f:
47
+ # if line.startswith("#") or not line.strip():
48
+ # continue
49
+ # name, value = line.strip().split("=", 1)
50
+ # os.environ[name] = value
51
 
52
 
53
  # access_token = os.environ.get("HF_ACCESS_TOKEN")
 
221
  "--local_model", type=str, help="use a model stored on your PC", default=""
222
  )
223
 
224
+ # original
225
+ # if __name__ == "__main__":
226
+ # args = parser.parse_args()
227
+ # else:
228
+ # args = parser.parse_args(["--debug"])
229
+ # # args = parser.parse_args(["--debug"])
230
+ # if args.auth is not None:
231
+ # args.auth = tuple(args.auth)
232
+
233
+ if __name__ == "__main__" and not RUN_IN_SPACE:
234
  args = parser.parse_args()
235
  else:
236
+ args = parser.parse_args()
237
  # args = parser.parse_args(["--debug"])
238
  if args.auth is not None:
239
  args.auth = tuple(args.auth)
 
456
  selected_scheduler = scheduler_dict.get(scheduler, scheduler_dict["PLMS"])
457
  for item in [inpaint]:
458
  item.scheduler = selected_scheduler
459
+ if enable_safety:
460
  item.safety_checker = self.safety_checker
461
  else:
462
  item.safety_checker = lambda images, **kwargs: (images, False)
463
+
464
+ # for item in [inpaint]:
465
+ # item.scheduler = selected_scheduler
466
+ # if enable_safety or self.safety_checker is None:
467
+ # item.safety_checker = self.safety_checker
468
+ # else:
469
+ # item.safety_checker = lambda images, **kwargs: (images, False)
470
  width, height = image_pil.size
471
  sel_buffer = np.array(image_pil)
472
  img = sel_buffer[:, :, 0:3]
 
1104
  # )["images"]
1105
  # return images
1106
 
1107
+ # ORIGINAL
1108
+ # def get_model(token="", model_choice="", model_path=""):
1109
+ # if "model" not in model:
1110
+ # model_name = ""
1111
+ # if args.local_model:
1112
+ # print(f"Using local_model: {args.local_model}")
1113
+ # model_path = args.local_model
1114
+ # elif args.remote_model:
1115
+ # print(f"Using remote_model: {args.remote_model}")
1116
+ # model_name = args.remote_model
1117
+ # if model_choice == ModelChoice.INPAINTING.value:
1118
+ # if len(model_name) < 1:
1119
+ # model_name = "runwayml/stable-diffusion-inpainting"
1120
+ # print(f"Using [{model_name}] {model_path}")
1121
+ # tmp = StableDiffusionInpaint(
1122
+ # token=token, model_name=model_name, model_path=model_path
1123
+ # )
1124
+ # elif model_choice == ModelChoice.INPAINTING2.value:
1125
+ # if len(model_name) < 1:
1126
+ # model_name = "stabilityai/stable-diffusion-2-inpainting"
1127
+ # print(f"Using [{model_name}] {model_path}")
1128
+ # tmp = StableDiffusionInpaint(
1129
+ # token=token, model_name=model_name, model_path=model_path
1130
+ # )
1131
+ # elif model_choice == ModelChoice.INPAINTING_IMG2IMG.value:
1132
+ # print(
1133
+ # f"Note that {ModelChoice.INPAINTING_IMG2IMG.value} only support remote model and requires larger vRAM"
1134
+ # )
1135
+ # tmp = StableDiffusion(token=token, inpainting_model=True)
1136
+ # else:
1137
+ # if len(model_name) < 1:
1138
+ # model_name = (
1139
+ # "runwayml/stable-diffusion-v1-5"
1140
+ # if model_choice == ModelChoice.MODEL_1_5.value
1141
+ # else "CompVis/stable-diffusion-v1-4"
1142
+ # )
1143
+ # if model_choice == ModelChoice.MODEL_2_0.value:
1144
+ # model_name = "stabilityai/stable-diffusion-2-base"
1145
+ # elif model_choice == ModelChoice.MODEL_2_0_V.value:
1146
+ # model_name = "stabilityai/stable-diffusion-2"
1147
+ # elif model_choice == ModelChoice.MODEL_2_1.value:
1148
+ # model_name = "stabilityai/stable-diffusion-2-1-base"
1149
+ # tmp = StableDiffusion(
1150
+ # token=token, model_name=model_name, model_path=model_path
1151
+ # )
1152
+ # model["model"] = tmp
1153
+ # return model["model"]
1154
  def get_model(token="", model_choice="", model_path=""):
1155
  if "model" not in model:
1156
  model_name = ""
 
 
 
 
 
 
1157
  if model_choice == ModelChoice.INPAINTING.value:
1158
  if len(model_name) < 1:
1159
  model_name = "runwayml/stable-diffusion-inpainting"
 
1161
  tmp = StableDiffusionInpaint(
1162
  token=token, model_name=model_name, model_path=model_path
1163
  )
 
 
 
 
 
 
 
1164
  elif model_choice == ModelChoice.INPAINTING_IMG2IMG.value:
1165
  print(
1166
  f"Note that {ModelChoice.INPAINTING_IMG2IMG.value} only support remote model and requires larger vRAM"
1167
  )
1168
+ tmp = StableDiffusion(token=token, model_name="runwayml/stable-diffusion-v1-5", inpainting_model=True)
1169
  else:
1170
  if len(model_name) < 1:
1171
  model_name = (
 
1173
  if model_choice == ModelChoice.MODEL_1_5.value
1174
  else "CompVis/stable-diffusion-v1-4"
1175
  )
 
 
 
 
 
 
1176
  tmp = StableDiffusion(
1177
  token=token, model_name=model_name, model_path=model_path
1178
  )
1179
  model["model"] = tmp
1180
  return model["model"]
1181
 
 
1182
  def run_outpaint(
1183
  sel_buffer_str,
1184
  prompt_text,
 
1340
  # model_choices_lst.insert(0, "local_model")
1341
  elif args.remote_model:
1342
  model_path_input_val = args.remote_model
1343
+ # model_choices_lst.insert(0, "remote_model")
1344
 
1345
  sd_prompt = gr.Textbox(
1346
  label="Prompt", placeholder="input your prompt here!", lines=2