toshas committed
Commit df96952
1 parent: 478aa9b

attempt to fix crashing of the demo build

Files changed (1)
  1. app.py +3 -7
app.py CHANGED
@@ -60,17 +60,14 @@ def process_with_loaded_pipeline(
 ):
     # Load and cache the pipeline based on the model type.
     if model_type not in loaded_pipelines.keys():
-        auth_token = os.environ.get("KEV_TOKEN")
         if model_type == "appearance":
             if "lighting" in loaded_pipelines.keys():
                 del loaded_pipelines[
                     "lighting"
                 ]  # to save GPU memory. Can be removed if enough memory is available for faster switching between models
                 torch.cuda.empty_cache()
-            loaded_pipelines[model_type] = (
-                MarigoldIIDAppearancePipeline.from_pretrained(
-                    "prs-eth/marigold-iid-appearance-v1-1", token=auth_token
-                )
+            loaded_pipelines[model_type] = MarigoldIIDAppearancePipeline.from_pretrained(
+                "prs-eth/marigold-iid-appearance-v1-1"
             )
         elif model_type == "lighting":
             if "appearance" in loaded_pipelines.keys():
@@ -79,7 +76,7 @@ def process_with_loaded_pipeline(
                 ]  # to save GPU memory. Can be removed if enough memory is available for faster switching between models
                 torch.cuda.empty_cache()
             loaded_pipelines[model_type] = MarigoldIIDLightingPipeline.from_pretrained(
-                "prs-eth/marigold-iid-lighting-v1-1", token=auth_token
+                "prs-eth/marigold-iid-lighting-v1-1"
             )
 
     # Move the pipeline to GPU if available
@@ -483,7 +480,6 @@ def run_demo_server():
     ).launch(
         server_name="0.0.0.0",
         server_port=7860,
-        ssr=False,
     )
 
 
 
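The last hunk removes ssr=False from the launch() call in run_demo_server. Recent Gradio releases accept no ssr keyword on launch() (the server-side rendering toggle is named ssr_mode), so the unexpected keyword argument plausibly raised a TypeError as soon as the Space booted, which would match the commit message. A hedged sketch of the resulting call, where demo stands in for the gr.Blocks app that app.py actually builds:

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("Marigold IID demo placeholder")  # stand-in for the real UI

demo.launch(
    server_name="0.0.0.0",  # listen on all interfaces, as Spaces require
    server_port=7860,
    # ssr=False removed: launch() rejects unknown keywords, and the SSR
    # toggle in Gradio 5 is named ssr_mode, so the old kwarg could crash
    # the app at startup.
)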