fffiloni committed on
Commit
467adb1
1 Parent(s): 0efd3ee

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -0
app.py CHANGED
@@ -129,6 +129,43 @@ models_rbm = core.Models(
129
  image_model=models.image_model # Add this line
130
  )
131
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
132
  def reset_inference_state():
133
  global models_rbm, models_b, extras, extras_b, device, core, core_b
134
 
 
129
  image_model=models.image_model # Add this line
130
  )
131
 
132
def unload_models_and_clear_cache():
    """Move every loaded model to the CPU and release cached GPU memory.

    Mutates module-level state only: resets the sampling configurations of
    ``extras`` / ``extras_b`` to their defaults, moves ``models_rbm``,
    ``models_b.generator`` and (when present) the SAM model to the CPU,
    freezes the generators, and empties the CUDA allocator cache.

    Returns:
        None. All effects are side effects on module globals.
    """
    global models_rbm, models_b, sam_model, extras, extras_b

    # Restore default sampling configurations.
    # NOTE(review): these constants mirror the values used at initial model
    # setup elsewhere in app.py — confirm they stay in sync if defaults change.
    extras.sampling_configs['cfg'] = 5
    extras.sampling_configs['shift'] = 1
    extras.sampling_configs['timesteps'] = 20
    extras.sampling_configs['t_start'] = 1.0

    extras_b.sampling_configs['cfg'] = 1.1
    extras_b.sampling_configs['shift'] = 1
    extras_b.sampling_configs['timesteps'] = 10
    extras_b.sampling_configs['t_start'] = 1.0

    # Move all models to CPU. `models_to` is a project helper — presumably it
    # walks the container and calls .to(device) on each submodule.
    models_to(models_rbm, device="cpu")
    models_b.generator.to("cpu")

    # Move SAM model components to CPU if the global exists AND is loaded.
    # Fix: the original only checked membership in globals(); a `sam_model`
    # that exists but is None would crash on `sam_model.sam`.
    if 'sam_model' in globals() and sam_model is not None:
        models_to(sam_model, device="cpu")
        models_to(sam_model.sam, device="cpu")

    # Release the CUDA allocator cache and collect Python garbage so the
    # freed tensors actually return memory to the driver.
    torch.cuda.empty_cache()
    gc.collect()

    # Ensure the generators are in eval mode and hold no gradient state.
    for model in [models_rbm.generator, models_b.generator]:
        model.eval()
        for param in model.parameters():
            param.requires_grad = False

    # Second pass: eval()/requires_grad changes above may have dropped the
    # last references to autograd buffers, so clear the cache once more.
    torch.cuda.empty_cache()
    gc.collect()
169
  def reset_inference_state():
170
  global models_rbm, models_b, extras, extras_b, device, core, core_b
171