fantaxy committed on
Commit
3960c92
1 Parent(s): c43a736

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -23
app.py CHANGED
@@ -12,10 +12,6 @@ from PIL import Image
12
  from huggingface_hub import snapshot_download
13
  import gc
14
 
15
- # Force CPU usage
16
- device = "cpu"
17
- dtype = torch.float32
18
-
19
  # Clear memory
20
  gc.collect()
21
  if torch.cuda.is_available():
@@ -28,14 +24,17 @@ css = """
28
  }
29
  """
30
 
 
 
 
 
31
  huggingface_token = os.getenv("HF_TOKEN")
32
 
33
- # Minimal model configuration
34
  model_config = {
35
  "low_cpu_mem_usage": True,
36
  "torch_dtype": dtype,
37
- "use_safetensors": True,
38
- "device_map": "cpu"
39
  }
40
 
41
  model_path = snapshot_download(
@@ -46,38 +45,42 @@ model_path = snapshot_download(
46
  token=huggingface_token,
47
  )
48
 
49
- # Load models on CPU
50
- controlnet = FluxControlNetModel.from_pretrained(
51
- "jasperai/Flux.1-dev-Controlnet-Upscaler",
52
- **model_config
53
- )
 
 
54
 
55
- pipe = FluxControlNetPipeline.from_pretrained(
56
- model_path,
57
- controlnet=controlnet,
58
- **model_config
59
- )
 
 
 
 
 
60
 
61
  # Enable optimizations
62
  pipe.enable_attention_slicing(1)
63
  pipe.enable_vae_slicing()
64
 
65
  MAX_SEED = 1000000
66
- MAX_PIXEL_BUDGET = 64 * 64 # Extremely reduced
67
 
68
  def process_input(input_image, upscale_factor):
69
  input_image = input_image.convert('RGB')
70
 
71
- # Aggressive size reduction
72
  w, h = input_image.size
73
  max_size = int(np.sqrt(MAX_PIXEL_BUDGET))
74
 
75
- # Resize to very small size
76
  new_w = min(w, max_size)
77
  new_h = min(h, max_size)
78
  input_image = input_image.resize((new_w, new_h), Image.LANCZOS)
79
 
80
- # Ensure dimensions are multiples of 8
81
  w = new_w - new_w % 8
82
  h = new_h - new_h % 8
83
 
@@ -94,6 +97,8 @@ def infer(
94
  ):
95
  try:
96
  gc.collect()
 
 
97
 
98
  if randomize_seed:
99
  seed = random.randint(0, MAX_SEED)
@@ -101,7 +106,7 @@ def infer(
101
  input_image, w, h = process_input(input_image, upscale_factor)
102
 
103
  with torch.inference_mode():
104
- generator = torch.Generator().manual_seed(seed)
105
  image = pipe(
106
  prompt="",
107
  control_image=input_image,
@@ -114,6 +119,8 @@ def infer(
114
  ).images[0]
115
 
116
  gc.collect()
 
 
117
 
118
  return [input_image, image, seed]
119
 
@@ -197,7 +204,7 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
197
  show_api=False,
198
  )
199
 
200
- # Minimal launch configuration
201
  demo.queue(max_size=1).launch(
202
  share=False,
203
  debug=True,
 
12
  from huggingface_hub import snapshot_download
13
  import gc
14
 
 
 
 
 
15
  # Clear memory
16
  gc.collect()
17
  if torch.cuda.is_available():
 
24
  }
25
  """
26
 
27
+ # Device configuration
28
+ device = "cuda" if torch.cuda.is_available() else "cpu"
29
+ dtype = torch.float32
30
+
31
  huggingface_token = os.getenv("HF_TOKEN")
32
 
33
+ # Modified model configuration
34
  model_config = {
35
  "low_cpu_mem_usage": True,
36
  "torch_dtype": dtype,
37
+ "use_safetensors": False, # Disabled safetensors
 
38
  }
39
 
40
  model_path = snapshot_download(
 
45
  token=huggingface_token,
46
  )
47
 
48
+ # Load models with modified configuration
49
+ try:
50
+ controlnet = FluxControlNetModel.from_pretrained(
51
+ "jasperai/Flux.1-dev-Controlnet-Upscaler",
52
+ **model_config
53
+ )
54
+ controlnet.to(device)
55
 
56
+ pipe = FluxControlNetPipeline.from_pretrained(
57
+ model_path,
58
+ controlnet=controlnet,
59
+ **model_config
60
+ )
61
+ pipe.to(device)
62
+
63
+ except Exception as e:
64
+ print(f"Error loading models: {str(e)}")
65
+ raise
66
 
67
  # Enable optimizations
68
  pipe.enable_attention_slicing(1)
69
  pipe.enable_vae_slicing()
70
 
71
  MAX_SEED = 1000000
72
+ MAX_PIXEL_BUDGET = 64 * 64
73
 
74
  def process_input(input_image, upscale_factor):
75
  input_image = input_image.convert('RGB')
76
 
 
77
  w, h = input_image.size
78
  max_size = int(np.sqrt(MAX_PIXEL_BUDGET))
79
 
 
80
  new_w = min(w, max_size)
81
  new_h = min(h, max_size)
82
  input_image = input_image.resize((new_w, new_h), Image.LANCZOS)
83
 
 
84
  w = new_w - new_w % 8
85
  h = new_h - new_h % 8
86
 
 
97
  ):
98
  try:
99
  gc.collect()
100
+ if torch.cuda.is_available():
101
+ torch.cuda.empty_cache()
102
 
103
  if randomize_seed:
104
  seed = random.randint(0, MAX_SEED)
 
106
  input_image, w, h = process_input(input_image, upscale_factor)
107
 
108
  with torch.inference_mode():
109
+ generator = torch.Generator(device=device).manual_seed(seed)
110
  image = pipe(
111
  prompt="",
112
  control_image=input_image,
 
119
  ).images[0]
120
 
121
  gc.collect()
122
+ if torch.cuda.is_available():
123
+ torch.cuda.empty_cache()
124
 
125
  return [input_image, image, seed]
126
 
 
204
  show_api=False,
205
  )
206
 
207
+ # Launch configuration
208
  demo.queue(max_size=1).launch(
209
  share=False,
210
  debug=True,