Spaces: Running on Zero
AlekseyCalvin committed: Update app.py
app.py CHANGED
@@ -34,19 +34,10 @@ with open('loras.json', 'r') as f:
 # Initialize the base model with authentication and specify the device
 pipe = DiffusionPipeline.from_pretrained("sayakpaul/FLUX.1-merged", torch_dtype=dtype, token=hf_token).to(device)
 
-MAX_SEED =
+MAX_SEED = 2**32 - 1
 MAX_IMAGE_SIZE = 2048
 
 
-device_map = infer_auto_device_map(
-    model,
-    max_memory=max_memory,
-    no_split_module_classes=["DecoderLayer", "Attention", "MLP", "LayerNorm", "Linear"],
-    dtype='float16'
-)
-
-model = dispatch_model(model, device_map='torch.cuda:0')
-
 class calculateDuration:
     def __init__(self, activity_name=""):
         self.activity_name = activity_name
@@ -63,7 +54,7 @@ class calculateDuration:
         else:
             print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=90)
 def generate_images(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, num_images, progress):
     generator = torch.Generator(device=device).manual_seed(seed)
     images = []
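
For context: the removed block called accelerate's infer_auto_device_map and dispatch_model on names (model, max_memory) that do not appear in the surrounding code, and dispatch_model expects a device map in dictionary form rather than the string 'torch.cuda:0', so dropping it in favor of the plain .to(device) call appears intentional. Below is a minimal, illustrative sketch of how the pieces this commit keeps or adds (the pipeline initialization, MAX_SEED, and the @spaces.GPU(duration=90) decorator) typically fit together on a ZeroGPU Space. The randomize_seed flag, the default argument values, the bfloat16 dtype, and the omission of the auth token are assumptions made for the sketch; the real generate_images in app.py also takes trigger_word, lora_scale, num_images, and progress arguments.

import random

import spaces
import torch
from diffusers import DiffusionPipeline

dtype = torch.bfloat16  # assumption: matches the dtype defined earlier in app.py
device = "cuda" if torch.cuda.is_available() else "cpu"

# Same initialization as in the diff; the token argument is omitted here and
# would normally come from the Space's secret (hf_token).
pipe = DiffusionPipeline.from_pretrained(
    "sayakpaul/FLUX.1-merged", torch_dtype=dtype
).to(device)

MAX_SEED = 2**32 - 1   # upper bound used when picking or validating seeds
MAX_IMAGE_SIZE = 2048

@spaces.GPU(duration=90)  # request a ZeroGPU slot for up to 90 seconds per call
def generate_images(prompt, steps=28, seed=0, randomize_seed=True,
                    cfg_scale=3.5, width=1024, height=1024, num_images=1):
    # Illustrative seed handling: draw a fresh seed per call unless one is pinned.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)
    images = []
    for _ in range(num_images):
        images.append(
            pipe(prompt=prompt, num_inference_steps=steps, guidance_scale=cfg_scale,
                 width=width, height=height, generator=generator).images[0]
        )
    return images, seed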