charbel-malo committed · verified
Commit e6dda2a
1 Parent(s): d997506

Upload folder using huggingface_hub

Files changed (5)
  1. .gitattributes +1 -0
  2. .gitignore +1 -0
  3. README.md +15 -0
  4. app.py +215 -0
  5. requirements.txt +9 -0
.gitattributes CHANGED
@@ -33,5 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # models/**/* filter=lfs diff=lfs merge=lfs -text
  models/ByteDance/Hyper-SD/hypersd_tearser.jpg filter=lfs diff=lfs merge=lfs -text
  models/black-forest-labs/FLUX.1-dev/dev_grid.jpg filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
+ .DS_Store
README.md ADDED
@@ -0,0 +1,15 @@
+ ---
+ title: flux-loras
+ emoji: 🪄🧿
+ colorFrom: blue
+ colorTo: green
+ sdk: gradio
+ sdk_version: 4.26.0
+ app_file: app.py
+ pinned: true
+ thumbnail: >-
+   https://cdn-uploads.huggingface.co/production/uploads/66d9c5d1e83856057c852a15/yq-nzTWd81HB6NJ_IKGRi.jpeg
+ short_description: GrowerAI Flux Vision Studio - Unrealistically Fast
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,215 @@
+ import spaces
+ import argparse
+ import os
+ import time
+ from os import path
+ from safetensors.torch import load_file
+ from huggingface_hub import hf_hub_download
+ from transformers.utils.hub import move_cache
+
+ # move_cache()
+ cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
+ # os.environ["TRANSFORMERS_CACHE"] = cache_path
+ os.environ["HF_HUB_CACHE"] = cache_path
+ os.environ["HF_HOME"] = cache_path
+
+ import gradio as gr
+ import torch
+ from diffusers import FluxPipeline
+
+ torch.backends.cuda.matmul.allow_tf32 = True
+
+ class timer:
+     def __init__(self, method_name="timed process"):
+         self.method = method_name
+     def __enter__(self):
+         self.start = time.time()
+         print(f"{self.method} starts")
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         end = time.time()
+         print(f"{self.method} took {str(round(end - self.start, 2))}s")
+
+ if not path.exists(cache_path):
+     os.makedirs(cache_path, exist_ok=True)
+
+ pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
+ pipe.load_lora_weights(hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"))
+ pipe.fuse_lora(lora_scale=0.125)
+ pipe.to(device="cuda", dtype=torch.bfloat16)
+
+
+ css = """
+ # gen_btn{height: 100%}
+ #gen_column{align-self: stretch}
+ .primary{background-color: #4C76FF !important}
+ #grower-label-span span{background-color: #4C76FF !important}
+ #grower-label-image label{background-color: #4C76FF !important}
+ """
+
+
+
+ js_code = """
+ function createGradioAnimation() {
+     const emojis = ['✨', '🤖', '📈', '🎨', '🔍', '📱', '🔮', '🥰', '🌈', '💖'];
+     const gravity = 0.5;
+     const bounceFactor = -0.7;
+     const friction = 0.9;
+
+     document.getElementById('gen_btn').addEventListener('click', (event) => {
+         const count = Math.floor(Math.random() * 6) + 10;
+         for (let i = 0; i < count; i++) {
+             createEmoji(event.clientX, event.clientY);
+         }
+     });
+
+     function createEmoji(x, y) {
+         const emojiElement = document.createElement('div');
+         emojiElement.textContent = emojis[Math.floor(Math.random() * emojis.length)];
+         emojiElement.style.position = 'absolute';
+         emojiElement.style.fontSize = '24px';
+         emojiElement.style.transition = 'opacity 0.1s ease-out';
+         document.body.appendChild(emojiElement);
+
+         const rect = emojiElement.getBoundingClientRect();
+         let posX = x - rect.width / 2;
+         let posY = y - rect.height / 2;
+         let velX = (Math.random() - 0.5) * 10;
+         let velY = (Math.random() - 0.5) * 10;
+
+         function update() {
+             if (posY + rect.height >= window.innerHeight) {
+                 posY = window.innerHeight - rect.height;
+                 velY *= bounceFactor;
+             } else {
+                 velY += gravity;
+             }
+
+             if (posX <= 0 || posX + rect.width >= window.innerWidth) {
+                 velX *= bounceFactor;
+             }
+
+             velX *= friction;
+             velY *= friction;
+
+             posX += velX;
+             posY += velY;
+
+             emojiElement.style.transform = `translate(${posX}px, ${posY}px)`;
+
+             if (Math.abs(velX) > 0.1 || Math.abs(velY) > 0.1) {
+                 requestAnimationFrame(update);
+             } else {
+                 emojiElement.style.opacity = 0;
+                 setTimeout(function(){
+                     emojiElement.remove();}, 2000);
+             }
+         }
+
+         update();
+     }
+     return 'Animation created';
+ }
+ """
+
+ with gr.Blocks(theme='charbel-malo/Crystal', js=js_code) as demo:
+     gr.Markdown(
+         """
+         <div style="text-align: left;margin-top:20px">
+             <h1><img src="https://staging.the-grower.com/assets/images/grower_logo_dark.png" style="height:50px;object-fit:contain;"> GrowerAI VisionPRO</h1>
+             <p style="font-size: 1rem; margin-bottom: 1.5rem;">HyperFlux-based Image Generation Model 8Steps-Lora</p>
+         </div>
+         """
+     )
+
+     with gr.Row():
+         with gr.Column(scale=3):
+             with gr.Group():
+                 base_prompt = gr.Textbox(
+                     label="Base Prompt",
+                     placeholder="E.g., A serene landscape with mountains and a lake at sunset",
+                     lines=3,
+                     elem_id="grower-label-span"
+                 )
+                 with gr.Accordion("Advanced Prompt Settings", open=False):
+                     subject = gr.Textbox(label="Subject", placeholder="Enter the subject")
+                     object_ = gr.Textbox(label="Object", placeholder="Enter the object")
+                     style = gr.Textbox(label="Style", placeholder="Enter the style")
+                     clothing = gr.Textbox(label="Clothing", placeholder="Enter the clothing")
+                     objective = gr.Dropdown(
+                         choices=["digital marketing post", "website hero visual", "Ad cover", "Movie poster"],
+                         value=None,
+                         multiselect=False,
+                         label="Objective",
+                         info="Select an objective"
+                     )
+                 with gr.Accordion("Advanced Settings", open=False):
+                     with gr.Group():
+                         with gr.Row():
+                             height = gr.Slider(label="Height", minimum=256, maximum=1152, step=64, value=1024)
+                             width = gr.Slider(label="Width", minimum=256, maximum=1152, step=64, value=1024)
+
+                         with gr.Row():
+                             steps = gr.Slider(label="Inference Steps", minimum=6, maximum=25, step=1, value=8)
+                             scales = gr.Slider(label="Guidance Scale", minimum=0.0, maximum=5.0, step=0.1, value=3.5)
+
+                         seed = gr.Number(label="Seed (for reproducibility)", value=3413, precision=0)
+
+             generate_btn = gr.Button("Generate Image", variant="primary", scale=1, elem_id="gen_btn")
+
+         with gr.Column(scale=4):
+             output = gr.Image(label="Your Generated Image", elem_id="grower-label-image")
+
+     gr.Markdown(
+         """
+         <div style="margin: 2rem auto; padding: 1rem; border-radius: 10px;">
+             <h2 style="font-size: 1.5rem; margin-bottom: 1rem;">How to Use</h2>
+             <ol style="padding-left: 1.5rem;">
+                 <li>Enter a detailed description of the image you want to create.</li>
+                 <li>Adjust advanced settings if desired (tap to expand).</li>
+                 <li>Tap "Generate Image" and wait for your creation!</li>
+             </ol>
+             <p style="margin-top: 1rem; font-style: italic;">Tip: Be specific in your description for best results!</p>
+         </div>
+         """
+     )
+
+     @spaces.GPU
+     def process_image(height, width, steps, scales, base_prompt, subject, object_, style, clothing, objective, seed):
+         # Build the advanced prompt
+         advanced_prompt_template = (
+             "Create a highly stylized digital avatar of {subject}, holding {object}. "
+             "joy, simplified {subject} avatar or emoji. , typical of 3D digital art :: "
+             "The overall style is {style}. {clothing}. and modern digital art style, "
+             "detailed shading, and dynamic positioning that makes it suitable for {objective}"
+         )
+         advanced_prompt = advanced_prompt_template.format(
+             subject=subject,
+             object=object_,
+             style=style,
+             clothing=clothing,
+             objective=objective
+         )
+         # Combine base prompt and advanced prompt
+         prompt = advanced_prompt
+         global pipe
+         with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("inference"):
+             return pipe(
+                 prompt=[prompt],
+                 generator=torch.Generator().manual_seed(int(seed)),
+                 num_inference_steps=int(steps),
+                 guidance_scale=float(scales),
+                 height=int(height),
+                 width=int(width),
+                 max_sequence_length=256
+             ).images[0]
+
+     generate_btn.click(
+         process_image,
+         inputs=[
+             height, width, steps, scales, base_prompt, subject, object_, style, clothing, objective, seed
+         ],
+         outputs=output
+     )
+
+ if __name__ == "__main__":
+     demo.launch()
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ accelerate
+ diffusers==0.30.0
+ invisible_watermark
+ torch
+ transformers==4.43.3
+ xformers
+ sentencepiece
+ peft
+ spaces
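
For reference, a minimal standalone sketch (not part of this commit) of the same Hyper-SD 8-step FLUX pipeline that app.py configures, usable for testing outside the Gradio UI. It assumes a CUDA GPU with enough memory for FLUX.1-dev and the packages pinned in requirements.txt; the prompt and settings reuse the Space's defaults, and the output filename is illustrative.

# sketch.py - hypothetical local test script, assumes a CUDA GPU and the pinned dependencies
import torch
from diffusers import FluxPipeline
from huggingface_hub import hf_hub_download

# Same base model + Hyper-SD 8-step LoRA setup as app.py
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
pipe.load_lora_weights(hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"))
pipe.fuse_lora(lora_scale=0.125)  # LoRA scale used by the Space
pipe.to("cuda")

# Defaults taken from the Space's UI controls
image = pipe(
    prompt="A serene landscape with mountains and a lake at sunset",
    num_inference_steps=8,  # Hyper-SD 8-step schedule
    guidance_scale=3.5,
    height=1024,
    width=1024,
    generator=torch.Generator().manual_seed(3413),
    max_sequence_length=256,
).images[0]
image.save("output.png")  # illustrative output path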