adamelliotfields committed • Commit 7f5047d • Parent(s): 2401ce9

Preset improvements

Files changed:
- lib/config.py +19 -29
- lib/preset.py +210 -192
- pages/1_💬_Text_Generation.py +1 -1
- pages/2_🎨_Text_to_Image.py +13 -33
lib/config.py
CHANGED
@@ -1,6 +1,19 @@
 from dataclasses import dataclass
 from typing import Dict, List
 
+from .preset import preset
+
+
+def txt2img_models_from_presets(presets):
+    models = {}
+    for p in presets:
+        service = p.service
+        model_id = p.model_id
+        if service not in models:
+            models[service] = []
+        models[service].append(model_id)
+    return models
+
 
 @dataclass
 class Txt2TxtConfig:
@@ -32,6 +45,7 @@ class Config:
     txt2txt: Txt2TxtConfig
 
 
+# TODO: API keys should be with services (make a dataclass)
 config = Config(
     title="API Inference",
     icon="⚡",
@@ -50,34 +64,9 @@ config = Config(
         "Hugging Face": 2,
         "Together": 0,
     },
-    models={
-        # Model identifiers referenced in Text_to_Image.py
-        "Black Forest Labs": [
-            "flux-dev",
-            "flux-pro",
-            "flux-pro-1.1",
-        ],
-        "Fal": [
-            "fal-ai/aura-flow",
-            "fal-ai/flux/dev",
-            "fal-ai/flux/schnell",
-            "fal-ai/flux-pro",
-            "fal-ai/flux-pro/v1.1",
-            "fal-ai/fooocus",
-            "fal-ai/kolors",
-            "fal-ai/stable-diffusion-v3-medium",
-        ],
-        "Hugging Face": [
-            "black-forest-labs/flux.1-dev",
-            "black-forest-labs/flux.1-schnell",
-            "stabilityai/stable-diffusion-xl-base-1.0",
-        ],
-        "Together": [
-            "black-forest-labs/FLUX.1-schnell-Free",
-        ],
-    },
+    models=txt2img_models_from_presets(preset.txt2img.presets),
     hidden_parameters=[
-        #
+        # Sent to API but not shown in generation parameters accordion
         "enable_safety_checker",
         "max_sequence_length",
         "num_images",
@@ -90,7 +79,7 @@ config = Config(
         "styles",
         "sync_mode",
     ],
-    negative_prompt="ugly, unattractive, disfigured, deformed, mutated, malformed, blurry, grainy,
+    negative_prompt="ugly, unattractive, disfigured, deformed, mutated, malformed, blurry, grainy, oversaturated, undersaturated, overexposed, underexposed, worst quality, low details, lowres, watermark, signature, sloppy, cluttered",
     default_image_size="square_hd",
     image_sizes=[
         "landscape_16_9",
@@ -99,7 +88,7 @@ config = Config(
         "portrait_4_3",
         "portrait_16_9",
     ],
-    default_aspect_ratio="1024x1024",
+    default_aspect_ratio="1024x1024",  # fooocus aspect ratios
     aspect_ratios=[
         "704x1408",  # 1:2
         "704x1344",  # 11:21
@@ -143,6 +132,7 @@ config = Config(
             "llama-3.1-sonar-large-128k-chat",
             "llama-3.1-sonar-small-128k-online",
             "llama-3.1-sonar-large-128k-online",
+            "llama-3.1-sonar-huge-128k-online",
         ],
     },
 ),
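Note: the new txt2img_models_from_presets helper derives the per-service model map from the presets themselves, which is what makes the hand-written models dict above removable. A minimal sketch of the resulting shape; the function body is copied from the diff, while the trimmed-down Txt2ImgPreset dataclass and the two sample presets are illustrative (the real class has many more optional fields):

    from dataclasses import dataclass

    @dataclass
    class Txt2ImgPreset:  # trimmed to the fields used here
        name: str
        service: str
        model_id: str

    def txt2img_models_from_presets(presets):
        # Group model ids by service, preserving preset order within each service.
        models = {}
        for p in presets:
            service = p.service
            model_id = p.model_id
            if service not in models:
                models[service] = []
            models[service].append(model_id)
        return models

    sample = [
        Txt2ImgPreset("FLUX.1 Dev", "Fal", "fal-ai/flux/dev"),
        Txt2ImgPreset("SDXL", "Hugging Face", "stabilityai/stable-diffusion-xl-base-1.0"),
    ]
    print(txt2img_models_from_presets(sample))
    # {'Fal': ['fal-ai/flux/dev'], 'Hugging Face': ['stabilityai/stable-diffusion-xl-base-1.0']}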
lib/preset.py
CHANGED
@@ -14,6 +14,8 @@ class Txt2TxtPreset:
 class Txt2ImgPreset:
     # FLUX1.1 has no scale or steps
     name: str
+    service: str
+    model_id: str
     guidance_scale: Optional[float] = None
     guidance_scale_min: Optional[float] = None
     guidance_scale_max: Optional[float] = None
@@ -32,25 +34,10 @@ class Txt2TxtPresets:
 
 @dataclass
 class Txt2ImgPresets:
-    # bfl
-    flux_1_1_pro_bfl: Txt2ImgPreset
-    flux_dev_bfl: Txt2ImgPreset
-    flux_pro_bfl: Txt2ImgPreset
-    # fal
-    aura_flow: Txt2ImgPreset
-    flux_1_1_pro_fal: Txt2ImgPreset
-    flux_dev_fal: Txt2ImgPreset
-    flux_pro_fal: Txt2ImgPreset
-    flux_schnell_fal: Txt2ImgPreset
-    fooocus: Txt2ImgPreset
-    kolors: Txt2ImgPreset
-    stable_diffusion_3: Txt2ImgPreset
-    # hf
-    flux_dev_hf: Txt2ImgPreset
-    flux_schnell_hf: Txt2ImgPreset
-    stable_diffusion_xl: Txt2ImgPreset
-    # together
-    flux_schnell_free_together: Txt2ImgPreset
+    presets: List[Txt2ImgPreset] = field(default_factory=list)
+
+    def __iter__(self):
+        return iter(self.presets)
 
 
 @dataclass
@@ -61,7 +48,6 @@ class Preset:
 
 preset = Preset(
     txt2txt=Txt2TxtPresets(
-        # Every service has model and system messages
        hugging_face=Txt2TxtPreset(
            frequency_penalty=0.0,
            frequency_penalty_min=-2.0,
@@ -76,177 +62,209 @@ preset = Preset(
         ),
     ),
     txt2img=Txt2ImgPresets(
-        # ... old per-model keyword presets (aura_flow, flux_*, fooocus, kolors, stable_diffusion_*): 172 deleted lines not shown ...
+        presets=[
+            Txt2ImgPreset(
+                "AuraFlow",
+                "Fal",
+                "fal-ai/aura-flow",
+                guidance_scale=3.5,
+                guidance_scale_min=1.0,
+                guidance_scale_max=10.0,
+                num_inference_steps=28,
+                num_inference_steps_min=10,
+                num_inference_steps_max=50,
+                parameters=["seed", "num_inference_steps", "guidance_scale", "expand_prompt"],
+                kwargs={"num_images": 1, "sync_mode": False},
+            ),
+            Txt2ImgPreset(
+                "FLUX1.1 Pro",
+                "Black Forest Labs",
+                "flux-pro-1.1",
+                parameters=["seed", "width", "height", "prompt_upsampling"],
+                kwargs={"safety_tolerance": 6},
+            ),
+            Txt2ImgPreset(
+                "FLUX.1 Pro",
+                "Black Forest Labs",
+                "flux-pro",
+                guidance_scale=2.5,
+                guidance_scale_min=1.5,
+                guidance_scale_max=5.0,
+                num_inference_steps=40,
+                num_inference_steps_min=10,
+                num_inference_steps_max=50,
+                parameters=["seed", "width", "height", "steps", "guidance", "prompt_upsampling"],
+                kwargs={"safety_tolerance": 6, "interval": 1},
+            ),
+            Txt2ImgPreset(
+                "FLUX.1 Dev",
+                "Black Forest Labs",
+                "flux-dev",
+                num_inference_steps=28,
+                num_inference_steps_min=10,
+                num_inference_steps_max=50,
+                guidance_scale=3.0,
+                guidance_scale_min=1.5,
+                guidance_scale_max=5.0,
+                parameters=["seed", "width", "height", "steps", "guidance", "prompt_upsampling"],
+                kwargs={"safety_tolerance": 6},
+            ),
+            Txt2ImgPreset(
+                "FLUX1.1 Pro",
+                "Fal",
+                "fal-ai/flux-pro/v1.1",
+                parameters=["seed", "image_size"],
+                kwargs={
+                    "num_images": 1,
+                    "sync_mode": False,
+                    "safety_tolerance": 6,
+                    "enable_safety_checker": False,
+                },
+            ),
+            Txt2ImgPreset(
+                "FLUX.1 Pro",
+                "Fal",
+                "fal-ai/flux-pro",
+                guidance_scale=2.5,
+                guidance_scale_min=1.5,
+                guidance_scale_max=5.0,
+                num_inference_steps=40,
+                num_inference_steps_min=10,
+                num_inference_steps_max=50,
+                parameters=["seed", "image_size", "num_inference_steps", "guidance_scale"],
+                kwargs={"num_images": 1, "sync_mode": False, "safety_tolerance": 6},
+            ),
+            Txt2ImgPreset(
+                "FLUX.1 Dev",
+                "Fal",
+                "fal-ai/flux/dev",
+                num_inference_steps=28,
+                num_inference_steps_min=10,
+                num_inference_steps_max=50,
+                guidance_scale=3.0,
+                guidance_scale_min=1.5,
+                guidance_scale_max=5.0,
+                parameters=["seed", "image_size", "num_inference_steps", "guidance_scale"],
+                kwargs={"num_images": 1, "sync_mode": False, "safety_tolerance": 6},
+            ),
+            Txt2ImgPreset(
+                "FLUX.1 Schnell",
+                "Fal",
+                "fal-ai/flux/schnell",
+                num_inference_steps=4,
+                num_inference_steps_min=1,
+                num_inference_steps_max=12,
+                parameters=["seed", "image_size", "num_inference_steps"],
+                kwargs={"num_images": 1, "sync_mode": False, "enable_safety_checker": False},
+            ),
+            Txt2ImgPreset(
+                "FLUX.1 Dev",
+                "Hugging Face",
+                "black-forest-labs/flux.1-dev",
+                num_inference_steps=28,
+                num_inference_steps_min=10,
+                num_inference_steps_max=50,
+                guidance_scale=3.0,
+                guidance_scale_min=1.5,
+                guidance_scale_max=5.0,
+                parameters=["width", "height", "guidance_scale", "num_inference_steps"],
+                kwargs={"max_sequence_length": 512},
+            ),
+            Txt2ImgPreset(
+                "FLUX.1 Schnell",
+                "Hugging Face",
+                "black-forest-labs/flux.1-schnell",
+                num_inference_steps=4,
+                num_inference_steps_min=1,
+                num_inference_steps_max=12,
+                parameters=["width", "height", "num_inference_steps"],
+                kwargs={"guidance_scale": 0.0, "max_sequence_length": 256},
+            ),
+            Txt2ImgPreset(
+                "FLUX.1 Schnell Free",
+                "Together",
+                "black-forest-labs/FLUX.1-schnell-Free",
+                num_inference_steps=4,
+                num_inference_steps_min=1,
+                num_inference_steps_max=12,
+                parameters=["model", "seed", "width", "height", "steps"],
+                kwargs={"n": 1},
+            ),
+            Txt2ImgPreset(
+                "Fooocus",
+                "Fal",
+                "fal-ai/fooocus",
+                guidance_scale=4.0,
+                guidance_scale_min=1.0,
+                guidance_scale_max=10.0,
+                parameters=["seed", "negative_prompt", "aspect_ratio", "guidance_scale"],
+                kwargs={
+                    "num_images": 1,
+                    "sync_mode": True,
+                    "enable_safety_checker": False,
+                    "output_format": "png",
+                    "sharpness": 2,
+                    "styles": ["Fooocus Enhance", "Fooocus V2", "Fooocus Sharp"],
+                    "performance": "Quality",
+                },
+            ),
+            Txt2ImgPreset(
+                "Kolors",
+                "Fal",
+                "fal-ai/kolors",
+                guidance_scale=5.0,
+                guidance_scale_min=1.0,
+                guidance_scale_max=10.0,
+                num_inference_steps=50,
+                num_inference_steps_min=10,
+                num_inference_steps_max=50,
+                parameters=["seed", "negative_prompt", "image_size", "guidance_scale", "num_inference_steps"],
+                kwargs={
+                    "num_images": 1,
+                    "sync_mode": True,
+                    "enable_safety_checker": False,
+                    "scheduler": "EulerDiscreteScheduler",
+                },
+            ),
+            Txt2ImgPreset(
+                "SD3",
+                "Fal",
+                "fal-ai/stable-diffusion-v3-medium",
+                guidance_scale=5.0,
+                guidance_scale_min=1.0,
+                guidance_scale_max=10.0,
+                num_inference_steps=28,
+                num_inference_steps_min=10,
+                num_inference_steps_max=50,
+                parameters=[
+                    "seed",
+                    "negative_prompt",
+                    "image_size",
+                    "guidance_scale",
+                    "num_inference_steps",
+                    "prompt_expansion",
+                ],
+                kwargs={"num_images": 1, "sync_mode": True, "enable_safety_checker": False},
+            ),
+            Txt2ImgPreset(
+                "SDXL",
+                "Hugging Face",
+                "stabilityai/stable-diffusion-xl-base-1.0",
+                guidance_scale=7.0,
+                guidance_scale_min=1.0,
+                guidance_scale_max=10.0,
+                num_inference_steps=40,
+                num_inference_steps_min=10,
+                num_inference_steps_max=50,
+                parameters=[
+                    "seed",
+                    "negative_prompt",
+                    "width",
+                    "height",
+                    "guidance_scale",
+                    "num_inference_steps",
+                ],
+            ),
+        ]
     ),
 )
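Note: Txt2ImgPresets is now a thin container around a list instead of one attribute per model, and its __iter__ method lets callers loop over the presets directly. A minimal sketch of how a consumer might build a lookup table, assuming the page-level import is from lib.preset import preset (the by_model_id helper is illustrative, not part of this commit):

    from lib.preset import preset

    def by_model_id(txt2img_presets):
        # Works because Txt2ImgPresets.__iter__ yields each Txt2ImgPreset.
        return {p.model_id: p for p in txt2img_presets}

    lookup = by_model_id(preset.txt2img)
    flux_dev = lookup["fal-ai/flux/dev"]
    print(flux_dev.name, flux_dev.service)  # FLUX.1 Dev Fal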
pages/1_💬_Text_Generation.py
CHANGED
@@ -43,7 +43,7 @@ st.logo("logo.png")
 st.sidebar.header("Settings")
 service = st.sidebar.selectbox(
     "Service",
-    options=
+    options=SERVICE_SESSION.keys(),
     index=0,
     disabled=st.session_state.running,
 )
pages/2_🎨_Text_to_Image.py
CHANGED
@@ -20,29 +20,9 @@ SESSION_TOKEN = {
     "api_key_together": os.environ.get("TOGETHER_API_KEY") or None,
 }
 
-
-
-PRESET_MODEL = {
-    # bfl
-    "flux-pro-1.1": preset.txt2img.flux_1_1_pro_bfl,
-    "flux-pro": preset.txt2img.flux_pro_bfl,
-    "flux-dev": preset.txt2img.flux_dev_bfl,
-    # fal
-    "fal-ai/aura-flow": preset.txt2img.aura_flow,
-    "fal-ai/flux/dev": preset.txt2img.flux_dev_fal,
-    "fal-ai/flux/schnell": preset.txt2img.flux_schnell_fal,
-    "fal-ai/flux-pro": preset.txt2img.flux_pro_fal,
-    "fal-ai/flux-pro/v1.1": preset.txt2img.flux_1_1_pro_fal,
-    "fal-ai/fooocus": preset.txt2img.fooocus,
-    "fal-ai/kolors": preset.txt2img.kolors,
-    "fal-ai/stable-diffusion-v3-medium": preset.txt2img.stable_diffusion_3,
-    # hf
-    "black-forest-labs/flux.1-dev": preset.txt2img.flux_dev_hf,
-    "black-forest-labs/flux.1-schnell": preset.txt2img.flux_schnell_hf,
-    "stabilityai/stable-diffusion-xl-base-1.0": preset.txt2img.stable_diffusion_xl,
-    # together
-    "black-forest-labs/FLUX.1-schnell-Free": preset.txt2img.flux_schnell_free_together,
-}
+PRESET_MODEL = {}
+for p in preset.txt2img.presets:
+    PRESET_MODEL[p.model_id] = p
 
 st.set_page_config(
     page_title=f"{config.title} | Text to Image",
@@ -108,8 +88,8 @@ st.html("""
 
 # Build parameters from preset by rendering the appropriate input widgets
 parameters = {}
-preset = PRESET_MODEL[model]
-for param in preset.parameters:
+model_preset = PRESET_MODEL[model]
+for param in model_preset.parameters:
     if param == "model":
         parameters[param] = model
     if param == "seed":
@@ -161,18 +141,18 @@ for param in preset.parameters:
     if param in ["guidance_scale", "guidance"]:
         parameters[param] = st.sidebar.slider(
             "Guidance Scale",
-            preset.guidance_scale_min,
-            preset.guidance_scale_max,
-            preset.guidance_scale,
+            model_preset.guidance_scale_min,
+            model_preset.guidance_scale_max,
+            model_preset.guidance_scale,
             0.1,
             disabled=st.session_state.running,
         )
     if param in ["num_inference_steps", "steps"]:
         parameters[param] = st.sidebar.slider(
             "Inference Steps",
-            preset.num_inference_steps_min,
-            preset.num_inference_steps_max,
-            preset.num_inference_steps,
+            model_preset.num_inference_steps_min,
+            model_preset.num_inference_steps_max,
+            model_preset.num_inference_steps,
             1,
             disabled=st.session_state.running,
         )
@@ -280,8 +260,8 @@ if prompt := st.chat_input(
 
     with st.chat_message("assistant"):
         with st.spinner("Running..."):
-            if preset.kwargs:
-                parameters.update(preset.kwargs)
+            if model_preset.kwargs:
+                parameters.update(model_preset.kwargs)
             session_key = f"api_key_{service.lower().replace(' ', '_')}"
            api_key = st.session_state[session_key] or SESSION_TOKEN[session_key]
            image = txt2img_generate(api_key, service, model, prompt, parameters)
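Note: PRESET_MODEL is now derived by looping over preset.txt2img.presets instead of being written out by hand, and the preset's fixed kwargs are merged into the widget-built parameters right before the generate call. A minimal sketch of that merge, using the FLUX.1 Dev (Fal) values from this commit; the seed is a stand-in for whatever the sidebar widgets produce:

    # Values normally collected from the sidebar widgets for the selected model.
    parameters = {"seed": 42, "image_size": "square_hd", "num_inference_steps": 28, "guidance_scale": 3.0}

    # Fixed, non-user-facing settings carried by the preset (Txt2ImgPreset.kwargs).
    preset_kwargs = {"num_images": 1, "sync_mode": False, "safety_tolerance": 6}

    if preset_kwargs:
        parameters.update(preset_kwargs)

    # parameters now holds both the widget values and the preset kwargs and is
    # what gets passed to txt2img_generate(api_key, service, model, prompt, parameters).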