adamelliotfields committed on
Commit
7f5047d
β€’
1 Parent(s): 2401ce9

Preset improvements

Browse files
lib/config.py CHANGED
@@ -1,6 +1,19 @@
1
  from dataclasses import dataclass
2
  from typing import Dict, List
3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
  @dataclass
6
  class Txt2TxtConfig:
@@ -32,6 +45,7 @@ class Config:
32
  txt2txt: Txt2TxtConfig
33
 
34
 
 
35
  config = Config(
36
  title="API Inference",
37
  icon="⚑",
@@ -50,34 +64,9 @@ config = Config(
50
  "Hugging Face": 2,
51
  "Together": 0,
52
  },
53
- models={
54
- # Model identifiers referenced in Text_to_Image.py
55
- "Black Forest Labs": [
56
- "flux-dev",
57
- "flux-pro",
58
- "flux-pro-1.1",
59
- ],
60
- "Fal": [
61
- "fal-ai/aura-flow",
62
- "fal-ai/flux/dev",
63
- "fal-ai/flux/schnell",
64
- "fal-ai/flux-pro",
65
- "fal-ai/flux-pro/v1.1",
66
- "fal-ai/fooocus",
67
- "fal-ai/kolors",
68
- "fal-ai/stable-diffusion-v3-medium",
69
- ],
70
- "Hugging Face": [
71
- "black-forest-labs/flux.1-dev",
72
- "black-forest-labs/flux.1-schnell",
73
- "stabilityai/stable-diffusion-xl-base-1.0",
74
- ],
75
- "Together": [
76
- "black-forest-labs/FLUX.1-schnell-Free",
77
- ],
78
- },
79
  hidden_parameters=[
80
- # sent to API but not shown in generation parameters accordion
81
  "enable_safety_checker",
82
  "max_sequence_length",
83
  "num_images",
@@ -90,7 +79,7 @@ config = Config(
90
  "styles",
91
  "sync_mode",
92
  ],
93
- negative_prompt="ugly, unattractive, disfigured, deformed, mutated, malformed, blurry, grainy, noisy, oversaturated, undersaturated, overexposed, underexposed, worst quality, low details, lowres, watermark, signature, autograph, trademark, sloppy, cluttered",
94
  default_image_size="square_hd",
95
  image_sizes=[
96
  "landscape_16_9",
@@ -99,7 +88,7 @@ config = Config(
99
  "portrait_4_3",
100
  "portrait_16_9",
101
  ],
102
- default_aspect_ratio="1024x1024",
103
  aspect_ratios=[
104
  "704x1408", # 1:2
105
  "704x1344", # 11:21
@@ -143,6 +132,7 @@ config = Config(
143
  "llama-3.1-sonar-large-128k-chat",
144
  "llama-3.1-sonar-small-128k-online",
145
  "llama-3.1-sonar-large-128k-online",
 
146
  ],
147
  },
148
  ),
 
1
  from dataclasses import dataclass
2
  from typing import Dict, List
3
 
4
+ from .preset import preset
5
+
6
+
7
def txt2img_models_from_presets(presets):
    """Group text-to-image model IDs by service.

    Args:
        presets: Iterable of preset objects exposing ``service`` and
            ``model_id`` attributes (Txt2ImgPreset instances).

    Returns:
        Dict mapping each service name to the list of its model IDs,
        preserving the iteration order of ``presets``.
    """
    models = {}
    for p in presets:
        # setdefault replaces the explicit "if service not in models" check
        models.setdefault(p.service, []).append(p.model_id)
    return models
16
+
17
 
18
  @dataclass
19
  class Txt2TxtConfig:
 
45
  txt2txt: Txt2TxtConfig
46
 
47
 
48
+ # TODO: API keys should be with services (make a dataclass)
49
  config = Config(
50
  title="API Inference",
51
  icon="⚑",
 
64
  "Hugging Face": 2,
65
  "Together": 0,
66
  },
67
+ models=txt2img_models_from_presets(preset.txt2img.presets),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  hidden_parameters=[
69
+ # Sent to API but not shown in generation parameters accordion
70
  "enable_safety_checker",
71
  "max_sequence_length",
72
  "num_images",
 
79
  "styles",
80
  "sync_mode",
81
  ],
82
+ negative_prompt="ugly, unattractive, disfigured, deformed, mutated, malformed, blurry, grainy, oversaturated, undersaturated, overexposed, underexposed, worst quality, low details, lowres, watermark, signature, sloppy, cluttered",
83
  default_image_size="square_hd",
84
  image_sizes=[
85
  "landscape_16_9",
 
88
  "portrait_4_3",
89
  "portrait_16_9",
90
  ],
91
+ default_aspect_ratio="1024x1024", # fooocus aspect ratios
92
  aspect_ratios=[
93
  "704x1408", # 1:2
94
  "704x1344", # 11:21
 
132
  "llama-3.1-sonar-large-128k-chat",
133
  "llama-3.1-sonar-small-128k-online",
134
  "llama-3.1-sonar-large-128k-online",
135
+ "llama-3.1-sonar-huge-128k-online",
136
  ],
137
  },
138
  ),
lib/preset.py CHANGED
@@ -14,6 +14,8 @@ class Txt2TxtPreset:
14
  class Txt2ImgPreset:
15
  # FLUX1.1 has no scale or steps
16
  name: str
 
 
17
  guidance_scale: Optional[float] = None
18
  guidance_scale_min: Optional[float] = None
19
  guidance_scale_max: Optional[float] = None
@@ -32,25 +34,10 @@ class Txt2TxtPresets:
32
 
33
  @dataclass
34
  class Txt2ImgPresets:
35
- # bfl
36
- flux_1_1_pro_bfl: Txt2ImgPreset
37
- flux_dev_bfl: Txt2ImgPreset
38
- flux_pro_bfl: Txt2ImgPreset
39
- # fal
40
- aura_flow: Txt2ImgPreset
41
- flux_1_1_pro_fal: Txt2ImgPreset
42
- flux_dev_fal: Txt2ImgPreset
43
- flux_pro_fal: Txt2ImgPreset
44
- flux_schnell_fal: Txt2ImgPreset
45
- fooocus: Txt2ImgPreset
46
- kolors: Txt2ImgPreset
47
- stable_diffusion_3: Txt2ImgPreset
48
- # hf
49
- flux_dev_hf: Txt2ImgPreset
50
- flux_schnell_hf: Txt2ImgPreset
51
- stable_diffusion_xl: Txt2ImgPreset
52
- # together
53
- flux_schnell_free_together: Txt2ImgPreset
54
 
55
 
56
  @dataclass
@@ -61,7 +48,6 @@ class Preset:
61
 
62
  preset = Preset(
63
  txt2txt=Txt2TxtPresets(
64
- # Every service has model and system messages
65
  hugging_face=Txt2TxtPreset(
66
  frequency_penalty=0.0,
67
  frequency_penalty_min=-2.0,
@@ -76,177 +62,209 @@ preset = Preset(
76
  ),
77
  ),
78
  txt2img=Txt2ImgPresets(
79
- aura_flow=Txt2ImgPreset(
80
- "AuraFlow",
81
- guidance_scale=3.5,
82
- guidance_scale_min=1.0,
83
- guidance_scale_max=10.0,
84
- num_inference_steps=28,
85
- num_inference_steps_min=10,
86
- num_inference_steps_max=50,
87
- parameters=["seed", "num_inference_steps", "guidance_scale", "expand_prompt"],
88
- kwargs={"num_images": 1, "sync_mode": False},
89
- ),
90
- flux_1_1_pro_bfl=Txt2ImgPreset(
91
- "FLUX1.1 Pro",
92
- parameters=["seed", "width", "height", "prompt_upsampling"],
93
- kwargs={"safety_tolerance": 6},
94
- ),
95
- flux_pro_bfl=Txt2ImgPreset(
96
- "FLUX.1 Pro",
97
- guidance_scale=2.5,
98
- guidance_scale_min=1.5,
99
- guidance_scale_max=5.0,
100
- num_inference_steps=40,
101
- num_inference_steps_min=10,
102
- num_inference_steps_max=50,
103
- parameters=["seed", "width", "height", "steps", "guidance", "prompt_upsampling"],
104
- kwargs={"safety_tolerance": 6, "interval": 1},
105
- ),
106
- flux_dev_bfl=Txt2ImgPreset(
107
- "FLUX.1 Dev",
108
- num_inference_steps=28,
109
- num_inference_steps_min=10,
110
- num_inference_steps_max=50,
111
- guidance_scale=3.0,
112
- guidance_scale_min=1.5,
113
- guidance_scale_max=5.0,
114
- parameters=["seed", "width", "height", "steps", "guidance", "prompt_upsampling"],
115
- kwargs={"safety_tolerance": 6},
116
- ),
117
- flux_1_1_pro_fal=Txt2ImgPreset(
118
- "FLUX1.1 Pro",
119
- parameters=["seed", "image_size"],
120
- kwargs={
121
- "num_images": 1,
122
- "sync_mode": False,
123
- "safety_tolerance": 6,
124
- "enable_safety_checker": False,
125
- },
126
- ),
127
- flux_pro_fal=Txt2ImgPreset(
128
- "FLUX.1 Pro",
129
- guidance_scale=2.5,
130
- guidance_scale_min=1.5,
131
- guidance_scale_max=5.0,
132
- num_inference_steps=40,
133
- num_inference_steps_min=10,
134
- num_inference_steps_max=50,
135
- parameters=["seed", "image_size", "num_inference_steps", "guidance_scale"],
136
- kwargs={"num_images": 1, "sync_mode": False, "safety_tolerance": 6},
137
- ),
138
- flux_dev_fal=Txt2ImgPreset(
139
- "FLUX.1 Dev",
140
- num_inference_steps=28,
141
- num_inference_steps_min=10,
142
- num_inference_steps_max=50,
143
- guidance_scale=3.0,
144
- guidance_scale_min=1.5,
145
- guidance_scale_max=5.0,
146
- parameters=["seed", "image_size", "num_inference_steps", "guidance_scale"],
147
- kwargs={"num_images": 1, "sync_mode": False, "safety_tolerance": 6},
148
- ),
149
- flux_schnell_fal=Txt2ImgPreset(
150
- "FLUX.1 Schnell",
151
- num_inference_steps=4,
152
- num_inference_steps_min=1,
153
- num_inference_steps_max=12,
154
- parameters=["seed", "image_size", "num_inference_steps"],
155
- kwargs={"num_images": 1, "sync_mode": False, "enable_safety_checker": False},
156
- ),
157
- flux_dev_hf=Txt2ImgPreset(
158
- "FLUX.1 Dev",
159
- num_inference_steps=28,
160
- num_inference_steps_min=10,
161
- num_inference_steps_max=50,
162
- guidance_scale=3.0,
163
- guidance_scale_min=1.5,
164
- guidance_scale_max=5.0,
165
- parameters=["width", "height", "guidance_scale", "num_inference_steps"],
166
- kwargs={"max_sequence_length": 512},
167
- ),
168
- flux_schnell_hf=Txt2ImgPreset(
169
- "FLUX.1 Schnell",
170
- num_inference_steps=4,
171
- num_inference_steps_min=1,
172
- num_inference_steps_max=12,
173
- parameters=["width", "height", "num_inference_steps"],
174
- kwargs={"guidance_scale": 0.0, "max_sequence_length": 256},
175
- ),
176
- flux_schnell_free_together=Txt2ImgPreset(
177
- "FLUX.1 Schnell Free",
178
- num_inference_steps=4,
179
- num_inference_steps_min=1,
180
- num_inference_steps_max=12,
181
- parameters=["model", "seed", "width", "height", "steps"],
182
- kwargs={"n": 1},
183
- ),
184
- fooocus=Txt2ImgPreset(
185
- "Fooocus",
186
- guidance_scale=4.0,
187
- guidance_scale_min=1.0,
188
- guidance_scale_max=10.0,
189
- parameters=["seed", "negative_prompt", "aspect_ratio", "guidance_scale"],
190
- kwargs={
191
- "num_images": 1,
192
- "sync_mode": True,
193
- "enable_safety_checker": False,
194
- "output_format": "png",
195
- "sharpness": 2,
196
- "styles": ["Fooocus Enhance", "Fooocus V2", "Fooocus Sharp"],
197
- "performance": "Quality",
198
- },
199
- ),
200
- kolors=Txt2ImgPreset(
201
- "Kolors",
202
- guidance_scale=5.0,
203
- guidance_scale_min=1.0,
204
- guidance_scale_max=10.0,
205
- num_inference_steps=50,
206
- num_inference_steps_min=10,
207
- num_inference_steps_max=50,
208
- parameters=["seed", "negative_prompt", "image_size", "guidance_scale", "num_inference_steps"],
209
- kwargs={
210
- "num_images": 1,
211
- "sync_mode": True,
212
- "enable_safety_checker": False,
213
- "scheduler": "EulerDiscreteScheduler",
214
- },
215
- ),
216
- stable_diffusion_3=Txt2ImgPreset(
217
- "SD3",
218
- guidance_scale=5.0,
219
- guidance_scale_min=1.0,
220
- guidance_scale_max=10.0,
221
- num_inference_steps=28,
222
- num_inference_steps_min=10,
223
- num_inference_steps_max=50,
224
- parameters=[
225
- "seed",
226
- "negative_prompt",
227
- "image_size",
228
- "guidance_scale",
229
- "num_inference_steps",
230
- "prompt_expansion",
231
- ],
232
- kwargs={"num_images": 1, "sync_mode": True, "enable_safety_checker": False},
233
- ),
234
- stable_diffusion_xl=Txt2ImgPreset(
235
- "SDXL",
236
- guidance_scale=7.0,
237
- guidance_scale_min=1.0,
238
- guidance_scale_max=10.0,
239
- num_inference_steps=40,
240
- num_inference_steps_min=10,
241
- num_inference_steps_max=50,
242
- parameters=[
243
- "seed",
244
- "negative_prompt",
245
- "width",
246
- "height",
247
- "guidance_scale",
248
- "num_inference_steps",
249
- ],
250
- ),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
251
  ),
252
  )
 
14
  class Txt2ImgPreset:
15
  # FLUX1.1 has no scale or steps
16
  name: str
17
+ service: str
18
+ model_id: str
19
  guidance_scale: Optional[float] = None
20
  guidance_scale_min: Optional[float] = None
21
  guidance_scale_max: Optional[float] = None
 
34
 
35
  @dataclass
36
  class Txt2ImgPresets:
37
+ presets: List[Txt2ImgPreset] = field(default_factory=list)
38
+
39
+ def __iter__(self):
40
+ return iter(self.presets)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
 
43
  @dataclass
 
48
 
49
  preset = Preset(
50
  txt2txt=Txt2TxtPresets(
 
51
  hugging_face=Txt2TxtPreset(
52
  frequency_penalty=0.0,
53
  frequency_penalty_min=-2.0,
 
62
  ),
63
  ),
64
  txt2img=Txt2ImgPresets(
65
+ presets=[
66
+ Txt2ImgPreset(
67
+ "AuraFlow",
68
+ "Fal",
69
+ "fal-ai/aura-flow",
70
+ guidance_scale=3.5,
71
+ guidance_scale_min=1.0,
72
+ guidance_scale_max=10.0,
73
+ num_inference_steps=28,
74
+ num_inference_steps_min=10,
75
+ num_inference_steps_max=50,
76
+ parameters=["seed", "num_inference_steps", "guidance_scale", "expand_prompt"],
77
+ kwargs={"num_images": 1, "sync_mode": False},
78
+ ),
79
+ Txt2ImgPreset(
80
+ "FLUX1.1 Pro",
81
+ "Black Forest Labs",
82
+ "flux-pro-1.1",
83
+ parameters=["seed", "width", "height", "prompt_upsampling"],
84
+ kwargs={"safety_tolerance": 6},
85
+ ),
86
+ Txt2ImgPreset(
87
+ "FLUX.1 Pro",
88
+ "Black Forest Labs",
89
+ "flux-pro",
90
+ guidance_scale=2.5,
91
+ guidance_scale_min=1.5,
92
+ guidance_scale_max=5.0,
93
+ num_inference_steps=40,
94
+ num_inference_steps_min=10,
95
+ num_inference_steps_max=50,
96
+ parameters=["seed", "width", "height", "steps", "guidance", "prompt_upsampling"],
97
+ kwargs={"safety_tolerance": 6, "interval": 1},
98
+ ),
99
+ Txt2ImgPreset(
100
+ "FLUX.1 Dev",
101
+ "Black Forest Labs",
102
+ "flux-dev",
103
+ num_inference_steps=28,
104
+ num_inference_steps_min=10,
105
+ num_inference_steps_max=50,
106
+ guidance_scale=3.0,
107
+ guidance_scale_min=1.5,
108
+ guidance_scale_max=5.0,
109
+ parameters=["seed", "width", "height", "steps", "guidance", "prompt_upsampling"],
110
+ kwargs={"safety_tolerance": 6},
111
+ ),
112
+ Txt2ImgPreset(
113
+ "FLUX1.1 Pro",
114
+ "Fal",
115
+ "fal-ai/flux-pro/v1.1",
116
+ parameters=["seed", "image_size"],
117
+ kwargs={
118
+ "num_images": 1,
119
+ "sync_mode": False,
120
+ "safety_tolerance": 6,
121
+ "enable_safety_checker": False,
122
+ },
123
+ ),
124
+ Txt2ImgPreset(
125
+ "FLUX.1 Pro",
126
+ "Fal",
127
+ "fal-ai/flux-pro",
128
+ guidance_scale=2.5,
129
+ guidance_scale_min=1.5,
130
+ guidance_scale_max=5.0,
131
+ num_inference_steps=40,
132
+ num_inference_steps_min=10,
133
+ num_inference_steps_max=50,
134
+ parameters=["seed", "image_size", "num_inference_steps", "guidance_scale"],
135
+ kwargs={"num_images": 1, "sync_mode": False, "safety_tolerance": 6},
136
+ ),
137
+ Txt2ImgPreset(
138
+ "FLUX.1 Dev",
139
+ "Fal",
140
+ "fal-ai/flux/dev",
141
+ num_inference_steps=28,
142
+ num_inference_steps_min=10,
143
+ num_inference_steps_max=50,
144
+ guidance_scale=3.0,
145
+ guidance_scale_min=1.5,
146
+ guidance_scale_max=5.0,
147
+ parameters=["seed", "image_size", "num_inference_steps", "guidance_scale"],
148
+ kwargs={"num_images": 1, "sync_mode": False, "safety_tolerance": 6},
149
+ ),
150
+ Txt2ImgPreset(
151
+ "FLUX.1 Schnell",
152
+ "Fal",
153
+ "fal-ai/flux/schnell",
154
+ num_inference_steps=4,
155
+ num_inference_steps_min=1,
156
+ num_inference_steps_max=12,
157
+ parameters=["seed", "image_size", "num_inference_steps"],
158
+ kwargs={"num_images": 1, "sync_mode": False, "enable_safety_checker": False},
159
+ ),
160
+ Txt2ImgPreset(
161
+ "FLUX.1 Dev",
162
+ "Hugging Face",
163
+ "black-forest-labs/flux.1-dev",
164
+ num_inference_steps=28,
165
+ num_inference_steps_min=10,
166
+ num_inference_steps_max=50,
167
+ guidance_scale=3.0,
168
+ guidance_scale_min=1.5,
169
+ guidance_scale_max=5.0,
170
+ parameters=["width", "height", "guidance_scale", "num_inference_steps"],
171
+ kwargs={"max_sequence_length": 512},
172
+ ),
173
+ Txt2ImgPreset(
174
+ "FLUX.1 Schnell",
175
+ "Hugging Face",
176
+ "black-forest-labs/flux.1-schnell",
177
+ num_inference_steps=4,
178
+ num_inference_steps_min=1,
179
+ num_inference_steps_max=12,
180
+ parameters=["width", "height", "num_inference_steps"],
181
+ kwargs={"guidance_scale": 0.0, "max_sequence_length": 256},
182
+ ),
183
+ Txt2ImgPreset(
184
+ "FLUX.1 Schnell Free",
185
+ "Together",
186
+ "black-forest-labs/FLUX.1-schnell-Free",
187
+ num_inference_steps=4,
188
+ num_inference_steps_min=1,
189
+ num_inference_steps_max=12,
190
+ parameters=["model", "seed", "width", "height", "steps"],
191
+ kwargs={"n": 1},
192
+ ),
193
+ Txt2ImgPreset(
194
+ "Fooocus",
195
+ "Fal",
196
+ "fal-ai/fooocus",
197
+ guidance_scale=4.0,
198
+ guidance_scale_min=1.0,
199
+ guidance_scale_max=10.0,
200
+ parameters=["seed", "negative_prompt", "aspect_ratio", "guidance_scale"],
201
+ kwargs={
202
+ "num_images": 1,
203
+ "sync_mode": True,
204
+ "enable_safety_checker": False,
205
+ "output_format": "png",
206
+ "sharpness": 2,
207
+ "styles": ["Fooocus Enhance", "Fooocus V2", "Fooocus Sharp"],
208
+ "performance": "Quality",
209
+ },
210
+ ),
211
+ Txt2ImgPreset(
212
+ "Kolors",
213
+ "Fal",
214
+ "fal-ai/kolors",
215
+ guidance_scale=5.0,
216
+ guidance_scale_min=1.0,
217
+ guidance_scale_max=10.0,
218
+ num_inference_steps=50,
219
+ num_inference_steps_min=10,
220
+ num_inference_steps_max=50,
221
+ parameters=["seed", "negative_prompt", "image_size", "guidance_scale", "num_inference_steps"],
222
+ kwargs={
223
+ "num_images": 1,
224
+ "sync_mode": True,
225
+ "enable_safety_checker": False,
226
+ "scheduler": "EulerDiscreteScheduler",
227
+ },
228
+ ),
229
+ Txt2ImgPreset(
230
+ "SD3",
231
+ "Fal",
232
+ "fal-ai/stable-diffusion-v3-medium",
233
+ guidance_scale=5.0,
234
+ guidance_scale_min=1.0,
235
+ guidance_scale_max=10.0,
236
+ num_inference_steps=28,
237
+ num_inference_steps_min=10,
238
+ num_inference_steps_max=50,
239
+ parameters=[
240
+ "seed",
241
+ "negative_prompt",
242
+ "image_size",
243
+ "guidance_scale",
244
+ "num_inference_steps",
245
+ "prompt_expansion",
246
+ ],
247
+ kwargs={"num_images": 1, "sync_mode": True, "enable_safety_checker": False},
248
+ ),
249
+ Txt2ImgPreset(
250
+ "SDXL",
251
+ "Hugging Face",
252
+ "stabilityai/stable-diffusion-xl-base-1.0",
253
+ guidance_scale=7.0,
254
+ guidance_scale_min=1.0,
255
+ guidance_scale_max=10.0,
256
+ num_inference_steps=40,
257
+ num_inference_steps_min=10,
258
+ num_inference_steps_max=50,
259
+ parameters=[
260
+ "seed",
261
+ "negative_prompt",
262
+ "width",
263
+ "height",
264
+ "guidance_scale",
265
+ "num_inference_steps",
266
+ ],
267
+ ),
268
+ ]
269
  ),
270
  )
pages/1_πŸ’¬_Text_Generation.py CHANGED
@@ -43,7 +43,7 @@ st.logo("logo.png")
43
  st.sidebar.header("Settings")
44
  service = st.sidebar.selectbox(
45
  "Service",
46
- options=["Hugging Face", "Perplexity"],
47
  index=0,
48
  disabled=st.session_state.running,
49
  )
 
43
  st.sidebar.header("Settings")
44
  service = st.sidebar.selectbox(
45
  "Service",
46
+ options=SERVICE_SESSION.keys(),
47
  index=0,
48
  disabled=st.session_state.running,
49
  )
pages/2_🎨_Text_to_Image.py CHANGED
@@ -20,29 +20,9 @@ SESSION_TOKEN = {
20
  "api_key_together": os.environ.get("TOGETHER_API_KEY") or None,
21
  }
22
 
23
- # TODO: group by service so we can have models with the same name
24
- # Model IDs in lib/config.py
25
- PRESET_MODEL = {
26
- # bfl
27
- "flux-pro-1.1": preset.txt2img.flux_1_1_pro_bfl,
28
- "flux-pro": preset.txt2img.flux_pro_bfl,
29
- "flux-dev": preset.txt2img.flux_dev_bfl,
30
- # fal
31
- "fal-ai/aura-flow": preset.txt2img.aura_flow,
32
- "fal-ai/flux/dev": preset.txt2img.flux_dev_fal,
33
- "fal-ai/flux/schnell": preset.txt2img.flux_schnell_fal,
34
- "fal-ai/flux-pro": preset.txt2img.flux_pro_fal,
35
- "fal-ai/flux-pro/v1.1": preset.txt2img.flux_1_1_pro_fal,
36
- "fal-ai/fooocus": preset.txt2img.fooocus,
37
- "fal-ai/kolors": preset.txt2img.kolors,
38
- "fal-ai/stable-diffusion-v3-medium": preset.txt2img.stable_diffusion_3,
39
- # hf
40
- "black-forest-labs/flux.1-dev": preset.txt2img.flux_dev_hf,
41
- "black-forest-labs/flux.1-schnell": preset.txt2img.flux_schnell_hf,
42
- "stabilityai/stable-diffusion-xl-base-1.0": preset.txt2img.stable_diffusion_xl,
43
- # together
44
- "black-forest-labs/FLUX.1-schnell-Free": preset.txt2img.flux_schnell_free_together,
45
- }
46
 
47
  st.set_page_config(
48
  page_title=f"{config.title} | Text to Image",
@@ -108,8 +88,8 @@ st.html("""
108
 
109
  # Build parameters from preset by rendering the appropriate input widgets
110
  parameters = {}
111
- preset = PRESET_MODEL[model]
112
- for param in preset.parameters:
113
  if param == "model":
114
  parameters[param] = model
115
  if param == "seed":
@@ -161,18 +141,18 @@ for param in preset.parameters:
161
  if param in ["guidance_scale", "guidance"]:
162
  parameters[param] = st.sidebar.slider(
163
  "Guidance Scale",
164
- preset.guidance_scale_min,
165
- preset.guidance_scale_max,
166
- preset.guidance_scale,
167
  0.1,
168
  disabled=st.session_state.running,
169
  )
170
  if param in ["num_inference_steps", "steps"]:
171
  parameters[param] = st.sidebar.slider(
172
  "Inference Steps",
173
- preset.num_inference_steps_min,
174
- preset.num_inference_steps_max,
175
- preset.num_inference_steps,
176
  1,
177
  disabled=st.session_state.running,
178
  )
@@ -280,8 +260,8 @@ if prompt := st.chat_input(
280
 
281
  with st.chat_message("assistant"):
282
  with st.spinner("Running..."):
283
- if preset.kwargs:
284
- parameters.update(preset.kwargs)
285
  session_key = f"api_key_{service.lower().replace(' ', '_')}"
286
  api_key = st.session_state[session_key] or SESSION_TOKEN[session_key]
287
  image = txt2img_generate(api_key, service, model, prompt, parameters)
 
20
  "api_key_together": os.environ.get("TOGETHER_API_KEY") or None,
21
  }
22
 
23
# Index presets by model identifier for O(1) lookup when a model is selected.
PRESET_MODEL = {p.model_id: p for p in preset.txt2img.presets}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
  st.set_page_config(
28
  page_title=f"{config.title} | Text to Image",
 
88
 
89
  # Build parameters from preset by rendering the appropriate input widgets
90
  parameters = {}
91
+ model_preset = PRESET_MODEL[model]
92
+ for param in model_preset.parameters:
93
  if param == "model":
94
  parameters[param] = model
95
  if param == "seed":
 
141
  if param in ["guidance_scale", "guidance"]:
142
  parameters[param] = st.sidebar.slider(
143
  "Guidance Scale",
144
+ model_preset.guidance_scale_min,
145
+ model_preset.guidance_scale_max,
146
+ model_preset.guidance_scale,
147
  0.1,
148
  disabled=st.session_state.running,
149
  )
150
  if param in ["num_inference_steps", "steps"]:
151
  parameters[param] = st.sidebar.slider(
152
  "Inference Steps",
153
+ model_preset.num_inference_steps_min,
154
+ model_preset.num_inference_steps_max,
155
+ model_preset.num_inference_steps,
156
  1,
157
  disabled=st.session_state.running,
158
  )
 
260
 
261
  with st.chat_message("assistant"):
262
  with st.spinner("Running..."):
263
+ if model_preset.kwargs:
264
+ parameters.update(model_preset.kwargs)
265
  session_key = f"api_key_{service.lower().replace(' ', '_')}"
266
  api_key = st.session_state[session_key] or SESSION_TOKEN[session_key]
267
  image = txt2img_generate(api_key, service, model, prompt, parameters)