adamelliotfields committed
Commit ad24d66
Parent(s): 7c6aff8

Support image-to-image workflows

Browse files
- lib/__init__.py +3 -0
- lib/api.py +9 -9
- lib/config.py +93 -12
- lib/util.py +20 -0
- pages/2_🎨_Text_to_Image.py +31 -1
lib/__init__.py
CHANGED
@@ -1,7 +1,10 @@
 from .api import txt2img_generate, txt2txt_generate
 from .config import config
+from .util import base64_decode_image_data_url, base64_encode_image_file
 
 __all__ = [
+    "base64_decode_image_data_url",
+    "base64_encode_image_file",
     "config",
     "txt2img_generate",
     "txt2txt_generate",
lib/api.py
CHANGED
@@ -1,6 +1,5 @@
-import base64
-import io
 import time
+from io import BytesIO
 
 import httpx
 import streamlit as st
@@ -11,6 +10,7 @@ from openai import OpenAI
 from PIL import Image
 
 from .config import config
+from .util import base64_decode_image_data_url
 
 
 def txt2txt_generate(api_key, service, parameters, **kwargs):
@@ -72,6 +72,7 @@ def txt2img_generate(api_key, service, model, inputs, parameters, **kwargs):
     try:
         timeout = config.timeout
         response = httpx.post(base_url, headers=headers, json=json, timeout=timeout)
+
         if response.status_code // 100 == 2:  # 2xx
             # BFL is async so we need to poll for result
             # https://api.bfl.ml/docs
@@ -91,7 +92,7 @@ def txt2img_generate(api_key, service, model, inputs, parameters, **kwargs):
                         headers=headers,
                         timeout=timeout,
                     )
-                    return Image.open(io.BytesIO(image.content))
+                    return Image.open(BytesIO(image.content))
 
                 retries += 1
                 time.sleep(1)
@@ -100,21 +101,20 @@ def txt2img_generate(api_key, service, model, inputs, parameters, **kwargs):
 
             if service == "fal":
                 # Sync mode means wait for image base64 string instead of CDN link
+                url = response.json()["images"][0]["url"]
                 if parameters.get("sync_mode", True):
-                    bytes = base64.b64decode(response.json()["images"][0]["url"].split("base64,")[1])
-                    return Image.open(io.BytesIO(bytes))
+                    return base64_decode_image_data_url(url)
                 else:
-                    url = response.json()["images"][0]["url"]
                     image = httpx.get(url, headers=headers, timeout=timeout)
-                    return Image.open(io.BytesIO(image.content))
+                    return Image.open(BytesIO(image.content))
 
             if service == "hf":
-                return Image.open(io.BytesIO(response.content))
+                return Image.open(BytesIO(response.content))
 
             if service == "together":
                 url = response.json()["data"][0]["url"]
                 image = httpx.get(url, headers=headers, timeout=timeout)
-                return Image.open(io.BytesIO(image.content))
+                return Image.open(BytesIO(image.content))
 
         else:
             return f"Error: {response.status_code} {response.text}"
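The BFL branch above submits a job and then polls until the result is ready, while the fal branch now routes its sync-mode base64 payload through the new base64_decode_image_data_url helper. A minimal sketch of that polling pattern follows; the /get_result path and the "status"/"sample" fields are illustrative assumptions, not the verified BFL schema.

# Minimal polling sketch; endpoint path and response fields are assumed.
import time
from io import BytesIO

import httpx
from PIL import Image


def poll_for_image(base_url, request_id, headers, timeout=60, max_retries=30):
    for _ in range(max_retries):
        result = httpx.get(
            f"{base_url}/get_result?id={request_id}",
            headers=headers,
            timeout=timeout,
        ).json()
        if result.get("status") == "Ready":
            # The finished job points at a downloadable image URL
            image = httpx.get(result["result"]["sample"], headers=headers, timeout=timeout)
            return Image.open(BytesIO(image.content))
        time.sleep(1)  # wait between polls, as the diff does
    raise TimeoutError("Image was not ready after polling")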
lib/config.py
CHANGED
@@ -6,7 +6,9 @@ TEXT_SYSTEM_PROMPT = "You are a helpful assistant. Be precise and concise."
 
 IMAGE_NEGATIVE_PROMPT = "ugly, unattractive, disfigured, deformed, mutated, malformed, blurry, grainy, oversaturated, undersaturated, overexposed, underexposed, worst quality, low details, lowres, watermark, signature, sloppy, cluttered"
 
-
+FOOOCUS_NEGATIVE_PROMPT = "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)"
+
+IMAGE_SIZES = [
     "landscape_16_9",
     "landscape_4_3",
     "square_hd",
@@ -40,6 +42,8 @@ IMAGE_ASPECT_RATIOS = [
 
 IMAGE_RANGE = (256, 1408)
 
+STRENGTH_RANGE = (0.0, 1.0)
+
 
 @dataclass
 class ModelConfig:
@@ -68,6 +72,8 @@ class ImageModelConfig(ModelConfig):
     width_range: Optional[tuple[int, int]] = None
     height: Optional[int] = None
     height_range: Optional[tuple[int, int]] = None
+    strength: Optional[float] = None
+    strength_range: Optional[tuple[float, float]] = None
     image_size: Optional[str] = None
     image_sizes: Optional[List[str]] = field(default_factory=list)
     aspect_ratio: Optional[str] = None
@@ -149,6 +155,7 @@ config = AppConfig(
    hidden_parameters=[
        # Sent to API but not shown in generation parameters accordion
        "enable_safety_checker",
+        "image_url",
        "max_sequence_length",
        "n",
        "num_images",
@@ -223,17 +230,69 @@ config = AppConfig(
         "fal-ai/aura-flow": ImageModelConfig(
             "AuraFlow",
             guidance_scale=3.5,
-            guidance_scale_range=(
-            num_inference_steps=
-            num_inference_steps_range=(
+            guidance_scale_range=(0.0, 20.0),
+            num_inference_steps=50,
+            num_inference_steps_range=(20, 50),
             parameters=["seed", "num_inference_steps", "guidance_scale", "expand_prompt"],
             kwargs={"num_images": 1, "sync_mode": False},
         ),
+        "fal-ai/fast-sdxl": ImageModelConfig(
+            "Fast SDXL",
+            negative_prompt=IMAGE_NEGATIVE_PROMPT,
+            image_size="square_hd",
+            image_sizes=IMAGE_SIZES,
+            guidance_scale=7.5,
+            guidance_scale_range=(0.0, 20.0),
+            num_inference_steps=25,
+            num_inference_steps_range=(1, 50),
+            parameters=[
+                "seed",
+                "negative_prompt",
+                "image_size",
+                "num_inference_steps",
+                "guidance_scale",
+                "expand_prompt",
+            ],
+            kwargs={
+                "num_images": 1,
+                "sync_mode": False,
+                "enable_safety_checker": False,
+                "output_format": "png",
+            },
+        ),
+        "fal-ai/fast-sdxl/image-to-image": ImageModelConfig(
+            "Fast SDXL (Image)",
+            negative_prompt=IMAGE_NEGATIVE_PROMPT,
+            image_size="square_hd",
+            image_sizes=IMAGE_SIZES,
+            strength=0.95,
+            strength_range=STRENGTH_RANGE,
+            guidance_scale=7.5,
+            guidance_scale_range=(0.0, 20.0),
+            num_inference_steps=25,
+            num_inference_steps_range=(1, 50),
+            parameters=[
+                "seed",
+                "negative_prompt",
+                "image_size",
+                "num_inference_steps",
+                "guidance_scale",
+                "strength",
+                "expand_prompt",
+                "image_url",
+            ],
+            kwargs={
+                "num_images": 1,
+                "sync_mode": False,
+                "enable_safety_checker": False,
+                "output_format": "png",
+            },
+        ),
         "fal-ai/flux-pro/v1.1": ImageModelConfig(
             "FLUX1.1 Pro",
             parameters=["seed", "image_size"],
             image_size="square_hd",
-            image_sizes=
+            image_sizes=IMAGE_SIZES,
             kwargs={
                 "num_images": 1,
                 "sync_mode": False,
@@ -244,7 +303,7 @@ config = AppConfig(
         "fal-ai/flux-pro": ImageModelConfig(
             "FLUX.1 Pro",
             image_size="square_hd",
-            image_sizes=
+            image_sizes=IMAGE_SIZES,
             guidance_scale=2.5,
             guidance_scale_range=(1.5, 5.0),
             num_inference_steps=40,
@@ -255,18 +314,38 @@ config = AppConfig(
         "fal-ai/flux/dev": ImageModelConfig(
             "FLUX.1 Dev",
             image_size="square_hd",
-            image_sizes=
+            image_sizes=IMAGE_SIZES,
             num_inference_steps=28,
             num_inference_steps_range=(10, 50),
             guidance_scale=3.0,
             guidance_scale_range=(1.5, 5.0),
             parameters=["seed", "image_size", "num_inference_steps", "guidance_scale"],
-            kwargs={"num_images": 1, "sync_mode": False, "
+            kwargs={"num_images": 1, "sync_mode": False, "enable_safety_checker": False},
+        ),
+        "fal-ai/flux/dev/image-to-image": ImageModelConfig(
+            "FLUX.1 Dev (Image)",
+            image_size="square_hd",
+            image_sizes=IMAGE_SIZES,
+            strength=0.95,
+            strength_range=STRENGTH_RANGE,
+            num_inference_steps=28,
+            num_inference_steps_range=(10, 50),
+            guidance_scale=3.0,
+            guidance_scale_range=(1.5, 5.0),
+            parameters=[
+                "seed",
+                "image_size",
+                "num_inference_steps",
+                "guidance_scale",
+                "strength",
+                "image_url",
+            ],
+            kwargs={"num_images": 1, "sync_mode": False, "enable_safety_checker": False},
         ),
         "fal-ai/flux/schnell": ImageModelConfig(
             "FLUX.1 Schnell",
             image_size="square_hd",
-            image_sizes=
+            image_sizes=IMAGE_SIZES,
             num_inference_steps=4,
             num_inference_steps_range=(1, 12),
             parameters=["seed", "image_size", "num_inference_steps"],
@@ -274,6 +353,7 @@ config = AppConfig(
         ),
         "fal-ai/fooocus": ImageModelConfig(
             "Fooocus",
+            negative_prompt=FOOOCUS_NEGATIVE_PROMPT,
             aspect_ratio="1024x1024",
             aspect_ratios=IMAGE_ASPECT_RATIOS,
             guidance_scale=4.0,
@@ -292,8 +372,9 @@ config = AppConfig(
         ),
         "fal-ai/kolors": ImageModelConfig(
             "Kolors",
+            negative_prompt=IMAGE_NEGATIVE_PROMPT,
             image_size="square_hd",
-            image_sizes=
+            image_sizes=IMAGE_SIZES,
             guidance_scale=5.0,
             guidance_scale_range=(1.0, 10.0),
             num_inference_steps=50,
@@ -313,9 +394,9 @@ config = AppConfig(
             },
         ),
         "fal-ai/stable-diffusion-v3-medium": ImageModelConfig(
-            "SD3",
+            "SD3 Medium",
             image_size="square_hd",
-            image_sizes=
+            image_sizes=IMAGE_SIZES,
             guidance_scale=5.0,
             guidance_scale_range=(1.0, 10.0),
             num_inference_steps=28,
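The two new image-to-image entries differ from their text-to-image counterparts only by strength, strength_range, and the hidden image_url parameter. A rough sketch of the request body such an entry could produce, assuming the payload is a plain merge of the prompt, the collected parameters, and the entry's kwargs (the merge itself is not shown in this diff):

# Hypothetical payload for fal-ai/fast-sdxl/image-to-image, assembled from
# the fields above; the merge order is an assumption for illustration.
payload = {
    "prompt": "a watercolor fox",
    "seed": 42,
    "negative_prompt": IMAGE_NEGATIVE_PROMPT,
    "image_size": "square_hd",
    "num_inference_steps": 25,
    "guidance_scale": 7.5,
    "strength": 0.95,                          # within STRENGTH_RANGE
    "image_url": "data:image/png;base64,...",  # from base64_encode_image_file
    # merged from ImageModelConfig.kwargs:
    "num_images": 1,
    "sync_mode": False,
    "enable_safety_checker": False,
    "output_format": "png",
}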
lib/util.py
ADDED
@@ -0,0 +1,20 @@
+import mimetypes
+from base64 import b64decode, b64encode
+from io import BytesIO
+
+from PIL import Image
+
+
+def base64_decode_image_data_url(data_url: str) -> Image:
+    _, data = data_url.split("base64,", maxsplit=1)
+    byte_data = b64decode(data)
+    return Image.open(BytesIO(byte_data))
+
+
+def base64_encode_image_file(image_file: BytesIO) -> str:
+    file_type = image_file.type
+    if not file_type:
+        file_type = mimetypes.guess_type(image_file.name)[0]
+    file_data = image_file.read()
+    b64 = b64encode(file_data).decode("utf-8")
+    return f"data:{file_type};base64,{b64}"
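The decoder expects a full data URL and splits on the "base64," marker; the encoder builds one from a Streamlit upload, falling back to mimetypes when the file carries no content type. A self-contained round trip of the same logic, using an in-memory PNG in place of an uploaded file:

# Round-trip sketch: encode a PIL image as a data URL, then decode it back.
# A BytesIO buffer stands in for Streamlit's UploadedFile here.
from base64 import b64decode, b64encode
from io import BytesIO

from PIL import Image

buffer = BytesIO()
Image.new("RGB", (64, 64), "red").save(buffer, format="PNG")
data_url = f"data:image/png;base64,{b64encode(buffer.getvalue()).decode('utf-8')}"

_, data = data_url.split("base64,", maxsplit=1)  # same split as the decoder
image = Image.open(BytesIO(b64decode(data)))
assert image.size == (64, 64)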
pages/2_🎨_Text_to_Image.py
CHANGED
@@ -2,7 +2,7 @@ from datetime import datetime
 
 import streamlit as st
 
-from lib import config, txt2img_generate
+from lib import base64_encode_image_file, config, txt2img_generate
 
 st.set_page_config(
     page_title=f"Text to Image - {config.title}",
@@ -81,6 +81,7 @@ parameters = {}
 for param in model_config.parameters:
     if param == "model":
         parameters[param] = model
+
     if param == "seed":
         parameters[param] = st.sidebar.number_input(
             "Seed",
@@ -89,12 +90,14 @@ for param in model_config.parameters:
             value=-1,
             disabled=st.session_state.running,
         )
+
     if param == "negative_prompt":
         parameters[param] = st.sidebar.text_area(
             "Negative Prompt",
             value=model_config.negative_prompt,
             disabled=st.session_state.running,
         )
+
     if param == "width":
         parameters[param] = st.sidebar.slider(
             "Width",
@@ -104,6 +107,7 @@ for param in model_config.parameters:
             max_value=model_config.width_range[1],
             disabled=st.session_state.running,
         )
+
     if param == "height":
         parameters[param] = st.sidebar.slider(
             "Height",
@@ -113,6 +117,7 @@ for param in model_config.parameters:
             max_value=model_config.height_range[1],
             disabled=st.session_state.running,
         )
+
     if param == "image_size":
         parameters[param] = st.sidebar.select_slider(
             "Image Size",
@@ -120,6 +125,7 @@ for param in model_config.parameters:
             value=model_config.image_size,
             disabled=st.session_state.running,
         )
+
     if param == "aspect_ratio":
         parameters[param] = st.sidebar.select_slider(
             "Aspect Ratio",
@@ -127,6 +133,7 @@ for param in model_config.parameters:
             value=model_config.aspect_ratio,
             disabled=st.session_state.running,
         )
+
     if param in ["guidance_scale", "guidance"]:
         parameters[param] = st.sidebar.slider(
             "Guidance Scale",
@@ -136,6 +143,7 @@ for param in model_config.parameters:
             0.1,
             disabled=st.session_state.running,
         )
+
     if param in ["num_inference_steps", "steps"]:
         parameters[param] = st.sidebar.slider(
             "Inference Steps",
@@ -145,12 +153,24 @@ for param in model_config.parameters:
             1,
             disabled=st.session_state.running,
         )
+
+    if param == "strength":
+        parameters[param] = st.sidebar.slider(
+            "Strength",
+            model_config.strength_range[0],
+            model_config.strength_range[1],
+            model_config.strength,
+            0.05,
+            disabled=st.session_state.running,
+        )
+
     if param in ["expand_prompt", "prompt_expansion"]:
         parameters[param] = st.sidebar.checkbox(
             "Prompt Expansion",
             value=False,
             disabled=st.session_state.running,
         )
+
     if param == "prompt_upsampling":
         parameters[param] = st.sidebar.checkbox(
             "Prompt Upsampling",
@@ -158,6 +178,16 @@ for param in model_config.parameters:
             disabled=st.session_state.running,
         )
 
+    if param == "image_url":
+        image_file = st.sidebar.file_uploader(
+            "Image",
+            type=["bmp", "gif", "jpg", "jpeg", "png", "webp"],
+            accept_multiple_files=False,
+            disabled=st.session_state.running,
+        )
+        if image_file:
+            parameters[param] = base64_encode_image_file(image_file)
+
 # Wrap the prompt in an accordion to display additional parameters
 for message in st.session_state.txt2img_messages:
     role = message["role"]
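
On the page, image_url only enters parameters when a file is actually uploaded, so plain text-to-image requests are unchanged. A condensed sketch of the new flow, with widget wiring and session state omitted; the model choice and strength value are examples, not defaults:

# Condensed sketch of the image-to-image flow added to this page.
import streamlit as st

from lib import base64_encode_image_file

parameters = {"strength": 0.95}
image_file = st.sidebar.file_uploader("Image", type=["png", "jpg", "jpeg"])
if image_file:
    # Attach the upload as a base64 data URL; without a file the request
    # falls back to text-to-image.
    parameters["image_url"] = base64_encode_image_file(image_file)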
|