adamelliotfields committed
Commit d808b5b • Parent(s): a5515e4

Use httpx

- lib/api.py +10 -4
- lib/config.py +4 -4
- lib/presets.py +4 -4
- pages/2_🎨_Text_to_Image.py +6 -6
- requirements.txt +2 -1
lib/api.py
CHANGED
@@ -1,7 +1,7 @@
 import base64
 import io
 
-import requests
+import httpx
 import streamlit as st
 from openai import APIError, OpenAI
 from PIL import Image
@@ -47,13 +47,19 @@ def txt2img_generate(api_key, service, model, inputs, parameters, **kwargs):
     base_url = f"{Config.SERVICES[service]}/{model}"
 
     try:
-        response = requests.post(base_url, headers=headers, json=json)
+        response = httpx.post(base_url, headers=headers, json=json, timeout=Config.TXT2IMG_TIMEOUT)
         if response.status_code // 100 == 2:  # 2xx
             if service == "Hugging Face":
                 return Image.open(io.BytesIO(response.content))
             if service == "Fal":
-                bytes = base64.b64decode(response.json()["images"][0]["url"].split(",")[-1])
-                return Image.open(io.BytesIO(bytes))
+                # sync_mode means wait for image base64 instead of CDN link
+                if parameters.get("sync_mode", True):
+                    bytes = base64.b64decode(response.json()["images"][0]["url"].split(",")[-1])
+                    return Image.open(io.BytesIO(bytes))
+                else:
+                    url = response.json()["images"][0]["url"]
+                    image = httpx.get(url, headers=headers, timeout=Config.TXT2IMG_TIMEOUT)
+                    return Image.open(io.BytesIO(image.content))
         else:
             return f"Error: {response.status_code} {response.text}"
     except Exception as e:
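A note on the new Fal branch above: with sync_mode the API returns the image inline as a base64 data URI in the "url" field, otherwise "url" is a CDN link that has to be fetched with a second request. Below is a minimal sketch of both paths using httpx, assuming a response already obtained as in the diff; the helper name image_from_fal_response and the standalone TIMEOUT constant are illustrative, not part of the repo.

import base64
import io

import httpx
from PIL import Image

TIMEOUT = 120  # assumption: mirrors the new Config.TXT2IMG_TIMEOUT

def image_from_fal_response(response: httpx.Response, headers: dict, sync_mode: bool) -> Image.Image:
    # Fal responds with {"images": [{"url": ...}, ...]}; only the first image is used here
    url = response.json()["images"][0]["url"]
    if sync_mode:
        # sync_mode: "url" is a data URI such as "data:image/png;base64,...."
        data = base64.b64decode(url.split(",")[-1])
        return Image.open(io.BytesIO(data))
    # otherwise "url" points at the CDN, so fetch the bytes separately
    cdn_response = httpx.get(url, headers=headers, timeout=TIMEOUT)
    return Image.open(io.BytesIO(cdn_response.content))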
lib/config.py
CHANGED
@@ -9,6 +9,7 @@ Config = SimpleNamespace(
         "Perplexity": "https://api.perplexity.ai",
         "Fal": "https://fal.run",
     },
+    TXT2IMG_TIMEOUT=120,
     TXT2IMG_HIDDEN_PARAMETERS=[
         # sent to API but not shown in generation parameters accordion
         "enable_safety_checker",
@@ -27,7 +28,7 @@ Config = SimpleNamespace(
     TXT2IMG_DEFAULT_MODEL={
         # index of model in below lists
         "Hugging Face": 2,
-        "Fal":
+        "Fal": 2,
     },
     TXT2IMG_MODELS={
         "Hugging Face": [
@@ -36,9 +37,8 @@ Config = SimpleNamespace(
             "stabilityai/stable-diffusion-xl-base-1.0",
         ],
         "Fal": [
-
-
-            # "fal-ai/flux-pro",
+            "fal-ai/aura-flow",
+            "fal-ai/flux-pro",
             "fal-ai/fooocus",
             "fal-ai/kolors",
             "fal-ai/pixart-sigma",
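The TXT2IMG_TIMEOUT added above is passed straight to httpx in lib/api.py. A bare number caps every phase of the request (connect, read, write, pool acquisition) at that many seconds; if only the long generation read needs 120 s, an httpx.Timeout can split the phases. A small sketch under that assumption, with a placeholder URL rather than anything from the repo:

import httpx

TXT2IMG_TIMEOUT = 120  # seconds, as in the Config above

# keep the generous read timeout for slow image generation, but fail fast on connect
timeout = httpx.Timeout(TXT2IMG_TIMEOUT, connect=10.0)
response = httpx.post("https://example.com/generate", json={"prompt": "a cat"}, timeout=timeout)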
lib/presets.py
CHANGED
@@ -24,11 +24,11 @@ ModelPresets = SimpleNamespace(
         "guidance_scale": 3.5,
         "guidance_scale_min": 1.0,
         "guidance_scale_max": 10.0,
-        "num_inference_steps":
+        "num_inference_steps": 28,
         "num_inference_steps_min": 10,
         "num_inference_steps_max": 50,
-        "parameters": ["seed", "
-        "kwargs": {"num_images": 1},
+        "parameters": ["seed", "num_inference_steps", "guidance_scale", "expand_prompt"],
+        "kwargs": {"num_images": 1, "sync_mode": False},
     },
     FLUX_DEV={
         "name": "FLUX.1 Dev",
@@ -50,7 +50,7 @@ ModelPresets = SimpleNamespace(
         "guidance_scale_min": 1.0,
         "guidance_scale_max": 10.0,
         "parameters": ["seed", "image_size", "num_inference_steps", "guidance_scale"],
-        "kwargs": {"num_images": 1, "sync_mode":
+        "kwargs": {"num_images": 1, "sync_mode": False, "safety_tolerance": 6},
     },
     FLUX_SCHNELL={
         "name": "FLUX.1 Schnell",
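For context on these preset fields: "parameters" appears to list the knobs surfaced in the UI for a model, while "kwargs" are fixed values sent with every request, so "sync_mode": False is what steers lib/api.py down the CDN-link branch. A hypothetical merge, only to show the resulting payload shape; the actual assembly lives elsewhere in the app and the sample values are made up:

preset = {
    "parameters": ["seed", "num_inference_steps", "guidance_scale", "expand_prompt"],
    "kwargs": {"num_images": 1, "sync_mode": False},
}
user_values = {"seed": 42, "num_inference_steps": 28, "guidance_scale": 3.5, "expand_prompt": True}

# keep only the parameters the preset exposes, then overlay the fixed kwargs
payload = {k: user_values[k] for k in preset["parameters"] if k in user_values}
payload.update(preset["kwargs"])
# -> {"seed": 42, "num_inference_steps": 28, "guidance_scale": 3.5,
#     "expand_prompt": True, "num_images": 1, "sync_mode": False}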
pages/2_🎨_Text_to_Image.py
CHANGED
@@ -19,6 +19,8 @@ PRESET_MODEL = {
     "black-forest-labs/flux.1-dev": ModelPresets.FLUX_DEV,
     "black-forest-labs/flux.1-schnell": ModelPresets.FLUX_SCHNELL,
     "stabilityai/stable-diffusion-xl-base-1.0": ModelPresets.STABLE_DIFFUSION_XL,
+    "fal-ai/aura-flow": ModelPresets.AURA_FLOW,
+    "fal-ai/flux-pro": ModelPresets.FLUX_PRO,
     "fal-ai/fooocus": ModelPresets.FOOOCUS,
     "fal-ai/kolors": ModelPresets.KOLORS,
     "fal-ai/pixart-sigma": ModelPresets.PIXART_SIGMA,
@@ -179,14 +181,12 @@ for message in st.session_state.txt2img_messages:
             div[data-testid="stMarkdownContainer"] p:not(:last-of-type) { margin-bottom: 0 }
         </style>
         """)
-        filtered_parameters = {
-            k: v
+        filtered_parameters = [
+            f"`{k}`: {v}"
             for k, v in message["parameters"].items()
             if k not in Config.TXT2IMG_HIDDEN_PARAMETERS
-        }
-        md = f"`model`: {message['model']}\n\n"
-        md += "\n\n".join([f"`{k}`: {v}" for k, v in filtered_parameters.items()])
-        st.markdown(md)
+        ]
+        st.markdown(f"`model`: {message['model']}\n\n" + "\n\n".join(filtered_parameters))
 
         if role == "assistant":
             # image is full width when _not_ in full-screen mode
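The refactor above folds the old dict comprehension and intermediate md string into a single list comprehension and one st.markdown call. A quick sketch of the string it renders for a hypothetical message; the sample values and the inline TXT2IMG_HIDDEN_PARAMETERS stand-in are made up:

TXT2IMG_HIDDEN_PARAMETERS = ["enable_safety_checker"]  # stand-in for Config.TXT2IMG_HIDDEN_PARAMETERS

message = {
    "model": "fal-ai/flux-pro",
    "parameters": {"seed": 42, "guidance_scale": 3.5, "enable_safety_checker": True},
}

filtered_parameters = [
    f"`{k}`: {v}"
    for k, v in message["parameters"].items()
    if k not in TXT2IMG_HIDDEN_PARAMETERS
]
markdown = f"`model`: {message['model']}\n\n" + "\n\n".join(filtered_parameters)
# markdown == "`model`: fal-ai/flux-pro\n\n`seed`: 42\n\n`guidance_scale`: 3.5"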
requirements.txt
CHANGED
@@ -1,4 +1,5 @@
+h2
+httpx
 openai==1.41.0
 pillow
-requests
 streamlit==1.37.1
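On the h2 addition alongside httpx: httpx speaks HTTP/1.1 by default and only negotiates HTTP/2 when the h2 package is installed and http2=True is passed to a client, so h2 is presumably here to allow that. A minimal sketch, not code from the repo:

import httpx

# http2=True raises ImportError unless the h2 package is installed
with httpx.Client(http2=True, timeout=120) as client:
    response = client.get("https://example.com")
    print(response.http_version)  # "HTTP/2" when the server negotiates it, otherwise "HTTP/1.1"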