import gradio as gr
import requests
import time
import json
import base64
import os
from io import BytesIO
import html
from PIL import Image, UnidentifiedImageError
import re
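# Send a request to the Hugging Face Inference API for the given model and return the raw response bytes.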
def query(payload, model):
HF_TOKEN = os.getenv("HF_TOKEN")
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
url = "https://api-inference.huggingface.co/models/"
API_URL = f"{url}{model}"
response = requests.post(API_URL, headers=headers, json=payload)
return response.content
def hf_inference(prompt, negative, model, steps, sampler, guidance, width, height, seed):
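    # Note: `sampler` is part of the signature so the UI callback can pass it, but the
    # HF Inference API payload below has no sampler field, so the value is not used.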
try:
        image_bytes = query(payload={
            "inputs": prompt,
            "parameters": {
                "negative_prompt": negative,
                "num_inference_steps": steps,
                "guidance_scale": guidance,
                "width": width, "height": height,
                "seed": seed,
            },
        }, model=model)
        image = Image.open(BytesIO(image_bytes))
return image
    except UnidentifiedImageError:
        # The API returned a non-image payload (e.g. a "model is loading" error), so warn instead of crashing.
        gr.Warning("This model is not loaded right now. Try another model.")
        return None
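# Thin client for the Prodia REST API: job submission (txt2img, img2img, ControlNet) and polling.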
class Prodia:
def __init__(self, api_key, base=None):
self.base = base or "https://api.prodia.com/v1"
self.headers = {
"X-Prodia-Key": api_key
}
def generate(self, params):
response = self._post(f"{self.base}/sd/generate", params)
return response.json()
def transform(self, params):
response = self._post(f"{self.base}/sd/transform", params)
return response.json()
def controlnet(self, params):
response = self._post(f"{self.base}/sd/controlnet", params)
return response.json()
def get_job(self, job_id):
response = self._get(f"{self.base}/job/{job_id}")
return response.json()
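    # Poll the job every 250 ms until it reaches a terminal state ("succeeded" or "failed").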
def wait(self, job):
job_result = job
while job_result['status'] not in ['succeeded', 'failed']:
time.sleep(0.25)
job_result = self.get_job(job['job'])
return job_result
def list_models(self):
response = self._get(f"{self.base}/sd/models")
return response.json()
def list_samplers(self):
response = self._get(f"{self.base}/sd/samplers")
return response.json()
def _post(self, url, params):
headers = {
**self.headers,
"Content-Type": "application/json"
}
response = requests.post(url, headers=headers, data=json.dumps(params))
if response.status_code != 200:
raise Exception(f"Bad Prodia Response: {response.status_code}")
return response
def _get(self, url):
response = requests.get(url, headers=self.headers)
if response.status_code != 200:
raise Exception(f"Bad Prodia Response: {response.status_code}")
return response
def image_to_base64(image):
# Convert the image to bytes
buffered = BytesIO()
    image.save(buffered, format="PNG")
# Encode the bytes to base64
img_str = base64.b64encode(buffered.getvalue())
return img_str.decode('utf-8') # Convert bytes to string
def remove_id_and_ext(text):
    # Strip the trailing "[hash]" identifier, then the checkpoint file extension.
    text = re.sub(r'\[.*\]$', '', text).strip()
    if text.endswith(".safetensors"):
        text = text[:-len(".safetensors")]
    elif text.endswith(".ckpt"):
        text = text[:-len(".ckpt")]
    return text
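# Parse an A1111-style "parameters" metadata string into its individual generation settings.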
def get_data(text):
results = {}
patterns = {
'prompt': r'(.*)',
'negative_prompt': r'Negative prompt: (.*)',
'steps': r'Steps: (\d+),',
'seed': r'Seed: (\d+),',
'sampler': r'Sampler:\s*([^\s,]+(?:\s+[^\s,]+)*)',
'model': r'Model:\s*([^\s,]+)',
'cfg_scale': r'CFG scale:\s*([\d\.]+)',
'size': r'Size:\s*([0-9]+x[0-9]+)'
}
    for key, pattern in patterns.items():
        match = re.search(pattern, text)
if match:
results[key] = match.group(1)
else:
results[key] = None
if results['size'] is not None:
w, h = results['size'].split("x")
results['w'] = w
results['h'] = h
else:
results['w'] = None
results['h'] = None
return results
def send_to_img2img_def(image):
return image
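# Read generation parameters embedded in an uploaded PNG and restore them into the txt2img controls.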
def send_to_txt2img(image):
result = {tabs: gr.update(selected="t2i")}
try:
text = image.info['parameters']
data = get_data(text)
result[prompt] = gr.update(value=data['prompt'])
result[negative_prompt] = gr.update(value=data['negative_prompt']) if data[
'negative_prompt'] is not None else gr.update()
result[steps] = gr.update(value=int(data['steps'])) if data['steps'] is not None else gr.update()
result[seed] = gr.update(value=int(data['seed'])) if data['seed'] is not None else gr.update()
result[cfg_scale] = gr.update(value=float(data['cfg_scale'])) if data['cfg_scale'] is not None else gr.update()
result[width] = gr.update(value=int(data['w'])) if data['w'] is not None else gr.update()
result[height] = gr.update(value=int(data['h'])) if data['h'] is not None else gr.update()
result[sampler] = gr.update(value=data['sampler']) if data['sampler'] is not None else gr.update()
        if data['model'] is not None and data['model'] in model_names:
            result[model] = gr.update(value=model_names[data['model']])
        else:
            result[model] = gr.update()
return result
except Exception as e:
print(e)
return result
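# Prodia client plus a lookup from cleaned model names (no hash/extension) back to full checkpoint names.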
prodia_client = Prodia(api_key=os.getenv("PRODIA_API_KEY"))
model_list = prodia_client.list_models()
model_names = {}
for model_name in model_list:
name_without_ext = remove_id_and_ext(model_name)
model_names[name_without_ext] = model_name
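# Submit a txt2img job to Prodia, block until it finishes, and return the result image URL.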
def txt2img(prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed):
result = prodia_client.generate({
"prompt": prompt,
"negative_prompt": negative_prompt,
"model": model,
"steps": steps,
"sampler": sampler,
"cfg_scale": cfg_scale,
"width": width,
"height": height,
"seed": seed
})
job = prodia_client.wait(result)
return job["imageUrl"]
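# Submit an img2img job to Prodia (input image sent as base64-encoded PNG) and return the result image URL.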
def img2img(input_image, denoising, prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed):
result = prodia_client.transform({
"imageData": image_to_base64(input_image),
"denoising_strength": denoising,
"prompt": prompt,
"negative_prompt": negative_prompt,
"model": model,
"steps": steps,
"sampler": sampler,
"cfg_scale": cfg_scale,
"width": width,
"height": height,
"seed": seed
})
job = prodia_client.wait(result)
return job["imageUrl"]
css = """
#generate {
height: 100%;
}
"""
with gr.Blocks(css=css) as demo:
with gr.Row():
with gr.Column(scale=6):
model = gr.Dropdown(interactive=True, value="absolutereality_v181.safetensors [3d9d4d2b]", show_label=True,
label="Stable Diffusion Checkpoint", choices=prodia_client.list_models())
with gr.Tabs() as tabs:
with gr.Tab("txt2img", id='t2i'):
with gr.Row():
with gr.Column(scale=6, min_width=600):
prompt = gr.Textbox("space warrior, beautiful, female, ultrarealistic, soft lighting, 8k",
placeholder="Prompt", show_label=False, lines=3)
negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3,
value="3d, cartoon, anime, (deformed eyes, nose, ears, nose), bad anatomy, ugly")
with gr.Column():
text_button = gr.Button("Generate", variant='primary', elem_id="generate")
with gr.Row():
with gr.Column(scale=3):
with gr.Tab("Generation"):
with gr.Row():
with gr.Column(scale=1):
sampler = gr.Dropdown(value="DPM++ 2M Karras", show_label=True, label="Sampling Method",
choices=prodia_client.list_samplers())
with gr.Column(scale=1):
steps = gr.Slider(label="Sampling Steps", minimum=1, maximum=25, value=20, step=1)
with gr.Row():
with gr.Column(scale=1):
width = gr.Slider(label="Width", maximum=1024, value=512, step=8)
height = gr.Slider(label="Height", maximum=1024, value=512, step=8)
with gr.Column(scale=1):
batch_size = gr.Slider(label="Batch Size", maximum=1, value=1)
batch_count = gr.Slider(label="Batch Count", maximum=1, value=1)
cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=8, step=1)
seed = gr.Number(label="Seed", value=-1)
with gr.Column(scale=2):
image_output = gr.Image(show_label=False, type="filepath", interactive=False)
send_to_img2img = gr.Button(value="Send to img2img")
text_button.click(txt2img, inputs=[prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height,
seed], outputs=image_output, concurrency_limit=64)
with gr.Tab("img2img", id='i2i'):
with gr.Row():
with gr.Column(scale=6, min_width=600):
i2i_prompt = gr.Textbox("space warrior, beautiful, female, ultrarealistic, soft lighting, 8k",
placeholder="Prompt", show_label=False, lines=3)
i2i_negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3,
value="3d, cartoon, anime, (deformed eyes, nose, ears, nose), bad anatomy, ugly")
with gr.Column():
i2i_text_button = gr.Button("Generate", variant='primary', elem_id="generate")
with gr.Row():
with gr.Column(scale=3):
with gr.Tab("Generation"):
i2i_image_input = gr.Image(type="pil")
with gr.Row():
with gr.Column(scale=1):
i2i_sampler = gr.Dropdown(value="Euler a", show_label=True, label="Sampling Method",
choices=prodia_client.list_samplers())
with gr.Column(scale=1):
i2i_steps = gr.Slider(label="Sampling Steps", minimum=1, maximum=25, value=20, step=1)
with gr.Row():
with gr.Column(scale=1):
i2i_width = gr.Slider(label="Width", maximum=1024, value=512, step=8)
i2i_height = gr.Slider(label="Height", maximum=1024, value=512, step=8)
with gr.Column(scale=1):
i2i_batch_size = gr.Slider(label="Batch Size", maximum=1, value=1)
i2i_batch_count = gr.Slider(label="Batch Count", maximum=1, value=1)
i2i_cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7, step=1)
i2i_denoising = gr.Slider(label="Denoising Strength", minimum=0, maximum=1, value=0.7, step=0.1)
i2i_seed = gr.Number(label="Seed", value=-1)
with gr.Column(scale=2):
i2i_image_output = gr.Image(show_label=False, type="filepath", interactive=False)
i2i_text_button.click(img2img, inputs=[i2i_image_input, i2i_denoising, i2i_prompt, i2i_negative_prompt,
model, i2i_steps, i2i_sampler, i2i_cfg_scale, i2i_width, i2i_height,
i2i_seed], outputs=i2i_image_output, concurrency_limit=64)
send_to_img2img.click(send_to_img2img_def, inputs=image_output, outputs=i2i_image_input)
with gr.Tab("PNG Info"):
def plaintext_to_html(text, classname=None):
content = "<br>\n".join(html.escape(x) for x in text.split('\n'))
return f"<p class='{classname}'>{content}</p>" if classname else f"<p>{content}</p>"
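            # Render each key/value pair from the image's metadata as escaped HTML blocks.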
def get_exif_data(image):
items = image.info
info = ''
for key, text in items.items():
info += f"""
<div>
<p><b>{plaintext_to_html(str(key))}</b></p>
<p>{plaintext_to_html(str(text))}</p>
</div>
""".strip() + "\n"
if len(info) == 0:
message = "Nothing found in the image."
                    info = f"<div><p>{message}</p></div>"
return info
with gr.Row():
with gr.Column():
image_input = gr.Image(type="pil")
with gr.Column():
exif_output = gr.HTML(label="EXIF Data")
send_to_txt2img_btn = gr.Button("Send to txt2img")
image_input.upload(get_exif_data, inputs=[image_input], outputs=exif_output)
send_to_txt2img_btn.click(send_to_txt2img, inputs=[image_input], outputs=[tabs, prompt, negative_prompt,
steps, seed, model, sampler,
width, height, cfg_scale],
concurrency_limit=64)
with gr.Tab("HuggingFace Inference"):
with gr.Row():
hf_model = gr.Dropdown(label="HuggingFace checkpoint", choices=["runwayml/stable-diffusion-v1-5", "stabilityai/stable-diffusion-2-1", "dataautogpt3/OpenDalleV1.1", "CompVis/stable-diffusion-v1-4", "playgroundai/playground-v2-1024px-aesthetic", "prompthero/openjourney", "openskyml/dreamdrop-v1", "SG161222/Realistic_Vision_V1.4", "digiplay/AbsoluteReality_v1.8.1", "openskyml/dalle-3-xl", "Lykon/dreamshaper-7", "Pclanglais/Mickey-1928"], value="runwayml/stable-diffusion-v1-5", allow_custom_value=False, interactive=True)
with gr.Row():
with gr.Column(scale=6, min_width=600):
hf_prompt = gr.Textbox("space warrior, beautiful, female, ultrarealistic, soft lighting, 8k",
placeholder="Prompt", show_label=False, lines=3)
hf_negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3,
value="3d, cartoon, anime, (deformed eyes, nose, ears, nose), bad anatomy, ugly")
with gr.Column():
hf_text_button = gr.Button("Generate with HF", variant='primary', elem_id="generate")
with gr.Row():
with gr.Column(scale=3):
with gr.Tab("Generation"):
with gr.Row():
with gr.Column(scale=1):
hf_steps = gr.Slider(label="Sampling Steps", minimum=1, maximum=25, value=20, step=1)
with gr.Row():
with gr.Column(scale=1):
hf_width = gr.Slider(label="Width", maximum=1024, value=512, step=8)
hf_height = gr.Slider(label="Height", maximum=1024, value=512, step=8)
with gr.Column(scale=1):
hf_batch_size = gr.Slider(label="Batch Size", maximum=1, value=1)
hf_batch_count = gr.Slider(label="Batch Count", maximum=1, value=1)
hf_cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=8, step=1)
hf_seed = gr.Number(label="Seed", value=-1)
with gr.Column(scale=2):
hf_image_output = gr.Image(show_label=False, type="pil", interactive=False)
#hf_send_to_img2img = gr.Button(value="Send to img2img")
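        # The txt2img tab's `sampler` dropdown is passed in here, but hf_inference ignores it.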
hf_text_button.click(hf_inference, inputs=[hf_prompt, hf_negative_prompt, hf_model, hf_steps, sampler, hf_cfg_scale, hf_width, hf_height,
hf_seed], outputs=hf_image_output, concurrency_limit=64)
demo.queue(max_size=80, api_open=False).launch(max_threads=256, show_api=False)