import gradio as gr
import requests
import io
import random
import os
from PIL import Image
# List of available models
list_models = [
    "SDXL 1.0", "SD 1.5", "OpenJourney", "Anything V4.0",
    "Disney Pixar Cartoon", "Pixel Art XL", "Dalle 3 XL",
    "Midjourney V4 XL", "Open Diffusion V1", "SSD 1B",
    "Segmind Vega", "Animagine XL-2.0", "Animagine XL-3.0",
    "OpenDalle", "OpenDalle V1.1", "PlaygroundV2 1024px aesthetic",
]
# Function to generate images from text
def generate_txt2img(current_model, prompt, is_negative="", image_style="None style", steps=50, cfg_scale=7, seed=None):
    """Generate an image from a text prompt via the Hugging Face Inference API."""
    # Hugging Face Inference API endpoint for each model
    model_urls = {
        "SDXL 1.0": "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0",
        "SD 1.5": "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5",
        "OpenJourney": "https://api-inference.huggingface.co/models/prompthero/openjourney",
        "Anything V4.0": "https://api-inference.huggingface.co/models/xyn-ai/anything-v4.0",
        "Disney Pixar Cartoon": "https://api-inference.huggingface.co/models/stablediffusionapi/disney-pixar-cartoon",
        "Pixel Art XL": "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl",
        "Dalle 3 XL": "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl",
        "Midjourney V4 XL": "https://api-inference.huggingface.co/models/openskyml/midjourney-v4-xl",
        "Open Diffusion V1": "https://api-inference.huggingface.co/models/openskyml/open-diffusion-v1",
        "SSD 1B": "https://api-inference.huggingface.co/models/segmind/SSD-1B",
        "Segmind Vega": "https://api-inference.huggingface.co/models/segmind/Segmind-Vega",
        "Animagine XL-2.0": "https://api-inference.huggingface.co/models/Linaqruf/animagine-xl-2.0",
        "Animagine XL-3.0": "https://api-inference.huggingface.co/models/cagliostrolab/animagine-xl-3.0",
        "OpenDalle": "https://api-inference.huggingface.co/models/dataautogpt3/OpenDalle",
        "OpenDalle V1.1": "https://api-inference.huggingface.co/models/dataautogpt3/OpenDalleV1.1",
        "PlaygroundV2 1024px aesthetic": "https://api-inference.huggingface.co/models/playgroundai/playground-v2-1024px-aesthetic",
    }
    API_URL = model_urls[current_model]
    API_TOKEN = os.environ.get("HF_READ_TOKEN")
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
if image_style == "None style":
payload = {
"inputs": prompt + ", 8k",
"is_negative": is_negative,
"steps": steps,
"cfg_scale": cfg_scale,
"seed": seed if seed is not None else random.randint(-1, 2147483647)
}
elif image_style == "Cinematic":
payload = {
"inputs": prompt + ", realistic, detailed, textured, skin, hair, eyes, by Alex Huguet, Mike Hill, Ian Spriggs, JaeCheol Park, Marek Denko",
"is_negative": is_negative + ", abstract, cartoon, stylized",
"steps": steps,
"cfg_scale": cfg_scale,
"seed": seed if seed is not None else random.randint(-1, 2147483647)
}
elif image_style == "Digital Art":
payload = {
"inputs": prompt + ", faded , vintage , nostalgic , by Jose Villa , Elizabeth Messina , Ryan Brenizer , Jonas Peterson , Jasmine Star",
"is_negative": is_negative + ", sharp , modern , bright",
"steps": steps,
"cfg_scale": cfg_scale,
"seed": seed if seed is not None else random.randint(-1, 2147483647)
}
elif image_style == "Portrait":
payload = {
"inputs": prompt + ", soft light, sharp, exposure blend, medium shot, bokeh, (hdr:1.4), high contrast, (cinematic, teal and orange:0.85), (muted colors, dim colors, soothing tones:1.3), low saturation, (hyperdetailed:1.2), (noir:0.4), (natural skin texture, hyperrealism, soft light, sharp:1.2)",
"is_negative": is_negative,
"steps": steps,
"cfg_scale": cfg_scale,
"seed": seed if seed is not None else random.randint(-1, 2147483647)
}
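    # Note: the Inference API is only guaranteed to read "inputs"; the extra keys
    # (is_negative, steps, cfg_scale, seed) may be ignored depending on the backend.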
    response = requests.post(API_URL, headers=headers, json=payload, timeout=120)
    if response.status_code != 200:
        # Surface API failures (model still loading, rate limits, bad token) in the Gradio UI
        raise gr.Error(f"Image generation failed ({response.status_code}): {response.text}")
    image = Image.open(io.BytesIO(response.content))
    return image
css = """
/* General Container Styles */
.gradio-container {
font-family: 'IBM Plex Sans', sans-serif;
max-width: 730px !important;
margin: auto;
padding-top: 1.5rem;
text-align: center; /* Center the content horizontally */
}
/* Button Styles */
.gr-button {
color: white;
background: #007bff; /* Use a primary color for the background */
white-space: nowrap;
border: none;
padding: 10px 20px;
border-radius: 8px;
cursor: pointer;
transition: background-color 0.3s, color 0.3s;
}
.gr-button:hover {
background-color: #0056b3; /* Darken the background color on hover */
}
/* Share Button Styles */
#share-btn-container {
padding: 0.5rem !important;
background-color: #007bff; /* Use a primary color for the background */
justify-content: center;
align-items: center;
border-radius: 9999px !important;
max-width: 13rem;
margin: 0 auto; /* Center the container horizontally */
transition: background-color 0.3s;
}
#share-btn-container:hover {
background-color: #0056b3; /* Darken the background color on hover */
}
#share-btn {
all: initial;
color: #ffffff;
font-weight: 600;
cursor: pointer;
font-family: 'IBM Plex Sans', sans-serif;
margin: 0.5rem !important;
padding: 0.5rem !important;
}
/* Other Styles */
#gallery {
min-height: 22rem;
margin: auto; /* Center the gallery horizontally */
border-bottom-right-radius: 0.5rem !important;
border-bottom-left-radius: 0.5rem !important;
}
/* Centered Container for the Image */
.image-container {
max-width: 100%; /* Set the maximum width for the container */
margin: auto; /* Center the container horizontally */
padding: 20px; /* Add padding for spacing */
border: 1px solid #ccc; /* Add a subtle border to the container */
border-radius: 10px;
overflow: hidden; /* Hide overflow if the image is larger */
max-height: 22rem; /* Set a maximum height for the container */
}
/* Set a fixed size for the image */
.image-container img {
max-width: 100%; /* Ensure the image fills the container */
height: auto; /* Maintain aspect ratio */
max-height: 100%; /* Set a maximum height for the image */
border-radius: 10px;
box-shadow: 0px 2px 4px rgba(0, 0, 0, 0.2);
}
"""
PTI_SD_DESCRIPTION = '''
<div id="content_align">
    <span style="color:darkred;font-size:32px;font-weight:bold">
        Multi-Model Stable Diffusion Image Generation (Simplified Version)
    </span>
</div>
<div id="content_align">
    <span style="color:blue;font-size:16px;font-weight:bold">
        Generate images directly from text prompts (no parameter tuning required)
    </span>
</div>
<div id="content_align" style="margin-top: 10px;">
</div>
'''
# Creating Gradio interface
with gr.Blocks(css=css) as demo:
    gr.Markdown(PTI_SD_DESCRIPTION)
    with gr.Row():
        with gr.Column():
            current_model = gr.Dropdown(label="Select Model", choices=list_models, value=list_models[1])
            text_prompt = gr.Textbox(label="Input Prompt", placeholder="Example: a cute dog", lines=2)
        with gr.Column():
            negative_prompt = gr.Textbox(label="Negative Prompt (optional)", placeholder="Example: blurry, unfocused", lines=2)
            image_style = gr.Dropdown(label="Select Style", choices=["None style", "Cinematic", "Digital Art", "Portrait"], value="None style")
            generate_button = gr.Button("Generate Image", variant='primary')
    with gr.Row():
        image_output = gr.Image(type="pil", label="Image Output")
    generate_button.click(generate_txt2img, inputs=[current_model, text_prompt, negative_prompt, image_style], outputs=image_output)
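    # steps, cfg_scale and seed are not wired to UI inputs, so generate_txt2img
    # falls back to its defaults (50 steps, cfg_scale 7, random seed).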
# Launch the app
demo.launch()