import gradio as gr
import requests
import io
import random
import os
import time
from PIL import Image
from deep_translator import GoogleTranslator
import json
# Project by Nymbo
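# Serverless Inference API endpoint for Stable Diffusion 3.5 Large Turbo; the read
# token is expected in the HF_READ_TOKEN environment variable.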
API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large-turbo"
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100
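# query() translates a Burmese prompt to English, posts a generation request to the
# Inference API above, and returns the result as a PIL image (or None on failure).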
def query(prompt, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7):
    if prompt == "" or prompt is None:
        return None

    # Short random ID so concurrent generations can be told apart in the logs.
    key = random.randint(0, 999)
    # random.choice allows rotating between several read tokens if more are added later.
    API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN")])
    headers = {"Authorization": f"Bearer {API_TOKEN}"}

    # Translate the prompt from Burmese ('my') to English before sending it to the model,
    # then append a few quality-boosting tags.
    prompt = GoogleTranslator(source='my', target='en').translate(prompt)
    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')
    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')
    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed if seed != -1 else random.randint(1, 1000000000),
        "strength": strength
    }
    # Call the Inference API and surface HTTP errors in the Gradio UI.
    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        print(f"Error: Failed to get image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")

    # Decode the returned bytes into a PIL image.
    try:
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
        return image
    except Exception as e:
        print(f"Error when trying to open the image: {e}")
        return None
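# Hypothetical local smoke test (not part of the Space): call query() directly,
# assuming HF_READ_TOKEN is set in the environment.
#
#     image = query("a beautiful sunset")  # a Burmese prompt would normally go here
#     if image is not None:
#         image.save("sample.png")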
css = """
#app-container {
max-width: 600px;
margin-left: auto;
margin-right: auto;
}
"""
with gr.Blocks(theme='Nymbo/Nymbo_Theme', css=css) as app:
    gr.HTML("<center><h1>Walone AI Image Stable</h1></center>")
    with gr.Column(elem_id="app-container"):
        with gr.Row():
            with gr.Column(elem_id="prompt-container"):
                with gr.Row():
                    # Burmese labels: "Prompt ရေးရန်" = "Write a prompt", "ဒီနေရာမှာ prompt ရေးပါ" = "Write the prompt here".
                    text_prompt = gr.Textbox(label="Prompt ရေးရန်", placeholder="ဒီနေရာမှာ prompt ရေးပါ", lines=2, elem_id="prompt-text-input")
                with gr.Row():
                    # "အဆင့်မြင့် Settings" = "Advanced Settings".
                    with gr.Accordion("အဆင့်မြင့် Settings", open=False):
                        negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos", lines=3, elem_id="negative-prompt-text-input")
                        steps = gr.Slider(label="Sampling steps", value=4, minimum=1, maximum=100, step=1)
                        cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
                        method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
                        strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
                        seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)

        with gr.Row():
            text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
        with gr.Row():
            image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")

        # Input order must match query()'s positional parameters:
        # (prompt, is_negative, steps, cfg_scale, sampler, seed, strength).
        text_button.click(query, inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength], outputs=image_output)

app.launch(show_api=False, share=True)