#!/usr/bin/env python
import gradio as gr
import requests
import io
import random
import os
import numpy as np
from PIL import Image
from deep_translator import GoogleTranslator
from theme import theme

API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100

def flip_image(x):
    return np.fliplr(x)

def clear():
    return None

def query(lora_id, prompt, is_negative=False, steps=28, cfg_scale=3.5, sampler="DPM++ 2M Karras", seed=-1, strength=100, width=896, height=1152):
    if prompt is None or prompt.strip() == "":
        return None, seed

    if lora_id is None or lora_id.strip() == "":
        lora_id = "black-forest-labs/FLUX.1-dev"

    key = random.randint(0, 999)

    API_URL = "https://api-inference.huggingface.co/models/"+ lora_id.strip()

    API_TOKEN = os.getenv("HF_READ_TOKEN")
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    
    prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')

    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')

    # If seed is -1, generate a random seed and use it
    if seed == -1:
        seed = random.randint(1, 1000000000)

    # Prepare the payload for the API call, including width and height
    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed if seed != -1 else random.randint(1, 1000000000),
        "strength": strength,
        "parameters": {
            "width": width,  # Pass the width to the API
            "height": height  # Pass the height to the API
        }
    }
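    # Note (assumption): the serverless Inference API generally reads image options
    # such as width/height from "parameters"; top-level keys like "steps",
    # "cfg_scale" and "strength" may be ignored depending on the backend, and the
    # selected sampler is currently not forwarded in this payload at all.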

    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        print(f"Error: Failed to get image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")

    try:
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
        return image, seed
    except Exception as e:
        print(f"Error when trying to open the image: {e}")
        return None, seed
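
# A minimal usage sketch (not wired into the UI), assuming HF_READ_TOKEN is set and
# the chosen repo is a valid FLUX LoRA on the Hub:
#
#   image, used_seed = query(
#       lora_id="prithivMLmods/Flux-Realism-FineDetailed",
#       prompt="a beautiful woman with blonde hair and blue eyes",
#       steps=28, cfg_scale=3.5, seed=-1, width=896, height=1152,
#   )
#   image.save("sample.png")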

examples = [
    "a beautiful woman with blonde hair and blue eyes",
    "a beautiful woman with brown hair and grey eyes",
    "a beautiful woman with black hair and brown eyes",
]

css = """
#app-container {
    max-width: 930px;
    margin-left: auto;
    margin-right: auto;
}
".gradio-container {background: url('file=abstract.jpg')}
   
"""
with gr.Blocks(theme=theme, css=css, elem_id="app-container") as app:
    gr.HTML("<center><h6>🎨 FLUX.1-Dev with LoRA 🇬🇧</h6></center>")
    with gr.Tab("Text to Image"):
        with gr.Column(elem_id="app-container"):
            with gr.Row():
                with gr.Column(elem_id="prompt-container"):
                    with gr.Group():
                        with gr.Row():
                            text_prompt = gr.Textbox(label="Image Prompt ✍️", placeholder="Enter a prompt here", lines=2, show_copy_button = True, elem_id="prompt-text-input")
                        with gr.Row():
                            with gr.Accordion("🎨 Lora trigger words", open=False):
                            		gr.Markdown("""
                                        - **Canopus-Pencil-Art-LoRA**: Pencil Art
                                        - **Flux-Realism-FineDetailed**: Fine Detailed
                                        - **Fashion-Hut-Modeling-LoRA**: Modeling
                                        - **SD3.5-Large-Turbo-HyperRealistic-LoRA**: hyper realistic
                                        - **Flux-Fine-Detail-LoRA**: Super Detail
                                        - **SD3.5-Turbo-Realism-2.0-LoRA**: Turbo Realism
                                        - **Canopus-LoRA-Flux-UltraRealism-2.0**: Ultra realistic 
                                        - **SD3.5-Large-Photorealistic-LoRA**: photorealistic
                                        - **Flux.1-Dev-LoRA-HDR-Realism**: HDR
                                        - **prithivMLmods/Ton618-Epic-Realism-Flux-LoRA**: Epic Realism
                                        - **john-singer-sargent-style**: John Singer Sargent Style
                                        - **alphonse-mucha-style**: Alphonse Mucha Style
                                        - **ultra-realistic-illustration**: ultra realistic illustration
                                        - **eye-catching**: eye-catching
                                        - **john-constable-style**: John Constable Style
                                        - **film-noir**: in the style of FLMNR
                                        - **flux-lora-pro-headshot**: PROHEADSHOT
                            		""")                       
                                
                        with gr.Row():
                            custom_lora = gr.Dropdown([" ", "prithivMLmods/Canopus-Pencil-Art-LoRA", "prithivMLmods/Flux-Realism-FineDetailed", "prithivMLmods/Fashion-Hut-Modeling-LoRA", "prithivMLmods/SD3.5-Large-Turbo-HyperRealistic-LoRA", "prithivMLmods/Flux-Fine-Detail-LoRA", "prithivMLmods/SD3.5-Turbo-Realism-2.0-LoRA", "hugovntr/flux-schnell-realism", "fofr/sdxl-deep-down", "prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0", "prithivMLmods/Canopus-Realism-LoRA", "prithivMLmods/Canopus-LoRA-Flux-FaceRealism", "prithivMLmods/SD3.5-Large-Photorealistic-LoRA", "prithivMLmods/Flux.1-Dev-LoRA-HDR-Realism", "prithivMLmods/Ton618-Epic-Realism-Flux-LoRA", "KappaNeuro/john-singer-sargent-style", "KappaNeuro/alphonse-mucha-style", "ntc-ai/SDXL-LoRA-slider.ultra-realistic-illustration", "ntc-ai/SDXL-LoRA-slider.eye-catching", "KappaNeuro/john-constable-style", "dvyio/flux-lora-film-noir", "dvyio/flux-lora-pro-headshot"], label="Custom LoRA",)
                        with gr.Row():
                            with gr.Accordion("⚙️ Advanced Settings", open=False, elem_id="settings-container"):
                                negative_prompt = gr.Textbox(label="Negative Prompt", lines=5, placeholder="What should not be in the image", value=" (visible hand:1.3), (ugly:1.3), (duplicate:1.2), (morbid:1.1), (mutilated:1.1), out of frame, bad face, extra fingers, mutated hands, (poorly drawn hands:1.1), (poorly drawn face:1.3), (mutation:1.3), (deformed:1.3), blurry, (bad anatomy:1.1), (bad proportions:1.2), (extra limbs:1.1), cloned face, (disfigured:1.2), gross proportions, malformed limbs, (missing arms:1.1), (missing legs:1.1), (extra arms:1.2), (extra legs:1.2), fused fingers, too many fingers, (long neck:1.2), sketched by bad-artist, (bad-image-v2-39000:1.3) ")
                                with gr.Row():
                                    width = gr.Slider(label="Image Width", value=896, minimum=64, maximum=1216, step=32)
                                    height = gr.Slider(label="Image Height", value=1152, minimum=64, maximum=1216, step=32)
                                strength = gr.Slider(label="Prompt Strength", value=100, minimum=0, maximum=100, step=1)    
                                steps = gr.Slider(label="Sampling steps", value=50, minimum=1, maximum=100, step=1)
                                cfg = gr.Slider(label="CFG Scale", value=3.5, minimum=1, maximum=20, step=0.5)
                                seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)                            
                                method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ 2S a Karras", "DPM2 a Karras", "DPM2 Karras", "DPM++ SDE Karras", "DEIS", "LMS", "DPM Adaptive", "DPM++ 2M", "DPM2 Ancestral", "DPM++ S", "DPM++ SDE", "DDPM", "DPM Fast", "dpmpp_2s_ancestral", "Euler", "Euler CFG PP", "Euler a", "Euler Ancestral", "Euler+beta", "Heun", "Heun PP2", "DDIM", "LMS Karras", "PLMS", "UniPC", "UniPC BH2"])
                        
                        with gr.Row():
                            with gr.Accordion("🫘Seed", open=False):
                                seed_output = gr.Textbox(label="Seed Used", elem_id="seed-output")
                                
            # Add a button to trigger the image generation    
            with gr.Row():
                text_button = gr.Button("Generate Image 🎨", variant='primary', elem_id="gen-button")
                clear_prompt = gr.Button("Clear Prompt 🗑️", variant="primary", elem_id="clear_button")
                clear_prompt.click(lambda: None, None, [text_prompt], queue=False, show_api=False)

            with gr.Group():
                with gr.Row():
                    image_output = gr.Image(type="pil", label="Image Output", format="png", show_share_button=False, elem_id="gallery")
                
                    
                with gr.Group():
                    with gr.Row():
                        gr.Examples(
                            examples = examples,
                            inputs = [text_prompt],
                        )
                        
            with gr.Group(): 
                with gr.Row():
                    clear_results = gr.Button(value="Clear Image 🗑️", variant="primary", elem_id="clear_button")
                    clear_results.click(lambda: None, None, [image_output], queue=False, show_api=False)
                    
                text_button.click(query, inputs=[custom_lora, text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=[image_output, seed_output])

    with gr.Tab("Flip Image"):
        with gr.Row():
            image_input = gr.Image()
            flip_output = gr.Image(format="png")
        with gr.Row():    
            image_button = gr.Button("Run", variant='primary')
            image_button.click(flip_image, inputs=image_input, outputs=flip_output, concurrency_limit=2)

    with gr.Tab("Tips"):
        with gr.Row():
                gr.Markdown(
        """
        <div style="max-width: 650px; margin: 2rem auto; padding: 1rem; border-radius: 10px; background-color: #f0f0f0;">
            <h2 style="font-size: 1.5rem; margin-bottom: 1rem;">How to Use</h2>
            <ol style="padding-left: 1.5rem;">
                <li>Enter a detailed description of the image you want to create.</li>
                <li>Adjust advanced settings if desired (tap to expand).</li>
                <li>Tap "Generate Image" and wait for your creation!</li>
            </ol>
            <p style="margin-top: 1rem; font-style: italic;">Tip: Be specific in your description for best results!</p>
        </div>
        """
        )
    
app.queue(default_concurrency_limit=200, max_size=200)  # Enable queuing: up to 200 waiting requests, per-event concurrency of 200

if __name__ == "__main__":
    app.launch(show_api=False, share=False)