# NOTE: Lines of web-page chrome (Space status, commit hashes, line-number
# gutter) from the scraped Hugging Face Spaces page were removed here; they
# were not part of the program.
import gradio as gr
import requests
from PIL import Image
from io import BytesIO
import os
from huggingface_hub import InferenceClient
# Hugging Face API token; read from the environment so the secret never
# lives in source. None if HF_API_TOKEN is unset (requests will then 401).
API_TOKEN = os.getenv("HF_API_TOKEN") # Ensure you've set this environment variable
# Serverless Inference API endpoint for the Flux text-to-image model.
API_URL = "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-uncensored"
def enhance_prompt(prompt, system_prompt='You are a prompt enhancer', model="meta-llama/Llama-3.2-1B-Instruct", max_tokens=512, stream=False):
    """Rewrite *prompt* with a chat LLM and return the enhanced text.

    Args:
        prompt: The raw user prompt to enhance.
        system_prompt: System instruction steering the enhancer model.
        model: Hugging Face model id used for the chat completion.
        max_tokens: Maximum tokens the model may generate.
        stream: If True, accumulate the reply from streamed delta chunks;
            otherwise take it from the single completion response.

    Returns:
        The enhanced prompt with surrounding whitespace stripped.
    """
    enhancer = InferenceClient(api_key=API_TOKEN)
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": prompt},
    ]
    if stream:
        # Streaming yields chunk objects; the text lives in
        # choices[0].delta.content (may be None on the final chunk).
        response = ""
        for chunk in enhancer.chat_completion(
            model=model,
            messages=messages,
            max_tokens=max_tokens,
            stream=True,
        ):
            delta = chunk.choices[0].delta.content
            if delta:
                response += delta
        return response.strip()
    # Non-streaming returns one completion object, NOT an iterable of text.
    # (The previous code iterated it and concatenated non-str objects,
    # raising a TypeError at runtime.)
    completion = enhancer.chat_completion(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        stream=False,
    )
    return completion.choices[0].message.content.strip()
def generate_image(prompt, enhance=False):
    """Generate an image from *prompt* via the HF serverless inference API.

    Args:
        prompt: Text description of the desired image.
        enhance: If True, first rewrite the prompt with ``enhance_prompt``.

    Returns:
        A ``PIL.Image.Image`` decoded from the API response bytes.

    Raises:
        gr.Error: If the API responds with a non-200 status, so Gradio shows
            the failure to the user instead of crashing the Image component
            (returning an error *string* to a ``gr.Image`` output breaks it).
    """
    if enhance:
        prompt = enhance_prompt(prompt)
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    data = {"inputs": prompt}
    # timeout guards against the request hanging the worker forever;
    # image generation can be slow, so allow a generous window.
    response = requests.post(API_URL, headers=headers, json=data, timeout=120)
    if response.status_code == 200:
        # Response body is the raw image bytes; decode in-memory.
        return Image.open(BytesIO(response.content))
    raise gr.Error(f"Error: {response.status_code}, {response.text}")
# Static HTML banner for the app header; #title-text is styled by the
# css string below. (Currently unused by create_ui — kept for reference.)
title_html = """
<center>
<div id="title-container">
<h1 id="title-text">FLUX Capacitor</h1>
</div>
</center>
"""
# Custom CSS injected into gr.Blocks: background image for the container,
# dark-mode color overrides, and title styling. The .gradio-container-4-41-0
# selectors target a specific Gradio version's generated class names.
css = """
.gradio-container {
background: url(https://huggingface.co/spaces/K00B404/FLUX.1-Dev-Serverless-darn-enhanced-prompt/resolve/main/edge.png);
background-size: 900px 880px;
background-repeat: no-repeat;
background-position: center;
background-attachment: fixed;
color: #000;
}
.dark\\:bg-gray-950:is(.dark *) {
--tw-bg-opacity: 1;
background-color: rgb(157, 17, 142);
}
.gradio-container-4-41-0 .prose :last-child {
margin-top: 8px !important;
}
.gradio-container-4-41-0 .prose :last-child {
margin-bottom: -7px !important;
}
.dark {
--button-primary-background-fill: #09e60d70;
--button-primary-background-fill-hover: #00000070;
--background-fill-primary: #000;
--background-fill-secondary: #000;
}
.hide-container {
margin-top: -2px;
}
#title-text {
font-size: 30px;
font-weight: bold;
color: #000;
}
"""
# Create Gradio interface
def create_ui():
    """Assemble and return the Gradio Blocks interface.

    Layout: a markdown heading, a row with the prompt textbox and the
    enhance checkbox, a generate button, and a row holding the output image.
    The button is wired to ``generate_image``.
    """
    with gr.Blocks(theme='Nymbo/Nymbo_Theme', css=css) as demo:
        gr.Markdown("## Flux Uncensored - Text to Image Generator")
        with gr.Row():
            prompt_box = gr.Textbox(
                label="Enter a Prompt",
                placeholder="Describe the image you want to generate",
                lines=3,
            )
            # When checked, the prompt is LLM-enhanced before generation.
            enhance_toggle = gr.Checkbox(label="Enhance Prompt", value=True)
            generate_btn = gr.Button("Generate Image")
        with gr.Row():
            result_image = gr.Image(label="Generated Image")
        # Wire the button click to the generation function.
        generate_btn.click(
            fn=generate_image,
            inputs=[prompt_box, enhance_toggle],
            outputs=result_image,
        )
    return demo
# Run the interface only when executed as a script (not on import).
# NOTE: removed a stray trailing " |" scrape artifact that made this line
# a SyntaxError.
if __name__ == "__main__":
    create_ui().launch()