# Pencil-Vision / app.py
import gradio as gr
import requests
import io
import os
import logging
from PIL import Image, ImageEnhance
from gradio_client import Client
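# Basic logging configuration; the INFO level is an assumption here so that the
# fallback notice in gen_img is visible in the console / Space logs alongside errors.
logging.basicConfig(level=logging.INFO)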
# Primary Inference API endpoint and auth header, plus a Gradio client for the
# SDXL-Lightning Space used as a fallback model
API_URL_1 = "https://api-inference.huggingface.co/models/sd-community/sdxl-flash"
API_KEY = os.getenv("HF_API_KEY")
headers = {"Authorization": f"Bearer {API_KEY}"}
client = Client("AP123/SDXL-Lightning")
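# Note: HF_API_KEY is expected to be provided as an environment variable (e.g. a
# Space secret). If it is missing, the Inference API request will fail and
# gen_img falls back to the SDXL-Lightning Space via the client above.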
def query(api_url, payload):
    """POST a payload to the Inference API; return raw image bytes, or None on failure."""
    try:
        response = requests.post(api_url, headers=headers, json=payload)
        response.raise_for_status()
        return response.content
    except requests.exceptions.HTTPError as http_err:
        logging.error(f"HTTP error occurred: {http_err}")
    except Exception as err:
        logging.error(f"An error occurred: {err}")
    return None
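# The SDXL-Lightning Space only accepts fixed step presets ("1-Step", "2-Step",
# "4-Step", "8-Step"); map the slider value onto those presets, with anything
# above 4 falling back to "8-Step" (e.g. 6 -> "8-Step").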
def map_inference_steps(steps):
    if steps <= 1:
        return "1-Step"
    elif steps <= 2:
        return "2-Step"
    elif steps <= 4:
        return "4-Step"
    else:
        return "8-Step"
def gen_img(prompt, num_inference_steps, guidance_scale, contrast):
    """Generate an image, trying the Inference API first and the SDXL-Lightning Space as a fallback."""
    payload = {
        "inputs": prompt,
        "parameters": {
            "num_inference_steps": num_inference_steps,
            "guidance_scale": guidance_scale
        }
    }
    # Try generating the image with the first model
    image_bytes = query(API_URL_1, payload)
    if image_bytes is None:
        logging.info("First model failed, trying second model...")
        try:
            mapped_steps = map_inference_steps(num_inference_steps)
            result = client.predict(prompt, mapped_steps, api_name="/generate_image")
            with open(result, "rb") as img_file:
                image = Image.open(img_file)
                enhancer = ImageEnhance.Contrast(image)
                image = enhancer.enhance(contrast)
            return image, "Image generated by the second model"
        except Exception as e:
            logging.error(f"Error with the second model: {e}")
            return None, "Error with the second model"
    try:
        image = Image.open(io.BytesIO(image_bytes))
        enhancer = ImageEnhance.Contrast(image)
        image = enhancer.enhance(contrast)
        return image, "Image generated by the first model"
    except Exception as e:
        logging.error(f"Error processing image: {e}")
        return None, "Error processing image"
with gr.Blocks(theme=gr.themes.Soft(primary_hue=gr.themes.colors.lime, spacing_size="md", radius_size="lg",
                                    font=[gr.themes.GoogleFont("Copperplate"), "Fantasy", "sans-serif"])) as demo:
    gr.Markdown("""
    <div style='text-align: center;
                font-size: 36px;
                font-family: Copperplate, Fantasy;
                color: orange;'>
        Pencil Vision (early beta 1.3)
    </div>
    """)
    with gr.Row():
        prompt = gr.Textbox(label="Enter your prompt to generate the image", placeholder="A fantasy landscape with mountains and a river")
    with gr.Row():
        num_inference_steps = gr.Slider(minimum=1, maximum=15, value=6, step=1, label="Number of Inference Steps")
        guidance_scale = gr.Slider(minimum=1.0, maximum=5.0, value=2.5, step=0.1, label="Guidance Scale")
        contrast_scale = gr.Slider(minimum=1.0, maximum=3.0, value=1.0, step=0.1, label="Contrast Scale")
    with gr.Row():
        generate_button = gr.Button("Generate Image")
    output_image = gr.Image(label="Generated Image")
    # model_info = gr.Textbox(label="Model Info", interactive=False)
    error_message = gr.Markdown(visible=False)
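    # Click handler: returns the generated image plus an update for the error
    # Markdown (shown with a message on failure, hidden on success)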
    def handle_click(prompt, num_inference_steps, guidance_scale, contrast_scale):
        image, info = gen_img(prompt, num_inference_steps, guidance_scale, contrast_scale)
        if image is None:
            return None, gr.update(visible=True, value=f"{info}. Please try again.")
        return image, gr.update(visible=False)

    generate_button.click(handle_click,
                          inputs=[prompt, num_inference_steps, guidance_scale, contrast_scale],
                          outputs=[output_image, error_message])
demo.launch(share=True)
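# Note: share=True only matters when running locally; on Hugging Face Spaces the
# app is already served publicly and Gradio ignores the share flag.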