import io
import os

import gradio as gr
import requests
from PIL import Image

# Hugging Face Inference API setup
API_URL = "https://api-inference.huggingface.co/models/Benevolent/PonyDiffusionV10"
HF_API_TOKEN = os.getenv("HF_API_TOKEN")  # Get token from environment variable
headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}


def query(payload):
    """POST the payload to the Inference API and return the raw HTTP response."""
    return requests.post(API_URL, headers=headers, json=payload, timeout=120)


# Function to call the HF Inference API and convert the result into an image for Gradio
def generate_image(prompt):
    payload = {
        "inputs": prompt,
    }
    response = query(payload)

    # On failure (model loading, rate limiting, invalid token) the API responds
    # with JSON containing an "error" key and a non-200 status code
    if response.status_code != 200:
        try:
            detail = response.json().get("error", response.text)
        except ValueError:
            detail = response.text
        raise gr.Error(f"Inference API error: {detail}")

    # On success the text-to-image endpoint returns raw image bytes (e.g. PNG),
    # which Gradio's Image component can display as a PIL image
    return Image.open(io.BytesIO(response.content))


# Gradio Blocks Web UI
with gr.Blocks() as demo:
    with gr.Row():
        gr.Markdown("# PonyDiffusion V10 Text-to-Image Generator")
    with gr.Row():
        prompt = gr.Textbox(label="Enter a prompt for the image")
    with gr.Row():
        generate_button = gr.Button("Generate Image")
        output_image = gr.Image()

    # When generate_button is clicked, call generate_image
    generate_button.click(fn=generate_image, inputs=prompt, outputs=output_image)

# Launch the app
demo.launch()
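
# Usage note (the filename app.py below is illustrative, not part of the script):
#   HF_API_TOKEN=hf_your_token python app.py
# Gradio then prints a local URL (http://127.0.0.1:7860 by default) where the UI is served.
# The HF_API_TOKEN environment variable must hold a valid Hugging Face access token,
# since the Inference API rejects unauthenticated requests to this endpoint.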