import gradio as gr
import requests
from PIL import Image
from io import BytesIO
import os

# Load the API token from an environment variable
API_TOKEN = os.getenv("HF_API_TOKEN")  # Ensure this environment variable is set

# Hugging Face Inference API URL
API_URL = "https://api-inference.huggingface.co/models/Benevolent/PonyDiffusionV10"

# Call the Hugging Face Inference API and return the generated image
def generate_image(prompt):
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    data = {"inputs": prompt}
    response = requests.post(API_URL, headers=headers, json=data)
    if response.status_code == 200:
        image_bytes = BytesIO(response.content)
        image = Image.open(image_bytes)
        return image
    else:
        # Surface API failures in the UI instead of returning a string to the Image output
        raise gr.Error(f"Error: {response.status_code}, {response.text}")

# Create the Gradio interface
def create_ui():
    with gr.Blocks() as ui:
        gr.Markdown("## PonyDiffusionV10 - Text to Image Generator")
        with gr.Row():
            prompt_input = gr.Textbox(label="Enter a Prompt", placeholder="Describe the image you want to generate", lines=3)
            generate_button = gr.Button("Generate Image")
        with gr.Row():
            output_image = gr.Image(label="Generated Image")
        # Link the button to the generation function
        generate_button.click(fn=generate_image, inputs=prompt_input, outputs=output_image)
    return ui

# Run the interface
if __name__ == "__main__":
    create_ui().launch()
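
# Usage sketch for running locally (assumptions: the script is saved as app.py and you
# already have a Hugging Face access token; the token value below is a placeholder):
#
#   export HF_API_TOKEN="hf_your_token_here"   # expose the token to the script
#   pip install gradio requests pillow         # dependencies imported above
#   python app.py                              # launch the Gradio app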