import gradio as gr
import torch
import os
from diffusers import DiffusionPipeline
from huggingface_hub import InferenceApi
# Read the Hugging Face token from the Space secret PRODIGY_GA_02
hf_api_key = os.getenv("PRODIGY_GA_02")
if hf_api_key is None:
    raise ValueError("Hugging Face API key 'PRODIGY_GA_02' not found. Ensure it is set as a secret.")
# Initialize the hosted Inference API client for the restricted (gated) SD 3.5 model, authenticated with the token
inference = InferenceApi(repo_id="stabilityai/stable-diffusion-3.5-medium", token=hf_api_key)

# Example inference request (runs once at startup; the Gradio app below generates images with a local pipeline instead)
response = inference(inputs="Your input text here")
print(response)
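# Note: InferenceApi is deprecated in recent huggingface_hub releases. A minimal
# sketch of the equivalent call with InferenceClient is shown below, kept commented
# out so it does not issue a second request at startup (this is an alternative
# suggestion, not part of the original app):
#
# from huggingface_hub import InferenceClient
# client = InferenceClient(model="stabilityai/stable-diffusion-3.5-medium", token=hf_api_key)
# image = client.text_to_image("Your input text here")  # returns a PIL.Image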
# Load the Stable Diffusion 1.5 pipeline for local generation
model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
pipe = DiffusionPipeline.from_pretrained(model_id)
pipe.to("cuda" if torch.cuda.is_available() else "cpu")  # Use the GPU if available; otherwise fall back to CPU
# Define the generation function used by the Gradio interface
def generate_image(prompt):
    images = pipe(prompt).images
    return images[0]
# Create Gradio UI
iface = gr.Interface(
    fn=generate_image,
    inputs="text",
    outputs="image",
    title="Stable Diffusion Generator",
    description="Enter a text prompt to generate an image",
)
# Launch the interface
iface.launch()
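Once the Space is running, the Interface can also be queried programmatically. A minimal sketch using gradio_client, where the Space id "user/stable-diffusion-generator" is a placeholder and not part of the original app:

from gradio_client import Client

client = Client("user/stable-diffusion-generator")  # placeholder Space id
image_path = client.predict("a lighthouse at sunset", api_name="/predict")  # default endpoint for a gr.Interface
print(image_path)  # local path to the downloaded image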