# NOTE: removed Hugging Face Spaces file-viewer residue (status lines,
# git blame commit hashes, and the line-number gutter) that was pasted
# above the code during extraction — it was not part of the program.
import gradio as gr
import torch
import os
from diffusers import DiffusionPipeline
from huggingface_hub import InferenceApi
# Read the Hugging Face API token from the environment (stored as a Space secret).
hf_api_key = os.getenv("PRODIGY_GA_02")
if hf_api_key is None:
    raise ValueError("Hugging Face API key 'PRODIGY_GA_02' not found. Ensure it is set as a secret.")

# Initialize the Hugging Face API client for the restricted model.
# NOTE(review): InferenceApi is deprecated in recent huggingface_hub releases;
# consider migrating to huggingface_hub.InferenceClient — confirm installed version.
inference = InferenceApi(repo_id="stabilityai/stable-diffusion-3.5-medium", token=hf_api_key)

# Smoke-test request to confirm the token and model are reachable.
# Guarded so a transient API failure does not prevent the Gradio app
# below from starting (previously any error here crashed the whole script).
try:
    response = inference(inputs="Your input text here")
    print(response)
except Exception as exc:  # best-effort connectivity check only; log and continue
    print(f"Inference API smoke test failed: {exc}")
# Load the Stable Diffusion pipeline once at startup.
model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
# Reuse model_id here — it was previously defined but unused, with the
# repo string literal duplicated on the from_pretrained call.
pipe = DiffusionPipeline.from_pretrained(model_id)
pipe.to("cpu")  # switch to "cuda" if GPU access is available
# Define Gradio interface
def generate_image(prompt):
    """Run the diffusion pipeline on *prompt* and return the first generated image."""
    result = pipe(prompt)
    first_image = result.images[0]
    return first_image
# Create Gradio UI
# Build the Gradio UI: a single text prompt in, one generated image out.
iface = gr.Interface(
    fn=generate_image,
    inputs="text",
    outputs="image",
    title="Stable Diffusion Generator",
    description="Enter a text prompt to generate an image",
)

# Launch the interface. (Removed the stray trailing "|" extraction
# artifact that made this line a syntax error.)
iface.launch()