Runtime error
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline  # StableDiffusionPipeline is provided by diffusers, not transformers

def generate_image(prompt):
    # Here, we assume the Kvikontent/midjourney-v6 checkpoint exposes text-to-image
    # capabilities through the standard diffusers interface; verify against the
    # actual model card and adjust if its interface differs.
    model_id = "Kvikontent/midjourney-v6"
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Set up the model pipeline (pass token=... here if the checkpoint is gated).
    pipe = StableDiffusionPipeline.from_pretrained(model_id)
    pipe = pipe.to(device)

    # Generate the image; the pipeline already returns PIL images,
    # so no tensor-to-PIL conversion is needed.
    image = pipe(prompt).images[0]
    return image
# Create a Gradio interface
iface = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),  # gr.inputs.Textbox was removed in Gradio 4.x
    outputs=gr.Image(type="pil"),
    title="Text to Image Generator",
    description="Type some text and generate an image using the Kvikontent/midjourney-v6 model.",
)

# Running the application
iface.launch()
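
The runtime error above most likely comes from importing StableDiffusionPipeline from transformers (it lives in diffusers), and even with that fixed, reloading the full pipeline inside generate_image on every request is slow and can exhaust memory on a Space. Below is a minimal sketch of an alternative app.py that loads the pipeline once at startup; it assumes Kvikontent/midjourney-v6 behaves like a standard diffusers text-to-image checkpoint, which should be checked against the model card.

# Sketch: load the pipeline once at startup instead of on every request.
# Assumes Kvikontent/midjourney-v6 uses the standard diffusers text-to-image interface.
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline

model_id = "Kvikontent/midjourney-v6"
device = "cuda" if torch.cuda.is_available() else "cpu"

# Loading here means the slow download and weight initialization happen once,
# not on every button press in the UI.
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
pipe = pipe.to(device)

def generate_image(prompt):
    # The pipeline returns a list of PIL images; take the first one.
    return pipe(prompt).images[0]

iface = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs=gr.Image(type="pil"),
    title="Text to Image Generator",
    description="Generate an image from text using the Kvikontent/midjourney-v6 model.",
)

iface.launch()

On CPU-only Space hardware a diffusion model like this will take minutes per image; moving the Space to a GPU runtime (and keeping the float16 weights on CUDA) is the usual way to make it responsive.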