Spaces:
Runtime error
Runtime error
File size: 1,759 Bytes
393b5eb 912d9da 936baa4 393b5eb 927b9ac 393b5eb 9c61b4b 393b5eb 936baa4 393b5eb 936baa4 393b5eb 625c254 393b5eb 625c254 393b5eb |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 |
from transformers.tools.base import Tool, get_default_device
from transformers.utils import is_accelerate_available
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
# Natural-language description of the tool, surfaced to the agent/LLM so it
# knows when to select this tool and what input name (`prompt`) it expects.
TEXT_TO_IMAGE_DESCRIPTION = (
"This is a tool that creates an image according to a prompt, which is a text description. It takes an input named `prompt` which "
"contains the image description and outputs an image."
)
class TextToImageTool(Tool):
    """Agent tool that generates an image from a text prompt.

    Lazily loads a Stable Diffusion pipeline (with a DPM-Solver multistep
    scheduler) on first call, moves it to the selected device, and runs it
    with a fixed quality-boosting prompt suffix and negative prompt.
    """

    default_checkpoint = "runwayml/stable-diffusion-v1-5"
    description = TEXT_TO_IMAGE_DESCRIPTION
    inputs = ['text']
    outputs = ['image']

    def __init__(self, device=None, **hub_kwargs) -> None:
        """Create the tool without loading any weights yet.

        Args:
            device: Torch device to run on; resolved via ``get_default_device()``
                at setup time when ``None``.
            **hub_kwargs: Extra keyword arguments forwarded to
                ``DiffusionPipeline.from_pretrained`` (e.g. ``revision``,
                ``cache_dir``, auth token).

        Raises:
            ImportError: If ``accelerate`` is not installed.
        """
        if not is_accelerate_available():
            raise ImportError("Accelerate should be installed in order to use tools.")

        super().__init__()

        self.device = device
        self.pipeline = None  # populated lazily by setup()
        self.hub_kwargs = hub_kwargs

    def setup(self):
        """Load the diffusion pipeline and move it to the target device."""
        if self.device is None:
            self.device = get_default_device()

        # Fix: forward the hub kwargs captured in __init__ — previously they
        # were stored but silently ignored, so options like `revision` or an
        # auth token never reached the Hub download.
        self.pipeline = DiffusionPipeline.from_pretrained(self.default_checkpoint, **self.hub_kwargs)
        self.pipeline.scheduler = DPMSolverMultistepScheduler.from_config(self.pipeline.scheduler.config)
        self.pipeline.to(self.device)

        # On GPU, switch to half precision to cut memory use and speed up inference.
        if self.device.type == "cuda":
            self.pipeline.to(torch_dtype=torch.float16)

        self.is_initialized = True

    def __call__(self, prompt):
        """Generate and return one PIL image for the given text ``prompt``."""
        if not self.is_initialized:
            self.setup()

        negative_prompt = "low quality, bad quality, deformed, low resolution"
        added_prompt = " , highest quality, highly realistic, very high resolution"

        return self.pipeline(prompt + added_prompt, negative_prompt=negative_prompt, num_inference_steps=25).images[0]
|