PR-Branch
#6 by crystalai - opened

- README.md +2 -2
- app.py +1 -1
- image_transformation.py +5 -5
- requirements.txt +1 -1
- tool_config.json +2 -2
README.md
CHANGED
@@ -4,9 +4,9 @@ emoji: ⚡
 colorFrom: blue
 colorTo: purple
 sdk: gradio
-sdk_version:
+sdk_version: 3.27.0
 app_file: app.py
 pinned: false
 tags:
 - tool
----
+---
app.py
CHANGED
@@ -1,4 +1,4 @@
-from transformers.
+from transformers.tools.base import launch_gradio_demo
 from image_transformation import ImageTransformationTool
 
 launch_gradio_demo(ImageTransformationTool)
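For context, `launch_gradio_demo` builds a Gradio UI around the tool class's declared inputs and output type, so app.py is a thin wrapper over the tool itself. A minimal smoke test that skips Gradio and exercises the tool directly, relying on the `__call__(self, image, prompt)` signature added in image_transformation.py below (the file names and device choice here are illustrative, not part of the PR):

# Illustrative sketch, not part of the PR: call the tool directly.
from PIL import Image

from image_transformation import ImageTransformationTool

tool = ImageTransformationTool(device="cuda")  # device is an assumption; it defaults to None
image = Image.open("input.png")                # hypothetical input file
result = tool(image, "in the style of a watercolor painting")
result.save("output.png")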
image_transformation.py
CHANGED
@@ -1,6 +1,6 @@
 from PIL import Image
 import torch
-from transformers.
+from transformers.tools.base import Tool
 from transformers.utils import (
     is_accelerate_available,
     is_vision_available,
@@ -21,10 +21,10 @@ class ImageTransformationTool(Tool):
     default_stable_diffusion_checkpoint = "timbrooks/instruct-pix2pix"
     description = IMAGE_TRANSFORMATION_DESCRIPTION
     inputs = {
-        'image': {"type":
-        'prompt': {"type":
+        'image': {"type": Image.Image, "description": "the image to transform"},
+        'prompt': {"type": str, "description": "the prompt to use to change the image"}
     }
-    output_type =
+    output_type = Image.Image
 
     def __init__(self, device=None, controlnet=None, stable_diffusion=None, **hub_kwargs) -> None:
         if not is_accelerate_available():
@@ -51,7 +51,7 @@ class ImageTransformationTool(Tool):
 
         self.is_initialized = True
 
-    def
+    def __call__(self, image, prompt):
         if not self.is_initialized:
             self.setup()
 
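The diff truncates the body of `__call__`; based on the class attributes (the `timbrooks/instruct-pix2pix` default checkpoint, an image and prompt as inputs, and an image as output), a plausible sketch of the underlying call with diffusers looks like the following. This is an assumption for illustration, not the PR's actual implementation:

# Illustrative sketch only: the PR diff does not show the __call__ body.
# Assumes diffusers' StableDiffusionInstructPix2PixPipeline, matching the
# default checkpoint declared on the class.
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from PIL import Image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
).to("cuda")

image = Image.open("input.png").convert("RGB")   # hypothetical input file
edited = pipe(prompt="make it look like winter", image=image).images[0]
edited.save("edited.png")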
requirements.txt
CHANGED
@@ -1,4 +1,4 @@
-transformers
+transformers @ git+https://github.com/huggingface/transformers@test_composition
 diffusers
 accelerate
 opencv-python
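The pin replaces the PyPI release of transformers with a direct git reference to the `test_composition` branch, presumably because the `transformers.tools` API imported above was not yet available in a released version. A quick way to confirm which install is actually being imported:

# Sketch: verify the branch-pinned transformers install is the one in use.
import transformers
print(transformers.__version__, transformers.__file__)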
tool_config.json
CHANGED
@@ -1,6 +1,6 @@
 {
-  "description": "This is a tool that transforms an image according to a prompt and returns the modified image.",
-  "name": "
+  "description": "This is a tool that transforms an image according to a prompt. It takes two inputs: `image`, which should be the image to transform, and `prompt`, which should be the prompt to use to change it. The prompt should only contain descriptive adjectives, as if completing the prompt of the original image. It returns the modified image.",
+  "name": "image_transformer",
   "tool_class": "image_transformation.ImageTransformationTool",
   "inputs": {"image": "image", "prompt": "str"},
   "output_type": "image"
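The expanded description matters because an agent reads it verbatim when deciding whether and how to call the tool, and the new name gives the tool a stable identifier. A hedged sketch of how a tool published with this config is typically consumed, assuming the agents-era `load_tool` helper from the pinned transformers branch (the repo id below is illustrative):

# Sketch: load the tool from the Hub via its tool_config.json.
# The repo id is an assumption; substitute this Space's actual id.
from PIL import Image
from transformers import load_tool

tool = load_tool("huggingface-tools/image-transformation")
image = Image.open("input.png")                # hypothetical input file
result = tool(image, "covered in snow")
result.save("snowy.png")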