Spaces · Running on Zero

Commit c712159
Parent(s): 9899a49

Use Zero Spaces (#2)

- Update app.py (4991b244b5fa5ad2bc2da3ae398d3b53269df5c7)
- Update requirements.txt (7e13740cf2673a8fab6d0f5b4baef752b86c7b9d)
- Update requirements.txt (e095422a7dca7248cbb17d9df26e88f97b8dd2a3)
- Update README.md (9b6cd9151c82ec5d8fdb028c597ca2d725daf387)
- updates on change (3ceb728d939da3d6443d825c09ac2e06571644e5)
- no need for xformers on Spaces Zero (b7758dc0fb7aac6390e644ce988b5a60c7e65257)

Co-authored-by: Radamés Ajna <radames@users.noreply.huggingface.co>

Files changed:
- README.md +1 -1
- app.py +16 -11
- requirements.txt +3 -2
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: ⚡
 colorFrom: purple
 colorTo: red
 sdk: gradio
-sdk_version: 4.
+sdk_version: 4.36.0
 app_file: app.py
 pinned: false
 license: cc-by-nc-4.0
app.py CHANGED
@@ -1,12 +1,15 @@
 import random
+import spaces
 
 import gradio as gr
 import numpy as np
 import torch
 from diffusers import LCMScheduler, PixArtAlphaPipeline, Transformer2DModel
 from peft import PeftModel
+import os
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
+IS_SPACE = os.environ.get("SPACE_ID", None) is not None
 
 transformer = Transformer2DModel.from_pretrained(
     "PixArt-alpha/PixArt-XL-2-1024-MS",
@@ -23,7 +26,8 @@ if torch.cuda.is_available():
         transformer=transformer,
         torch_dtype=torch.float16,
     )
-
+    if not IS_SPACE:
+        pipe.enable_xformers_memory_efficient_attention()
     pipe = pipe.to(device)
 else:
     pipe = PixArtAlphaPipeline.from_pretrained(
@@ -46,8 +50,8 @@ MAX_IMAGE_SIZE = 1024
 NUM_INFERENCE_STEPS = 4
 
 
+@spaces.GPU
 def infer(prompt, seed, randomize_seed):
-
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
@@ -88,7 +92,6 @@ else:
     power_device = "CPU"
 
 with gr.Blocks(css=css) as demo:
-
     with gr.Column(elem_id="col-container"):
         gr.Markdown(
             f"""
@@ -107,7 +110,6 @@ with gr.Blocks(css=css) as demo:
         )
 
         with gr.Row():
-
             prompt = gr.Text(
                 label="Prompt",
                 show_label=False,
@@ -121,7 +123,6 @@ with gr.Blocks(css=css) as demo:
         result = gr.Image(label="Result", show_label=False)
 
         with gr.Accordion("Advanced Settings", open=False):
-
             seed = gr.Slider(
                 label="Seed",
                 minimum=0,
@@ -134,14 +135,18 @@ with gr.Blocks(css=css) as demo:
 
         examples = gr.Examples(examples=examples, inputs=[prompt])
 
-        gr.Markdown(
-            "**Disclaimer:**"
-        )
+        gr.Markdown("**Disclaimer:**")
         gr.Markdown(
             "This demo is only for research purpose. Jasper cannot be held responsible for the generation of NSFW (Not Safe For Work) content through the use of this demo. Users are solely responsible for any content they create, and it is their obligation to ensure that it adheres to appropriate and ethical standards. Jasper provides the tools, but the responsibility for their use lies with the individual user."
         )
-
-
-
+        gr.on(
+            [run_button.click, seed.change, prompt.change, randomize_seed.change],
+            fn=infer,
+            inputs=[prompt, seed, randomize_seed],
+            outputs=[result],
+            show_progress="minimal",
+            show_api=False,
+            trigger_mode="always_last",
+        )
 
 demo.queue().launch(show_api=False)
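The core of the app.py change is the ZeroGPU pattern: import spaces, decorate the GPU-bound function with @spaces.GPU so a GPU is attached only while infer runs, detect the Space via the SPACE_ID environment variable, and wire several triggers to one handler with gr.on. Below is a minimal, hedged sketch of that pattern, not this Space's code: fake_generate is a placeholder for the real PixArt pipeline call, and prompt.submit stands in for the run button the real app uses.

# Minimal ZeroGPU sketch (assumes the `spaces` and `gradio` packages are installed).
import os
import random

import gradio as gr
import spaces

IS_SPACE = os.environ.get("SPACE_ID", None) is not None  # same detection as app.py
MAX_SEED = 2**32 - 1


@spaces.GPU  # on ZeroGPU hardware, a GPU is held only while this function runs
def fake_generate(prompt, seed, randomize_seed):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # A real app would call the diffusers pipeline here with this seed.
    return f"would generate: {prompt!r} (seed={seed})"


with gr.Blocks() as demo:
    prompt = gr.Text(label="Prompt")
    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
    result = gr.Textbox(label="Result")

    # gr.on wires several triggers to one handler; trigger_mode="always_last"
    # drops queued intermediate events so only the latest input is processed.
    gr.on(
        [prompt.submit, seed.change, prompt.change, randomize_seed.change],
        fn=fake_generate,
        inputs=[prompt, seed, randomize_seed],
        outputs=[result],
        show_progress="minimal",
        show_api=False,
        trigger_mode="always_last",
    )

demo.queue().launch(show_api=False)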
requirements.txt CHANGED
@@ -1,5 +1,5 @@
 accelerate
-diffusers
+git+https://github.com/huggingface/diffusers/
 invisible_watermark
 --extra-index-url https://download.pytorch.org/whl/cu118
 torch==2.0.1
@@ -9,4 +9,5 @@ optimum
 beautifulsoup4
 transformers >= 4.34.0
 xformers
-ftfy
+ftfy
+spaces