Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -47,8 +47,17 @@ def get_embed_new(prompt, pipeline, compel, only_convert_string=False, compel_pr
 
     # Convert to Compel
     attention = parse_prompt_attention(prompt)
-
+
+    # New: handle the case where attention is empty
+    if not attention:
+        if only_convert_string:
+            return prompt
+        else:
+            conditioning, pooled = compel(prompt)
+            return conditioning, pooled
 
+    global_attention_chunks = []
+    # The section below is unchanged
     for att in attention:
         for chunk in att[0].split(','):
             temp_prompt_chunks = tokenize_line(chunk, pipeline.tokenizer)
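The guard added in this hunk short-circuits get_embed_new when parse_prompt_attention returns an empty list. A minimal runnable sketch of that control flow, using hypothetical stubs (parse_prompt_attention_stub, compel_stub) in place of the real parser and Compel instance:

def parse_prompt_attention_stub(prompt):
    # Hypothetical stand-in: an empty or whitespace prompt parses to [].
    return [[prompt, 1.0]] if prompt.strip() else []

def compel_stub(prompt):
    # Hypothetical stand-in for compel(prompt) -> (conditioning, pooled).
    return f"cond({prompt})", f"pooled({prompt})"

def get_embed_sketch(prompt, only_convert_string=False):
    attention = parse_prompt_attention_stub(prompt)
    if not attention:
        # The new early exit: no attention spans to re-weight.
        if only_convert_string:
            return prompt
        return compel_stub(prompt)
    # ... the unchanged chunking path would continue here ...
    return compel_stub(prompt)

print(get_embed_sketch(""))        # -> ('cond()', 'pooled()')
print(get_embed_sketch("", True))  # -> ''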
@@ -85,7 +94,7 @@ def get_embed_new(prompt, pipeline, compel, only_convert_string=False, compel_pr
         return ' '.join([prompt_attention_to_invoke_prompt(i) for i in global_prompt_chunks])
 
     return merge_embeds([prompt_attention_to_invoke_prompt(i) for i in global_prompt_chunks], compel)
-
+
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>You are running on CPU, but this project only supports GPU.</p>"
 
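For orientation, the two return paths above exist because get_embed_new splits prompts that exceed the 77-token CLIP window into comma-separated chunks (tokenize_line) and then fuses the per-chunk embeddings (merge_embeds). A hedged sketch of that chunk-and-merge idea, with an illustrative embed_fn and SDXL-like tensor shapes; the Space's actual merge_embeds may combine chunks differently:

import torch

def embed_fn(chunk):
    # Illustrative stand-in for compel(chunk): returns (conditioning, pooled)
    # with SDXL-like shapes (1, 77, 2048) and (1, 1280).
    g = torch.Generator().manual_seed(len(chunk))
    return (torch.randn(1, 77, 2048, generator=g),
            torch.randn(1, 1280, generator=g))

def merge_embeds_sketch(chunks):
    pairs = [embed_fn(c) for c in chunks]
    # Concatenate conditioning along the token axis; average pooled vectors.
    conditioning = torch.cat([c for c, _ in pairs], dim=1)
    pooled = torch.stack([p for _, p in pairs]).mean(dim=0)
    return conditioning, pooled

cond, pooled = merge_embeds_sketch(["a cat", "golden hour", "sharp focus"])
print(cond.shape, pooled.shape)  # torch.Size([1, 231, 2048]) torch.Size([1, 1280])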
@@ -131,16 +140,17 @@ def infer(
         returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
         requires_pooled=[False, True]
     )
-    #
+    # Call get_embed_new inside the infer function
     conditioning, pooled = get_embed_new(prompt, pipe, compel_instance)
-
+
     # Handle the negative prompt (negative_prompt)
     if use_negative_prompt and negative_prompt:
         negative_conditioning, negative_pooled = get_embed_new(negative_prompt, pipe, compel_instance)
     else:
         negative_conditioning = None
         negative_pooled = None
-
+
+    # When calling pipe, use the new parameter names (make sure they are correct)
     image = pipe(
         prompt_embeds=conditioning,
         pooled_prompt_embeds=pooled,
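For context, the parameter names in this hunk match the standard Compel-to-SDXL wiring in diffusers. A sketch of that wiring; the model id and prompts below are placeholders, not taken from this Space:

import torch
from compel import Compel, ReturnedEmbeddingsType
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# SDXL uses two text encoders; only the second returns pooled embeddings,
# hence requires_pooled=[False, True].
compel_instance = Compel(
    tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
    text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
    requires_pooled=[False, True],
)

conditioning, pooled = compel_instance("a photo of a cat++")
negative_conditioning, negative_pooled = compel_instance("blurry, low quality")

image = pipe(
    prompt_embeds=conditioning,
    pooled_prompt_embeds=pooled,
    negative_prompt_embeds=negative_conditioning,
    negative_pooled_prompt_embeds=negative_pooled,
).images[0]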
@@ -153,7 +163,6 @@ def infer(
         generator=generator,
         use_resolution_binning=use_resolution_binning,
     ).images[0]
-    image.save("output_image.png")
     return image, seed
 
 examples = [