Remove unnecessary tokenization (still needs work)
flux_emphasis.py (+0 -9)
```diff
@@ -343,15 +343,6 @@ def get_weighted_text_embeddings_flux(
     t5_length = 512 if pipe.name == "flux-dev" else 256
     clip_length = 77
 
-    tokenizer_t5(
-        prompt,
-        add_special_tokens=True,
-        padding="max_length",
-        truncation=True,
-        max_length=t5_length,
-        return_tensors="pt",
-    )
-
     # tokenizer 1
     prompt_tokens_clip, prompt_weights_clip = get_prompts_tokens_with_weights(
         tokenizer_clip, prompt, debug=debug
```
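The surviving CLIP path goes through `get_prompts_tokens_with_weights`, whose body is outside this diff. For orientation, a simplified sketch of the usual attention-weighting tokenizer (parse `(text:weight)` spans, tokenize each span without special tokens, and carry one weight per token) could look like the following; the parser and defaults here are assumptions, not the repo's actual implementation:

```python
import re
from transformers import CLIPTokenizer

def get_prompts_tokens_with_weights(tokenizer, prompt, debug=False):
    # Split "(text:weight)" spans out of the prompt; unmarked text
    # weighs 1.0. Simplified stand-in: no nesting or escape handling.
    pattern = re.compile(r"\(([^:()]+):([0-9.]+)\)|([^()]+)")
    tokens, weights = [], []
    for weighted_text, weight, plain_text in pattern.findall(prompt):
        text = weighted_text or plain_text
        w = float(weight) if weight else 1.0
        ids = tokenizer(text, add_special_tokens=False).input_ids
        tokens.extend(ids)
        weights.extend([w] * len(ids))
        if debug:
            print(f"{text!r} -> {len(ids)} tokens @ weight {w}")
    return tokens, weights

tokenizer_clip = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
toks, wts = get_prompts_tokens_with_weights(tokenizer_clip, "a (red:1.3) car")
```

The returned weights are typically applied to the corresponding token embeddings later on, and the padded sequence must fit the `clip_length = 77` context window the diff keeps.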