Spaces:
Running
on
Zero
Running
on
Zero
DongfuJiang
committed on
Commit
•
bd0bce1
1
Parent(s):
d86be6b
update
Browse files
- model/model_manager.py +6 -46
- model/models/__init__.py +27 -0
- model/{fal_api_utils.py → models/fal_api_models.py} +0 -0
- model/{imagenhub_utils.py → models/imagenhub_models.py} +0 -0
- model/models/playground_api.py +35 -0
- model/other_models.py +0 -0
- serve/gradio_web.py +8 -8
- serve/gradio_web_image_editing.py +4 -4
- serve/vote_utils.py +21 -21
model/model_manager.py
CHANGED
@@ -1,68 +1,28 @@
|
|
1 |
import concurrent.futures
|
2 |
import random
|
3 |
import gradio as gr
|
4 |
-
# from fal_api_utils import load_fal_model
|
5 |
-
from .imagenhub_utils import load_imagenhub_model
|
6 |
-
import spaces
|
7 |
import requests
|
8 |
import io, base64, json
|
9 |
from PIL import Image
|
10 |
-
import
|
11 |
-
|
12 |
-
|
13 |
-
IMAGE_GENERATION_MODELS = ['imagenhub_LCM_generation','imagenhub_SDXLTurbo_generation','imagenhub_SDXL_generation', 'imagenhub_PixArtAlpha_generation',
|
14 |
-
'imagenhub_OpenJourney_generation','imagenhub_SDXLLightning_generation', 'imagenhub_StableCascade_generation',
|
15 |
-
'imagenhub_PlayGroundV2_generation', 'imagenhub_PlayGroundV2.5_generation']
|
16 |
-
IMAGE_EDITION_MODELS = ['imagenhub_CycleDiffusion_edition', 'imagenhub_Pix2PixZero_edition', 'imagenhub_Prompt2prompt_edition',
|
17 |
-
'imagenhub_SDEdit_edition', 'imagenhub_InstructPix2Pix_edition', 'imagenhub_MagicBrush_edition', 'imagenhub_PNP_edition']
|
18 |
|
19 |
class ModelManager:
|
20 |
def __init__(self):
|
21 |
self.model_ig_list = IMAGE_GENERATION_MODELS
|
22 |
self.model_ie_list = IMAGE_EDITION_MODELS
|
23 |
self.loaded_models = {}
|
24 |
-
|
25 |
def load_model_pipe(self, model_name):
|
26 |
-
model_source, model_name, model_type = model_name.split("_")
|
27 |
if not model_name in self.loaded_models:
|
28 |
-
|
29 |
-
pipe = load_imagenhub_model(model_name)
|
30 |
-
# elif model_source == "fal":
|
31 |
-
# pipe = load_fal_model(model_name, model_type)
|
32 |
-
else:
|
33 |
-
raise ValueError(f"Model source {model_source} not supported")
|
34 |
self.loaded_models[model_name] = pipe
|
35 |
else:
|
36 |
pipe = self.loaded_models[model_name]
|
37 |
return pipe
|
38 |
-
|
39 |
-
def generate_image_playground(self, model_name, prompt):
|
40 |
-
if model_name == "imagenhub_PlayGroundV2_generation":
|
41 |
-
model_name = "Playground_v2"
|
42 |
-
elif model_name == "imagenhub_PlayGroundV2.5_generation":
|
43 |
-
model_name = "Playground_v2.5"
|
44 |
-
|
45 |
-
headers = {
|
46 |
-
'Content-Type': 'application/json',
|
47 |
-
'Authorization': os.environ['PlaygroundAPI'],
|
48 |
-
}
|
49 |
-
|
50 |
-
data = json.dumps({"prompt": prompt, "filter_model": model_name, "scheduler": "DPMPP_2M_K", "guidance_scale": 3})
|
51 |
|
52 |
-
response = requests.post('https://playground.com/api/models/external/v1', headers=headers, data=data)
|
53 |
-
response.raise_for_status()
|
54 |
-
json_obj = response.json()
|
55 |
-
image_base64 = json_obj['images'][0]
|
56 |
-
img = Image.open(io.BytesIO(base64.decodebytes(bytes(image_base64, "utf-8"))))
|
57 |
-
|
58 |
-
return img
|
59 |
-
@spaces.GPU(duration=60)
|
60 |
def generate_image_ig(self, prompt, model_name):
|
61 |
-
|
62 |
-
|
63 |
-
else:
|
64 |
-
pipe = self.load_model_pipe(model_name)
|
65 |
-
result = pipe(prompt=prompt)
|
66 |
return result
|
67 |
|
68 |
def generate_image_ig_parallel_anony(self, prompt, model_A, model_B):
|
@@ -88,7 +48,7 @@ class ModelManager:
|
|
88 |
result = future.result()
|
89 |
results.append(result)
|
90 |
return results[0], results[1]
|
91 |
-
|
92 |
def generate_image_ie(self, textbox_source, textbox_target, textbox_instruct, source_image, model_name):
|
93 |
pipe = self.load_model_pipe(model_name)
|
94 |
result = pipe(src_image = source_image, src_prompt = textbox_source, target_prompt = textbox_target, instruct_prompt = textbox_instruct)
|
|
|
1 |
import concurrent.futures
|
2 |
import random
|
3 |
import gradio as gr
|
|
|
|
|
|
|
4 |
import requests
|
5 |
import io, base64, json
|
6 |
from PIL import Image
|
7 |
+
from .models import IMAGE_GENERATION_MODELS, IMAGE_EDITION_MODELS, load_pipeline
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
|
9 |
class ModelManager:
|
10 |
def __init__(self):
    """Set up the model registries and an empty pipeline cache."""
    # Registries come from the models package; order matters for the UI lists.
    self.model_ig_list = IMAGE_GENERATION_MODELS
    self.model_ie_list = IMAGE_EDITION_MODELS
    # model_name -> pipeline, filled lazily by load_model_pipe.
    self.loaded_models = {}
|
14 |
+
|
15 |
def load_model_pipe(self, model_name):
    """Return the pipeline for *model_name*, loading and caching it on first use."""
    try:
        # Fast path: pipeline already loaded in a previous call.
        return self.loaded_models[model_name]
    except KeyError:
        pipe = load_pipeline(model_name)
        self.loaded_models[model_name] = pipe
        return pipe
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
22 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
23 |
def generate_image_ig(self, prompt, model_name):
    """Generate an image for *prompt* with the model named *model_name*."""
    pipeline = self.load_model_pipe(model_name)
    return pipeline(prompt=prompt)
|
27 |
|
28 |
def generate_image_ig_parallel_anony(self, prompt, model_A, model_B):
|
|
|
48 |
result = future.result()
|
49 |
results.append(result)
|
50 |
return results[0], results[1]
|
51 |
+
|
52 |
def generate_image_ie(self, textbox_source, textbox_target, textbox_instruct, source_image, model_name):
|
53 |
pipe = self.load_model_pipe(model_name)
|
54 |
result = pipe(src_image = source_image, src_prompt = textbox_source, target_prompt = textbox_target, instruct_prompt = textbox_instruct)
|
model/models/__init__.py
ADDED
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from .imagenhub_models import load_imagenhub_model
|
2 |
+
from .playground_api import load_playground_model
|
3 |
+
|
4 |
+
# Registries of arena model identifiers, encoded as {source}_{name}_{type};
# load_pipeline parses this encoding to pick the right backend loader.
IMAGE_GENERATION_MODELS = [
    'imagenhub_LCM_generation',
    'imagenhub_SDXLTurbo_generation',
    'imagenhub_SDXL_generation',
    'imagenhub_PixArtAlpha_generation',
    'imagenhub_OpenJourney_generation',
    'imagenhub_SDXLLightning_generation',
    'imagenhub_StableCascade_generation',
    'playground_PlayGroundV2_generation',
    'playground_PlayGroundV2.5_generation',
]
IMAGE_EDITION_MODELS = [
    'imagenhub_CycleDiffusion_edition',
    'imagenhub_Pix2PixZero_edition',
    'imagenhub_Prompt2prompt_edition',
    'imagenhub_SDEdit_edition',
    'imagenhub_InstructPix2Pix_edition',
    'imagenhub_MagicBrush_edition',
    'imagenhub_PNP_edition',
]
|
9 |
+
|
10 |
+
|
11 |
+
def load_pipeline(model_name):
    """
    Load a model pipeline based on the model name.

    Args:
        model_name (str): The name of the model to load, of the form
            {source}_{name}_{type}, where
            - source is either "imagenhub" or "playground",
            - name is the identifier handed to the source-specific loader
              (it may itself contain underscores),
            - type is the task type, either "generation" or "edition".

    Returns:
        A callable pipeline object for the requested model.

    Raises:
        ValueError: If the name is malformed or the source is not supported.
    """
    try:
        # Split off the source (first segment) and the type (last segment);
        # everything in between is the model name proper. A plain
        # model_name.split("_") would raise for names whose middle segment
        # contains an underscore, so parse from both ends instead.
        model_source, rest = model_name.split("_", 1)
        name, model_type = rest.rsplit("_", 1)
    except ValueError:
        raise ValueError(
            f"Invalid model name {model_name!r}; expected '{{source}}_{{name}}_{{type}}'"
        )
    if model_source == "imagenhub":
        pipe = load_imagenhub_model(name, model_type)
    elif model_source == "playground":
        pipe = load_playground_model(name)
    else:
        raise ValueError(f"Model source {model_source} not supported")
    return pipe
|
model/{fal_api_utils.py → models/fal_api_models.py}
RENAMED
File without changes
|
model/{imagenhub_utils.py → models/imagenhub_models.py}
RENAMED
File without changes
|
model/models/playground_api.py
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import json
|
3 |
+
import requests
|
4 |
+
from PIL import Image
|
5 |
+
import io
|
6 |
+
import base64
|
7 |
+
class PlayGround():
    """Client for the Playground image-generation HTTP API.

    Wraps the external endpoint so it can be called like a local pipeline:
    ``PlayGround(name)(prompt) -> PIL.Image``. Requires the ``PlaygroundAPI``
    environment variable to hold the API key.
    """

    # Maps arena model names to the identifiers the Playground API expects.
    _API_NAMES = {
        "PlayGroundV2": "Playground_v2",
        "PlayGroundV2.5": "Playground_v2.5",
    }

    def __init__(self, model_name, model_type=None):
        self.model_name = model_name
        self.model_type = model_type
        self.api_key = os.environ['PlaygroundAPI']
        try:
            self._model_name = self._API_NAMES[model_name]
        except KeyError:
            # Original left self._model_name unset for unknown names, which
            # surfaced as an AttributeError at call time; fail fast instead.
            raise ValueError(f"Unknown Playground model {model_name!r}")

    def __call__(self, prompt):
        """Generate one image for *prompt* and return it as a PIL Image."""
        headers = {
            'Content-Type': 'application/json',
            'Authorization': "Bearer " + self.api_key,
        }
        data = json.dumps({
            "prompt": prompt,
            "filter_model": self._model_name,
            "scheduler": "DPMPP_2M_K",
            "guidance_scale": 3,
        })
        response = requests.post(
            'https://playground.com/api/models/external/v1',
            headers=headers,
            data=data,
            timeout=120,  # avoid hanging the worker on a stuck request
        )
        response.raise_for_status()
        json_obj = response.json()
        # The API returns the image base64-encoded in the first 'images' slot.
        image_base64 = json_obj['images'][0]
        img = Image.open(io.BytesIO(base64.b64decode(image_base64)))
        return img


def load_playground_model(model_name, model_type="generation"):
    """Factory matching the loader interface expected by ``load_pipeline``."""
    return PlayGround(model_name, model_type)
|
model/other_models.py
ADDED
File without changes
|
serve/gradio_web.py
CHANGED
@@ -46,9 +46,9 @@ Find out who is the 🥇conditional image generation models! More models are goi
|
|
46 |
gr.Markdown(model_description_md, elem_id="model_description_markdown")
|
47 |
with gr.Row():
|
48 |
with gr.Column():
|
49 |
-
chatbot_left = gr.Image(
|
50 |
with gr.Column():
|
51 |
-
chatbot_right = gr.Image(
|
52 |
|
53 |
with gr.Row():
|
54 |
with gr.Column():
|
@@ -86,7 +86,7 @@ Find out who is the 🥇conditional image generation models! More models are goi
|
|
86 |
|
87 |
gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
|
88 |
|
89 |
-
dummy_img_output = gr.Image(
|
90 |
gr.Examples(
|
91 |
examples=[["a cute dog is playing a ball", os.path.join("./examples", "dog.jpg")],
|
92 |
["Buildings on fire, old film still", os.path.join("./examples", "fire.jpg")],
|
@@ -215,9 +215,9 @@ def build_side_by_side_ui_named(models):
|
|
215 |
|
216 |
with gr.Row():
|
217 |
with gr.Column():
|
218 |
-
chatbot_left = gr.Image(
|
219 |
with gr.Column():
|
220 |
-
chatbot_right = gr.Image(
|
221 |
with gr.Row():
|
222 |
leftvote_btn = gr.Button(
|
223 |
value="👈 A is better", visible=False, interactive=False
|
@@ -245,7 +245,7 @@ def build_side_by_side_ui_named(models):
|
|
245 |
|
246 |
gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
|
247 |
|
248 |
-
dummy_img_output = gr.Image(
|
249 |
gr.Examples(
|
250 |
examples=[["a cute dog is playing a ball", os.path.join("./examples", "dog.jpg")],
|
251 |
["Buildings on fire, old film still", os.path.join("./examples", "fire.jpg")],
|
@@ -378,7 +378,7 @@ def build_single_model_ui(models, add_promotion_links=False):
|
|
378 |
send_btn = gr.Button(value="Send", variant="primary", scale=0)
|
379 |
|
380 |
with gr.Row():
|
381 |
-
chatbot = gr.Image(
|
382 |
|
383 |
with gr.Row() as button_row:
|
384 |
upvote_btn = gr.Button(value="👍 Upvote", interactive=False)
|
@@ -390,7 +390,7 @@ def build_single_model_ui(models, add_promotion_links=False):
|
|
390 |
if add_promotion_links:
|
391 |
gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
|
392 |
|
393 |
-
dummy_img_output = gr.Image(
|
394 |
gr.Examples(
|
395 |
examples=[["a cute dog is playing a ball", os.path.join("./examples", "dog.jpg")],
|
396 |
["Buildings on fire, old film still", os.path.join("./examples", "fire.jpg")],
|
|
|
46 |
gr.Markdown(model_description_md, elem_id="model_description_markdown")
|
47 |
with gr.Row():
|
48 |
with gr.Column():
|
49 |
+
chatbot_left = gr.Image(width=512, label = "Model A")
|
50 |
with gr.Column():
|
51 |
+
chatbot_right = gr.Image(width=512, label = "Model B")
|
52 |
|
53 |
with gr.Row():
|
54 |
with gr.Column():
|
|
|
86 |
|
87 |
gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
|
88 |
|
89 |
+
dummy_img_output = gr.Image(width=512, visible=False)
|
90 |
gr.Examples(
|
91 |
examples=[["a cute dog is playing a ball", os.path.join("./examples", "dog.jpg")],
|
92 |
["Buildings on fire, old film still", os.path.join("./examples", "fire.jpg")],
|
|
|
215 |
|
216 |
with gr.Row():
|
217 |
with gr.Column():
|
218 |
+
chatbot_left = gr.Image(width=512, label = "Model A")
|
219 |
with gr.Column():
|
220 |
+
chatbot_right = gr.Image(width=512, label = "Model B")
|
221 |
with gr.Row():
|
222 |
leftvote_btn = gr.Button(
|
223 |
value="👈 A is better", visible=False, interactive=False
|
|
|
245 |
|
246 |
gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
|
247 |
|
248 |
+
dummy_img_output = gr.Image(width=512, visible=False)
|
249 |
gr.Examples(
|
250 |
examples=[["a cute dog is playing a ball", os.path.join("./examples", "dog.jpg")],
|
251 |
["Buildings on fire, old film still", os.path.join("./examples", "fire.jpg")],
|
|
|
378 |
send_btn = gr.Button(value="Send", variant="primary", scale=0)
|
379 |
|
380 |
with gr.Row():
|
381 |
+
chatbot = gr.Image()
|
382 |
|
383 |
with gr.Row() as button_row:
|
384 |
upvote_btn = gr.Button(value="👍 Upvote", interactive=False)
|
|
|
390 |
if add_promotion_links:
|
391 |
gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
|
392 |
|
393 |
+
dummy_img_output = gr.Image(visible=False)
|
394 |
gr.Examples(
|
395 |
examples=[["a cute dog is playing a ball", os.path.join("./examples", "dog.jpg")],
|
396 |
["Buildings on fire, old film still", os.path.join("./examples", "fire.jpg")],
|
serve/gradio_web_image_editing.py
CHANGED
@@ -48,9 +48,9 @@ Find out who is the 🥇conditional image edition models!
|
|
48 |
gr.Markdown(model_description_md, elem_id="model_description_markdown")
|
49 |
with gr.Row():
|
50 |
with gr.Column():
|
51 |
-
chatbot_left = gr.Image(
|
52 |
with gr.Column():
|
53 |
-
chatbot_right = gr.Image(
|
54 |
|
55 |
with gr.Row():
|
56 |
with gr.Column():
|
@@ -249,9 +249,9 @@ def build_side_by_side_ui_named_ie(models):
|
|
249 |
|
250 |
with gr.Row():
|
251 |
with gr.Column():
|
252 |
-
chatbot_left = gr.Image(
|
253 |
with gr.Column():
|
254 |
-
chatbot_right = gr.Image(
|
255 |
|
256 |
with gr.Row():
|
257 |
leftvote_btn = gr.Button(
|
|
|
48 |
gr.Markdown(model_description_md, elem_id="model_description_markdown")
|
49 |
with gr.Row():
|
50 |
with gr.Column():
|
51 |
+
chatbot_left = gr.Image(width=512, type="pil")
|
52 |
with gr.Column():
|
53 |
+
chatbot_right = gr.Image(width=512, type="pil")
|
54 |
|
55 |
with gr.Row():
|
56 |
with gr.Column():
|
|
|
249 |
|
250 |
with gr.Row():
|
251 |
with gr.Column():
|
252 |
+
chatbot_left = gr.Image(width=512, label = "Model A")
|
253 |
with gr.Column():
|
254 |
+
chatbot_right = gr.Image(width=512, label = "Model B")
|
255 |
|
256 |
with gr.Row():
|
257 |
leftvote_btn = gr.Button(
|
serve/vote_utils.py
CHANGED
@@ -292,9 +292,9 @@ class ImageStateIE:
|
|
292 |
|
293 |
def generate_ig(gen_func, state, text, model_name, request: gr.Request):
|
294 |
if not text:
|
295 |
-
raise gr.
|
296 |
if not model_name:
|
297 |
-
raise gr.
|
298 |
if state is None:
|
299 |
state = ImageStateIG(model_name)
|
300 |
ip = get_ip(request)
|
@@ -332,11 +332,11 @@ def generate_ig(gen_func, state, text, model_name, request: gr.Request):
|
|
332 |
|
333 |
def generate_igm(gen_func, state0, state1, text, model_name0, model_name1, request: gr.Request):
|
334 |
if not text:
|
335 |
-
raise gr.
|
336 |
if not model_name0:
|
337 |
-
raise gr.
|
338 |
if not model_name1:
|
339 |
-
raise gr.
|
340 |
if state0 is None:
|
341 |
state0 = ImageStateIG(model_name0)
|
342 |
if state1 is None:
|
@@ -392,7 +392,7 @@ def generate_igm(gen_func, state0, state1, text, model_name0, model_name1, reque
|
|
392 |
|
393 |
def generate_igm_annoy(gen_func, state0, state1, text, model_name0, model_name1, request: gr.Request):
|
394 |
if not text:
|
395 |
-
raise gr.
|
396 |
if state0 is None:
|
397 |
state0 = ImageStateIG(model_name0)
|
398 |
if state1 is None:
|
@@ -450,15 +450,15 @@ def generate_igm_annoy(gen_func, state0, state1, text, model_name0, model_name1,
|
|
450 |
|
451 |
def generate_ie(gen_func, state, source_text, target_text, instruct_text, source_image, model_name, request: gr.Request):
|
452 |
if not source_text:
|
453 |
-
raise gr.
|
454 |
if not target_text:
|
455 |
-
raise gr.
|
456 |
if not instruct_text:
|
457 |
-
raise gr.
|
458 |
if not source_image:
|
459 |
-
raise gr.
|
460 |
if not model_name:
|
461 |
-
raise gr.
|
462 |
if state is None:
|
463 |
state = ImageStateIE(model_name)
|
464 |
ip = get_ip(request)
|
@@ -503,17 +503,17 @@ def generate_ie(gen_func, state, source_text, target_text, instruct_text, source
|
|
503 |
|
504 |
def generate_iem(gen_func, state0, state1, source_text, target_text, instruct_text, source_image, model_name0, model_name1, request: gr.Request):
|
505 |
if not source_text:
|
506 |
-
raise gr.
|
507 |
if not target_text:
|
508 |
-
raise gr.
|
509 |
if not instruct_text:
|
510 |
-
raise gr.
|
511 |
if not source_image:
|
512 |
-
raise gr.
|
513 |
if not model_name0:
|
514 |
-
raise gr.
|
515 |
if not model_name1:
|
516 |
-
raise gr.
|
517 |
if state0 is None:
|
518 |
state0 = ImageStateIE(model_name0)
|
519 |
if state1 is None:
|
@@ -580,13 +580,13 @@ def generate_iem(gen_func, state0, state1, source_text, target_text, instruct_te
|
|
580 |
|
581 |
def generate_iem_annoy(gen_func, state0, state1, source_text, target_text, instruct_text, source_image, model_name0, model_name1, request: gr.Request):
|
582 |
if not source_text:
|
583 |
-
raise gr.
|
584 |
if not target_text:
|
585 |
-
raise gr.
|
586 |
if not instruct_text:
|
587 |
-
raise gr.
|
588 |
if not source_image:
|
589 |
-
raise gr.
|
590 |
if state0 is None:
|
591 |
state0 = ImageStateIE(model_name0)
|
592 |
if state1 is None:
|
|
|
292 |
|
293 |
def generate_ig(gen_func, state, text, model_name, request: gr.Request):
|
294 |
if not text:
|
295 |
+
raise gr.Warning("Prompt cannot be empty.")
|
296 |
if not model_name:
|
297 |
+
raise gr.Warning("Model name cannot be empty.")
|
298 |
if state is None:
|
299 |
state = ImageStateIG(model_name)
|
300 |
ip = get_ip(request)
|
|
|
332 |
|
333 |
def generate_igm(gen_func, state0, state1, text, model_name0, model_name1, request: gr.Request):
|
334 |
if not text:
|
335 |
+
raise gr.Warning("Prompt cannot be empty.")
|
336 |
if not model_name0:
|
337 |
+
raise gr.Warning("Model name A cannot be empty.")
|
338 |
if not model_name1:
|
339 |
+
raise gr.Warning("Model name B cannot be empty.")
|
340 |
if state0 is None:
|
341 |
state0 = ImageStateIG(model_name0)
|
342 |
if state1 is None:
|
|
|
392 |
|
393 |
def generate_igm_annoy(gen_func, state0, state1, text, model_name0, model_name1, request: gr.Request):
|
394 |
if not text:
|
395 |
+
raise gr.Warning("Prompt cannot be empty.")
|
396 |
if state0 is None:
|
397 |
state0 = ImageStateIG(model_name0)
|
398 |
if state1 is None:
|
|
|
450 |
|
451 |
def generate_ie(gen_func, state, source_text, target_text, instruct_text, source_image, model_name, request: gr.Request):
|
452 |
if not source_text:
|
453 |
+
raise gr.Warning("Source prompt cannot be empty.")
|
454 |
if not target_text:
|
455 |
+
raise gr.Warning("Target prompt cannot be empty.")
|
456 |
if not instruct_text:
|
457 |
+
raise gr.Warning("Instruction prompt cannot be empty.")
|
458 |
if not source_image:
|
459 |
+
raise gr.Warning("Source image cannot be empty.")
|
460 |
if not model_name:
|
461 |
+
raise gr.Warning("Model name cannot be empty.")
|
462 |
if state is None:
|
463 |
state = ImageStateIE(model_name)
|
464 |
ip = get_ip(request)
|
|
|
503 |
|
504 |
def generate_iem(gen_func, state0, state1, source_text, target_text, instruct_text, source_image, model_name0, model_name1, request: gr.Request):
|
505 |
if not source_text:
|
506 |
+
raise gr.Warning("Source prompt cannot be empty.")
|
507 |
if not target_text:
|
508 |
+
raise gr.Warning("Target prompt cannot be empty.")
|
509 |
if not instruct_text:
|
510 |
+
raise gr.Warning("Instruction prompt cannot be empty.")
|
511 |
if not source_image:
|
512 |
+
raise gr.Warning("Source image cannot be empty.")
|
513 |
if not model_name0:
|
514 |
+
raise gr.Warning("Model name A cannot be empty.")
|
515 |
if not model_name1:
|
516 |
+
raise gr.Warning("Model name B cannot be empty.")
|
517 |
if state0 is None:
|
518 |
state0 = ImageStateIE(model_name0)
|
519 |
if state1 is None:
|
|
|
580 |
|
581 |
def generate_iem_annoy(gen_func, state0, state1, source_text, target_text, instruct_text, source_image, model_name0, model_name1, request: gr.Request):
|
582 |
if not source_text:
|
583 |
+
raise gr.Warning("Source prompt cannot be empty.")
|
584 |
if not target_text:
|
585 |
+
raise gr.Warning("Target prompt cannot be empty.")
|
586 |
if not instruct_text:
|
587 |
+
raise gr.Warning("Instruction prompt cannot be empty.")
|
588 |
if not source_image:
|
589 |
+
raise gr.Warning("Source image cannot be empty.")
|
590 |
if state0 is None:
|
591 |
state0 = ImageStateIE(model_name0)
|
592 |
if state1 is None:
|