Update app.py
app.py CHANGED
@@ -79,7 +79,7 @@ generatorjoker = deepcopy(original_generator)

generatorvoldemort = deepcopy(original_generator)

-
+generatorpushpa = deepcopy(original_generator)

# generatorcaitlyn = deepcopy(original_generator)

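The hunk above extends an existing pattern: the pretrained StyleGAN generator is deep-copied once per style so each copy can hold its own fine-tuned weights without disturbing the others. A minimal sketch of that pattern, assuming original_generator has already been built and loaded as elsewhere in app.py; the names style_names and generators are illustrative, not part of the app:

from copy import deepcopy

# Hypothetical restatement of the per-style cloning done in app.py:
# one pristine pretrained generator, one independent copy per style.
style_names = ["Joker", "Voldemort", "Pushpa"]
generators = {name: deepcopy(original_generator) for name in style_names}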
@@ -106,25 +106,18 @@ transform = transforms.Compose(


modeljoker = hf_hub_download(repo_id="Abhinowww/Capstone", filename="JokerEightHundredFalse.pt")
-
-
ckptjoker = torch.load(modeljoker, map_location=lambda storage, loc: storage)
-# print(ckptjoker.keys())
-# generatorjoker.load_state_dict(ckptjoker["g"], strict=False)
generatorjoker.load_state_dict(ckptjoker, strict=False)


modelvoldemort = hf_hub_download(repo_id="Abhinowww/Capstone", filename="VoldemortEightHundredFalse.pt")
-
ckptvoldemort = torch.load(modelvoldemort, map_location=lambda storage, loc: storage)
-# generatorvoldemort.load_state_dict(ckptvoldemort["g"], strict=False)
generatorvoldemort.load_state_dict(ckptvoldemort, strict=False)


-
-
-
-# generatorjinx.load_state_dict(ckptjinx["g"], strict=False)
+modelpushpa = hf_hub_download(repo_id="Abhinowww/Capstone", filename="PushpaFourHundredFalse.pt")
+ckptpushpa = torch.load(modelpushpa, map_location=lambda storage, loc: storage)
+generatorpushpa.load_state_dict(ckptpushpa, strict=False)


# modelcaitlyn = hf_hub_download(repo_id="akhaliq/jojogan-arcane", filename="arcane_caitlyn_preserve_color.pt")
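Each style follows the same three steps: download the checkpoint from the Hub, load it onto CPU, and push it into the matching generator copy with strict=False. A hedged sketch of that pattern collapsed into a loop, assuming the hypothetical generators dict from the previous sketch; CHECKPOINTS and load_style_checkpoints are illustrative names, not code from the app:

import torch
from huggingface_hub import hf_hub_download

# Checkpoint filenames as used in app.py, keyed by the hypothetical style names.
CHECKPOINTS = {
    "Joker": "JokerEightHundredFalse.pt",
    "Voldemort": "VoldemortEightHundredFalse.pt",
    "Pushpa": "PushpaFourHundredFalse.pt",
}

def load_style_checkpoints(generators, repo_id="Abhinowww/Capstone"):
    # Hypothetical helper mirroring the repeated download/load/apply steps above.
    for name, filename in CHECKPOINTS.items():
        path = hf_hub_download(repo_id=repo_id, filename=filename)
        # map_location keeps loading on CPU; strict=False tolerates key mismatches
        # between the checkpoint and the generator's state dict.
        ckpt = torch.load(path, map_location=lambda storage, loc: storage)
        generators[name].load_state_dict(ckpt, strict=False)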
@@ -172,9 +165,9 @@ def inference(img, model):
    elif model == 'Voldemort':
        with torch.no_grad():
            my_sample = generatorvoldemort(my_w, input_is_latent=True)
-
-
-
+    elif model == 'Pushpa':
+        with torch.no_grad():
+            my_sample = generatorpushpa(my_w, input_is_latent=True)
    # elif model == 'Caitlyn':
    #     with torch.no_grad():
    #         my_sample = generatorcaitlyn(my_w, input_is_latent=True)
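The new branch mirrors the existing Joker and Voldemort branches. As the number of styles grows, the elif chain in inference() could be replaced by a dictionary lookup; a sketch under the assumption that the generators live in the hypothetical generators dict from the earlier sketches and that my_w is the projected latent computed earlier in inference():

import torch

def stylize(my_w, model, generators):
    # Hypothetical dict-based dispatch; `model` is the dropdown value, e.g. "Pushpa".
    generator = generators[model]
    with torch.no_grad():
        # Same call as in app.py's per-style branches.
        return generator(my_w, input_is_latent=True)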
@@ -207,7 +200,7 @@ description = "Capstone Project. To use it, simply upload your image, or click o
# examples=[['mona.png','Joker']]
# gr.Interface(inference, [gr.inputs.Image(type="pil"),gr.inputs.Dropdown(choices=['JoJo', 'Disney','Jinx','Caitlyn','Yasuho','Arcane Multi','Art','Spider-Verse','Sketch'], type="value", default='JoJo', label="Model")], gr.outputs.Image(type="pil"),title=title,description=description,article=article,allow_flagging=False,examples=examples,allow_screenshot=False).launch()

-css_code='body{background-image:url("https://picsum.photos/seed/picsum/200/300");}'
+# css_code='body{background-image:url("https://picsum.photos/seed/picsum/200/300");}'
+# gr.Interface(lambda x:x, "textbox", "textbox", css=css_code).launch(debug=True)

-gr.Interface(lambda x:x, "textbox", "textbox", css=css_code).launch(debug=True)
-gr.Interface(inference, [gr.inputs.Image(type="pil"),gr.inputs.Dropdown(choices=['Joker', 'Voldemort'], type="value", default='Joker', label="Model")], gr.outputs.Image(type="pil"),title=title,description=description,allow_flagging=False,allow_screenshot=False).launch()
+gr.Interface(inference, [gr.inputs.Image(type="pil"),gr.inputs.Dropdown(choices=['Joker', 'Voldemort', 'Pushpa'], type="value", default='Joker', label="Model")], gr.outputs.Image(type="pil"),title=title,description=description,allow_flagging=False,allow_screenshot=False).launch()
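The launch call now exposes 'Pushpa' in the model dropdown. Below is the same call reflowed for readability only; it relies on the legacy gr.inputs / gr.outputs namespaces, so it assumes the older Gradio version pinned by this Space (current Gradio releases use gr.Image / gr.Dropdown and no longer accept allow_screenshot):

import gradio as gr

# Identical to the single-line call in the diff, just split across lines.
gr.Interface(
    inference,
    [
        gr.inputs.Image(type="pil"),
        gr.inputs.Dropdown(choices=['Joker', 'Voldemort', 'Pushpa'],
                           type="value", default='Joker', label="Model"),
    ],
    gr.outputs.Image(type="pil"),
    title=title,
    description=description,
    allow_flagging=False,
    allow_screenshot=False,
).launch()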