Badr AlKhamissi committed fee1f8d (parent: e169248): added examples

Files changed:
- app.py (+31, -51)
- code/utils.py (+2, -2)
app.py
CHANGED
@@ -13,8 +13,8 @@ import sys
 sys.path.append('/home/user/app/code')

 # set up diffvg
-
 # os.system('git clone https://github.com/BachiLi/diffvg.git')
+
 os.system('git submodule update --init')
 os.chdir('diffvg')
 os.system('git submodule update --init --recursive')
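For context, the hunk above only moves a blank line around the diffvg setup, which is driven by chained os.system calls. An equivalent sketch using subprocess.run with check=True (purely illustrative, not part of this commit) would surface a failed git step instead of continuing silently:

    import subprocess

    # Pull diffvg and its nested submodules; check=True raises CalledProcessError
    # if any git command fails, unlike the os.system calls used in app.py.
    subprocess.run(['git', 'submodule', 'update', '--init'], check=True)
    subprocess.run(['git', 'submodule', 'update', '--init', '--recursive'], cwd='diffvg', check=True)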
@@ -50,16 +50,20 @@ from utils import (
     combine_word)
 import warnings

-TITLE="""<h1 style="font-size: 42px;" align="center">
-
-Please select a semantic concept word and a letter you wish to generate, it will take ~5 minutes to perform 500 iterations."""
+TITLE="""<h1 style="font-size: 42px;" align="center">Word-To-Image: Morphing Arabic Text to a Visual Representation</h1>"""
+

-DESCRIPTION="""This demo builds on the [Word-As-Image for Semantic Typography](https://wordasimage.github.io/Word-As-Image-Page/) work to support Arabic fonts and morphing whole words into semantic concepts."""
+DESCRIPTION="""This demo builds on the [Word-As-Image for Semantic Typography](https://wordasimage.github.io/Word-As-Image-Page/) work to support Arabic fonts and morphing whole words into semantic concepts. It is part of an ongoing project with the [ARBML](https://arbml.github.io/website/) community."""

 # DESCRIPTION += '\n<p>This demo is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/"> Creative Commons Attribution-ShareAlike 4.0 International License</a>.</p>'
+# DESCRIPTION += """<br>For faster inference without waiting in queue, you can [![]()]()"""
+DESCRIPTION += '\n<p>For faster inference without waiting in queue, you can <a href="https://colab.research.google.com/drive/1wobOAsnLpkIzaRxG5yac8NcV7iCrlycP"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a></p>'

 if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
-    DESCRIPTION
+    DESCRIPTION = DESCRIPTION.replace("</p>", " ")
+    DESCRIPTION += f'or duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
+else:
+    DESCRIPTION = DESCRIPTION.replace("either", "")


 warnings.filterwarnings("ignore")
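Net effect of the description changes above: the Colab link is always appended, and when the app runs inside a Space (the Hugging Face runtime sets SPACE_ID) the closing </p> is reopened so a "Duplicate Space" badge for that Space can be added. A condensed sketch of that flow, with the long HTML badge strings shortened for readability (illustrative only):

    import os

    DESCRIPTION = "This demo morphs Arabic words into semantic concepts."
    DESCRIPTION += '\n<p>For faster inference, use the Colab notebook.</p>'

    # SPACE_ID is only set when running on Hugging Face Spaces.
    if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
        DESCRIPTION = DESCRIPTION.replace("</p>", " ")  # reopen the paragraph
        DESCRIPTION += f'or duplicate the Space: https://huggingface.co/spaces/{SPACE_ID}?duplicate=true</p>'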
@@ -145,7 +149,9 @@ def init_shapes(svg_path, trainable: Mapping[str, bool]):
     return shapes_init, shape_groups_init, parameters


-def run_main_ex(
+def run_main_ex(word, semantic_concept, num_steps):
+    prompt = f"a {semantic_concept}. minimal flat 2d vector. lineal color. trending on artstation"
+    font_name = "ArefRuqaa"
     return list(next(run_main_app(semantic_concept, word, prompt, font_name, num_steps, 1)))

 def run_main_app(semantic_concept, word, prompt, font_name, num_steps, example=0):
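The new run_main_ex above is the callback used by the examples added further down: it fixes the prompt template and the ArefRuqaa font, then advances the run_main_app generator once and returns that first batch of Gradio updates as a list. A hypothetical call, assuming the app's definitions are in scope and taking the values from one of the new examples:

    # Illustrative only: morph the Arabic word "قطة" toward the concept "Cat"
    # with 500 optimization steps, using the fixed prompt and font above.
    updates = run_main_ex(word="قطة", semantic_concept="Cat", num_steps=500)
    # run_main_app yields tuples of three gr.update objects; next(...) takes the
    # first one and list(...) turns it into the list Gradio expects for the outputs.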
@@ -240,7 +246,7 @@ def run_main_app(semantic_concept, word, prompt, font_name, num_steps, example=0
     save_svg.save_svg(
         filename, w, h, shapes, shape_groups)

-    combine_word(cfg.word, cfg.optimized_letter, cfg.font, cfg.experiment_dir)
+    combine_word(cfg.word, cfg.optimized_letter, cfg.font, cfg.experiment_dir, device)

     image = os.path.join(cfg.experiment_dir,f"{cfg.font}_{cfg.word}_{cfg.optimized_letter}.svg")
     yield gr.update(value=filename_init,visible=True),gr.update(visible=False),gr.update(value=image,visible=True)
@@ -314,51 +320,25 @@ with gr.Blocks() as demo:
     with gr.Row():
         # examples
         examples = [
-            [
-                "
-                "
-                "
-                "KaushanScript-Regular",
-                500
-            ],
-            [
-                "LION",
-                "LION",
-                "O",
-                "Quicksand",
-                500
-            ],
-            [
-                "FROG",
-                "FROG",
-                "G",
-                "IndieFlower-Regular",
-                500
-            ],
-            [
-                "CAT",
-                "CAT",
-                "C",
-                "LuckiestGuy-Regular",
-                500
-            ],
+            ["قطة", "Cat", 500],
+            ["كلب", "Dog", 500],
+            ["حصان", "Horse", 500],
+            ["أخطبوط", "Octopus", 500],
         ]
         demo.queue(max_size=10, concurrency_count=2)
-
-
-
-
-
-
-
-
-
-
-
-
-
-        # fn=run_main_ex,
-        # cache_examples=True)
+        gr.Examples(examples=examples,
+                    inputs=[
+                        word,
+                        semantic_concept,
+                        num_steps
+                    ],
+                    outputs=[
+                        result0,
+                        result1,
+                        result2
+                    ],
+                    fn=run_main_ex,
+                    cache_examples=False)


     # inputs
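The hunk above swaps the old per-letter English examples for whole Arabic words and wires them into gr.Examples (the removed "# fn=run_main_ex," / "# cache_examples=True)" lines suggest that call was previously commented out), mapping each [word, concept, steps] triple onto the word, semantic_concept and num_steps inputs and routing run_main_ex's return values to result0-result2. A minimal, self-contained sketch of the same wiring pattern, with stand-in components and a dummy callback rather than the Space's real ones:

    import gradio as gr

    def run_main_ex(word, semantic_concept, num_steps):
        # Stand-in for the real pipeline: one value per output component.
        return [f"{word} → {semantic_concept} ({num_steps} steps)"] * 3

    with gr.Blocks() as demo:
        word = gr.Textbox(label="Word")
        semantic_concept = gr.Textbox(label="Semantic concept")
        num_steps = gr.Number(value=500, label="Iterations")
        result0, result1, result2 = gr.Textbox(), gr.Textbox(), gr.Textbox()

        gr.Examples(examples=[["قطة", "Cat", 500], ["كلب", "Dog", 500]],
                    inputs=[word, semantic_concept, num_steps],
                    outputs=[result0, result1, result2],
                    fn=run_main_ex,
                    cache_examples=False)  # don't pre-run the slow pipeline at startup

    demo.launch()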
code/utils.py
CHANGED
@@ -142,7 +142,7 @@ def get_letter_ids(letter, word, shape_groups):
     return group.shape_ids


-def combine_word(word, letter, font, experiment_dir):
+def combine_word(word, letter, font, experiment_dir, device='cuda'):
     word_svg_scaled = f"./code/data/init/{font}_{word}_scaled.svg"
     canvas_width_word, canvas_height_word, shapes_word, shape_groups_word = pydiffvg.svg_to_scene(word_svg_scaled)

@@ -202,7 +202,7 @@ def combine_word(word, letter, font, experiment_dir):
     scene_args = pydiffvg.RenderFunction.serialize_scene(canvas_width, canvas_height, shapes_word, shape_groups_word)
     img = render(canvas_width, canvas_height, 2, 2, 0, None, *scene_args)
     img = img[:, :, 3:4] * img[:, :, :3] + \
-        torch.ones(img.shape[0], img.shape[1], 3, device=
+        torch.ones(img.shape[0], img.shape[1], 3, device=device) * (1 - img[:, :, 3:4])
     img = img[:, :, :3]
     save_image(img, f"{experiment_dir}/{font}_{word}_{letter}.png")

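The utils.py change threads the caller's device into combine_word so the white background tensor is allocated on the same device as the diffvg render; the edited line is the standard "alpha over white" composite, out = alpha * rgb + (1 - alpha) * white. A standalone sketch of that operation (illustrative only; it infers the device from the input tensor instead of taking a parameter):

    import torch

    def composite_over_white(rgba: torch.Tensor) -> torch.Tensor:
        """Blend an (H, W, 4) RGBA image over a white background, staying on rgba's device."""
        rgb, alpha = rgba[:, :, :3], rgba[:, :, 3:4]
        white = torch.ones(rgba.shape[0], rgba.shape[1], 3, device=rgba.device)
        return alpha * rgb + (1 - alpha) * white  # shape (H, W, 3)

    # e.g. a dummy CPU tensor; on the Space the render would live on the GPU.
    out = composite_over_white(torch.rand(64, 64, 4))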