bill-jiang committed
Commit 8554568
Parent(s): 4409449
This view is limited to 50 files because the commit contains too many changes.
Files changed (50):
  1. app.py +176 -95
  2. assets/css/custom.css +0 -15
  3. assets/images/avatar_user.png +0 -0
  4. assets/videos/example0.mp4 +0 -0
  5. assets/videos/example0_blender.mp4 +3 -0
  6. assets/videos/example0_fast.mp4 +0 -0
  7. assets/videos/example1.mp4 +0 -0
  8. assets/videos/example2.mp4 +0 -0
  9. assets/videos/example3.mp4 +0 -0
  10. assets/videos/example4.mp4 +0 -0
  11. assets/videos/example5.mp4 +0 -0
  12. assets/videos/example6.mp4 +0 -0
  13. assets/videos/example7.mp4 +0 -0
  14. assets/videos/example8.mp4 +0 -0
  15. assets/videos/m2t_0.mp4 +0 -0
  16. assets/videos/t2m_0.mp4 +0 -0
  17. mGPT/render/pyrender/smpl_render.py +93 -45
  18. pyrender/.coveragerc +5 -0
  19. pyrender/.flake8 +8 -0
  20. pyrender/.gitignore +106 -0
  21. pyrender/.pre-commit-config.yaml +6 -0
  22. pyrender/.travis.yml +43 -0
  23. pyrender/LICENSE +21 -0
  24. pyrender/MANIFEST.in +5 -0
  25. pyrender/README.md +92 -0
  26. pyrender/docs/Makefile +23 -0
  27. pyrender/docs/make.bat +35 -0
  28. pyrender/docs/source/api/index.rst +59 -0
  29. pyrender/docs/source/conf.py +352 -0
  30. pyrender/docs/source/examples/cameras.rst +26 -0
  31. pyrender/docs/source/examples/index.rst +20 -0
  32. pyrender/docs/source/examples/lighting.rst +21 -0
  33. pyrender/docs/source/examples/models.rst +143 -0
  34. pyrender/docs/source/examples/offscreen.rst +87 -0
  35. pyrender/docs/source/examples/quickstart.rst +71 -0
  36. pyrender/docs/source/examples/scenes.rst +78 -0
  37. pyrender/docs/source/examples/viewer.rst +61 -0
  38. pyrender/docs/source/index.rst +41 -0
  39. pyrender/docs/source/install/index.rst +172 -0
  40. pyrender/examples/duck.py +13 -0
  41. pyrender/examples/example.py +157 -0
  42. pyrender/pyrender/__init__.py +24 -0
  43. pyrender/pyrender/camera.py +437 -0
  44. pyrender/pyrender/constants.py +149 -0
  45. pyrender/pyrender/font.py +272 -0
  46. pyrender/pyrender/fonts/OpenSans-Bold.ttf +0 -0
  47. pyrender/pyrender/fonts/OpenSans-BoldItalic.ttf +0 -0
  48. pyrender/pyrender/fonts/OpenSans-ExtraBold.ttf +0 -0
  49. pyrender/pyrender/fonts/OpenSans-ExtraBoldItalic.ttf +0 -0
  50. pyrender/pyrender/fonts/OpenSans-Italic.ttf +0 -0
app.py CHANGED
@@ -52,8 +52,8 @@ forced_decoder_ids_zh = audio_processor.get_decoder_prompt_ids(
 forced_decoder_ids_en = audio_processor.get_decoder_prompt_ids(
     language="en", task="translate")
 
-# HTML Style
 
+# HTML Style
 Video_Components = """
 <div class="side-video" style="position: relative;">
     <video width="340" autoplay loop>
@@ -320,102 +320,172 @@ def bot(history, motion_uploaded, data_stored, method):
 
 
 def bot_example(history, responses):
-    for response in responses:
-        history[-1][1] = ""
-        for character in response:
-            history[-1][1] += character
-            time.sleep(0.02)
-            yield history, motion_uploaded, data_stored
-
-
-# Examples
-chat_instruct = [
-    (None,
-     "**👋 Hi, I'm MotionGPT! I can generate realistic human motion from text, or generate text from motion.**"
-     ),
-    (None,
-     "You can chat with me in pure text like generating human motion following your descriptions."
-     ),
-    (None,
-     "After generation, you can click the button in the top right of generation human motion result to download the human motion video or feature stored in .npy format."
-     ),
-    (None,
-     "With the human motion feature file downloaded or got from dataset, you are able to ask me to translate it!"
-     ),
-    (None,
-     "Of courser, you can also purely chat with me and let me give you human motion in text, here are some examples!"
-     ),
-    (None,
-     "We provide two motion visulization methods. The default fast method is skeleton line ploting which is like the examples below:"
-     ),
-    (None,
-     Video_Components_example.format(video_path="assets/videos/t2m_0.mp4",
-                                     video_fname="example1.mp4")),
-    (None,
-     "And the slow method is SMPL model rendering which is more realistic but slower."
-     ),
-    (None,
-     Video_Components_example.format(video_path="assets/videos/t2m_0.mp4",
-                                     video_fname="example1.mp4")),
-    (None, "👉 Follow the examples and try yourself!"),
-]
-
-t2m_examples = [
-    (None,
-     "You can chat with me in pure text, following are some examples of text-to-motion generation!"
-     ),
-    ("Generate a person is walking forwards, but stumbles and steps back, then carries on forward.",
-     Video_Components_example.format(video_path="assets/videos/t2m_0.mp4",
-                                     video_fname="example1.mp4")),
-    ("Generate a person is walking forwards, but stumbles and steps back, then carries on forward.",
-     Video_Components_example.format(video_path="assets/videos/t2m_0.mp4",
-                                     video_fname="example1.mp4")),
-    ("Generate a person is walking forwards, but stumbles and steps back, then carries on forward.",
-     Video_Components_example.format(video_path="assets/videos/t2m_0.mp4",
-                                     video_fname="example1.mp4")),
-]
-
-m2t_examples = [
-    (None,
-     "With the human motion feature file downloaded or got from dataset, you are able to ask me to translate it, here are some examples!"
-     ),
-    ("Please explain the movement shown in [Motion_tokens] using natural language.",
-     None),
-    (Video_Components_example.format(video_path="assets/videos/m2t_0.mp4",
-                                     video_fname="example2.mp4"),
-     "a person walks forward then does a backwards z-shape movement to its left side. then back to the right."
-     ),
-    ("Please explain the movement shown in [Motion_tokens] using natural language.",
-     None),
-    (Video_Components_example.format(video_path="assets/videos/m2t_0.mp4",
-                                     video_fname="example2.mp4"),
-     "a person walks forward then does a backwards z-shape movement to its left side. then back to the right."
-     ),
-]
-
-t2t_examples = [
-    (None,
-     "Of courser, you can also purely chat with me and let me give you human motion in text, here are some examples!"
-     ),
-    ('Depict a motion as like you have seen it.',
-     "The person walks while swaying their hips along a curved path to the left slowly then stops to look down at the edge of the grey platform at something."
-     ),
-    ('Depict a motion as like you have seen it.',
-     "The person walks while swaying their hips along a curved path to the left slowly then stops to look down at the edge of the grey platform at something."
-     ),
-]
-
-Init_chatbot = [
-    (None,
-     "**👋 Hi, I'm MotionGPT! I can generate realistic human motion from text, or generate text from motion.**"
-     )
-] + t2m_examples[:3] + m2t_examples[:2] + t2t_examples[:2] + chat_instruct[-4:]
+    history = history + responses
+    return history
+
 
 with open("assets/css/custom.css", "r", encoding="utf-8") as f:
     customCSS = f.read()
 
 with gr.Blocks(css=customCSS) as demo:
 
+    # Examples
+    chat_instruct = gr.State([
+        (None,
+         "👋 Hi, I'm MotionGPT! I can generate realistic human motion from text, or generate text from motion."
+         ),
+        (None,
+         "💡 You can chat with me in pure text like generating human motion following your descriptions."
+         ),
+        (None,
+         "💡 After generation, you can click the button in the top right of generation human motion result to download the human motion video or feature stored in .npy format."
+         ),
+        (None,
+         "💡 With the human motion feature file downloaded or got from dataset, you are able to ask me to translate it!"
+         ),
+        (None,
+         "💡 Of courser, you can also purely chat with me and let me give you human motion in text, here are some examples!"
+         ),
+        (None,
+         "💡 We provide two motion visulization methods. The default fast method is skeleton line ploting which is like the examples below:"
+         ),
+        (None,
+         Video_Components_example.format(
+             video_path="assets/videos/example0_fast.mp4",
+             video_fname="example0_fast.mp4")),
+        (None,
+         "💡 And the slow method is SMPL model rendering which is more realistic but slower."
+         ),
+        (None,
+         Video_Components_example.format(
+             video_path="assets/videos/example0.mp4",
+             video_fname="example0.mp4")),
+        (None,
+         "💡 If you want to get the video in our paper and website like below, you can refer to the scirpt in our [github repo](https://github.com/OpenMotionLab/MotionGPT#-visualization)."
+         ),
+        (None,
+         Video_Components_example.format(
+             video_path="assets/videos/example0_blender.mp4",
+             video_fname="example0_blender.mp4")),
+        (None, "👉 Follow the examples and try yourself!"),
+    ])
+
+    t2m_examples = gr.State([
+        (None,
+         "💡 You can chat with me in pure text, following are some examples of text-to-motion generation!"
+         ),
+        ("A person is walking forwards, but stumbles and steps back, then carries on forward.",
+         Video_Components_example.format(
+             video_path="assets/videos/example0.mp4",
+             video_fname="example0.mp4")),
+        ("Generate a man aggressively kicks an object to the left using his right foot.",
+         Video_Components_example.format(
+             video_path="assets/videos/example1.mp4",
+             video_fname="example1.mp4")),
+        ("Generate a person lowers their arms, gets onto all fours, and crawls.",
+         Video_Components_example.format(
+             video_path="assets/videos/example2.mp4",
+             video_fname="example2.mp4")),
+        ("Show me the video of a person bends over and picks things up with both hands individually, then walks forward.",
+         Video_Components_example.format(
+             video_path="assets/videos/example3.mp4",
+             video_fname="example3.mp4")),
+        ("Image a person is practing balancing on one leg.",
+         Video_Components_example.format(
+             video_path="assets/videos/example5.mp4",
+             video_fname="example5.mp4")),
+        ("Show me a person walks forward, stops, turns directly to their right, then walks forward again.",
+         Video_Components_example.format(
+             video_path="assets/videos/example6.mp4",
+             video_fname="example6.mp4")),
+        ("I saw a person sits on the ledge of something then gets off and walks away.",
+         Video_Components_example.format(
+             video_path="assets/videos/example7.mp4",
+             video_fname="example7.mp4")),
+        ("Show me a person is crouched down and walking around sneakily.",
+         Video_Components_example.format(
+             video_path="assets/videos/example8.mp4",
+             video_fname="example8.mp4")),
+    ])
+
+    m2t_examples = gr.State([
+        (None,
+         "💡 With the human motion feature file downloaded or got from dataset, you are able to ask me to translate it, here are some examples!"
+         ),
+        ("Please explain the movement shown in <Motion_Placeholder> using natural language.",
+         None),
+        (Video_Components_example.format(
+            video_path="assets/videos/example0.mp4",
+            video_fname="example0.mp4"),
+         "The person was pushed but didn't fall down"),
+        ("What kind of action is being represented in <Motion_Placeholder>? Explain it in text.",
+         None),
+        (Video_Components_example.format(
+            video_path="assets/videos/example4.mp4",
+            video_fname="example4.mp4"),
+         "The figure has its hands curled at jaw level, steps onto its left foot and raises right leg with bent knee to kick forward and return to starting stance."
+         ),
+        ("Provide a summary of the motion demonstrated in <Motion_Placeholder> using words.",
+         None),
+        (Video_Components_example.format(
+            video_path="assets/videos/example2.mp4",
+            video_fname="example2.mp4"),
+         "A person who is standing with his arms up and away from his sides bends over, gets down on his hands and then his knees and crawls forward."
+         ),
+        ("Generate text for <Motion_Placeholder>:", None),
+        (Video_Components_example.format(
+            video_path="assets/videos/example5.mp4",
+            video_fname="example5.mp4"),
+         "The man tries to stand in a yoga tree pose and looses his balance."),
+        ("Provide a summary of the motion depicted in <Motion_Placeholder> using language.",
+         None),
+        (Video_Components_example.format(
+            video_path="assets/videos/example6.mp4",
+            video_fname="example6.mp4"),
+         "Person walks up some steps then leeps to the other side and goes up a few more steps and jumps dow"
+         ),
+        ("Describe the motion represented by <Motion_Placeholder> in plain English.",
+         None),
+        (Video_Components_example.format(
+            video_path="assets/videos/example7.mp4",
+            video_fname="example7.mp4"),
+         "Person sits down, then stands up and walks forward. then the turns around 180 degrees and walks the opposite direction"
+         ),
+        ("Provide a description of the action in <Motion_Placeholder> using words.",
+         None),
+        (Video_Components_example.format(
+            video_path="assets/videos/example8.mp4",
+            video_fname="example8.mp4"),
+         "This man is bent forward and walks slowly around."),
+    ])
+
+    t2t_examples = gr.State([
+        (None,
+         "💡 Of courser, you can also purely chat with me and let me give you human motion in text, here are some examples!"
+         ),
+        ('Depict a motion as like you have seen it.',
+         "A person slowly walked forward in rigth direction while making the circle"
+         ),
+        ('Random say something about describing a human motion.',
+         "A man throws punches using his right hand."),
+        ('Describe the motion of someone as you will.',
+         "Person is moving left to right in a dancing stance swaying hips, moving feet left to right with arms held out"
+         ),
+        ('Come up with a human motion caption.',
+         "A person is walking in a counter counterclockwise motion."),
+        ('Write a sentence about how someone might dance.',
+         "A person with his hands down by his sides reaches down for something with his right hand, uses the object to make a stirring motion, then places the item back down."
+         ),
+        ('Depict a motion as like you have seen it.',
+         "A person is walking forward a few feet, then turns around, walks back, and continues walking."
+         )
+    ])
+
+    Init_chatbot = chat_instruct.value[:1] + t2m_examples.value[:3] + \
+        m2t_examples.value[:2] + t2t_examples.value[:2] + chat_instruct.value[-4:]
+
     # Variables
     motion_uploaded = gr.State({
         "feats": None,
@@ -434,7 +504,7 @@ with gr.Blocks(css=customCSS) as demo:
         elem_id="mGPT",
         height=600,
         label="MotionGPT",
-        avatar_images=(None,
+        avatar_images=(("assets/images/avatar_user.png"),
                        ("assets/images/avatar_bot.jpg")),
         bubble_full_width=False)
 
@@ -444,6 +514,7 @@ with gr.Blocks(css=customCSS) as demo:
         txt = gr.Textbox(
             label="Text",
            show_label=False,
+            elem_id="textbox",
            placeholder=
            "Enter text and press ENTER or speak to input. You can also upload motion.",
            container=False)
@@ -454,8 +525,7 @@ with gr.Blocks(css=customCSS) as demo:
                       type='filepath')
         btn = gr.UploadButton("📁 Upload motion",
                               elem_id="upload",
-                              file_types=["file"],
-                              variant='primary')
+                              file_types=["file"])
         regen = gr.Button("🔄 Regenerate", elem_id="regen")
         clear = gr.ClearButton([txt, chatbot, aud], value='🗑️ Clear')
 
@@ -465,7 +535,7 @@ with gr.Blocks(css=customCSS) as demo:
             ''')
 
             with gr.Row():
-                instruct = gr.Button("Instructions", elem_id="instruction")
+                instruct_eg = gr.Button("Instructions", elem_id="instruct")
                 t2m_eg = gr.Button("Text-to-Motion", elem_id="t2m")
                 m2t_eg = gr.Button("Motion-to-Text", elem_id="m2t")
                 t2t_eg = gr.Button("Random description", elem_id="t2t")
@@ -503,6 +573,17 @@ with gr.Blocks(css=customCSS) as demo:
                      [chatbot, motion_uploaded, data_stored, method],
                      [chatbot, motion_uploaded, data_stored],
                      queue=False)
+
+    instruct_msg = instruct_eg.click(bot_example, [chatbot, chat_instruct],
+                                     [chatbot],
+                                     queue=False)
+    t2m_eg_msg = t2m_eg.click(bot_example, [chatbot, t2m_examples], [chatbot],
+                              queue=False)
+    m2t_eg_msg = m2t_eg.click(bot_example, [chatbot, m2t_examples], [chatbot],
+                              queue=False)
+    t2t_eg_msg = t2t_eg.click(bot_example, [chatbot, t2t_examples], [chatbot],
+                              queue=False)
+
     chatbot.change(scroll_to_output=True)
 
 demo.queue()
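The functional core of this app.py change is that `bot_example` no longer streams canned replies character by character; it now concatenates a list of `(user, bot)` tuples onto the chat history in one call, and the example conversations live in `gr.State` objects so they can be passed as `click` inputs. A minimal sketch of that wiring, assuming a Gradio 3.x tuple-style `Chatbot` (the example strings below are placeholders, not the commit's):

```python
import gradio as gr

def bot_example(history, responses):
    # After this commit: append the canned (user, bot) tuples in one shot
    # instead of yielding partial strings with time.sleep() in between.
    return history + responses

with gr.Blocks() as demo:
    examples = gr.State([
        (None, "Hi, I'm MotionGPT!"),                        # bot-only message
        ("Generate a person walking forwards.", "<video>"),  # user/bot pair
    ])
    chatbot = gr.Chatbot()
    instruct = gr.Button("Instructions")
    instruct.click(bot_example, [chatbot, examples], [chatbot], queue=False)

demo.launch()
```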
assets/css/custom.css CHANGED
@@ -64,22 +64,7 @@
     max-width: 340px;
 }
 
-/* @media only screen and (min-width: 768px) {
-    .side-content {
-        float: left;
-        overflow-wrap: break-word;
-        padding-right: 2rem;
-    }
-
-    .side-video {
-        float: right;
-    } */
-
-
 /* Buttom */
-#upload {
-    color: #000000;
-}
 
 .videodl-button {
     position: absolute;
assets/images/avatar_user.png ADDED
assets/videos/example0.mp4 ADDED
Binary file (108 kB)
assets/videos/example0_blender.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95a109ab76e463446d352cd58e056207ad3f29ee6c54af7ece21dcf0c4ec68dc
+size 195406
assets/videos/example0_fast.mp4 ADDED
Binary file (113 kB)
assets/videos/example1.mp4 ADDED
Binary file (88.9 kB)
assets/videos/example2.mp4 ADDED
Binary file (70.8 kB)
assets/videos/example3.mp4 ADDED
Binary file (149 kB)
assets/videos/example4.mp4 ADDED
Binary file (184 kB)
assets/videos/example5.mp4 ADDED
Binary file (156 kB)
assets/videos/example6.mp4 ADDED
Binary file (154 kB)
assets/videos/example7.mp4 ADDED
Binary file (167 kB)
assets/videos/example8.mp4 ADDED
Binary file (163 kB)
assets/videos/m2t_0.mp4 DELETED
Binary file (500 kB)
assets/videos/t2m_0.mp4 DELETED
Binary file (811 kB)
mGPT/render/pyrender/smpl_render.py CHANGED
@@ -10,10 +10,12 @@ import glob
 import pickle
 import pyrender
 import trimesh
+from shapely import geometry
 from smplx import SMPL as _SMPL
 from smplx.utils import SMPLOutput as ModelOutput
 from scipy.spatial.transform.rotation import Rotation as RRR
 
+
 class SMPL(_SMPL):
     """ Extension of the official SMPL implementation to support more joints """
 
@@ -39,44 +41,80 @@ class SMPL(_SMPL):
                              full_pose=smpl_output.full_pose)
         return output
 
+
 class Renderer:
     """
     Renderer used for visualizing the SMPL model
     Code adapted from https://github.com/vchoutas/smplify-x
     """
-    def __init__(self, focal_length=5000, img_res=(224,224), faces=None):
+
+    def __init__(self,
+                 vertices,
+                 focal_length=5000,
+                 img_res=(224, 224),
+                 faces=None):
         self.renderer = pyrender.OffscreenRenderer(viewport_width=img_res[0],
-                                           viewport_height=img_res[1],
-                                           point_size=1.0)
+                                                   viewport_height=img_res[1],
+                                                   point_size=1.0)
         self.focal_length = focal_length
         self.camera_center = [img_res[0] // 2, img_res[1] // 2]
         self.faces = faces
-    def __call__(self, vertices, camera_translation, image):
-        material = pyrender.MetallicRoughnessMaterial(
-            metallicFactor=0.2,
-            alphaMode='OPAQUE',
-            baseColorFactor=(0.8, 0.3, 0.3, 1.0))
 
-        camera_translation[0] *= -1.
+        if torch.cuda.is_available():
+            self.device = torch.device("cuda")
+        else:
+            self.device = torch.device("cpu")
 
-        mesh = trimesh.Trimesh(vertices, self.faces)
-        rot = trimesh.transformations.rotation_matrix(
+        vertices = np.concatenate(vertices)
+        # Center the first root to the first frame
+        vertices -= vertices[[0], [0], :]
+        # Remove the floor
+        vertices[..., 2] -= vertices[..., 2].min()
+        data = vertices[..., [2, 0, 1]]
+        minx, miny, _ = data.min(axis=(0, 1))
+        maxx, maxy, _ = data.max(axis=(0, 1))
+        minz, maxz = -0.5, 0.5
+        minx = minx - 0.5
+        maxx = maxx + 0.5
+        miny = miny - 0.5
+        maxy = maxy + 0.5
+
+        polygon = geometry.Polygon([[minx, minz], [minx, maxz], [maxx, maxz],
+                                    [maxx, minz]])
+        self.polygon_mesh = trimesh.creation.extrude_polygon(polygon, 1e-5)
+        self.polygon_mesh.visual.face_colors = [0, 0, 0, 0.21]
+        self.rot = trimesh.transformations.rotation_matrix(
             np.radians(180), [1, 0, 0])
-        mesh.apply_transform(rot)
-        mesh = pyrender.Mesh.from_trimesh(mesh, material=material)
+        # self.polygon_mesh.apply_transform(self.rot)
+
+    def __call__(self, vertices, camera_translation):
+        scene = pyrender.Scene(bg_color=(1., 1., 1., 0.8),
+                               ambient_light=(0.4, 0.4, 0.4))
 
-
-        scene = pyrender.Scene(bg_color = [1, 1, 1, 0.8], ambient_light=(0.4, 0.4, 0.4))
+        material = pyrender.MetallicRoughnessMaterial(
+            metallicFactor=0.4,
+            alphaMode='OPAQUE',
+            baseColorFactor=(0.658, 0.214, 0.0114, 0.2))
+        mesh = trimesh.Trimesh(vertices, self.faces)
+        mesh.apply_transform(self.rot)
+        mesh = pyrender.Mesh.from_trimesh(mesh, material=material)
         scene.add(mesh, 'mesh')
 
+        polygon_render = pyrender.Mesh.from_trimesh(self.polygon_mesh,
+                                                    smooth=False)
+        c = np.pi / 2
+        scene.add(polygon_render)
+
         camera_pose = np.eye(4)
+        camera_translation[0] *= -1.
         camera_pose[:3, 3] = camera_translation
-        camera = pyrender.IntrinsicsCamera(fx=self.focal_length, fy=self.focal_length,
-                                   cx=self.camera_center[0], cy=self.camera_center[1])
+        camera = pyrender.IntrinsicsCamera(fx=self.focal_length,
+                                           fy=self.focal_length,
+                                           cx=self.camera_center[0],
+                                           cy=self.camera_center[1])
         scene.add(camera, pose=camera_pose)
 
-
-        light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=300)
+        light = pyrender.DirectionalLight(color=[1, 1, 1], intensity=300)
         light_pose = np.eye(4)
 
         light_pose[:3, 3] = np.array([0, -1, 1])
@@ -88,48 +126,58 @@ class Renderer:
         light_pose[:3, 3] = np.array([1, 1, 2])
         scene.add(light, pose=light_pose)
 
-        color, rend_depth = self.renderer.render(scene, flags=pyrender.RenderFlags.RGBA)
-        color = color.astype(np.float32) / 255.0
-        valid_mask = (rend_depth > 0)[:,:,None]
-        output_img = (color[:, :, :3] * valid_mask +
-                      (1 - valid_mask) * image)
-        return output_img
+        color, rend_depth = self.renderer.render(
+            scene, flags=pyrender.RenderFlags.RGBA)
+
+        return color
+
 
 class SMPLRender():
+
     def __init__(self, SMPL_MODEL_DIR):
         if torch.cuda.is_available():
            self.device = torch.device("cuda")
         else:
            self.device = torch.device("cpu")
-
-        self.smpl = SMPL(SMPL_MODEL_DIR,
-                         batch_size=1,
-                         create_transl=False).to(self.device)
-
+        self.smpl = SMPL(SMPL_MODEL_DIR, batch_size=1,
+                         create_transl=False).to(self.device)
+        self.vertices = []
+        self.pred_camera_t = []
         self.focal_length = 5000
 
-    def render(self, image, smpl_param, is_headroot=False):
+    def fit(self, smpl_param, is_headroot=False):
         pose = smpl_param['pred_pose']
-        if pose.size==72:
-            pose = pose.reshape(-1,3)
+        if pose.size == 72:
+            pose = pose.reshape(-1, 3)
         pose = RRR.from_rotvec(pose).as_matrix()
-        pose = pose.reshape(1,24,3,3)
-        pred_betas = torch.from_numpy(smpl_param['pred_shape'].reshape(1, 10).astype(np.float32)).to(self.device)
+        pose = pose.reshape(1, 24, 3, 3)
+        pred_betas = torch.from_numpy(smpl_param['pred_shape'].reshape(
+            1, 10).astype(np.float32)).to(self.device)
         pred_rotmat = torch.from_numpy(pose.astype(np.float32)).to(self.device)
-        pred_camera_t = smpl_param['pred_root'].reshape(1, 3).astype(np.float32)
-        smpl_output = self.smpl(betas=pred_betas, body_pose=pred_rotmat[:, 1:],
-                                global_orient=pred_rotmat[:, 0].unsqueeze(1), pose2rot=False)
-
-
+        pred_camera_t = smpl_param['pred_root'].reshape(1,
+                                                        3).astype(np.float32)
+        smpl_output = self.smpl(betas=pred_betas,
+                                body_pose=pred_rotmat[:, 1:],
+                                global_orient=pred_rotmat[:, 0].unsqueeze(1),
+                                pose2rot=False)
         vertices = smpl_output.vertices[0].detach().cpu().numpy()
+        self.vertices.append(vertices[None])
+
         pred_camera_t = pred_camera_t[0]
 
         if is_headroot:
-            pred_camera_t = pred_camera_t - smpl_output.joints[0,12].detach().cpu().numpy()
+            pred_camera_t = pred_camera_t - smpl_output.joints[
+                0, 12].detach().cpu().numpy()
+
+        self.pred_camera_t.append(pred_camera_t)
 
-        renderer = Renderer(focal_length=self.focal_length,
-                            img_res=(image.shape[1], image.shape[0]), faces=self.smpl.faces)
+    def init_renderer(self, res):
+        self.renderer = Renderer(vertices=self.vertices,
+                                 focal_length=self.focal_length,
+                                 img_res=(res[1], res[0]),
+                                 faces=self.smpl.faces)
 
-        renderImg = renderer(vertices, pred_camera_t.copy(), image / 255.0)
-        renderer.renderer.delete()
+    def render(self, index):
+        renderImg = self.renderer(self.vertices[index][0],
+                                  self.pred_camera_t[index].copy())
         return renderImg
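The refactor above splits what used to be a single per-frame `render(image, smpl_param)` call into three stages: `fit()` accumulates per-frame vertices and camera translations, `init_renderer()` builds one `Renderer` over the whole sequence (which is what lets it size the ground-plane polygon to the full motion), and `render(index)` rasterizes a single frame. A hypothetical driver loop, assuming `smpl_params` is a list of per-frame dicts with `pred_pose`, `pred_shape`, and `pred_root` keys; the model directory path is an illustrative assumption:

```python
from mGPT.render.pyrender.smpl_render import SMPLRender

# Per-frame SMPL parameter dicts; how you obtain them is up to the caller.
smpl_params = []

render = SMPLRender("deps/smpl")  # SMPL model directory (assumed path)

for param in smpl_params:
    render.fit(param)             # accumulates vertices and camera translations

render.init_renderer(res=(1024, 1024))  # (height, width) of the output frames
frames = [render.render(i) for i in range(len(smpl_params))]  # RGBA images
```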
pyrender/.coveragerc ADDED
@@ -0,0 +1,5 @@
+[report]
+exclude_lines =
+    def __repr__
+    def __str__
+    @abc.abstractmethod
pyrender/.flake8 ADDED
@@ -0,0 +1,8 @@
+[flake8]
+ignore = E231,W504,F405,F403
+max-line-length = 79
+select = B,C,E,F,W,T4,B9
+exclude =
+    docs/source/conf.py,
+    __pycache__,
+    examples/*
pyrender/.gitignore ADDED
@@ -0,0 +1,106 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+docs/**/generated/**
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
pyrender/.pre-commit-config.yaml ADDED
@@ -0,0 +1,6 @@
+repos:
+-   repo: https://gitlab.com/pycqa/flake8
+    rev: 3.7.1
+    hooks:
+    -   id: flake8
+        exclude: ^setup.py
pyrender/.travis.yml ADDED
@@ -0,0 +1,43 @@
+language: python
+sudo: required
+dist: xenial
+
+python:
+- '3.6'
+- '3.7'
+
+before_install:
+    # Pre-install osmesa
+    - sudo apt update
+    - sudo wget https://github.com/mmatl/travis_debs/raw/master/xenial/mesa_18.3.3-0.deb
+    - sudo dpkg -i ./mesa_18.3.3-0.deb || true
+    - sudo apt install -f
+    - git clone https://github.com/mmatl/pyopengl.git
+    - cd pyopengl
+    - pip install .
+    - cd ..
+
+install:
+- pip install .
+# - pip install -q pytest pytest-cov coveralls
+- pip install pytest pytest-cov coveralls
+- pip install ./pyopengl
+
+script:
+- PYOPENGL_PLATFORM=osmesa pytest --cov=pyrender tests
+
+after_success:
+- coveralls || true
+
+deploy:
+  provider: pypi
+  skip_existing: true
+  user: mmatl
+  on:
+    tags: true
+    branch: master
+  password:
+    secure: O4WWMbTYb2eVYIO4mMOVa6/xyhX7mPvJpd96cxfNvJdyuqho8VapOhzqsI5kahMB1hFjWWr61yR4+Ru5hoDYf3XA6BQVk8eCY9+0H7qRfvoxex71lahKAqfHLMoE1xNdiVTgl+QN9hYjOnopLod24rx8I8eXfpHu/mfCpuTYGyLlNcDP5St3bXpXLPB5wg8Jo1YRRv6W/7fKoXyuWjewk9cJAS0KrEgnDnSkdwm6Pb+80B2tcbgdGvpGaByw5frndwKiMUMgVUownepDU5POQq2p29wwn9lCvRucULxjEgO+63jdbZRj5fNutLarFa2nISfYnrd72LOyDfbJubwAzzAIsy2JbFORyeHvCgloiuE9oE7a9oOQt/1QHBoIV0seiawMWn55Yp70wQ7HlJs4xSGJWCGa5+9883QRNsvj420atkb3cgO8P+PXwiwTi78Dq7Z/xHqccsU0b8poqBneQoA+pUGgNnF6V7Z8e9RsCcse2gAWSZWuOK3ua+9xCgH7I7MeL3afykr2aJ+yFCoYJMFrUjJeodMX2RbL0q+3FzIPZeGW3WdhTEAL9TSKRcJBSQTskaQlZx/OcpobxS7t3d2S68CCLG9uMTqOTYws55WZ1etalA75sRk9K2MR7ZGjZW3jdtvMViISc/t6Rrjea1GE8ZHGJC6/IeLIWA2c7nc=
+  distributions: sdist bdist_wheel
+notifications:
+  email: false
pyrender/LICENSE ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 Matthew Matl
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
pyrender/MANIFEST.in ADDED
@@ -0,0 +1,5 @@
+# Include the license
+include LICENSE
+include README.rst
+include pyrender/fonts/*
+include pyrender/shaders/*
pyrender/README.md ADDED
@@ -0,0 +1,92 @@
+# Pyrender
+
+[![Build Status](https://travis-ci.org/mmatl/pyrender.svg?branch=master)](https://travis-ci.org/mmatl/pyrender)
+[![Documentation Status](https://readthedocs.org/projects/pyrender/badge/?version=latest)](https://pyrender.readthedocs.io/en/latest/?badge=latest)
+[![Coverage Status](https://coveralls.io/repos/github/mmatl/pyrender/badge.svg?branch=master)](https://coveralls.io/github/mmatl/pyrender?branch=master)
+[![PyPI version](https://badge.fury.io/py/pyrender.svg)](https://badge.fury.io/py/pyrender)
+[![Downloads](https://pepy.tech/badge/pyrender)](https://pepy.tech/project/pyrender)
+
+Pyrender is a pure Python (2.7, 3.4, 3.5, 3.6) library for physically-based
+rendering and visualization.
+It is designed to meet the [glTF 2.0 specification from Khronos](https://www.khronos.org/gltf/).
+
+Pyrender is lightweight, easy to install, and simple to use.
+It comes packaged with both an intuitive scene viewer and a headache-free
+offscreen renderer with support for GPU-accelerated rendering on headless
+servers, which makes it perfect for machine learning applications.
+
+Extensive documentation, including a quickstart guide, is provided [here](https://pyrender.readthedocs.io/en/latest/).
+
+For a minimal working example of GPU-accelerated offscreen rendering using EGL,
+check out the [EGL Google CoLab Notebook](https://colab.research.google.com/drive/1pcndwqeY8vker3bLKQNJKr3B-7-SYenE?usp=sharing).
+
+
+<p align="center">
+  <img width="48%" src="https://github.com/mmatl/pyrender/blob/master/docs/source/_static/rotation.gif?raw=true" alt="GIF of Viewer"/>
+  <img width="48%" src="https://github.com/mmatl/pyrender/blob/master/docs/source/_static/damaged_helmet.png?raw=true" alt="Damaged Helmet"/>
+</p>
+
+## Installation
+You can install pyrender directly from pip.
+
+```bash
+pip install pyrender
+```
+
+## Features
+
+Despite being lightweight, pyrender has lots of features, including:
+
+* Simple interoperation with the amazing [trimesh](https://github.com/mikedh/trimesh) project,
+  which enables out-of-the-box support for dozens of mesh types, including OBJ,
+  STL, DAE, OFF, PLY, and GLB.
+* An easy-to-use scene viewer with support for animation, showing face and vertex
+  normals, toggling lighting conditions, and saving images and GIFs.
+* An offscreen rendering module that supports OSMesa and EGL backends.
+* Shadow mapping for directional and spot lights.
+* Metallic-roughness materials for physically-based rendering, including several
+  types of texture and normal mapping.
+* Transparency.
+* Depth and color image generation.
+
+## Sample Usage
+
+For sample usage, check out the [quickstart
+guide](https://pyrender.readthedocs.io/en/latest/examples/index.html) or one of
+the Google CoLab Notebooks:
+
+* [EGL Google CoLab Notebook](https://colab.research.google.com/drive/1pcndwqeY8vker3bLKQNJKr3B-7-SYenE?usp=sharing)
+
+## Viewer Keyboard and Mouse Controls
+
+When using the viewer, the basic controls for moving about the scene are as follows:
+
+* To rotate the camera about the center of the scene, hold the left mouse button and drag the cursor.
+* To rotate the camera about its viewing axis, hold `CTRL` left mouse button and drag the cursor.
+* To pan the camera, do one of the following:
+  * Hold `SHIFT`, then hold the left mouse button and drag the cursor.
+  * Hold the middle mouse button and drag the cursor.
+* To zoom the camera in or out, do one of the following:
+  * Scroll the mouse wheel.
+  * Hold the right mouse button and drag the cursor.
+
+The available keyboard commands are as follows:
+
+* `a`: Toggles rotational animation mode.
+* `c`: Toggles backface culling.
+* `f`: Toggles fullscreen mode.
+* `h`: Toggles shadow rendering.
+* `i`: Toggles axis display mode (no axes, world axis, mesh axes, all axes).
+* `l`: Toggles lighting mode (scene lighting, Raymond lighting, or direct lighting).
+* `m`: Toggles face normal visualization.
+* `n`: Toggles vertex normal visualization.
+* `o`: Toggles orthographic camera mode.
+* `q`: Quits the viewer.
+* `r`: Starts recording a GIF, and pressing again stops recording and opens a file dialog.
+* `s`: Opens a file dialog to save the current view as an image.
+* `w`: Toggles wireframe mode (scene default, flip wireframes, all wireframe, or all solid).
+* `z`: Resets the camera to the default view.
+
+As a note, displaying shadows significantly slows down rendering, so if you're
+experiencing low framerates, just kill shadows or reduce the number of lights in
+your scene.
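Not part of the commit, but to make the README's usage claims concrete, here is a minimal offscreen-rendering sketch in the style of pyrender's quickstart guide (the mesh path is a placeholder; on a headless machine, set `PYOPENGL_PLATFORM=osmesa` or `egl` first):

```python
import numpy as np
import trimesh
import pyrender

tm = trimesh.load('models/fuze.obj')   # any format trimesh can read (placeholder path)
scene = pyrender.Scene()
scene.add(pyrender.Mesh.from_trimesh(tm))

# OpenGL convention: the camera looks down its local -z axis,
# so back it away from the origin to see the mesh.
camera_pose = np.eye(4)
camera_pose[2, 3] = 2.0
scene.add(pyrender.PerspectiveCamera(yfov=np.pi / 3.0), pose=camera_pose)
scene.add(pyrender.DirectionalLight(color=np.ones(3), intensity=3.0),
          pose=camera_pose)

r = pyrender.OffscreenRenderer(viewport_width=640, viewport_height=480)
color, depth = r.render(scene)         # (H, W, 3) uint8 image, (H, W) depth map
r.delete()
```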
pyrender/docs/Makefile ADDED
@@ -0,0 +1,23 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+SOURCEDIR     = source
+BUILDDIR      = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+clean:
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+	rm -rf ./source/generated/*
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
pyrender/docs/make.bat ADDED
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.http://sphinx-doc.org/
+	exit /b 1
+)
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
+
+:end
+popd
pyrender/docs/source/api/index.rst ADDED
@@ -0,0 +1,59 @@
+Pyrender API Documentation
+==========================
+
+Constants
+---------
+.. automodapi:: pyrender.constants
+   :no-inheritance-diagram:
+   :no-main-docstr:
+   :no-heading:
+
+Cameras
+-------
+.. automodapi:: pyrender.camera
+   :no-inheritance-diagram:
+   :no-main-docstr:
+   :no-heading:
+
+Lighting
+--------
+.. automodapi:: pyrender.light
+   :no-inheritance-diagram:
+   :no-main-docstr:
+   :no-heading:
+
+Objects
+-------
+.. automodapi:: pyrender
+   :no-inheritance-diagram:
+   :no-main-docstr:
+   :no-heading:
+   :skip: Camera, DirectionalLight, Light, OffscreenRenderer, Node
+   :skip: OrthographicCamera, PerspectiveCamera, PointLight, RenderFlags
+   :skip: Renderer, Scene, SpotLight, TextAlign, Viewer, GLTF
+
+Scenes
+------
+.. automodapi:: pyrender
+   :no-inheritance-diagram:
+   :no-main-docstr:
+   :no-heading:
+   :skip: Camera, DirectionalLight, Light, OffscreenRenderer
+   :skip: OrthographicCamera, PerspectiveCamera, PointLight, RenderFlags
+   :skip: Renderer, SpotLight, TextAlign, Viewer, Sampler, Texture, Material
+   :skip: MetallicRoughnessMaterial, Primitive, Mesh, GLTF
+
+On-Screen Viewer
+----------------
+.. automodapi:: pyrender.viewer
+   :no-inheritance-diagram:
+   :no-inherited-members:
+   :no-main-docstr:
+   :no-heading:
+
+Off-Screen Rendering
+--------------------
+.. automodapi:: pyrender.offscreen
+   :no-inheritance-diagram:
+   :no-main-docstr:
+   :no-heading:
pyrender/docs/source/conf.py ADDED
@@ -0,0 +1,352 @@
+# -*- coding: utf-8 -*-
+#
+# core documentation build configuration file, created by
+# sphinx-quickstart on Sun Oct 16 14:33:48 2016.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+from pyrender import __version__
+from sphinx.domains.python import PythonDomain
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('../../'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'sphinx.ext.autodoc',
+    'sphinx.ext.autosummary',
+    'sphinx.ext.coverage',
+    'sphinx.ext.githubpages',
+    'sphinx.ext.intersphinx',
+    'sphinx.ext.napoleon',
+    'sphinx.ext.viewcode',
+    'sphinx_automodapi.automodapi',
+    'sphinx_automodapi.smart_resolver'
+]
+numpydoc_class_members_toctree = False
+automodapi_toctreedirnm = 'generated'
+automodsumm_inherited_members = True
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'pyrender'
+copyright = u'2018, Matthew Matl'
+author = u'Matthew Matl'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = __version__
+# The full version, including alpha/beta/rc tags.
+release = __version__
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = []
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+import sphinx_rtd_theme
+html_theme = 'sphinx_rtd_theme'
+html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (relative to this directory) to use as a favicon of
+# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+#html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+#html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'coredoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
+
+    # Latex figure (float) alignment
+    #'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+    (master_doc, 'pyrender.tex', u'pyrender Documentation',
+     u'Matthew Matl', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    (master_doc, 'pyrender', u'pyrender Documentation',
+     [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    (master_doc, 'pyrender', u'pyrender Documentation',
+     author, 'pyrender', 'One line description of project.',
+     'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
+
+intersphinx_mapping = {
+    'python' : ('https://docs.python.org/', None),
+    'pyrender' : ('https://pyrender.readthedocs.io/en/latest/', None),
+}
+
+# Autosummary fix
+autosummary_generate = True
+
+# Try to suppress multiple-definition warnings by always taking the shorter
+# path when two or more paths have the same base module
+
+class MyPythonDomain(PythonDomain):
+
+    def find_obj(self, env, modname, classname, name, type, searchmode=0):
+        """Ensures an object always resolves to the desired module
+        if defined there."""
+        orig_matches = PythonDomain.find_obj(
+            self, env, modname, classname, name, type, searchmode
+        )
+
+        if len(orig_matches) <= 1:
+            return orig_matches
+
+        # If multiple matches, try to take the shortest if all the modules are
+        # the same
+        first_match_name_sp = orig_matches[0][0].split('.')
+        base_name = first_match_name_sp[0]
+        min_len = len(first_match_name_sp)
+        best_match = orig_matches[0]
+
+        for match in orig_matches[1:]:
+            match_name = match[0]
+            match_name_sp = match_name.split('.')
+            match_base = match_name_sp[0]
+
+            # If we have mismatched bases, return them all to trigger warnings
+            if match_base != base_name:
+                return orig_matches
+
+            # Otherwise, check and see if it's shorter
+            if len(match_name_sp) < min_len:
+                min_len = len(match_name_sp)
+                best_match = match
+
+        return (best_match,)
+
+
+def setup(sphinx):
+    """Use MyPythonDomain in place of PythonDomain"""
+    sphinx.override_domain(MyPythonDomain)
+
pyrender/docs/source/examples/cameras.rst ADDED
@@ -0,0 +1,26 @@
+ .. _camera_guide:
+
+ Creating Cameras
+ ================
+
+ Pyrender supports three camera types: the :class:`.PerspectiveCamera` and
+ :class:`.IntrinsicsCamera` types,
+ which render scenes as a human would see them, and the
+ :class:`.OrthographicCamera` type, which preserves distances between points.
+
+ Creating cameras is easy -- just specify their basic attributes:
+
+ >>> pc = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.414)
+ >>> oc = pyrender.OrthographicCamera(xmag=1.0, ymag=1.0)
+
+ For more information, see the Khronos group's documentation here_:
+
+ .. _here: https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#projection-matrices
+
+ When you add cameras to the scene, make sure that you're using OpenGL camera
+ coordinates to specify their pose. See the illustration below for details.
+ Basically, the camera z-axis points away from the scene, the x-axis points
+ right in image space, and the y-axis points up in image space.
+
+ .. image:: /_static/camera_coords.png
+
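The guide above names :class:`.IntrinsicsCamera` but only demonstrates the other two constructors. As a minimal sketch, using the ``IntrinsicsCamera`` class added later in this diff (``fx``/``fy`` focal lengths and ``cx``/``cy`` optical center, all in pixels; the calibration numbers below are illustrative, not defaults):

>>> # fx, fy, cx, cy here are assumed calibration values for a 640x480 sensor
>>> ic = pyrender.IntrinsicsCamera(fx=525.0, fy=525.0, cx=320.0, cy=240.0)
>>> scene.add(ic, pose=camera_pose)  # pose in OpenGL camera coordinates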
pyrender/docs/source/examples/index.rst ADDED
@@ -0,0 +1,20 @@
+ .. _guide:
+
+ User Guide
+ ==========
+
+ This section contains guides on how to use Pyrender to quickly visualize
+ your 3D data, including a quickstart guide and more detailed descriptions
+ of each part of the rendering pipeline.
+
+
+ .. toctree::
+    :maxdepth: 2
+
+    quickstart.rst
+    models.rst
+    lighting.rst
+    cameras.rst
+    scenes.rst
+    offscreen.rst
+    viewer.rst
pyrender/docs/source/examples/lighting.rst ADDED
@@ -0,0 +1,21 @@
+ .. _lighting_guide:
+
+ Creating Lights
+ ===============
+
+ Pyrender supports three types of punctual light:
+
+ - :class:`.PointLight`: Point-based light sources, such as light bulbs.
+ - :class:`.SpotLight`: A conical light source, like a flashlight.
+ - :class:`.DirectionalLight`: A general light that does not attenuate with
+   distance.
+
+ Creating lights is easy -- just specify their basic attributes:
+
+ >>> pl = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=2.0)
+ >>> sl = pyrender.SpotLight(color=[1.0, 1.0, 1.0], intensity=2.0,
+ ...                         innerConeAngle=0.05, outerConeAngle=0.5)
+ >>> dl = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=2.0)
+
+ For more information about how these lighting models are implemented,
+ see their class documentation.
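One detail the guide leaves implicit is how lights are aimed: following the glTF convention that pyrender implements, a light shines along the -z axis of the node it is attached to, so you orient it through the node's pose. A small sketch (the rotation below is just an illustrative choice):

>>> import numpy as np
>>> pose = np.array([
...     [1.0, 0.0,  0.0, 0.0],
...     [0.0, 0.0, -1.0, 0.0],
...     [0.0, 1.0,  0.0, 0.0],
...     [0.0, 0.0,  0.0, 1.0],
... ])                            # rotate 90 degrees about x
>>> scene.add(dl, pose=pose)      # local -z now points along world +y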
pyrender/docs/source/examples/models.rst ADDED
@@ -0,0 +1,144 @@
+ .. _model_guide:
+
+ Loading and Configuring Models
+ ==============================
+ The first step to any rendering application is loading your models.
+ Pyrender implements the GLTF 2.0 specification, which means that all
+ models are composed of a hierarchy of objects.
+
+ At the top level, we have a :class:`.Mesh`. The :class:`.Mesh` is
+ basically a wrapper of any number of :class:`.Primitive` types,
+ which actually represent geometry that can be drawn to the screen.
+
+ Primitives are composed of a variety of parameters, including
+ vertex positions, vertex normals, color and texture information,
+ and triangle indices if smooth rendering is desired.
+ They can implement point clouds, triangular meshes, or lines
+ depending on how you configure their data and set their
+ :attr:`.Primitive.mode` parameter.
+
+ Although you can create primitives yourself if you want to,
+ it's probably easier to just use the utility functions provided
+ in the :class:`.Mesh` class.
+
+ Creating Triangular Meshes
+ --------------------------
+
+ Simple Construction
+ ~~~~~~~~~~~~~~~~~~~
+ Pyrender allows you to create a :class:`.Mesh` containing a
+ triangular mesh model directly from a :class:`~trimesh.base.Trimesh` object
+ using the :meth:`.Mesh.from_trimesh` static method.
+
+ >>> import trimesh
+ >>> import pyrender
+ >>> import numpy as np
+ >>> tm = trimesh.load('examples/models/fuze.obj')
+ >>> m = pyrender.Mesh.from_trimesh(tm)
+ >>> m.primitives
+ [<pyrender.primitive.Primitive at 0x7fbb0af60e50>]
+
+ You can also create a single :class:`.Mesh` from a list of
+ :class:`~trimesh.base.Trimesh` objects:
+
+ >>> tms = [trimesh.creation.icosahedron(), trimesh.creation.cylinder()]
+ >>> m = pyrender.Mesh.from_trimesh(tms)
+ >>> m.primitives
+ [<pyrender.primitive.Primitive at 0x7fbb0c2b74d0>,
+  <pyrender.primitive.Primitive at 0x7fbb0c2b7550>]
+
+ Vertex Smoothing
+ ~~~~~~~~~~~~~~~~
+
+ The :meth:`.Mesh.from_trimesh` method has a few additional optional parameters.
+ If you want to render the mesh without interpolating face normals, which can
+ be useful for meshes that are supposed to be angular (e.g. a cube), you
+ can specify ``smooth=False``.
+
+ >>> m = pyrender.Mesh.from_trimesh(tm, smooth=False)
+
+ Per-Face or Per-Vertex Coloration
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ If you have an untextured trimesh, you can color it in with per-face or
+ per-vertex colors:
+
+ >>> tm.visual.vertex_colors = np.random.uniform(size=tm.vertices.shape)
+ >>> tm.visual.face_colors = np.random.uniform(size=tm.faces.shape)
+ >>> m = pyrender.Mesh.from_trimesh(tm)
+
+ Instancing
+ ~~~~~~~~~~
+
+ If you want to render many copies of the same mesh at different poses,
+ you can statically create a vast array of them in an efficient manner.
+ Simply specify the ``poses`` parameter to be a list of ``N`` 4x4 homogeneous
+ transformation matrices that position the meshes relative to their common
+ base frame:
+
+ >>> tfs = np.tile(np.eye(4), (3,1,1))
+ >>> tfs[1,:3,3] = [0.1, 0.0, 0.0]
+ >>> tfs[2,:3,3] = [0.2, 0.0, 0.0]
+ >>> tfs
+ array([[[1. , 0. , 0. , 0. ],
+         [0. , 1. , 0. , 0. ],
+         [0. , 0. , 1. , 0. ],
+         [0. , 0. , 0. , 1. ]],
+        [[1. , 0. , 0. , 0.1],
+         [0. , 1. , 0. , 0. ],
+         [0. , 0. , 1. , 0. ],
+         [0. , 0. , 0. , 1. ]],
+        [[1. , 0. , 0. , 0.2],
+         [0. , 1. , 0. , 0. ],
+         [0. , 0. , 1. , 0. ],
+         [0. , 0. , 0. , 1. ]]])
+
+ >>> m = pyrender.Mesh.from_trimesh(tm, poses=tfs)
+
+ Custom Materials
+ ~~~~~~~~~~~~~~~~
+
+ You can also specify a custom material for any triangular mesh you create
+ in the ``material`` parameter of :meth:`.Mesh.from_trimesh`.
+ The main material supported by Pyrender is the
+ :class:`.MetallicRoughnessMaterial`.
+ The metallic-roughness model supports rendering highly-realistic objects across
+ a wide gamut of materials.
+
+ For more information, see the documentation of the
+ :class:`.MetallicRoughnessMaterial` constructor or the Khronos_
+ material specification.
+
+ .. _Khronos: https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#materials
+
+ Creating Point Clouds
+ ---------------------
+
+ Point Sprites
+ ~~~~~~~~~~~~~
+ Pyrender also allows you to create a :class:`.Mesh` containing a
+ point cloud directly from :class:`numpy.ndarray` instances
+ using the :meth:`.Mesh.from_points` static method.
+
+ Simply provide a list of points and optional per-point colors and normals.
+
+ >>> pts = tm.vertices.copy()
+ >>> colors = np.random.uniform(size=pts.shape)
+ >>> m = pyrender.Mesh.from_points(pts, colors=colors)
+
+ Point clouds created in this way will be rendered as square point sprites.
+
+ .. image:: /_static/points.png
+
+ Point Spheres
+ ~~~~~~~~~~~~~
+ If you have a monochromatic point cloud and would like to render it with
+ spheres, you can render it by instancing a spherical trimesh:
+
+ >>> sm = trimesh.creation.uv_sphere(radius=0.1)
+ >>> sm.visual.vertex_colors = [1.0, 0.0, 0.0]
+ >>> tfs = np.tile(np.eye(4), (len(pts), 1, 1))
+ >>> tfs[:,:3,3] = pts
+ >>> m = pyrender.Mesh.from_trimesh(sm, poses=tfs)
+
+ .. image:: /_static/points2.png
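For completeness, here is what direct :class:`.Primitive` construction looks like when you do want something other than triangles -- a sketch using the ``GLTF`` mode constants defined in ``constants.py`` later in this diff (the square outline here is an arbitrary illustrative shape):

>>> from pyrender import GLTF
>>> positions = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0],
...                       [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]])
>>> prim = pyrender.Primitive(positions=positions, mode=GLTF.LINE_LOOP)
>>> m = pyrender.Mesh(primitives=[prim])  # rendered as a closed line loop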
pyrender/docs/source/examples/offscreen.rst ADDED
@@ -0,0 +1,87 @@
+ .. _offscreen_guide:
+
+ Offscreen Rendering
+ ===================
+
+ .. note::
+    If you're using a headless server, you'll need to use either EGL (for
+    GPU-accelerated rendering) or OSMesa (for CPU-only software rendering).
+    If you're using OSMesa, be sure that you've installed it properly. See
+    :ref:`osmesa` for details.
+
+ Choosing a Backend
+ ------------------
+
+ Once you have a scene set up with its geometry, cameras, and lights,
+ you can render it using the :class:`.OffscreenRenderer`. Pyrender supports
+ three backends for offscreen rendering:
+
+ - Pyglet, the same engine that runs the viewer. This requires an active
+   display manager, so you can't run it on a headless server. This is the
+   default option.
+ - OSMesa, a software renderer.
+ - EGL, which allows for GPU-accelerated rendering without a display manager.
+
+ If you want to use OSMesa or EGL, you need to set the ``PYOPENGL_PLATFORM``
+ environment variable before importing pyrender or any other OpenGL library.
+ You can do this at the command line:
+
+ .. code-block:: bash
+
+    PYOPENGL_PLATFORM=osmesa python render.py
+
+ or at the top of your Python script:
+
+ .. code-block:: python
+
+    # Top of main python script
+    import os
+    os.environ['PYOPENGL_PLATFORM'] = 'egl'
+
+ The handle for EGL is ``egl``, and the handle for OSMesa is ``osmesa``.
+
+ Running the Renderer
+ --------------------
+
+ Once you've set your environment variable appropriately, create your scene and
+ then configure the :class:`.OffscreenRenderer` object with a window width,
+ a window height, and a size for point-cloud points:
+
+ >>> r = pyrender.OffscreenRenderer(viewport_width=640,
+ ...                                viewport_height=480,
+ ...                                point_size=1.0)
+
+ Then, just call the :meth:`.OffscreenRenderer.render` function:
+
+ >>> color, depth = r.render(scene)
+
+ .. image:: /_static/scene.png
+
+ This will return an ``(h, w, 3)`` color image and an ``(h, w)``
+ floating-point depth image rendered from the scene's main camera.
+
+ You can customize the rendering process by using flag options from
+ :class:`.RenderFlags` and bitwise or-ing them together. For example,
+ the following code renders a color image with an alpha channel
+ and enables shadow mapping for all directional lights:
+
+ >>> flags = RenderFlags.RGBA | RenderFlags.SHADOWS_DIRECTIONAL
+ >>> color, depth = r.render(scene, flags=flags)
+
+ Once you're done with the offscreen renderer, you need to close it before you
+ can run a different renderer or open the viewer for the same scene:
+
+ >>> r.delete()
+
+ Google CoLab Examples
+ ---------------------
+
+ For a minimal working example of offscreen rendering using OSMesa,
+ see the `OSMesa Google CoLab notebook`_.
+
+ .. _OSMesa Google CoLab notebook: https://colab.research.google.com/drive/1Z71mHIc-Sqval92nK290vAsHZRUkCjUx
+
+ For a minimal working example of offscreen rendering using EGL,
+ see the `EGL Google CoLab notebook`_.
+
+ .. _EGL Google CoLab notebook: https://colab.research.google.com/drive/1rTLHk0qxh4dn8KNe-mCnN8HAWdd2_BEh
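One more flag worth knowing about: if you only need depth, ``RenderFlags.DEPTH_ONLY`` (defined in ``constants.py`` in this diff) skips the color pass entirely. A sketch, assuming the renderer returns just the depth buffer in that mode:

>>> depth = r.render(scene, flags=pyrender.RenderFlags.DEPTH_ONLY)
>>> depth.shape
(480, 640)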
pyrender/docs/source/examples/quickstart.rst ADDED
@@ -0,0 +1,71 @@
+ .. _quickstart_guide:
+
+ Quickstart
+ ==========
+
+
+ Minimal Example for 3D Viewer
+ -----------------------------
+ Here is a minimal example of loading and viewing a triangular mesh model
+ in pyrender.
+
+ >>> import trimesh
+ >>> import pyrender
+ >>> fuze_trimesh = trimesh.load('examples/models/fuze.obj')
+ >>> mesh = pyrender.Mesh.from_trimesh(fuze_trimesh)
+ >>> scene = pyrender.Scene()
+ >>> scene.add(mesh)
+ >>> pyrender.Viewer(scene, use_raymond_lighting=True)
+
+ .. image:: /_static/fuze.png
+
+
+ Minimal Example for Offscreen Rendering
+ ---------------------------------------
+ .. note::
+    If you're using a headless server, make sure that you followed the guide
+    for installing OSMesa. See :ref:`osmesa`.
+
+ Here is a minimal example of rendering a mesh model offscreen in pyrender.
+ The only additional necessities are that you need to add lighting and a camera.
+
+ >>> import numpy as np
+ >>> import trimesh
+ >>> import pyrender
+ >>> import matplotlib.pyplot as plt
+
+ >>> fuze_trimesh = trimesh.load('examples/models/fuze.obj')
+ >>> mesh = pyrender.Mesh.from_trimesh(fuze_trimesh)
+ >>> scene = pyrender.Scene()
+ >>> scene.add(mesh)
+ >>> camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0)
+ >>> s = np.sqrt(2)/2
+ >>> camera_pose = np.array([
+ ...     [0.0, -s,   s,   0.3],
+ ...     [1.0,  0.0, 0.0, 0.0],
+ ...     [0.0,  s,   s,   0.35],
+ ...     [0.0,  0.0, 0.0, 1.0],
+ ... ])
+ >>> scene.add(camera, pose=camera_pose)
+ >>> light = pyrender.SpotLight(color=np.ones(3), intensity=3.0,
+ ...                            innerConeAngle=np.pi/16.0,
+ ...                            outerConeAngle=np.pi/6.0)
+ >>> scene.add(light, pose=camera_pose)
+ >>> r = pyrender.OffscreenRenderer(400, 400)
+ >>> color, depth = r.render(scene)
+ >>> plt.figure()
+ >>> plt.subplot(1,2,1)
+ >>> plt.axis('off')
+ >>> plt.imshow(color)
+ >>> plt.subplot(1,2,2)
+ >>> plt.axis('off')
+ >>> plt.imshow(depth, cmap=plt.cm.gray_r)
+ >>> plt.show()
+
+ .. image:: /_static/minexcolor.png
+    :width: 45%
+    :align: left
+ .. image:: /_static/minexdepth.png
+    :width: 45%
+    :align: right
+
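Since ``color`` and ``depth`` are plain numpy arrays, you can also write them to disk instead of (or in addition to) displaying them; a minimal sketch using the matplotlib import already in the example above:

>>> plt.imsave('color.png', color)
>>> plt.imsave('depth.png', depth, cmap=plt.cm.gray_r)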
pyrender/docs/source/examples/scenes.rst ADDED
@@ -0,0 +1,78 @@
+ .. _scene_guide:
+
+ Creating Scenes
+ ===============
+
+ Before you render anything, you need to put all of your lights, cameras,
+ and meshes into a scene. The :class:`.Scene` object keeps track of the relative
+ poses of these primitives by inserting them into :class:`.Node` objects and
+ keeping them in a directed acyclic graph.
+
+ Adding Objects
+ --------------
+
+ To create a :class:`.Scene`, simply call the constructor. You can optionally
+ specify an ambient light color and a background color:
+
+ >>> scene = pyrender.Scene(ambient_light=[0.02, 0.02, 0.02],
+ ...                        bg_color=[1.0, 1.0, 1.0])
+
+ You can add objects to a scene by first creating a :class:`.Node` object
+ and adding the object and its pose to the :class:`.Node`. Poses are specified
+ as 4x4 homogeneous transformation matrices that are stored in the node's
+ :attr:`.Node.matrix` attribute. Note that the :class:`.Node`
+ constructor requires you to specify whether you're adding a mesh, light,
+ or camera.
+
+ >>> mesh = pyrender.Mesh.from_trimesh(tm)
+ >>> light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=2.0)
+ >>> cam = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.414)
+ >>> nm = pyrender.Node(mesh=mesh, matrix=np.eye(4))
+ >>> nl = pyrender.Node(light=light, matrix=np.eye(4))
+ >>> nc = pyrender.Node(camera=cam, matrix=np.eye(4))
+ >>> scene.add_node(nm)
+ >>> scene.add_node(nl)
+ >>> scene.add_node(nc)
+
+ You can also add objects directly to a scene with the :meth:`.Scene.add` function,
+ which takes care of creating a :class:`.Node` for you.
+
+ >>> scene.add(mesh, pose=np.eye(4))
+ >>> scene.add(light, pose=np.eye(4))
+ >>> scene.add(cam, pose=np.eye(4))
+
+ Nodes can be hierarchical, in which case the node's :attr:`.Node.matrix`
+ specifies that node's pose relative to its parent frame. You can add nodes to
+ a scene hierarchically by specifying a parent node in your calls to
+ :meth:`.Scene.add` or :meth:`.Scene.add_node`:
+
+ >>> scene.add_node(nl, parent_node=nc)
+ >>> scene.add(cam, parent_node=nm)
+
+ If you add multiple cameras to a scene, you can specify which one to render from
+ by setting the :attr:`.Scene.main_camera_node` attribute.
+
+ Updating Objects
+ ----------------
+
+ You can update the poses of existing nodes with the :meth:`.Scene.set_pose`
+ function. Simply call it with a :class:`.Node` that is already in the scene
+ and the new pose of that node with respect to its parent as a 4x4 homogeneous
+ transformation matrix:
+
+ >>> scene.set_pose(nl, pose=np.eye(4))
+
+ If you want to get the local pose of a node, you can just access its
+ :attr:`.Node.matrix` attribute. However, if you want to get
+ the pose of a node *with respect to the world frame*, you can call the
+ :meth:`.Scene.get_pose` method.
+
+ >>> tf = scene.get_pose(nl)
+
+ Removing Objects
+ ----------------
+
+ Finally, you can remove a :class:`.Node` and all of its children from the
+ scene with the :meth:`.Scene.remove_node` function:
+
+ >>> scene.remove_node(nl)
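Because node poses are relative to their parents, the world pose returned by :meth:`.Scene.get_pose` is the product of the matrices along the path from the root. A small sketch of that composition, reusing ``nl`` parented to ``nc`` from the hierarchy example above (pure translations, so the offsets simply add):

>>> parent_tf = np.eye(4)
>>> parent_tf[:3, 3] = [1.0, 0.0, 0.0]
>>> child_tf = np.eye(4)
>>> child_tf[:3, 3] = [0.0, 2.0, 0.0]
>>> scene.set_pose(nc, pose=parent_tf)
>>> scene.set_pose(nl, pose=child_tf)
>>> scene.get_pose(nl)[:3, 3]   # world translation = parent @ child
array([1., 2., 0.])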
pyrender/docs/source/examples/viewer.rst ADDED
@@ -0,0 +1,61 @@
+ .. _viewer_guide:
+
+ Live Scene Viewer
+ =================
+
+ Standard Usage
+ --------------
+ In addition to the offscreen renderer, Pyrender comes with a live scene viewer.
+ In its standard invocation, calling the :class:`.Viewer`'s constructor will
+ immediately pop a viewing window that you can navigate around in.
+
+ >>> pyrender.Viewer(scene)
+
+ By default, the viewer uses your scene's lighting. If you'd like to start with
+ some additional lighting that moves around with the camera, you can specify that
+ with:
+
+ >>> pyrender.Viewer(scene, use_raymond_lighting=True)
+
+ For a full list of the many options that the :class:`.Viewer` supports, check out its
+ documentation.
+
+ .. image:: /_static/rotation.gif
+
+ Running the Viewer in a Separate Thread
+ ---------------------------------------
+ If you'd like to animate your models, you'll want to run the viewer in a
+ separate thread so that you can update the scene while the viewer is running.
+ To do this, first pop the viewer in a separate thread by calling its constructor
+ with the ``run_in_thread`` option set:
+
+ >>> v = pyrender.Viewer(scene, run_in_thread=True)
+
+ Then, you can manipulate the :class:`.Scene` while the viewer is running to
+ animate things. However, be careful to acquire the viewer's
+ :attr:`.Viewer.render_lock` before editing the scene to prevent data corruption:
+
+ >>> i = 0
+ >>> while True:
+ ...     pose = np.eye(4)
+ ...     pose[:3,3] = [i, 0, 0]
+ ...     v.render_lock.acquire()
+ ...     scene.set_pose(mesh_node, pose)
+ ...     v.render_lock.release()
+ ...     i += 0.01
+
+ .. image:: /_static/scissors.gif
+
+ You can wait on the viewer to be closed manually:
+
+ >>> while v.is_active:
+ ...     pass
+
+ Or you can close it forcibly from the main thread. Even then, keep
+ looping and blocking until the viewer has actually exited before using
+ the scene object again.
+
+ >>> v.close_external()
+ >>> while v.is_active:
+ ...     pass
+
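Putting the pieces together, a complete animation loop usually also keys off ``is_active`` so it stops when the user closes the window, and sleeps a little each iteration so it doesn't spin a CPU core. A sketch, assuming ``mesh_node`` is already in the scene:

>>> import time
>>> v = pyrender.Viewer(scene, run_in_thread=True)
>>> i = 0.0
>>> while v.is_active:
...     pose = np.eye(4)
...     pose[:3, 3] = [i, 0.0, 0.0]
...     v.render_lock.acquire()
...     scene.set_pose(mesh_node, pose)
...     v.render_lock.release()
...     i += 0.01
...     time.sleep(1.0 / 60.0)  # cap updates at ~60 Hz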
pyrender/docs/source/index.rst ADDED
@@ -0,0 +1,41 @@
+ .. core documentation master file, created by
+    sphinx-quickstart on Sun Oct 16 14:33:48 2016.
+    You can adapt this file completely to your liking, but it should at least
+    contain the root `toctree` directive.
+
+ Pyrender Documentation
+ ======================
+ Pyrender is a pure Python (2.7, 3.4, 3.5, 3.6) library for physically-based
+ rendering and visualization.
+ It is designed to meet the glTF 2.0 specification_ from Khronos.
+
+ .. _specification: https://www.khronos.org/gltf/
+
+ Pyrender is lightweight, easy to install, and simple to use.
+ It comes packaged with both an intuitive scene viewer and a headache-free
+ offscreen renderer with support for GPU-accelerated rendering on headless
+ servers, which makes it perfect for machine learning applications.
+ Check out the :ref:`guide` for a full tutorial, or fork me on
+ Github_.
+
+ .. _Github: https://github.com/mmatl/pyrender
+
+ .. image:: _static/rotation.gif
+
+ .. image:: _static/damaged_helmet.png
+
+ .. toctree::
+    :maxdepth: 2
+
+    install/index.rst
+    examples/index.rst
+    api/index.rst
+
+
+ Indices and tables
+ ==================
+
+ * :ref:`genindex`
+ * :ref:`modindex`
+ * :ref:`search`
+
pyrender/docs/source/install/index.rst ADDED
@@ -0,0 +1,172 @@
+ Installation Guide
+ ==================
+
+ Python Installation
+ -------------------
+
+ This package is available via ``pip``.
+
+ .. code-block:: bash
+
+    pip install pyrender
+
+ If you're on MacOS, you'll need
+ to pre-install my fork of ``pyglet``, as the version on PyPI hasn't yet included
+ my change that enables OpenGL contexts on MacOS.
+
+ .. code-block:: bash
+
+    git clone https://github.com/mmatl/pyglet.git
+    cd pyglet
+    pip install .
+
+ .. _osmesa:
+
+ Getting Pyrender Working with OSMesa
+ ------------------------------------
+ If you want to render scenes offscreen but don't want to have to
+ install a display manager or deal with the pains of trying to get
+ OpenGL to work over SSH, you have two options.
+
+ The first (and preferred) option is using EGL, which enables you to perform
+ GPU-accelerated rendering on headless servers.
+ However, you'll need EGL 1.5 to get modern OpenGL contexts.
+ This comes packaged with NVIDIA's current drivers, but if you are having issues
+ getting EGL to work with your hardware, you can try using OSMesa,
+ a software-based offscreen renderer that is included with any Mesa
+ install.
+
+ If you want to use OSMesa with pyrender, you'll have to perform two additional
+ installation steps:
+
+ - :ref:`installmesa`
+ - :ref:`installpyopengl`
+
+ Then, read the offscreen rendering tutorial. See :ref:`offscreen_guide`.
+
+ .. _installmesa:
+
+ Installing OSMesa
+ *****************
+
+ As a first step, you'll need to rebuild and re-install Mesa with support
+ for fast offscreen rendering and OpenGL 3+ contexts.
+ I'd recommend installing from source, but you can also try my ``.deb``
+ for Ubuntu 16.04 and up.
+
+ Installing from a Debian Package
+ ********************************
+
+ If you're running Ubuntu 16.04 or newer, you should be able to install the
+ required version of Mesa from my ``.deb`` file.
+
+ .. code-block:: bash
+
+    sudo apt update
+    sudo wget https://github.com/mmatl/travis_debs/raw/master/xenial/mesa_18.3.3-0.deb
+    sudo dpkg -i ./mesa_18.3.3-0.deb || true
+    sudo apt install -f
+
+ If this doesn't work, try building from source.
+
+ Building From Source
+ ********************
+
+ First, install build dependencies via ``apt`` or your system's package manager.
+
+ .. code-block:: bash
+
+    sudo apt-get install llvm-6.0 freeglut3 freeglut3-dev
+
+ Then, download the current release of Mesa from here_.
+ Unpack the source and go to the source folder:
+
+ .. _here: https://archive.mesa3d.org/mesa-18.3.3.tar.gz
+
+ .. code-block:: bash
+
+    tar xfv mesa-18.3.3.tar.gz
+    cd mesa-18.3.3
+
+ Replace ``PREFIX`` with the path you want to install Mesa at.
+ If you're not worried about overwriting your default Mesa install,
+ a good place is ``/usr/local``.
+
+ Now, configure the installation by running the following command:
+
+ .. code-block:: bash
+
+    ./configure --prefix=PREFIX \
+                --enable-opengl --disable-gles1 --disable-gles2 \
+                --disable-va --disable-xvmc --disable-vdpau \
+                --enable-shared-glapi \
+                --disable-texture-float \
+                --enable-gallium-llvm --enable-llvm-shared-libs \
+                --with-gallium-drivers=swrast,swr \
+                --disable-dri --with-dri-drivers= \
+                --disable-egl --with-egl-platforms= --disable-gbm \
+                --disable-glx \
+                --disable-osmesa --enable-gallium-osmesa \
+                ac_cv_path_LLVM_CONFIG=llvm-config-6.0
+
+ Next, build and install Mesa.
+
+ .. code-block:: bash
+
+    make -j8
+    make install
+
+ Finally, if you didn't install Mesa in the system path,
+ add the following lines to your ``~/.bashrc`` file after
+ changing ``MESA_HOME`` to your mesa installation path (i.e. what you used as
+ ``PREFIX`` during the configure command).
+
+ .. code-block:: bash
+
+    MESA_HOME=/path/to/your/mesa/installation
+    export LIBRARY_PATH=$LIBRARY_PATH:$MESA_HOME/lib
+    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$MESA_HOME/lib
+    export C_INCLUDE_PATH=$C_INCLUDE_PATH:$MESA_HOME/include/
+    export CPLUS_INCLUDE_PATH=$CPLUS_INCLUDE_PATH:$MESA_HOME/include/
+
+ .. _installpyopengl:
+
+ Installing a Compatible Fork of PyOpenGL
+ ****************************************
+
+ Next, install and use my fork of ``PyOpenGL``.
+ This fork enables getting modern OpenGL contexts with OSMesa.
+ My patch has been included in ``PyOpenGL``, but it has not yet been released
+ on PyPI.
+
+ .. code-block:: bash
+
+    git clone https://github.com/mmatl/pyopengl.git
+    pip install ./pyopengl
+
+
+ Building Documentation
+ ----------------------
+
+ The online documentation for ``pyrender`` is automatically built by Read The Docs.
+ Building ``pyrender``'s documentation locally requires a few extra dependencies --
+ specifically, `sphinx`_ and a few plugins.
+
+ .. _sphinx: http://www.sphinx-doc.org/en/master/
+
+ To install the dependencies required, simply change directories into the ``pyrender`` source and run
+
+ .. code-block:: bash
+
+    $ pip install .[docs]
+
+ Then, go to the ``docs`` directory and run ``make`` with the appropriate target.
+ For example,
+
+ .. code-block:: bash
+
+    $ cd docs/
+    $ make html
+
+ will generate a set of web pages. Any documentation files
+ generated in this manner can be found in ``docs/build``.
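After installing Mesa and the PyOpenGL fork, a quick smoke test is to force the platform and render a trivial scene headlessly; a minimal sketch (swap ``'osmesa'`` for ``'egl'`` to exercise the GPU path):

.. code-block:: python

   import os
   os.environ['PYOPENGL_PLATFORM'] = 'osmesa'  # must be set before importing pyrender

   import numpy as np
   import pyrender

   scene = pyrender.Scene()
   scene.add(pyrender.PerspectiveCamera(yfov=np.pi / 3.0), pose=np.eye(4))
   r = pyrender.OffscreenRenderer(viewport_width=64, viewport_height=64)
   color, depth = r.render(scene)   # an empty view is fine for a smoke test
   print(color.shape, depth.shape)  # expect (64, 64, 3) (64, 64)
   r.delete()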
pyrender/examples/duck.py ADDED
@@ -0,0 +1,13 @@
+ from pyrender import Mesh, Scene, Viewer
+ from io import BytesIO
+ import numpy as np
+ import trimesh
+ import requests
+
+ duck_source = "https://github.com/KhronosGroup/glTF-Sample-Models/raw/master/2.0/Duck/glTF-Binary/Duck.glb"
+
+ duck = trimesh.load(BytesIO(requests.get(duck_source).content), file_type='glb')
+ duckmesh = Mesh.from_trimesh(list(duck.geometry.values())[0])
+ scene = Scene(ambient_light=np.array([1.0, 1.0, 1.0, 1.0]))
+ scene.add(duckmesh)
+ Viewer(scene)
pyrender/examples/example.py ADDED
@@ -0,0 +1,157 @@
+ """Examples of using pyrender for viewing and offscreen rendering.
+ """
+ import pyglet
+ pyglet.options['shadow_window'] = False
+ import os
+ import numpy as np
+ import trimesh
+
+ from pyrender import PerspectiveCamera,\
+                      DirectionalLight, SpotLight, PointLight,\
+                      MetallicRoughnessMaterial,\
+                      Primitive, Mesh, Node, Scene,\
+                      Viewer, OffscreenRenderer, RenderFlags
+
+ #==============================================================================
+ # Mesh creation
+ #==============================================================================
+
+ #------------------------------------------------------------------------------
+ # Creating textured meshes from trimeshes
+ #------------------------------------------------------------------------------
+
+ # Fuze trimesh
+ fuze_trimesh = trimesh.load('./models/fuze.obj')
+ fuze_mesh = Mesh.from_trimesh(fuze_trimesh)
+
+ # Drill trimesh
+ drill_trimesh = trimesh.load('./models/drill.obj')
+ drill_mesh = Mesh.from_trimesh(drill_trimesh)
+ drill_pose = np.eye(4)
+ drill_pose[0,3] = 0.1
+ drill_pose[2,3] = -np.min(drill_trimesh.vertices[:,2])
+
+ # Wood trimesh
+ wood_trimesh = trimesh.load('./models/wood.obj')
+ wood_mesh = Mesh.from_trimesh(wood_trimesh)
+
+ # Water bottle trimesh
+ bottle_gltf = trimesh.load('./models/WaterBottle.glb')
+ bottle_trimesh = bottle_gltf.geometry[list(bottle_gltf.geometry.keys())[0]]
+ bottle_mesh = Mesh.from_trimesh(bottle_trimesh)
+ bottle_pose = np.array([
+     [1.0, 0.0, 0.0, 0.1],
+     [0.0, 0.0, -1.0, -0.16],
+     [0.0, 1.0, 0.0, 0.13],
+     [0.0, 0.0, 0.0, 1.0],
+ ])
+
+ #------------------------------------------------------------------------------
+ # Creating meshes with per-vertex colors
+ #------------------------------------------------------------------------------
+ boxv_trimesh = trimesh.creation.box(extents=0.1*np.ones(3))
+ boxv_vertex_colors = np.random.uniform(size=(boxv_trimesh.vertices.shape))
+ boxv_trimesh.visual.vertex_colors = boxv_vertex_colors
+ boxv_mesh = Mesh.from_trimesh(boxv_trimesh, smooth=False)
+
+ #------------------------------------------------------------------------------
+ # Creating meshes with per-face colors
+ #------------------------------------------------------------------------------
+ boxf_trimesh = trimesh.creation.box(extents=0.1*np.ones(3))
+ boxf_face_colors = np.random.uniform(size=boxf_trimesh.faces.shape)
+ boxf_trimesh.visual.face_colors = boxf_face_colors
+ boxf_mesh = Mesh.from_trimesh(boxf_trimesh, smooth=False)
+
+ #------------------------------------------------------------------------------
+ # Creating meshes from point clouds
+ #------------------------------------------------------------------------------
+ points = trimesh.creation.icosphere(radius=0.05).vertices
+ point_colors = np.random.uniform(size=points.shape)
+ points_mesh = Mesh.from_points(points, colors=point_colors)
+
+ #==============================================================================
+ # Light creation
+ #==============================================================================
+
+ direc_l = DirectionalLight(color=np.ones(3), intensity=1.0)
+ spot_l = SpotLight(color=np.ones(3), intensity=10.0,
+                    innerConeAngle=np.pi/16, outerConeAngle=np.pi/6)
+ point_l = PointLight(color=np.ones(3), intensity=10.0)
+
+ #==============================================================================
+ # Camera creation
+ #==============================================================================
+
+ cam = PerspectiveCamera(yfov=(np.pi / 3.0))
+ cam_pose = np.array([
+     [0.0, -np.sqrt(2)/2, np.sqrt(2)/2, 0.5],
+     [1.0, 0.0, 0.0, 0.0],
+     [0.0, np.sqrt(2)/2, np.sqrt(2)/2, 0.4],
+     [0.0, 0.0, 0.0, 1.0]
+ ])
+
+ #==============================================================================
+ # Scene creation
+ #==============================================================================
+
+ scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))
+
+ #==============================================================================
+ # Adding objects to the scene
+ #==============================================================================
+
+ #------------------------------------------------------------------------------
+ # By manually creating nodes
+ #------------------------------------------------------------------------------
+ fuze_node = Node(mesh=fuze_mesh, translation=np.array([0.1, 0.15, -np.min(fuze_trimesh.vertices[:,2])]))
+ scene.add_node(fuze_node)
+ boxv_node = Node(mesh=boxv_mesh, translation=np.array([-0.1, 0.10, 0.05]))
+ scene.add_node(boxv_node)
+ boxf_node = Node(mesh=boxf_mesh, translation=np.array([-0.1, -0.10, 0.05]))
+ scene.add_node(boxf_node)
+
+ #------------------------------------------------------------------------------
+ # By using the add() utility function
+ #------------------------------------------------------------------------------
+ drill_node = scene.add(drill_mesh, pose=drill_pose)
+ bottle_node = scene.add(bottle_mesh, pose=bottle_pose)
+ wood_node = scene.add(wood_mesh)
+ direc_l_node = scene.add(direc_l, pose=cam_pose)
+ spot_l_node = scene.add(spot_l, pose=cam_pose)
+
+ #==============================================================================
+ # Using the viewer with a default camera
+ #==============================================================================
+
+ v = Viewer(scene, shadows=True)
+
+ #==============================================================================
+ # Using the viewer with a pre-specified camera
+ #==============================================================================
+ cam_node = scene.add(cam, pose=cam_pose)
+ v = Viewer(scene, central_node=drill_node)
+
+ #==============================================================================
+ # Rendering offscreen from that camera
+ #==============================================================================
+
+ r = OffscreenRenderer(viewport_width=640*2, viewport_height=480*2)
+ color, depth = r.render(scene)
+
+ import matplotlib.pyplot as plt
+ plt.figure()
+ plt.imshow(color)
+ plt.show()
+
+ #==============================================================================
+ # Segmask rendering
+ #==============================================================================
+
+ nm = {node: 20*(i + 1) for i, node in enumerate(scene.mesh_nodes)}
+ seg = r.render(scene, RenderFlags.SEG, nm)[0]
+ plt.figure()
+ plt.imshow(seg)
+ plt.show()
+
+ r.delete()
+
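A note on the segmentation render at the end of this example: ``nm`` maps each mesh node to an integer color, so per-node binary masks can be recovered from the returned image. A sketch, assuming the ``(h, w, 3)`` output that this render path produces:

>>> node = list(scene.mesh_nodes)[0]
>>> mask = (seg[..., 0] == nm[node])  # boolean (h, w) mask for that node
>>> n_pixels = mask.sum()             # pixel count covered by that node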
pyrender/pyrender/__init__.py ADDED
@@ -0,0 +1,24 @@
+ from .camera import (Camera, PerspectiveCamera, OrthographicCamera,
+                      IntrinsicsCamera)
+ from .light import Light, PointLight, DirectionalLight, SpotLight
+ from .sampler import Sampler
+ from .texture import Texture
+ from .material import Material, MetallicRoughnessMaterial
+ from .primitive import Primitive
+ from .mesh import Mesh
+ from .node import Node
+ from .scene import Scene
+ from .renderer import Renderer
+ from .viewer import Viewer
+ from .offscreen import OffscreenRenderer
+ from .version import __version__
+ from .constants import RenderFlags, TextAlign, GLTF
+
+ __all__ = [
+     'Camera', 'PerspectiveCamera', 'OrthographicCamera', 'IntrinsicsCamera',
+     'Light', 'PointLight', 'DirectionalLight', 'SpotLight',
+     'Sampler', 'Texture', 'Material', 'MetallicRoughnessMaterial',
+     'Primitive', 'Mesh', 'Node', 'Scene', 'Renderer', 'Viewer',
+     'OffscreenRenderer', '__version__', 'RenderFlags', 'TextAlign',
+     'GLTF'
+ ]
pyrender/pyrender/camera.py ADDED
@@ -0,0 +1,437 @@
+ """Virtual cameras compliant with the glTF 2.0 specification as described at
+ https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#reference-camera
+
+ Author: Matthew Matl
+ """
+ import abc
+ import numpy as np
+ import six
+ import sys
+
+ from .constants import DEFAULT_Z_NEAR, DEFAULT_Z_FAR
+
+
+ @six.add_metaclass(abc.ABCMeta)
+ class Camera(object):
+     """Abstract base class for all cameras.
+
+     Note
+     ----
+     Camera poses are specified in the OpenGL format,
+     where the z axis points away from the view direction and the
+     x and y axes point to the right and up in the image plane, respectively.
+
+     Parameters
+     ----------
+     znear : float
+         The floating-point distance to the near clipping plane.
+     zfar : float
+         The floating-point distance to the far clipping plane.
+         ``zfar`` must be greater than ``znear``.
+     name : str, optional
+         The user-defined name of this object.
+     """
+
+     def __init__(self,
+                  znear=DEFAULT_Z_NEAR,
+                  zfar=DEFAULT_Z_FAR,
+                  name=None):
+         self.name = name
+         self.znear = znear
+         self.zfar = zfar
+
+     @property
+     def name(self):
+         """str : The user-defined name of this object.
+         """
+         return self._name
+
+     @name.setter
+     def name(self, value):
+         if value is not None:
+             value = str(value)
+         self._name = value
+
+     @property
+     def znear(self):
+         """float : The distance to the near clipping plane.
+         """
+         return self._znear
+
+     @znear.setter
+     def znear(self, value):
+         value = float(value)
+         if value < 0:
+             raise ValueError('z-near must be >= 0.0')
+         self._znear = value
+
+     @property
+     def zfar(self):
+         """float : The distance to the far clipping plane.
+         """
+         return self._zfar
+
+     @zfar.setter
+     def zfar(self, value):
+         value = float(value)
+         if value <= 0 or value <= self.znear:
+             raise ValueError('zfar must be >0 and >znear')
+         self._zfar = value
+
+     @abc.abstractmethod
+     def get_projection_matrix(self, width=None, height=None):
+         """Return the OpenGL projection matrix for this camera.
+
+         Parameters
+         ----------
+         width : int
+             Width of the current viewport, in pixels.
+         height : int
+             Height of the current viewport, in pixels.
+         """
+         pass
+
+
+ class PerspectiveCamera(Camera):
+
+     """A perspective camera for perspective projection.
+
+     Parameters
+     ----------
+     yfov : float
+         The floating-point vertical field of view in radians.
+     znear : float
+         The floating-point distance to the near clipping plane.
+         If not specified, defaults to 0.05.
+     zfar : float, optional
+         The floating-point distance to the far clipping plane.
+         ``zfar`` must be greater than ``znear``.
+         If None, the camera uses an infinite projection matrix.
+     aspectRatio : float, optional
+         The floating-point aspect ratio of the field of view.
+         If not specified, the camera uses the viewport's aspect ratio.
+     name : str, optional
+         The user-defined name of this object.
+     """
+
+     def __init__(self,
+                  yfov,
+                  znear=DEFAULT_Z_NEAR,
+                  zfar=None,
+                  aspectRatio=None,
+                  name=None):
+         super(PerspectiveCamera, self).__init__(
+             znear=znear,
+             zfar=zfar,
+             name=name,
+         )
+
+         self.yfov = yfov
+         self.aspectRatio = aspectRatio
+
+     @property
+     def yfov(self):
+         """float : The vertical field of view in radians.
+         """
+         return self._yfov
+
+     @yfov.setter
+     def yfov(self, value):
+         value = float(value)
+         if value <= 0.0:
+             raise ValueError('Field of view must be positive')
+         self._yfov = value
+
+     @property
+     def zfar(self):
+         """float : The distance to the far clipping plane.
+         """
+         return self._zfar
+
+     @zfar.setter
+     def zfar(self, value):
+         if value is not None:
+             value = float(value)
+             if value <= 0 or value <= self.znear:
+                 raise ValueError('zfar must be >0 and >znear')
+         self._zfar = value
+
+     @property
+     def aspectRatio(self):
+         """float : The ratio of the width to the height of the field of view.
+         """
+         return self._aspectRatio
+
+     @aspectRatio.setter
+     def aspectRatio(self, value):
+         if value is not None:
+             value = float(value)
+             if value <= 0.0:
+                 raise ValueError('Aspect ratio must be positive')
+         self._aspectRatio = value
+
+     def get_projection_matrix(self, width=None, height=None):
+         """Return the OpenGL projection matrix for this camera.
+
+         Parameters
+         ----------
+         width : int
+             Width of the current viewport, in pixels.
+         height : int
+             Height of the current viewport, in pixels.
+         """
+         aspect_ratio = self.aspectRatio
+         if aspect_ratio is None:
+             if width is None or height is None:
+                 raise ValueError('Aspect ratio of camera must be defined')
+             aspect_ratio = float(width) / float(height)
+
+         a = aspect_ratio
+         t = np.tan(self.yfov / 2.0)
+         n = self.znear
+         f = self.zfar
+
+         P = np.zeros((4,4))
+         P[0][0] = 1.0 / (a * t)
+         P[1][1] = 1.0 / t
+         P[3][2] = -1.0
+
+         if f is None:
+             P[2][2] = -1.0
+             P[2][3] = -2.0 * n
+         else:
+             P[2][2] = (f + n) / (n - f)
+             P[2][3] = (2 * f * n) / (n - f)
+
+         return P
+
+
+ class OrthographicCamera(Camera):
+     """An orthographic camera for orthographic projection.
+
+     Parameters
+     ----------
+     xmag : float
+         The floating-point horizontal magnification of the view.
+     ymag : float
+         The floating-point vertical magnification of the view.
+     znear : float
+         The floating-point distance to the near clipping plane.
+         If not specified, defaults to 0.05.
+     zfar : float
+         The floating-point distance to the far clipping plane.
+         ``zfar`` must be greater than ``znear``.
+         If not specified, defaults to 100.0.
+     name : str, optional
+         The user-defined name of this object.
+     """
+
+     def __init__(self,
+                  xmag,
+                  ymag,
+                  znear=DEFAULT_Z_NEAR,
+                  zfar=DEFAULT_Z_FAR,
+                  name=None):
+         super(OrthographicCamera, self).__init__(
+             znear=znear,
+             zfar=zfar,
+             name=name,
+         )
+
+         self.xmag = xmag
+         self.ymag = ymag
+
+     @property
+     def xmag(self):
+         """float : The horizontal magnification of the view.
+         """
+         return self._xmag
+
+     @xmag.setter
+     def xmag(self, value):
+         value = float(value)
+         if value <= 0.0:
+             raise ValueError('X magnification must be positive')
+         self._xmag = value
+
+     @property
+     def ymag(self):
+         """float : The vertical magnification of the view.
+         """
+         return self._ymag
+
+     @ymag.setter
+     def ymag(self, value):
+         value = float(value)
+         if value <= 0.0:
+             raise ValueError('Y magnification must be positive')
+         self._ymag = value
+
+     @property
+     def znear(self):
+         """float : The distance to the near clipping plane.
+         """
+         return self._znear
+
+     @znear.setter
+     def znear(self, value):
+         value = float(value)
+         if value <= 0:
+             raise ValueError('z-near must be > 0.0')
+         self._znear = value
+
+     def get_projection_matrix(self, width=None, height=None):
+         """Return the OpenGL projection matrix for this camera.
+
+         Parameters
+         ----------
+         width : int
+             Width of the current viewport, in pixels.
+             Unused in this function.
+         height : int
+             Height of the current viewport, in pixels.
+             Unused in this function.
+         """
+         xmag = self.xmag
+         ymag = self.ymag
+
+         # If screen width/height defined, rescale xmag
+         if width is not None and height is not None:
+             xmag = width / height * ymag
+
+         n = self.znear
+         f = self.zfar
+         P = np.zeros((4,4))
+         P[0][0] = 1.0 / xmag
+         P[1][1] = 1.0 / ymag
+         P[2][2] = 2.0 / (n - f)
+         P[2][3] = (f + n) / (n - f)
+         P[3][3] = 1.0
+         return P
+
+
+ class IntrinsicsCamera(Camera):
+     """A perspective camera with custom intrinsics.
+
+     Parameters
+     ----------
+     fx : float
+         X-axis focal length in pixels.
+     fy : float
+         Y-axis focal length in pixels.
+     cx : float
+         X-axis optical center in pixels.
+     cy : float
+         Y-axis optical center in pixels.
+     znear : float
+         The floating-point distance to the near clipping plane.
+         If not specified, defaults to 0.05.
+     zfar : float
+         The floating-point distance to the far clipping plane.
+         ``zfar`` must be greater than ``znear``.
+         If not specified, defaults to 100.0.
+     name : str, optional
+         The user-defined name of this object.
+     """
+
+     def __init__(self,
+                  fx,
+                  fy,
+                  cx,
+                  cy,
+                  znear=DEFAULT_Z_NEAR,
+                  zfar=DEFAULT_Z_FAR,
+                  name=None):
+         super(IntrinsicsCamera, self).__init__(
+             znear=znear,
+             zfar=zfar,
+             name=name,
+         )
+
+         self.fx = fx
+         self.fy = fy
+         self.cx = cx
+         self.cy = cy
+
+     @property
+     def fx(self):
+         """float : X-axis focal length in pixels.
+         """
+         return self._fx
+
+     @fx.setter
+     def fx(self, value):
+         self._fx = float(value)
+
+     @property
+     def fy(self):
+         """float : Y-axis focal length in pixels.
+         """
+         return self._fy
+
+     @fy.setter
+     def fy(self, value):
+         self._fy = float(value)
+
+     @property
+     def cx(self):
+         """float : X-axis optical center in pixels.
+         """
+         return self._cx
+
+     @cx.setter
+     def cx(self, value):
+         self._cx = float(value)
+
+     @property
+     def cy(self):
+         """float : Y-axis optical center in pixels.
+         """
+         return self._cy
+
+     @cy.setter
+     def cy(self, value):
+         self._cy = float(value)
+
+     def get_projection_matrix(self, width, height):
+         """Return the OpenGL projection matrix for this camera.
+
+         Parameters
+         ----------
+         width : int
+             Width of the current viewport, in pixels.
+         height : int
+             Height of the current viewport, in pixels.
+         """
+         width = float(width)
+         height = float(height)
+
+         cx, cy = self.cx, self.cy
+         fx, fy = self.fx, self.fy
+         if sys.platform == 'darwin':
+             cx = self.cx * 2.0
+             cy = self.cy * 2.0
+             fx = self.fx * 2.0
+             fy = self.fy * 2.0
+
+         P = np.zeros((4,4))
+         P[0][0] = 2.0 * fx / width
+         P[1][1] = 2.0 * fy / height
+         P[0][2] = 1.0 - 2.0 * cx / width
+         P[1][2] = 2.0 * cy / height - 1.0
+         P[3][2] = -1.0
+
+         n = self.znear
+         f = self.zfar
+         if f is None:
+             P[2][2] = -1.0
+             P[2][3] = -2.0 * n
+         else:
+             P[2][2] = (f + n) / (n - f)
+             P[2][3] = (2 * f * n) / (n - f)
+
+         return P
+
+
+ __all__ = ['Camera', 'PerspectiveCamera', 'OrthographicCamera',
+            'IntrinsicsCamera']
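As a worked check of the perspective matrix built above: with ``P[2][2] = (f + n) / (n - f)`` and ``P[2][3] = 2fn / (n - f)``, a point on the near plane (z = -n) lands at NDC depth -1 and one on the far plane (z = -f) at +1 after dividing by the clip-space w = -z. A small sketch:

>>> import numpy as np
>>> cam = PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0,
...                         znear=0.05, zfar=100.0)
>>> P = cam.get_projection_matrix()
>>> near = P @ np.array([0.0, 0.0, -0.05, 1.0])    # point on the near plane
>>> far = P @ np.array([0.0, 0.0, -100.0, 1.0])    # point on the far plane
>>> np.allclose([near[2] / near[3], far[2] / far[3]], [-1.0, 1.0])
True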
pyrender/pyrender/constants.py ADDED
@@ -0,0 +1,150 @@
+ DEFAULT_Z_NEAR = 0.05     # Near clipping plane, in meters
+ DEFAULT_Z_FAR = 100.0     # Far clipping plane, in meters
+ DEFAULT_SCENE_SCALE = 2.0 # Default scene scale
+ MAX_N_LIGHTS = 4          # Maximum number of lights of each type allowed
+ TARGET_OPEN_GL_MAJOR = 4  # Target OpenGL Major Version
+ TARGET_OPEN_GL_MINOR = 1  # Target OpenGL Minor Version
+ MIN_OPEN_GL_MAJOR = 3     # Minimum OpenGL Major Version
+ MIN_OPEN_GL_MINOR = 3     # Minimum OpenGL Minor Version
+ FLOAT_SZ = 4              # Byte size of GL float32
+ UINT_SZ = 4               # Byte size of GL uint32
+ SHADOW_TEX_SZ = 2048      # Width and Height of Shadow Textures
+ TEXT_PADDING = 20         # Width of padding for rendering text (px)
+
+
+ # Flags for render type
+ class RenderFlags(object):
+     """Flags for rendering in the scene.
+
+     Combine them with the bitwise or. For example,
+
+     >>> flags = OFFSCREEN | SHADOWS_DIRECTIONAL | VERTEX_NORMALS
+
+     would result in an offscreen render with directional shadows and
+     vertex normals enabled.
+     """
+     NONE = 0
+     """Normal PBR Render."""
+     DEPTH_ONLY = 1
+     """Only render the depth buffer."""
+     OFFSCREEN = 2
+     """Render offscreen and return the depth and (optionally) color buffers."""
+     FLIP_WIREFRAME = 4
+     """Invert the status of wireframe rendering for each mesh."""
+     ALL_WIREFRAME = 8
+     """Render all meshes as wireframes."""
+     ALL_SOLID = 16
+     """Render all meshes as solids."""
+     SHADOWS_DIRECTIONAL = 32
+     """Render shadows for directional lights."""
+     SHADOWS_POINT = 64
+     """Render shadows for point lights."""
+     SHADOWS_SPOT = 128
+     """Render shadows for spot lights."""
+     SHADOWS_ALL = 32 | 64 | 128
+     """Render shadows for all lights."""
+     VERTEX_NORMALS = 256
+     """Render vertex normals."""
+     FACE_NORMALS = 512
+     """Render face normals."""
+     SKIP_CULL_FACES = 1024
+     """Do not cull back faces."""
+     RGBA = 2048
+     """Render the color buffer with the alpha channel enabled."""
+     FLAT = 4096
+     """Render the color buffer flat, with no lighting computations."""
+     SEG = 8192
+     """Render a segmentation map, drawing each mesh node in a caller-specified color."""
+
+
+ class TextAlign:
+     """Text alignment options for captions.
+
+     Only use one at a time.
+     """
+     CENTER = 0
+     """Center the text by width and height."""
+     CENTER_LEFT = 1
+     """Center the text by height and left-align it."""
+     CENTER_RIGHT = 2
+     """Center the text by height and right-align it."""
+     BOTTOM_LEFT = 3
+     """Put the text in the bottom-left corner."""
+     BOTTOM_RIGHT = 4
+     """Put the text in the bottom-right corner."""
+     BOTTOM_CENTER = 5
+     """Center the text by width and fix it to the bottom."""
+     TOP_LEFT = 6
+     """Put the text in the top-left corner."""
+     TOP_RIGHT = 7
+     """Put the text in the top-right corner."""
+     TOP_CENTER = 8
+     """Center the text by width and fix it to the top."""
+
+
+ class GLTF(object):
+     """Options for GL objects."""
+     NEAREST = 9728
+     """Nearest neighbor interpolation."""
+     LINEAR = 9729
+     """Linear interpolation."""
+     NEAREST_MIPMAP_NEAREST = 9984
+     """Nearest-neighbor filtering with nearest mipmap selection."""
+     LINEAR_MIPMAP_NEAREST = 9985
+     """Linear filtering with nearest mipmap selection."""
+     NEAREST_MIPMAP_LINEAR = 9986
+     """Nearest-neighbor filtering with linear mipmap interpolation."""
+     LINEAR_MIPMAP_LINEAR = 9987
+     """Linear filtering with linear mipmap interpolation (trilinear)."""
+     CLAMP_TO_EDGE = 33071
+     """Clamp to the edge of the texture."""
+     MIRRORED_REPEAT = 33648
+     """Mirror the texture."""
+     REPEAT = 10497
+     """Repeat the texture."""
+     POINTS = 0
+     """Render as points."""
+     LINES = 1
+     """Render as lines."""
+     LINE_LOOP = 2
+     """Render as a line loop."""
+     LINE_STRIP = 3
+     """Render as a line strip."""
+     TRIANGLES = 4
+     """Render as triangles."""
+     TRIANGLE_STRIP = 5
+     """Render as a triangle strip."""
+     TRIANGLE_FAN = 6
+     """Render as a triangle fan."""
+
+
+ class BufFlags(object):
+     POSITION = 0
+     NORMAL = 1
+     TANGENT = 2
+     TEXCOORD_0 = 4
+     TEXCOORD_1 = 8
+     COLOR_0 = 16
+     JOINTS_0 = 32
+     WEIGHTS_0 = 64
+
+
+ class TexFlags(object):
+     NONE = 0
+     NORMAL = 1
+     OCCLUSION = 2
+     EMISSIVE = 4
+     BASE_COLOR = 8
+     METALLIC_ROUGHNESS = 16
+     DIFFUSE = 32
+     SPECULAR_GLOSSINESS = 64
+
+
+ class ProgramFlags:
+     NONE = 0
+     USE_MATERIAL = 1
+     VERTEX_NORMALS = 2
+     FACE_NORMALS = 4
+
+
+ __all__ = ['RenderFlags', 'TextAlign', 'GLTF']
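Since the ``RenderFlags`` values are disjoint powers of two, composed flag words can be tested with bitwise and, which is how callers select render behaviors; a quick sketch:

>>> flags = RenderFlags.RGBA | RenderFlags.SHADOWS_DIRECTIONAL
>>> bool(flags & RenderFlags.SHADOWS_DIRECTIONAL)
True
>>> bool(flags & RenderFlags.SHADOWS_POINT)
False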
pyrender/pyrender/font.py ADDED
@@ -0,0 +1,272 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Font texture loader and processor.
2
+
3
+ Author: Matthew Matl
4
+ """
5
+ import freetype
6
+ import numpy as np
7
+ import os
8
+
9
+ import OpenGL
10
+ from OpenGL.GL import *
11
+
12
+ from .constants import TextAlign, FLOAT_SZ
13
+ from .texture import Texture
14
+ from .sampler import Sampler
15
+
16
+
17
+ class FontCache(object):
18
+ """A cache for fonts.
19
+ """
20
+
21
+ def __init__(self, font_dir=None):
22
+ self._font_cache = {}
23
+ self.font_dir = font_dir
24
+ if self.font_dir is None:
25
+ base_dir, _ = os.path.split(os.path.realpath(__file__))
26
+ self.font_dir = os.path.join(base_dir, 'fonts')
27
+
28
+ def get_font(self, font_name, font_pt):
29
+ # If it's a file, load it directly, else, try to load from font dir.
30
+ if os.path.isfile(font_name):
31
+ font_filename = font_name
32
+ _, font_name = os.path.split(font_name)
33
+ font_name, _ = os.path.split(font_name)
34
+ else:
35
+ font_filename = os.path.join(self.font_dir, font_name) + '.ttf'
36
+
37
+ cid = OpenGL.contextdata.getContext()
38
+ key = (cid, font_name, int(font_pt))
39
+
40
+ if key not in self._font_cache:
41
+ self._font_cache[key] = Font(font_filename, font_pt)
42
+ return self._font_cache[key]
43
+
44
+ def clear(self):
45
+ for key in self._font_cache:
46
+ self._font_cache[key].delete()
47
+ self._font_cache = {}
48
+
49
+
50
+ class Character(object):
51
+ """A single character, with its texture and attributes.
52
+ """
53
+
54
+ def __init__(self, texture, size, bearing, advance):
55
+ self.texture = texture
56
+ self.size = size
57
+ self.bearing = bearing
58
+ self.advance = advance
59
+
60
+
61
+ class Font(object):
+     """A font object.
+
+     Parameters
+     ----------
+     font_file : str
+         The file to load the font from.
+     font_pt : int
+         The height of the font in pixels.
+     """
+
+     def __init__(self, font_file, font_pt=40):
+         self.font_file = font_file
+         self.font_pt = int(font_pt)
+         self._face = freetype.Face(font_file)
+         self._face.set_pixel_sizes(0, font_pt)
+         self._character_map = {}
+
+         # Rasterize the ASCII range up front, one texture per glyph.
+         for i in range(0, 128):
+
+             # Generate texture
+             face = self._face
+             face.load_char(chr(i))
+             buf = face.glyph.bitmap.buffer
+             src = (np.array(buf) / 255.0).astype(np.float32)
+             src = src.reshape((face.glyph.bitmap.rows,
+                                face.glyph.bitmap.width))
+             tex = Texture(
+                 sampler=Sampler(
+                     magFilter=GL_LINEAR,
+                     minFilter=GL_LINEAR,
+                     wrapS=GL_CLAMP_TO_EDGE,
+                     wrapT=GL_CLAMP_TO_EDGE
+                 ),
+                 source=src,
+                 source_channels='R',
+             )
+             character = Character(
+                 texture=tex,
+                 size=np.array([face.glyph.bitmap.width,
+                                face.glyph.bitmap.rows]),
+                 bearing=np.array([face.glyph.bitmap_left,
+                                   face.glyph.bitmap_top]),
+                 advance=face.glyph.advance.x
+             )
+             self._character_map[chr(i)] = character
+
+         self._vbo = None
+         self._vao = None
+
+     @property
+     def font_file(self):
+         """str : The file the font was loaded from.
+         """
+         return self._font_file
+
+     @font_file.setter
+     def font_file(self, value):
+         self._font_file = value
+
+     @property
+     def font_pt(self):
+         """int : The height of the font in pixels.
+         """
+         return self._font_pt
+
+     @font_pt.setter
+     def font_pt(self, value):
+         self._font_pt = int(value)
+
+     def _add_to_context(self):
+
+         self._vao = glGenVertexArrays(1)
+         glBindVertexArray(self._vao)
+         self._vbo = glGenBuffers(1)
+         glBindBuffer(GL_ARRAY_BUFFER, self._vbo)
+         glBufferData(GL_ARRAY_BUFFER, FLOAT_SZ * 6 * 4, None, GL_DYNAMIC_DRAW)
+         glEnableVertexAttribArray(0)
+         glVertexAttribPointer(
+             0, 4, GL_FLOAT, GL_FALSE, 4 * FLOAT_SZ, ctypes.c_void_p(0)
+         )
+         glBindVertexArray(0)
+
+         glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
+         for c in self._character_map:
+             ch = self._character_map[c]
+             if not ch.texture._in_context():
+                 ch.texture._add_to_context()
+
+     def _remove_from_context(self):
+         for c in self._character_map:
+             ch = self._character_map[c]
+             ch.texture.delete()
+         if self._vao is not None:
+             glDeleteVertexArrays(1, [self._vao])
+             glDeleteBuffers(1, [self._vbo])
+             self._vao = None
+             self._vbo = None
+
+     def _in_context(self):
+         return self._vao is not None
+
+     def _bind(self):
+         glBindVertexArray(self._vao)
+
+     def _unbind(self):
+         glBindVertexArray(0)
+
+     def delete(self):
+         self._unbind()
+         self._remove_from_context()
+
+     def render_string(self, text, x, y, scale=1.0,
+                       align=TextAlign.BOTTOM_LEFT):
+         """Render a string to the current view buffer.
+
+         Note
+         ----
+         Assumes a correct shader program is already bound, with uniforms set.
+
+         Parameters
+         ----------
+         text : str
+             The text to render.
+         x : int
+             Horizontal pixel location of text.
+         y : int
+             Vertical pixel location of text.
+         scale : float
+             Scaling factor for text.
+         align : int
+             One of the TextAlign options which specifies where the ``x``
+             and ``y`` parameters lie on the text. For example,
+             :attr:`.TextAlign.BOTTOM_LEFT` means that ``x`` and ``y`` indicate
+             the position of the bottom-left corner of the textbox.
+         """
+         glActiveTexture(GL_TEXTURE0)
+         glEnable(GL_BLEND)
+         glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
+         glDisable(GL_DEPTH_TEST)
+         glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
+         self._bind()
+
+         # Determine width and height of text relative to x, y
+         width = 0.0
+         height = 0.0
+         for c in text:
+             ch = self._character_map[c]
+             height = max(height, ch.bearing[1] * scale)
+             # advance is stored in 1/64 pixel units, hence the shift.
+             width += (ch.advance >> 6) * scale
+
+         # Determine offsets based on alignments
+         xoff = 0
+         yoff = 0
+         if align == TextAlign.BOTTOM_RIGHT:
+             xoff = -width
+         elif align == TextAlign.BOTTOM_CENTER:
+             xoff = -width / 2.0
+         elif align == TextAlign.TOP_LEFT:
+             yoff = -height
+         elif align == TextAlign.TOP_RIGHT:
+             yoff = -height
+             xoff = -width
+         elif align == TextAlign.TOP_CENTER:
+             yoff = -height
+             xoff = -width / 2.0
+         elif align == TextAlign.CENTER:
+             xoff = -width / 2.0
+             yoff = -height / 2.0
+         elif align == TextAlign.CENTER_LEFT:
+             yoff = -height / 2.0
+         elif align == TextAlign.CENTER_RIGHT:
+             xoff = -width
+             yoff = -height / 2.0
+
+         x += xoff
+         y += yoff
+
+         ch = None
+         for c in text:
+             ch = self._character_map[c]
+             xpos = x + ch.bearing[0] * scale
+             ypos = y - (ch.size[1] - ch.bearing[1]) * scale
+             w = ch.size[0] * scale
+             h = ch.size[1] * scale
+
+             # Two triangles per glyph quad: (x, y, u, v) per vertex.
+             vertices = np.array([
+                 [xpos, ypos, 0.0, 0.0],
+                 [xpos + w, ypos, 1.0, 0.0],
+                 [xpos + w, ypos + h, 1.0, 1.0],
+                 [xpos + w, ypos + h, 1.0, 1.0],
+                 [xpos, ypos + h, 0.0, 1.0],
+                 [xpos, ypos, 0.0, 0.0],
+             ], dtype=np.float32)
+
+             ch.texture._bind()
+
+             glBindBuffer(GL_ARRAY_BUFFER, self._vbo)
+             glBufferData(
+                 GL_ARRAY_BUFFER, FLOAT_SZ * 6 * 4, vertices, GL_DYNAMIC_DRAW
+             )
+             # TODO: make this more efficient; glBufferSubData is broken
+             # glBufferSubData(
+             #     GL_ARRAY_BUFFER, 0, 6 * 4 * FLOAT_SZ,
+             #     np.ascontiguousarray(vertices.flatten())
+             # )
+             glDrawArrays(GL_TRIANGLES, 0, 6)
+             x += (ch.advance >> 6) * scale
+
+         self._unbind()
+         if ch:
+             ch.texture._unbind()
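
Taken together, `FontCache` memoizes one `Font` per (context, font, size) triple, and `Font.render_string` draws one textured quad per glyph. A minimal usage sketch, assuming a live OpenGL context and a bound text shader (preconditions the classes do not set up themselves; the `_character_map` access pokes at a private attribute purely for illustration):

    from pyrender.font import FontCache  # assumed import path

    cache = FontCache()  # defaults to the bundled fonts/ directory
    font = cache.get_font('OpenSans-Bold', font_pt=40)  # loads OpenSans-Bold.ttf

    # Glyph metrics; advance is in 1/64 pixel units, hence the >> 6.
    ch = font._character_map['A']
    print(ch.size, ch.bearing, ch.advance >> 6)

    # With a text shader program bound and its uniforms set:
    # font.render_string('hello', x=10, y=10, scale=1.0)

    cache.clear()  # deletes the per-glyph textures
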
pyrender/pyrender/fonts/OpenSans-Bold.ttf ADDED
Binary file (225 kB).
 
pyrender/pyrender/fonts/OpenSans-BoldItalic.ttf ADDED
Binary file (213 kB).
 
pyrender/pyrender/fonts/OpenSans-ExtraBold.ttf ADDED
Binary file (223 kB).
 
pyrender/pyrender/fonts/OpenSans-ExtraBoldItalic.ttf ADDED
Binary file (213 kB).
 
pyrender/pyrender/fonts/OpenSans-Italic.ttf ADDED
Binary file (213 kB).