ameerazam08 commited on
Commit
2725384
1 Parent(s): 706baed

new papers added

Browse files
Files changed (1) hide show
  1. src/app/about-event.tsx +26 -0
src/app/about-event.tsx CHANGED
@@ -17,6 +17,32 @@ import React from 'react';
17
 
18
  const EVENT_INFO = [
19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  {
21
  title: "Animatable Gaussians: Learning Pose-dependent Gaussian Maps for High-fidelity Human Avatar Modeling",
22
  description: "Modeling animatable human avatars from RGB videos is a long-standing and challenging problem. Recent works usually adopt MLP-based neural radiance fields (NeRF) to represent 3D humans, but it remains difficult for pure MLPs to regress pose-dependent garment details.",
 
17
 
18
  const EVENT_INFO = [
19
 
20
+ // {
21
+ // title: "Title",
22
+ // description: "",
23
+ // subTitle: "",
24
+ // imageName : "paper12.png",
25
+ // paper_links :""
26
+ // },
27
+
28
+ {
29
+ title: "Diffusion Self-Guidance for Controllable Image Generation",
30
+ description: "TL;DR: Self-guidance is a method for controllable image generation that guides sampling using only the attention and activations of a pretrained diffusion model. Without any extra models or training, you can move or resize objects, or even replace them with items from real images, without changing the rest of the scene. You can also borrow the appearance of another image or rearrange scenes into a desired layout.",
31
+ subTitle: "Diffusion/Controllable Image Generation",
32
+ imageName : "diffusion-control.png",
33
+ paper_links :"https://dave.ml/selfguidance/"
34
+ },
35
+
36
+
37
+
38
+ {
39
+ title: "VLOGGER: Multimodal Diffusion for Embodied Avatar Synthesis",
40
+ description: "VLOGGER is a novel framework to synthesize humans from audio. Given a single input image like the ones shown on the first column, and a sample audio input, our method generates photorealistic and temporally coherent videos of the person talking and vividly moving.",
41
+ subTitle: "Synthetic Humans/Embodied Avatar",
42
+ imageName : "video_translation.mp4",
43
+ paper_links :"https://enriccorona.github.io/vlogger/"
44
+ },
45
+
46
  {
47
  title: "Animatable Gaussians: Learning Pose-dependent Gaussian Maps for High-fidelity Human Avatar Modeling",
48
  description: "Modeling animatable human avatars from RGB videos is a long-standing and challenging problem. Recent works usually adopt MLP-based neural radiance fields (NeRF) to represent 3D humans, but it remains difficult for pure MLPs to regress pose-dependent garment details.",