ameerazam08 committed
Commit
21c6539
Parent: bb62f4c

Update src/app/about-event.tsx

Files changed (1):
1. src/app/about-event.tsx (+12, -0)
src/app/about-event.tsx CHANGED
@@ -13,6 +13,18 @@ import React from 'react';
  // paper_links :""
  // },
const EVENT_INFO = [
+
+
+  {
+    title: "PeRFlow: Piecewise Rectified Flow as Universal Plug-and-Play Accelerator",
+    description: "PeRFlow trains piecewise-linear rectified flow models for fast sampling. These models can be initialized from pretrained diffusion models, such as Stable Diffusion (SD). The obtained PeRFlow weights serve as a general accelerator module that is compatible with various fine-tuned stylized SD models as well as SD-based generation/editing pipelines. Specifically, the delta weights (ΔW) \
+      are computed as the PeRFlow weights minus the pretrained SD weights. One can fuse the PeRFlow ΔW \
+      into various SD pipelines for (conditional) image generation/editing to enable high-quality few-step inference.",
+    subTitle: "Finetune LORAs / Diffusion Models / PeRFlow",
+    imageName: "perflow-v1.mp4",
+    paper_links: "https://piecewise-rectified-flow.github.io/"
+  },
+
  {
    title: "Deformable One-shot Face Stylization via DINO Semantic Guidance",
    description: "This paper presents a novel approach to one-shot face stylization, focusing on appearance and structure. They use a self-supervised vision transformer, DINO-ViT, and integrate spatial transformers into StyleGAN for deformation-aware stylization. Innovative constraints and style-mixing enhance deformability and efficiency, demonstrating superiority over existing methods through extensive comparisons. Code is available at https://github.com/zichongc/DoesFS.",