hysts (HF staff) committed on
Commit 3f4e797
1 Parent(s): 3997dee

Update 18231

Files changed (1)
  1. data.json +14 -16
data.json CHANGED
@@ -11986,12 +11986,8 @@
       "GitHub": [
         "https://github.com/Vchitect/SEINE"
       ],
-      "Space": [
-        "Vchitect/SEINE"
-      ],
-      "Model": [
-        "Vchitect/SEINE"
-      ],
+      "Space": [],
+      "Model": [],
       "Dataset": []
     },
     {
@@ -24012,12 +24008,8 @@
       "GitHub": [
         "https://github.com/guoyww/AnimateDiff"
       ],
-      "Space": [
-        "guoyww/AnimateDiff"
-      ],
-      "Model": [
-        "guoyww/animatediff"
-      ],
+      "Space": [],
+      "Model": [],
       "Dataset": []
     },
     {
@@ -24871,10 +24863,16 @@
       "abstract": "The most advanced text-to-image (T2I) models require significant training costs (e.g., millions of GPU hours), seriously hindering the fundamental innovation for the AIGC community while increasing CO2 emissions. This paper introduces PixArt-$\\alpha$, a Transformer-based T2I diffusion model whose image generation quality is competitive with state-of-the-art image generators (e.g., Imagen, SDXL, and even Midjourney), reaching near-commercial application standards. Additionally, it supports high-resolution image synthesis up to 1024px resolution with low training cost, as shown in Figure 1 and 2. To achieve this goal, three core designs are proposed: (1) Training strategy decomposition: We devise three distinct training steps that separately optimize pixel dependency, text-image alignment, and image aesthetic quality; (2) Efficient T2I Transformer: We incorporate cross-attention modules into Diffusion Transformer (DiT) to inject text conditions and streamline the computation-intensive class-condition branch; (3) High-informative data: We emphasize the significance of concept density in text-image pairs and leverage a large Vision-Language model to auto-label dense pseudo-captions to assist text-image alignment learning. As a result, PixArt-$\\alpha$'s training speed markedly surpasses existing large-scale T2I models, e.g., PixArt-$\\alpha$ only takes 10.8% of Stable Diffusion v1.5's training time (~675 vs. ~6,250 A100 GPU days), saving nearly \\\\$300,000 (\\\\$26,000 vs. \\\\$320,000) and reducing 90% CO2 emissions. Moreover, compared with a larger SOTA model, RAPHAEL, our training cost is merely 1%. Extensive experiments demonstrate that PixArt-$\\alpha$ excels in image quality, artistry, and semantic control. We hope PixArt-$\\alpha$ will provide new insights to the AIGC community and startups to accelerate building their own high-quality yet low-cost generative models from scratch.",
       "type": "Spotlight Poster",
       "OpenReview": "https://openreview.net/forum?id=eAKmQPe3m1",
-      "arxiv_id": "",
-      "GitHub": [],
-      "Space": [],
-      "Model": [],
+      "arxiv_id": "2310.00426",
+      "GitHub": [
+        "https://github.com/PixArt-alpha/PixArt-alpha"
+      ],
+      "Space": [
+        "PixArt-alpha/PixArt-alpha"
+      ],
+      "Model": [
+        "PixArt-alpha/PixArt-XL-2-1024-MS"
+      ],
       "Dataset": []
     },
     {
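
For context on the entry layout touched by this diff: each record in data.json carries Hub references under "Space", "Model", "GitHub", and "Dataset". The following is a minimal sketch, not part of this commit, of how one might check whether the Space and Model ids listed in the file still resolve on the Hugging Face Hub. It assumes data.json is a top-level JSON list of such records sitting in the working directory and that the huggingface_hub package is installed; the repo ids in the comment come from this diff.

```python
# Minimal sketch (not part of this commit): check the "Space" and "Model" ids
# listed in data.json against the Hugging Face Hub.
# Assumes data.json is a top-level list of entries and that
# `pip install huggingface_hub` has been run.
import json

from huggingface_hub import HfApi
from huggingface_hub.utils import RepositoryNotFoundError

api = HfApi()

with open("data.json") as f:
    papers = json.load(f)

for paper in papers:
    # Each entry lists Hub repo ids, e.g.
    # "Model": ["PixArt-alpha/PixArt-XL-2-1024-MS"].
    for repo_id in paper.get("Space", []):
        try:
            api.space_info(repo_id)
        except RepositoryNotFoundError:
            print(f"missing Space: {repo_id}")
    for repo_id in paper.get("Model", []):
        try:
            api.model_info(repo_id)
        except RepositoryNotFoundError:
            print(f"missing Model: {repo_id}")
```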