hysts (HF staff) committed
Commit bf742c5 • 1 Parent(s): 8612fe9

Files changed (3):
  1. README.md +2 -1
  2. app.py +20 -40
  3. requirements.txt +5 -5
README.md CHANGED

@@ -4,9 +4,10 @@ emoji: 🏃
 colorFrom: purple
 colorTo: gray
 sdk: gradio
-sdk_version: 3.19.1
+sdk_version: 3.36.1
 app_file: app.py
 pinned: false
+suggested_hardware: t4-small
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py CHANGED

@@ -20,23 +20,13 @@ if os.getenv('SYSTEM') == 'spaces':
 
 from model import Model
 
-DESCRIPTION = '''# Text2Human
+DESCRIPTION = '''# [Text2Human](https://github.com/yumingj/Text2Human)
 
-This is an unofficial demo for <a href="https://github.com/yumingj/Text2Human">https://github.com/yumingj/Text2Human</a>.
 You can modify sample steps and seeds. By varying seeds, you can sample different human images under the same pose, shape description, and texture description. The larger the sample steps, the better quality of the generated images. (The default value of sample steps is 256 in the original repo.)
 
 Label image generation step can be skipped. However, in that case, the input label image must be 512x256 in size and must contain only the specified colors.
 '''
 
-
-def set_example_image(example: list) -> dict:
-    return gr.update(value=example[0])
-
-
-def set_example_text(example: list) -> dict:
-    return gr.update(value=example[0])
-
-
 model = Model()
 
 with gr.Blocks(css='style.css') as demo:
@@ -51,9 +41,8 @@ with gr.Blocks(css='style.css') as demo:
             pose_data = gr.State()
         with gr.Row():
             paths = sorted(pathlib.Path('pose_images').glob('*.png'))
-            example_images = gr.Dataset(components=[input_image],
-                                        samples=[[path.as_posix()]
-                                                 for path in paths])
+            gr.Examples(examples=[[path.as_posix()] for path in paths],
+                        inputs=input_image)
 
         with gr.Row():
             shape_text = gr.Textbox(
@@ -62,10 +51,10 @@ with gr.Blocks(css='style.css') as demo:
                 '''<gender>, <sleeve length>, <length of lower clothing>, <outer clothing type>, <other accessories1>, ...
 Note: The outer clothing type and accessories can be omitted.''')
         with gr.Row():
-            shape_example_texts = gr.Dataset(
-                components=[shape_text],
-                samples=[['man, sleeveless T-shirt, long pants'],
-                         ['woman, short-sleeve T-shirt, short jeans']])
+            gr.Examples(
+                examples=[['man, sleeveless T-shirt, long pants'],
+                          ['woman, short-sleeve T-shirt, short jeans']],
+                inputs=shape_text)
         with gr.Row():
             generate_label_button = gr.Button('Generate Label Image')
 
@@ -83,19 +72,23 @@ Note: The outer clothing type and accessories can be omitted.''')
 Note: Currently, only 5 types of textures are supported, i.e., pure color, stripe/spline, plaid/lattice, floral, denim.'''
             )
         with gr.Row():
-            texture_example_texts = gr.Dataset(components=[texture_text],
-                                               samples=[
-                                                   ['pure color, denim'],
-                                                   ['floral, stripe'],
-                                               ])
+            gr.Examples(examples=[
+                ['pure color, denim'],
+                ['floral, stripe'],
+            ],
+                        inputs=texture_text)
         with gr.Row():
             sample_steps = gr.Slider(label='Sample Steps',
                                      minimum=10,
                                      maximum=300,
-                                     value=10,
-                                     step=10)
+                                     step=10,
+                                     value=10)
         with gr.Row():
-            seed = gr.Slider(0, 1000000, value=0, step=1, label='Seed')
+            seed = gr.Slider(label='Seed',
+                             minimum=0,
+                             maximum=1000000,
+                             step=1,
+                             value=0)
         with gr.Row():
             generate_human_button = gr.Button('Generate Human')
 
@@ -122,17 +115,4 @@ Note: Currently, only 5 types of textures are supported, i.e., pure color, stripe/spline, plaid/lattice, floral, denim.'''
                 seed,
             ],
             outputs=result)
-    example_images.click(fn=set_example_image,
-                         inputs=example_images,
-                         outputs=example_images.components,
-                         queue=False)
-    shape_example_texts.click(fn=set_example_text,
-                              inputs=shape_example_texts,
-                              outputs=shape_example_texts.components,
-                              queue=False)
-    texture_example_texts.click(fn=set_example_text,
-                                inputs=texture_example_texts,
-                                outputs=texture_example_texts.components,
-                                queue=False)
-
-demo.queue().launch(show_api=False)
+demo.queue(max_size=10).launch()
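For context, here is a minimal standalone sketch of the migration this diff performs, assuming Gradio 3.36.x (the `sdk_version` pinned in the README above). `gr.Examples` wires up the click-to-populate behavior itself, which is why the `set_example_image`/`set_example_text` helpers and the three manual `.click(...)` registrations could be deleted. The layout below is illustrative, not the app's full UI.

```python
import gradio as gr

with gr.Blocks() as demo:
    shape_text = gr.Textbox(label='Shape Description')

    # Old pattern (removed by this commit): a gr.Dataset plus an explicit
    # click handler that copies the selected sample into the textbox.
    #
    #   examples = gr.Dataset(components=[shape_text],
    #                         samples=[['man, sleeveless T-shirt, long pants']])
    #   examples.click(fn=lambda ex: gr.update(value=ex[0]),
    #                  inputs=examples,
    #                  outputs=shape_text,
    #                  queue=False)

    # New pattern: gr.Examples registers the on-click fill automatically.
    gr.Examples(examples=[['man, sleeveless T-shirt, long pants'],
                          ['woman, short-sleeve T-shirt, short jeans']],
                inputs=shape_text)

# queue(max_size=10) caps the number of pending requests, matching the new
# launch line; dropping show_api=False re-enables the API docs page.
demo.queue(max_size=10).launch()
```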
requirements.txt CHANGED

@@ -1,12 +1,12 @@
-einops==0.4.1
+einops==0.6.1
 lpips==0.1.4
 mmcv-full==1.5.2
 mmsegmentation==0.24.1
-numpy==1.22.3
+numpy==1.23.5
 openmim==0.1.5
-Pillow==9.1.1
+Pillow==9.5.0
 sentence-transformers==2.2.2
-tokenizers==0.12.1
+tokenizers==0.13.3
 torch==1.11.0
 torchvision==0.12.0
-transformers==4.19.2
+transformers==4.30.2
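The dependency changes are version-pin bumps only. As an aside, a quick sanity check (illustrative, not part of the commit) that an environment matches the new pins:

```python
# Illustrative helper, not part of the commit: compare installed package
# versions against the pins from the updated requirements.txt.
from importlib.metadata import PackageNotFoundError, version

PINS = {
    'einops': '0.6.1',
    'numpy': '1.23.5',
    'Pillow': '9.5.0',
    'tokenizers': '0.13.3',
    'transformers': '4.30.2',
}

for pkg, pinned in PINS.items():
    try:
        installed = version(pkg)
    except PackageNotFoundError:
        installed = 'not installed'
    status = 'OK' if installed == pinned else f'expected {pinned}'
    print(f'{pkg}: {installed} ({status})')
```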