Colin Leong committed on
Commit 9f4c52d · 1 Parent(s): 5190fcf

CDL: copying files over

Files changed (4)
  1. README.md +1 -1
  2. app.py +213 -0
  3. requirements.txt +7 -0
  4. visualize_selected_points.png +0 -0
README.md CHANGED
@@ -10,4 +10,4 @@ pinned: false
  short_description: Visualize pose-format components and points.
  ---
 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ Copied from https://github.com/cleong110/explore-pose-components
app.py ADDED
@@ -0,0 +1,213 @@
+ import streamlit as st
+ from streamlit.runtime.uploaded_file_manager import UploadedFile
+ import pandas as pd
+ import numpy as np
+ from pose_format import Pose
+ from pose_format.pose_visualizer import PoseVisualizer
+ from pathlib import Path
+ from pyzstd import decompress
+ from PIL import Image
+ import cv2
+ import mediapipe as mp
+ import torch
+
+ mp_holistic = mp.solutions.holistic
+ # Unique point indices (as strings) covered by MediaPipe's FACEMESH_CONTOURS connections.
+ FACEMESH_CONTOURS_POINTS = [str(p) for p in sorted(set([p for p_tup in list(mp_holistic.FACEMESH_CONTOURS) for p in p_tup]))]
+
+ def pose_normalization_info(pose_header):
+     if pose_header.components[0].name == "POSE_LANDMARKS":
+         return pose_header.normalization_info(p1=("POSE_LANDMARKS", "RIGHT_SHOULDER"),
+                                               p2=("POSE_LANDMARKS", "LEFT_SHOULDER"))
+
+     if pose_header.components[0].name == "BODY_135":
+         return pose_header.normalization_info(p1=("BODY_135", "RShoulder"), p2=("BODY_135", "LShoulder"))
+
+     if pose_header.components[0].name == "pose_keypoints_2d":
+         return pose_header.normalization_info(p1=("pose_keypoints_2d", "RShoulder"),
+                                               p2=("pose_keypoints_2d", "LShoulder"))
+
+
+ def pose_hide_legs(pose):
+     if pose.header.components[0].name == "POSE_LANDMARKS":
+         point_names = ["KNEE", "ANKLE", "HEEL", "FOOT_INDEX"]
+         # pylint: disable=protected-access
+         points = [
+             pose.header._get_point_index("POSE_LANDMARKS", side + "_" + n)
+             for n in point_names
+             for side in ["LEFT", "RIGHT"]
+         ]
+         pose.body.confidence[:, :, points] = 0
+         pose.body.data[:, :, points, :] = 0
+         return pose
+     else:
+         raise ValueError("Unknown pose header schema for hiding legs")
+
+
+ def preprocess_pose(pose):
+     pose = pose.get_components(["POSE_LANDMARKS", "FACE_LANDMARKS", "LEFT_HAND_LANDMARKS", "RIGHT_HAND_LANDMARKS"],
+                                {"FACE_LANDMARKS": FACEMESH_CONTOURS_POINTS})
+
+     pose = pose.normalize(pose_normalization_info(pose.header))
+     pose = pose_hide_legs(pose)
+
+     # from sign_vq.data.normalize import pre_process_mediapipe, normalize_mean_std
+     # from pose_anonymization.appearance import remove_appearance
+
+     # pose = remove_appearance(pose)
+     # pose = pre_process_mediapipe(pose)
+     # pose = normalize_mean_std(pose)
+
+     feat = np.nan_to_num(pose.body.data)
+     feat = feat.reshape(feat.shape[0], -1)
+
+     pose_frames = torch.from_numpy(np.expand_dims(feat, axis=0)).float()
+
+     return pose_frames
+
+
+ # @st.cache_data(hash_funcs={UploadedFile: lambda p: str(p.name)})
+ def load_pose(uploaded_file: UploadedFile) -> Pose:
+     # with input_path.open("rb") as f_in:
+     if uploaded_file.name.endswith(".zst"):
+         return Pose.read(decompress(uploaded_file.read()))
+     else:
+         return Pose.read(uploaded_file.read())
+
+ @st.cache_data(hash_funcs={Pose: lambda p: np.array(p.body.data)})
+ def get_pose_frames(pose: Pose, transparency: bool = False):
+     v = PoseVisualizer(pose)
+     frames = [frame_data for frame_data in v.draw()]
+
+     if transparency:
+         cv_code = v.cv2.COLOR_BGR2RGBA
+     else:
+         cv_code = v.cv2.COLOR_BGR2RGB
+     images = [Image.fromarray(v.cv2.cvtColor(frame, cv_code)) for frame in frames]
+     return frames, images
+
+ def get_pose_gif(pose: Pose, step: int = 1, fps: float = None):
+     if fps is not None:
+         pose.body.fps = fps
+     v = PoseVisualizer(pose)
+     frames = [frame_data for frame_data in v.draw()]
+     frames = frames[::step]
+     return v.save_gif(None, frames=frames)
+
+
+ uploaded_file = st.file_uploader("gimme a .pose file", type=[".pose", ".pose.zst"])
+
+
+ if uploaded_file is not None:
+     with st.spinner(f"Loading {uploaded_file.name}"):
+         pose = load_pose(uploaded_file)
+         frames, images = get_pose_frames(pose=pose)
+     st.success("done loading!")
+     # st.write(f"pose shape: {pose.body.data.shape}")
+
+     header = pose.header
+     st.write("### File Info")
+     with st.expander(f"Show full Pose-format header from {uploaded_file.name}"):
+         st.write(header)
+         # st.write(pose.body.data.shape)
+         # st.write(pose.body.fps)
+
+     st.write("### Selection")
+
+     components = pose.header.components
+
+     component_names = [component.name for component in components]
+     chosen_component_names = component_names
+
+     component_selection = st.radio("How to select components?", options=["manual", "signclip"])
+     if component_selection == "manual":
+         st.write("### Component selection: ")
+         chosen_component_names = st.pills("Components to visualize", options=component_names, selection_mode="multi", default=component_names)
+
+         # st.write(chosen_component_names)
+
+         st.write("### Point selection:")
+         point_names = []
+         new_chosen_components = []
+         points_dict = {}
+         for component in pose.header.components:
+             with st.expander(f"points for {component.name}"):
+                 if component.name in chosen_component_names:
+                     st.write(f"#### {component.name}")
+                     selected_points = st.multiselect(f"points for component {component.name}:", options=component.points, default=component.points)
+                     if selected_points == component.points:
+                         st.write(f"All selected, no need to add a points dict entry for {component.name}")
+                     else:
+                         st.write(f"Adding dictionary for {component.name}")
+                         points_dict[component.name] = selected_points
+
+         # selected_points = st.multiselect("points to visualize", options=point_names, default=point_names)
+         if chosen_component_names:
+             if not points_dict:
+                 points_dict = None
+             # else:
+             #     st.write(points_dict)
+             # st.write(chosen_component_names)
+
+             pose = pose.get_components(chosen_component_names, points=points_dict)
+             # st.write(pose.header)
+
+     elif component_selection == "signclip":
+         st.write("Selected landmarks used for SignCLIP. (Face contours only)")
+         pose = pose.get_components(["POSE_LANDMARKS", "FACE_LANDMARKS", "LEFT_HAND_LANDMARKS", "RIGHT_HAND_LANDMARKS"],
+                                    {"FACE_LANDMARKS": FACEMESH_CONTOURS_POINTS})
+
+         # pose = pose.normalize(pose_normalization_info(pose.header))  # Visualization goes blank
+         pose = pose_hide_legs(pose)
+         with st.expander("Show facemesh contour points:"):
+             st.write(f"{FACEMESH_CONTOURS_POINTS}")
+         with st.expander("Show header:"):
+             st.write(pose.header)
+         # st.write("signclip selected, new header:")
+         # st.write(pose.body.data.shape)
+         # st.write(pose.header)
+     else:
+         pass
+
+
+     st.write("### Visualization")
+     width = st.select_slider("select width of images", list(range(1, pose.header.dimensions.width + 1)), value=pose.header.dimensions.width // 2)
+     step = st.select_slider("Step value to select every nth image", list(range(1, len(frames))), value=1)
+     fps = st.slider("fps for visualization: ", min_value=1.0, max_value=pose.body.fps, value=pose.body.fps)
+     visualize_clicked = st.button("Visualize!")
+
+     if visualize_clicked:
+         st.write("Generating gif...")
+         # st.write(pose.body.data.shape)
+
+         st.image(get_pose_gif(pose=pose, step=step, fps=fps))
+
+         with st.expander("See header"):
+             st.write("### header after filtering:")
+             st.write(pose.header)
+
+         # st.write(pose.body.data.shape)
+
+         # st.write(visualize_pose(pose=pose))  # bunch of ndarrays
+         # st.write([Image.fromarray(v.cv2.cvtColor(frame, cv_code)) for frame in frames])
+
+         # for i, image in enumerate(images[::n]):
+         #     print(f"i={i}")
+         #     st.image(image=image, width=width)
+
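For context, a minimal standalone sketch of the pose-format flow that app.py wires into Streamlit: read a .pose file, keep a subset of components with get_components(), and render a GIF with PoseVisualizer. The paths example.pose and hands_only.gif are illustrative placeholders, not files in this commit.

    from pathlib import Path

    from pose_format import Pose
    from pose_format.pose_visualizer import PoseVisualizer

    # "example.pose" is a placeholder input path, not part of this commit.
    pose = Pose.read(Path("example.pose").read_bytes())

    # Keep only the hand components, mirroring the get_components() calls in app.py.
    pose = pose.get_components(["LEFT_HAND_LANDMARKS", "RIGHT_HAND_LANDMARKS"])

    # Draw the frames and save them as a GIF, as get_pose_gif() does above.
    v = PoseVisualizer(pose)
    v.save_gif("hands_only.gif", frames=list(v.draw()))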
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ streamlit
+ pose-format
+ vidgear
+ pyzstd
+ opencv-python
+ mediapipe
+ torch
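With these dependencies installed (for example via pip install -r requirements.txt), the app should be runnable locally with the standard streamlit run app.py invocation; nothing beyond the stock Streamlit entry point is assumed here.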
visualize_selected_points.png ADDED