Added missed-edge detection, but it is not much better
- handcrafted_solution.py +127 -65
- script.py +5 -3
- test_solution.ipynb +21 -20
handcrafted_solution.py
CHANGED
@@ -26,7 +26,29 @@ def empty_solution():
     return np.zeros((2, 3)), [(0, 1)]
 
 
+def convert_entry_to_human_readable(entry):
+    out = {}
+    already_good = {'__key__', 'wf_vertices', 'wf_edges', 'edge_semantics', 'mesh_vertices', 'mesh_faces',
+                    'face_semantics', 'K', 'R', 't'}
+    for k, v in entry.items():
+        if k in already_good:
+            out[k] = v
+            continue
+        match k:
+            case 'points3d':
+                out[k] = read_points3D_binary(fid=io.BytesIO(v))
+            case 'cameras':
+                out[k] = read_cameras_binary(fid=io.BytesIO(v))
+            case 'images':
+                out[k] = read_images_binary(fid=io.BytesIO(v))
+            case 'ade20k' | 'gestalt':
+                out[k] = [PImage.open(io.BytesIO(x)).convert('RGB') for x in v]
+            case 'depthcm':
+                out[k] = [PImage.open(io.BytesIO(x)) for x in entry['depthcm']]
+    return out
+
+
 def remove_undesired_objects(image):
     image = image.astype('uint8')
     nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(image, connectivity=4)
     sizes = stats[:, -1]
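Note on convert_entry_to_human_readable: it relies on Python 3.10+ structural pattern matching, and on io, PIL (PImage), and the COLMAP readers (read_points3D_binary, read_cameras_binary, read_images_binary) being imported elsewhere in the module. A minimal, self-contained sketch of the same dispatch pattern, with toy decoders standing in for those readers:

import io

def convert_toy_entry(entry):
    out = {}
    already_good = {'__key__', 'K', 'R', 't'}
    for k, v in entry.items():
        if k in already_good:
            out[k] = v  # pass through unchanged
            continue
        match k:
            case 'text':
                out[k] = v.decode('utf-8')  # decode raw bytes, like the PImage branches
            case 'ints' | 'floats':  # one arm handling two keys, like 'ade20k' | 'gestalt'
                out[k] = [float(x) for x in io.BytesIO(v).read().split()]
    return out

print(convert_toy_entry({'__key__': 'house_0', 'text': b'hello', 'ints': b'1 2 3'}))
# {'__key__': 'house_0', 'text': 'hello', 'ints': [1.0, 2.0, 3.0]}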
@@ -47,7 +69,7 @@ def clean_image(image_gestalt) -> np.ndarray:
     image_gestalt = np.array(image_gestalt)
     unclassified_mask = cv2.inRange(image_gestalt, unclassified + 0.0, unclassified + 0.8)
     unclassified_mask = cv2.bitwise_not(unclassified_mask)
-    mask = …
+    mask = remove_undesired_objects(unclassified_mask).astype(np.uint8)
     mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, np.ones((11, 11), np.uint8), iterations=11)
     mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, np.ones((11, 11), np.uint8), iterations=2)
 
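The diff shows only the first lines of remove_undesired_objects. A plausible completion (an assumption, not the repo's exact code) keeps the largest foreground component, which is consistent with how clean_image uses it to build a binary mask:

import cv2
import numpy as np

def keep_largest_component(image):
    image = image.astype('uint8')
    nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(image, connectivity=4)
    sizes = stats[:, -1]                 # last stats column is cv2.CC_STAT_AREA
    if nb_components < 2:                # only the background label present
        return np.zeros_like(image)
    largest = 1 + np.argmax(sizes[1:])   # skip label 0 (background)
    return (output == largest).astype(np.uint8) * 255

mask = np.zeros((64, 64), np.uint8)
mask[5:10, 5:10] = 255                   # small blob, dropped
mask[20:60, 20:60] = 255                 # large blob, kept
print(keep_largest_component(mask).sum() // 255)  # 1600 pixels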
@@ -105,32 +127,65 @@ def get_missed_vertices(vertices, inferred_centroids, *, min_missing_distance=20…
     vertices = KDTree(vertices)
     closest = vertices.query(inferred_centroids, k=1, distance_upper_bound=min_missing_distance)
     missed_points = inferred_centroids[closest[1] == len(vertices.data)]
+
     return missed_points
 
 
-def get_vertices_and_edges_from_segmentation(gest_seg_np, *, color_range=4., poi…
-… [22 more deleted lines truncated in the original diff view]
+def get_lines_and_directions(gest_seg_np, edge_class, *, color_range=4., rho, theta, threshold, min_line_length,
+                             max_line_gap, extend, **kwargs):
+    edge_color = np.array(gestalt_color_mapping[edge_class])
+
+    mask = cv2.inRange(gest_seg_np,
+                       edge_color - color_range,
+                       edge_color + color_range)
+    mask = cv2.morphologyEx(mask,
+                            cv2.MORPH_DILATE, np.ones((3, 3)), iterations=1)
+
+    if not np.any(mask):
+        return [], []
+
+    # Run Hough on edge detected image
+    # Output "lines" is an array containing endpoints of detected line segments
+    cv2.GaussianBlur(mask, (11, 11), 0, mask)
+    lines = cv2.HoughLinesP(mask, rho, theta, threshold, np.array([]),
+                            min_line_length, max_line_gap)
+
+    if lines is None:
+        return [], []
+
+    line_directions = []
+    edges = []
+    for line_idx, line in enumerate(lines):
+        for x1, y1, x2, y2 in line:
+            if x1 < x2:
+                x1, y1, x2, y2 = x2, y2, x1, y1
+            direction = (np.array([x2 - x1, y2 - y1]))
+            direction = direction / np.linalg.norm(direction)
+            line_directions.append(direction)
+
+            direction = extend * direction
+
+            x1, y1 = (-direction + (x1, y1)).astype(np.int32)
+            x2, y2 = (+ direction + (x2, y2)).astype(np.int32)
+
+            edges.append((x1, y1, x2, y2))
+    return edges, line_directions
 
+
+def infer_missing_vertices(ridge_edges, rake_edges):
+    ridge_edges = np.array(ridge_edges)
+    rake_edges = np.array(rake_edges)
+    ridge_ends = np.concatenate([ridge_edges[:, 2:], ridge_edges[:, :2]])
+    rake_ends = np.concatenate([rake_edges[:, 2:], rake_edges[:, :2]])
+    ridge_ends = KDTree(ridge_ends)
+    rake_ends = KDTree(rake_ends)
+    missing_candidates = rake_ends.query_ball_tree(ridge_ends, 5)
+    missing_candidates = np.concatenate([*missing_candidates])
+    missing_candidates = np.unique(missing_candidates).astype(np.int32)
+    return ridge_ends.data[missing_candidates]
+
+
+def get_vertices_and_edges_from_segmentation(gest_seg_np, *, point_radius=30, max_angle=5.,
                                              **kwargs):
     '''Get the vertices and edges from the gestalt segmentation mask of the house'''
     # Apex
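The two new helpers work together: infer_missing_vertices proposes a vertex wherever a rake segment endpoint lands within 5 px of a ridge segment endpoint, and get_missed_vertices then keeps only the proposals with no already-detected vertex nearby (scipy's KDTree.query with distance_upper_bound returns index == len(tree.data) when nothing falls within the bound). A toy run with hand-made segments:

import numpy as np
from scipy.spatial import KDTree

# Each edge is (x1, y1, x2, y2).
ridge_edges = [(0, 0, 100, 0)]      # one ridge segment
rake_edges = [(101, 2, 150, 80)]    # starts ~2 px from the ridge's right end

ridge_ends = np.concatenate([np.array(ridge_edges)[:, 2:], np.array(ridge_edges)[:, :2]])
rake_ends = np.concatenate([np.array(rake_edges)[:, 2:], np.array(rake_edges)[:, :2]])
candidates = KDTree(rake_ends).query_ball_tree(KDTree(ridge_ends), 5)
idx = np.unique(np.concatenate(candidates)).astype(np.int32)
print(KDTree(ridge_ends).data[idx])  # [[100.   0.]] -- the shared roof corner

# get_missed_vertices keeps candidates with no detected vertex nearby:
vertices = np.array([[0., 0.]])      # pretend only one vertex was detected
tree = KDTree(vertices)
dist, nearest = tree.query([[100., 0.]], k=1, distance_upper_bound=20)
print(nearest[0] == len(tree.data))  # True -> (100, 0) counts as missed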
@@ -144,7 +199,8 @@ get_vertices_and_edges_from_segmentation(gest_seg_np, *, color_range=4., poi…
     # missed_vertices = get_missed_vertices(vertices, inferred_vertices, **kwargs)
     # vertices = np.concatenate([vertices, missed_vertices])
 
-… [1 deleted line truncated in the original diff view]
+    if len(vertices) < 2:
+        return [], []
 
     # scale = 1
     # vertex_size = np.zeros(vertices.shape[0])
@@ -153,8 +209,6 @@ get_vertices_and_edges_from_segmentation(gest_seg_np, *, color_range=4., poi…
     # radius = point_radius # np.clip(int(max_depth//2 + depth_np[coords[1], coords[0]]), 10, 30)#int(np.clip(max_depth - depth_np[coords[1], coords[0]], 10, 20))
     # vertex_size[i] = (scale * radius) ** 2 # because we are using squared distances
 
-    if len(vertices.data) < 2:
-        return [], []
     edges = []
     line_directions = []
 
@@ -163,46 +217,54 @@ get_vertices_and_edges_from_segmentation(gest_seg_np, *, color_range=4., poi…
     threshold = 20 # minimum number of votes (intersections in Hough grid cell)
     min_line_length = 60 # minimum number of pixels making up a line
     max_line_gap = 40 # maximum gap in pixels between connectable line segments
 
-… [2 deleted lines truncated in the original diff view]
-        edge_color = np.array(gestalt_color_mapping[edge_class])
-
-        mask = cv2.inRange(gest_seg_np,
-                           edge_color - color_range,
-                           edge_color + color_range)
-        mask = cv2.morphologyEx(mask,
-                                cv2.MORPH_DILATE, np.ones((3, 3)), iterations=1)
-
-        if not np.any(mask):
-            continue
-
-        # Run Hough on edge detected image
-        # Output "lines" is an array containing endpoints of detected line segments
-        cv2.GaussianBlur(mask, (11, 11), 0, mask)
-        lines = cv2.HoughLinesP(mask, rho, theta, threshold, np.array([]),
-                                min_line_length, max_line_gap)
-
-        if lines is None:
-            continue
-
-        for line_idx, line in enumerate(lines):
-            for x1, y1, x2, y2 in line:
-                if x1 < x2:
-                    x1, y1, x2, y2 = x2, y2, x1, y1
-                direction = (np.array([x2 - x1, y2 - y1]))
-                direction = direction / np.linalg.norm(direction)
-                line_directions.append(direction)
-
-                direction = extend * direction
-
-                x1, y1 = (-direction + (x1, y1)).astype(np.int32)
-                x2, y2 = (+ direction + (x2, y2)).astype(np.int32)
-
-                edges.append((x1, y1, x2, y2))
-
-
+    ridge_edges, ridge_directions = get_lines_and_directions(gest_seg_np, "ridge",
+                                                             rho=rho,
+                                                             theta=theta,
+                                                             threshold=threshold,
+                                                             min_line_length=min_line_length,
+                                                             max_line_gap=max_line_gap,
+                                                             **kwargs)
+
+    rake_edges, rake_directions = get_lines_and_directions(gest_seg_np, "rake",
+                                                           rho=rho,
+                                                           theta=theta,
+                                                           threshold=threshold,
+                                                           min_line_length=min_line_length,
+                                                           max_line_gap=max_line_gap,
+                                                           **kwargs)
+
+    if len(ridge_edges) > 0:
+        edges.append(ridge_edges)
+        line_directions.append(ridge_directions)
+
+    if len(rake_edges) > 0:
+        edges.append(rake_edges)
+        line_directions.append(rake_directions)
+
+    missed_vertices = []
+    if len(ridge_edges) > 0 and len(rake_edges) > 0:
+
+        inferred_vertices = infer_missing_vertices(ridge_edges, rake_edges)
+        missed_vertices = get_missed_vertices(vertices, inferred_vertices, **kwargs)
+        vertices = np.concatenate([vertices, missed_vertices])
 
+    vertices = KDTree(vertices)
 
+    for edge_class in ['eave', 'valley', 'flashing', 'step_flashing', 'hip']:
+        class_edges, class_directions = get_lines_and_directions(gest_seg_np, edge_class,
+                                                                 rho=rho,
+                                                                 theta=theta,
+                                                                 threshold=threshold,
+                                                                 min_line_length=min_line_length,
+                                                                 max_line_gap=max_line_gap,
+                                                                 **kwargs)
+
+        if len(class_edges) > 0:
+            edges.append(class_edges)
+            line_directions.append(class_directions)
+
+    edges = np.concatenate(edges).astype(np.float64)
+    line_directions = np.concatenate(line_directions).astype(np.float64)
     if len(edges) < 1:
         return [], []
     # calculate the distances between the vertices and the edge ends
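get_lines_and_directions is a straight extraction of the old inline loop body, with continue replaced by early returns. For reference, a self-contained check that the cv2.HoughLinesP call recovers a synthetic segment; rho=1 and theta=np.pi/180 are assumed here (they are defined in an unshown part of the function), and the blur/dilation steps are omitted:

import cv2
import numpy as np

mask = np.zeros((200, 200), np.uint8)
cv2.line(mask, (20, 50), (180, 50), 255, thickness=3)  # one synthetic edge

rho, theta = 1, np.pi / 180
threshold, min_line_length, max_line_gap = 20, 60, 40
lines = cv2.HoughLinesP(mask, rho, theta, threshold, np.array([]),
                        min_line_length, max_line_gap)
print(lines.shape)          # (N, 1, 4): each row is one x1, y1, x2, y2 segment
for x1, y1, x2, y2 in lines[0]:
    print(x1, y1, x2, y2)   # approximately 20 50 180 50 (endpoints may vary)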
@@ -264,7 +326,7 @@ get_vertices_and_edges_from_segmentation(gest_seg_np, *, color_range=4., poi…
         connections.append(possible_connections[:, fitted_line_idx])
 
     vertices = [{"xy": v, "type": "apex"} for v in apex_centroids]
-… [1 deleted line truncated in the original diff view]
+    vertices += [{"xy": v, "type": "apex"} for v in missed_vertices]
     vertices += [{"xy": v, "type": "eave_end_point"} for v in eave_end_point_centroids]
     return vertices, connections
 
script.py
CHANGED
@@ -127,13 +127,15 @@ if __name__ == "__main__":
     with ProcessPoolExecutor(max_workers=8) as pool:
         results = []
         for i, sample in enumerate(tqdm(dataset)):
-            results.append(pool.submit(predict, sample, …
+            results.append(pool.submit(predict, sample,
+                                       visualize=False,
                                        point_radius=25,
                                        max_angle=15,
                                        extend=30,
                                        merge_th=3.0,
-                                       min_missing_distance=…
-                                       scale_estimation_coefficient=4.5…
+                                       min_missing_distance=300.0,
+                                       scale_estimation_coefficient=4.5,
+                                       ))
 
         for i, result in enumerate(tqdm(results)):
             key, pred_vertices, pred_edges = result.result()
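The script queues every sample as a future first and only then iterates the results, so predict runs in parallel across 8 workers while results come back in submission order. A minimal sketch of that pattern with a stand-in predict (the real one takes the keyword arguments shown above and must be a top-level function so it can be pickled for the worker processes):

from concurrent.futures import ProcessPoolExecutor

def predict(sample, **kwargs):
    return sample['key'], kwargs['point_radius']

if __name__ == "__main__":
    dataset = [{'key': f'house_{i}'} for i in range(4)]
    with ProcessPoolExecutor(max_workers=2) as pool:
        results = [pool.submit(predict, s, point_radius=25) for s in dataset]
        for result in results:
            key, radius = result.result()  # blocks until that future is done
            print(key, radius)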
test_solution.ipynb
CHANGED
@@ -6,8 +6,8 @@
   "metadata": {
     "collapsed": true,
     "ExecuteTime": {
-      "end_time": "2024-05-…",
-      "start_time": "2024-05-…"
+      "end_time": "2024-05-31T20:17:44.768535Z",
+      "start_time": "2024-05-31T20:17:41.704428Z"
     }
   },
   "source": [
@@ -44,8 +44,8 @@
 {
   "metadata": {
     "ExecuteTime": {
-      "end_time": "2024-05-…",
-      "start_time": "2024-05-…"
+      "end_time": "2024-05-31T20:17:44.774691Z",
+      "start_time": "2024-05-31T20:17:44.769543Z"
     }
   },
   "cell_type": "code",
@@ -64,17 +64,17 @@
 {
   "metadata": {
     "ExecuteTime": {
-      "end_time": "2024-05-…",
-      "start_time": "2024-05-…"
+      "end_time": "2024-05-31T20:17:44.779814Z",
+      "start_time": "2024-05-31T20:17:44.775700Z"
     }
   },
   "cell_type": "code",
   "source": [
     "\n",
     "\n",
-    "#for i, sample in tqdm(enumerate(dataset)):\n",
+    "# for i, sample in tqdm(enumerate(dataset)):\n",
     "# # if i > 170:\n",
-    "# predict(sample, visualize=False, point_radius=40, max_angle=5)"
+    "# predict(sample, visualize=False, point_radius=40, max_angle=5, extend=30)"
   ],
   "id": "f36ee7b8f0427f72",
   "outputs": [],
@@ -83,8 +83,8 @@
 {
   "metadata": {
     "ExecuteTime": {
-      "end_time": "2024-05-…",
-      "start_time": "2024-05-…"
+      "end_time": "2024-05-31T20:30:50.830835Z",
+      "start_time": "2024-05-31T20:29:08.806688Z"
     }
   },
   "cell_type": "code",
@@ -101,8 +101,9 @@
     "                 max_angle=15, \n",
     "                 extend=30, \n",
     "                 merge_th=3.0, \n",
-    "                 min_missing_distance=…
-    "                 scale_estimation_coefficient=4.5…
+    "                 min_missing_distance=300.0, \n",
+    "                 scale_estimation_coefficient=4.5,\n",
+    "                 ))\n",
     "\n",
     "    for i, result in enumerate(tqdm(results)):\n",
     "        key, pred_vertices, pred_edges = result.result()\n",
@@ -122,18 +123,18 @@
     "name": "stderr",
     "output_type": "stream",
     "text": [
-      "346it [00:…",
-      "100%|██████████| 346/346 [01:28<00:00, 3.…"
+      "346it [00:12, 27.31it/s] \n",
+      "100%|██████████| 346/346 [01:28<00:00, 3.93it/s]\n"
     ]
   }
 ],
-"execution_count": …
+"execution_count": 15
 },
 {
   "metadata": {
     "ExecuteTime": {
-      "end_time": "2024-05-…",
-      "start_time": "2024-05-…"
+      "end_time": "2024-05-31T20:46:13.489460Z",
+      "start_time": "2024-05-31T20:46:13.290404Z"
     }
   },
   "cell_type": "code",
@@ -163,15 +164,15 @@
 {
   "data": {
     "text/plain": [
-      "DescribeResult(nobs=173, minmax=(1.…"
+      "DescribeResult(nobs=173, minmax=(1.1048503424519986, 3.2776734068655204), mean=2.123907112995204, variance=0.1767523302203136, skewness=0.3492868616641026, kurtosis=-0.24007805364057333)"
     ]
   },
-  "execution_count": …,
+  "execution_count": 17,
   "metadata": {},
   "output_type": "execute_result"
 }
 ],
-"execution_count": …
+"execution_count": 17
 },
 {
   "metadata": {},
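DescribeResult is the return type of scipy.stats.describe, so the notebook presumably calls it on an array of 173 per-sample scores. A toy reproduction of the shape of that output (the values here are random, not the notebook's):

import numpy as np
from scipy.stats import describe

scores = np.random.default_rng(0).uniform(1.1, 3.3, size=173)
print(describe(scores))
# DescribeResult(nobs=173, minmax=(..., ...), mean=..., variance=...,
#                skewness=..., kurtosis=...)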