Spaces: Build error

arcan3 committed • f9e67d5 (parent: 9ef6c6c)

added animation video
Files changed:
- .gitignore +2 -0
- app.py +92 -4
- funcs/convertors.py +4 -5
- funcs/processor.py +3 -3
- wetransfer_files_2023-05-04_1807.zip +0 -3
.gitignore CHANGED
@@ -3,10 +3,12 @@
 *.mp4
 *.xz
 *.json
+animation_table.csv
 *.gif
 *.zip
 *.png
 Data-*
+Data/
 drive-*
 __pycache__/
 *.py[cod]
app.py CHANGED
@@ -1,7 +1,10 @@
+
+import os
+import csv
+import json
 import torch
+import numpy as np
 import gradio as gr
-import json
-import os
 
 from phate import PHATEAE
 from funcs.som import ClusterSOM
@@ -18,6 +21,78 @@ reducer10d.load('models/r10d_3.pth')
 cluster_som = ClusterSOM()
 cluster_som.load("models/cluster_som3.pkl")
 
+def map_som2animation(som_value):
+    mapping = {
+        2: 0, # walk
+        1: 1, # trot
+        3: 2, # gallop
+        5: 3, # idle
+        4: 3, # other
+        -1: 3, # other
+        }
+
+    return mapping.get(som_value, None)
+
+def deviation_scores(tensor_data, scale=50):
+    if len(tensor_data) < 5:
+        raise ValueError("The input tensor must have at least 5 elements.")
+
+    # Extract the side values and reference value from the input tensor
+    side_values = tensor_data[-5:-1].numpy()
+    reference_value = tensor_data[-1].item()
+
+    # Calculate the absolute differences between the side values and the reference
+    absolute_differences = np.abs(side_values - reference_value)
+
+    # Check for zero division
+    if np.sum(absolute_differences) == 0:
+        # All side values are equal to the reference, so their deviation scores are 0
+        return int(reference_value/20*32768), [0, 0, 0, 0]
+
+    # Calculate the deviation scores for each side value
+    scores = absolute_differences * scale
+
+    # Clip the scores between 0 and 1
+    clipped_scores = np.clip(scores, 0, 1)
+
+    return int(reference_value/20*32768), clipped_scores.tolist()
+
+def process_som_data(data, prediction):
+    processed_data = []
+
+    for i in range(0, len(data)):
+        TS, scores_list = deviation_scores(data[i][0])
+
+        # If TS is missing (None), interpolate it using surrounding values
+        if TS is None:
+            if i > 0 and i < len(data) - 1:
+                prev_TS = processed_data[-1][1]
+                next_TS = deviation_scores(data[i + 1][0])[0]
+                TS = (prev_TS + next_TS) // 2
+            elif i > 0:
+                TS = processed_data[-1][1]  # Use the previous TS value
+            else:
+                TS = 0  # Default to 0 if no surrounding values are available
+
+
+        # Set Gait, State, and Condition
+
+        # 0-walk 1-trot 2-gallop 3-idle
+        gait = map_som2animation(prediction[0][0])
+        state = 0
+        condition = 0
+
+        # Calculate Shape, Color, and Danger values
+        shape_values = scores_list
+        color_values = scores_list
+        danger_values = [1 if score == 1 else 0 for score in scores_list]
+
+        # Create a row with the required format
+        row = [gait, TS, state, condition] + shape_values + color_values + danger_values
+        processed_data.append(row)
+
+    return processed_data
+
 # ml inference
 def get_som_mp4(file, slice_select, reducer=reducer10d, cluster=cluster_som):
 
@@ -38,10 +113,23 @@ def get_som_mp4(file, slice_select, reducer=reducer10d, cluster=cluster_som):
     #compute the 10 dimensional embeding vector
     embedding10d = reducer.transform(data)
 
+    # retrieve the prediction and get the animation
+    prediction = cluster_som.predict(embedding10d)
+    processed_data = process_som_data(data, prediction)
+
+    # Write the processed data to a CSV file
+    header = ['Gait', 'TS', 'State', 'Condition', 'Shape1', 'Shape2', 'Shape3', 'Shape4', 'Color1', 'Color2', 'Color3', 'Color4', 'Danger1', 'Danger2', 'Danger3', 'Danger4']
+    with open('animation_table.csv', 'w', newline='') as csvfile:
+        csv_writer = csv.writer(csvfile)
+        csv_writer.writerow(header)
+        csv_writer.writerows(processed_data)
+
+    os.system('curl -X POST -F "csv_file=@animation_table.csv" https://metric-space.ngrok.io/generate --output animation.mp4')
+
     # prediction = cluster_som.predict(embedding10d)
     fig = cluster.plot_activation_v2(embedding10d, slice_select)
 
-    return fig
+    return fig, 'animation.mp4'
 
 def attach_label_to_json(json_file, label_text):
     # Read the JSON file
@@ -114,7 +202,7 @@ with gr.Blocks(title='Cabasus') as cabasus_sensor:
     slice_slider.change(plot_sensor_data_from_json, inputs=[json_file_box, leg_dropdown, slice_slider],
                         outputs=[plot_box_leg, plot_slice_leg, get_all_slice, slice_json_box, plot_box_overlay])
 
-    som_create.click(get_som_mp4, inputs=[json_file_box, slice_slider], outputs=[som_figures])
+    som_create.click(get_som_mp4, inputs=[json_file_box, slice_slider], outputs=[som_figures, animation])
     button_label_Add.click(attach_label_to_json, inputs=[slice_json_box, label_name], outputs=[slice_json_label_box])
 
     cabasus_sensor.queue(concurrency_count=2).launch(debug=True)
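Usage note (not part of the commit): a minimal standalone sketch of how the new app.py helpers fit together, assuming map_som2animation, deviation_scores, and process_som_data from the diff above are in scope, that each data item carries a tensor whose last five values are four side readings followed by a reference value, and that prediction[0][0] is the winning SOM cluster id. The dummy values below are made up for illustration.

# Hypothetical usage sketch; shapes and values are assumptions inferred from
# how get_som_mp4 calls the helpers, not something the commit documents.
import csv
import torch

dummy_data = [(torch.tensor([1.2, 1.1, 1.3, 1.0, 1.15]),) for _ in range(4)]  # 4 side values + reference
dummy_prediction = [(2, None)]  # cluster 2 maps to gait 0 ("walk") via map_som2animation

rows = process_som_data(dummy_data, dummy_prediction)  # 16 columns per row

header = ['Gait', 'TS', 'State', 'Condition',
          'Shape1', 'Shape2', 'Shape3', 'Shape4',
          'Color1', 'Color2', 'Color3', 'Color4',
          'Danger1', 'Danger2', 'Danger3', 'Danger4']
with open('animation_table.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(header)
    writer.writerows(rows)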
funcs/convertors.py CHANGED
@@ -17,11 +17,11 @@ def slice_csv_to_json(input_file, slice_size=64, min_slice_size=16, sample_rate=
 
     gz_columns = [col for col in data.columns if col.startswith("GZ")]
     all_peaks = []
-    upsample_factor = sample_rate
-    combined_smoothed_signals_upsampled = np.zeros(upsample_signal(data[gz_columns[0]].values,
+    # upsample_factor = sample_rate
+    combined_smoothed_signals_upsampled = np.zeros(upsample_signal(data[gz_columns[0]].values, sample_rate).size, dtype=float)
     for gz_col in gz_columns:
         gz_signal = data[gz_col].values
-        upsampled_smoothed_signal, peaks = process_signals(gz_signal,
+        upsampled_smoothed_signal, peaks = process_signals(gz_signal, sample_rate, window_size=window_size)
         all_peaks.append(peaks)
         combined_smoothed_signals_upsampled += upsampled_smoothed_signal
 
@@ -31,7 +31,7 @@ def slice_csv_to_json(input_file, slice_size=64, min_slice_size=16, sample_rate=
     slices = []
     start_index = 0
     for i, precise_slice_point in enumerate(precise_slice_points):
-        end_index = round(precise_slice_point /
+        end_index = round(precise_slice_point / sample_rate)
         if i == 0:
             start_index = end_index
             continue
@@ -52,7 +52,6 @@ def slice_csv_to_json(input_file, slice_size=64, min_slice_size=16, sample_rate=
 
             # Compute precise_timestamp and precise_time_diff for each GZ channel individually
             for j, gz_col in enumerate(gz_columns):
-                #slice_data[f"{gz_col}_precise_timestamp"] = slices[-1][f"{gz_col}_precise_timestamp"] + all_peaks[j][i] - all_peaks[j][i - 1]
                 slice_data[f"{gz_col}_precise_time_diff"] = all_peaks[j][i] - all_peaks[j][i - 1]
         else:
             precise_timestamp = data.index.values[start_index]
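Side note (not part of the commit): the completed calls above suggest the GZ signals are upsampled by a factor of sample_rate before peak detection, so a precise slice point found on the upsampled signal is divided by sample_rate to get an index into the original samples. A small illustration with made-up numbers:

# Illustrative only; values are invented, the division mirrors end_index above.
sample_rate = 20                  # assumed upsampling factor, per slice_csv_to_json defaults
precise_slice_point = 1283        # position found on the upsampled signal
end_index = round(precise_slice_point / sample_rate)  # -> 64 in the original signal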
funcs/processor.py CHANGED
@@ -10,11 +10,11 @@ def process_data(input_file, slice_size=64, min_slice_size=16, sample_rate=20, w
     # Read the data from the file, including the CRC column
     try:
         if input_file.name is None:
-            return None, None, None, None, None, None, None, None
+            return None, None, None, None, None, None, None, None, None
         data = pd.read_csv(input_file.name, delimiter=";", index_col="NR", usecols=["NR", "TS", "LEG", "GX", "GY", "GZ", "AX", "AY", "AZ", "CRC"])
     except:
         if input_file is None:
-            return None, None, None, None, None, None, None, None
+            return None, None, None, None, None, None, None, None, None
         data = pd.read_csv(input_file, delimiter=";", index_col="NR", usecols=["NR", "TS", "LEG", "GX", "GY", "GZ", "AX", "AY", "AZ", "CRC"])
 
 
@@ -70,7 +70,7 @@ def process_data(input_file, slice_size=64, min_slice_size=16, sample_rate=20, w
     if not no_significant_change_index.empty:
         # Save the data up to the point where no significant change appears in all channels
        data = data.loc[:no_significant_change_index[0]]
-        return None, None, f'Warning: Significantly shortened > check the recordings', None, None, None, None, None
+        return None, None, f'Warning: Significantly shortened > check the recordings', None, None, None, None, None, None
 
     # Save the resulting DataFrame to a new file
     data.to_csv('output.csv', sep=";", na_rep="NaN", float_format="%.0f")
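Reading note (an assumption, not stated in the commit): the only substantive change here is that each early return grows from eight to nine None values, plausibly because the Gradio callback that uses process_data gained a ninth output (the new animation video), and every return path must match that arity. A minimal sketch of the pattern:

# Hypothetical sketch: a function wired to N Gradio outputs must return N
# values on every path, including early exits. Names and N are illustrative.
def process_data_stub(input_file, n_outputs=9):
    if input_file is None:
        return (None,) * n_outputs  # one slot per Gradio output component
    # ... normal processing would also return n_outputs values here
    return (None,) * n_outputs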
wetransfer_files_2023-05-04_1807.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e555b7d207aecddbbd6f0a704801d9c05d53eb62218dc04a9aa9ea2995c9d565
-size 9167229