arcan3 committed
Commit: 247dc37
Parent: 715f3b0

added mods

Files changed (10):
  1. .gitattributes +1 -0
  2. .gitignore +3 -0
  3. app.py +57 -12
  4. funcs/ml_inference.py +26 -0
  5. funcs/plot_func.py +2 -1
  6. funcs/processor.py +2 -1
  7. funcs/som.py +57 -4
  8. main.py +210 -0
  9. main_template.blend +3 -0
  10. ml_inference.py +0 -30
.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.blend filter=lfs diff=lfs merge=lfs -text
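
The new rule is exactly the line that `git lfs track` writes; a hypothetical way to reproduce it from the repo root (assumes git-lfs is installed), shown in Python for consistency with the rest of the repo:

import subprocess

# Register *.blend files with Git LFS; this appends the filter line above
# to .gitattributes. Passing the pattern as a single argv element avoids
# shell globbing.
subprocess.run(["git", "lfs", "track", "*.blend"], check=True)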
.gitignore CHANGED
@@ -1,4 +1,7 @@
 # Byte-compiled / optimized / DLL files
+*.mp4
+*.xz
+*.gif
 *.zip
 *.png
 Data-*
app.py CHANGED
@@ -1,22 +1,60 @@
+import torch
 import gradio as gr
 
+from phate import PHATEAE
+from funcs.som import ClusterSOM
+
 from funcs.processor import process_data
 from funcs.plot_func import plot_sensor_data_from_json
+from funcs.dataloader import BaseDataset2, read_json_files
+
+DEVICE = torch.device("cpu")
+reducer10d = PHATEAE(epochs=30, n_components=10, lr=.0001, batch_size=128, t='auto', knn=8, relax=True, metric='euclidean')
+reducer10d.load('models/r10d_2.pth')
+
+cluster_som = ClusterSOM()
+cluster_som.load("models/cluster_som2.pkl")
+
+# ml inference
+def get_som_mp4(file, slice_select=1, reducer=reducer10d, cluster=cluster_som):
+
+    try:
+        train_x, train_y = read_json_files(file)
+    except:
+        train_x, train_y = read_json_files(file.name)
+
+    # Convert tensors to numpy arrays if necessary
+    if isinstance(train_x, torch.Tensor):
+        train_x = train_x.numpy()
+    if isinstance(train_y, torch.Tensor):
+        train_y = train_y.numpy()
+
+    # load the time series slices of the data 4*3*2*64 (feeds+axis*sensor*samples) + 5 for time diff
+    data = BaseDataset2(train_x.reshape(len(train_x), -1) / 32768, train_y)
+
+    # compute the 10-dimensional embedding vector
+    embedding10d = reducer.transform(data)
+
+    # prediction = cluster_som.predict(embedding10d)
+    fig = cluster.plot_activation_v2(embedding10d, slice_select)
+
+    return fig
 
 with gr.Blocks(title='Cabasus') as cabasus_sensor:
     title = gr.Markdown("<h2><center>Data gathering and processing</center></h2>")
     with gr.Tab("Convert"):
-        csv_file_box = gr.File(label='Upload CSV File')
         with gr.Row():
-            processed_file_box = gr.File(label='Processed CSV File')
-            json_file_box = gr.File(label='Generated Json file')
+            csv_file_box = gr.File(label='Upload CSV File')
+            with gr.Column():
+                processed_file_box = gr.File(label='Processed CSV File')
+                json_file_box = gr.File(label='Generated Json file')
 
         with gr.Row():
-            slice_size_slider = gr.inputs.Slider(16, 512, 1, 64, label="Slice Size")
-            sample_rate = gr.inputs.Slider(1, 199, 1, 20, label="Sample rate")
+            slice_size_slider = gr.Slider(minimum=16, maximum=512, step=1, value=64, label="Slice Size", visible=False)
+            sample_rate = gr.Slider(minimum=1, maximum=199, step=1, value=20, label="Sample rate", visible=False)
         with gr.Row():
-            window_size_slider = gr.inputs.Slider(0, 100, 2, 10, label="Window Size")
-            repeat_process = gr.Button('Restart process')
+            window_size_slider = gr.Slider(minimum=0, maximum=100, step=2, value=10, label="Window Size", visible=False)
+            repeat_process = gr.Button('Restart process', visible=False)
         with gr.Row():
             leg_dropdown = gr.Dropdown(choices=['GZ1', 'GZ2', 'GZ3', 'GZ4'], label='select leg', value='GZ1')
             slice_slider = gr.Slider(minimum=1, maximum=300, label='Current slice', step=1)
@@ -28,14 +66,21 @@ with gr.Blocks(title='Cabasus') as cabasus_sensor:
         plot_slice_leg = gr.Plot(label="Sliced Signal Plot")
         get_all_slice = gr.Plot(label="Real Signal Plot")
 
+    som_create = gr.Button('generate som')
     with gr.Row():
-        animation = gr.PlayableVideo(label="Animated horse steps")
+        som_figures = gr.Plot(label="som activations")
 
     slices_per_leg = gr.Textbox(label="Number of slices found per LEG")
 
-    csv_file_box.change(process_data, inputs=[csv_file_box, slice_size_slider, sample_rate, window_size_slider], outputs=[processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice])
-    leg_dropdown.change(plot_sensor_data_from_json, inputs=[json_file_box, leg_dropdown, slice_slider], outputs=[plot_box_leg, plot_slice_leg, get_all_slice])
-    repeat_process.click(process_data, inputs=[csv_file_box, slice_size_slider, sample_rate, window_size_slider], outputs=[processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice])
-    slice_slider.change(plot_sensor_data_from_json, inputs=[json_file_box, leg_dropdown, slice_slider], outputs=[plot_box_leg, plot_slice_leg, get_all_slice])
+    csv_file_box.change(process_data, inputs=[csv_file_box, slice_size_slider, sample_rate, window_size_slider],
+                        outputs=[processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice])
+    leg_dropdown.change(plot_sensor_data_from_json, inputs=[json_file_box, leg_dropdown, slice_slider],
+                        outputs=[plot_box_leg, plot_slice_leg, get_all_slice])
+    repeat_process.click(process_data, inputs=[csv_file_box, slice_size_slider, sample_rate, window_size_slider],
+                         outputs=[processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice])
+    slice_slider.change(plot_sensor_data_from_json, inputs=[json_file_box, leg_dropdown, slice_slider],
+                        outputs=[plot_box_leg, plot_slice_leg, get_all_slice])
+
+    som_create.click(get_som_mp4, inputs=[json_file_box, slice_slider], outputs=[som_figures])
 
 cabasus_sensor.queue(concurrency_count=2).launch(debug=True)
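
For readers unfamiliar with the Gradio event wiring used above, a minimal self-contained sketch of the same `.change()` pattern (hypothetical components, Gradio 3.x style as in this app):

import gradio as gr

# A component's .change() maps a callback's input components to its output
# components, exactly as csv_file_box.change(...) and slice_slider.change(...)
# do in the diff above.
def echo_upper(text):
    return text.upper()

with gr.Blocks() as demo:
    inp = gr.Textbox(label="input")
    out = gr.Textbox(label="output")
    inp.change(echo_upper, inputs=[inp], outputs=[out])

# demo.queue(concurrency_count=2).launch()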
funcs/ml_inference.py ADDED
@@ -0,0 +1,26 @@
+import torch
+from funcs.dataloader import BaseDataset2, read_json_files
+
+def get_som_mp4(file, reducer10d, cluster_som, slice_select=1):
+
+    try:
+        train_x, train_y = read_json_files(file)
+    except:
+        train_x, train_y = read_json_files(file.name)
+
+    # Convert tensors to numpy arrays if necessary
+    if isinstance(train_x, torch.Tensor):
+        train_x = train_x.numpy()
+    if isinstance(train_y, torch.Tensor):
+        train_y = train_y.numpy()
+
+    # load the time series slices of the data 4*3*2*64 (feeds+axis*sensor*samples) + 5 for time diff
+    data = BaseDataset2(train_x.reshape(len(train_x), -1) / 32768, train_y)
+
+    # compute the 10-dimensional embedding vector
+    embedding10d = reducer10d.transform(data)
+
+    # prediction = cluster_som.predict(embedding10d)
+    fig = cluster_som.plot_activation_v2(embedding10d, slice_select)
+
+    return fig
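
A minimal usage sketch of the helper above, mirroring how the deleted top-level ml_inference.py (see the end of this commit) loaded the models; `output.json` is a hypothetical processed file:

import torch
from phate import PHATEAE
from funcs.som import ClusterSOM
from funcs.ml_inference import get_som_mp4

# Load the pretrained 10-d reducer and the SOM cluster model, then render
# the activation figure for the first slice of a processed JSON file.
reducer10d = PHATEAE(epochs=30, n_components=10, lr=.0001, batch_size=128,
                     t='auto', knn=8, relax=True, metric='euclidean')
reducer10d.load('models/r10d_2.pth')

cluster_som = ClusterSOM()
cluster_som.load("models/cluster_som2.pkl")

fig = get_som_mp4("output.json", reducer10d, cluster_som, slice_select=1)
fig.savefig("som_activation.png")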
funcs/plot_func.py CHANGED
@@ -6,6 +6,8 @@ import pandas as pd
 import seaborn as sns
 import matplotlib.pyplot as plt
 
+from funcs.ml_inference import get_som_mp4
+
 matplotlib.use('Agg')
 plt.style.use('ggplot')
 
@@ -59,7 +61,6 @@ def plot_sensor_data_from_json(json_file, sensor, slice_select=1):
     plt.legend()
     plt.tight_layout()
 
-
     return fig, fig1, plot_other_sensor_with_same_timestamp(json_file, sensor, slice_select)
 
 def plot_overlay_data_from_json(json_file, sensors, use_precise_timestamp=False):
funcs/processor.py CHANGED
@@ -4,8 +4,9 @@ import gradio as gr
 
 from funcs.convertors import slice_csv_to_json
 from funcs.plot_func import plot_sensor_data_from_json, plot_overlay_data_from_json
+from funcs.ml_inference import get_som_mp4
 
-def process_data(input_file, slice_size=64, min_slice_size=16, sample_rate=20, window_size=40, threshold=1000, span_limit=10000000,):
+def process_data(input_file, slice_size=64, min_slice_size=16, sample_rate=20, window_size=40, threshold=1000, span_limit=10000000):
     # Read the data from the file, including the CRC column
     try:
         if input_file.name is None:
funcs/som.py CHANGED
@@ -344,8 +344,8 @@ class ClusterSOM:
         images = []
         for sample in tqdm(data[start:end], desc="Visualizing prediction output"):
            prediction = self.predict([sample])[0]
-            if prediction[0] == -1:  # Noise
-                continue
+            # if prediction[0] == -1:  # Noise
+            #     continue
 
            fig, axes = plt.subplots(1, len(self.som_models), figsize=(20, 5), sharex=True, sharey=True)
            fig.suptitle(f"Activation map for SOM {prediction[0]}, node {prediction[1]}", fontsize=16)
@@ -378,7 +378,10 @@ class ClusterSOM:
            # Create a colorbar for each frame
            fig.subplots_adjust(right=0.8)
            cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
-            fig.colorbar(im_active, cax=cbar_ax)
+            try:
+                fig.colorbar(im_active, cax=cbar_ax)
+            except:
+                pass
 
            # Save the plot to a buffer
            buf = io.BytesIO()
@@ -422,4 +425,54 @@ class ClusterSOM:
 
        self.hdbscan_model, self.som_models, self.mean_values, self.sigma_values, self.cluster_mapping = model_data[:5]
        if len(model_data) > 5:
-            self.label_centroids, self.label_encodings = model_data[5:]
+            self.label_centroids, self.label_encodings = model_data[5:]
+
+    def plot_activation_v2(self, data, slice_select=1):
+        """
+        Plot the activation maps of all SOMs for a single selected slice,
+        highlighting the BMU of the SOM the slice is assigned to.
+        """
+        if len(self.som_models) == 0:
+            raise ValueError("SOM models not trained yet.")
+
+        prediction = self.predict([data[int(slice_select)-1]])[0]
+
+        fig, axes = plt.subplots(1, len(self.som_models), figsize=(20, 5), sharex=True, sharey=True)
+        fig.suptitle(f"Activation map for SOM {prediction[0]}, node {prediction[1]}", fontsize=16)
+
+        for idx, (som_key, som) in enumerate(self.som_models.items()):
+            ax = axes[idx]
+            activation_map = np.zeros(som._weights.shape[:2])
+            for x in range(som._weights.shape[0]):
+                for y in range(som._weights.shape[1]):
+                    activation_map[x, y] = np.linalg.norm(data[int(slice_select)-1] - som._weights[x, y])
+
+            winner = som.winner(data[int(slice_select)-1])  # Find the BMU for this SOM
+            activation_map[winner] = 0  # Set the BMU's value to 0 so it stands out in the colormap
+
+            if som_key == prediction[0]:  # Active SOM
+                im_active = ax.imshow(activation_map, cmap='viridis', origin='lower', interpolation='none')
+                ax.plot(winner[1], winner[0], 'r+')  # Mark the BMU with a red plus sign
+                ax.set_title(f"SOM {som_key}", color='blue', fontweight='bold')
+                if hasattr(self, 'label_centroids'):
+                    label_idx = self.label_encodings.inverse_transform([som_key - 1])[0]
+                    ax.set_xlabel(f"Label: {label_idx}", fontsize=12)
+            else:  # Inactive SOM
+                im_inactive = ax.imshow(activation_map, cmap='gray', origin='lower', interpolation='none')
+                ax.set_title(f"SOM {som_key}")
+
+            ax.set_xticks(range(activation_map.shape[1]))
+            ax.set_yticks(range(activation_map.shape[0]))
+            ax.grid(True, linestyle='-', linewidth=0.5)
+
+        # Create a colorbar for each frame
+        # fig.subplots_adjust(right=0.8)
+        # cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
+
+        plt.tight_layout()
+        # try:
+        #     fig.colorbar(im_active, cax=cbar_ax)
+        # except:
+        #     pass
+
+        return fig
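
For reference, the per-node distances that `plot_activation_v2` computes with nested loops can be vectorized; a minimal sketch with a hypothetical 8x8 SOM grid over 10-d embeddings:

import numpy as np

# Activation map = distance from one embedded sample to every node's weight
# vector; the best-matching unit (BMU) is the node with the smallest distance.
def activation_map(weights, sample):
    amap = np.linalg.norm(weights - sample, axis=-1)
    winner = np.unravel_index(np.argmin(amap), amap.shape)
    return amap, winner

weights = np.random.rand(8, 8, 10)  # hypothetical (rows, cols, dim) SOM weights
sample = np.random.rand(10)         # one 10-d embedding
amap, bmu = activation_map(weights, sample)
print(bmu, amap[bmu])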
main.py ADDED
@@ -0,0 +1,210 @@
+import csv
+import bpy
+from mathutils import *
+import sys
+
+argv = sys.argv
+argv = argv[argv.index("--") + 1:]  # get all args after "--"
+
+D = bpy.data
+C = bpy.context
+
+def copy_armature_animation(
+    source_armature_name,
+    destination_armature_name,
+    start_frame_source,
+    end_frame_source,
+    start_frame_target,
+):
+    # Get the source and destination armature objects
+    source_armature = bpy.data.objects[source_armature_name]
+    destination_armature = bpy.data.objects[destination_armature_name]
+
+    # Copy the animation data from the source armature to the destination armature
+    # print(f"copying animation {start_frame_source}-{end_frame_source} to {start_frame_target}")
+    for i, frame in enumerate(range(start_frame_source, end_frame_source + 1)):
+        bpy.context.scene.frame_set(frame)
+        # Set the location and rotation of the destination bone at the given frame
+        for bone in source_armature.pose.bones:
+            destination_bone = destination_armature.pose.bones.get(bone.name)
+
+            destination_bone.location = bone.location
+            destination_bone.rotation_quaternion = bone.rotation_quaternion
+            destination_bone.keyframe_insert(
+                data_path="location", frame=start_frame_target + i
+            )
+            destination_bone.keyframe_insert(
+                data_path="rotation_quaternion", frame=start_frame_target + i
+            )
+            # Copy the bone's animation data for each frame in the range
+
+
+def adjust_armature_animation_timing(
+    armature_name, old_start, old_end, new_start, new_end
+):
+    # Get the armature object by name
+    armature = bpy.data.objects[armature_name]
+
+    # Calculate the ratio between the old and new keyframe ranges
+    old_range = old_end - old_start
+    new_range = new_end - new_start
+    ratio = new_range / old_range
+    # print(f"Adjusting timing from {old_start}-{old_end} to {new_start}-{new_end} with ratio: {ratio}")
+
+    # Iterate through all the armature bones and modify their animation keyframes
+    for fcurve in armature.animation_data.action.fcurves:
+        if fcurve.array_index < 4:
+            for keyframe in fcurve.keyframe_points:
+                # Adjust the keyframe position based on the ratio
+                # (explicit bounds check; keyframe.co.x is a float, so membership
+                # in a range() object is fragile)
+                if old_start <= keyframe.co.x < old_end:
+                    # print(f"keyframe move from {keyframe.co.x}")
+                    keyframe.co.x = (keyframe.co.x - old_start) * ratio + new_start
+                    # print(f"to {keyframe.co.x}")
+                else:
+                    continue
+
+def calculate_new_frame_range(frame_rate, timestamp, start_frame):
+    adjusted_end_frame = int(timestamp / 1000 * frame_rate + start_frame)
+    return adjusted_end_frame
+
+
+def change_material_property(
+    material_name, node_name, new_value, start_frame, end_frame
+):
+    value = bpy.data.materials[material_name].node_tree.nodes[node_name].outputs[0]
+    value.default_value = new_value
+    value.keyframe_insert(data_path="default_value", frame=start_frame+1)
+    value.keyframe_insert(data_path="default_value", frame=end_frame-1)
+
+
+def set_cues_state(
+    global_state,
+    global_color,
+    c1_state,
+    c1_color,
+    c2_state,
+    c2_color,
+    c3_state,
+    c3_color,
+    c4_state,
+    c4_color,
+    start_frame,
+    end_frame,
+):
+    # cue 1 noise
+    change_material_property(
+        "CUE_MAT_1", "Distortion", float(c1_state), start_frame, end_frame
+    )
+
+    # cue 2 noise
+    change_material_property(
+        "CUE_MAT_2", "Distortion", float(c2_state), start_frame, end_frame
+    )
+
+    # cue 3 noise
+    change_material_property(
+        "CUE_MAT_3", "Distortion", float(c3_state), start_frame, end_frame
+    )
+
+    # cue 4 noise
+    change_material_property(
+        "CUE_MAT_4", "Distortion", float(c4_state), start_frame, end_frame
+    )
+
+    # cue 1 color
+    change_material_property(
+        "CUE_MAT_1", "Color", float(c1_color), start_frame, end_frame
+    )
+
+    # cue 2 color
+    change_material_property(
+        "CUE_MAT_2", "Color", float(c2_color), start_frame, end_frame
+    )
+
+    # cue 3 color
+    change_material_property(
+        "CUE_MAT_3", "Color", float(c3_color), start_frame, end_frame
+    )
+
+    # cue 4 color
+    change_material_property(
+        "CUE_MAT_4", "Color", float(c4_color), start_frame, end_frame
+    )
+
+    # global cue on
+    change_material_property(
+        "GLOBAL_CUE_MAT", "MixFactor", float(global_state), start_frame, end_frame
+    )
+
+    # global cue color
+    change_material_property(
+        "GLOBAL_CUE_MAT", "Color", float(global_color), start_frame, end_frame
+    )
+
+
+def animate_cycle(source_armature, target_armature, csv_file):
+    # Get the current scene
+    scene = bpy.context.scene
+
+    # Get the frame rate of the current scene
+    frame_rate = scene.render.fps / scene.render.fps_base
+
+    gait_to_keyframe_start = {0: 10, 1: 80, 2: 60, 3: 250}
+    gait_to_keyframe_end = {0: 49, 1: 99, 2: 74, 3: 310}
+
+    # column_names = ["Gait", "TS", "State", "Condition", "Shape1", "Shape2", "Shape3", "Shape4", "Color1", "Color2", "Color3", "Color4", "Danger1", "Danger2", "Danger3", "Danger4"]
+    previous_end_frame = 0
+    with open(csv_file, "r") as csvfile:
+        datareader = csv.reader(csvfile)
+        for i, row in enumerate(datareader):
+            if i == 0 or row[0] is None:
+                continue
+
+            copy_armature_animation(
+                source_armature,
+                target_armature,
+                gait_to_keyframe_start[int(row[0])],
+                gait_to_keyframe_end[int(row[0])],
+                previous_end_frame + 1,
+            )
+            adjusted_end_frame = calculate_new_frame_range(
+                frame_rate=frame_rate,
+                timestamp=int(float(row[1])),
+                start_frame=previous_end_frame + 1,
+            )
+            adjust_armature_animation_timing(
+                target_armature,
+                previous_end_frame + 1,
+                previous_end_frame + 1 + gait_to_keyframe_end[int(row[0])] - gait_to_keyframe_start[int(row[0])],
+                previous_end_frame + 1,
+                adjusted_end_frame,
+            )
+            set_cues_state(
+                row[3],
+                row[4],
+                row[8],
+                row[5],
+                row[9],
+                row[6],
+                row[10],
+                row[7],
+                row[11],
+                row[2],
+                previous_end_frame,
+                adjusted_end_frame,
+            )
+            # print(previous_end_frame, adjusted_end_frame)
+            previous_end_frame = adjusted_end_frame
+            print(f"Animated row {i}")
+
+    bpy.context.scene.frame_end = previous_end_frame
+    bpy.ops.render.render(animation=True)
+
+print("Cycles device:")
+print(bpy.context.preferences.addons["cycles"].preferences.has_active_device())
+animate_cycle("Deformation", "Deformation.001", str(argv[0]))
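
The `argv[argv.index("--") + 1:]` idiom at the top of main.py follows Blender's CLI convention: everything after a bare `--` is ignored by Blender and left for the script. A hypothetical headless invocation of this scene (CSV name assumed):

import subprocess

# -b runs Blender in background (no UI), -P executes the Python script inside
# the opened .blend file; the CSV path after "--" becomes argv[0] in main.py.
subprocess.run(
    ["blender", "-b", "main_template.blend", "-P", "main.py", "--", "steps.csv"],
    check=True,
)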
main_template.blend ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e9e2e198820ce4f0448c667e293537ad9e821017209d5241156937807c40b24
+size 256848708
ml_inference.py DELETED
@@ -1,30 +0,0 @@
-import torch
-from phate import PHATEAE
-from funcs.som import ClusterSOM
-
-from funcs.dataloader import BaseDataset2, read_json_files
-
-
-DEVICE = torch.device("cpu")
-
-reducer10d = PHATEAE(epochs=30, n_components=10, lr=.0001, batch_size=128, t='auto', knn=8, relax=True, metric='euclidean')
-reducer10d.load('models/r10d_2.pth')
-
-cluster_som = ClusterSOM()
-cluster_som.load("models/cluster_som2.pkl")
-
-train_x, train_y = read_json_files('output.json')
-# Convert tensors to numpy arrays if necessary
-if isinstance(train_x, torch.Tensor):
-    train_x = train_x.numpy()
-if isinstance(train_y, torch.Tensor):
-    train_y = train_y.numpy()
-
-# load the time series slices of the data 4*3*2*64 (feeds+axis*sensor*samples) + 5 for time diff
-data = BaseDataset2(train_x.reshape(len(train_x), -1) / 32768, train_y)
-
-#compute the 10 dimensional embeding vector
-embedding10d = reducer10d.transform(data)
-
-prediction = cluster_som.predict(embedding10d)
-cluster_som.plot_activation(embedding10d)