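"""Pre-processing for semicolon-separated sensor logs.

process_data() drops samples with a non-zero CRC, pivots the data to one row
per timestamp, reindexes it onto a 20 ms grid, truncates at large gaps or at
the first stretch without significant change, writes output.csv, slices it to
JSON and returns ready-made plots.
"""
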
import numpy as np
import pandas as pd

from funcs.convertors import slice_csv_to_json
from funcs.plot_func import plot_sensor_data_from_json, plot_overlay_data_from_json

def process_data(input_file, slice_size=64, min_slice_size=16, sample_rate=20, window_size=40, threshold=1000, span_limit=10000000):
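    """Clean a raw sensor CSV, resample it onto a 20 ms grid, and slice it for plotting.

    input_file may be a file-like object (with a .name attribute) or a plain path.
    threshold is the absolute per-sample difference below which a channel is treated
    as unchanged; span_limit caps the distance between the first and last timestamp
    (in TS units). The remaining parameters are passed through to slice_csv_to_json.

    Returns the cleaned CSV path ('output.csv'), the sliced JSON file, its reported
    length, and the two figures produced by the plotting helpers.
    """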
    # Read the data from the file, including the CRC column; input_file may be a
    # file-like object exposing a .name attribute or a plain path string
    try:
        data = pd.read_csv(input_file.name, delimiter=";", index_col="NR", usecols=["NR", "TS", "LEG", "GX", "GY", "GZ", "AX", "AY", "AZ", "CRC"])
    except AttributeError:
        data = pd.read_csv(input_file, delimiter=";", index_col="NR", usecols=["NR", "TS", "LEG", "GX", "GY", "GZ", "AX", "AY", "AZ", "CRC"])

    # Replace the values with NaN when the CRC value is not zero
    data.loc[data["CRC"] != 0, ["GX", "GY", "GZ", "AX", "AY", "AZ"]] = np.nan

    # Drop the CRC column as it is not needed anymore
    data = data.drop(columns="CRC")
    
    # Pivot the table so there is one row per timestamp and one column per
    # channel/leg combination
    data = data.pivot_table(values=["GX", "GY", "GZ", "AX", "AY", "AZ"], index="TS", columns="LEG")

    # Flatten the multi-level columns, e.g. ("GX", 1) -> "GX1"
    data.columns = [f"{col[0]}{col[1]}" for col in data.columns]

    # Sort the index (timestamps)
    data = data.sort_index()

    # Check if the span between min and max is too large, and limit it if necessary
    min_ts = data.index.min()
    max_ts = data.index.max()
    
    if (max_ts - min_ts) > span_limit:
        max_ts = min_ts + span_limit
        data = data[data.index <= max_ts]

    # Reindex onto a regular 20 ms grid so every expected timestamp gets a row
    new_index = pd.RangeIndex(start=min_ts, stop=max_ts + 20, step=20)
    data = data.reindex(new_index)

    # Replace exact zero readings with NaN (rows added by the reindex are already NaN)
    data = data.replace(0, np.nan)

    # Find runs of consecutive all-NaN rows; more than three missing rows in a
    # row means a gap of 80 ms or more between valid samples
    gaps = data.isna().all(axis=1).astype(int).groupby(data.notna().all(axis=1).astype(int).cumsum()).sum()
    big_gaps = gaps[gaps > 3]
    if not big_gaps.empty:
        gap_start_row = int(big_gaps.index[0])
        gap_size = int(big_gaps.iloc[0]) * 20
        print(f"Warning: gap of {gap_size} ms found at row {gap_start_row}")
        # Keep only the data before the first gap of 80 ms or more
        data = data.iloc[:gap_start_row]

    # Calculate the absolute differences between consecutive rows for all channels
    differences = data.diff().abs()

    # Find the rows where no channel changes by more than the threshold
    no_significant_change_index = differences[differences.lt(threshold).all(axis=1)].index

    if not no_significant_change_index.empty:
        # Truncate at the first row where no channel shows a significant change
        data = data.loc[:no_significant_change_index[0]]
        print("Warning: data truncated at the first row with no significant change in any channel")

    # Save the resulting DataFrame to a new file
    data.to_csv('output.csv', sep=";", na_rep="NaN", float_format="%.0f")

    file, len_ = slice_csv_to_json('output.csv', slice_size, min_slice_size, sample_rate, window_size=window_size)

    # Generate the sensor and overlay plots from the sliced JSON data
    sensor_fig = plot_sensor_data_from_json(file, "GZ1")
    overlay_fig = plot_overlay_data_from_json(file, ["GZ1", "GZ2", "GZ3", "GZ4"], use_precise_timestamp=True)


    return 'output.csv', file, len_, sensor_fig, overlay_fig
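

# ---------------------------------------------------------------------------
# Minimal usage sketch, not part of the original module. It assumes a
# semicolon-separated log at "example_input.csv" with the NR, TS, LEG,
# GX/GY/GZ, AX/AY/AZ and CRC columns expected above, and that the funcs
# package is importable; the path name is a placeholder.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    csv_path, json_file, n_slices, sensor_fig, overlay_fig = process_data("example_input.csv")
    print(f"Cleaned data written to {csv_path}; slicing returned {json_file} (length {n_slices})")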