-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathkrypton.py
More file actions
102 lines (78 loc) · 3.11 KB
/
krypton.py
File metadata and controls
102 lines (78 loc) · 3.11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 15 09:53:16 2018
@author: Robin Amsters
@email: robin.amsters@kuleuven.be
Functions to obtain and manipulate data from .mat files generated by the
krypton K600 measurement system
"""
import scipy.io
import numpy as np
#==============================================================================
# KRYPTON MAT FILE FUNCTIONS
#==============================================================================
def get_krypton_reference(filePath):
    """
    Return x, y, z positions and a time vector for krypton camera
    measurements of a dynamic frame stored in a .mat file.

    Parameters
    ----------
    filePath : str
        Path to a .mat file containing a 'dynamicrobotframe' entry whose
        rows 0, 1, 2 hold x, y, z coordinates in millimeters.

    Returns
    -------
    x_real, y_real, z_real : ndarray
        Positions in meters, offset so the first valid sample is at 0.
    t_real : ndarray
        Sample times in seconds, assuming a fixed 50 Hz sampling rate.
    """
    # Open .mat file and get raw data
    mat = scipy.io.loadmat(filePath)
    data = mat.get('dynamicrobotframe')
    x_filtered, y_filtered, z_filtered, x_first, y_first, z_first = filter_krypton_data(data)
    # Assume sampling at 50 Hz, i.e. a fixed 0.02 s step between samples.
    # NOTE: the previous np.linspace(0.0, n*0.02, n) produced a step of
    # 0.02*n/(n-1) rather than exactly 0.02 s; arange gives the exact step.
    t_real = np.arange(len(z_filtered)) * 0.02
    # Take initial position as 0 and convert from mm to m
    x_real = (x_filtered - x_first) / 1000.0
    y_real = (y_filtered - y_first) / 1000.0
    z_real = (z_filtered - z_first) / 1000.0
    return x_real, y_real, z_real, t_real
def filter_krypton_data(data):
    """
    Replace samples where the camera lost vision with NaN.

    The camera resets to a fixed coordinate when it loses vision; every
    sample within +/- 1% of that reset coordinate is treated as invalid.
    The rows of *data* are modified in place.

    Parameters
    ----------
    data : ndarray
        Array whose rows 0, 1, 2 hold the x, y, z coordinates
        (millimeters, float dtype so NaN can be assigned).

    Returns
    -------
    x_filtered, y_filtered, z_filtered : ndarray
        The rows of *data* with lost-vision samples set to NaN.
    x_first, y_first, z_first : float
        First valid (non-NaN) sample of each axis; falls back to the
        first element (possibly NaN) when the whole axis is invalid.
    """
    x_filtered = data[0]
    y_filtered = data[1]
    z_filtered = data[2]
    # Coordinate the camera resets to when it loses vision, per axis.
    reset_points = (4894, 268, 682)
    filter_lim = 0.01  # relative range around the reset coordinate
    firsts = []
    for axis, ref in zip((x_filtered, y_filtered, z_filtered), reset_points):
        # Vectorized mask: every sample inside ref*(1 +/- filter_lim)
        # is a lost-vision reset value. np.nan (not np.NaN: that alias
        # was removed in NumPy 2.0) marks it invalid, in place.
        lost = (axis >= ref * (1 - filter_lim)) & (axis <= ref * (1 + filter_lim))
        axis[lost] = np.nan
        # First index where the camera still had vision on this axis.
        valid = np.flatnonzero(~np.isnan(axis))
        firsts.append(axis[valid[0]] if valid.size else axis[0])
    return x_filtered, y_filtered, z_filtered, firsts[0], firsts[1], firsts[2]