# function to turn points loaded via pdal into a pyg Data object, with additional channels
+from typing import List
import numpy as np
+import torch
from torch_geometric.data import Data

COLORS_NORMALIZATION_MAX_VALUE = 255.0 * 256.0
RETURN_NUMBER_NORMALIZATION_MAX_VALUE = 7.0


-def lidar_hd_pre_transform(points):
+def lidar_hd_pre_transform(
+    points, pos_keys: List[str], features_keys: List[str], color_keys: List[str]
+):
    """Turn pdal points into torch-geometric Data object.

    Builds a composite (average) color channel on the fly. Calculates NDVI on the fly.

    Args:
-        las_filepath (str): path to the LAS file.
-
+        points: structured numpy array of points, as loaded via pdal.
+        pos_keys (List[str]): list of keys for the point positions (e.g. "X", "Y", "Z").
+        features_keys (List[str]): list of keys for the base features to keep in x.
+        color_keys (List[str]): list of keys for the color channels to normalize and keep in x.
    Returns:
        Data: the point cloud formatted for later deep learning training.

    """
+
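+    # union of every requested dimension, used below to check which optional channels are available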
+    features = pos_keys + features_keys + color_keys
    # Positions and base features
-    pos = np.asarray([points["X"], points["Y"], points["Z"]], dtype=np.float32).transpose()
+    pos = np.asarray([points[k] for k in pos_keys], dtype=np.float32).transpose()
    # normalization
-    occluded_points = points["ReturnNumber"] > 1
+    if "ReturnNumber" in features:
+        occluded_points = points["ReturnNumber"] > 1
+        points["ReturnNumber"] = points["ReturnNumber"] / RETURN_NUMBER_NORMALIZATION_MAX_VALUE
+        points["NumberOfReturns"] = (
+            points["NumberOfReturns"] / RETURN_NUMBER_NORMALIZATION_MAX_VALUE
+        )
+    else:
+        occluded_points = np.zeros(pos.shape[0], dtype=np.bool_)

-    points["ReturnNumber"] = (points["ReturnNumber"]) / (RETURN_NUMBER_NORMALIZATION_MAX_VALUE)
-    points["NumberOfReturns"] = (points["NumberOfReturns"]) / (
-        RETURN_NUMBER_NORMALIZATION_MAX_VALUE
-    )
-
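+    # rescale colors to [0, 1] and zero them out for occluded (non-first-return) points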
-    for color in ["Red", "Green", "Blue", "Infrared"]:
+    for color in color_keys:
        assert points[color].max() <= COLORS_NORMALIZATION_MAX_VALUE
        points[color][:] = points[color] / COLORS_NORMALIZATION_MAX_VALUE
        points[color][occluded_points] = 0.0

    # Additional features:
    # Average color, which will be normalized on the fly based on single-sample statistics
-    rgb_avg = (
-        np.asarray([points["Red"], points["Green"], points["Blue"]], dtype=np.float32)
-        .transpose()
-        .mean(axis=1)
-    )
+    if "Red" in color_keys and "Green" in color_keys and "Blue" in color_keys:
+        rgb_avg = (
+            np.asarray([points["Red"], points["Green"], points["Blue"]], dtype=np.float32)
+            .transpose()
+            .mean(axis=1)
+        )
+    else:
+        rgb_avg = None

    # NDVI
-    ndvi = (points["Infrared"] - points["Red"]) / (points["Infrared"] + points["Red"] + 10**-6)
+    if "Infrared" in color_keys and "Red" in color_keys:
+        ndvi = (points["Infrared"] - points["Red"]) / (points["Infrared"] + points["Red"] + 10**-6)
+    else:
+        ndvi = None
+
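+    # gather whichever derived channels could be computed, keeping values and names aligned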
+    additional_color_features = []
+    additional_color_keys = []
+    if rgb_avg is not None:
+        additional_color_features.append(rgb_avg)
+        additional_color_keys.append("rgb_avg")
+    if ndvi is not None:
+        additional_color_features.append(ndvi)
+        additional_color_keys.append("ndvi")

-    # todo
    x = np.stack(
        [
            points[name]
-            for name in [
-                "Intensity",
-                "ReturnNumber",
-                "NumberOfReturns",
-                "Red",
-                "Green",
-                "Blue",
-                "Infrared",
-            ]
+            for name in features_keys + color_keys
        ]
-        + [rgb_avg, ndvi],
+        + additional_color_features,
        axis=0,
    ).transpose()
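+    # feature names are stored as bytes and must stay aligned with the channel order of x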
-    x_features_names = [
-        "Intensity",
-        "ReturnNumber",
-        "NumberOfReturns",
-        "Red",
-        "Green",
-        "Blue",
-        "Infrared",
-        "rgb_avg",
-        "ndvi",
-    ]
+    x_features_names = [
+        s.encode("utf-8") for s in features_keys + color_keys + additional_color_keys
+    ]
    y = points["Classification"]

-    data = Data(pos=pos, x=x, y=y, x_features_names=x_features_names)
+    data = Data(
+        pos=torch.from_numpy(pos),
+        x=torch.from_numpy(x),
+        y=y,
+        x_features_names=x_features_names,
+    )

    return data
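
A minimal usage sketch, for context: it assumes the PDAL Python bindings, a placeholder LAS path ("some_tile.las"), and key lists that mirror the channels hard-coded in the previous version of the function.

    import json
    import pdal

    # read the tile into a structured numpy array with X, Y, Z, Intensity, colors, etc.
    pipeline = pdal.Pipeline(
        json.dumps({"pipeline": [{"type": "readers.las", "filename": "some_tile.las"}]})
    )
    pipeline.execute()
    points = pipeline.arrays[0]

    data = lidar_hd_pre_transform(
        points,
        pos_keys=["X", "Y", "Z"],
        features_keys=["Intensity", "ReturnNumber", "NumberOfReturns"],
        color_keys=["Red", "Green", "Blue", "Infrared"],
    )
    print(data.x.shape, data.x_features_names)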