-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathpreprocessor.py
More file actions
62 lines (54 loc) · 1.86 KB
/
preprocessor.py
File metadata and controls
62 lines (54 loc) · 1.86 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
import torch
from einops import rearrange
class Preprocessor:
    '''
    Holds per-modality normalization statistics and an image transform, and
    applies them to raw observations / actions.

    All mean/std arguments are expected to broadcast against the trailing
    feature dimension of the corresponding inputs (actions, states, proprio).
    `transform` is a callable applied to the preprocessed visual tensor
    (e.g. a torchvision transform pipeline).
    '''
    def __init__(self,
                 action_mean,
                 action_std,
                 state_mean,
                 state_std,
                 proprio_mean,
                 proprio_std,
                 transform,
                 ):
        # Normalization statistics, stored as-is; no copies are made.
        self.action_mean = action_mean
        self.action_std = action_std
        self.state_mean = state_mean
        self.state_std = state_std
        self.proprio_mean = proprio_mean
        self.proprio_std = proprio_std
        # Callable applied to the scaled, channel-first visual tensor.
        self.transform = transform
    def normalize_actions(self, actions):
        '''
        Standardize actions of shape (b, t, action_dim).
        '''
        centered = actions - self.action_mean
        return centered / self.action_std
    def denormalize_actions(self, actions):
        '''
        Invert normalize_actions for actions of shape (b, t, action_dim).
        '''
        rescaled = actions * self.action_std
        return rescaled + self.action_mean
    def normalize_proprios(self, proprio):
        '''
        Standardize proprioception of shape (..., proprio_dim).
        '''
        centered = proprio - self.proprio_mean
        return centered / self.proprio_std
    def normalize_states(self, state):
        '''
        Standardize states of shape (..., state_dim).
        '''
        centered = state - self.state_mean
        return centered / self.state_std
    def preprocess_obs_visual(self, obs_visual):
        '''
        Reorder (b, t, h, w, c) frames to channel-first (b, t, c, h, w)
        and rescale pixel values from [0, 255] to [0, 1].
        '''
        channel_first = rearrange(obs_visual, "b t h w c -> b t c h w")
        return channel_first / 255.0
    def transform_obs_visual(self, obs_visual):
        '''
        Convert a raw visual observation array to a tensor, scale it to
        channel-first [0, 1], and apply the configured transform.
        '''
        visual = torch.tensor(obs_visual)
        visual = self.preprocess_obs_visual(visual)
        return self.transform(visual)
    def transform_obs(self, obs):
        '''
        np arrays to tensors: build a new dict with the transformed visual
        observation and the normalized proprioception.
        '''
        return {
            'visual': self.transform_obs_visual(obs['visual']),
            'proprio': self.normalize_proprios(torch.tensor(obs['proprio'])),
        }