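"""DIAYN launcher for rlkit.

Trains a skill-conditioned SAC agent with the DIAYN objective
("Diversity is All You Need: Learning Skills without a Reward Function",
Eysenbach et al., 2018) on a user-specified gym environment.
"""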
import argparse

import gym
# from gym.envs.mujoco import HalfCheetahEnv

import rlkit.torch.pytorch_util as ptu
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector.step_collector import MdpStepCollector
from rlkit.torch.networks import FlattenMlp
from rlkit.torch.sac.diayn.diayn import DIAYNTrainer
from rlkit.torch.sac.diayn.diayn_env_replay_buffer import DIAYNEnvReplayBuffer
from rlkit.torch.sac.diayn.diayn_path_collector import DIAYNMdpPathCollector
from rlkit.torch.sac.diayn.diayn_torch_online_rl_algorithm import DIAYNTorchOnlineRLAlgorithm
from rlkit.torch.sac.diayn.policies import SkillTanhGaussianPolicy, MakeDeterministic

def experiment(variant, args):
    expl_env = NormalizedBoxEnv(gym.make(str(args.env)))
    eval_env = NormalizedBoxEnv(gym.make(str(args.env)))
    # expl_env = NormalizedBoxEnv(HalfCheetahEnv())
    # eval_env = NormalizedBoxEnv(HalfCheetahEnv())
    obs_dim = expl_env.observation_space.low.size
    action_dim = eval_env.action_space.low.size
    skill_dim = args.skill_dim

    M = variant['layer_size']
    # Twin Q-functions (and their target copies) take the skill as an
    # extra input alongside the observation and action.
    qf1 = FlattenMlp(
        input_size=obs_dim + action_dim + skill_dim,
        output_size=1,
        hidden_sizes=[M, M],
    )
    qf2 = FlattenMlp(
        input_size=obs_dim + action_dim + skill_dim,
        output_size=1,
        hidden_sizes=[M, M],
    )
    target_qf1 = FlattenMlp(
        input_size=obs_dim + action_dim + skill_dim,
        output_size=1,
        hidden_sizes=[M, M],
    )
    target_qf2 = FlattenMlp(
        input_size=obs_dim + action_dim + skill_dim,
        output_size=1,
        hidden_sizes=[M, M],
    )
    # Skill discriminator q(z | s): maps an observation to logits over
    # the skill_dim discrete skills.
    df = FlattenMlp(
        input_size=obs_dim,
        output_size=skill_dim,
        hidden_sizes=[M, M],
    )
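    # For reference (a sketch of the objective, not code from this repo):
    # DIAYN replaces the environment reward with the intrinsic reward
    #     r(s, z) = log q(z | s) - log p(z),
    # where q is the discriminator above and p(z) is the uniform skill
    # prior; presumably DIAYNTrainer computes this reward internally.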
    # Skill-conditioned policy pi(a | s, z).
    policy = SkillTanhGaussianPolicy(
        obs_dim=obs_dim + skill_dim,
        action_dim=action_dim,
        hidden_sizes=[M, M],
        skill_dim=skill_dim,
    )
    eval_policy = MakeDeterministic(policy)
    eval_path_collector = DIAYNMdpPathCollector(
        eval_env,
        eval_policy,
    )
    expl_step_collector = MdpStepCollector(
        expl_env,
        policy,
    )
    # Replay buffer that stores the active skill with each transition.
    replay_buffer = DIAYNEnvReplayBuffer(
        variant['replay_buffer_size'],
        expl_env,
        skill_dim,
    )
    # SAC-style trainer that also updates the skill discriminator.
    trainer = DIAYNTrainer(
        env=eval_env,
        policy=policy,
        qf1=qf1,
        qf2=qf2,
        df=df,
        target_qf1=target_qf1,
        target_qf2=target_qf2,
        **variant['trainer_kwargs']
    )
    algorithm = DIAYNTorchOnlineRLAlgorithm(
        trainer=trainer,
        exploration_env=expl_env,
        evaluation_env=eval_env,
        exploration_data_collector=expl_step_collector,
        evaluation_data_collector=eval_path_collector,
        replay_buffer=replay_buffer,
        **variant['algorithm_kwargs']
    )
    algorithm.to(ptu.device)
    algorithm.train()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('env', type=str,
                        help='environment')
    parser.add_argument('--skill_dim', type=int, default=10,
                        help='skill dimension')
    args = parser.parse_args()
    # noinspection PyTypeChecker
    variant = dict(
        algorithm="DIAYN",
        version="normal",
        layer_size=256,
        replay_buffer_size=int(1E6),
        algorithm_kwargs=dict(
            num_epochs=1000,
            num_eval_steps_per_epoch=5000,
            num_trains_per_train_loop=1000,
            num_expl_steps_per_train_loop=1000,
            min_num_steps_before_training=1000,
            max_path_length=1000,
            batch_size=256,
        ),
        trainer_kwargs=dict(
            discount=0.99,
            soft_target_tau=5e-3,
            target_update_period=1,
            policy_lr=3E-4,
            qf_lr=3E-4,
            reward_scale=1,
            use_automatic_entropy_tuning=True,
        ),
    )
    setup_logger('DIAYN_' + str(args.skill_dim) + '_' + args.env, variant=variant)
    # ptu.set_gpu_mode(True)  # optionally set the GPU (default=False)
    experiment(variant, args)
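# Example invocation (a sketch; 'HalfCheetah-v2' is just an illustrative
# env name, and any gym env with a Box action space should work):
#   python diayn.py HalfCheetah-v2 --skill_dim 10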