demo.py
import torch
from rl_modules.models import actor
from arguments import get_args
import gym
import numpy as np

# process the inputs: clip, normalize and concatenate the observation and goal
def process_inputs(o, g, o_mean, o_std, g_mean, g_std, args):
    o_clip = np.clip(o, -args.clip_obs, args.clip_obs)
    g_clip = np.clip(g, -args.clip_obs, args.clip_obs)
    o_norm = np.clip((o_clip - o_mean) / (o_std), -args.clip_range, args.clip_range)
    g_norm = np.clip((g_clip - g_mean) / (g_std), -args.clip_range, args.clip_range)
    inputs = np.concatenate([o_norm, g_norm])
    inputs = torch.tensor(inputs, dtype=torch.float32)
    return inputs

if __name__ == '__main__':
    args = get_args()
    # load the normalizer statistics and the trained model parameters
    model_path = args.save_dir + args.env_name + '/model.pt'
    o_mean, o_std, g_mean, g_std, model = torch.load(model_path, map_location=lambda storage, loc: storage)
    # create the environment
    env = gym.make(args.env_name)
    # reset once to query the environment for the dimensions needed to build the actor
    observation = env.reset()
    env_params = {'obs': observation['observation'].shape[0],
                  'goal': observation['desired_goal'].shape[0],
                  'action': env.action_space.shape[0],
                  'action_max': env.action_space.high[0],
                  }
    # create the actor network and load the trained weights
    actor_network = actor(env_params)
    actor_network.load_state_dict(model)
    actor_network.eval()
    for i in range(args.demo_length):
        observation = env.reset()
        # start to do the demo
        obs = observation['observation']
        g = observation['desired_goal']
        for t in range(env._max_episode_steps):
            env.render()
            inputs = process_inputs(obs, g, o_mean, o_std, g_mean, g_std, args)
            with torch.no_grad():
                pi = actor_network(inputs)
            action = pi.detach().numpy().squeeze()
            # put the action into the environment
            observation_new, reward, _, info = env.step(action)
            obs = observation_new['observation']
        print('the episode is: {}, is success: {}'.format(i, info['is_success']))
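For reference, below is a minimal sketch of how the process_inputs helper behaves in isolation. The observation/goal sizes and the clip_obs / clip_range values are made up purely for illustration; in the real script they come from the saved checkpoint and arguments.get_args().

# standalone sanity check for process_inputs (hypothetical shapes and clip values)
import numpy as np
from argparse import Namespace
from demo import process_inputs

args = Namespace(clip_obs=200, clip_range=5)     # illustrative values, not the repo defaults
o = np.random.randn(10)                          # fake observation vector
g = np.random.randn(3)                           # fake desired-goal vector
o_mean, o_std = np.zeros(10), np.ones(10)        # fake normalizer statistics
g_mean, g_std = np.zeros(3), np.ones(3)

inputs = process_inputs(o, g, o_mean, o_std, g_mean, g_std, args)
print(inputs.shape)   # torch.Size([13]): normalized observation and goal concatenated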