main.py
import GameEnv
import pygame
import numpy as np
from ddqn_keras import DDQNAgent
from collections import deque
import random, math

TOTAL_GAMETIME = 1000  # Max game time for one episode
N_EPISODES = 10000
REPLACE_TARGET = 50    # Sync the target network every 50 episodes

game = GameEnv.RacingEnv()
game.fps = 60

GameTime = 0
GameHistory = []
renderFlag = False

# Agent: 19-dimensional observation, 5 discrete actions,
# epsilon decays from 1.00 towards 0.10 by a factor of 0.9995 per step.
ddqn_agent = DDQNAgent(alpha=0.0005, gamma=0.99, n_actions=5, epsilon=1.00,
                       epsilon_end=0.10, epsilon_dec=0.9995,
                       replace_target=REPLACE_TARGET, batch_size=512,
                       input_dims=19)

# If you want to load the existing model, uncomment the line below.
# Careful: an existing model might be overwritten.
# ddqn_agent.load_model()

ddqn_scores = []
eps_history = []


def run():
    for e in range(N_EPISODES):
        game.reset()  # reset env
        done = False
        score = 0
        counter = 0

        # Take a no-op step to get the initial observation.
        observation_, reward, done = game.step(0)
        observation = np.array(observation_)

        gtime = 0  # set game time back to 0

        renderFlag = False  # if you want to render every episode, set this to True
        if e % 10 == 0 and e > 0:  # render every 10 episodes
            renderFlag = True

        while not done:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    return

            action = ddqn_agent.choose_action(observation)
            observation_, reward, done = game.step(action)
            observation_ = np.array(observation_)

            # Countdown: if no reward is collected, the car is done within 100 ticks.
            if reward == 0:
                counter += 1
                if counter > 100:
                    done = True
            else:
                counter = 0

            score += reward

            ddqn_agent.remember(observation, action, reward, observation_, int(done))
            observation = observation_
            ddqn_agent.learn()

            gtime += 1
            if gtime >= TOTAL_GAMETIME:
                done = True

            if renderFlag:
                game.render(action)

        eps_history.append(ddqn_agent.epsilon)
        ddqn_scores.append(score)
        avg_score = np.mean(ddqn_scores[max(0, e - 100):(e + 1)])

        if e % REPLACE_TARGET == 0 and e > REPLACE_TARGET:
            ddqn_agent.update_network_parameters()

        if e % 10 == 0 and e > 10:
            ddqn_agent.save_model()
            print("save model")

        print('episode: ', e, 'score: %.2f' % score,
              ' average score %.2f' % avg_score,
              ' epsilon: ', ddqn_agent.epsilon,
              ' memory size', ddqn_agent.memory.mem_cntr % ddqn_agent.memory.mem_size)


run()
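
# ---------------------------------------------------------------------------
# Reference only: a minimal sketch of the interfaces this script assumes from
# GameEnv.RacingEnv and ddqn_keras.DDQNAgent, inferred from how they are used
# above. The actual classes in this repository may differ; this is kept as a
# comment so it does not affect execution.
#
#   class RacingEnv:
#       fps: int
#       def reset(self) -> None: ...
#       def step(self, action: int):
#           # returns (observation, reward, done); observation is a sequence
#           # of 19 floats (matching input_dims=19)
#           ...
#       def render(self, action: int) -> None: ...
#
#   class DDQNAgent:
#       def __init__(self, alpha, gamma, n_actions, epsilon, epsilon_end,
#                    epsilon_dec, replace_target, batch_size, input_dims): ...
#       def choose_action(self, observation) -> int: ...
#       def remember(self, state, action, reward, new_state, done) -> None: ...
#       def learn(self) -> None: ...
#       def update_network_parameters(self) -> None: ...
#       def save_model(self) -> None: ...
#       def load_model(self) -> None: ...
# ---------------------------------------------------------------------------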