Deep Reinforcement Learning -- Deep Q Network
Starting from here we switch to a different game for the demo: CartPole.
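A note on the software versions the listing below assumes (my inference, not stated in the original): it uses the standalone Keras API (Adam(lr=...)) and the pre-0.26 gym interface, where env.reset() returns only the observation and env.step() returns four values. On gym >= 0.26 or gymnasium the two calls look roughly like the sketch below, and the training loop would need the corresponding small changes.

    import gym  # or: import gymnasium as gym

    # Hypothetical adaptation for the newer gym API (not part of the original listing)
    env = gym.make('CartPole-v1')
    state, info = env.reset()                  # reset() now returns (obs, info)
    action = env.action_space.sample()         # any valid action, just for illustration
    next_state, reward, terminated, truncated, info = env.step(action)
    done = terminated or truncated             # the single done flag is split in two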
import sys
import gym
import pylab
import random
import numpy as np
from collections import deque
from keras.layers import Dense
from keras.optimizers import Adam
from keras.models import Sequential

EPISODES = 300


# DQN Agent for the Cartpole
# it uses a Neural Network to approximate the Q function of Q-learning
# and experience replay memory & fixed target q network
class DQNAgent:
    def __init__(self, state_size, action_size):
        # if you want to see Cartpole learning, then change to True
        self.render = True
        self.load_model = False

        # get size of state and action
        self.state_size = state_size
        self.action_size = action_size

        # These are hyper parameters for the DQN
        self.discount_factor = 0.99
        self.learning_rate = 0.001
        self.epsilon = 1.0
        self.epsilon_decay = 0.999
        self.epsilon_min = 0.01
        self.batch_size = 64
        self.train_start = 1000
        # create replay memory using deque
        self.memory = deque(maxlen=2000)

        # create main model and target model
        self.model = self.build_model()
        self.target_model = self.build_model()

        # initialize target model
        self.update_target_model()

        if self.load_model:
            self.model.load_weights("./save_model/cartpole_dqn.h5")

    # approximate Q function using Neural Network
    # state is input and Q Value of each action is output of network
    def build_model(self):
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu',
                        kernel_initializer='he_uniform'))
        model.add(Dense(24, activation='relu',
                        kernel_initializer='he_uniform'))
        model.add(Dense(self.action_size, activation='linear',
                        kernel_initializer='he_uniform'))
        model.summary()
        model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
        return model

    # after some time interval update the target model to be same with model
    def update_target_model(self):
        self.target_model.set_weights(self.model.get_weights())

    # get action from model using epsilon-greedy policy
    def get_action(self, state):
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        else:
            # shape (1, 2): q(s, a); the model predicts the Q value of each
            # action and the largest one is taken as the next action
            q_value = self.model.predict(state)
            return np.argmax(q_value[0])

    # save sample <s,a,r,s'> to the replay memory
    def append_sample(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

    # pick samples randomly from replay memory (with batch_size)
    def train_model(self):
        if len(self.memory) < self.train_start:
            return
        # import pdb; pdb.set_trace()  # debugging breakpoint; uncomment to step through a batch
        batch_size = min(self.batch_size, len(self.memory))
        mini_batch = random.sample(self.memory, batch_size)  # list of 64 samples, each e.g.
        # (array([[-0.04263461, -0.00657423, 0.00506589, -0.00200269]]), 0, 1.0,
        #  array([[-0.04276609, -0.20176846, 0.00502584, 0.29227427]]), False)

        update_input = np.zeros((batch_size, self.state_size))
        update_target = np.zeros((batch_size, self.state_size))
        action, reward, done = [], [], []

        for i in range(self.batch_size):
            update_input[i] = mini_batch[i][0]
            action.append(mini_batch[i][1])
            reward.append(mini_batch[i][2])
            update_target[i] = mini_batch[i][3]
            done.append(mini_batch[i][4])

        target = self.model.predict(update_input)              # shape (64, 2)
        target_val = self.target_model.predict(update_target)  # shape (64, 2)

        for i in range(self.batch_size):
            # Q Learning: get maximum Q value at s' from target model
            if done[i]:
                target[i][action[i]] = reward[i]
            else:
                target[i][action[i]] = reward[i] + self.discount_factor * (
                    np.amax(target_val[i]))  # off-policy update

        # and do the model fit!
        self.model.fit(update_input, target, batch_size=self.batch_size,
                       epochs=1, verbose=0)


if __name__ == "__main__":
    # In case of CartPole-v1, maximum length of episode is 500
    env = gym.make('CartPole-v1')
    # get size of state and action from environment
    state_size = env.observation_space.shape[0]
    action_size = env.action_space.n

    agent = DQNAgent(state_size, action_size)

    scores, episodes = [], []

    for e in range(EPISODES):
        done = False
        score = 0
        state = env.reset()
        state = np.reshape(state, [1, state_size])

        while not done:
            if agent.render:
                env.render()

            # get action for the current state and go one step in environment
            action = agent.get_action(state)
            next_state, reward, done, info = env.step(action)
            next_state = np.reshape(next_state, [1, state_size])
            # if an action makes the episode end, then give a penalty of -100
            reward = reward if not done or score == 499 else -100

            # save the sample <s, a, r, s'> to the replay memory
            agent.append_sample(state, action, reward, next_state, done)
            # every time step do the training
            agent.train_model()
            score += reward
            state = next_state

            if done:
                # every episode update the target model to be same with model
                agent.update_target_model()

                # every episode, plot the play time
                score = score if score == 500 else score + 100
                scores.append(score)
                episodes.append(e)
                pylab.plot(episodes, scores, 'b')
                pylab.savefig("./save_graph/cartpole_dqn.png")
                print("episode:", e, " score:", score, " memory length:",
                      len(agent.memory), " epsilon:", agent.epsilon)

                # if the mean of scores of the last 10 episodes is bigger than 490
                # stop training
                if np.mean(scores[-min(10, len(scores)):]) > 490:
                    sys.exit()

        # save the model
        if e % 50 == 0:
            agent.model.save_weights("./save_model/cartpole_dqn.h5")
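For reference, the target built in train_model is the standard fixed-target Q-learning backup: for each sample $i$, $y_i = r_i$ if $s'_i$ is terminal, otherwise $y_i = r_i + \gamma \max_{a'} Q_{\text{target}}(s'_i, a')$, where $\gamma$ is discount_factor (0.99) and $Q_{\text{target}}$ is the separate target network that update_target_model copies from the main network once per episode. The main network is then fit with MSE loss, and only the output for the action actually taken is changed, so the other action's Q value contributes essentially no gradient. A small aside derived from the hyperparameters above: since epsilon is multiplied by epsilon_decay = 0.999 once per stored sample, it reaches epsilon_min = 0.01 after roughly ln(0.01)/ln(0.999), about 4600 environment steps.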