Implement train_pybullet_racecar.py and enjoy_pybullet_racecar.py using OpenAI baselines DQN for the RacecarGymEnv.

Erwin Coumans
2017-06-09 19:26:07 -07:00
parent 82e3c553b9
commit b361722500
4 changed files with 138 additions and 33 deletions

enjoy_pybullet_racecar.py (new file)

@@ -0,0 +1,45 @@
import gym
from envs.bullet.racecarGymEnv import RacecarGymEnv

from baselines import deepq


def main():
    env = RacecarGymEnv(render=True)
    act = deepq.load("racecar_model.pkl")
    while True:
        obs, done = env.reset(), False
        episode_rew = 0
        while not done:
            # act expects a batch of observations, so add a batch
            # dimension with obs[None] and unpack the single action.
            a = act(obs[None])[0]
            obs, rew, done, _ = env.step(a)
            episode_rew += rew
        print("Episode reward", episode_rew)


if __name__ == '__main__':
    main()
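The obs[None] indexing is numpy shorthand for adding a batch dimension: the act function returned by deepq.load expects a batch of observations and returns a batch of actions, hence act(obs[None])[0]. A small illustration; the 10-element size matches getExtendedObservation in the env changes below (car position, car orientation quaternion, ball position):

    import numpy as np

    obs = np.zeros(10)       # one observation vector
    print(obs.shape)         # (10,)
    print(obs[None].shape)   # (1, 10): a batch containing a single observation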

envs/bullet/racecar.py

@@ -22,7 +22,7 @@ class Racecar:
     self.motorizedWheels = [2]
     self.steeringLinks = [4, 6]
     self.speedMultiplier = 10.

   def getActionDimension(self):
     return self.nMotors
@@ -33,22 +33,25 @@ class Racecar:
   def getObservation(self):
     observation = []
     pos, orn = p.getBasePositionAndOrientation(self.racecarUniqueId)
     observation.extend(list(pos))
+    observation.extend(list(orn))
     return observation

   def applyAction(self, motorCommands):
     targetVelocity = motorCommands[0] * self.speedMultiplier
-    print("targetVelocity")
-    print(targetVelocity)
+    #print("targetVelocity")
+    #print(targetVelocity)
     steeringAngle = motorCommands[1]
-    print("steeringAngle")
-    print(steeringAngle)
-    print("maxForce")
-    print(self.maxForce)
+    #print("steeringAngle")
+    #print(steeringAngle)
+    #print("maxForce")
+    #print(self.maxForce)
     for motor in self.motorizedWheels:
       p.setJointMotorControl2(self.racecarUniqueId, motor, p.VELOCITY_CONTROL, targetVelocity=targetVelocity, force=self.maxForce)
     for steer in self.steeringLinks:
       p.setJointMotorControl2(self.racecarUniqueId, steer, p.POSITION_CONTROL, targetPosition=steeringAngle)
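applyAction treats motorCommands as a (velocity, steering) pair: the first entry, scaled by speedMultiplier, is sent to the wheel joints as a VELOCITY_CONTROL target, and the second is sent to the steering joints as a POSITION_CONTROL target angle. A minimal sketch of driving the class directly; it assumes Racecar loads its URDF on construction (as the env code below suggests) and that urdfRootPath points at a directory containing the racecar model:

    import pybullet as p
    from envs.bullet import racecar

    p.connect(p.DIRECT)
    car = racecar.Racecar(urdfRootPath="data/", timeStep=0.01)  # "data/" is a placeholder path
    car.applyAction([1.0, 0.0])  # targetVelocity = 1.0 * 10. = 10, wheels straight
    for _ in range(100):
        p.stepSimulation()
    print(car.getObservation())  # 7 values: base position (3) + orientation quaternion (4)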

envs/bullet/racecarGymEnv.py

@@ -6,6 +6,7 @@ import numpy as np
 import time
 import pybullet as p
 from . import racecar
+import random

 class RacecarGymEnv(gym.Env):
   metadata = {
@@ -34,11 +35,12 @@ class RacecarGymEnv(gym.Env):
       p.connect(p.DIRECT)
     self._seed()
     self.reset()
-    observationDim = self._racecar.getObservationDimension()
+    observationDim = len(self.getExtendedObservation())
+    #print("observationDim")
+    #print(observationDim)
     observation_high = np.array([np.finfo(np.float32).max] * observationDim)
-    actionDim = 8
-    action_high = np.array([1] * actionDim)
-    self.action_space = spaces.Box(-action_high, action_high)
+    self.action_space = spaces.Discrete(9)
     self.observation_space = spaces.Box(-observation_high, observation_high)
     self.viewer = None
@@ -49,14 +51,21 @@
     #p.loadURDF("%splane.urdf" % self._urdfRoot)
     p.loadSDF("%sstadium.sdf" % self._urdfRoot)
-    self._ballUniqueId = p.loadURDF("sphere2.urdf",[20,20,1])
+    # place the ball at a random distance and heading from the origin
+    dist = 1. + 10. * random.random()
+    ang = 2. * math.pi * random.random()
+    ballx = dist * math.sin(ang)
+    bally = dist * math.cos(ang)
+    ballz = 1
+    self._ballUniqueId = p.loadURDF("sphere2.urdf", [ballx, bally, ballz])
     p.setGravity(0,0,-10)
     self._racecar = racecar.Racecar(urdfRootPath=self._urdfRoot, timeStep=self._timeStep)
     self._envStepCounter = 0
     for i in range(100):
       p.stepSimulation()
-    self._observation = self._racecar.getObservation()
-    return self._observation
+    self._observation = self.getExtendedObservation()
+    return np.array(self._observation)

   def __del__(self):
     p.disconnect()
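After this change, reset returns a 10-element vector: the car's base position (3 values) and orientation quaternion (4 values) from Racecar.getObservation, plus the ball's position (3 values) appended by getExtendedObservation in the next hunk. A quick shape check, a sketch that assumes the package imports resolve as in the scripts above and the URDF/SDF data files are on disk:

    from envs.bullet.racecarGymEnv import RacecarGymEnv

    env = RacecarGymEnv(render=False)
    obs = env.reset()
    assert len(obs) == 10                        # 3 + 4 + 3
    assert env.observation_space.shape == (10,)  # Box bounds built from observationDim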
@@ -65,44 +74,52 @@
     self.np_random, seed = seeding.np_random(seed)
     return [seed]

+  def getExtendedObservation(self):
+    self._observation = self._racecar.getObservation()
+    pos, orn = p.getBasePositionAndOrientation(self._ballUniqueId)
+    self._observation.extend(list(pos))
+    return self._observation
+
   def _step(self, action):
     if (self._render):
       basePos, orn = p.getBasePositionAndOrientation(self._racecar.racecarUniqueId)
-      p.resetDebugVisualizerCamera(1, 30, -40, basePos)
+      #p.resetDebugVisualizerCamera(1, 30, -40, basePos)

-    if len(action) != self._racecar.getActionDimension():
-      raise ValueError("We expect {} continuous action not {}.".format(self._racecar.getActionDimension(), len(action)))
-    for i in range(len(action)):
-      if not -1.01 <= action[i] <= 1.01:
-        raise ValueError("{}th action should be between -1 and 1 not {}.".format(i, action[i]))
-    self._racecar.applyAction(action)
+    fwd = [-1, -1, -1, 0, 0, 0, 1, 1, 1]
+    steerings = [-0.5, 0, 0.5, -0.5, 0, 0.5, -0.5, 0, 0.5]
+    forward = fwd[action]
+    steer = steerings[action]
+    realaction = [forward, steer]
+    self._racecar.applyAction(realaction)
     for i in range(self._actionRepeat):
       p.stepSimulation()
       if self._render:
         time.sleep(self._timeStep)
-      self._observation = self._racecar.getObservation()
+      self._observation = self.getExtendedObservation()
       if self._termination():
         break
       self._envStepCounter += 1
     reward = self._reward()
     done = self._termination()
+    #print("len=%r" % len(self._observation))
     return np.array(self._observation), reward, done, {}

   def _render(self, mode='human', close=False):
     return

   def _termination(self):
-    return False
+    return self._envStepCounter > 1000

   def _reward(self):
     closestPoints = p.getClosestPoints(self._racecar.racecarUniqueId, self._ballUniqueId, 10000)
     numPt = len(closestPoints)
     reward = -1000
-    print(numPt)
+    #print(numPt)
     if (numPt > 0):
-      print("reward:")
-      reward = closestPoints[0][8]
-      print(reward)
+      #print("reward:")
+      reward = -closestPoints[0][8]
+      #print(reward)
     return reward
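The fwd and steerings tables in _step define the nine discrete actions as the 3x3 grid of forward commands {-1, 0, 1} crossed with steering angles {-0.5, 0, 0.5}, and the reward negates the contactDistance field (index 8 of a pybullet closest-point record), so it rises toward zero as the car nears the ball. A standalone check of the action layout, pure Python with no simulation:

    fwd = [-1, -1, -1, 0, 0, 0, 1, 1, 1]
    steerings = [-0.5, 0, 0.5, -0.5, 0, 0.5, -0.5, 0, 0.5]
    for action in range(9):
        # row-major layout: action // 3 selects the forward command,
        # action % 3 selects the steering angle
        assert fwd[action] == action // 3 - 1
        assert steerings[action] == (action % 3 - 1) * 0.5
        print(action, "->", [fwd[action], steerings[action]])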

train_pybullet_racecar.py (new file)

@@ -0,0 +1,40 @@
import gym
from envs.bullet.racecarGymEnv import RacecarGymEnv

from baselines import deepq
import datetime


def callback(lcl, glb):
    # stop training once the mean reward over the last 100 episodes reaches 199
    is_solved = lcl['t'] > 100 and sum(lcl['episode_rewards'][-101:-1]) / 100 >= 199
    #uniq_filename = "racecar_model" + str(datetime.datetime.now().date()) + '_' + str(datetime.datetime.now().time()).replace(':', '.')
    #print("uniq_filename=")
    #print(uniq_filename)
    #act.save(uniq_filename)
    return is_solved


def main():
    env = RacecarGymEnv(render=False)
    model = deepq.models.mlp([64])
    act = deepq.learn(
        env,
        q_func=model,
        lr=1e-3,
        max_timesteps=10000000,
        buffer_size=50000,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        print_freq=10,
        callback=callback
    )
    print("Saving model to racecar_model.pkl")
    act.save("racecar_model.pkl")


if __name__ == '__main__':
    main()
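The 199 threshold in callback is carried over from the baselines CartPole example. Since each step's reward here is a negated distance to the ball (at most roughly zero), the mean return over the last 100 episodes can never reach 199, so training effectively runs for the full max_timesteps. A sketch of a stopping rule in this environment's own units; the -50 threshold is an arbitrary assumption, not a value from the commit:

    def callback(lcl, glb):
        # lcl exposes the locals of deepq.learn's training loop:
        # lcl['t'] is the current timestep, lcl['episode_rewards']
        # the list of per-episode returns.
        if lcl['t'] <= 100:
            return False
        mean_100ep = sum(lcl['episode_rewards'][-101:-1]) / 100
        return mean_100ep >= -50  # assumed: "close enough to the ball, on average"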