From b361722500d05a53eaacdc3e5ded402658713338 Mon Sep 17 00:00:00 2001
From: Erwin Coumans
Date: Fri, 9 Jun 2017 19:26:07 -0700
Subject: [PATCH] Implement train_pybullet_racecar.py and
 enjoy_pybullet_racecar.py using OpenAI baselines DQN for the RacecarGymEnv.

---
 .../pybullet/gym/enjoy_pybullet_racecar.py    | 45 +++++++++++++
 examples/pybullet/gym/envs/bullet/racecar.py  | 21 +++---
 .../pybullet/gym/envs/bullet/racecarGymEnv.py | 65 ++++++++++++-------
 .../pybullet/gym/train_pybullet_racecar.py    | 40 ++++++++++++
 4 files changed, 138 insertions(+), 33 deletions(-)
 create mode 100644 examples/pybullet/gym/enjoy_pybullet_racecar.py
 create mode 100644 examples/pybullet/gym/train_pybullet_racecar.py

diff --git a/examples/pybullet/gym/enjoy_pybullet_racecar.py b/examples/pybullet/gym/enjoy_pybullet_racecar.py
new file mode 100644
index 000000000..b2c46337a
--- /dev/null
+++ b/examples/pybullet/gym/enjoy_pybullet_racecar.py
@@ -0,0 +1,45 @@
+import gym
+from envs.bullet.racecarGymEnv import RacecarGymEnv
+
+from baselines import deepq
+
+
+def main():
+
+  env = RacecarGymEnv(render=True)
+  act = deepq.load("racecar_model.pkl")
+  print(act)
+  while True:
+    obs, done = env.reset(), False
+    print("===================================")
+    print("obs")
+    print(obs)
+    episode_rew = 0
+    while not done:
+      #env.render()
+
+      print("!!!!!!!!!!!!!!!!!!!!!!!!!!")
+      print("obs")
+      print(obs)
+      print("???????????????????????????")
+      print("obs[None]")
+      print(obs[None])
+      o = obs[None]
+      print("o")
+      print(o)
+      aa = act(o)
+      print("aa")
+      print (aa)
+      a = aa[0]
+      print("a")
+      print(a)
+      obs, rew, done, _ = env.step(a)
+      print("===================================")
+      print("obs")
+      print(obs)
+      episode_rew += rew
+    print("Episode reward", episode_rew)
+
+
+if __name__ == '__main__':
+  main()
diff --git a/examples/pybullet/gym/envs/bullet/racecar.py b/examples/pybullet/gym/envs/bullet/racecar.py
index 3ceb923f5..9737ede90 100644
--- a/examples/pybullet/gym/envs/bullet/racecar.py
+++ b/examples/pybullet/gym/envs/bullet/racecar.py
@@ -22,7 +22,7 @@ class Racecar:
     self.motorizedWheels = [2]
     self.steeringLinks=[4,6]
     self.speedMultiplier = 10.
-  
+
   def getActionDimension(self):
     return self.nMotors
 
@@ -33,22 +33,25 @@ class Racecar:
   def getObservation(self):
     observation = []
     pos,orn=p.getBasePositionAndOrientation(self.racecarUniqueId)
+    observation.extend(list(pos))
+    observation.extend(list(orn))
+
     return observation
 
   def applyAction(self, motorCommands):
     targetVelocity=motorCommands[0]*self.speedMultiplier
-    print("targetVelocity")
-    print(targetVelocity)
+    #print("targetVelocity")
+    #print(targetVelocity)
     steeringAngle = motorCommands[1]
-    print("steeringAngle")
-    print(steeringAngle)
-    print("maxForce")
-    print(self.maxForce)
+    #print("steeringAngle")
+    #print(steeringAngle)
+    #print("maxForce")
+    #print(self.maxForce)
     for motor in self.motorizedwheels:
-      p.setJointMotorControl2(self.racecarUniqueId,motor,p.VELOCITY_CONTROL,targetVelocity=targetVelocity,force=self.maxForce)
+      p.setJointMotorControl2(self.racecarUniqueId,motor,p.VELOCITY_CONTROL,targetVelocity=targetVelocity,force=self.maxForce)
     for steer in self.steeringLinks:
       p.setJointMotorControl2(self.racecarUniqueId,steer,p.POSITION_CONTROL,targetPosition=steeringAngle)
-    
\ No newline at end of file
+    
\ No newline at end of file
diff --git a/examples/pybullet/gym/envs/bullet/racecarGymEnv.py b/examples/pybullet/gym/envs/bullet/racecarGymEnv.py
index 87c2d0be5..6e12bdbde 100644
--- a/examples/pybullet/gym/envs/bullet/racecarGymEnv.py
+++ b/examples/pybullet/gym/envs/bullet/racecarGymEnv.py
@@ -6,6 +6,7 @@ import numpy as np
 import time
 import pybullet as p
 from . import racecar
+import random
 
 class RacecarGymEnv(gym.Env):
   metadata = {
@@ -34,11 +35,12 @@
       p.connect(p.DIRECT)
     self._seed()
     self.reset()
-    observationDim = self._racecar.getObservationDimension()
-    observation_high = np.array([np.finfo(np.float32).max] * observationDim)
-    actionDim = 8
-    action_high = np.array([1] * actionDim)
-    self.action_space = spaces.Box(-action_high, action_high)
+    observationDim = len(self.getExtendedObservation())
+    #print("observationDim")
+    #print(observationDim)
+
+    observation_high = np.array([np.finfo(np.float32).max] * observationDim)
+    self.action_space = spaces.Discrete(9)
     self.observation_space = spaces.Box(-observation_high, observation_high)
     self.viewer = None
@@ -49,14 +51,21 @@
     #p.loadURDF("%splane.urdf" % self._urdfRoot)
     p.loadSDF("%sstadium.sdf" % self._urdfRoot)
-    self._ballUniqueId = p.loadURDF("sphere2.urdf",[20,20,1])
+    dist = 1.+10.*random.random()
+    ang = 2.*3.1415925438*random.random()
+
+    ballx = dist * math.sin(ang)
+    bally = dist * math.cos(ang)
+    ballz = 1
+
+    self._ballUniqueId = p.loadURDF("sphere2.urdf",[ballx,bally,ballz])
     p.setGravity(0,0,-10)
     self._racecar = racecar.Racecar(urdfRootPath=self._urdfRoot, timeStep=self._timeStep)
     self._envStepCounter = 0
     for i in range(100):
       p.stepSimulation()
-    self._observation = self._racecar.getObservation()
-    return self._observation
+    self._observation = self.getExtendedObservation()
+    return np.array(self._observation)
 
   def __del__(self):
     p.disconnect()
@@ -65,44 +74,52 @@
     self.np_random, seed = seeding.np_random(seed)
     return [seed]
 
+  def getExtendedObservation(self):
+    self._observation = self._racecar.getObservation()
+    pos,orn = p.getBasePositionAndOrientation(self._ballUniqueId)
+    self._observation.extend(list(pos))
+    return self._observation
+
   def _step(self, action):
     if (self._render):
       basePos,orn = p.getBasePositionAndOrientation(self._racecar.racecarUniqueId)
-      p.resetDebugVisualizerCamera(1, 30, -40, basePos)
-
-    if len(action) != self._racecar.getActionDimension():
-      raise ValueError("We expect {} continuous action not {}.".format(self._racecar.getActionDimension(), len(action)))
-
-    for i in range(len(action)):
-      if not -1.01 <= action[i] <= 1.01:
-        raise ValueError("{}th action should be between -1 and 1 not {}.".format(i, action[i]))
-
-    self._racecar.applyAction(action)
+      #p.resetDebugVisualizerCamera(1, 30, -40, basePos)
+
+    fwd = [-1,-1,-1,0,0,0,1,1,1]
+    steerings = [-0.5,0,0.5,-0.5,0,0.5,-0.5,0,0.5]
+    forward = fwd[action]
+    steer = steerings[action]
+    realaction = [forward,steer]
+    self._racecar.applyAction(realaction)
     for i in range(self._actionRepeat):
       p.stepSimulation()
       if self._render:
         time.sleep(self._timeStep)
-      self._observation = self._racecar.getObservation()
+      self._observation = self.getExtendedObservation()
+
       if self._termination():
         break
       self._envStepCounter += 1
     reward = self._reward()
     done = self._termination()
+    #print("len=%r" % len(self._observation))
+
     return np.array(self._observation), reward, done, {}
 
   def _render(self, mode='human', close=False):
     return
 
   def _termination(self):
-    return False
+    return self._envStepCounter>1000
 
   def _reward(self):
     closestPoints = p.getClosestPoints(self._racecar.racecarUniqueId,self._ballUniqueId,10000)
+
     numPt = len(closestPoints)
     reward=-1000
-    print(numPt)
+    #print(numPt)
     if (numPt>0):
-      print("reward:")
-      reward = closestPoints[0][8]
-      print(reward)
+      #print("reward:")
+      reward = -closestPoints[0][8]
+      #print(reward)
     return reward
diff --git a/examples/pybullet/gym/train_pybullet_racecar.py b/examples/pybullet/gym/train_pybullet_racecar.py
new file mode 100644
index 000000000..3ce064d8d
--- /dev/null
+++ b/examples/pybullet/gym/train_pybullet_racecar.py
@@ -0,0 +1,40 @@
+import gym
+from envs.bullet.racecarGymEnv import RacecarGymEnv
+
+from baselines import deepq
+
+import datetime
+
+
+
+def callback(lcl, glb):
+  # stop training if reward exceeds 199
+  is_solved = lcl['t'] > 100 and sum(lcl['episode_rewards'][-101:-1]) / 100 >= 199
+  #uniq_filename = "racecar_model" + str(datetime.datetime.now().date()) + '_' + str(datetime.datetime.now().time()).replace(':', '.')
+  #print("uniq_filename=")
+  #print(uniq_filename)
+  #act.save(uniq_filename)
+  return is_solved
+
+
+def main():
+
+  env = RacecarGymEnv(render=False)
+  model = deepq.models.mlp([64])
+  act = deepq.learn(
+    env,
+    q_func=model,
+    lr=1e-3,
+    max_timesteps=10000000,
+    buffer_size=50000,
+    exploration_fraction=0.1,
+    exploration_final_eps=0.02,
+    print_freq=10,
+    callback=callback
+  )
+  print("Saving model to racecar_model.pkl")
+  act.save("racecar_model.pkl")
+
+
+if __name__ == '__main__':
+  main()
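
Note on the action space: the key change above is the switch from a continuous Box action space to Discrete(9), so that baselines DQN can emit an integer index which _step decodes into a [forward, steer] command through two lookup tables. The short sketch below simply mirrors those tables outside the simulator so the mapping can be inspected without pybullet or baselines installed; the decode_action helper name is illustrative only and is not part of the patch.

    # Standalone sketch (not part of the patch): decode a Discrete(9) action index
    # into the [forward, steer] pair that Racecar.applyAction receives.
    # The 9 actions are the cross product of 3 throttle values and 3 steering angles.
    FWD = [-1, -1, -1, 0, 0, 0, 1, 1, 1]                    # throttle per action index
    STEERINGS = [-0.5, 0, 0.5, -0.5, 0, 0.5, -0.5, 0, 0.5]  # steering angle per action index

    def decode_action(action):
      # Same lookup as RacecarGymEnv._step above: index 0..8 -> [forward, steer].
      return [FWD[action], STEERINGS[action]]

    if __name__ == '__main__':
      for a in range(9):
        print(a, decode_action(a))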