hook up kuka grasping to OpenAI dqn
@@ -16,7 +16,7 @@ class KukaGymEnv(gym.Env):

  def __init__(self,
               urdfRoot="",
-              actionRepeat=50,
+              actionRepeat=1,
               isEnableSelfCollision=True,
               renders=True):
    print("init")
@@ -32,6 +32,7 @@ class KukaGymEnv(gym.Env):
      p.connect(p.GUI)
    else:
      p.connect(p.DIRECT)
    #timinglog = p.startStateLogging(p.STATE_LOGGING_PROFILE_TIMINGS, "kukaTimings.json")
    self._seed()
    self.reset()
    observationDim = len(self.getExtendedObservation())
@@ -39,7 +40,7 @@ class KukaGymEnv(gym.Env):
    #print(observationDim)

    observation_high = np.array([np.finfo(np.float32).max] * observationDim)
-   self.action_space = spaces.Discrete(9)
+   self.action_space = spaces.Discrete(7)
    self.observation_space = spaces.Box(-observation_high, observation_high)
    self.viewer = None

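With the action space narrowed to Discrete(7) and the observation bounds built from observationDim, a quick sanity check of the exposed spaces could look like the sketch below. This is illustrative driver code, not part of the commit; it assumes the file is importable as kukaGymEnv and that the plane/table/block URDFs are resolvable from urdfRoot.

    # sketch: inspect the spaces a DQN agent would see (hypothetical driver code)
    from kukaGymEnv import KukaGymEnv   # assumed module name

    env = KukaGymEnv(renders=False)     # connects via p.DIRECT, no GUI
    print(env.action_space)             # Discrete(7)
    print(env.observation_space.shape)  # (observationDim,) = kuka obs + block pos + orn
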
@@ -48,14 +49,16 @@ class KukaGymEnv(gym.Env):
    p.setPhysicsEngineParameter(numSolverIterations=150)
    p.setTimeStep(self._timeStep)
    p.loadURDF("%splane.urdf" % self._urdfRoot,[0,0,-1])
    if self._renders:
      p.resetDebugVisualizerCamera(1.3,180,-41,[0.52,-0.2,-0.33])
    p.loadURDF("table/table.urdf", 0.5000000,0.00000,-.820000,0.000000,0.000000,0.0,1.0)

    dist = 5 +2.*random.random()
    ang = 2.*3.1415925438*random.random()

    ballx = dist * math.sin(ang)
    bally = dist * math.cos(ang)
    ballz = 1

    xpos = 0.5 +0.25*random.random()
    ypos = 0 +0.22*random.random()
    ang = 3.1415925438*random.random()
    orn = p.getQuaternionFromEuler([0,0,ang])
    self.blockUid =p.loadURDF("block.urdf", xpos,ypos,0.02,orn[0],orn[1],orn[2],orn[3])

    p.setGravity(0,0,-10)
    self._kuka = kuka.Kuka(urdfRootPath=self._urdfRoot, timeStep=self._timeStep)
    self._envStepCounter = 0
@@ -72,9 +75,22 @@ class KukaGymEnv(gym.Env):

  def getExtendedObservation(self):
    self._observation = self._kuka.getObservation()
    pos,orn = p.getBasePositionAndOrientation(self.blockUid)
    self._observation.extend(list(pos))
    self._observation.extend(list(orn))

    return self._observation


  def _step(self, action):
    dv = 0.005
    dx = [0,-dv,dv,0,0,0,0][action]
    dy = [0,0,0,-dv,dv,0,0][action]
    da = [0,0,0,0,0,-dv,dv][action]
    f = 0.3
    realAction = [dx,dy,-0.1,da,f]
    return self.step2( realAction)

  def step2(self, action):
    self._kuka.applyAction(action)
    for i in range(self._actionRepeat):
      p.stepSimulation()
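For readers wiring this up to a discrete-action learner: _step above turns an action index into a continuous [dx, dy, dz, da, fingerAngle] command, with dz fixed at -0.1 and the finger angle at 0.3. A small illustration of the resulting mapping, not part of the commit:

    # sketch: print the continuous command produced by each of the 7 discrete actions
    dv = 0.005
    for action in range(7):
      dx = [0,-dv,dv,0,0,0,0][action]
      dy = [0,0,0,-dv,dv,0,0][action]
      da = [0,0,0,0,0,-dv,dv][action]
      print(action, [dx, dy, -0.1, da, 0.3])
    # 0: drift down only, 1/2: -x/+x, 3/4: -y/+y, 5/6: -yaw/+yaw
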
@@ -84,8 +100,11 @@ class KukaGymEnv(gym.Env):
      if self._termination():
        break
      self._envStepCounter += 1
    reward = self._reward()
    #print("self._envStepCounter")
    #print(self._envStepCounter)

    done = self._termination()
    reward = self._reward()
    #print("len=%r" % len(self._observation))

    return np.array(self._observation), reward, done, {}
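Put together, a random-action rollout through _step/step2 might look like the hedged sketch below (driver code assumed, not in this diff; it relies on reset() being called from __init__ as shown above and on the old gym step signature (observation, reward, done, info)):

    # sketch: roll the environment forward with random discrete actions
    from kukaGymEnv import KukaGymEnv   # assumed module name

    env = KukaGymEnv(renders=False)
    done = False
    while not done:
      action = env.action_space.sample()            # 0..6
      obs, reward, done, info = env._step(action)
    print("episode ended, block-height reward:", reward)
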
@@ -94,10 +113,36 @@ class KukaGymEnv(gym.Env):
    return

  def _termination(self):
-   return self._envStepCounter>1000
    #print (self._kuka.endEffectorPos[2])
    if (self._envStepCounter>1000):
      for i in range (1000):
        p.stepSimulation()
      #start grasp and terminate
      fingerAngle = 0.3


      for i in range (5000):
        graspAction = [0,0,0,0,fingerAngle]
        self._kuka.applyAction(graspAction)
        p.stepSimulation()
        fingerAngle = fingerAngle-(0.3/5000.)

      for i in range (5000):
        graspAction = [0,0,0.0001,0,0]
        self._kuka.applyAction(graspAction)
        p.stepSimulation()
        fingerAngle = fingerAngle-(0.3/10000.)

      self._observation = self.getExtendedObservation()
      return True
    return False

  def _reward(self):
    #todo
    reward=0

    #rewards is height of target object
    pos,orn=p.getBasePositionAndOrientation(self.blockUid)
    reward = pos[2]
    if (reward>0.2):
      print("reward")
      print(reward)
    return reward
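Given the commit message, the environment is presumably driven by OpenAI's DQN; a rough training sketch in the style of the baselines deepq examples of the time might look like this (the API usage and file names here are assumptions, not part of this diff):

    # sketch: train DQN from OpenAI baselines on KukaGymEnv (illustrative only)
    from baselines import deepq
    from kukaGymEnv import KukaGymEnv   # assumed module name

    def main():
      env = KukaGymEnv(renders=False)
      model = deepq.models.mlp([64])    # small MLP Q-function
      act = deepq.learn(env,
                        q_func=model,
                        lr=1e-3,
                        max_timesteps=100000,
                        buffer_size=50000,
                        exploration_fraction=0.1,
                        exploration_final_eps=0.02,
                        print_freq=10)
      act.save("kuka_grasp_dqn.pkl")    # hypothetical output path

    if __name__ == '__main__':
      main()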