Rotate goal with start position and orientation of human model
@@ -1,6 +1,8 @@
 from enum import Enum
 import random
 import math
+import pybullet as pb
+import numpy
 
 def randomVal(lowerBound, upperbound):
     difference = upperbound - lowerBound
@@ -26,10 +28,12 @@ class GoalType(Enum):
 class Goal:
     def __init__(self, goal_type: GoalType):
         self.goal_type = goal_type
-        self.generateGoalData()
+        self.follow_rot = True
         self.is_hit_prev = False
+
+        self.generateGoalData()
 
-    def generateGoalData(self, modelPos=[0,0]):
+    def generateGoalData(self, modelPos=[0,0,0], modelOrient=[0,0,0,1]):
         if self.goal_type == GoalType.NoGoal:
             self.goal_data = []
 
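Note: the new default `modelOrient=[0,0,0,1]` is the identity quaternion (pybullet uses x, y, z, w order), so callers that pass no orientation get an unrotated goal. A quick sanity check, assuming only that pybullet is installed:

    import pybullet as pb

    # The identity quaternion maps to the 3x3 identity matrix (row-major,
    # flattened), so the follow_rot branch is a no-op for the default pose.
    m = pb.getMatrixFromQuaternion([0, 0, 0, 1])
    assert list(m) == [1, 0, 0, 0, 1, 0, 0, 0, 1]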
@@ -51,7 +55,24 @@ class Goal:
 
             # Y axis up, z axis in different direction
             self.goal_data = [-x, z, y]
-            self.world_pos = [-x + modelPos[0], z, y + modelPos[1]]
+
+            if self.follow_rot:
+                # Take rotation of human model into account
+                eulerAngles = pb.getEulerFromQuaternion(modelOrient)
+                # Only Y angle matters
+                eulerAngles = [0, eulerAngles[1], 0]
+                yQuat = pb.getQuaternionFromEuler(eulerAngles)
+                rotMatList = pb.getMatrixFromQuaternion(yQuat)
+                rotMat = numpy.array([rotMatList[0:3], rotMatList[3:6], rotMatList[6:9]])
+                vec = numpy.array(self.goal_data)
+                rotatedVec = numpy.dot(rotMat, vec)
+                self.world_pos = rotatedVec.tolist()
+
+                self.world_pos = [self.world_pos[0] + modelPos[0],
+                                  self.world_pos[1],
+                                  self.world_pos[2] + modelPos[2]]
+            else:
+                self.world_pos = [-x + modelPos[0], z, y + modelPos[2]]
 
         elif self.goal_type == GoalType.TargetHeading:
             # Direction: 2D unit vector
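The rotation step above can be exercised on its own. Below is a minimal standalone sketch; the goal offset and pose values are made-up examples, and pybullet's quaternion helpers work without a connected simulation:

    import pybullet as pb
    import numpy

    goal_local = [1.0, 0.5, 2.0]   # goal offset in the model's local frame (illustrative)
    model_pos = [4.0, 0.0, -1.0]   # model base position (illustrative)
    model_orient = pb.getQuaternionFromEuler([0.3, 1.2, -0.4])

    # Keep only the rotation about the Y (up) axis, as generateGoalData does.
    yaw = pb.getEulerFromQuaternion(model_orient)[1]
    y_quat = pb.getQuaternionFromEuler([0, yaw, 0])

    # getMatrixFromQuaternion returns a flat row-major list of 9 values.
    m = pb.getMatrixFromQuaternion(y_quat)
    rot_mat = numpy.array([m[0:3], m[3:6], m[6:9]])

    # Rotate the local goal vector, then translate by the model's X/Z position.
    rotated = rot_mat.dot(numpy.array(goal_local))
    world_pos = [rotated[0] + model_pos[0], rotated[1], rotated[2] + model_pos[2]]
    print(world_pos)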
@@ -938,5 +938,5 @@ class HumanoidStablePD(object):
     return self._pybullet_client\
         .getBasePositionAndOrientation(self._sim_model)
 
-  def getLinkPosition(self, link_id):
-    return self._pybullet_client.getLinkState(self._sim_model, link_id)[0]
+  def getLinkPositionAndOrientation(self, link_id):
+    return tuple(self._pybullet_client.getLinkState(self._sim_model, link_id)[0:2])
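The call sites below unpack both values. A hypothetical usage, assuming `humanoid` is a constructed `HumanoidStablePD`:

    # getLinkState()[0] is the link's world position (x, y, z) and
    # getLinkState()[1] is its world orientation quaternion (x, y, z, w),
    # so the renamed accessor yields a (position, orientation) pair.
    pos, orient = humanoid.getLinkPositionAndOrientation(0)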
@@ -101,8 +101,8 @@ class PyBulletDeepMimicEnv(Env):
     #self._pybullet_client.stepSimulation()
     self._humanoid.resetPose()
     # generate new goal
-    humanPos = self._humanoid.getLinkPosition(0)
-    self.goal.generateGoalData([humanPos[0], humanPos[2]])
+    humanPos, humanOrient = self._humanoid.getLinkPositionAndOrientation(0)
+    self.goal.generateGoalData(humanPos, humanOrient)
     self.needs_update_time = self.t - 1 #force update
 
   def get_num_agents(self):
@@ -257,7 +257,7 @@ class PyBulletDeepMimicEnv(Env):
     goal_weight = 0.3
 
     if self.goal.goal_type == GoalType.Strike:
-      linkPos = self._humanoid.getLinkPosition(HumanoidLinks.rightAnkle)
+      linkPos, linkOrient = self._humanoid.getLinkPositionAndOrientation(HumanoidLinks.rightAnkle)
       reward = mimic_weight * reward + goal_weight * self.calcStrikeGoalReward(linkPos)
 
     return reward
@@ -289,7 +289,7 @@ class PyBulletDeepMimicEnv(Env):
     #print("pybullet_deep_mimic_env:update timeStep=",timeStep," t=",self.t)
     self._pybullet_client.setTimeStep(timeStep)
     self._humanoid._timeStep = timeStep
-    self.updateGoal(self._humanoid.getLinkPosition(HumanoidLinks.rightAnkle))
+    self.updateGoal(self._humanoid.getLinkPositionAndOrientation(HumanoidLinks.rightAnkle)[0])
     if self.target_id is not None: # TODO: check goal type
       self.updateDrawStrikeGoal()
 
@@ -417,8 +417,8 @@ class PyBulletDeepMimicEnv(Env):
     if self.goal.goal_type == GoalType.Strike:
       if self.prevCycleCount != self._humanoid.cycleCount:
         # generate new goal
-        humanPos = self._humanoid.getLinkPosition(0)
-        self.goal.generateGoalData([humanPos[0], humanPos[2]])
+        humanPos, humanOrient = self._humanoid.getLinkPositionAndOrientation(0)
+        self.goal.generateGoalData(humanPos, humanOrient)
         self.prevCycleCount = self._humanoid.cycleCount
 
       goalPos = self.goal.world_pos