Merge remote-tracking branch 'upstream/master'

stolk committed 2018-11-09 16:05:25 -08:00
18 changed files with 252 additions and 114 deletions

View File

@@ -1,6 +1,9 @@
#include <stdio.h>
#include "LinearMath/btTransform.h"
#include "LinearMath/btThreads.h"
#include "BulletDynamics/Featherstone/btMultiBodyDynamicsWorld.h"
#include "BulletCollision/CollisionShapes/btCompoundShape.h"
#include "BulletCollision/CollisionShapes/btBvhTriangleMeshShape.h"
@@ -28,10 +31,13 @@ static btVector4 colors[4] =
static btVector4 selectColor2()
{
    static btSpinMutex sMutex;
    sMutex.lock();
    static int curColor = 0;
    btVector4 color = colors[curColor];
    curColor++;
    curColor &= 3;
    sMutex.unlock();
    return color;
}
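
The hunk above makes selectColor2() thread-safe: the static curColor counter is now guarded by a btSpinMutex so that concurrent callers cannot race on it. A minimal sketch of the same guard-a-shared-counter pattern, written in Python for illustration only (threading.Lock standing in for btSpinMutex; none of these names are Bullet API):

import threading

# Hypothetical analogue of selectColor2(): cycle through four colors,
# serializing access to the shared index with a lock.
_colors = [(0.3, 0.3, 1, 1), (0.6, 0.6, 1, 1), (0, 1, 0, 1), (0, 1, 1, 1)]
_cur_color = 0
_color_lock = threading.Lock()

def select_color():
    global _cur_color
    with _color_lock:  # sMutex.lock() ... sMutex.unlock() in the C++ hunk
        color = _colors[_cur_color]
        _cur_color = (_cur_color + 1) & 3
    return color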

View File

@@ -48,9 +48,6 @@ InvertedPendulumPDControl::~InvertedPendulumPDControl()
{
}
///this is a temporary global, until we determine if we need the option or not
extern bool gJointFeedbackInWorldSpace;
extern bool gJointFeedbackInJointFrame;
btMultiBody* createInvertedPendulumMultiBody(btMultiBodyDynamicsWorld* world, GUIHelperInterface* guiHelper, const btTransform& baseWorldTrans, bool fixedBase)
{
@@ -315,8 +312,10 @@ void InvertedPendulumPDControl::initPhysics()
}
int upAxis = 1;
gJointFeedbackInWorldSpace = true;
gJointFeedbackInJointFrame = true;
m_dynamicsWorld->getSolverInfo().m_jointFeedbackInWorldSpace = true;
m_dynamicsWorld->getSolverInfo().m_jointFeedbackInJointFrame = true;
m_guiHelper->setUpAxis(upAxis);

View File

@@ -44,14 +44,9 @@ MultiBodyConstraintFeedbackSetup::~MultiBodyConstraintFeedbackSetup()
{
}
///this is a temporary global, until we determine if we need the option or not
extern bool gJointFeedbackInWorldSpace;
extern bool gJointFeedbackInJointFrame;
void MultiBodyConstraintFeedbackSetup::initPhysics()
{
int upAxis = 2;
gJointFeedbackInWorldSpace = true;
gJointFeedbackInJointFrame = true;
m_guiHelper->setUpAxis(upAxis);
btVector4 colors[4] =
@@ -69,6 +64,10 @@ void MultiBodyConstraintFeedbackSetup::initPhysics()
//btIDebugDraw::DBG_DrawConstraints
+btIDebugDraw::DBG_DrawWireframe + btIDebugDraw::DBG_DrawContactPoints + btIDebugDraw::DBG_DrawAabb); //+btIDebugDraw::DBG_DrawConstraintLimits);
m_dynamicsWorld->getSolverInfo().m_jointFeedbackInWorldSpace = true;
m_dynamicsWorld->getSolverInfo().m_jointFeedbackInJointFrame = true;
//create a static ground object
if (1)
{

View File

@@ -44,15 +44,10 @@ TestJointTorqueSetup::~TestJointTorqueSetup()
{
}
///this is a temporary global, until we determine if we need the option or not
extern bool gJointFeedbackInWorldSpace;
extern bool gJointFeedbackInJointFrame;
void TestJointTorqueSetup::initPhysics()
{
int upAxis = 1;
gJointFeedbackInWorldSpace = true;
gJointFeedbackInJointFrame = true;
m_guiHelper->setUpAxis(upAxis);
@@ -71,6 +66,10 @@ void TestJointTorqueSetup::initPhysics()
//btIDebugDraw::DBG_DrawConstraints
+btIDebugDraw::DBG_DrawWireframe + btIDebugDraw::DBG_DrawContactPoints + btIDebugDraw::DBG_DrawAabb); //+btIDebugDraw::DBG_DrawConstraintLimits);
m_dynamicsWorld->getSolverInfo().m_jointFeedbackInWorldSpace = true;
m_dynamicsWorld->getSolverInfo().m_jointFeedbackInJointFrame = true;
//create a static ground object
if (1)
{

View File

@@ -96,8 +96,6 @@
#include "BulletDynamics/Featherstone/btMultiBodyDynamicsWorld.h"
#endif
extern bool gJointFeedbackInWorldSpace;
extern bool gJointFeedbackInJointFrame;
int gInternalSimFlags = 0;
bool gResetSimulation = 0;
@@ -5583,6 +5581,11 @@ bool PhysicsServerCommandProcessor::processSendDesiredStateCommand(const struct
args.m_ints[1] = bodyUniqueId;
//find the joint motors and apply the desired velocity and maximum force/torque
{
args.m_numInts = 0;
args.m_numFloats = 0;
//syncBodies is expensive/slow, use it only once
m_data->m_pluginManager.executePluginCommand(m_data->m_pdControlPlugin, &args);
int velIndex = 6; //skip the 3 linear + 3 angular degree of freedom velocity entries of the base
int posIndex = 7; //skip 3 positional and 4 orientation (quaternion) positional degrees of freedom of the base
for (int link = 0; link < mb->getNumLinks(); link++)
@@ -7697,11 +7700,11 @@ bool PhysicsServerCommandProcessor::processRequestPhysicsSimulationParametersCom
serverCmd.m_simulationParameterResultArgs.m_gravityAcceleration[2] = grav[2];
serverCmd.m_simulationParameterResultArgs.m_internalSimFlags = gInternalSimFlags;
serverCmd.m_simulationParameterResultArgs.m_jointFeedbackMode = 0;
if (gJointFeedbackInWorldSpace)
if (m_data->m_dynamicsWorld->getSolverInfo().m_jointFeedbackInWorldSpace)
{
serverCmd.m_simulationParameterResultArgs.m_jointFeedbackMode |= JOINT_FEEDBACK_IN_WORLD_SPACE;
}
if (gJointFeedbackInJointFrame)
if (m_data->m_dynamicsWorld->getSolverInfo().m_jointFeedbackInJointFrame)
{
serverCmd.m_simulationParameterResultArgs.m_jointFeedbackMode |= JOINT_FEEDBACK_IN_JOINT_FRAME;
}
@@ -7747,8 +7750,8 @@ bool PhysicsServerCommandProcessor::processSendPhysicsParametersCommand(const st
if (clientCmd.m_updateFlags & SIM_PARAM_UPDATE_JOINT_FEEDBACK_MODE)
{
gJointFeedbackInWorldSpace = (clientCmd.m_physSimParamArgs.m_jointFeedbackMode & JOINT_FEEDBACK_IN_WORLD_SPACE) != 0;
gJointFeedbackInJointFrame = (clientCmd.m_physSimParamArgs.m_jointFeedbackMode & JOINT_FEEDBACK_IN_JOINT_FRAME) != 0;
m_data->m_dynamicsWorld->getSolverInfo().m_jointFeedbackInWorldSpace = (clientCmd.m_physSimParamArgs.m_jointFeedbackMode & JOINT_FEEDBACK_IN_WORLD_SPACE) != 0;
m_data->m_dynamicsWorld->getSolverInfo().m_jointFeedbackInJointFrame = (clientCmd.m_physSimParamArgs.m_jointFeedbackMode & JOINT_FEEDBACK_IN_JOINT_FRAME) != 0;
}
if (clientCmd.m_updateFlags & SIM_PARAM_UPDATE_DELTA_TIME)
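
These hunks retire the gJointFeedbackInWorldSpace / gJointFeedbackInJointFrame globals (also removed from the three example setups above) and read or write the equivalent flags on the dynamics world's solver info, so the setting is owned by the world the command processor serves. From the pybullet client the mode is still chosen per connection; a minimal sketch, assuming the Python module exposes the jointFeedbackMode parameter and the JOINT_FEEDBACK_* constants:

import pybullet as p
import pybullet_data

p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
# Request joint reaction forces in world space; flags may be OR'ed with
# p.JOINT_FEEDBACK_IN_JOINT_FRAME. This lands in the solver info shown above.
p.setPhysicsEngineParameter(jointFeedbackMode=p.JOINT_FEEDBACK_IN_WORLD_SPACE)

robot = p.loadURDF("r2d2.urdf")              # any articulated body
p.enableJointForceTorqueSensor(robot, 0, 1)  # report reaction forces on joint 0
p.stepSimulation()
print(p.getJointState(robot, 0)[2])          # 6-vector of joint reaction forces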

View File

@@ -27,7 +27,6 @@ bool gEnableRendering = true;
bool gActivedVRRealTimeSimulation = false;
bool gEnableSyncPhysicsRendering = true;
bool gEnableUpdateDebugDrawLines = true;
static int gCamVisualizerWidth = 228;
static int gCamVisualizerHeight = 192;
@@ -177,6 +176,7 @@ struct MotionArgs
{
MotionArgs()
: m_debugDrawFlags(0),
m_enableUpdateDebugDrawLines(true),
m_physicsServerPtr(0)
{
for (int i = 0; i < MAX_VR_CONTROLLERS; i++)
@@ -201,7 +201,7 @@ struct MotionArgs
b3CriticalSection* m_csGUI;
int m_debugDrawFlags;
bool m_enableUpdateDebugDrawLines;
btAlignedObjectArray<MyMouseCommand> m_mouseCommands;
b3VRControllerEvent m_vrControllerEvents[MAX_VR_CONTROLLERS];
@@ -424,13 +424,13 @@ void MotionThreadFunc(void* userPtr, void* lsMemory)
args->m_physicsServerPtr->stepSimulationRealTime(deltaTimeInSeconds, args->m_sendVrControllerEvents, numSendVrControllers, keyEvents, args->m_sendKeyEvents.size(), mouseEvents, args->m_sendMouseEvents.size());
}
{
if (gEnableUpdateDebugDrawLines)
args->m_csGUI->lock();
if (args->m_enableUpdateDebugDrawLines)
{
args->m_csGUI->lock();
args->m_physicsServerPtr->physicsDebugDraw(args->m_debugDrawFlags);
gEnableUpdateDebugDrawLines = false;
args->m_csGUI->unlock();
args->m_enableUpdateDebugDrawLines = false;
}
args->m_csGUI->unlock();
}
deltaTimeInSeconds = 0;
}
@@ -1763,9 +1763,9 @@ void PhysicsServerExample::initPhysics()
#endif
}
}
m_args[0].m_cs->lock();
m_args[0].m_cs->setSharedParam(1, eGUIHelperIdle);
m_args[0].m_cs->unlock();
m_args[0].m_cs2->lock();
{
@@ -2844,7 +2844,7 @@ void PhysicsServerExample::physicsDebugDraw(int debugDrawFlags)
//draw stuff and flush?
this->m_multiThreadedHelper->m_debugDraw->drawDebugDrawerLines();
m_args[0].m_debugDrawFlags = debugDrawFlags;
gEnableUpdateDebugDrawLines = true;
m_args[0].m_enableUpdateDebugDrawLines = true;
m_args[0].m_csGUI->unlock();
}
}

View File

@@ -89,7 +89,11 @@ B3_SHARED_API int executePluginCommand_pdControlPlugin(struct b3PluginContext* c
{
MyPDControlContainer* obj = (MyPDControlContainer*)context->m_userPointer;
obj->m_api.syncBodies();
if (arguments->m_numInts == 0)
{
obj->m_api.syncBodies();
return -1;
}
int numObj = obj->m_api.getNumBodies();
//printf("numObj = %d\n", numObj);

View File

@@ -21,28 +21,6 @@ colSphereId = p.createCollisionShape(p.GEOM_SPHERE,radius=sphereRadius)
#convex mesh from obj
stoneId = p.createCollisionShape(p.GEOM_MESH,fileName="stone.obj")
#concave mesh from obj
stoneId = p.createCollisionShape(p.GEOM_MESH,fileName="stone.obj", flags=p.GEOM_FORCE_CONCAVE_TRIMESH)
verts=[[-0.246350, -0.246483, -0.000624],
[ -0.151407, -0.176325, 0.172867],
[ -0.246350, 0.249205, -0.000624],
[ -0.151407, 0.129477, 0.172867],
[ 0.249338, -0.246483, -0.000624],
[ 0.154395, -0.176325, 0.172867],
[ 0.249338, 0.249205, -0.000624],
[ 0.154395, 0.129477, 0.172867]]
#convex mesh from vertices
stoneConvexId = p.createCollisionShape(p.GEOM_MESH,vertices=verts)
indices=[0,3,2,3,6,2,7,4,6,5,0,4,6,0,2,3,5,7,0,1,3,3,7,6,7,5,4,5,1,0,6,4,0,3,1,5]
#concave mesh from vertices+indices
stoneConcaveId = p.createCollisionShape(p.GEOM_MESH,vertices=verts, indices=indices)
stoneId = stoneConvexId
#stoneId = stoneConcaveId
boxHalfLength = 0.5
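
This hunk touches the part of the example that builds collision shapes straight from in-memory data: a convex hull from a vertices list and a concave triangle mesh from vertices plus indices, as alternatives to loading stone.obj. For context, a minimal sketch of how such a shape is typically turned into a body (the mass and position values are made up):

# Hypothetical follow-up to the snippet above: make the convex stone a
# dynamic body and let it fall onto whatever ground the example creates.
stoneBodyId = p.createMultiBody(baseMass=1.0,
                                baseCollisionShapeIndex=stoneConvexId,
                                basePosition=[0, 0, 1])
p.setGravity(0, 0, -10)
for _ in range(240):
    p.stepSimulation()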

View File

@@ -0,0 +1,142 @@
# AI 2018
# Importing the libraries
import os
import numpy as np
import gym
from gym import wrappers
import pybullet_envs
# Setting the Hyper Parameters
class Hp():

    def __init__(self):
        self.nb_steps = 1000
        self.episode_length = 1000
        self.learning_rate = 0.02
        self.nb_directions = 16
        self.nb_best_directions = 16
        assert self.nb_best_directions <= self.nb_directions
        self.noise = 0.03
        self.seed = 1
        self.env_name = 'HalfCheetahBulletEnv-v0'

# Normalizing the states

class Normalizer():

    def __init__(self, nb_inputs):
        self.n = np.zeros(nb_inputs)
        self.mean = np.zeros(nb_inputs)
        self.mean_diff = np.zeros(nb_inputs)
        self.var = np.zeros(nb_inputs)

    def observe(self, x):
        self.n += 1.
        last_mean = self.mean.copy()
        self.mean += (x - self.mean) / self.n
        self.mean_diff += (x - last_mean) * (x - self.mean)
        self.var = (self.mean_diff / self.n).clip(min = 1e-2)

    def normalize(self, inputs):
        obs_mean = self.mean
        obs_std = np.sqrt(self.var)
        return (inputs - obs_mean) / obs_std

# Building the AI

class Policy():

    def __init__(self, input_size, output_size):
        self.theta = np.zeros((output_size, input_size))
        print("self.theta=", self.theta)

    def evaluate(self, input, delta = None, direction = None):
        if direction is None:
            return np.clip(self.theta.dot(input), -1.0, 1.0)
        elif direction == "positive":
            return np.clip((self.theta + hp.noise*delta).dot(input), -1.0, 1.0)
        else:
            return np.clip((self.theta - hp.noise*delta).dot(input), -1.0, 1.0)

    def sample_deltas(self):
        return [np.random.randn(*self.theta.shape) for _ in range(hp.nb_directions)]

    def update(self, rollouts, sigma_r):
        step = np.zeros(self.theta.shape)
        for r_pos, r_neg, d in rollouts:
            step += (r_pos - r_neg) * d
        self.theta += hp.learning_rate / (hp.nb_best_directions * sigma_r) * step

# Exploring the policy on one specific direction and over one episode

def explore(env, normalizer, policy, direction = None, delta = None):
    state = env.reset()
    done = False
    num_plays = 0.
    sum_rewards = 0
    while not done and num_plays < hp.episode_length:
        normalizer.observe(state)
        state = normalizer.normalize(state)
        action = policy.evaluate(state, delta, direction)
        state, reward, done, _ = env.step(action)
        reward = max(min(reward, 1), -1)
        sum_rewards += reward
        num_plays += 1
    return sum_rewards

# Training the AI

def train(env, policy, normalizer, hp):
    for step in range(hp.nb_steps):
        # Initializing the perturbations deltas and the positive/negative rewards
        deltas = policy.sample_deltas()
        positive_rewards = [0] * hp.nb_directions
        negative_rewards = [0] * hp.nb_directions
        # Getting the positive rewards in the positive directions
        for k in range(hp.nb_directions):
            positive_rewards[k] = explore(env, normalizer, policy, direction = "positive", delta = deltas[k])
        # Getting the negative rewards in the negative/opposite directions
        for k in range(hp.nb_directions):
            negative_rewards[k] = explore(env, normalizer, policy, direction = "negative", delta = deltas[k])
        # Gathering all the positive/negative rewards to compute the standard deviation of these rewards
        all_rewards = np.array(positive_rewards + negative_rewards)
        sigma_r = all_rewards.std()
        # Sorting the rollouts by the max(r_pos, r_neg) and selecting the best directions
        scores = {k:max(r_pos, r_neg) for k,(r_pos,r_neg) in enumerate(zip(positive_rewards, negative_rewards))}
        order = sorted(scores.keys(), key = lambda x:scores[x])[:hp.nb_best_directions]
        rollouts = [(positive_rewards[k], negative_rewards[k], deltas[k]) for k in order]
        # Updating our policy
        policy.update(rollouts, sigma_r)
        # Printing the final reward of the policy after the update
        reward_evaluation = explore(env, normalizer, policy)
        print('Step:', step, 'Reward:', reward_evaluation)

# Running the main code

def mkdir(base, name):
    path = os.path.join(base, name)
    if not os.path.exists(path):
        os.makedirs(path)
    return path
work_dir = mkdir('exp', 'brs')
monitor_dir = mkdir(work_dir, 'monitor')
hp = Hp()
np.random.seed(hp.seed)
env = gym.make(hp.env_name)
# env.render(mode = "human")
#env = wrappers.Monitor(env, monitor_dir, force = True)
nb_inputs = env.observation_space.shape[0]
nb_outputs = env.action_space.shape[0]
policy = Policy(nb_inputs, nb_outputs)
normalizer = Normalizer(nb_inputs)
train(env, policy, normalizer, hp)
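
The new file is a self-contained Augmented Random Search (ARS) trainer for the HalfCheetahBulletEnv-v0 environment: each step it samples random perturbations of the linear policy, rolls out the +noise and -noise variants, and nudges theta along the reward-weighted sum of the sampled directions, with observations whitened by the running Normalizer. A minimal sketch of evaluating the result once train() returns (it reuses the env, policy and normalizer objects defined above; this greedy rollout is not part of the script itself):

# Hypothetical evaluation pass: play one episode with the learned theta
# (direction=None means no exploration noise) and report the raw return.
state = env.reset()
done, total_reward = False, 0.0
while not done:
    normalizer.observe(state)
    action = policy.evaluate(normalizer.normalize(state))
    state, reward, done, _ = env.step(action)
    total_reward += reward
print('Greedy episode return:', total_reward)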

View File

@@ -2039,7 +2039,7 @@ static PyObject* pybullet_setJointMotorControl(PyObject* self, PyObject* args)
(controlMode != CONTROL_MODE_TORQUE) &&
(controlMode != CONTROL_MODE_POSITION_VELOCITY_PD))
{
PyErr_SetString(SpamError, "Illegral control mode.");
PyErr_SetString(SpamError, "Illegal control mode.");
return NULL;
}
@@ -2140,9 +2140,10 @@ static PyObject* pybullet_setJointMotorControlArray(PyObject* self, PyObject* ar
if ((controlMode != CONTROL_MODE_VELOCITY) &&
(controlMode != CONTROL_MODE_TORQUE) &&
(controlMode != CONTROL_MODE_POSITION_VELOCITY_PD))
(controlMode != CONTROL_MODE_POSITION_VELOCITY_PD)&&
(controlMode != CONTROL_MODE_PD))
{
PyErr_SetString(SpamError, "Illegral control mode.");
PyErr_SetString(SpamError, "Illegal control mode.");
return NULL;
}
@@ -2340,7 +2341,7 @@ static PyObject* pybullet_setJointMotorControlArray(PyObject* self, PyObject* ar
break;
}
case CONTROL_MODE_POSITION_VELOCITY_PD:
default:
{
b3JointControlSetDesiredPosition(commandHandle, info.m_qIndex,
targetPosition);
@@ -2351,9 +2352,6 @@ static PyObject* pybullet_setJointMotorControlArray(PyObject* self, PyObject* ar
b3JointControlSetMaximumForce(commandHandle, info.m_uIndex, force);
break;
}
default:
{
}
};
}
@@ -2440,7 +2438,7 @@ static PyObject* pybullet_setJointMotorControl2(PyObject* self, PyObject* args,
(controlMode != CONTROL_MODE_POSITION_VELOCITY_PD) &&
(controlMode != CONTROL_MODE_PD))
{
PyErr_SetString(SpamError, "Illegral control mode.");
PyErr_SetString(SpamError, "Illegal control mode.");
return NULL;
}