Merge branch 'master' of https://github.com/erwincoumans/bullet3
@@ -176,7 +176,7 @@ int compareInverseAndForwardDynamics(vecx &q, vecx &u, vecx &dot_u, btVector3 &g
btAlignedObjectArray<btQuaternion> world_to_local;
btAlignedObjectArray<btVector3> local_origin;
btmb->forwardKinematics(world_to_local, local_origin);
btmb->computeAccelerationsArticulatedBodyAlgorithmMultiDof(dt, scratch_r, scratch_v, scratch_m, isConstraintPass);
btmb->computeAccelerationsArticulatedBodyAlgorithmMultiDof(dt, scratch_r, scratch_v, scratch_m, isConstraintPass, false, false);

// read generalized accelerations back from btMultiBody
// the mapping from scratch variables to accelerations is taken from the implementation

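The hunk above extends the forward-dynamics call used by the inverse/forward-dynamics consistency test. The same idea can be sketched at the pybullet level, assuming an r2d2.urdf stand-in asset and a one-step finite-difference estimate of the joint accelerations; this is illustrative, not the test's actual code:

```python
# Sketch: torques from inverse dynamics should reproduce the requested joint
# accelerations when applied through forward dynamics (up to integration,
# joint damping and limit effects). r2d2.urdf is only a stand-in asset.
import pybullet as p
import pybullet_data

p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
robot = p.loadURDF("r2d2.urdf", useFixedBase=True)
dt = 1.0 / 240.0
p.setTimeStep(dt)
p.setGravity(0, 0, -10)

joints = [j for j in range(p.getNumJoints(robot))
          if p.getJointInfo(robot, j)[2] != p.JOINT_FIXED]
q = [p.getJointState(robot, j)[0] for j in joints]
qdot = [p.getJointState(robot, j)[1] for j in joints]
qddot_des = [0.5] * len(joints)

# inverse dynamics: torques that should produce qddot_des at this state
tau = p.calculateInverseDynamics(robot, q, qdot, qddot_des)

# disable the default velocity motors, apply the torques, step once
p.setJointMotorControlArray(robot, joints, p.VELOCITY_CONTROL, forces=[0] * len(joints))
p.setJointMotorControlArray(robot, joints, p.TORQUE_CONTROL, forces=tau)
p.stepSimulation()

qdot_new = [p.getJointState(robot, j)[1] for j in joints]
qddot_meas = [(v1 - v0) / dt for v0, v1 in zip(qdot, qdot_new)]
print(qddot_meas)  # expected to be close to qddot_des
```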
586 data/humanoid.urdf Normal file
@@ -0,0 +1,586 @@
|
||||
<?xml version="0.0" ?>
|
||||
<robot name="">
|
||||
<link name="torso">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="-0.01000 0.00000 -0.12000"/>
|
||||
<mass value="8.90746"/>
|
||||
<inertia ixx="0.21526" ixy="0" ixz="0" iyy="0.18112" iyz="0" izz="0.08225"/>
|
||||
</inertial>
|
||||
<collision>
|
||||
<origin rpy="-1.57080 0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<geometry>
|
||||
<capsule length="0.14000" radius="0.07000"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
<collision>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.19000"/>
|
||||
<geometry>
|
||||
<sphere radius="0.09000"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
<collision>
|
||||
<origin rpy="-1.57080 0.00000 0.00000" xyz="-0.01000 0.00000 -0.12000"/>
|
||||
<geometry>
|
||||
<capsule length="0.12000" radius="0.06000"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
</link>
|
||||
<link name="link1_2">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="link1_3">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="lwaist">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="2.26195"/>
|
||||
<inertia ixx="0.01357" ixy="0" ixz="0" iyy="0.00543" iyz="0" izz="0.01357"/>
|
||||
</inertial>
|
||||
<collision>
|
||||
<origin rpy="-1.57080 0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<geometry>
|
||||
<capsule length="0.12000" radius="0.06000"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
</link>
|
||||
<link name="link1_5">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="pelvis">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="-0.02000 0.00000 0.00000"/>
|
||||
<mass value="6.61619"/>
|
||||
<inertia ixx="0.07432" ixy="0" ixz="0" iyy="0.03573" iyz="0" izz="0.07432"/>
|
||||
</inertial>
|
||||
<collision>
|
||||
<origin rpy="-1.57080 0.00000 0.00000" xyz="-0.02000 0.00000 0.00000"/>
|
||||
<geometry>
|
||||
<capsule length="0.14000" radius="0.09000"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
</link>
|
||||
<link name="link1_7">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="link1_8">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="link1_9">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="right_thigh">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00500 -0.17000"/>
|
||||
<mass value="4.75175"/>
|
||||
<inertia ixx="0.09212" ixy="0" ixz="0" iyy="0.09076" iyz="0" izz="0.01276"/>
|
||||
</inertial>
|
||||
<collision>
|
||||
<origin rpy="-3.11219 -0.00000 0.00000" xyz="0.00000 0.00500 -0.17000"/>
|
||||
<geometry>
|
||||
<capsule length="0.34015" radius="0.06000"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
</link>
|
||||
<link name="link1_11">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="right_shin">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 -0.15000"/>
|
||||
<mass value="2.75570"/>
|
||||
<inertia ixx="0.03858" ixy="0" ixz="0" iyy="0.03858" iyz="0" izz="0.00441"/>
|
||||
</inertial>
|
||||
<collision>
|
||||
<origin rpy="3.14159 -0.00000 3.14159" xyz="0.00000 0.00000 -0.15000"/>
|
||||
<geometry>
|
||||
<capsule length="0.30000" radius="0.04900"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
</link>
|
||||
<link name="link1_13">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="link1_14">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="right_foot">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.03500 0.01000 0.00000"/>
|
||||
<mass value="1.13114"/>
|
||||
<inertia ixx="0.00177" ixy="0" ixz="0" iyy="0.00716" iyz="0" izz="0.00828"/>
|
||||
</inertial>
|
||||
<collision>
|
||||
<origin rpy="1.57080 1.47585 1.47585" xyz="0.03500 -0.03000 0.00000"/>
|
||||
<geometry>
|
||||
<capsule length="0.21095" radius="0.02700"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
<collision>
|
||||
<origin rpy="-1.57080 1.47585 -1.47585" xyz="0.03500 0.01000 0.00000"/>
|
||||
<geometry>
|
||||
<capsule length="0.21095" radius="0.02700"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
</link>
|
||||
<link name="link1_16">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="link1_17">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="link1_18">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="left_thigh">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 -0.00500 -0.17000"/>
|
||||
<mass value="4.75175"/>
|
||||
<inertia ixx="0.09212" ixy="0" ixz="0" iyy="0.09076" iyz="0" izz="0.01276"/>
|
||||
</inertial>
|
||||
<collision>
|
||||
<origin rpy="3.11219 -0.00000 0.00000" xyz="0.00000 -0.00500 -0.17000"/>
|
||||
<geometry>
|
||||
<capsule length="0.34015" radius="0.06000"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
</link>
|
||||
<link name="link1_20">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="left_shin">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 -0.15000"/>
|
||||
<mass value="2.75570"/>
|
||||
<inertia ixx="0.03858" ixy="0" ixz="0" iyy="0.03858" iyz="0" izz="0.00441"/>
|
||||
</inertial>
|
||||
<collision>
|
||||
<origin rpy="3.14159 -0.00000 3.14159" xyz="0.00000 0.00000 -0.15000"/>
|
||||
<geometry>
|
||||
<capsule length="0.30000" radius="0.04900"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
</link>
|
||||
<link name="link1_22">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="link1_23">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="left_foot">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.03500 -0.01000 0.00000"/>
|
||||
<mass value="1.13114"/>
|
||||
<inertia ixx="0.00177" ixy="0" ixz="0" iyy="0.00716" iyz="0" izz="0.00828"/>
|
||||
</inertial>
|
||||
<collision>
|
||||
<origin rpy="-1.57080 1.47585 -1.47585" xyz="0.03500 0.03000 0.00000"/>
|
||||
<geometry>
|
||||
<capsule length="0.21095" radius="0.02700"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
<collision>
|
||||
<origin rpy="1.57080 1.47585 1.47585" xyz="0.03500 -0.01000 0.00000"/>
|
||||
<geometry>
|
||||
<capsule length="0.21095" radius="0.02700"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
</link>
|
||||
<link name="link1_25">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="link1_26">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="right_upper_arm">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.08000 -0.08000 -0.08000"/>
|
||||
<mass value="1.66108"/>
|
||||
<inertia ixx="0.02368" ixy="0" ixz="0" iyy="0.02368" iyz="0" izz="0.02267"/>
|
||||
</inertial>
|
||||
<collision>
|
||||
<origin rpy="2.35619 0.61548 1.30900" xyz="0.08000 -0.08000 -0.08000"/>
|
||||
<geometry>
|
||||
<capsule length="0.27713" radius="0.04000"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
</link>
|
||||
<link name="link1_28">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="right_lower_arm">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="1.22954"/>
|
||||
<inertia ixx="0.01419" ixy="0" ixz="0" iyy="0.01419" iyz="0" izz="0.01374"/>
|
||||
</inertial>
|
||||
<collision>
|
||||
<origin rpy="-0.78540 0.61548 -0.26180" xyz="0.09000 0.09000 0.09000"/>
|
||||
<geometry>
|
||||
<capsule length="0.27713" radius="0.03100"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
<collision>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.18000 0.18000 0.18000"/>
|
||||
<geometry>
|
||||
<sphere radius="0.04000"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
</link>
|
||||
<link name="link1_30">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="link1_31">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="left_upper_arm">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.08000 0.08000 -0.08000"/>
|
||||
<mass value="1.66108"/>
|
||||
<inertia ixx="0.02368" ixy="0" ixz="0" iyy="0.02368" iyz="0" izz="0.02267"/>
|
||||
</inertial>
|
||||
<collision>
|
||||
<origin rpy="-2.35619 0.61548 -1.30900" xyz="0.08000 0.08000 -0.08000"/>
|
||||
<geometry>
|
||||
<capsule length="0.27713" radius="0.04000"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
</link>
|
||||
<link name="link1_33">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="0.00000"/>
|
||||
<inertia ixx="0.00000" ixy="0" ixz="0" iyy="0.00000" iyz="0" izz="0.00000"/>
|
||||
</inertial>
|
||||
</link>
|
||||
<link name="left_lower_arm">
|
||||
<inertial>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<mass value="1.22954"/>
|
||||
<inertia ixx="0.01419" ixy="0" ixz="0" iyy="0.01419" iyz="0" izz="0.01374"/>
|
||||
</inertial>
|
||||
<collision>
|
||||
<origin rpy="0.78540 0.61548 0.26180" xyz="0.09000 -0.09000 0.09000"/>
|
||||
<geometry>
|
||||
<capsule length="0.27713" radius="0.03100"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
<collision>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.18000 -0.18000 0.18000"/>
|
||||
<geometry>
|
||||
<sphere radius="0.04000"/>
|
||||
</geometry>
|
||||
</collision>
|
||||
</link>
|
||||
<joint name="abdomen_z" type="continuous">
|
||||
<parent link="torso"/>
|
||||
<child link="link1_2"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00400 0.00000" xyz="-0.01026 0.00000 -0.19500"/>
|
||||
<axis xyz="0.00000 0.00000 1.00000"/>
|
||||
</joint>
|
||||
<joint name="abdomen_y" type="continuous">
|
||||
<parent link="link1_2"/>
|
||||
<child link="link1_3"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<axis xyz="0.00000 1.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="jointfix_7_3" type="fixed">
|
||||
<parent link="link1_3"/>
|
||||
<child link="lwaist"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 -0.06500"/>
|
||||
<axis xyz="0.00000 0.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="abdomen_x" type="continuous">
|
||||
<parent link="lwaist"/>
|
||||
<child link="link1_5"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00400 0.00000" xyz="-0.00040 0.00000 -0.06500"/>
|
||||
<axis xyz="1.00000 0.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="jointfix_6_5" type="fixed">
|
||||
<parent link="link1_5"/>
|
||||
<child link="pelvis"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 -0.10000"/>
|
||||
<axis xyz="0.00000 0.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="right_hip_x" type="continuous">
|
||||
<parent link="pelvis"/>
|
||||
<child link="link1_7"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 -0.10000 -0.04000"/>
|
||||
<axis xyz="1.00000 0.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="right_hip_z" type="continuous">
|
||||
<parent link="link1_7"/>
|
||||
<child link="link1_8"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<axis xyz="0.00000 0.00000 1.00000"/>
|
||||
</joint>
|
||||
<joint name="right_hip_y" type="continuous">
|
||||
<parent link="link1_8"/>
|
||||
<child link="link1_9"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<axis xyz="0.00000 1.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="jointfix_2_9" type="fixed">
|
||||
<parent link="link1_9"/>
|
||||
<child link="right_thigh"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<axis xyz="0.00000 0.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="right_knee" type="continuous">
|
||||
<parent link="right_thigh"/>
|
||||
<child link="link1_11"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.01000 -0.38300"/>
|
||||
<axis xyz="0.00000 -1.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="jointfix_1_11" type="fixed">
|
||||
<parent link="link1_11"/>
|
||||
<child link="right_shin"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 -0.02000"/>
|
||||
<axis xyz="0.00000 0.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="right_ankle_y" type="continuous">
|
||||
<parent link="right_shin"/>
|
||||
<child link="link1_13"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 -0.31000"/>
|
||||
<axis xyz="0.00000 1.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="right_ankle_x" type="continuous">
|
||||
<parent link="link1_13"/>
|
||||
<child link="link1_14"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 -0.04000"/>
|
||||
<axis xyz="1.00000 0.00000 0.50000"/>
|
||||
</joint>
|
||||
<joint name="jointfix_0_14" type="fixed">
|
||||
<parent link="link1_14"/>
|
||||
<child link="right_foot"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 -0.08000"/>
|
||||
<axis xyz="0.00000 0.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="left_hip_x" type="continuous">
|
||||
<parent link="pelvis"/>
|
||||
<child link="link1_16"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.10000 -0.04000"/>
|
||||
<axis xyz="-1.00000 0.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="left_hip_z" type="continuous">
|
||||
<parent link="link1_16"/>
|
||||
<child link="link1_17"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<axis xyz="0.00000 0.00000 -1.00000"/>
|
||||
</joint>
|
||||
<joint name="left_hip_y" type="continuous">
|
||||
<parent link="link1_17"/>
|
||||
<child link="link1_18"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<axis xyz="0.00000 1.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="jointfix_5_18" type="fixed">
|
||||
<parent link="link1_18"/>
|
||||
<child link="left_thigh"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<axis xyz="0.00000 0.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="left_knee" type="continuous">
|
||||
<parent link="left_thigh"/>
|
||||
<child link="link1_20"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 -0.01000 -0.38300"/>
|
||||
<axis xyz="0.00000 -1.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="jointfix_4_20" type="fixed">
|
||||
<parent link="link1_20"/>
|
||||
<child link="left_shin"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 -0.02000"/>
|
||||
<axis xyz="0.00000 0.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="left_ankle_y" type="continuous">
|
||||
<parent link="left_shin"/>
|
||||
<child link="link1_22"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 -0.31000"/>
|
||||
<axis xyz="0.00000 1.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="left_ankle_x" type="continuous">
|
||||
<parent link="link1_22"/>
|
||||
<child link="link1_23"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 -0.04000"/>
|
||||
<axis xyz="1.00000 0.00000 0.50000"/>
|
||||
</joint>
|
||||
<joint name="jointfix_3_23" type="fixed">
|
||||
<parent link="link1_23"/>
|
||||
<child link="left_foot"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 -0.08000"/>
|
||||
<axis xyz="0.00000 0.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="right_shoulder1" type="continuous">
|
||||
<parent link="torso"/>
|
||||
<child link="link1_25"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 -0.17000 0.06000"/>
|
||||
<axis xyz="2.00000 1.00000 1.00000"/>
|
||||
</joint>
|
||||
<joint name="right_shoulder2" type="continuous">
|
||||
<parent link="link1_25"/>
|
||||
<child link="link1_26"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<axis xyz="0.00000 -1.00000 1.00000"/>
|
||||
</joint>
|
||||
<joint name="jointfix_9_26" type="fixed">
|
||||
<parent link="link1_26"/>
|
||||
<child link="right_upper_arm"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<axis xyz="0.00000 0.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="right_elbow" type="continuous">
|
||||
<parent link="right_upper_arm"/>
|
||||
<child link="link1_28"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.18000 -0.18000 -0.18000"/>
|
||||
<axis xyz="0.00000 -1.00000 1.00000"/>
|
||||
</joint>
|
||||
<joint name="jointfix_8_28" type="fixed">
|
||||
<parent link="link1_28"/>
|
||||
<child link="right_lower_arm"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<axis xyz="0.00000 0.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="left_shoulder1" type="continuous">
|
||||
<parent link="torso"/>
|
||||
<child link="link1_30"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.17000 0.06000"/>
|
||||
<axis xyz="2.00000 -1.00000 1.00000"/>
|
||||
</joint>
|
||||
<joint name="left_shoulder2" type="continuous">
|
||||
<parent link="link1_30"/>
|
||||
<child link="link1_31"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<axis xyz="0.00000 1.00000 1.00000"/>
|
||||
</joint>
|
||||
<joint name="jointfix_11_31" type="fixed">
|
||||
<parent link="link1_31"/>
|
||||
<child link="left_upper_arm"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<axis xyz="0.00000 0.00000 0.00000"/>
|
||||
</joint>
|
||||
<joint name="left_elbow" type="continuous">
|
||||
<parent link="left_upper_arm"/>
|
||||
<child link="link1_33"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.18000 0.18000 -0.18000"/>
|
||||
<axis xyz="0.00000 -1.00000 -1.00000"/>
|
||||
</joint>
|
||||
<joint name="jointfix_10_33" type="fixed">
|
||||
<parent link="link1_33"/>
|
||||
<child link="left_lower_arm"/>
|
||||
<dynamics damping="1.0" friction="0.0001"/>
|
||||
<origin rpy="0.00000 -0.00000 0.00000" xyz="0.00000 0.00000 0.00000"/>
|
||||
<axis xyz="0.00000 0.00000 0.00000"/>
|
||||
</joint>
|
||||
</robot>
Binary file not shown.
@@ -1008,7 +1008,7 @@ void BenchmarkDemo::createTest4()
}

//this will enable polyhedral contact clipping, better quality, slightly slower
//convexHullShape->initializePolyhedralFeatures();
convexHullShape->initializePolyhedralFeatures();

btTransform trans;
trans.setIdentity();

@@ -251,7 +251,6 @@ void MyKeyboardCallback(int key, int state)

if (key == 'p')
{
#ifndef BT_NO_PROFILE
if (state)
{
b3ChromeUtilsStartTimings();
@@ -260,7 +259,6 @@ void MyKeyboardCallback(int key, int state)
{
b3ChromeUtilsStopTimingsAndWriteJsonFile("timings");
}
#endif //BT_NO_PROFILE
}

#ifndef NO_OPENGL3
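In the example browser the 'p' key toggles these Chrome-trace timings. A comparable capture can be sketched from pybullet, assuming the profile-timings state logger is available in the build; the resulting JSON can be opened in chrome://tracing:

```python
# Hedged sketch: record profile timings while stepping, then stop the logger.
import pybullet as p

p.connect(p.DIRECT)
logId = p.startStateLogging(p.STATE_LOGGING_PROFILE_TIMINGS, "timings")
for _ in range(100):
    p.stepSimulation()
p.stopStateLogging(logId)
p.disconnect()
```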
@@ -1129,6 +1127,7 @@ bool OpenGLExampleBrowser::init(int argc, char* argv[])

gui2->registerFileOpenCallback(fileOpenCallback);
gui2->registerQuitCallback(quitCallback);

}

return true;

@@ -883,6 +883,15 @@ void BulletURDFImporter::convertURDFToVisualShapeInternal(const UrdfVisual* visu

switch (visual->m_geometry.m_type)
{
case URDF_GEOM_CAPSULE:
{
btScalar radius = visual->m_geometry.m_capsuleRadius;
btScalar height = visual->m_geometry.m_capsuleHeight;
btCapsuleShapeZ* capsuleShape = new btCapsuleShapeZ(radius, height);
convexColShape = capsuleShape;
convexColShape->setMargin(gUrdfDefaultCollisionMargin);
break;
}
case URDF_GEOM_CYLINDER:
{
btAlignedObjectArray<btVector3> vertices;

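The URDF_GEOM_CAPSULE branch above gives the importer a capsule path, which the new data/humanoid.urdf (whose collision shapes are mostly capsules) relies on. A minimal loading sketch, assuming humanoid.urdf is available on the pybullet_data search path:

```python
# Sketch: load the new humanoid model and let it settle on a plane.
import pybullet as p
import pybullet_data

p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setGravity(0, 0, -10)
p.loadURDF("plane.urdf")
humanoid = p.loadURDF("humanoid.urdf", basePosition=[0, 0, 1])
for _ in range(240):
    p.stepSimulation()
print(p.getBasePositionAndOrientation(humanoid))
```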
@@ -48,9 +48,6 @@ InvertedPendulumPDControl::~InvertedPendulumPDControl()
{
}

///this is a temporary global, until we determine if we need the option or not
extern bool gJointFeedbackInWorldSpace;
extern bool gJointFeedbackInJointFrame;

btMultiBody* createInvertedPendulumMultiBody(btMultiBodyDynamicsWorld* world, GUIHelperInterface* guiHelper, const btTransform& baseWorldTrans, bool fixedBase)
{
@@ -315,8 +312,10 @@ void InvertedPendulumPDControl::initPhysics()
}

int upAxis = 1;
gJointFeedbackInWorldSpace = true;
gJointFeedbackInJointFrame = true;

m_dynamicsWorld->getSolverInfo().m_jointFeedbackInWorldSpace = true;
m_dynamicsWorld->getSolverInfo().m_jointFeedbackInJointFrame = true;

m_guiHelper->setUpAxis(upAxis);

@@ -44,14 +44,9 @@ MultiBodyConstraintFeedbackSetup::~MultiBodyConstraintFeedbackSetup()
{
}

///this is a temporary global, until we determine if we need the option or not
extern bool gJointFeedbackInWorldSpace;
extern bool gJointFeedbackInJointFrame;
void MultiBodyConstraintFeedbackSetup::initPhysics()
{
int upAxis = 2;
gJointFeedbackInWorldSpace = true;
gJointFeedbackInJointFrame = true;
m_guiHelper->setUpAxis(upAxis);

btVector4 colors[4] =
@@ -69,6 +64,10 @@ void MultiBodyConstraintFeedbackSetup::initPhysics()
//btIDebugDraw::DBG_DrawConstraints
+btIDebugDraw::DBG_DrawWireframe + btIDebugDraw::DBG_DrawContactPoints + btIDebugDraw::DBG_DrawAabb); //+btIDebugDraw::DBG_DrawConstraintLimits);

m_dynamicsWorld->getSolverInfo().m_jointFeedbackInWorldSpace = true;
m_dynamicsWorld->getSolverInfo().m_jointFeedbackInJointFrame = true;

//create a static ground object
if (1)
{

@@ -44,15 +44,10 @@ TestJointTorqueSetup::~TestJointTorqueSetup()
{
}

///this is a temporary global, until we determine if we need the option or not
extern bool gJointFeedbackInWorldSpace;
extern bool gJointFeedbackInJointFrame;

void TestJointTorqueSetup::initPhysics()
{
int upAxis = 1;
gJointFeedbackInWorldSpace = true;
gJointFeedbackInJointFrame = true;

m_guiHelper->setUpAxis(upAxis);

@@ -71,6 +66,10 @@ void TestJointTorqueSetup::initPhysics()
//btIDebugDraw::DBG_DrawConstraints
+btIDebugDraw::DBG_DrawWireframe + btIDebugDraw::DBG_DrawContactPoints + btIDebugDraw::DBG_DrawAabb); //+btIDebugDraw::DBG_DrawConstraintLimits);

m_dynamicsWorld->getSolverInfo().m_jointFeedbackInWorldSpace = true;
m_dynamicsWorld->getSolverInfo().m_jointFeedbackInJointFrame = true;

//create a static ground object
if (1)
{

@@ -96,8 +96,6 @@
#include "BulletDynamics/Featherstone/btMultiBodyDynamicsWorld.h"
#endif

extern bool gJointFeedbackInWorldSpace;
extern bool gJointFeedbackInJointFrame;

int gInternalSimFlags = 0;
bool gResetSimulation = 0;
@@ -7697,11 +7695,11 @@ bool PhysicsServerCommandProcessor::processRequestPhysicsSimulationParametersCom
serverCmd.m_simulationParameterResultArgs.m_gravityAcceleration[2] = grav[2];
serverCmd.m_simulationParameterResultArgs.m_internalSimFlags = gInternalSimFlags;
serverCmd.m_simulationParameterResultArgs.m_jointFeedbackMode = 0;
if (gJointFeedbackInWorldSpace)
if (m_data->m_dynamicsWorld->getSolverInfo().m_jointFeedbackInWorldSpace)
{
serverCmd.m_simulationParameterResultArgs.m_jointFeedbackMode |= JOINT_FEEDBACK_IN_WORLD_SPACE;
}
if (gJointFeedbackInJointFrame)
if (m_data->m_dynamicsWorld->getSolverInfo().m_jointFeedbackInJointFrame)
{
serverCmd.m_simulationParameterResultArgs.m_jointFeedbackMode |= JOINT_FEEDBACK_IN_JOINT_FRAME;
}
@@ -7747,8 +7745,8 @@ bool PhysicsServerCommandProcessor::processSendPhysicsParametersCommand(const st

if (clientCmd.m_updateFlags & SIM_PARAM_UPDATE_JOINT_FEEDBACK_MODE)
{
gJointFeedbackInWorldSpace = (clientCmd.m_physSimParamArgs.m_jointFeedbackMode & JOINT_FEEDBACK_IN_WORLD_SPACE) != 0;
gJointFeedbackInJointFrame = (clientCmd.m_physSimParamArgs.m_jointFeedbackMode & JOINT_FEEDBACK_IN_JOINT_FRAME) != 0;
m_data->m_dynamicsWorld->getSolverInfo().m_jointFeedbackInWorldSpace = (clientCmd.m_physSimParamArgs.m_jointFeedbackMode & JOINT_FEEDBACK_IN_WORLD_SPACE) != 0;
m_data->m_dynamicsWorld->getSolverInfo().m_jointFeedbackInJointFrame = (clientCmd.m_physSimParamArgs.m_jointFeedbackMode & JOINT_FEEDBACK_IN_JOINT_FRAME) != 0;
}

if (clientCmd.m_updateFlags & SIM_PARAM_UPDATE_DELTA_TIME)
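These hunks route the joint-feedback mode through the solver info instead of the old globals. On the client side this corresponds roughly to the sketch below; the jointFeedbackMode parameter and the JOINT_FEEDBACK_* constants are assumed to be exposed by the pybullet build in use:

```python
# Hedged sketch: request joint reaction forces in world space and read them back.
import pybullet as p
import pybullet_data

p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setGravity(0, 0, -10)
robot = p.loadURDF("r2d2.urdf", useFixedBase=True)

# assumption: jointFeedbackMode / JOINT_FEEDBACK_IN_WORLD_SPACE are available
p.setPhysicsEngineParameter(jointFeedbackMode=p.JOINT_FEEDBACK_IN_WORLD_SPACE)
p.enableJointForceTorqueSensor(robot, 1, enableSensor=True)
for _ in range(10):
    p.stepSimulation()
# getJointState returns (position, velocity, reactionForces[6], appliedTorque)
print(p.getJointState(robot, 1)[2])
```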
@@ -9642,6 +9640,16 @@ int PhysicsServerCommandProcessor::extractCollisionShapes(const btCollisionShape

switch (colShape->getShapeType())
{
case STATIC_PLANE_PROXYTYPE:
{
btStaticPlaneShape* plane = (btStaticPlaneShape*) colShape;
collisionShapeBuffer[0].m_collisionGeometryType = GEOM_PLANE;
collisionShapeBuffer[0].m_dimensions[0] = plane->getPlaneNormal()[0];
collisionShapeBuffer[0].m_dimensions[1] = plane->getPlaneNormal()[1];
collisionShapeBuffer[0].m_dimensions[2] = plane->getPlaneNormal()[2];
numConverted += 1;
break;
}
case CONVEX_HULL_SHAPE_PROXYTYPE:
{
UrdfCollision* urdfCol = m_data->m_bulletCollisionShape2UrdfCollision.find(colShape);

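The STATIC_PLANE_PROXYTYPE case above fills the collision-shape query buffer, so a static plane reports GEOM_PLANE with its normal in the dimensions field. A small client-side sketch of reading that data back (the tuple layout noted in the comment is approximate):

```python
# Sketch: query a body's collision shape geometry from the client side.
import pybullet as p
import pybullet_data

p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
plane = p.loadURDF("plane.urdf")
# each entry roughly: (bodyUniqueId, linkIndex, geometryType, dimensions,
#                      filename, localFramePos, localFrameOrn)
print(p.getCollisionShapeData(plane, -1))
```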
@@ -27,7 +27,6 @@ bool gEnableRendering = true;
bool gActivedVRRealTimeSimulation = false;

bool gEnableSyncPhysicsRendering = true;
bool gEnableUpdateDebugDrawLines = true;
static int gCamVisualizerWidth = 228;
static int gCamVisualizerHeight = 192;

@@ -177,6 +176,7 @@ struct MotionArgs
{
MotionArgs()
: m_debugDrawFlags(0),
m_enableUpdateDebugDrawLines(true),
m_physicsServerPtr(0)
{
for (int i = 0; i < MAX_VR_CONTROLLERS; i++)
@@ -201,7 +201,7 @@ struct MotionArgs
b3CriticalSection* m_csGUI;

int m_debugDrawFlags;

bool m_enableUpdateDebugDrawLines;
btAlignedObjectArray<MyMouseCommand> m_mouseCommands;

b3VRControllerEvent m_vrControllerEvents[MAX_VR_CONTROLLERS];
@@ -424,13 +424,13 @@ void MotionThreadFunc(void* userPtr, void* lsMemory)
args->m_physicsServerPtr->stepSimulationRealTime(deltaTimeInSeconds, args->m_sendVrControllerEvents, numSendVrControllers, keyEvents, args->m_sendKeyEvents.size(), mouseEvents, args->m_sendMouseEvents.size());
}
{
if (gEnableUpdateDebugDrawLines)
args->m_csGUI->lock();
if (args->m_enableUpdateDebugDrawLines)
{
args->m_csGUI->lock();
args->m_physicsServerPtr->physicsDebugDraw(args->m_debugDrawFlags);
gEnableUpdateDebugDrawLines = false;
args->m_csGUI->unlock();
args->m_enableUpdateDebugDrawLines = false;
}
args->m_csGUI->unlock();
}
deltaTimeInSeconds = 0;
}
@@ -1737,6 +1737,11 @@ void PhysicsServerExample::initPhysics()
m_args[w].m_cs2 = m_threadSupport->createCriticalSection();
m_args[w].m_cs3 = m_threadSupport->createCriticalSection();
m_args[w].m_csGUI = m_threadSupport->createCriticalSection();
m_multiThreadedHelper->setCriticalSection(m_args[w].m_cs);
m_multiThreadedHelper->setCriticalSection2(m_args[w].m_cs2);
m_multiThreadedHelper->setCriticalSection3(m_args[w].m_cs3);
m_multiThreadedHelper->setCriticalSectionGUI(m_args[w].m_csGUI);

m_args[w].m_cs->lock();
m_args[w].m_cs->setSharedParam(0, eMotionIsUnInitialized);
m_args[w].m_cs->unlock();
@@ -1758,13 +1763,9 @@ void PhysicsServerExample::initPhysics()
#endif
}
}

m_args[0].m_cs->lock();
m_args[0].m_cs->setSharedParam(1, eGUIHelperIdle);
m_multiThreadedHelper->setCriticalSection(m_args[0].m_cs);
m_multiThreadedHelper->setCriticalSection2(m_args[0].m_cs2);
m_multiThreadedHelper->setCriticalSection3(m_args[0].m_cs3);
m_multiThreadedHelper->setCriticalSectionGUI(m_args[0].m_csGUI);

m_args[0].m_cs->unlock();
m_args[0].m_cs2->lock();

{
@@ -2843,7 +2844,7 @@ void PhysicsServerExample::physicsDebugDraw(int debugDrawFlags)
//draw stuff and flush?
this->m_multiThreadedHelper->m_debugDraw->drawDebugDrawerLines();
m_args[0].m_debugDrawFlags = debugDrawFlags;
gEnableUpdateDebugDrawLines = true;
m_args[0].m_enableUpdateDebugDrawLines = true;
m_args[0].m_csGUI->unlock();
}
}

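The hunks above move the debug-draw flags and the update flag into MotionArgs so they are guarded by m_csGUI. From the client side, these flags roughly correspond to the visualizer toggles; a minimal sketch:

```python
# Sketch: enable wireframe overlay (the kind of drawing driven by the
# debugDrawFlags above) and add a user debug line.
import pybullet as p
import pybullet_data

p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("r2d2.urdf")
p.configureDebugVisualizer(p.COV_ENABLE_WIREFRAME, 1)
p.addUserDebugLine([0, 0, 0], [0, 0, 1], lineColorRGB=[1, 0, 0], lineWidth=2)
while p.isConnected():
    p.stepSimulation()
```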
@@ -928,7 +928,7 @@ enum eFileIOTypes
};

//limits for vertices/indices in PyBullet::createCollisionShape
#define B3_MAX_NUM_VERTICES 1024
#define B3_MAX_NUM_INDICES 1024
#define B3_MAX_NUM_VERTICES 16
#define B3_MAX_NUM_INDICES 16

#endif //SHARED_MEMORY_PUBLIC_H

@@ -174,7 +174,7 @@ void MyEnterProfileZoneFunc(const char* msg)
{
if (gProfileDisabled)
return;
#ifndef BT_NO_PROFILE

int threadId = btQuickprofGetCurrentThreadIndex2();
if (threadId < 0 || threadId >= BT_QUICKPROF_MAX_THREAD_COUNT)
return;
@@ -191,13 +191,13 @@ void MyEnterProfileZoneFunc(const char* msg)
gStartTimes[threadId][gStackDepths[threadId]] = 1 + gStartTimes[threadId][gStackDepths[threadId] - 1];
}
gStackDepths[threadId]++;
#endif

}
void MyLeaveProfileZoneFunc()
{
if (gProfileDisabled)
return;
#ifndef BT_NO_PROFILE

int threadId = btQuickprofGetCurrentThreadIndex2();
if (threadId < 0 || threadId >= BT_QUICKPROF_MAX_THREAD_COUNT)
return;
@@ -214,7 +214,7 @@ void MyLeaveProfileZoneFunc()

unsigned long long int endTime = clk.getTimeNanoseconds();
gTimings[threadId].addTiming(name, threadId, startTime, endTime);
#endif //BT_NO_PROFILE

}

void b3ChromeUtilsStartTimings()

@@ -21,28 +21,6 @@ colSphereId = p.createCollisionShape(p.GEOM_SPHERE,radius=sphereRadius)
#convex mesh from obj
stoneId = p.createCollisionShape(p.GEOM_MESH,fileName="stone.obj")

#concave mesh from obj
stoneId = p.createCollisionShape(p.GEOM_MESH,fileName="stone.obj", flags=p.GEOM_FORCE_CONCAVE_TRIMESH)

verts=[[-0.246350, -0.246483, -0.000624],
[ -0.151407, -0.176325, 0.172867],
[ -0.246350, 0.249205, -0.000624],
[ -0.151407, 0.129477, 0.172867],
[ 0.249338, -0.246483, -0.000624],
[ 0.154395, -0.176325, 0.172867],
[ 0.249338, 0.249205, -0.000624],
[ 0.154395, 0.129477, 0.172867]]
#convex mesh from vertices
stoneConvexId = p.createCollisionShape(p.GEOM_MESH,vertices=verts)

indices=[0,3,2,3,6,2,7,4,6,5,0,4,6,0,2,3,5,7,0,1,3,3,7,6,7,5,4,5,1,0,6,4,0,3,1,5]

#concave mesh from vertices+indices
stoneConcaveId = p.createCollisionShape(p.GEOM_MESH,vertices=verts, indices=indices)

stoneId = stoneConvexId
#stoneId = stoneConcaveId

boxHalfLength = 0.5

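A follow-on sketch, not part of the diff: turning one of the collision shapes created above into a dynamic body. It assumes the preceding snippet already ran, so p and stoneId are in scope:

```python
# Sketch: create a dynamic body from the convex stone collision shape and
# step the simulation for a second.
stoneBody = p.createMultiBody(baseMass=1.0,
                              baseCollisionShapeIndex=stoneId,
                              basePosition=[0, 0, 1])
for _ in range(240):
    p.stepSimulation()
print(p.getBasePositionAndOrientation(stoneBody))
```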
@@ -1,397 +1,142 @@
|
||||
"""Internal implementation of the Augmented Random Search method."""
|
||||
# AI 2018
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
import os, inspect
|
||||
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
|
||||
os.sys.path.insert(0,currentdir)
|
||||
|
||||
from concurrent import futures
|
||||
import copy
|
||||
# Importing the libraries
|
||||
import os
|
||||
import time
|
||||
import gym
|
||||
import numpy as np
|
||||
import logz
|
||||
import utils
|
||||
import optimizers
|
||||
#from google3.pyglib import gfile
|
||||
import policies
|
||||
import shared_noise
|
||||
import utility
|
||||
|
||||
class Worker(object):
|
||||
"""Object class for parallel rollout generation."""
|
||||
|
||||
def __init__(self,
|
||||
env_seed,
|
||||
env_callback,
|
||||
policy_params=None,
|
||||
deltas=None,
|
||||
rollout_length=1000,
|
||||
delta_std=0.02):
|
||||
|
||||
# initialize OpenAI environment for each worker
|
||||
self.env = env_callback()
|
||||
self.env.seed(env_seed)
|
||||
|
||||
# each worker gets access to the shared noise table
|
||||
# with independent random streams for sampling
|
||||
# from the shared noise table.
|
||||
self.deltas = shared_noise.SharedNoiseTable(deltas, env_seed + 7)
|
||||
self.policy_params = policy_params
|
||||
if policy_params['type'] == 'linear':
|
||||
self.policy = policies.LinearPolicy(policy_params)
|
||||
else:
|
||||
raise NotImplementedError
|
||||
|
||||
self.delta_std = delta_std
|
||||
self.rollout_length = rollout_length
|
||||
|
||||
def get_weights_plus_stats(self):
|
||||
"""
|
||||
Get current policy weights and current statistics of past states.
|
||||
"""
|
||||
assert self.policy_params['type'] == 'linear'
|
||||
return self.policy.get_weights_plus_stats()
|
||||
|
||||
def rollout(self, shift=0., rollout_length=None):
|
||||
"""Performs one rollout of maximum length rollout_length.
|
||||
|
||||
At each time-step it substracts shift from the reward.
|
||||
"""
|
||||
|
||||
if rollout_length is None:
|
||||
rollout_length = self.rollout_length
|
||||
|
||||
total_reward = 0.
|
||||
steps = 0
|
||||
|
||||
ob = self.env.reset()
|
||||
for i in range(rollout_length):
|
||||
action = self.policy.act(ob)
|
||||
ob, reward, done, _ = self.env.step(action)
|
||||
steps += 1
|
||||
total_reward += (reward - shift)
|
||||
if done:
|
||||
break
|
||||
|
||||
return total_reward, steps
|
||||
|
||||
def do_rollouts(self, w_policy, num_rollouts=1, shift=1, evaluate=False):
|
||||
"""
|
||||
Generate multiple rollouts with a policy parametrized by w_policy.
|
||||
"""
|
||||
print('Doing {} rollouts'.format(num_rollouts))
|
||||
rollout_rewards, deltas_idx = [], []
|
||||
steps = 0
|
||||
|
||||
for i in range(num_rollouts):
|
||||
|
||||
if evaluate:
|
||||
self.policy.update_weights(w_policy)
|
||||
deltas_idx.append(-1)
|
||||
|
||||
# set to false so that evaluation rollouts are not used for updating state statistics
|
||||
self.policy.update_filter = False
|
||||
|
||||
# for evaluation we do not shift the rewards (shift = 0) and we use the
|
||||
# default rollout length (1000 for the MuJoCo locomotion tasks)
|
||||
reward, r_steps = self.rollout(
|
||||
shift=0., rollout_length=self.rollout_length)
|
||||
rollout_rewards.append(reward)
|
||||
|
||||
else:
|
||||
idx, delta = self.deltas.get_delta(w_policy.size)
|
||||
|
||||
delta = (self.delta_std * delta).reshape(w_policy.shape)
|
||||
deltas_idx.append(idx)
|
||||
|
||||
# set to true so that state statistics are updated
|
||||
self.policy.update_filter = True
|
||||
|
||||
# compute reward and number of timesteps used for positive perturbation rollout
|
||||
self.policy.update_weights(w_policy + delta)
|
||||
pos_reward, pos_steps = self.rollout(shift=shift)
|
||||
|
||||
# compute reward and number of timesteps used for negative pertubation rollout
|
||||
self.policy.update_weights(w_policy - delta)
|
||||
neg_reward, neg_steps = self.rollout(shift=shift)
|
||||
steps += pos_steps + neg_steps
|
||||
|
||||
rollout_rewards.append([pos_reward, neg_reward])
|
||||
|
||||
return {
|
||||
'deltas_idx': deltas_idx,
|
||||
'rollout_rewards': rollout_rewards,
|
||||
'steps': steps
|
||||
}
|
||||
|
||||
def stats_increment(self):
|
||||
self.policy.observation_filter.stats_increment()
|
||||
return
|
||||
|
||||
def get_weights(self):
|
||||
return self.policy.get_weights()
|
||||
|
||||
def get_filter(self):
|
||||
return self.policy.observation_filter
|
||||
|
||||
def sync_filter(self, other):
|
||||
self.policy.observation_filter.sync(other)
|
||||
return
|
||||
|
||||
|
||||
class ARSLearner(object):
|
||||
"""
|
||||
Object class implementing the ARS algorithm.
|
||||
"""
|
||||
|
||||
def __init__(self,
|
||||
env_callback,
|
||||
policy_params=None,
|
||||
num_workers=32,
|
||||
num_deltas=320,
|
||||
deltas_used=320,
|
||||
delta_std=0.02,
|
||||
logdir=None,
|
||||
rollout_length=1000,
|
||||
step_size=0.01,
|
||||
shift='constant zero',
|
||||
params=None,
|
||||
seed=123):
|
||||
|
||||
logz.configure_output_dir(logdir)
|
||||
# params_to_save = copy.deepcopy(params)
|
||||
# params_to_save['env'] = None
|
||||
# logz.save_params(params_to_save)
|
||||
utility.save_config(params, logdir)
|
||||
env = env_callback()
|
||||
|
||||
self.timesteps = 0
|
||||
self.action_size = env.action_space.shape[0]
|
||||
self.ob_size = env.observation_space.shape[0]
|
||||
self.num_deltas = num_deltas
|
||||
self.deltas_used = deltas_used
|
||||
self.rollout_length = rollout_length
|
||||
self.step_size = step_size
|
||||
self.delta_std = delta_std
|
||||
self.logdir = logdir
|
||||
self.shift = shift
|
||||
self.params = params
|
||||
self.max_past_avg_reward = float('-inf')
|
||||
self.num_episodes_used = float('inf')
|
||||
|
||||
# create shared table for storing noise
|
||||
print('Creating deltas table.')
|
||||
deltas = shared_noise.create_shared_noise()
|
||||
self.deltas = shared_noise.SharedNoiseTable(deltas, seed=seed + 3)
|
||||
print('Created deltas table.')
|
||||
|
||||
# initialize workers with different random seeds
|
||||
print('Initializing workers.')
|
||||
self.num_workers = num_workers
|
||||
self.workers = [
|
||||
Worker(
|
||||
seed + 7 * i,
|
||||
env_callback=env_callback,
|
||||
policy_params=policy_params,
|
||||
deltas=deltas,
|
||||
rollout_length=rollout_length,
|
||||
delta_std=delta_std) for i in range(num_workers)
|
||||
]
|
||||
|
||||
# initialize policy
|
||||
if policy_params['type'] == 'linear':
|
||||
self.policy = policies.LinearPolicy(policy_params)
|
||||
self.w_policy = self.policy.get_weights()
|
||||
else:
|
||||
raise NotImplementedError
|
||||
|
||||
# initialize optimization algorithm
|
||||
self.optimizer = optimizers.SGD(self.w_policy, self.step_size)
|
||||
print('Initialization of ARS complete.')
|
||||
|
||||
def aggregate_rollouts(self, num_rollouts=None, evaluate=False):
|
||||
"""
|
||||
Aggregate update step from rollouts generated in parallel.
|
||||
"""
|
||||
|
||||
if num_rollouts is None:
|
||||
num_deltas = self.num_deltas
|
||||
else:
|
||||
num_deltas = num_rollouts
|
||||
|
||||
results_one = [] #rollout_ids_one
|
||||
results_two = [] #rollout_ids_two
|
||||
|
||||
t1 = time.time()
|
||||
num_rollouts = int(num_deltas / self.num_workers)
|
||||
# if num_rollouts > 0:
|
||||
# with futures.ThreadPoolExecutor(
|
||||
# max_workers=self.num_workers) as executor:
|
||||
# workers = [
|
||||
# executor.submit(
|
||||
# worker.do_rollouts,
|
||||
# self.w_policy,
|
||||
# num_rollouts=num_rollouts,
|
||||
# shift=self.shift,
|
||||
# evaluate=evaluate) for worker in self.workers
|
||||
# ]
|
||||
# for worker in futures.as_completed(workers):
|
||||
# results_one.append(worker.result())
|
||||
#
|
||||
# workers = [
|
||||
# executor.submit(
|
||||
# worker.do_rollouts,
|
||||
# self.w_policy,
|
||||
# num_rollouts=1,
|
||||
# shift=self.shift,
|
||||
# evaluate=evaluate)
|
||||
# for worker in self.workers[:(num_deltas % self.num_workers)]
|
||||
# ]
|
||||
# for worker in futures.as_completed(workers):
|
||||
# results_two.append(worker.result())
|
||||
|
||||
# parallel generation of rollouts
|
||||
rollout_ids_one = [
|
||||
worker.do_rollouts(
|
||||
self.w_policy,
|
||||
num_rollouts=num_rollouts,
|
||||
shift=self.shift,
|
||||
evaluate=evaluate) for worker in self.workers
|
||||
]
|
||||
|
||||
rollout_ids_two = [
|
||||
worker.do_rollouts(
|
||||
self.w_policy, num_rollouts=1, shift=self.shift, evaluate=evaluate)
|
||||
for worker in self.workers[:(num_deltas % self.num_workers)]
|
||||
]
|
||||
results_one = rollout_ids_one
|
||||
results_two = rollout_ids_two
|
||||
# gather results
|
||||
|
||||
rollout_rewards, deltas_idx = [], []
|
||||
|
||||
for result in results_one:
|
||||
if not evaluate:
|
||||
self.timesteps += result['steps']
|
||||
deltas_idx += result['deltas_idx']
|
||||
rollout_rewards += result['rollout_rewards']
|
||||
|
||||
for result in results_two:
|
||||
if not evaluate:
|
||||
self.timesteps += result['steps']
|
||||
deltas_idx += result['deltas_idx']
|
||||
rollout_rewards += result['rollout_rewards']
|
||||
|
||||
deltas_idx = np.array(deltas_idx)
|
||||
rollout_rewards = np.array(rollout_rewards, dtype=np.float64)
|
||||
|
||||
print('Maximum reward of collected rollouts:', rollout_rewards.max())
|
||||
info_dict = {
|
||||
"max_reward": rollout_rewards.max()
|
||||
}
|
||||
t2 = time.time()
|
||||
|
||||
print('Time to generate rollouts:', t2 - t1)
|
||||
|
||||
if evaluate:
|
||||
return rollout_rewards
|
||||
|
||||
# select top performing directions if deltas_used < num_deltas
|
||||
max_rewards = np.max(rollout_rewards, axis=1)
|
||||
if self.deltas_used > self.num_deltas:
|
||||
self.deltas_used = self.num_deltas
|
||||
|
||||
idx = np.arange(max_rewards.size)[max_rewards >= np.percentile(
|
||||
max_rewards, 100 * (1 - (self.deltas_used / self.num_deltas)))]
|
||||
deltas_idx = deltas_idx[idx]
|
||||
rollout_rewards = rollout_rewards[idx, :]
|
||||
|
||||
# normalize rewards by their standard deviation
|
||||
rollout_rewards /= np.std(rollout_rewards)
|
||||
|
||||
t1 = time.time()
|
||||
# aggregate rollouts to form g_hat, the gradient used to compute SGD step
|
||||
g_hat, count = utils.batched_weighted_sum(
|
||||
rollout_rewards[:, 0] - rollout_rewards[:, 1],
|
||||
(self.deltas.get(idx, self.w_policy.size) for idx in deltas_idx),
|
||||
batch_size=500)
|
||||
g_hat /= deltas_idx.size
|
||||
t2 = time.time()
|
||||
print('time to aggregate rollouts', t2 - t1)
|
||||
return g_hat, info_dict
|
||||
|
||||
def train_step(self):
|
||||
"""
|
||||
Perform one update step of the policy weights.
|
||||
"""
|
||||
|
||||
g_hat, info_dict = self.aggregate_rollouts()
|
||||
print('Euclidean norm of update step:', np.linalg.norm(g_hat))
|
||||
self.w_policy -= self.optimizer._compute_step(g_hat).reshape(
|
||||
self.w_policy.shape)
|
||||
return info_dict
|
||||
|
||||
def train(self, num_iter):
|
||||
|
||||
start = time.time()
|
||||
for i in range(num_iter):
|
||||
|
||||
t1 = time.time()
|
||||
info_dict = self.train_step()
|
||||
t2 = time.time()
|
||||
print('total time of one step', t2 - t1)
|
||||
print('iter ', i, ' done')
|
||||
|
||||
# record statistics every 10 iterations
|
||||
if ((i) % 10 == 0):
|
||||
|
||||
rewards = self.aggregate_rollouts(num_rollouts=8, evaluate=True)
|
||||
w = self.workers[0].get_weights_plus_stats()
|
||||
|
||||
checkpoint_filename = os.path.join(
|
||||
self.logdir, 'lin_policy_plus_{:03d}.npz'.format(i))
|
||||
print('Save checkpoints to {}...', checkpoint_filename)
|
||||
checkpoint_file = open(checkpoint_filename, 'w')
|
||||
np.savez(checkpoint_file, w)
|
||||
print('End save checkpoints.')
|
||||
print(sorted(self.params.items()))
|
||||
logz.log_tabular('Time', time.time() - start)
|
||||
logz.log_tabular('Iteration', i + 1)
|
||||
logz.log_tabular('AverageReward', np.mean(rewards))
|
||||
logz.log_tabular('StdRewards', np.std(rewards))
|
||||
logz.log_tabular('MaxRewardRollout', np.max(rewards))
|
||||
logz.log_tabular('MinRewardRollout', np.min(rewards))
|
||||
logz.log_tabular('timesteps', self.timesteps)
|
||||
logz.dump_tabular()
|
||||
|
||||
t1 = time.time()
|
||||
# get statistics from all workers
|
||||
for j in range(self.num_workers):
|
||||
self.policy.observation_filter.update(self.workers[j].get_filter())
|
||||
self.policy.observation_filter.stats_increment()
|
||||
|
||||
# make sure master filter buffer is clear
|
||||
self.policy.observation_filter.clear_buffer()
|
||||
# sync all workers
|
||||
#filter_id = ray.put(self.policy.observation_filter)
|
||||
setting_filters_ids = [
|
||||
worker.sync_filter(self.policy.observation_filter)
|
||||
for worker in self.workers
|
||||
]
|
||||
# waiting for sync of all workers
|
||||
#ray.get(setting_filters_ids)
|
||||
|
||||
increment_filters_ids = [
|
||||
worker.stats_increment() for worker in self.workers
|
||||
]
|
||||
# waiting for increment of all workers
|
||||
#ray.get(increment_filters_ids)
|
||||
t2 = time.time()
|
||||
print('Time to sync statistics:', t2 - t1)
|
||||
|
||||
return info_dict
|
||||
import gym
|
||||
from gym import wrappers
|
||||
import pybullet_envs
|
||||
|
||||
# Setting the Hyper Parameters
|
||||
|
||||
class Hp():
|
||||
|
||||
def __init__(self):
|
||||
self.nb_steps = 1000
|
||||
self.episode_length = 1000
|
||||
self.learning_rate = 0.02
|
||||
self.nb_directions = 16
|
||||
self.nb_best_directions = 16
|
||||
assert self.nb_best_directions <= self.nb_directions
|
||||
self.noise = 0.03
|
||||
self.seed = 1
|
||||
self.env_name = 'HalfCheetahBulletEnv-v0'
|
||||
|
||||
# Normalizing the states
|
||||
|
||||
class Normalizer():
|
||||
|
||||
def __init__(self, nb_inputs):
|
||||
self.n = np.zeros(nb_inputs)
|
||||
self.mean = np.zeros(nb_inputs)
|
||||
self.mean_diff = np.zeros(nb_inputs)
|
||||
self.var = np.zeros(nb_inputs)
|
||||
|
||||
def observe(self, x):
|
||||
self.n += 1.
|
||||
last_mean = self.mean.copy()
|
||||
self.mean += (x - self.mean) / self.n
|
||||
self.mean_diff += (x - last_mean) * (x - self.mean)
|
||||
self.var = (self.mean_diff / self.n).clip(min = 1e-2)
|
||||
|
||||
def normalize(self, inputs):
|
||||
obs_mean = self.mean
|
||||
obs_std = np.sqrt(self.var)
|
||||
return (inputs - obs_mean) / obs_std
|
||||
|
||||
# Building the AI
|
||||
|
||||
class Policy():
|
||||
|
||||
def __init__(self, input_size, output_size):
|
||||
self.theta = np.zeros((output_size, input_size))
|
||||
print("self.theta=",self.theta)
|
||||
def evaluate(self, input, delta = None, direction = None):
|
||||
if direction is None:
|
||||
return np.clip(self.theta.dot(input), -1.0, 1.0)
|
||||
elif direction == "positive":
|
||||
return np.clip((self.theta + hp.noise*delta).dot(input), -1.0, 1.0)
|
||||
else:
|
||||
return np.clip((self.theta - hp.noise*delta).dot(input), -1.0, 1.0)
|
||||
|
||||
def sample_deltas(self):
|
||||
return [np.random.randn(*self.theta.shape) for _ in range(hp.nb_directions)]
|
||||
|
||||
def update(self, rollouts, sigma_r):
|
||||
step = np.zeros(self.theta.shape)
|
||||
for r_pos, r_neg, d in rollouts:
|
||||
step += (r_pos - r_neg) * d
|
||||
self.theta += hp.learning_rate / (hp.nb_best_directions * sigma_r) * step
|
||||
|
||||
# Exploring the policy on one specific direction and over one episode
|
||||
|
||||
def explore(env, normalizer, policy, direction = None, delta = None):
|
||||
state = env.reset()
|
||||
done = False
|
||||
num_plays = 0.
|
||||
sum_rewards = 0
|
||||
while not done and num_plays < hp.episode_length:
|
||||
normalizer.observe(state)
|
||||
state = normalizer.normalize(state)
|
||||
action = policy.evaluate(state, delta, direction)
|
||||
state, reward, done, _ = env.step(action)
|
||||
reward = max(min(reward, 1), -1)
|
||||
sum_rewards += reward
|
||||
num_plays += 1
|
||||
return sum_rewards
|
||||
|
||||
# Training the AI
|
||||
|
||||
def train(env, policy, normalizer, hp):
|
||||
|
||||
for step in range(hp.nb_steps):
|
||||
|
||||
# Initializing the perturbations deltas and the positive/negative rewards
|
||||
deltas = policy.sample_deltas()
|
||||
positive_rewards = [0] * hp.nb_directions
|
||||
negative_rewards = [0] * hp.nb_directions
|
||||
|
||||
# Getting the positive rewards in the positive directions
|
||||
for k in range(hp.nb_directions):
|
||||
positive_rewards[k] = explore(env, normalizer, policy, direction = "positive", delta = deltas[k])
|
||||
|
||||
# Getting the negative rewards in the negative/opposite directions
|
||||
for k in range(hp.nb_directions):
|
||||
negative_rewards[k] = explore(env, normalizer, policy, direction = "negative", delta = deltas[k])
|
||||
|
||||
# Gathering all the positive/negative rewards to compute the standard deviation of these rewards
|
||||
all_rewards = np.array(positive_rewards + negative_rewards)
|
||||
sigma_r = all_rewards.std()
|
||||
|
||||
# Sorting the rollouts by the max(r_pos, r_neg) and selecting the best directions
|
||||
scores = {k:max(r_pos, r_neg) for k,(r_pos,r_neg) in enumerate(zip(positive_rewards, negative_rewards))}
|
||||
order = sorted(scores.keys(), key = lambda x:scores[x])[:hp.nb_best_directions]
|
||||
rollouts = [(positive_rewards[k], negative_rewards[k], deltas[k]) for k in order]
|
||||
|
||||
# Updating our policy
|
||||
policy.update(rollouts, sigma_r)
|
||||
|
||||
# Printing the final reward of the policy after the update
|
||||
reward_evaluation = explore(env, normalizer, policy)
|
||||
print('Step:', step, 'Reward:', reward_evaluation)
|
||||
|
||||
# Running the main code
|
||||
|
||||
def mkdir(base, name):
|
||||
path = os.path.join(base, name)
|
||||
if not os.path.exists(path):
|
||||
os.makedirs(path)
|
||||
return path
|
||||
work_dir = mkdir('exp', 'brs')
|
||||
monitor_dir = mkdir(work_dir, 'monitor')
|
||||
|
||||
hp = Hp()
|
||||
np.random.seed(hp.seed)
|
||||
env = gym.make(hp.env_name)
|
||||
# env.render(mode = "human")
|
||||
#env = wrappers.Monitor(env, monitor_dir, force = True)
|
||||
nb_inputs = env.observation_space.shape[0]
|
||||
nb_outputs = env.action_space.shape[0]
|
||||
policy = Policy(nb_inputs, nb_outputs)
|
||||
normalizer = Normalizer(nb_inputs)
|
||||
train(env, policy, normalizer, hp)
|
||||
|
||||
@@ -1,62 +0,0 @@
|
||||
"""
|
||||
blaze build -c opt //experimental/users/jietan/ARS:ars_server
|
||||
|
||||
blaze-bin/experimental/users/jietan/ARS/ars_server \
|
||||
--config_name=MINITAUR_GYM_CONFIG
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
import time
|
||||
from absl import app
|
||||
from absl import flags
|
||||
from concurrent import futures
|
||||
import grpc
|
||||
from grpc import loas2
|
||||
from google3.robotics.reinforcement_learning.minitaur.envs import minitaur_gym_env
|
||||
from google3.robotics.reinforcement_learning.minitaur.envs import minitaur_reactive_env
|
||||
from google3.robotics.reinforcement_learning.minitaur.envs.env_randomizers import minitaur_env_randomizer
|
||||
from google3.robotics.reinforcement_learning.minitaur.envs.env_randomizers import minitaur_env_randomizer_from_config as randomizer_config_lib
|
||||
from google3.experimental.users.jietan.ARS import ars_evaluation_service_pb2_grpc
|
||||
from google3.experimental.users.jietan.ARS import ars_evaluation_service
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
flags.DEFINE_integer("server_id", 0, "number of servers")
|
||||
flags.DEFINE_integer("port", 20000, "port number.")
|
||||
flags.DEFINE_string("config_name", None, "The name of the config dictionary.")
|
||||
flags.DEFINE_bool('run_on_borg', False,
|
||||
'Whether the servers are running on borg.')
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
|
||||
def main(unused_argv):
|
||||
servers = []
|
||||
server_creds = loas2.loas2_server_credentials()
|
||||
port = FLAGS.port
|
||||
if not FLAGS.run_on_borg:
|
||||
port = 20000 + FLAGS.server_id
|
||||
server = grpc.server(
|
||||
futures.ThreadPoolExecutor(max_workers=10), ports=(port,))
|
||||
servicer = ars_evaluation_service.ParameterEvaluationServicer(
|
||||
FLAGS.config_name, worker_id=FLAGS.server_id)
|
||||
ars_evaluation_service_pb2_grpc.add_EvaluationServicer_to_server(
|
||||
servicer, server)
|
||||
server.add_secure_port("[::]:{}".format(port), server_creds)
|
||||
servers.append(server)
|
||||
server.start()
|
||||
print("Start server {}".format(FLAGS.server_id))
|
||||
|
||||
# prevent the main thread from exiting
|
||||
try:
|
||||
while True:
|
||||
time.sleep(_ONE_DAY_IN_SECONDS)
|
||||
except KeyboardInterrupt:
|
||||
for server in servers:
|
||||
server.stop(0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
app.run(main)
|
||||
@@ -1,83 +0,0 @@
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
import functools
|
||||
from pybullet_envs.minitaur.envs import minitaur_gym_env
|
||||
from pybullet_envs.minitaur.envs import minitaur_reactive_env
|
||||
from pybullet_envs.minitaur.envs.env_randomizers import minitaur_env_randomizer
|
||||
from pybullet_envs.minitaur.envs.env_randomizers import minitaur_env_randomizer_from_config as randomizer_config_lib
|
||||
|
||||
MAX_LENGTH = 1000
|
||||
|
||||
|
||||
def merge_two_dicts(x, y):
|
||||
"""Given two dicts, merge them into a new dict as a shallow copy."""
|
||||
z = dict(x)
|
||||
z.update(y)
|
||||
return z
|
||||
|
||||
|
||||
# The default configurations.
|
||||
DEFAULT_CONFIG = dict(
|
||||
num_workers=8,
|
||||
num_directions=8,
|
||||
num_iterations=1000,
|
||||
deltas_used=8,
|
||||
step_size=0.02,
|
||||
delta_std=0.03,
|
||||
rollout_length=MAX_LENGTH,
|
||||
shift=0,
|
||||
seed=237,
|
||||
policy_type="linear",
|
||||
filter="MeanStdFilter",
|
||||
)
|
||||
|
||||
# Configuration specific to minitaur_gym_env.MinitaurGymEnv class.
|
||||
MINITAUR_GYM_CONFIG_ADDITIONS = dict(
|
||||
env=functools.partial(
|
||||
minitaur_gym_env.MinitaurGymEnv,
|
||||
urdf_version=minitaur_gym_env.DERPY_V0_URDF_VERSION,
|
||||
accurate_motor_model_enabled=True,
|
||||
motor_overheat_protection=True,
|
||||
pd_control_enabled=True,
|
||||
env_randomizer=None,#minitaur_env_randomizer.MinitaurEnvRandomizer(),
|
||||
render=False,
|
||||
num_steps_to_log=MAX_LENGTH))
|
||||
MINITAUR_GYM_CONFIG = merge_two_dicts(DEFAULT_CONFIG,
|
||||
MINITAUR_GYM_CONFIG_ADDITIONS)
|
||||
|
||||
# Configuration specific to MinitaurReactiveEnv class.
|
||||
MINITAUR_REACTIVE_CONFIG_ADDITIONS = dict(
|
||||
env=functools.partial(
|
||||
minitaur_reactive_env.MinitaurReactiveEnv,
|
||||
urdf_version=minitaur_gym_env.RAINBOW_DASH_V0_URDF_VERSION,
|
||||
energy_weight=0.005,
|
||||
accurate_motor_model_enabled=True,
|
||||
pd_latency=0.003,
|
||||
control_latency=0.02,
|
||||
motor_kd=0.015,
|
||||
remove_default_joint_damping=True,
|
||||
env_randomizer=None,
|
||||
render=False,
|
||||
num_steps_to_log=MAX_LENGTH))
|
||||
MINITAUR_REACTIVE_CONFIG = merge_two_dicts(DEFAULT_CONFIG,
|
||||
MINITAUR_REACTIVE_CONFIG_ADDITIONS)
|
||||
|
||||
# Configuration specific to MinitaurReactiveEnv class with randomizer.
|
||||
MINITAUR_REACTIVE_RANDOMIZER_CONFIG_ADDITIONS = dict(
|
||||
env=functools.partial(
|
||||
minitaur_reactive_env.MinitaurReactiveEnv,
|
||||
urdf_version=minitaur_gym_env.RAINBOW_DASH_V0_URDF_VERSION,
|
||||
energy_weight=0.005,
|
||||
accurate_motor_model_enabled=True,
|
||||
pd_latency=0.003,
|
||||
control_latency=0.02,
|
||||
motor_kd=0.015,
|
||||
remove_default_joint_damping=True,
|
||||
env_randomizer=randomizer_config_lib.MinitaurEnvRandomizerFromConfig(),
|
||||
render=False,
|
||||
num_steps_to_log=MAX_LENGTH))
|
||||
MINITAUR_REACTIVE_RANDOMIZER_CONFIG = merge_two_dicts(
|
||||
DEFAULT_CONFIG, MINITAUR_REACTIVE_RANDOMIZER_CONFIG_ADDITIONS)
|
||||
@@ -1,99 +0,0 @@
|
||||
"""
|
||||
blaze run -c opt //experimental/users/jietan/ARS:eval_ars -- \
|
||||
--logdir=/cns/ij-d/home/jietan/experiment/ARS/ars_react_nr01.191950338.191950550/ \
|
||||
--checkpoint=lin_policy_plus_990.npz \
|
||||
--num_rollouts=10
|
||||
"""
|
||||
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
import os, inspect
|
||||
import time
|
||||
|
||||
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
|
||||
os.sys.path.insert(0,currentdir)
|
||||
|
||||
from absl import app
|
||||
from absl import flags
|
||||
|
||||
import pdb
|
||||
import os
|
||||
import numpy as np
|
||||
import gym
|
||||
import config_ars
|
||||
import utility
|
||||
import policies
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
flags.DEFINE_string('logdir', None, 'The path of the checkpoint.')
|
||||
flags.DEFINE_string('checkpoint', None, 'The file name of the checkpoint.')
|
||||
flags.DEFINE_integer('num_rollouts', 1, 'The number of rollouts.')
|
||||
|
||||
|
||||
def main(argv):
|
||||
del argv # Unused.
|
||||
|
||||
print('loading and building expert policy')
|
||||
checkpoint_file = os.path.join(FLAGS.logdir, FLAGS.checkpoint)
|
||||
lin_policy = np.load(checkpoint_file, encoding='bytes')
|
||||
lin_policy = lin_policy.items()[0][1]
|
||||
|
||||
M = lin_policy[0]
|
||||
# mean and std of state vectors estimated online by ARS.
|
||||
mean = lin_policy[1]
|
||||
std = lin_policy[2]
|
||||
|
||||
config = utility.load_config(FLAGS.logdir)
|
||||
print("config=",config)
|
||||
env = config['env'](hard_reset=True, render=True)
|
||||
ob_dim = env.observation_space.shape[0]
|
||||
ac_dim = env.action_space.shape[0]
|
||||
|
||||
# set policy parameters. Possible filters: 'MeanStdFilter' for v2, 'NoFilter' for v1.
|
||||
policy_params = {
|
||||
'type': 'linear',
|
||||
'ob_filter': config['filter'],
|
||||
'ob_dim': ob_dim,
|
||||
'ac_dim': ac_dim,
|
||||
"weights": M,
|
||||
"mean": mean,
|
||||
"std": std,
|
||||
}
|
||||
policy = policies.LinearPolicy(policy_params, update_filter=False)
|
||||
returns = []
|
||||
observations = []
|
||||
actions = []
|
||||
for i in range(FLAGS.num_rollouts):
|
||||
print('iter', i)
|
||||
obs = env.reset()
|
||||
done = False
|
||||
totalr = 0.
|
||||
steps = 0
|
||||
while not done:
|
||||
action = policy.act(obs)
|
||||
observations.append(obs)
|
||||
actions.append(action)
|
||||
|
||||
obs, r, done, _ = env.step(action)
|
||||
time.sleep(1./100.)
|
||||
totalr += r
|
||||
steps += 1
|
||||
if steps % 100 == 0:
|
||||
print('%i/%i' % (steps, config['rollout_length']))
|
||||
if steps >= config['rollout_length']:
|
||||
break
|
||||
returns.append(totalr)
|
||||
|
||||
print('returns', returns)
|
||||
print('mean return', np.mean(returns))
|
||||
print('std of return', np.std(returns))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
flags.mark_flag_as_required('logdir')
|
||||
flags.mark_flag_as_required('checkpoint')
|
||||
app.run(main)
|
||||
@@ -1,280 +0,0 @@
|
||||
# Code in this file is copied and adapted from
|
||||
# https://github.com/ray-project/ray/blob/master/python/ray/rllib/utils/filter.py
|
||||
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
class Filter(object):
|
||||
"""Processes input, possibly statefully."""
|
||||
|
||||
def update(self, other, *args, **kwargs):
|
||||
"""Updates self with "new state" from other filter."""
|
||||
raise NotImplementedError
|
||||
|
||||
def copy(self):
|
||||
"""Creates a new object with same state as self.
|
||||
|
||||
Returns:
|
||||
copy (Filter): Copy of self"""
|
||||
raise NotImplementedError
|
||||
|
||||
def sync(self, other):
|
||||
"""Copies all state from other filter to self."""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class NoFilter(Filter):
|
||||
def __init__(self, *args):
|
||||
pass
|
||||
|
||||
def __call__(self, x, update=True):
|
||||
return np.asarray(x, dtype = np.float64)
|
||||
|
||||
def update(self, other, *args, **kwargs):
|
||||
pass
|
||||
|
||||
def copy(self):
|
||||
return self
|
||||
|
||||
def sync(self, other):
|
||||
pass
|
||||
|
||||
def stats_increment(self):
|
||||
pass
|
||||
|
||||
def clear_buffer(self):
|
||||
pass
|
||||
|
||||
def get_stats(self):
|
||||
return 0, 1
|
||||
|
||||
@property
|
||||
def mean(self):
|
||||
return 0
|
||||
|
||||
@property
|
||||
def var(self):
|
||||
return 1
|
||||
|
||||
@property
|
||||
def std(self):
|
||||
return 1
|
||||
|
||||
|
||||
|
||||
# http://www.johndcook.com/blog/standard_deviation/
|
||||
class RunningStat(object):
|
||||
|
||||
def __init__(self, shape=None):
|
||||
self._n = 0
|
||||
self._M = np.zeros(shape, dtype = np.float64)
|
||||
self._S = np.zeros(shape, dtype = np.float64)
|
||||
self._M2 = np.zeros(shape, dtype = np.float64)
|
||||
|
||||
def copy(self):
|
||||
other = RunningStat()
|
||||
other._n = self._n
|
||||
other._M = np.copy(self._M)
|
||||
other._S = np.copy(self._S)
|
||||
return other
|
||||
|
||||
def push(self, x):
|
||||
x = np.asarray(x)
|
||||
# Unvectorized update of the running statistics.
|
||||
assert x.shape == self._M.shape, ("x.shape = {}, self.shape = {}"
|
||||
.format(x.shape, self._M.shape))
|
||||
n1 = self._n
|
||||
self._n += 1
|
||||
if self._n == 1:
|
||||
self._M[...] = x
|
||||
else:
|
||||
delta = x - self._M
|
||||
deltaM2 = np.square(x) - self._M2
|
||||
self._M[...] += delta / self._n
|
||||
self._S[...] += delta * delta * n1 / self._n
|
||||
|
||||
|
||||
def update(self, other):
|
||||
n1 = self._n
|
||||
n2 = other._n
|
||||
n = n1 + n2
|
||||
delta = self._M - other._M
|
||||
delta2 = delta * delta
|
||||
M = (n1 * self._M + n2 * other._M) / n
|
||||
S = self._S + other._S + delta2 * n1 * n2 / n
|
||||
self._n = n
|
||||
self._M = M
|
||||
self._S = S
|
||||
|
||||
def __repr__(self):
|
||||
return '(n={}, mean_mean={}, mean_std={})'.format(
|
||||
self.n, np.mean(self.mean), np.mean(self.std))
|
||||
|
||||
@property
|
||||
def n(self):
|
||||
return self._n
|
||||
|
||||
@property
|
||||
def mean(self):
|
||||
return self._M
|
||||
|
||||
@property
|
||||
def var(self):
|
||||
return self._S / (self._n - 1) if self._n > 1 else np.square(self._M)
|
||||
|
||||
@property
|
||||
def std(self):
|
||||
return np.sqrt(self.var)
|
||||
|
||||
@property
|
||||
def shape(self):
|
||||
return self._M.shape
|
||||
|
||||
|
||||
class MeanStdFilter(Filter):
|
||||
"""Keeps track of a running mean for seen states"""
|
||||
|
||||
def __init__(self, shape, demean=True, destd=True):
|
||||
self.shape = shape
|
||||
self.demean = demean
|
||||
self.destd = destd
|
||||
self.rs = RunningStat(shape)
|
||||
# In distributed rollouts, each worker sees different states.
|
||||
# The buffer is used to keep track of deltas amongst all the
|
||||
# observation filters.
|
||||
|
||||
self.buffer = RunningStat(shape)
|
||||
|
||||
self.mean = np.zeros(shape, dtype = np.float64)
|
||||
self.std = np.ones(shape, dtype = np.float64)
|
||||
|
||||
def clear_buffer(self):
|
||||
self.buffer = RunningStat(self.shape)
|
||||
return
|
||||
|
||||
def update(self, other, copy_buffer=False):
|
||||
"""Takes another filter and only applies the information from the
|
||||
buffer.
|
||||
|
||||
Using notation `F(state, buffer)`
|
||||
Given `Filter1(x1, y1)` and `Filter2(x2, yt)`,
|
||||
`update` modifies `Filter1` to `Filter1(x1 + yt, y1)`
|
||||
If `copy_buffer`, then `Filter1` is modified to
|
||||
`Filter1(x1 + yt, yt)`.
|
||||
"""
|
||||
self.rs.update(other.buffer)
|
||||
if copy_buffer:
|
||||
self.buffer = other.buffer.copy()
|
||||
return
|
||||
|
||||
def copy(self):
|
||||
"""Returns a copy of Filter."""
|
||||
other = MeanStdFilter(self.shape)
|
||||
other.demean = self.demean
|
||||
other.destd = self.destd
|
||||
other.rs = self.rs.copy()
|
||||
other.buffer = self.buffer.copy()
|
||||
return other
|
||||
|
||||
def sync(self, other):
|
||||
"""Syncs all fields together from other filter.
|
||||
|
||||
Using notation `F(state, buffer)`
|
||||
Given `Filter1(x1, y1)` and `Filter2(x2, yt)`,
|
||||
`sync` modifies `Filter1` to `Filter1(x2, yt)`
|
||||
"""
|
||||
assert other.shape == self.shape, "Shapes don't match!"
|
||||
self.demean = other.demean
|
||||
self.destd = other.destd
|
||||
self.rs = other.rs.copy()
|
||||
self.buffer = other.buffer.copy()
|
||||
return
|
||||
|
||||
def __call__(self, x, update=True):
|
||||
x = np.asarray(x, dtype = np.float64)
|
||||
if update:
|
||||
if len(x.shape) == len(self.rs.shape) + 1:
|
||||
# The vectorized case.
|
||||
for i in range(x.shape[0]):
|
||||
self.rs.push(x[i])
|
||||
self.buffer.push(x[i])
|
||||
else:
|
||||
# The unvectorized case.
|
||||
self.rs.push(x)
|
||||
self.buffer.push(x)
|
||||
if self.demean:
|
||||
x = x - self.mean
|
||||
if self.destd:
|
||||
x = x / (self.std + 1e-8)
|
||||
return x
|
||||
|
||||
def stats_increment(self):
|
||||
self.mean = self.rs.mean
|
||||
self.std = self.rs.std
|
||||
|
||||
# Set values for std less than 1e-7 to +inf to avoid
|
||||
# dividing by zero. State elements with zero variance
|
||||
# are set to zero as a result.
|
||||
self.std[self.std < 1e-7] = float("inf")
|
||||
return
|
||||
|
||||
def get_stats(self):
|
||||
return self.rs.mean, (self.rs.std + 1e-8)
|
||||
|
||||
def __repr__(self):
|
||||
return 'MeanStdFilter({}, {}, {}, {}, {}, {})'.format(
|
||||
self.shape, self.demean,
|
||||
self.rs, self.buffer)
|
||||
|
||||
|
||||
def get_filter(filter_config, shape = None):
|
||||
if filter_config == "MeanStdFilter":
|
||||
return MeanStdFilter(shape)
|
||||
elif filter_config == "NoFilter":
|
||||
return NoFilter()
|
||||
else:
|
||||
raise Exception("Unknown observation_filter: " +
|
||||
str(filter_config))
|
||||
|
||||
|
||||
def test_running_stat():
|
||||
for shp in ((), (3,), (3, 4)):
|
||||
li = []
|
||||
rs = RunningStat(shp)
|
||||
for _ in range(5):
|
||||
val = np.random.randn(*shp)
|
||||
rs.push(val)
|
||||
li.append(val)
|
||||
m = np.mean(li, axis=0)
|
||||
assert np.allclose(rs.mean, m)
|
||||
v = np.square(m) if (len(li) == 1) else np.var(li, ddof=1, axis=0)
|
||||
assert np.allclose(rs.var, v)
|
||||
|
||||
|
||||
def test_combining_stat():
|
||||
for shape in [(), (3,), (3, 4)]:
|
||||
li = []
|
||||
rs1 = RunningStat(shape)
|
||||
rs2 = RunningStat(shape)
|
||||
rs = RunningStat(shape)
|
||||
for _ in range(5):
|
||||
val = np.random.randn(*shape)
|
||||
rs1.push(val)
|
||||
rs.push(val)
|
||||
li.append(val)
|
||||
for _ in range(9):
|
||||
rs2.push(val)
|
||||
rs.push(val)
|
||||
li.append(val)
|
||||
rs1.update(rs2)
|
||||
assert np.allclose(rs.mean, rs1.mean)
|
||||
assert np.allclose(rs.std, rs1.std)
|
||||
|
||||
|
||||
test_running_stat()
|
||||
test_combining_stat()
|
||||
@@ -1,29 +0,0 @@
|
||||
delta_std: 0.03
|
||||
deltas_used: 8
|
||||
env: !!python/object/apply:functools.partial
|
||||
args:
|
||||
- &id001 !!python/name:pybullet_envs.minitaur.envs.minitaur_reactive_env.MinitaurReactiveEnv ''
|
||||
state: !!python/tuple
|
||||
- *id001
|
||||
- !!python/tuple []
|
||||
- accurate_motor_model_enabled: true
|
||||
control_latency: 0.02
|
||||
energy_weight: 0.005
|
||||
env_randomizer: null
|
||||
motor_kd: 0.015
|
||||
num_steps_to_log: 1000
|
||||
pd_latency: 0.003
|
||||
remove_default_joint_damping: true
|
||||
render: false
|
||||
urdf_version: rainbow_dash_v0
|
||||
- null
|
||||
filter: MeanStdFilter
|
||||
num_directions: 8
|
||||
num_iterations: 1000
|
||||
num_workers: 8
|
||||
policy_type: linear
|
||||
rollout_length: 1000
|
||||
seed: 237
|
||||
shift: 0
|
||||
step_size: 0.02
|
||||
|
||||
Binary file not shown.
@@ -1,104 +0,0 @@
|
||||
# Code in this file is copied and adapted from
|
||||
# https://github.com/berkeleydeeprlcourse
|
||||
|
||||
import json
|
||||
|
||||
"""
|
||||
|
||||
Some simple logging functionality, inspired by rllab's logging.
|
||||
Assumes that each diagnostic gets logged each iteration
|
||||
|
||||
Call logz.configure_output_dir() to start logging to a
|
||||
tab-separated-values file (some_folder_name/log.txt)
|
||||
|
||||
"""
|
||||
|
||||
import os.path as osp, shutil, time, atexit, os, subprocess
|
||||
|
||||
color2num = dict(
|
||||
gray=30,
|
||||
red=31,
|
||||
green=32,
|
||||
yellow=33,
|
||||
blue=34,
|
||||
magenta=35,
|
||||
cyan=36,
|
||||
white=37,
|
||||
crimson=38
|
||||
)
|
||||
|
||||
def colorize(string, color, bold=False, highlight=False):
|
||||
attr = []
|
||||
num = color2num[color]
|
||||
if highlight: num += 10
|
||||
attr.append(str(num))
|
||||
if bold: attr.append('1')
|
||||
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
|
||||
|
||||
class G(object):
|
||||
output_dir = None
|
||||
output_file = None
|
||||
first_row = True
|
||||
log_headers = []
|
||||
log_current_row = {}
|
||||
|
||||
def configure_output_dir(d=None):
|
||||
"""
|
||||
Set output directory to d, or to /tmp/somerandomnumber if d is None
|
||||
"""
|
||||
G.first_row = True
|
||||
G.log_headers = []
|
||||
G.log_current_row = {}
|
||||
|
||||
G.output_dir = d or "/tmp/experiments/%i"%int(time.time())
|
||||
if not osp.exists(G.output_dir):
|
||||
os.makedirs(G.output_dir)
|
||||
G.output_file = open(osp.join(G.output_dir, "log.txt"), 'w')
|
||||
atexit.register(G.output_file.close)
|
||||
print(colorize("Logging data to %s"%G.output_file.name, 'green', bold=True))
|
||||
|
||||
def log_tabular(key, val):
|
||||
"""
|
||||
Log a value of some diagnostic
|
||||
Call this once for each diagnostic quantity, each iteration
|
||||
"""
|
||||
if G.first_row:
|
||||
G.log_headers.append(key)
|
||||
else:
|
||||
assert key in G.log_headers, "Trying to introduce a new key %s that you didn't include in the first iteration"%key
|
||||
assert key not in G.log_current_row, "You already set %s this iteration. Maybe you forgot to call dump_tabular()"%key
|
||||
G.log_current_row[key] = val
|
||||
|
||||
|
||||
def save_params(params):
|
||||
with open(osp.join(G.output_dir, "params.json"), 'w') as out:
|
||||
out.write(json.dumps(params, separators=(',\n','\t:\t'), sort_keys=True))
|
||||
|
||||
|
||||
def dump_tabular():
|
||||
"""
|
||||
Write all of the diagnostics from the current iteration
|
||||
"""
|
||||
vals = []
|
||||
key_lens = [len(key) for key in G.log_headers]
|
||||
max_key_len = max(15,max(key_lens))
|
||||
keystr = '%'+'%d'%max_key_len
|
||||
fmt = "| " + keystr + "s | %15s |"
|
||||
n_slashes = 22 + max_key_len
|
||||
print("-"*n_slashes)
|
||||
for key in G.log_headers:
|
||||
val = G.log_current_row.get(key, "")
|
||||
if hasattr(val, "__float__"): valstr = "%8.3g"%val
|
||||
else: valstr = val
|
||||
print(fmt%(key, valstr))
|
||||
vals.append(val)
|
||||
print("-"*n_slashes)
|
||||
if G.output_file is not None:
|
||||
if G.first_row:
|
||||
G.output_file.write("\t".join(G.log_headers))
|
||||
G.output_file.write("\n")
|
||||
G.output_file.write("\t".join(map(str,vals)))
|
||||
G.output_file.write("\n")
|
||||
G.output_file.flush()
|
||||
G.log_current_row.clear()
|
||||
G.first_row=False
|
||||
@@ -1,35 +0,0 @@
|
||||
# Code in this file is copied and adapted from
|
||||
# https://github.com/openai/evolution-strategies-starter.
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
import numpy as np
|
||||
|
||||
# OPTIMIZERS FOR MINIMIZING OBJECTIVES
|
||||
class Optimizer(object):
|
||||
def __init__(self, w_policy):
|
||||
self.w_policy = w_policy.flatten()
|
||||
self.dim = w_policy.size
|
||||
self.t = 0
|
||||
|
||||
def update(self, globalg):
|
||||
self.t += 1
|
||||
step = self._compute_step(globalg)
|
||||
ratio = np.linalg.norm(step) / (np.linalg.norm(self.w_policy) + 1e-5)
|
||||
return self.w_policy + step, ratio
|
||||
|
||||
def _compute_step(self, globalg):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class SGD(Optimizer):
|
||||
def __init__(self, pi, stepsize):
|
||||
Optimizer.__init__(self, pi)
|
||||
self.stepsize = stepsize
|
||||
|
||||
def _compute_step(self, globalg):
|
||||
step = -self.stepsize * globalg
|
||||
return step
|
||||
|
||||
@@ -1,72 +0,0 @@
|
||||
"""
|
||||
Policy class for computing action from weights and observation vector.
|
||||
Horia Mania --- hmania@berkeley.edu
|
||||
Aurelia Guy
|
||||
Benjamin Recht
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
import numpy as np
|
||||
import filter
|
||||
|
||||
|
||||
class Policy(object):
|
||||
|
||||
def __init__(self, policy_params):
|
||||
|
||||
self.ob_dim = policy_params['ob_dim']
|
||||
self.ac_dim = policy_params['ac_dim']
|
||||
self.weights = np.empty(0)
|
||||
|
||||
# a filter for updating statistics of the observations and normalizing
|
||||
# inputs to the policies
|
||||
self.observation_filter = filter.get_filter(
|
||||
policy_params['ob_filter'], shape=(self.ob_dim,))
|
||||
self.update_filter = True
|
||||
|
||||
def update_weights(self, new_weights):
|
||||
self.weights[:] = new_weights[:]
|
||||
return
|
||||
|
||||
def get_weights(self):
|
||||
return self.weights
|
||||
|
||||
def get_observation_filter(self):
|
||||
return self.observation_filter
|
||||
|
||||
def act(self, ob):
|
||||
raise NotImplementedError
|
||||
|
||||
def copy(self):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class LinearPolicy(Policy):
|
||||
"""
|
||||
Linear policy class that computes action as <w, ob>.
|
||||
"""
|
||||
|
||||
def __init__(self, policy_params, update_filter=True):
|
||||
Policy.__init__(self, policy_params)
|
||||
self.weights = np.zeros(self.ac_dim * self.ob_dim, dtype=np.float64)
|
||||
if "weights" in policy_params:
|
||||
self.weights = policy_params["weights"]
|
||||
if "mean" in policy_params:
|
||||
self.observation_filter.mean = policy_params["mean"]
|
||||
if "std" in policy_params:
|
||||
self.observation_filter.std = policy_params["std"]
|
||||
self.update_filter = update_filter
|
||||
|
||||
def act(self, ob):
|
||||
ob = self.observation_filter(ob, update=self.update_filter)
|
||||
matrix_weights = np.reshape(self.weights, (self.ac_dim, self.ob_dim))
|
||||
return np.clip(np.dot(matrix_weights, ob), -1.0, 1.0)
|
||||
|
||||
def get_weights_plus_stats(self):
|
||||
|
||||
mu, std = self.observation_filter.get_stats()
|
||||
aux = np.asarray([self.weights, mu, std])
|
||||
return aux
|
||||
@@ -1,40 +0,0 @@
|
||||
"""
|
||||
Code in this file is copied and adapted from
|
||||
https://github.com/ray-project/ray/tree/master/python/ray/rllib/es
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
import numpy as np
|
||||
|
||||
|
||||
def create_shared_noise():
|
||||
"""
|
||||
Create a large array of noise to be shared by all workers. Used
|
||||
for avoiding the communication of the random perturbations delta.
|
||||
"""
|
||||
|
||||
seed = 12345
|
||||
count = 250000000
|
||||
noise = np.random.RandomState(seed).randn(count).astype(np.float64)
|
||||
return noise
|
||||
|
||||
|
||||
class SharedNoiseTable(object):
|
||||
def __init__(self, noise, seed = 11):
|
||||
|
||||
self.rg = np.random.RandomState(seed)
|
||||
self.noise = noise
|
||||
assert self.noise.dtype == np.float64
|
||||
|
||||
def get(self, i, dim):
|
||||
return self.noise[i:i + dim]
|
||||
|
||||
def sample_index(self, dim):
|
||||
return self.rg.randint(0, len(self.noise) - dim + 1)
|
||||
|
||||
def get_delta(self, dim):
|
||||
idx = self.sample_index(dim)
|
||||
return idx, self.get(idx, dim)
|
||||
|
||||
@@ -1,27 +0,0 @@
|
||||
"""
|
||||
|
||||
blaze build -c opt //experimental/users/jietan/ARS:start_ars_servers
|
||||
blaze-bin/experimental/users/jietan/ARS/start_ars_servers
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
import time
|
||||
import subprocess
|
||||
from absl import app
|
||||
from absl import flags
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
flags.DEFINE_integer("num_servers", 8, "number of servers")
|
||||
|
||||
def main(argv):
|
||||
del argv # Unused.
|
||||
for server_id in xrange(FLAGS.num_servers):
|
||||
args = ["blaze-bin/experimental/users/jietan/ARS/ars_server", "--config_name=MINITAUR_GYM_CONFIG", "--server_id={}".format(server_id), "--run_on_borg=False"]
|
||||
subprocess.Popen(args)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(main)
|
||||
@@ -1,93 +0,0 @@
|
||||
// Example borg file to do a parameter sweep.
|
||||
//
|
||||
// To run:
|
||||
// echo `srcfs get_readonly`-`g4 p | head -1 | awk '{print $2}'`
|
||||
// blaze build -c opt experimental/users/jietan/ARS:ars_server.par
|
||||
// blaze build -c opt experimental/users/jietan/ARS:ars_client.par
|
||||
// borgcfg --skip_confirmation --vars 'base_cl=191950338,my_cl=191950550,label=ars_react_nr01,config=MINITAUR_REACTIVE_CONFIG' experimental/users/jietan/ARS/train_ars.borg reload
|
||||
// borgcfg --skip_confirmation --vars 'base_cl=191950338,my_cl=191950550,label=ars_react_rd01,config=MINITAUR_REACTIVE_RANDOMIZER_CONFIG' experimental/users/jietan/ARS/train_ars.borg reload
|
||||
|
||||
|
||||
import '//production/borg/templates/lambda/buildtool_support.borg' as build
|
||||
import '//production/borg/templates/lambda/dnsname.borg' as dns
|
||||
|
||||
vars = {
|
||||
cell = 'atlanta'
|
||||
charged_user = 'robotics'
|
||||
base_cl = 0
|
||||
my_cl = 0
|
||||
label = external
|
||||
user = real_username()
|
||||
workers = 8
|
||||
config = external
|
||||
cns_home = "/cns/ij-d/home/%user%"
|
||||
logdir = "%cns_home%/experiment/ARS/%label%.%base_cl%.%my_cl%/"
|
||||
}
|
||||
|
||||
service augmented_random_search {
|
||||
runtime {
|
||||
cell = vars.cell
|
||||
}
|
||||
|
||||
scheduling = {
|
||||
priority = 100
|
||||
batch_quota = {
|
||||
strategy = 'RUN_SOON'
|
||||
}
|
||||
deadline = 3600 * 24
|
||||
}
|
||||
accounting = {
|
||||
charged_user = vars.charged_user
|
||||
}
|
||||
requirements {
|
||||
autopilot = true
|
||||
}
|
||||
params = {
|
||||
mygoogle3 = build.google3dir(myfilename())
|
||||
experiment_dir = 'experimental/users/jietan/ARS/'
|
||||
}
|
||||
|
||||
job ars_server = {
|
||||
runtime {
|
||||
cell = vars.cell
|
||||
}
|
||||
name = real_username() + '_server_' + vars.label
|
||||
replicas = vars.workers
|
||||
binary_path = build.binfile_v2(params.mygoogle3,
|
||||
params.experiment_dir + 'ars_server')
|
||||
runfiles = binary_path + '.runfiles/google3/'
|
||||
packages = {
|
||||
package third_party = {
|
||||
directory = runfiles + 'third_party/'
|
||||
}
|
||||
}
|
||||
binary = build.binfile(params.mygoogle3,
|
||||
params.experiment_dir + 'ars_server.par')
|
||||
args = {
|
||||
server_id = '%task%'
|
||||
config_name = vars.config
|
||||
port = '%port%'
|
||||
run_on_borg = true
|
||||
}
|
||||
}
|
||||
job ars_client = {
|
||||
name = real_username() + '_client_' + vars.label
|
||||
binary_path = build.binfile_v2(params.mygoogle3,
|
||||
params.experiment_dir + 'ars_client')
|
||||
runfiles = binary_path + '.runfiles/google3/'
|
||||
packages = {
|
||||
package third_party = {
|
||||
directory = runfiles + 'third_party/'
|
||||
}
|
||||
}
|
||||
binary = build.binfile(params.mygoogle3,
|
||||
params.experiment_dir + 'ars_client.par')
|
||||
args = {
|
||||
server_address = dns.borg_dns_name(ars_server)
|
||||
num_servers = vars.workers
|
||||
config_name = vars.config
|
||||
logdir = vars.logdir
|
||||
run_on_borg = true
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,64 +0,0 @@
|
||||
"""
|
||||
|
||||
blaze build -c opt //experimental/users/jietan/ARS:train_ars
|
||||
blaze-bin/experimental/users/jietan/ARS/train_ars \
|
||||
--logdir=/cns/ij-d/home/jietan/experiment/ARS/test1 \
|
||||
--config_name=MINITAUR_GYM_CONFIG
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
|
||||
import os
|
||||
from absl import app
|
||||
from absl import flags
|
||||
import ars
|
||||
import config_ars
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
flags.DEFINE_string('logdir', None, 'The directory to write the log file.')
|
||||
flags.DEFINE_string('config_name', None, 'The name of the config dictionary')
|
||||
|
||||
|
||||
def run_ars(config, logdir):
|
||||
|
||||
env = config["env"]()
|
||||
ob_dim = env.observation_space.shape[0]
|
||||
ac_dim = env.action_space.shape[0]
|
||||
|
||||
# set policy parameters. Possible filters: 'MeanStdFilter' for v2, 'NoFilter' for v1.
|
||||
policy_params = {
|
||||
'type': 'linear',
|
||||
'ob_filter': config['filter'],
|
||||
'ob_dim': ob_dim,
|
||||
'ac_dim': ac_dim
|
||||
}
|
||||
|
||||
ARS = ars.ARSLearner(
|
||||
env_callback=config['env'],
|
||||
policy_params=policy_params,
|
||||
num_deltas=config['num_directions'],
|
||||
deltas_used=config['deltas_used'],
|
||||
step_size=config['step_size'],
|
||||
delta_std=config['delta_std'],
|
||||
logdir=logdir,
|
||||
rollout_length=config['rollout_length'],
|
||||
shift=config['shift'],
|
||||
params=config,
|
||||
seed=config['seed'])
|
||||
|
||||
return ARS.train(config['num_iterations'])
|
||||
|
||||
|
||||
def main(argv):
|
||||
del argv # Unused.
|
||||
config = getattr(config_ars, FLAGS.config_name)
|
||||
run_ars(config=config, logdir=FLAGS.logdir)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
flags.mark_flag_as_required('logdir')
|
||||
flags.mark_flag_as_required('config_name')
|
||||
app.run(main)
|
||||
@@ -1,29 +0,0 @@
|
||||
"""Tests for google3.experimental.users.jietan.ARS.train_ars.
|
||||
blaze build -c opt //experimental/users/jietan/ARS:train_ars_test
|
||||
blaze-bin/experimental/users/jietan/ARS/train_ars_test
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
from absl import flags
|
||||
from google3.testing.pybase import googletest
|
||||
from google3.experimental.users.jietan.ARS import train_ars
|
||||
from google3.experimental.users.jietan.ARS import config_ars
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
MAX_RETURN_AFTER_TWO_ITEATIONS = 0.0890905394617
|
||||
|
||||
class TrainArsTest(googletest.TestCase):
|
||||
|
||||
def testArsTwoStepResult(self):
|
||||
config = getattr(config_ars, "MINITAUR_REACTIVE_CONFIG")
|
||||
config['num_iterations'] = 2
|
||||
info = train_ars.run_ars(config=config, logdir=FLAGS.test_tmpdir)
|
||||
print (info)
|
||||
self.assertAlmostEqual(info["max_reward"], MAX_RETURN_AFTER_TWO_ITEATIONS)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
googletest.main()
|
||||
@@ -1,52 +0,0 @@
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
import os
|
||||
import ruamel.yaml as yaml
|
||||
|
||||
def save_config(config, logdir):
|
||||
"""Save a new configuration by name.
|
||||
|
||||
If a logging directory is specified, is will be created and the configuration
|
||||
will be stored there. Otherwise, a log message will be printed.
|
||||
|
||||
Args:
|
||||
config: Configuration object.
|
||||
logdir: Location for writing summaries and checkpoints if specified.
|
||||
|
||||
Returns:
|
||||
Configuration object.
|
||||
"""
|
||||
message = 'Start a new run and write summaries and checkpoints to {}.'
|
||||
print(message.format(logdir))
|
||||
config_path = os.path.join(logdir, 'config.yaml')
|
||||
yaml.dump(config, config_path, default_flow_style=False)
|
||||
return config
|
||||
|
||||
|
||||
def load_config(logdir):
|
||||
"""Load a configuration from the log directory.
|
||||
|
||||
Args:
|
||||
logdir: The logging directory containing the configuration file.
|
||||
|
||||
Raises:
|
||||
IOError: The logging directory does not contain a configuration file.
|
||||
|
||||
Returns:
|
||||
Configuration object.
|
||||
"""
|
||||
config_path = logdir and os.path.join(logdir, 'config.yaml')
|
||||
if not config_path:
|
||||
message = (
|
||||
'Cannot resume an existing run since the logging directory does not '
|
||||
'contain a configuration file.')
|
||||
raise IOError(message)
|
||||
print("config_path=",config_path)
|
||||
|
||||
stream = open(config_path, 'r')
|
||||
config = yaml.load(stream)
|
||||
message = 'Resume run and write summaries and checkpoints to {}.'
|
||||
print(message.format(logdir))
|
||||
return config
|
||||
@@ -1,28 +0,0 @@
|
||||
# Code in this file is copied and adapted from
|
||||
# https://github.com/openai/evolution-strategies-starter.
|
||||
|
||||
import numpy as np
|
||||
|
||||
def itergroups(items, group_size):
|
||||
assert group_size >= 1
|
||||
group = []
|
||||
for x in items:
|
||||
group.append(x)
|
||||
if len(group) == group_size:
|
||||
yield tuple(group)
|
||||
del group[:]
|
||||
if group:
|
||||
yield tuple(group)
|
||||
|
||||
|
||||
|
||||
def batched_weighted_sum(weights, vecs, batch_size):
|
||||
total = 0
|
||||
num_items_summed = 0
|
||||
for batch_weights, batch_vecs in zip(itergroups(weights, batch_size),
|
||||
itergroups(vecs, batch_size)):
|
||||
assert len(batch_weights) == len(batch_vecs) <= batch_size
|
||||
total += np.dot(np.asarray(batch_weights, dtype=np.float64),
|
||||
np.asarray(batch_vecs, dtype=np.float64))
|
||||
num_items_summed += len(batch_weights)
|
||||
return total, num_items_summed
|
||||
21
examples/pybullet/gym/pybullet_utils/examples/mjcf2urdf.py
Normal file
21
examples/pybullet/gym/pybullet_utils/examples/mjcf2urdf.py
Normal file
@@ -0,0 +1,21 @@
|
||||
#rudimentary MuJoCo mjcf to ROS URDF converter using the UrdfEditor
|
||||
|
||||
import pybullet_utils.bullet_client as bc
|
||||
import pybullet_data as pd
|
||||
|
||||
import pybullet_utils.urdfEditor as ed
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
|
||||
parser.add_argument('--mjcf', help='MuJoCo xml file to be converted to URDF', default='mjcf/humanoid.xml')
|
||||
args = parser.parse_args()
|
||||
|
||||
p = bc.BulletClient()
|
||||
p.setAdditionalSearchPath(pd.getDataPath())
|
||||
objs = p.loadMJCF(args.mjcf, flags=p.URDF_USE_IMPLICIT_CYLINDER)
|
||||
|
||||
for o in objs:
|
||||
#print("o=",o, p.getBodyInfo(o), p.getNumJoints(o))
|
||||
humanoid = objs[o]
|
||||
ed0 = ed.UrdfEditor()
|
||||
ed0.initializeFromBulletBody(humanoid, p._client)
|
||||
ed0.saveUrdf(p.getBodyInfo(0)[1]+"_"+p.getBodyInfo(o)[0]+".urdf")
|
||||
@@ -1,7 +1,6 @@
|
||||
import pybullet as p
|
||||
import time
|
||||
|
||||
|
||||
class UrdfInertial(object):
|
||||
def __init__(self):
|
||||
self.mass = 1
|
||||
@@ -201,6 +200,10 @@ class UrdfEditor(object):
|
||||
file.write("\t\t</inertial>\n")
|
||||
|
||||
def writeVisualShape(self,file,urdfVisual, precision=5):
|
||||
#we don't support loading capsule types from visuals, so auto-convert from collision shape
|
||||
if urdfVisual.geom_type == p.GEOM_CAPSULE:
|
||||
return
|
||||
|
||||
file.write("\t\t<visual>\n")
|
||||
str = '\t\t\t<origin rpy="{:.{prec}f} {:.{prec}f} {:.{prec}f}" xyz="{:.{prec}f} {:.{prec}f} {:.{prec}f}"/>\n'.format(\
|
||||
urdfVisual.origin_rpy[0],urdfVisual.origin_rpy[1],urdfVisual.origin_rpy[2],
|
||||
@@ -276,8 +279,13 @@ class UrdfEditor(object):
|
||||
file.write("\">\n")
|
||||
|
||||
self.writeInertial(file,urdfLink.urdf_inertial)
|
||||
hasCapsules = False
|
||||
for v in urdfLink.urdf_visual_shapes:
|
||||
self.writeVisualShape(file,v)
|
||||
if (v.geom_type == p.GEOM_CAPSULE):
|
||||
hasCapsules = True
|
||||
if (not hasCapsules):
|
||||
for v in urdfLink.urdf_visual_shapes:
|
||||
self.writeVisualShape(file,v)
|
||||
for c in urdfLink.urdf_collision_shapes:
|
||||
self.writeCollisionShape(file,c)
|
||||
file.write("\t</link>\n")
|
||||
|
||||
@@ -6464,7 +6464,7 @@ static PyObject* pybullet_createCollisionShape(PyObject* self, PyObject* args, P
|
||||
{
|
||||
pybullet_internalSetVector4d(collisionFrameOrientationObj, collisionFrameOrientation);
|
||||
}
|
||||
b3CreateVisualShapeSetChildTransform(commandHandle, shapeIndex, collisionFramePosition, collisionFrameOrientation);
|
||||
b3CreateCollisionShapeSetChildTransform(commandHandle, shapeIndex, collisionFramePosition, collisionFrameOrientation);
|
||||
}
|
||||
statusHandle = b3SubmitClientCommandAndWaitStatus(sm, commandHandle);
|
||||
statusType = b3GetStatusType(statusHandle);
|
||||
|
||||
9
setup.py
9
setup.py
@@ -463,15 +463,16 @@ egl_renderer_sources = \
|
||||
+["src/BulletCollision/CollisionShapes/btConvexInternalShape.cpp"]\
|
||||
+["src/Bullet3Common/b3Logging.cpp"]\
|
||||
+["src/LinearMath/btAlignedAllocator.cpp"]\
|
||||
+["src/LinearMath/btGeometryUtil.cpp"]\
|
||||
+["src/LinearMath/btConvexHull.cpp"]\
|
||||
+["src/LinearMath/btConvexHullComputer.cpp"]\
|
||||
+["src/LinearMath/btConvexHullComputer.cpp"] \
|
||||
+["src/LinearMath/btGeometryUtil.cpp"]\
|
||||
+["src/LinearMath/btQuickprof.cpp"] \
|
||||
+["src/LinearMath/btThreads.cpp"] \
|
||||
+["src/Bullet3Common/b3AlignedAllocator.cpp"] \
|
||||
+["examples/ThirdPartyLibs/glad/gl.c"]\
|
||||
+["examples/OpenGLWindow/GLInstancingRenderer.cpp"]\
|
||||
+["examples/OpenGLWindow/GLRenderToTexture.cpp"] \
|
||||
+["examples/OpenGLWindow/LoadShader.cpp"] \
|
||||
+["src/LinearMath/btQuickprof.cpp"]
|
||||
+["examples/OpenGLWindow/LoadShader.cpp"]
|
||||
|
||||
if 'BT_USE_EGL' in CXX_FLAGS:
|
||||
sources += ['examples/ThirdPartyLibs/glad/egl.c']
|
||||
|
||||
@@ -15,9 +15,11 @@ subject to the following restrictions:
|
||||
|
||||
#include "b3AlignedAllocator.h"
|
||||
|
||||
#ifdef B3_ALLOCATOR_STATISTICS
|
||||
int b3g_numAlignedAllocs = 0;
|
||||
int b3g_numAlignedFree = 0;
|
||||
int b3g_totalBytesAlignedAllocs = 0; //detect memory leaks
|
||||
#endif
|
||||
|
||||
static void *b3AllocDefault(size_t size)
|
||||
{
|
||||
@@ -109,10 +111,10 @@ void *b3AlignedAllocInternal(size_t size, int alignment, int line, char *filenam
|
||||
{
|
||||
void *ret;
|
||||
char *real;
|
||||
|
||||
#ifdef B3_ALLOCATOR_STATISTICS
|
||||
b3g_totalBytesAlignedAllocs += size;
|
||||
b3g_numAlignedAllocs++;
|
||||
|
||||
#endif
|
||||
real = (char *)b3s_allocFunc(size + 2 * sizeof(void *) + (alignment - 1));
|
||||
if (real)
|
||||
{
|
||||
@@ -135,14 +137,16 @@ void *b3AlignedAllocInternal(size_t size, int alignment, int line, char *filenam
|
||||
void b3AlignedFreeInternal(void *ptr, int line, char *filename)
|
||||
{
|
||||
void *real;
|
||||
#ifdef B3_ALLOCATOR_STATISTICS
|
||||
b3g_numAlignedFree++;
|
||||
|
||||
#endif
|
||||
if (ptr)
|
||||
{
|
||||
real = *((void **)(ptr)-1);
|
||||
int size = *((int *)(ptr)-2);
|
||||
#ifdef B3_ALLOCATOR_STATISTICS
|
||||
b3g_totalBytesAlignedAllocs -= size;
|
||||
|
||||
#endif
|
||||
b3Printf("free #%d at address %x, from %s,line %d, size %d\n", b3g_numAlignedFree, real, filename, line, size);
|
||||
|
||||
b3s_freeFunc(real);
|
||||
@@ -157,7 +161,9 @@ void b3AlignedFreeInternal(void *ptr, int line, char *filename)
|
||||
|
||||
void *b3AlignedAllocInternal(size_t size, int alignment)
|
||||
{
|
||||
#ifdef B3_ALLOCATOR_STATISTICS
|
||||
b3g_numAlignedAllocs++;
|
||||
#endif
|
||||
void *ptr;
|
||||
ptr = b3s_alignedAllocFunc(size, alignment);
|
||||
// b3Printf("b3AlignedAllocInternal %d, %x\n",size,ptr);
|
||||
@@ -170,8 +176,9 @@ void b3AlignedFreeInternal(void *ptr)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
#ifdef B3_ALLOCATOR_STATISTICS
|
||||
b3g_numAlignedFree++;
|
||||
#endif
|
||||
// b3Printf("b3AlignedFreeInternal %x\n",ptr);
|
||||
b3s_alignedFreeFunc(ptr);
|
||||
}
|
||||
|
||||
@@ -34,6 +34,8 @@ class btCollisionObject;
|
||||
|
||||
class btCollisionShape;
|
||||
|
||||
extern btShapePairCallback gCompoundCompoundChildShapePairCallback;
|
||||
|
||||
/// btCompoundCompoundCollisionAlgorithm supports collision between two btCompoundCollisionShape shapes
|
||||
class btCompoundCompoundCollisionAlgorithm : public btCompoundCollisionAlgorithm
|
||||
{
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#ifndef GIM_BOX_SET_H_INCLUDED
|
||||
#define GIM_BOX_SET_H_INCLUDED
|
||||
#ifndef BT_GIMPACT_BVH_H_INCLUDED
|
||||
#define BT_GIMPACT_BVH_H_INCLUDED
|
||||
|
||||
/*! \file gim_box_set.h
|
||||
\author Francisco Leon Najera
|
||||
@@ -306,4 +306,4 @@ public:
|
||||
btPairSet& collision_pairs);
|
||||
};
|
||||
|
||||
#endif // GIM_BOXPRUNING_H_INCLUDED
|
||||
#endif // BT_GIMPACT_BVH_H_INCLUDED
|
||||
|
||||
@@ -36,9 +36,6 @@ btScalar gGjkEpaPenetrationTolerance = 1.0e-12;
|
||||
btScalar gGjkEpaPenetrationTolerance = 0.001;
|
||||
#endif
|
||||
|
||||
//temp globals, to improve GJK/EPA/penetration calculations
|
||||
int gNumDeepPenetrationChecks = 0;
|
||||
int gNumGjkChecks = 0;
|
||||
|
||||
btGjkPairDetector::btGjkPairDetector(const btConvexShape *objectA, const btConvexShape *objectB, btSimplexSolverInterface *simplexSolver, btConvexPenetrationDepthSolver *penetrationDepthSolver)
|
||||
: m_cachedSeparatingAxis(btScalar(0.), btScalar(1.), btScalar(0.)),
|
||||
@@ -708,7 +705,6 @@ void btGjkPairDetector::getClosestPointsNonVirtual(const ClosestPointInput &inpu
|
||||
btScalar marginA = m_marginA;
|
||||
btScalar marginB = m_marginB;
|
||||
|
||||
gNumGjkChecks++;
|
||||
|
||||
//for CCD we don't use margins
|
||||
if (m_ignoreMargin)
|
||||
@@ -1021,7 +1017,6 @@ void btGjkPairDetector::getClosestPointsNonVirtual(const ClosestPointInput &inpu
|
||||
// Penetration depth case.
|
||||
btVector3 tmpPointOnA, tmpPointOnB;
|
||||
|
||||
gNumDeepPenetrationChecks++;
|
||||
m_cachedSeparatingAxis.setZero();
|
||||
|
||||
bool isValid2 = m_penetrationDepthSolver->calcPenDepth(
|
||||
|
||||
@@ -62,6 +62,8 @@ struct btContactSolverInfoData
|
||||
btScalar m_singleAxisRollingFrictionThreshold;
|
||||
btScalar m_leastSquaresResidualThreshold;
|
||||
btScalar m_restitutionVelocityThreshold;
|
||||
bool m_jointFeedbackInWorldSpace;
|
||||
bool m_jointFeedbackInJointFrame;
|
||||
};
|
||||
|
||||
struct btContactSolverInfo : public btContactSolverInfoData
|
||||
@@ -94,6 +96,8 @@ struct btContactSolverInfo : public btContactSolverInfoData
|
||||
m_singleAxisRollingFrictionThreshold = 1e30f; ///if the velocity is above this threshold, it will use a single constraint row (axis), otherwise 3 rows.
|
||||
m_leastSquaresResidualThreshold = 0.f;
|
||||
m_restitutionVelocityThreshold = 0.2f; //if the relative velocity is below this threshold, there is zero restitution
|
||||
m_jointFeedbackInWorldSpace = false;
|
||||
m_jointFeedbackInJointFrame = false;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -30,9 +30,6 @@
|
||||
//#include "Bullet3Common/b3Logging.h"
|
||||
// #define INCLUDE_GYRO_TERM
|
||||
|
||||
///todo: determine if we need these options. If so, make a proper API, otherwise delete those globals
|
||||
bool gJointFeedbackInWorldSpace = false;
|
||||
bool gJointFeedbackInJointFrame = false;
|
||||
|
||||
namespace
|
||||
{
|
||||
@@ -718,10 +715,12 @@ inline btMatrix3x3 outerProduct(const btVector3 &v0, const btVector3 &v1) //ren
|
||||
//
|
||||
|
||||
void btMultiBody::computeAccelerationsArticulatedBodyAlgorithmMultiDof(btScalar dt,
|
||||
btAlignedObjectArray<btScalar> &scratch_r,
|
||||
btAlignedObjectArray<btVector3> &scratch_v,
|
||||
btAlignedObjectArray<btMatrix3x3> &scratch_m,
|
||||
bool isConstraintPass)
|
||||
btAlignedObjectArray<btScalar> &scratch_r,
|
||||
btAlignedObjectArray<btVector3> &scratch_v,
|
||||
btAlignedObjectArray<btMatrix3x3> &scratch_m,
|
||||
bool isConstraintPass,
|
||||
bool jointFeedbackInWorldSpace,
|
||||
bool jointFeedbackInJointFrame)
|
||||
{
|
||||
// Implement Featherstone's algorithm to calculate joint accelerations (q_double_dot)
|
||||
// and the base linear & angular accelerations.
|
||||
@@ -1124,7 +1123,7 @@ void btMultiBody::computeAccelerationsArticulatedBodyAlgorithmMultiDof(btScalar
|
||||
btVector3 angularBotVec = (spatInertia[i + 1] * spatAcc[i + 1] + zeroAccSpatFrc[i + 1]).m_bottomVec;
|
||||
btVector3 linearTopVec = (spatInertia[i + 1] * spatAcc[i + 1] + zeroAccSpatFrc[i + 1]).m_topVec;
|
||||
|
||||
if (gJointFeedbackInJointFrame)
|
||||
if (jointFeedbackInJointFrame)
|
||||
{
|
||||
//shift the reaction forces to the joint frame
|
||||
//linear (force) component is the same
|
||||
@@ -1132,7 +1131,7 @@ void btMultiBody::computeAccelerationsArticulatedBodyAlgorithmMultiDof(btScalar
|
||||
angularBotVec = angularBotVec - linearTopVec.cross(m_links[i].m_dVector);
|
||||
}
|
||||
|
||||
if (gJointFeedbackInWorldSpace)
|
||||
if (jointFeedbackInWorldSpace)
|
||||
{
|
||||
if (isConstraintPass)
|
||||
{
|
||||
|
||||
@@ -338,17 +338,20 @@ public:
|
||||
btAlignedObjectArray<btScalar> & scratch_r,
|
||||
btAlignedObjectArray<btVector3> & scratch_v,
|
||||
btAlignedObjectArray<btMatrix3x3> & scratch_m,
|
||||
bool isConstraintPass = false);
|
||||
bool isConstraintPass,
|
||||
bool jointFeedbackInWorldSpace,
|
||||
bool jointFeedbackInJointFrame
|
||||
);
|
||||
|
||||
///stepVelocitiesMultiDof is deprecated, use computeAccelerationsArticulatedBodyAlgorithmMultiDof instead
|
||||
void stepVelocitiesMultiDof(btScalar dt,
|
||||
btAlignedObjectArray<btScalar> & scratch_r,
|
||||
btAlignedObjectArray<btVector3> & scratch_v,
|
||||
btAlignedObjectArray<btMatrix3x3> & scratch_m,
|
||||
bool isConstraintPass = false)
|
||||
{
|
||||
computeAccelerationsArticulatedBodyAlgorithmMultiDof(dt, scratch_r, scratch_v, scratch_m, isConstraintPass);
|
||||
}
|
||||
//void stepVelocitiesMultiDof(btScalar dt,
|
||||
// btAlignedObjectArray<btScalar> & scratch_r,
|
||||
// btAlignedObjectArray<btVector3> & scratch_v,
|
||||
// btAlignedObjectArray<btMatrix3x3> & scratch_m,
|
||||
// bool isConstraintPass = false)
|
||||
//{
|
||||
// computeAccelerationsArticulatedBodyAlgorithmMultiDof(dt, scratch_r, scratch_v, scratch_m, isConstraintPass, false, false);
|
||||
//}
|
||||
|
||||
// calcAccelerationDeltasMultiDof
|
||||
// input: force vector (in same format as jacobian, i.e.:
|
||||
|
||||
@@ -491,11 +491,14 @@ void btMultiBodyDynamicsWorld::solveConstraints(btContactSolverInfo& solverInfo)
|
||||
m_scratch_v.resize(bod->getNumLinks() + 1);
|
||||
m_scratch_m.resize(bod->getNumLinks() + 1);
|
||||
bool doNotUpdatePos = false;
|
||||
|
||||
bool isConstraintPass = false;
|
||||
{
|
||||
if (!bod->isUsingRK4Integration())
|
||||
{
|
||||
bod->computeAccelerationsArticulatedBodyAlgorithmMultiDof(solverInfo.m_timeStep, m_scratch_r, m_scratch_v, m_scratch_m);
|
||||
bod->computeAccelerationsArticulatedBodyAlgorithmMultiDof(solverInfo.m_timeStep,
|
||||
m_scratch_r, m_scratch_v, m_scratch_m,isConstraintPass,
|
||||
getSolverInfo().m_jointFeedbackInWorldSpace,
|
||||
getSolverInfo().m_jointFeedbackInJointFrame);
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -593,7 +596,9 @@ void btMultiBodyDynamicsWorld::solveConstraints(btContactSolverInfo& solverInfo)
|
||||
btScalar h = solverInfo.m_timeStep;
|
||||
#define output &m_scratch_r[bod->getNumDofs()]
|
||||
//calc qdd0 from: q0 & qd0
|
||||
bod->computeAccelerationsArticulatedBodyAlgorithmMultiDof(0., m_scratch_r, m_scratch_v, m_scratch_m);
|
||||
bod->computeAccelerationsArticulatedBodyAlgorithmMultiDof(0., m_scratch_r, m_scratch_v, m_scratch_m,
|
||||
isConstraintPass,getSolverInfo().m_jointFeedbackInWorldSpace,
|
||||
getSolverInfo().m_jointFeedbackInJointFrame);
|
||||
pCopy(output, scratch_qdd0, 0, numDofs);
|
||||
//calc q1 = q0 + h/2 * qd0
|
||||
pResetQx();
|
||||
@@ -603,7 +608,9 @@ void btMultiBodyDynamicsWorld::solveConstraints(btContactSolverInfo& solverInfo)
|
||||
//
|
||||
//calc qdd1 from: q1 & qd1
|
||||
pCopyToVelocityVector(bod, scratch_qd1);
|
||||
bod->computeAccelerationsArticulatedBodyAlgorithmMultiDof(0., m_scratch_r, m_scratch_v, m_scratch_m);
|
||||
bod->computeAccelerationsArticulatedBodyAlgorithmMultiDof(0., m_scratch_r, m_scratch_v, m_scratch_m,
|
||||
isConstraintPass,getSolverInfo().m_jointFeedbackInWorldSpace,
|
||||
getSolverInfo().m_jointFeedbackInJointFrame);
|
||||
pCopy(output, scratch_qdd1, 0, numDofs);
|
||||
//calc q2 = q0 + h/2 * qd1
|
||||
pResetQx();
|
||||
@@ -613,7 +620,9 @@ void btMultiBodyDynamicsWorld::solveConstraints(btContactSolverInfo& solverInfo)
|
||||
//
|
||||
//calc qdd2 from: q2 & qd2
|
||||
pCopyToVelocityVector(bod, scratch_qd2);
|
||||
bod->computeAccelerationsArticulatedBodyAlgorithmMultiDof(0., m_scratch_r, m_scratch_v, m_scratch_m);
|
||||
bod->computeAccelerationsArticulatedBodyAlgorithmMultiDof(0., m_scratch_r, m_scratch_v, m_scratch_m,
|
||||
isConstraintPass,getSolverInfo().m_jointFeedbackInWorldSpace,
|
||||
getSolverInfo().m_jointFeedbackInJointFrame);
|
||||
pCopy(output, scratch_qdd2, 0, numDofs);
|
||||
//calc q3 = q0 + h * qd2
|
||||
pResetQx();
|
||||
@@ -623,7 +632,9 @@ void btMultiBodyDynamicsWorld::solveConstraints(btContactSolverInfo& solverInfo)
|
||||
//
|
||||
//calc qdd3 from: q3 & qd3
|
||||
pCopyToVelocityVector(bod, scratch_qd3);
|
||||
bod->computeAccelerationsArticulatedBodyAlgorithmMultiDof(0., m_scratch_r, m_scratch_v, m_scratch_m);
|
||||
bod->computeAccelerationsArticulatedBodyAlgorithmMultiDof(0., m_scratch_r, m_scratch_v, m_scratch_m,
|
||||
isConstraintPass,getSolverInfo().m_jointFeedbackInWorldSpace,
|
||||
getSolverInfo().m_jointFeedbackInJointFrame);
|
||||
pCopy(output, scratch_qdd3, 0, numDofs);
|
||||
|
||||
//
|
||||
@@ -660,7 +671,9 @@ void btMultiBodyDynamicsWorld::solveConstraints(btContactSolverInfo& solverInfo)
|
||||
{
|
||||
for (int link = 0; link < bod->getNumLinks(); ++link)
|
||||
bod->getLink(link).updateCacheMultiDof();
|
||||
bod->computeAccelerationsArticulatedBodyAlgorithmMultiDof(0, m_scratch_r, m_scratch_v, m_scratch_m);
|
||||
bod->computeAccelerationsArticulatedBodyAlgorithmMultiDof(0, m_scratch_r, m_scratch_v, m_scratch_m,
|
||||
isConstraintPass,getSolverInfo().m_jointFeedbackInWorldSpace,
|
||||
getSolverInfo().m_jointFeedbackInJointFrame);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -708,7 +721,9 @@ void btMultiBodyDynamicsWorld::solveConstraints(btContactSolverInfo& solverInfo)
|
||||
if (!bod->isUsingRK4Integration())
|
||||
{
|
||||
bool isConstraintPass = true;
|
||||
bod->computeAccelerationsArticulatedBodyAlgorithmMultiDof(solverInfo.m_timeStep, m_scratch_r, m_scratch_v, m_scratch_m, isConstraintPass);
|
||||
bod->computeAccelerationsArticulatedBodyAlgorithmMultiDof(solverInfo.m_timeStep, m_scratch_r, m_scratch_v, m_scratch_m, isConstraintPass,
|
||||
getSolverInfo().m_jointFeedbackInWorldSpace,
|
||||
getSolverInfo().m_jointFeedbackInJointFrame);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
|
||||
/*
|
||||
Bullet Continuous Collision Detection and Physics Library
|
||||
Copyright (c) 2003-2018 Erwin Coumans http://bulletphysics.com
|
||||
@@ -72,7 +73,7 @@ public:
|
||||
pthread_t thread;
|
||||
//each tread will wait until this signal to start its work
|
||||
sem_t* startSemaphore;
|
||||
|
||||
btCriticalSection* m_cs;
|
||||
// this is a copy of m_mainSemaphore,
|
||||
//each tread will signal once it is finished with its work
|
||||
sem_t* m_mainSemaphore;
|
||||
@@ -90,7 +91,7 @@ private:
|
||||
void startThreads(const ConstructionInfo& threadInfo);
|
||||
void stopThreads();
|
||||
int waitForResponse();
|
||||
|
||||
btCriticalSection* m_cs;
|
||||
public:
|
||||
btThreadSupportPosix(const ConstructionInfo& threadConstructionInfo);
|
||||
virtual ~btThreadSupportPosix();
|
||||
@@ -119,6 +120,7 @@ public:
|
||||
|
||||
btThreadSupportPosix::btThreadSupportPosix(const ConstructionInfo& threadConstructionInfo)
|
||||
{
|
||||
m_cs = createCriticalSection();
|
||||
startThreads(threadConstructionInfo);
|
||||
}
|
||||
|
||||
@@ -126,6 +128,8 @@ btThreadSupportPosix::btThreadSupportPosix(const ConstructionInfo& threadConstru
|
||||
btThreadSupportPosix::~btThreadSupportPosix()
|
||||
{
|
||||
stopThreads();
|
||||
deleteCriticalSection(m_cs);
|
||||
m_cs=0;
|
||||
}
|
||||
|
||||
#if (defined(__APPLE__))
|
||||
@@ -181,21 +185,23 @@ static void* threadFunction(void* argument)
|
||||
{
|
||||
btAssert(status->m_status);
|
||||
status->m_userThreadFunc(userPtr);
|
||||
status->m_cs->lock();
|
||||
status->m_status = 2;
|
||||
status->m_cs->unlock();
|
||||
checkPThreadFunction(sem_post(status->m_mainSemaphore));
|
||||
status->threadUsed++;
|
||||
}
|
||||
else
|
||||
{
|
||||
//exit Thread
|
||||
status->m_cs->lock();
|
||||
status->m_status = 3;
|
||||
status->m_cs->unlock();
|
||||
checkPThreadFunction(sem_post(status->m_mainSemaphore));
|
||||
printf("Thread with taskId %i exiting\n", status->m_taskId);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
printf("Thread TERMINATED\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -206,7 +212,7 @@ void btThreadSupportPosix::runTask(int threadIndex, void* userData)
|
||||
btThreadStatus& threadStatus = m_activeThreadStatus[threadIndex];
|
||||
btAssert(threadIndex >= 0);
|
||||
btAssert(threadIndex < m_activeThreadStatus.size());
|
||||
|
||||
threadStatus.m_cs = m_cs;
|
||||
threadStatus.m_commandId = 1;
|
||||
threadStatus.m_status = 1;
|
||||
threadStatus.m_userPtr = userData;
|
||||
@@ -231,7 +237,10 @@ int btThreadSupportPosix::waitForResponse()
|
||||
|
||||
for (size_t t = 0; t < size_t(m_activeThreadStatus.size()); ++t)
|
||||
{
|
||||
if (2 == m_activeThreadStatus[t].m_status)
|
||||
m_cs->lock();
|
||||
bool hasFinished = (2 == m_activeThreadStatus[t].m_status);
|
||||
m_cs->unlock();
|
||||
if (hasFinished)
|
||||
{
|
||||
last = t;
|
||||
break;
|
||||
@@ -261,7 +270,6 @@ void btThreadSupportPosix::waitForAllTasks()
|
||||
void btThreadSupportPosix::startThreads(const ConstructionInfo& threadConstructionInfo)
|
||||
{
|
||||
m_numThreads = btGetNumHardwareThreads() - 1; // main thread exists already
|
||||
printf("%s creating %i threads.\n", __FUNCTION__, m_numThreads);
|
||||
m_activeThreadStatus.resize(m_numThreads);
|
||||
m_startedThreadsMask = 0;
|
||||
|
||||
@@ -270,20 +278,18 @@ void btThreadSupportPosix::startThreads(const ConstructionInfo& threadConstructi
|
||||
|
||||
for (int i = 0; i < m_numThreads; i++)
|
||||
{
|
||||
printf("starting thread %d\n", i);
|
||||
btThreadStatus& threadStatus = m_activeThreadStatus[i];
|
||||
threadStatus.startSemaphore = createSem("threadLocal");
|
||||
checkPThreadFunction(pthread_create(&threadStatus.thread, NULL, &threadFunction, (void*)&threadStatus));
|
||||
|
||||
threadStatus.m_userPtr = 0;
|
||||
threadStatus.m_cs = m_cs;
|
||||
threadStatus.m_taskId = i;
|
||||
threadStatus.m_commandId = 0;
|
||||
threadStatus.m_status = 0;
|
||||
threadStatus.m_mainSemaphore = m_mainSemaphore;
|
||||
threadStatus.m_userThreadFunc = threadConstructionInfo.m_userThreadFunc;
|
||||
threadStatus.threadUsed = 0;
|
||||
checkPThreadFunction(pthread_create(&threadStatus.thread, NULL, &threadFunction, (void*)&threadStatus));
|
||||
|
||||
printf("started thread %d \n", i);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -293,20 +299,15 @@ void btThreadSupportPosix::stopThreads()
|
||||
for (size_t t = 0; t < size_t(m_activeThreadStatus.size()); ++t)
|
||||
{
|
||||
btThreadStatus& threadStatus = m_activeThreadStatus[t];
|
||||
printf("%s: Thread %i used: %ld\n", __FUNCTION__, int(t), threadStatus.threadUsed);
|
||||
|
||||
threadStatus.m_userPtr = 0;
|
||||
checkPThreadFunction(sem_post(threadStatus.startSemaphore));
|
||||
checkPThreadFunction(sem_wait(m_mainSemaphore));
|
||||
|
||||
printf("destroy semaphore\n");
|
||||
destroySem(threadStatus.startSemaphore);
|
||||
printf("semaphore destroyed\n");
|
||||
checkPThreadFunction(pthread_join(threadStatus.thread, 0));
|
||||
}
|
||||
printf("destroy main semaphore\n");
|
||||
destroySem(m_mainSemaphore);
|
||||
printf("main semaphore destroyed\n");
|
||||
m_activeThreadStatus.clear();
|
||||
}
|
||||
|
||||
|
||||
@@ -694,6 +694,24 @@ void CProfileManager::dumpAll()
|
||||
CProfileManager::Release_Iterator(profileIterator);
|
||||
}
|
||||
|
||||
|
||||
void btEnterProfileZoneDefault(const char* name)
|
||||
{
|
||||
}
|
||||
void btLeaveProfileZoneDefault()
|
||||
{
|
||||
}
|
||||
|
||||
#else
|
||||
void btEnterProfileZoneDefault(const char* name)
|
||||
{
|
||||
}
|
||||
void btLeaveProfileZoneDefault()
|
||||
{
|
||||
}
|
||||
#endif //BT_NO_PROFILE
|
||||
|
||||
|
||||
// clang-format off
|
||||
#if defined(_WIN32) && (defined(__MINGW32__) || defined(__MINGW64__))
|
||||
#define BT_HAVE_TLS 1
|
||||
@@ -743,22 +761,6 @@ unsigned int btQuickprofGetCurrentThreadIndex2()
|
||||
#endif //BT_THREADSAFE
|
||||
}
|
||||
|
||||
void btEnterProfileZoneDefault(const char* name)
|
||||
{
|
||||
}
|
||||
void btLeaveProfileZoneDefault()
|
||||
{
|
||||
}
|
||||
|
||||
#else
|
||||
void btEnterProfileZoneDefault(const char* name)
|
||||
{
|
||||
}
|
||||
void btLeaveProfileZoneDefault()
|
||||
{
|
||||
}
|
||||
#endif //BT_NO_PROFILE
|
||||
|
||||
static btEnterProfileZoneFunc* bts_enterFunc = btEnterProfileZoneDefault;
|
||||
static btLeaveProfileZoneFunc* bts_leaveFunc = btLeaveProfileZoneDefault;
|
||||
|
||||
|
||||
@@ -61,18 +61,19 @@ btLeaveProfileZoneFunc* btGetCurrentLeaveProfileZoneFunc();
|
||||
void btSetCustomEnterProfileZoneFunc(btEnterProfileZoneFunc* enterFunc);
|
||||
void btSetCustomLeaveProfileZoneFunc(btLeaveProfileZoneFunc* leaveFunc);
|
||||
|
||||
#ifndef BT_NO_PROFILE // FIX redefinition
|
||||
//To disable built-in profiling, please comment out next line
|
||||
//#define BT_NO_PROFILE 1
|
||||
#ifndef BT_ENABLE_PROFILE
|
||||
#define BT_NO_PROFILE 1
|
||||
#endif //BT_NO_PROFILE
|
||||
|
||||
const unsigned int BT_QUICKPROF_MAX_THREAD_COUNT = 64;
|
||||
|
||||
#ifndef BT_NO_PROFILE
|
||||
//btQuickprofGetCurrentThreadIndex will return -1 if thread index cannot be determined,
|
||||
//otherwise returns thread index in range [0..maxThreads]
|
||||
unsigned int btQuickprofGetCurrentThreadIndex2();
|
||||
|
||||
#ifndef BT_NO_PROFILE
|
||||
|
||||
|
||||
#include <stdio.h> //@todo remove this, backwards compatibility
|
||||
|
||||
#include "btAlignedAllocator.h"
|
||||
|
||||
Reference in New Issue
Block a user