From 59d58ce430f9d4ec88fb27df67f4f766a617f6fe Mon Sep 17 00:00:00 2001
From: Somedaywilldo <714439471@qq.com>
Date: Wed, 13 Feb 2019 00:13:56 +0800
Subject: [PATCH] add Human3.6M support for deep_mimic env

---
 .../pybullet_envs/deep_mimic/mocap/README.md  |   43 +
 .../pybullet_envs/deep_mimic/mocap/camera.py  |   26 +
 .../mocap/deepmimic_json_generator.py         |   56 +
 .../deep_mimic/mocap/h36m_dataset.py          |  248 +++
 .../deep_mimic/mocap/humanoid.py              |  691 +++++++
 .../deep_mimic/mocap/inverse_kinematics.py    |  150 ++
 .../deep_mimic/mocap/mocap_dataset.py         |   36 +
 .../deep_mimic/mocap/quaternion.py            |   30 +
 .../deep_mimic/mocap/render_reference.py      |  144 ++
 .../deep_mimic/mocap/skeleton.py              |   81 +
 .../deep_mimic/mocap/transformation.py        | 1585 +++++++++++++++++
 11 files changed, 3090 insertions(+)
 create mode 100644 examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/README.md
 create mode 100644 examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/camera.py
 create mode 100644 examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/deepmimic_json_generator.py
 create mode 100644 examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/h36m_dataset.py
 create mode 100644 examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/humanoid.py
 create mode 100644 examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/inverse_kinematics.py
 create mode 100644 examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/mocap_dataset.py
 create mode 100644 examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/quaternion.py
 create mode 100644 examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/render_reference.py
 create mode 100644 examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/skeleton.py
 create mode 100644 examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/transformation.py

diff --git a/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/README.md b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/README.md
new file mode 100644
index 000000000..133215bce
--- /dev/null
+++ b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/README.md
@@ -0,0 +1,43 @@
+# Support for Human3.6M Dataset
+
+Developer: Somedaywilldo (somedaywilldo@foxmail.com)
+
+### Inverse Kinematics and Reference Rendering
+
+If you want to learn movements directly from 3D joint coordinates, you can use **inverse_kinematics.py** and **render_reference.py**. Currently they only support the dataset format produced by [VideoPose3D](https://github.com/facebookresearch/VideoPose3D).
+
+Download the pre-processed Human3.6M dataset for VideoPose3D [here](https://www.dropbox.com/s/z5bwig0h6mww590/data_3d_h36m.npz?dl=0).
+
+After downloading the **data_3d_h36m.npz** file to this directory, you can run:
+
+```shell
+$ python render_reference.py \
+    --dataset_path=<path/to/data_3d_h36m.npz> \
+    --subject=S11 \
+    --action=Walking \
+    --json_path=<path/to/output.json> \
+    --fps=24 \
+    --loop=wrap \
+    --draw_gt
+```
+
+The `--draw_gt` flag draws the ground truth using **pybullet.addUserDebugLine()**; lines on the right side of the humanoid are drawn in red, all other parts in black. This is only meant for debugging, and rendering is much faster without the flag.
+
+If no errors occur, the result should look like this [video](https://www.youtube.com/watch?v=goew_FmUtOE).
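+
+If you prefer to drive the conversion from Python instead of the command line, the same pipeline that **render_reference.py** runs internally looks roughly like this (a minimal sketch; the paths and the subject/action choices are placeholders):
+
+```python
+from deepmimic_json_generator import init_fb_h36m_dataset, pose3D_from_fb_h36m, rot_seq_to_deepmimic_json
+from inverse_kinematics import coord_seq_to_rot_seq
+
+# Load the VideoPose3D .npz file.
+dataset = init_fb_h36m_dataset('data_3d_h36m.npz')
+# Extract one subject/action as world-space, y-up joint coordinates.
+coords = pose3D_from_fb_h36m(dataset, subject='S11', action='Walking', shift=[1.0, 0.0, 0.0])
+
+# Inverse kinematics: convert joint positions to DeepMimic joint rotations,
+# then serialize them as a DeepMimic-format motion file.
+rotations = coord_seq_to_rot_seq(coord_seq=coords, frame_duration=1.0 / 24)
+rot_seq_to_deepmimic_json(rot_seq=rotations, loop='wrap', json_path='Walking.json')
+```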
+
+### Contact
+
+The inverse kinematics and reference rendering modules were developed by Somedaywilldo.
+
+Email: somedaywilldo@foxmail.com
+
+### Reference
+
+[1] DeepMimic: Example-Guided Deep Reinforcement Learning of Physics-Based Character Skills [[Link](https://arxiv.org/abs/1804.02717)]
+
+[2] 3D human pose estimation in video with temporal convolutions and semi-supervised training [[Link](https://arxiv.org/abs/1811.11742)]

diff --git a/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/camera.py b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/camera.py
new file mode 100644
index 000000000..329eb4b65
--- /dev/null
+++ b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/camera.py
@@ -0,0 +1,26 @@
+import numpy as np
+
+from quaternion import qrot, qinverse
+
+
+def normalize_screen_coordinates(X, w, h):
+    assert X.shape[-1] == 2
+    # Normalize so that [0, w] is mapped to [-1, 1], while preserving the aspect ratio
+    return X/w*2 - [1, h/w]
+
+def image_coordinates(X, w, h):
+    assert X.shape[-1] == 2
+    # Reverse camera frame normalization
+    return (X + [1, h/w])*w/2
+
+def world_to_camera(X, R, t):
+    Rt = qinverse(R)  # invert the camera rotation
+    Q = np.tile(Rt, (*X.shape[:-1], 1))
+    V = X - t
+    return qrot(Q, V)
+
+def camera_to_world(X, R, t):
+    Q = np.tile(R, (*X.shape[:-1], 1))
+    V = X
+    return qrot(Q, V) + t
+
diff --git a/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/deepmimic_json_generator.py b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/deepmimic_json_generator.py
new file mode 100644
index 000000000..23525cd72
--- /dev/null
+++ b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/deepmimic_json_generator.py
@@ -0,0 +1,56 @@
+import json
+
+import numpy as np
+
+from inverse_kinematics import *
+from h36m_dataset import Human36mDataset
+from camera import *
+
+joint_info = {
+    'joint_name':['root', 'right_hip', 'right_knee', 'right_ankle', 'left_hip', 'left_knee', 'left_ankle', 'chest', 'neck', 'nose', 'eye', 'left_shoulder', 'left_elbow', 'left_wrist', 'right_shoulder', 'right_elbow', 'right_wrist'],
+    'father':[0, 0, 1, 2, 0, 4, 5, 0, 7, 8, 9, 8, 11, 12, 8, 14, 15],
+    'side':['middle', 'right', 'right', 'right', 'left', 'left', 'left', 'middle', 'middle', 'middle', 'middle', 'left', 'left', 'left', 'right', 'right', 'right']
+}
+
+def init_fb_h36m_dataset(dataset_path):
+    dataset = Human36mDataset(dataset_path)
+    print('Preparing Facebook Human3.6M Dataset...')
+    for subject in dataset.subjects():
+        for action in dataset[subject].keys():
+            anim = dataset[subject][action]
+            positions_3d = []
+            for cam in anim['cameras']:
+                pos_3d = world_to_camera(anim['positions'], R=cam['orientation'], t=cam['translation'])
+                pos_3d[:, 1:] -= pos_3d[:, :1]  # Remove global offset, but keep trajectory in first position
+                positions_3d.append(pos_3d)
+            anim['positions_3d'] = positions_3d
+    return dataset

+def pose3D_from_fb_h36m(dataset, subject, action, shift):
+    pose_seq = dataset[subject][action]['positions_3d'][0].copy()
+    trajectory = pose_seq[:, :1]
+    pose_seq[:, 1:] += trajectory
+    # Invert camera transformation
+    cam = dataset.cameras()[subject][0]
+    pose_seq = camera_to_world(pose_seq,
+                               R=cam['orientation'],
+                               t=cam['translation'])
+    # Swap axes so the sequence is y-up, matching the pybullet humanoid
+    x = pose_seq[:,:,0:1]
+    y = pose_seq[:,:,1:2] * -1
+    z = pose_seq[:,:,2:3]
+    pose_seq = np.concatenate((x,z,y),axis=2)
+    # Apply the constant shift to every joint of every frame (broadcasts)
+    pose_seq += np.array(shift)
+    return pose_seq

+def rot_seq_to_deepmimic_json(rot_seq, loop, json_path):
+    to_json = {"Loop": loop, "Frames":[]}
+    rot_seq = np.around(rot_seq, decimals=6)
+    to_json["Frames"] = rot_seq.tolist()
+    # 
In[14]: + to_file = json.dumps(to_json) + file = open(json_path,"w") + file.write(to_file) + file.close() + diff --git a/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/h36m_dataset.py b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/h36m_dataset.py new file mode 100644 index 000000000..bc961fbf4 --- /dev/null +++ b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/h36m_dataset.py @@ -0,0 +1,248 @@ +import numpy as np +import copy +from skeleton import Skeleton +from mocap_dataset import MocapDataset +from camera import normalize_screen_coordinates, image_coordinates + +h36m_skeleton = Skeleton(parents=[-1, 0, 1, 2, 3, 4, 0, 6, 7, 8, 9, 0, 11, 12, 13, 14, 12, + 16, 17, 18, 19, 20, 19, 22, 12, 24, 25, 26, 27, 28, 27, 30], + joints_left=[6, 7, 8, 9, 10, 16, 17, 18, 19, 20, 21, 22, 23], + joints_right=[1, 2, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31]) + +h36m_cameras_intrinsic_params = [ + { + 'id': '54138969', + 'center': [512.54150390625, 515.4514770507812], + 'focal_length': [1145.0494384765625, 1143.7811279296875], + 'radial_distortion': [-0.20709891617298126, 0.24777518212795258, -0.0030751503072679043], + 'tangential_distortion': [-0.0009756988729350269, -0.00142447161488235], + 'res_w': 1000, + 'res_h': 1002, + 'azimuth': 70, # Only used for visualization + }, + { + 'id': '55011271', + 'center': [508.8486328125, 508.0649108886719], + 'focal_length': [1149.6756591796875, 1147.5916748046875], + 'radial_distortion': [-0.1942136287689209, 0.2404085397720337, 0.006819975562393665], + 'tangential_distortion': [-0.0016190266469493508, -0.0027408944442868233], + 'res_w': 1000, + 'res_h': 1000, + 'azimuth': -70, # Only used for visualization + }, + { + 'id': '58860488', + 'center': [519.8158569335938, 501.40264892578125], + 'focal_length': [1149.1407470703125, 1148.7989501953125], + 'radial_distortion': [-0.2083381861448288, 0.25548800826072693, -0.0024604974314570427], + 'tangential_distortion': [0.0014843869721516967, -0.0007599993259645998], + 'res_w': 1000, + 'res_h': 1000, + 'azimuth': 110, # Only used for visualization + }, + { + 'id': '60457274', + 'center': [514.9682006835938, 501.88201904296875], + 'focal_length': [1145.5113525390625, 1144.77392578125], + 'radial_distortion': [-0.198384091258049, 0.21832367777824402, -0.008947807364165783], + 'tangential_distortion': [-0.0005872055771760643, -0.0018133620033040643], + 'res_w': 1000, + 'res_h': 1002, + 'azimuth': -110, # Only used for visualization + }, +] + +h36m_cameras_extrinsic_params = { + 'S1': [ + { + 'orientation': [0.1407056450843811, -0.1500701755285263, -0.755240797996521, 0.6223280429840088], + 'translation': [1841.1070556640625, 4955.28466796875, 1563.4454345703125], + }, + { + 'orientation': [0.6157187819480896, -0.764836311340332, -0.14833825826644897, 0.11794740706682205], + 'translation': [1761.278564453125, -5078.0068359375, 1606.2650146484375], + }, + { + 'orientation': [0.14651472866535187, -0.14647851884365082, 0.7653023600578308, -0.6094175577163696], + 'translation': [-1846.7777099609375, 5215.04638671875, 1491.972412109375], + }, + { + 'orientation': [0.5834008455276489, -0.7853162288665771, 0.14548823237419128, -0.14749594032764435], + 'translation': [-1794.7896728515625, -3722.698974609375, 1574.8927001953125], + }, + ], + 'S2': [ + {}, + {}, + {}, + {}, + ], + 'S3': [ + {}, + {}, + {}, + {}, + ], + 'S4': [ + {}, + {}, + {}, + {}, + ], + 'S5': [ + { + 'orientation': [0.1467377245426178, -0.162370964884758, -0.7551892995834351, 0.6178938746452332], + 'translation': [2097.3916015625, 4880.94482421875, 
1605.732421875], + }, + { + 'orientation': [0.6159758567810059, -0.7626792192459106, -0.15728192031383514, 0.1189815029501915], + 'translation': [2031.7008056640625, -5167.93310546875, 1612.923095703125], + }, + { + 'orientation': [0.14291371405124664, -0.12907841801643372, 0.7678384780883789, -0.6110143065452576], + 'translation': [-1620.5948486328125, 5171.65869140625, 1496.43701171875], + }, + { + 'orientation': [0.5920479893684387, -0.7814217805862427, 0.1274748593568802, -0.15036417543888092], + 'translation': [-1637.1737060546875, -3867.3173828125, 1547.033203125], + }, + ], + 'S6': [ + { + 'orientation': [0.1337897777557373, -0.15692396461963654, -0.7571090459823608, 0.6198879480361938], + 'translation': [1935.4517822265625, 4950.24560546875, 1618.0838623046875], + }, + { + 'orientation': [0.6147197484970093, -0.7628812789916992, -0.16174767911434174, 0.11819244921207428], + 'translation': [1969.803955078125, -5128.73876953125, 1632.77880859375], + }, + { + 'orientation': [0.1529948115348816, -0.13529130816459656, 0.7646096348762512, -0.6112781167030334], + 'translation': [-1769.596435546875, 5185.361328125, 1476.993408203125], + }, + { + 'orientation': [0.5916101336479187, -0.7804774045944214, 0.12832270562648773, -0.1561593860387802], + 'translation': [-1721.668701171875, -3884.13134765625, 1540.4879150390625], + }, + ], + 'S7': [ + { + 'orientation': [0.1435241848230362, -0.1631336808204651, -0.7548328638076782, 0.6188824772834778], + 'translation': [1974.512939453125, 4926.3544921875, 1597.8326416015625], + }, + { + 'orientation': [0.6141672730445862, -0.7638262510299683, -0.1596645563840866, 0.1177929937839508], + 'translation': [1937.0584716796875, -5119.7900390625, 1631.5665283203125], + }, + { + 'orientation': [0.14550060033798218, -0.12874816358089447, 0.7660516500473022, -0.6127139329910278], + 'translation': [-1741.8111572265625, 5208.24951171875, 1464.8245849609375], + }, + { + 'orientation': [0.5912848114967346, -0.7821764349937439, 0.12445473670959473, -0.15196487307548523], + 'translation': [-1734.7105712890625, -3832.42138671875, 1548.5830078125], + }, + ], + 'S8': [ + { + 'orientation': [0.14110587537288666, -0.15589867532253265, -0.7561917304992676, 0.619644045829773], + 'translation': [2150.65185546875, 4896.1611328125, 1611.9046630859375], + }, + { + 'orientation': [0.6169601678848267, -0.7647668123245239, -0.14846350252628326, 0.11158157885074615], + 'translation': [2219.965576171875, -5148.453125, 1613.0440673828125], + }, + { + 'orientation': [0.1471444070339203, -0.13377119600772858, 0.7670128345489502, -0.6100369691848755], + 'translation': [-1571.2215576171875, 5137.0185546875, 1498.1761474609375], + }, + { + 'orientation': [0.5927824378013611, -0.7825870513916016, 0.12147816270589828, -0.14631995558738708], + 'translation': [-1476.913330078125, -3896.7412109375, 1547.97216796875], + }, + ], + 'S9': [ + { + 'orientation': [0.15540587902069092, -0.15548215806484222, -0.7532095313072205, 0.6199594736099243], + 'translation': [2044.45849609375, 4935.1171875, 1481.2275390625], + }, + { + 'orientation': [0.618784487247467, -0.7634735107421875, -0.14132238924503326, 0.11933968216180801], + 'translation': [1990.959716796875, -5123.810546875, 1568.8048095703125], + }, + { + 'orientation': [0.13357827067375183, -0.1367100477218628, 0.7689454555511475, -0.6100738644599915], + 'translation': [-1670.9921875, 5211.98583984375, 1528.387939453125], + }, + { + 'orientation': [0.5879399180412292, -0.7823407053947449, 0.1427614390850067, -0.14794869720935822], + 
'translation': [-1696.04345703125, -3827.099853515625, 1591.4127197265625], + }, + ], + 'S11': [ + { + 'orientation': [0.15232472121715546, -0.15442320704460144, -0.7547563314437866, 0.6191070079803467], + 'translation': [2098.440185546875, 4926.5546875, 1500.278564453125], + }, + { + 'orientation': [0.6189449429512024, -0.7600917220115662, -0.15300633013248444, 0.1255258321762085], + 'translation': [2083.182373046875, -4912.1728515625, 1561.07861328125], + }, + { + 'orientation': [0.14943228662014008, -0.15650227665901184, 0.7681233882904053, -0.6026304364204407], + 'translation': [-1609.8153076171875, 5177.3359375, 1537.896728515625], + }, + { + 'orientation': [0.5894251465797424, -0.7818877100944519, 0.13991211354732513, -0.14715361595153809], + 'translation': [-1590.738037109375, -3854.1689453125, 1578.017578125], + }, + ], +} + +class Human36mDataset(MocapDataset): + def __init__(self, path, remove_static_joints=True): + super().__init__(fps=50, skeleton=h36m_skeleton) + + self._cameras = copy.deepcopy(h36m_cameras_extrinsic_params) + for cameras in self._cameras.values(): + for i, cam in enumerate(cameras): + cam.update(h36m_cameras_intrinsic_params[i]) + for k, v in cam.items(): + if k not in ['id', 'res_w', 'res_h']: + cam[k] = np.array(v, dtype='float32') + + # Normalize camera frame + cam['center'] = normalize_screen_coordinates(cam['center'], w=cam['res_w'], h=cam['res_h']).astype('float32') + cam['focal_length'] = cam['focal_length']/cam['res_w']*2 + if 'translation' in cam: + cam['translation'] = cam['translation']/1000 # mm to meters + + # Add intrinsic parameters vector + cam['intrinsic'] = np.concatenate((cam['focal_length'], + cam['center'], + cam['radial_distortion'], + cam['tangential_distortion'])) + + # Load serialized dataset + data = np.load(path)['positions_3d'].item() + + self._data = {} + for subject, actions in data.items(): + self._data[subject] = {} + for action_name, positions in actions.items(): + self._data[subject][action_name] = { + 'positions': positions, + 'cameras': self._cameras[subject], + } + + if remove_static_joints: + # Bring the skeleton to 17 joints instead of the original 32 + self.remove_joints([4, 5, 9, 10, 11, 16, 20, 21, 22, 23, 24, 28, 29, 30, 31]) + + # Rewire shoulders to the correct parents + self._skeleton._parents[11] = 8 + self._skeleton._parents[14] = 8 + + def supports_semi_supervised(self): + return True + \ No newline at end of file diff --git a/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/humanoid.py b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/humanoid.py new file mode 100644 index 000000000..f55e3cadb --- /dev/null +++ b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/humanoid.py @@ -0,0 +1,691 @@ +import os, inspect +import math +currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +parentdir = os.path.dirname(os.path.dirname(currentdir)) +os.sys.path.insert(0,parentdir) + +from pybullet_utils.bullet_client import BulletClient +import pybullet_data + +jointTypes = ["JOINT_REVOLUTE","JOINT_PRISMATIC", + "JOINT_SPHERICAL","JOINT_PLANAR","JOINT_FIXED"] + +class HumanoidPose(object): + def __init__(self): + pass + + def Reset(self): + + self._basePos = [0,0,0] + self._baseLinVel = [0,0,0] + self._baseOrn = [0,0,0,1] + self._baseAngVel = [0,0,0] + + self._chestRot = [0,0,0,1] + self._chestVel = [0,0,0] + self._neckRot = [0,0,0,1] + self._neckVel = [0,0,0] + + self._rightHipRot = [0,0,0,1] + self._rightHipVel = [0,0,0] + self._rightKneeRot = [0] + self._rightKneeVel = [0] + 
self._rightAnkleRot = [0,0,0,1] + self._rightAnkleVel = [0,0,0] + + self._rightShoulderRot = [0,0,0,1] + self._rightShoulderVel = [0,0,0] + self._rightElbowRot = [0] + self._rightElbowVel = [0] + + self._leftHipRot = [0,0,0,1] + self._leftHipVel = [0,0,0] + self._leftKneeRot = [0] + self._leftKneeVel = [0] + self._leftAnkleRot = [0,0,0,1] + self._leftAnkleVel = [0,0,0] + + self._leftShoulderRot = [0,0,0,1] + self._leftShoulderVel = [0,0,0] + self._leftElbowRot = [0] + self._leftElbowVel = [0] + + def ComputeLinVel(self,posStart, posEnd, deltaTime): + vel = [(posEnd[0]-posStart[0])/deltaTime,(posEnd[1]-posStart[1])/deltaTime,(posEnd[2]-posStart[2])/deltaTime] + return vel + + def ComputeAngVel(self,ornStart, ornEnd, deltaTime, bullet_client): + dorn = bullet_client.getDifferenceQuaternion(ornStart,ornEnd) + axis,angle = bullet_client.getAxisAngleFromQuaternion(dorn) + angVel = [(axis[0]*angle)/deltaTime,(axis[1]*angle)/deltaTime,(axis[2]*angle)/deltaTime] + return angVel + + def NormalizeQuaternion(self, orn): + length2 = orn[0]*orn[0]+orn[1]*orn[1]+orn[2]*orn[2]+orn[3]*orn[3] + if (length2>0): + length = math.sqrt(length2) + #print("Normalize? length=",length) + + + def PostProcessMotionData(self, frameData): + baseOrn1Start = [frameData[5],frameData[6], frameData[7],frameData[4]] + self.NormalizeQuaternion(baseOrn1Start) + chestRotStart = [frameData[9],frameData[10],frameData[11],frameData[8]] + + neckRotStart = [frameData[13],frameData[14],frameData[15],frameData[12]] + rightHipRotStart = [frameData[17],frameData[18],frameData[19],frameData[16]] + rightAnkleRotStart = [frameData[22],frameData[23],frameData[24],frameData[21]] + rightShoulderRotStart = [frameData[26],frameData[27],frameData[28],frameData[25]] + leftHipRotStart = [frameData[31],frameData[32],frameData[33],frameData[30]] + leftAnkleRotStart = [frameData[36],frameData[37],frameData[38],frameData[35]] + leftShoulderRotStart = [frameData[40],frameData[41],frameData[42],frameData[39]] + + + def Slerp(self, frameFraction, frameData, frameDataNext,bullet_client ): + keyFrameDuration = frameData[0] + basePos1Start = [frameData[1],frameData[2],frameData[3]] + basePos1End = [frameDataNext[1],frameDataNext[2],frameDataNext[3]] + self._basePos = [basePos1Start[0]+frameFraction*(basePos1End[0]-basePos1Start[0]), + basePos1Start[1]+frameFraction*(basePos1End[1]-basePos1Start[1]), + basePos1Start[2]+frameFraction*(basePos1End[2]-basePos1Start[2])] + self._baseLinVel = self.ComputeLinVel(basePos1Start,basePos1End, keyFrameDuration) + baseOrn1Start = [frameData[5],frameData[6], frameData[7],frameData[4]] + baseOrn1Next = [frameDataNext[5],frameDataNext[6], frameDataNext[7],frameDataNext[4]] + self._baseOrn = bullet_client.getQuaternionSlerp(baseOrn1Start,baseOrn1Next,frameFraction) + self._baseAngVel = self.ComputeAngVel(baseOrn1Start,baseOrn1Next, keyFrameDuration, bullet_client) + + ##pre-rotate to make z-up + #y2zPos=[0,0,0.0] + #y2zOrn = p.getQuaternionFromEuler([1.57,0,0]) + #basePos,baseOrn = p.multiplyTransforms(y2zPos, y2zOrn,basePos1,baseOrn1) + + chestRotStart = [frameData[9],frameData[10],frameData[11],frameData[8]] + chestRotEnd = [frameDataNext[9],frameDataNext[10],frameDataNext[11],frameDataNext[8]] + self._chestRot = bullet_client.getQuaternionSlerp(chestRotStart,chestRotEnd,frameFraction) + self._chestVel = self.ComputeAngVel(chestRotStart,chestRotEnd,keyFrameDuration,bullet_client) + + neckRotStart = [frameData[13],frameData[14],frameData[15],frameData[12]] + neckRotEnd= 
[frameDataNext[13],frameDataNext[14],frameDataNext[15],frameDataNext[12]] + self._neckRot = bullet_client.getQuaternionSlerp(neckRotStart,neckRotEnd,frameFraction) + self._neckVel = self.ComputeAngVel(neckRotStart,neckRotEnd,keyFrameDuration,bullet_client) + + rightHipRotStart = [frameData[17],frameData[18],frameData[19],frameData[16]] + rightHipRotEnd = [frameDataNext[17],frameDataNext[18],frameDataNext[19],frameDataNext[16]] + self._rightHipRot = bullet_client.getQuaternionSlerp(rightHipRotStart,rightHipRotEnd,frameFraction) + self._rightHipVel = self.ComputeAngVel(rightHipRotStart,rightHipRotEnd,keyFrameDuration,bullet_client) + + rightKneeRotStart = [frameData[20]] + rightKneeRotEnd = [frameDataNext[20]] + self._rightKneeRot = [rightKneeRotStart[0]+frameFraction*(rightKneeRotEnd[0]-rightKneeRotStart[0])] + self._rightKneeVel = [(rightKneeRotEnd[0]-rightKneeRotStart[0])/keyFrameDuration] + + rightAnkleRotStart = [frameData[22],frameData[23],frameData[24],frameData[21]] + rightAnkleRotEnd = [frameDataNext[22],frameDataNext[23],frameDataNext[24],frameDataNext[21]] + self._rightAnkleRot = bullet_client.getQuaternionSlerp(rightAnkleRotStart,rightAnkleRotEnd,frameFraction) + self._rightAnkleVel = self.ComputeAngVel(rightAnkleRotStart,rightAnkleRotEnd,keyFrameDuration,bullet_client) + + rightShoulderRotStart = [frameData[26],frameData[27],frameData[28],frameData[25]] + rightShoulderRotEnd = [frameDataNext[26],frameDataNext[27],frameDataNext[28],frameDataNext[25]] + self._rightShoulderRot = bullet_client.getQuaternionSlerp(rightShoulderRotStart,rightShoulderRotEnd,frameFraction) + self._rightShoulderVel = self.ComputeAngVel(rightShoulderRotStart,rightShoulderRotEnd, keyFrameDuration,bullet_client) + + rightElbowRotStart = [frameData[29]] + rightElbowRotEnd = [frameDataNext[29]] + self._rightElbowRot = [rightElbowRotStart[0]+frameFraction*(rightElbowRotEnd[0]-rightElbowRotStart[0])] + self._rightElbowVel = [(rightElbowRotEnd[0]-rightElbowRotStart[0])/keyFrameDuration] + + leftHipRotStart = [frameData[31],frameData[32],frameData[33],frameData[30]] + leftHipRotEnd = [frameDataNext[31],frameDataNext[32],frameDataNext[33],frameDataNext[30]] + self._leftHipRot = bullet_client.getQuaternionSlerp(leftHipRotStart,leftHipRotEnd,frameFraction) + self._leftHipVel = self.ComputeAngVel(leftHipRotStart, leftHipRotEnd,keyFrameDuration,bullet_client) + + leftKneeRotStart = [frameData[34]] + leftKneeRotEnd = [frameDataNext[34]] + self._leftKneeRot = [leftKneeRotStart[0] +frameFraction*(leftKneeRotEnd[0]-leftKneeRotStart[0]) ] + self._leftKneeVel = [(leftKneeRotEnd[0]-leftKneeRotStart[0])/keyFrameDuration] + + leftAnkleRotStart = [frameData[36],frameData[37],frameData[38],frameData[35]] + leftAnkleRotEnd = [frameDataNext[36],frameDataNext[37],frameDataNext[38],frameDataNext[35]] + self._leftAnkleRot = bullet_client.getQuaternionSlerp(leftAnkleRotStart,leftAnkleRotEnd,frameFraction) + self._leftAnkleVel = self.ComputeAngVel(leftAnkleRotStart,leftAnkleRotEnd,keyFrameDuration,bullet_client) + + leftShoulderRotStart = [frameData[40],frameData[41],frameData[42],frameData[39]] + leftShoulderRotEnd = [frameDataNext[40],frameDataNext[41],frameDataNext[42],frameDataNext[39]] + self._leftShoulderRot = bullet_client.getQuaternionSlerp(leftShoulderRotStart,leftShoulderRotEnd,frameFraction) + self._leftShoulderVel = self.ComputeAngVel(leftShoulderRotStart,leftShoulderRotEnd,keyFrameDuration,bullet_client) + + leftElbowRotStart = [frameData[43]] + leftElbowRotEnd = [frameDataNext[43]] + self._leftElbowRot = 
[leftElbowRotStart[0]+frameFraction*(leftElbowRotEnd[0]-leftElbowRotStart[0])]
+    self._leftElbowVel = [(leftElbowRotEnd[0]-leftElbowRotStart[0])/keyFrameDuration]
+
+
+class Humanoid(object):
+  def __init__(self, pybullet_client, motion_data, baseShift):
+    """Constructs a humanoid and resets it to its initial state.
+    Args:
+      pybullet_client: The instance of BulletClient that manages the
+        simulations.
+    """
+    self._baseShift = baseShift
+    self._pybullet_client = pybullet_client
+
+    # Use SHARED_MEMORY for visual debugging; start a GUI physics server first.
+    self.kin_client = BulletClient(pybullet_client.DIRECT)
+    self.kin_client.resetSimulation()
+    self.kin_client.setAdditionalSearchPath(pybullet_data.getDataPath())
+    self.kin_client.configureDebugVisualizer(self.kin_client.COV_ENABLE_Y_AXIS_UP,1)
+    self.kin_client.setGravity(0,-9.8,0)
+
+    self._motion_data = motion_data
+    print("LOADING humanoid!")
+    self._humanoid = self._pybullet_client.loadURDF(
+      "humanoid/humanoid.urdf", [0,0.9,0],globalScaling=0.25, useFixedBase=False)
+
+    self._kinematicHumanoid = self.kin_client.loadURDF(
+      "humanoid/humanoid.urdf", [0,0.9,0],globalScaling=0.25, useFixedBase=False)
+
+    #print("human #joints=", self._pybullet_client.getNumJoints(self._humanoid))
+    pose = HumanoidPose()
+
+    for i in range (self._motion_data.NumFrames()-1):
+      frameData = self._motion_data._motion_data['Frames'][i]
+      pose.PostProcessMotionData(frameData)
+
+    self._pybullet_client.resetBasePositionAndOrientation(self._humanoid,self._baseShift,[0,0,0,1])
+    self._pybullet_client.changeDynamics(self._humanoid, -1, linearDamping=0, angularDamping=0)
+    for j in range (self._pybullet_client.getNumJoints(self._humanoid)):
+      ji = self._pybullet_client.getJointInfo(self._humanoid,j)
+      self._pybullet_client.changeDynamics(self._humanoid, j, linearDamping=0, angularDamping=0)
+      self._pybullet_client.changeVisualShape(self._humanoid, j , rgbaColor=[1,1,1,1])
+      #print("joint[",j,"].type=",jointTypes[ji[2]])
+      #print("joint[",j,"].name=",ji[1])
+
+    self._initial_state = self._pybullet_client.saveState()
+    self._allowed_body_parts=[11,14]
+    self.Reset()
+
+  def Reset(self):
+    self._pybullet_client.restoreState(self._initial_state)
+    self.SetSimTime(0)
+    pose = self.InitializePoseFromMotionData()
+    self.ApplyPose(pose, True, True, self._humanoid, self._pybullet_client)
+
+  def RenderReference(self, t):
+    self.SetSimTime(t)
+    frameData = self._motion_data._motion_data['Frames'][self._frame]
+    frameDataNext = self._motion_data._motion_data['Frames'][self._frameNext]
+    pose = HumanoidPose()
+    pose.Slerp(self._frameFraction, frameData, frameDataNext, self._pybullet_client)
+    self.ApplyPose(pose, True, True, self._humanoid, self._pybullet_client)
+
+  def CalcCycleCount(self, simTime, cycleTime):
+    phases = simTime / cycleTime
+    count = math.floor(phases)
+    loop = True
+    #count = (loop) ? 
count : cMathUtil::Clamp(count, 0, 1); + return count + + def SetSimTime(self, t): + self._simTime = t + #print("SetTimeTime time =",t) + keyFrameDuration = self._motion_data.KeyFrameDuraction() + cycleTime = keyFrameDuration*(self._motion_data.NumFrames()-1) + #print("self._motion_data.NumFrames()=",self._motion_data.NumFrames()) + #print("cycleTime=",cycleTime) + cycles = self.CalcCycleCount(t, cycleTime) + #print("cycles=",cycles) + frameTime = t - cycles*cycleTime + if (frameTime<0): + frameTime += cycleTime + + #print("keyFrameDuration=",keyFrameDuration) + #print("frameTime=",frameTime) + self._frame = int(frameTime/keyFrameDuration) + #print("self._frame=",self._frame) + + self._frameNext = self._frame+1 + if (self._frameNext >= self._motion_data.NumFrames()): + self._frameNext = self._frame + + self._frameFraction = (frameTime - self._frame*keyFrameDuration)/(keyFrameDuration) + #print("self._frameFraction=",self._frameFraction) + + def Terminates(self): + #check if any non-allowed body part hits the ground + terminates=False + pts = self._pybullet_client.getContactPoints() + for p in pts: + part = -1 + if (p[1]==self._humanoid): + part=p[3] + if (p[2]==self._humanoid): + part=p[4] + if (part >=0 and part not in self._allowed_body_parts): + terminates=True + + return terminates + + def BuildHeadingTrans(self, rootOrn): + #align root transform 'forward' with world-space x axis + eul = self._pybullet_client.getEulerFromQuaternion(rootOrn) + refDir = [1,0,0] + rotVec = self._pybullet_client.rotateVector(rootOrn, refDir) + heading = math.atan2(-rotVec[2], rotVec[0]) + heading2=eul[1] + #print("heading=",heading) + headingOrn = self._pybullet_client.getQuaternionFromAxisAngle([0,1,0],-heading) + return headingOrn + + def GetPhase(self): + keyFrameDuration = self._motion_data.KeyFrameDuraction() + cycleTime = keyFrameDuration*(self._motion_data.NumFrames()-1) + phase = self._simTime / cycleTime + phase = math.fmod(phase,1.0) + if (phase<0): + phase += 1 + return phase + + def BuildOriginTrans(self): + rootPos,rootOrn = self._pybullet_client.getBasePositionAndOrientation(self._humanoid) + + #print("rootPos=",rootPos, " rootOrn=",rootOrn) + invRootPos=[-rootPos[0], 0, -rootPos[2]] + #invOrigTransPos, invOrigTransOrn = self._pybullet_client.invertTransform(rootPos,rootOrn) + headingOrn = self.BuildHeadingTrans(rootOrn) + #print("headingOrn=",headingOrn) + headingMat = self._pybullet_client.getMatrixFromQuaternion(headingOrn) + #print("headingMat=",headingMat) + #dummy, rootOrnWithoutHeading = self._pybullet_client.multiplyTransforms([0,0,0],headingOrn, [0,0,0], rootOrn) + #dummy, invOrigTransOrn = self._pybullet_client.multiplyTransforms([0,0,0],rootOrnWithoutHeading, invOrigTransPos, invOrigTransOrn) + + invOrigTransPos, invOrigTransOrn = self._pybullet_client.multiplyTransforms( [0,0,0],headingOrn, invRootPos,[0,0,0,1]) + #print("invOrigTransPos=",invOrigTransPos) + #print("invOrigTransOrn=",invOrigTransOrn) + invOrigTransMat = self._pybullet_client.getMatrixFromQuaternion(invOrigTransOrn) + #print("invOrigTransMat =",invOrigTransMat ) + return invOrigTransPos, invOrigTransOrn + + def InitializePoseFromMotionData(self): + frameData = self._motion_data._motion_data['Frames'][self._frame] + frameDataNext = self._motion_data._motion_data['Frames'][self._frameNext] + pose = HumanoidPose() + pose.Slerp(self._frameFraction, frameData, frameDataNext, self._pybullet_client) + return pose + + + + + def ApplyAction(self, action): + #turn action into pose + pose = HumanoidPose() + pose.Reset() + 
index=0 + angle = action[index] + axis = [action[index+1],action[index+2],action[index+3]] + index+=4 + pose._chestRot = self._pybullet_client.getQuaternionFromAxisAngle(axis,angle) + #print("pose._chestRot=",pose._chestRot) + + angle = action[index] + axis = [action[index+1],action[index+2],action[index+3]] + index+=4 + pose._neckRot = self._pybullet_client.getQuaternionFromAxisAngle(axis,angle) + + angle = action[index] + axis = [action[index+1],action[index+2],action[index+3]] + index+=4 + pose._rightHipRot = self._pybullet_client.getQuaternionFromAxisAngle(axis,angle) + + angle = action[index] + index+=1 + pose._rightKneeRot = [angle] + + angle = action[index] + axis = [action[index+1],action[index+2],action[index+3]] + index+=4 + pose._rightAnkleRot = self._pybullet_client.getQuaternionFromAxisAngle(axis,angle) + + angle = action[index] + axis = [action[index+1],action[index+2],action[index+3]] + index+=4 + pose._rightShoulderRot = self._pybullet_client.getQuaternionFromAxisAngle(axis,angle) + + angle = action[index] + index+=1 + pose._rightElbowRot = [angle] + + angle = action[index] + axis = [action[index+1],action[index+2],action[index+3]] + index+=4 + pose._leftHipRot = self._pybullet_client.getQuaternionFromAxisAngle(axis,angle) + + angle = action[index] + index+=1 + pose._leftKneeRot = [angle] + + + angle = action[index] + axis = [action[index+1],action[index+2],action[index+3]] + index+=4 + pose._leftAnkleRot = self._pybullet_client.getQuaternionFromAxisAngle(axis,angle) + + angle = action[index] + axis = [action[index+1],action[index+2],action[index+3]] + index+=4 + pose._leftShoulderRot = self._pybullet_client.getQuaternionFromAxisAngle(axis,angle) + + angle = action[index] + index+=1 + pose._leftElbowRot = [angle] + + + #print("index=",index) + + initializeBase = False + initializeVelocities = False + self.ApplyPose(pose, initializeBase, initializeVelocities, self._humanoid, self._pybullet_client) + + + def ApplyPose(self, pose, initializeBase, initializeVelocities, humanoid,bc): + #todo: get tunable parametes from a json file or from URDF (kd, maxForce) + if (initializeBase): + bc.changeVisualShape(humanoid, 2 , rgbaColor=[1,0,0,1]) + basePos=[pose._basePos[0]+self._baseShift[0],pose._basePos[1]+self._baseShift[1],pose._basePos[2]+self._baseShift[2]] + + bc.resetBasePositionAndOrientation(humanoid, + basePos, pose._baseOrn) + if initializeVelocities: + bc.resetBaseVelocity(humanoid, pose._baseLinVel, pose._baseAngVel) + #print("resetBaseVelocity=",pose._baseLinVel) + else: + bc.changeVisualShape(humanoid, 2 , rgbaColor=[1,1,1,1]) + + + + kp=0.03 + chest=1 + neck=2 + rightShoulder=3 + rightElbow=4 + leftShoulder=6 + leftElbow = 7 + rightHip = 9 + rightKnee=10 + rightAnkle=11 + leftHip = 12 + leftKnee=13 + leftAnkle=14 + controlMode = bc.POSITION_CONTROL + + if (initializeBase): + if initializeVelocities: + bc.resetJointStateMultiDof(humanoid,chest,pose._chestRot, pose._chestVel) + bc.resetJointStateMultiDof(humanoid,neck,pose._neckRot, pose._neckVel) + bc.resetJointStateMultiDof(humanoid,rightHip,pose._rightHipRot, pose._rightHipVel) + bc.resetJointStateMultiDof(humanoid,rightKnee,pose._rightKneeRot, pose._rightKneeVel) + bc.resetJointStateMultiDof(humanoid,rightAnkle,pose._rightAnkleRot, pose._rightAnkleVel) + bc.resetJointStateMultiDof(humanoid,rightShoulder,pose._rightShoulderRot, pose._rightShoulderVel) + bc.resetJointStateMultiDof(humanoid,rightElbow, pose._rightElbowRot, pose._rightElbowVel) + bc.resetJointStateMultiDof(humanoid,leftHip, pose._leftHipRot, 
pose._leftHipVel) + bc.resetJointStateMultiDof(humanoid,leftKnee, pose._leftKneeRot, pose._leftKneeVel) + bc.resetJointStateMultiDof(humanoid,leftAnkle, pose._leftAnkleRot, pose._leftAnkleVel) + bc.resetJointStateMultiDof(humanoid,leftShoulder, pose._leftShoulderRot, pose._leftShoulderVel) + bc.resetJointStateMultiDof(humanoid,leftElbow, pose._leftElbowRot, pose._leftElbowVel) + else: + bc.resetJointStateMultiDof(humanoid,chest,pose._chestRot) + bc.resetJointStateMultiDof(humanoid,neck,pose._neckRot) + bc.resetJointStateMultiDof(humanoid,rightHip,pose._rightHipRot) + bc.resetJointStateMultiDof(humanoid,rightKnee,pose._rightKneeRot) + bc.resetJointStateMultiDof(humanoid,rightAnkle,pose._rightAnkleRot) + bc.resetJointStateMultiDof(humanoid,rightShoulder,pose._rightShoulderRot) + bc.resetJointStateMultiDof(humanoid,rightElbow, pose._rightElbowRot) + bc.resetJointStateMultiDof(humanoid,leftHip, pose._leftHipRot) + bc.resetJointStateMultiDof(humanoid,leftKnee, pose._leftKneeRot) + bc.resetJointStateMultiDof(humanoid,leftAnkle, pose._leftAnkleRot) + bc.resetJointStateMultiDof(humanoid,leftShoulder, pose._leftShoulderRot) + bc.resetJointStateMultiDof(humanoid,leftElbow, pose._leftElbowRot) + + bc.setJointMotorControlMultiDof(humanoid,chest,controlMode, targetPosition=pose._chestRot,positionGain=kp, force=[200]) + bc.setJointMotorControlMultiDof(humanoid,neck,controlMode,targetPosition=pose._neckRot,positionGain=kp, force=[50]) + bc.setJointMotorControlMultiDof(humanoid,rightHip,controlMode,targetPosition=pose._rightHipRot,positionGain=kp, force=[200]) + bc.setJointMotorControlMultiDof(humanoid,rightKnee,controlMode,targetPosition=pose._rightKneeRot,positionGain=kp, force=[150]) + bc.setJointMotorControlMultiDof(humanoid,rightAnkle,controlMode,targetPosition=pose._rightAnkleRot,positionGain=kp, force=[90]) + bc.setJointMotorControlMultiDof(humanoid,rightShoulder,controlMode,targetPosition=pose._rightShoulderRot,positionGain=kp, force=[100]) + bc.setJointMotorControlMultiDof(humanoid,rightElbow, controlMode,targetPosition=pose._rightElbowRot,positionGain=kp, force=[60]) + bc.setJointMotorControlMultiDof(humanoid,leftHip, controlMode,targetPosition=pose._leftHipRot,positionGain=kp, force=[200]) + bc.setJointMotorControlMultiDof(humanoid,leftKnee, controlMode,targetPosition=pose._leftKneeRot,positionGain=kp, force=[150]) + bc.setJointMotorControlMultiDof(humanoid,leftAnkle, controlMode,targetPosition=pose._leftAnkleRot,positionGain=kp, force=[90]) + bc.setJointMotorControlMultiDof(humanoid,leftShoulder, controlMode,targetPosition=pose._leftShoulderRot,positionGain=kp, force=[100]) + bc.setJointMotorControlMultiDof(humanoid,leftElbow, controlMode,targetPosition=pose._leftElbowRot,positionGain=kp, force=[60]) + + #debug space + #if (False): + # for j in range (bc.getNumJoints(self._humanoid)): + # js = bc.getJointState(self._humanoid, j) + # bc.resetJointState(self._humanoidDebug, j,js[0]) + # jsm = bc.getJointStateMultiDof(self._humanoid, j) + # if (len(jsm[0])>0): + # bc.resetJointStateMultiDof(self._humanoidDebug,j,jsm[0]) + + def GetState(self): + + stateVector = [] + phase = self.GetPhase() + #print("phase=",phase) + stateVector.append(phase) + + rootTransPos, rootTransOrn=self.BuildOriginTrans() + basePos,baseOrn = self._pybullet_client.getBasePositionAndOrientation(self._humanoid) + + rootPosRel, dummy = self._pybullet_client.multiplyTransforms(rootTransPos, rootTransOrn, basePos,[0,0,0,1]) + #print("!!!rootPosRel =",rootPosRel ) + #print("rootTransPos=",rootTransPos) + 
#print("basePos=",basePos) + localPos,localOrn = self._pybullet_client.multiplyTransforms( rootTransPos, rootTransOrn , basePos,baseOrn ) + + localPos=[localPos[0]-rootPosRel[0],localPos[1]-rootPosRel[1],localPos[2]-rootPosRel[2]] + #print("localPos=",localPos) + + stateVector.append(rootPosRel[1]) + + self.pb2dmJoints=[0,1,2,9,10,11,3,4,5,12,13,14,6,7,8] + + for pbJoint in range (self._pybullet_client.getNumJoints(self._humanoid)): + j = self.pb2dmJoints[pbJoint] + #print("joint order:",j) + ls = self._pybullet_client.getLinkState(self._humanoid, j, computeForwardKinematics=True) + linkPos = ls[0] + linkOrn = ls[1] + linkPosLocal, linkOrnLocal = self._pybullet_client.multiplyTransforms(rootTransPos, rootTransOrn, linkPos,linkOrn) + if (linkOrnLocal[3]<0): + linkOrnLocal=[-linkOrnLocal[0],-linkOrnLocal[1],-linkOrnLocal[2],-linkOrnLocal[3]] + linkPosLocal=[linkPosLocal[0]-rootPosRel[0],linkPosLocal[1]-rootPosRel[1],linkPosLocal[2]-rootPosRel[2]] + for l in linkPosLocal: + stateVector.append(l) + + #re-order the quaternion, DeepMimic uses w,x,y,z + stateVector.append(linkOrnLocal[3]) + stateVector.append(linkOrnLocal[0]) + stateVector.append(linkOrnLocal[1]) + stateVector.append(linkOrnLocal[2]) + + + for pbJoint in range (self._pybullet_client.getNumJoints(self._humanoid)): + j = self.pb2dmJoints[pbJoint] + ls = self._pybullet_client.getLinkState(self._humanoid, j, computeLinkVelocity=True) + linkLinVel = ls[6] + linkAngVel = ls[7] + for l in linkLinVel: + stateVector.append(l) + for l in linkAngVel: + stateVector.append(l) + + #print("stateVector len=",len(stateVector)) + #for st in range (len(stateVector)): + # print("state[",st,"]=",stateVector[st]) + return stateVector + + + def GetReward(self): + #from DeepMimic double cSceneImitate::CalcRewardImitate + pose_w = 0.5 + vel_w = 0.05 + end_eff_w = 0 #0.15 + root_w = 0 #0.2 + com_w = 0.1 + + total_w = pose_w + vel_w + end_eff_w + root_w + com_w + pose_w /= total_w + vel_w /= total_w + end_eff_w /= total_w + root_w /= total_w + com_w /= total_w + + pose_scale = 2 + vel_scale = 0.1 + end_eff_scale = 40 + root_scale = 5 + com_scale = 10 + err_scale = 1 + + reward = 0 + + pose_err = 0 + vel_err = 0 + end_eff_err = 0 + root_err = 0 + com_err = 0 + heading_err = 0 + + #create a mimic reward, comparing the dynamics humanoid with a kinematic one + + pose = self.InitializePoseFromMotionData() + #print("self._kinematicHumanoid=",self._kinematicHumanoid) + #print("kinematicHumanoid #joints=",self.kin_client.getNumJoints(self._kinematicHumanoid)) + self.ApplyPose(pose, True, True, self._kinematicHumanoid, self.kin_client) + + #const Eigen::VectorXd& pose0 = sim_char.GetPose(); + #const Eigen::VectorXd& vel0 = sim_char.GetVel(); + #const Eigen::VectorXd& pose1 = kin_char.GetPose(); + #const Eigen::VectorXd& vel1 = kin_char.GetVel(); + #tMatrix origin_trans = sim_char.BuildOriginTrans(); + #tMatrix kin_origin_trans = kin_char.BuildOriginTrans(); + # + #tVector com0_world = sim_char.CalcCOM(); + #tVector com_vel0_world = sim_char.CalcCOMVel(); + #tVector com1_world; + #tVector com_vel1_world; + #cRBDUtil::CalcCoM(joint_mat, body_defs, pose1, vel1, com1_world, com_vel1_world); + # + root_id = 0 + #tVector root_pos0 = cKinTree::GetRootPos(joint_mat, pose0); + #tVector root_pos1 = cKinTree::GetRootPos(joint_mat, pose1); + #tQuaternion root_rot0 = cKinTree::GetRootRot(joint_mat, pose0); + #tQuaternion root_rot1 = cKinTree::GetRootRot(joint_mat, pose1); + #tVector root_vel0 = cKinTree::GetRootVel(joint_mat, vel0); + #tVector root_vel1 = 
cKinTree::GetRootVel(joint_mat, vel1); + #tVector root_ang_vel0 = cKinTree::GetRootAngVel(joint_mat, vel0); + #tVector root_ang_vel1 = cKinTree::GetRootAngVel(joint_mat, vel1); + + mJointWeights = [0.20833,0.10416, 0.0625, 0.10416, + 0.0625, 0.041666666666666671, 0.0625, 0.0416, + 0.00, 0.10416, 0.0625, 0.0416, 0.0625, 0.0416, 0.0000] + + num_end_effs = 0 + num_joints = 15 + + root_rot_w = mJointWeights[root_id] + #pose_err += root_rot_w * cKinTree::CalcRootRotErr(joint_mat, pose0, pose1) + #vel_err += root_rot_w * cKinTree::CalcRootAngVelErr(joint_mat, vel0, vel1) + + for j in range (num_joints): + curr_pose_err = 0 + curr_vel_err = 0 + w = mJointWeights[j]; + + simJointInfo = self._pybullet_client.getJointStateMultiDof(self._humanoid, j) + + #print("simJointInfo.pos=",simJointInfo[0]) + #print("simJointInfo.vel=",simJointInfo[1]) + kinJointInfo = self.kin_client.getJointStateMultiDof(self._kinematicHumanoid,j) + #print("kinJointInfo.pos=",kinJointInfo[0]) + #print("kinJointInfo.vel=",kinJointInfo[1]) + if (len(simJointInfo[0])==1): + angle = simJointInfo[0][0]-kinJointInfo[0][0] + curr_pose_err = angle*angle + velDiff = simJointInfo[1][0]-kinJointInfo[1][0] + curr_vel_err = velDiff*velDiff + if (len(simJointInfo[0])==4): + #print("quaternion diff") + diffQuat = self._pybullet_client.getDifferenceQuaternion(simJointInfo[0],kinJointInfo[0]) + axis,angle = self._pybullet_client.getAxisAngleFromQuaternion(diffQuat) + curr_pose_err = angle*angle + diffVel = [simJointInfo[1][0]-kinJointInfo[1][0],simJointInfo[1][1]-kinJointInfo[1][1],simJointInfo[1][2]-kinJointInfo[1][2]] + curr_vel_err = diffVel[0]*diffVel[0]+diffVel[1]*diffVel[1]+diffVel[2]*diffVel[2] + + + pose_err += w * curr_pose_err + vel_err += w * curr_vel_err + + # bool is_end_eff = sim_char.IsEndEffector(j) + # if (is_end_eff) + # { + # tVector pos0 = sim_char.CalcJointPos(j) + # tVector pos1 = cKinTree::CalcJointWorldPos(joint_mat, pose1, j) + # double ground_h0 = mGround->SampleHeight(pos0) + # double ground_h1 = kin_char.GetOriginPos()[1] + # + # tVector pos_rel0 = pos0 - root_pos0 + # tVector pos_rel1 = pos1 - root_pos1 + # pos_rel0[1] = pos0[1] - ground_h0 + # pos_rel1[1] = pos1[1] - ground_h1 + # + # pos_rel0 = origin_trans * pos_rel0 + # pos_rel1 = kin_origin_trans * pos_rel1 + # + # curr_end_err = (pos_rel1 - pos_rel0).squaredNorm() + # end_eff_err += curr_end_err; + # ++num_end_effs; + # } + #} + #if (num_end_effs > 0): + # end_eff_err /= num_end_effs + # + #double root_ground_h0 = mGround->SampleHeight(sim_char.GetRootPos()) + #double root_ground_h1 = kin_char.GetOriginPos()[1] + #root_pos0[1] -= root_ground_h0 + #root_pos1[1] -= root_ground_h1 + #root_pos_err = (root_pos0 - root_pos1).squaredNorm() + # + #root_rot_err = cMathUtil::QuatDiffTheta(root_rot0, root_rot1) + #root_rot_err *= root_rot_err + + #root_vel_err = (root_vel1 - root_vel0).squaredNorm() + #root_ang_vel_err = (root_ang_vel1 - root_ang_vel0).squaredNorm() + + #root_err = root_pos_err + # + 0.1 * root_rot_err + # + 0.01 * root_vel_err + # + 0.001 * root_ang_vel_err + #com_err = 0.1 * (com_vel1_world - com_vel0_world).squaredNorm() + + #print("pose_err=",pose_err) + #print("vel_err=",vel_err) + pose_reward = math.exp(-err_scale * pose_scale * pose_err) + vel_reward = math.exp(-err_scale * vel_scale * vel_err) + end_eff_reward = math.exp(-err_scale * end_eff_scale * end_eff_err) + root_reward = math.exp(-err_scale * root_scale * root_err) + com_reward = math.exp(-err_scale * com_scale * com_err) + + reward = pose_w * pose_reward + vel_w * vel_reward + 
end_eff_w * end_eff_reward + root_w * root_reward + com_w * com_reward + + #print("reward = %f (pose_reward=%f, vel_reward=%f, end_eff_reward=%f, root_reward=%f, com_reward=%f)\n", reward, + # pose_reward,vel_reward,end_eff_reward, root_reward, com_reward); + + return reward + + def GetBasePosition(self): + pos,orn = self._pybullet_client.getBasePositionAndOrientation(self._humanoid) + return pos + diff --git a/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/inverse_kinematics.py b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/inverse_kinematics.py new file mode 100644 index 000000000..74f7686b7 --- /dev/null +++ b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/inverse_kinematics.py @@ -0,0 +1,150 @@ +# coding: utf-8 + +import numpy as np +from math import * +import pickle +import json +# import quaternion +from transformation import * + +from pyquaternion import Quaternion + +def get_angle(vec1, vec2): + cos_theta = np.dot(vec1, vec2)/(np.linalg.norm(vec1) * np.linalg.norm(vec2)) + return acos(cos_theta) + + +def get_quaternion(ox, oy, oz, x, y, z): + # given transformed axis in x-y-z order return a quaternion + ox /= np.linalg.norm(ox) + oy /= np.linalg.norm(oy) + oz /= np.linalg.norm(oz) + + set1 = np.vstack((ox,oy,oz)) + + x /= np.linalg.norm(x) + y /= np.linalg.norm(y) + z /= np.linalg.norm(z) + + set2 = np.vstack((x,y,z)) + rot_mat = superimposition_matrix(set1, set2, scale=False, usesvd=True) + rot_qua = quaternion_from_matrix(rot_mat) + + return rot_qua + + +# 3D coord to deepmimic rotations +def coord_to_rot(frameNum, frame, frame_duration): + eps = 0.001 + axis_rotate_rate = 0.3 + + frame = np.array(frame) + tmp = [[] for i in range(15)] + # duration of frame in seconds (1D), + tmp[0] = [frame_duration] + # root position (3D), + tmp[1] = frame[0] + # root rotation (4D), + root_y = (frame[7] - frame[0]) + root_z = (frame[1] - frame[0]) + root_x = np.cross(root_y, root_z) + + x = np.array([1.0,0,0]) + y = np.array([0,1.0,0]) + z = np.array([0,0,1.0]) + + rot_qua = get_quaternion(root_x, root_y, root_z, x, y, z) + tmp[2] = list(rot_qua) + + # chest rotation (4D), + chest_y = (frame[8] - frame[7]) + chest_z = (frame[14] - frame[8]) + chest_x = np.cross(chest_y, chest_z) + rot_qua = get_quaternion(chest_x, chest_y, chest_z, root_x, root_y, root_z) + tmp[3] = list(rot_qua) + + # neck rotation (4D), + neck_y = (frame[10] - frame[8]) + neck_z = np.cross(frame[10]-frame[9], frame[8]-frame[9]) + neck_x = np.cross(neck_y, neck_z) + rot_qua = get_quaternion(neck_x, neck_y, neck_z, chest_x, chest_y, chest_z) + tmp[4] = list(rot_qua) + + # right hip rotation (4D), + r_hip_y = (frame[1] - frame[2]) + r_hip_z = np.cross(frame[1]-frame[2], frame[3]-frame[2]) + r_hip_x = np.cross(r_hip_y, r_hip_z) + rot_qua = get_quaternion(r_hip_x, r_hip_y, r_hip_z, root_x, root_y, root_z) + tmp[5] = list(rot_qua) + + # right knee rotation (1D), + vec1 = frame[1] - frame[2] + vec2 = frame[3] - frame[2] + angle1 = get_angle(vec1, vec2) + tmp[6] = [angle1-pi] + + # right ankle rotation (4D), + tmp[7] = [1,0,0,0] + + # right shoulder rotation (4D), + r_shou_y = (frame[14] - frame[15]) + r_shou_z = np.cross(frame[16]-frame[15], frame[14]-frame[15]) + r_shou_x = np.cross(r_shou_y, r_shou_z) + rot_qua = get_quaternion(r_shou_x, r_shou_y, r_shou_z, chest_x, chest_y, chest_z) + tmp[8] = list(rot_qua) + + # right elbow rotation (1D), + vec1 = frame[14] - frame[15] + vec2 = frame[16] - frame[15] + angle1 = get_angle(vec1, vec2) + tmp[9] = [pi-angle1] + + # left hip rotation (4D), + l_hip_y = (frame[4] - 
frame[5]) + l_hip_z = np.cross(frame[4]-frame[5], frame[6]-frame[5]) + l_hip_x = np.cross(l_hip_y, l_hip_z) + rot_qua = get_quaternion(l_hip_x, l_hip_y, l_hip_z, root_x, root_y, root_z) + tmp[10] = list(rot_qua) + + # left knee rotation (1D), + vec1 = frame[4] - frame[5] + vec2 = frame[6] - frame[5] + angle1 = get_angle(vec1, vec2) + tmp[11] = [angle1-pi] + + # left ankle rotation (4D), + tmp[12] = [1,0,0,0] + + # left shoulder rotation (4D), + l_shou_y = (frame[11] - frame[12]) + l_shou_z = np.cross(frame[13]-frame[12], frame[11]-frame[12]) + l_shou_x = np.cross(l_shou_y, l_shou_z) + rot_qua = get_quaternion(l_shou_x, l_shou_y, l_shou_z, chest_x, chest_y, chest_z) + tmp[13] = list(rot_qua) + + # left elbow rotation (1D) + vec1 = frame[11] - frame[12] + vec2 = frame[13] - frame[12] + angle1 = get_angle(vec1, vec2) + tmp[14] = [pi-angle1] + + ret = [] + for i in tmp: + ret += list(i) + return np.array(ret) + +# In[6]: + + +def coord_seq_to_rot_seq(coord_seq, frame_duration): + ret = [] + for i in range(len(coord_seq)): + tmp = coord_to_rot( i, coord_seq[i], frame_duration) + ret.append(list(tmp)) + return ret + + + + + + diff --git a/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/mocap_dataset.py b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/mocap_dataset.py new file mode 100644 index 000000000..18bc27ae4 --- /dev/null +++ b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/mocap_dataset.py @@ -0,0 +1,36 @@ +import numpy as np +from skeleton import Skeleton + +class MocapDataset: + def __init__(self, fps, skeleton): + self._skeleton = skeleton + self._fps = fps + self._data = None # Must be filled by subclass + self._cameras = None # Must be filled by subclass + + def remove_joints(self, joints_to_remove): + kept_joints = self._skeleton.remove_joints(joints_to_remove) + for subject in self._data.keys(): + for action in self._data[subject].keys(): + s = self._data[subject][action] + s['positions'] = s['positions'][:, kept_joints] + + + def __getitem__(self, key): + return self._data[key] + + def subjects(self): + return self._data.keys() + + def fps(self): + return self._fps + + def skeleton(self): + return self._skeleton + + def cameras(self): + return self._cameras + + def supports_semi_supervised(self): + # This method can be overridden + return False diff --git a/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/quaternion.py b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/quaternion.py new file mode 100644 index 000000000..e4e9a7753 --- /dev/null +++ b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/quaternion.py @@ -0,0 +1,30 @@ +import numpy as np + +def qrot(q, v): + """ + Rotate vector(s) v about the rotation described by quaternion(s) q. + Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v, + where * denotes any number of dimensions. + Returns a tensor of shape (*, 3). 
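+    The result is computed as v + 2*w*(u x v) + 2*(u x (u x v)) for a
+    quaternion q = (w, u), which equals q * v * q**-1 for unit quaternions.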
+ """ + assert q.shape[-1] == 4 + assert v.shape[-1] == 3 + assert q.shape[:-1] == v.shape[:-1] + + qvec = q[..., 1:] + + uv = np.cross(qvec, v) + uuv = np.cross(qvec, uv) + + return (v + 2 * (q[..., :1] * uv + uuv)) + +def qinverse(q, inplace=False): + # We assume the quaternion to be normalized + if inplace: + q[..., 1:] *= -1 + return q + else: + w = q[..., :1] + xyz = q[..., 1:] + return np.hstack((w, -xyz)) + \ No newline at end of file diff --git a/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/render_reference.py b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/render_reference.py new file mode 100644 index 000000000..c3882bf07 --- /dev/null +++ b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/render_reference.py @@ -0,0 +1,144 @@ +import os, inspect +currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +parentdir = os.path.dirname(os.path.dirname(currentdir)) +os.sys.path.insert(0,parentdir) + +print('parent:', parentdir) + +import pybullet_data +import pybullet +import time +import random + + +from pybullet_utils.bullet_client import BulletClient +from deep_mimic.env.motion_capture_data import MotionCaptureData + +# from pybullet_envs.deep_mimic.env.humanoid_stable_pd import HumanoidStablePd +from humanoid import Humanoid +from humanoid import HumanoidPose + +# from env.humanoid_stable_pd + +from deepmimic_json_generator import * + +import pybullet as p +import numpy as np +import argparse + + + +parser = argparse.ArgumentParser(description='Arguments for loading reference for learning.') + +# General arguments +parser.add_argument('--dataset_path', default='data/data_3d_h36m.npz', type=str, help='target dataset') # h36m or humaneva +parser.add_argument('--json_path', default='data/Walking.json', type=str, help='json file path for storing the deepmimic-format json created by inverse-kinect.') +parser.add_argument('--fps', default=24, type=int, help='frame per second') +parser.add_argument('--subject', default='S11', type=str, help='camera subject.') +parser.add_argument('--action', default='Walking', type=str, help='name of the action.') +parser.add_argument('--loop', default='wrap', type=str, help='loop information in deepmimic, wrap or none.') +parser.add_argument('--draw_gt', action='store_true', help='draw ground truth or not.') + +args = parser.parse_args() + + +dataset_path = args.dataset_path +json_path = args.json_path +fps = args.fps +subject = args.subject +action = args.action +loop = args.loop +draw_gt = args.draw_gt + + +def draw_ground_truth(coord_seq, frame, duration, shift): + global joint_info + joint = coord_seq[frame] + shift = np.array(shift) + for i in range(1, 17): + # print(x[11], x[14]) + joint_fa = joint_info['father'][i] + if joint_info['side'][i] == 'right': + p.addUserDebugLine(lineFromXYZ=joint[i]+shift, + lineToXYZ=joint[joint_fa]+shift, + lineColorRGB=(255,0,0), + lineWidth=1, + lifeTime=duration) + else: + p.addUserDebugLine(lineFromXYZ=joint[i]+shift, + lineToXYZ=joint[joint_fa]+shift, + lineColorRGB=(0,0,0), + lineWidth=1, + lifeTime=duration) + + +dataset = init_fb_h36m_dataset(dataset_path) +ground_truth = pose3D_from_fb_h36m(dataset, + subject = subject, + action = action, + shift = [1.0,0.0,0.0]) + +rot_seq = coord_seq_to_rot_seq(coord_seq = ground_truth, + frame_duration = 1/fps) + + +rot_seq_to_deepmimic_json( rot_seq = rot_seq, + loop = loop, + json_path = json_path) + + + +bc = BulletClient(connection_mode=pybullet.GUI) +bc.setAdditionalSearchPath(pybullet_data.getDataPath()) 
+bc.configureDebugVisualizer(bc.COV_ENABLE_Y_AXIS_UP,1)
+bc.setGravity(0,-9.8,0)
+motion = MotionCaptureData()
+
+motionPath = json_path
+motion.Load(motionPath)
+print("numFrames = ", motion.NumFrames())
+
+simTimeId = bc.addUserDebugParameter("simTime",0,motion.NumFrames()-1.1,0)
+
+y2zOrn = bc.getQuaternionFromEuler([-1.57,0,0])
+bc.loadURDF("plane.urdf",[0,-0.04,0], y2zOrn)
+
+humanoid = Humanoid(bc, motion, [0,0,0])  # the coordinates of the initial base position
+
+print(p.getBasePositionAndOrientation(humanoid._humanoid))
+
+simTime = 0
+keyFrameDuration = motion.KeyFrameDuraction()
+print("keyFrameDuration=",keyFrameDuration)
+for utNum in range(motion.NumFrames()):
+    bc.stepSimulation()
+    humanoid.RenderReference(utNum * keyFrameDuration)
+    if draw_gt:
+        draw_ground_truth(coord_seq = ground_truth,
+                          frame = utNum,
+                          duration = keyFrameDuration,
+                          shift = [-1.0, 0.0, 1.0])
+    time.sleep(0.001)
+stage = 0
+
+
+def Reset(humanoid):
+    global simTime
+    humanoid.Reset()
+    simTime = 0
+    humanoid.SetSimTime(simTime)
+    pose = humanoid.InitializePoseFromMotionData()
+    humanoid.ApplyPose(pose, True, True, humanoid._humanoid, bc)
+
+
+Reset(humanoid)
+p.disconnect()

diff --git a/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/skeleton.py b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/skeleton.py
new file mode 100644
index 000000000..fa6d1e622
--- /dev/null
+++ b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/skeleton.py
@@ -0,0 +1,81 @@
+import numpy as np
+
+class Skeleton:
+    def __init__(self, parents, joints_left, joints_right):
+        assert len(joints_left) == len(joints_right)
+
+        self._parents = np.array(parents)
+        self._joints_left = joints_left
+        self._joints_right = joints_right
+        self._compute_metadata()
+
+    def num_joints(self):
+        return len(self._parents)
+
+    def parents(self):
+        return self._parents
+
+    def has_children(self):
+        return self._has_children
+
+    def children(self):
+        return self._children
+
+    def remove_joints(self, joints_to_remove):
+        """
+        Remove the joints specified in 'joints_to_remove'.
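+        Parents of removed joints are rerouted to the nearest kept
+        ancestor, the remaining joints are re-indexed contiguously, and
+        the list of kept (original) joint indices is returned.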
+ """ + valid_joints = [] + for joint in range(len(self._parents)): + if joint not in joints_to_remove: + valid_joints.append(joint) + + for i in range(len(self._parents)): + while self._parents[i] in joints_to_remove: + self._parents[i] = self._parents[self._parents[i]] + + index_offsets = np.zeros(len(self._parents), dtype=int) + new_parents = [] + for i, parent in enumerate(self._parents): + if i not in joints_to_remove: + new_parents.append(parent - index_offsets[parent]) + else: + index_offsets[i:] += 1 + self._parents = np.array(new_parents) + + + if self._joints_left is not None: + new_joints_left = [] + for joint in self._joints_left: + if joint in valid_joints: + new_joints_left.append(joint - index_offsets[joint]) + self._joints_left = new_joints_left + if self._joints_right is not None: + new_joints_right = [] + for joint in self._joints_right: + if joint in valid_joints: + new_joints_right.append(joint - index_offsets[joint]) + self._joints_right = new_joints_right + + self._compute_metadata() + + return valid_joints + + def joints_left(self): + return self._joints_left + + def joints_right(self): + return self._joints_right + + def _compute_metadata(self): + self._has_children = np.zeros(len(self._parents)).astype(bool) + for i, parent in enumerate(self._parents): + if parent != -1: + self._has_children[parent] = True + + self._children = [] + for i, parent in enumerate(self._parents): + self._children.append([]) + for i, parent in enumerate(self._parents): + if parent != -1: + self._children[parent].append(i) diff --git a/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/transformation.py b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/transformation.py new file mode 100644 index 000000000..f9745fe6e --- /dev/null +++ b/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/transformation.py @@ -0,0 +1,1585 @@ +from __future__ import division, print_function + + +import math + +import numpy + + +def identity_matrix(): + """Return 4x4 identity/unit matrix. + + >>> I = identity_matrix() + >>> numpy.allclose(I, numpy.dot(I, I)) + True + >>> numpy.sum(I), numpy.trace(I) + (4.0, 4.0) + >>> numpy.allclose(I, numpy.identity(4)) + True + + """ + return numpy.identity(4) + + +def translation_matrix(direction): + """Return matrix to translate by direction vector. + + >>> v = numpy.random.random(3) - 0.5 + >>> numpy.allclose(v, translation_matrix(v)[:3, 3]) + True + + """ + M = numpy.identity(4) + M[:3, 3] = direction[:3] + return M + + +def translation_from_matrix(matrix): + """Return translation vector from translation matrix. + + >>> v0 = numpy.random.random(3) - 0.5 + >>> v1 = translation_from_matrix(translation_matrix(v0)) + >>> numpy.allclose(v0, v1) + True + + """ + return numpy.array(matrix, copy=False)[:3, 3].copy() + + +def reflection_matrix(point, normal): + """Return matrix to mirror at plane defined by point and normal vector. + + >>> v0 = numpy.random.random(4) - 0.5 + >>> v0[3] = 1. 
+ >>> v1 = numpy.random.random(3) - 0.5
+ >>> R = reflection_matrix(v0, v1)
+ >>> numpy.allclose(2, numpy.trace(R))
+ True
+ >>> numpy.allclose(v0, numpy.dot(R, v0))
+ True
+ >>> v2 = v0.copy()
+ >>> v2[:3] += v1
+ >>> v3 = v0.copy()
+ >>> v3[:3] -= v1
+ >>> numpy.allclose(v2, numpy.dot(R, v3))
+ True
+
+ """
+ normal = unit_vector(normal[:3])
+ M = numpy.identity(4)
+ M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
+ M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
+ return M
+
+
+def reflection_from_matrix(matrix):
+ """Return mirror plane point and normal vector from reflection matrix.
+
+ >>> v0 = numpy.random.random(3) - 0.5
+ >>> v1 = numpy.random.random(3) - 0.5
+ >>> M0 = reflection_matrix(v0, v1)
+ >>> point, normal = reflection_from_matrix(M0)
+ >>> M1 = reflection_matrix(point, normal)
+ >>> is_same_transform(M0, M1)
+ True
+
+ """
+ M = numpy.array(matrix, dtype=numpy.float64, copy=False)
+ # normal: unit eigenvector corresponding to eigenvalue -1
+ w, V = numpy.linalg.eig(M[:3, :3])
+ i = numpy.where(abs(numpy.real(w) + 1.0) < 1e-8)[0]
+ if not len(i):
+ raise ValueError('no unit eigenvector corresponding to eigenvalue -1')
+ normal = numpy.real(V[:, i[0]]).squeeze()
+ # point: any unit eigenvector corresponding to eigenvalue 1
+ w, V = numpy.linalg.eig(M)
+ i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
+ if not len(i):
+ raise ValueError('no unit eigenvector corresponding to eigenvalue 1')
+ point = numpy.real(V[:, i[-1]]).squeeze()
+ point /= point[3]
+ return point, normal
+
+
+def rotation_matrix(angle, direction, point=None):
+ """Return matrix to rotate about axis defined by point and direction.
+
+ >>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0])
+ >>> numpy.allclose(numpy.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1])
+ True
+ >>> angle = (random.random() - 0.5) * (2*math.pi)
+ >>> direc = numpy.random.random(3) - 0.5
+ >>> point = numpy.random.random(3) - 0.5
+ >>> R0 = rotation_matrix(angle, direc, point)
+ >>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
+ >>> is_same_transform(R0, R1)
+ True
+ >>> R0 = rotation_matrix(angle, direc, point)
+ >>> R1 = rotation_matrix(-angle, -direc, point)
+ >>> is_same_transform(R0, R1)
+ True
+ >>> I = numpy.identity(4, numpy.float64)
+ >>> numpy.allclose(I, rotation_matrix(math.pi*2, direc))
+ True
+ >>> numpy.allclose(2, numpy.trace(rotation_matrix(math.pi/2,
+ ... direc, point)))
+ True
+
+ """
+ sina = math.sin(angle)
+ cosa = math.cos(angle)
+ direction = unit_vector(direction[:3])
+ # rotation matrix around unit vector
+ R = numpy.diag([cosa, cosa, cosa])
+ R += numpy.outer(direction, direction) * (1.0 - cosa)
+ direction *= sina
+ R += numpy.array([[ 0.0, -direction[2], direction[1]],
+ [ direction[2], 0.0, -direction[0]],
+ [-direction[1], direction[0], 0.0]])
+ M = numpy.identity(4)
+ M[:3, :3] = R
+ if point is not None:
+ # rotation not around origin
+ point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
+ M[:3, 3] = point - numpy.dot(R, point)
+ return M
+
+
+def rotation_from_matrix(matrix):
+ """Return rotation angle and axis from rotation matrix.
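+
+ The rotation axis is the unit eigenvector of the 3x3 block for
+ eigenvalue 1; the angle follows from cos(angle) = (trace - 1)/2 and a
+ matching sine term via atan2.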
+ + >>> angle = (random.random() - 0.5) * (2*math.pi) + >>> direc = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> R0 = rotation_matrix(angle, direc, point) + >>> angle, direc, point = rotation_from_matrix(R0) + >>> R1 = rotation_matrix(angle, direc, point) + >>> is_same_transform(R0, R1) + True + + """ + R = numpy.array(matrix, dtype=numpy.float64, copy=False) + R33 = R[:3, :3] + # direction: unit eigenvector of R33 corresponding to eigenvalue of 1 + w, W = numpy.linalg.eig(R33.T) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError('no unit eigenvector corresponding to eigenvalue 1') + direction = numpy.real(W[:, i[-1]]).squeeze() + # point: unit eigenvector of R33 corresponding to eigenvalue of 1 + w, Q = numpy.linalg.eig(R) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError('no unit eigenvector corresponding to eigenvalue 1') + point = numpy.real(Q[:, i[-1]]).squeeze() + point /= point[3] + # rotation angle depending on direction + cosa = (numpy.trace(R33) - 1.0) / 2.0 + if abs(direction[2]) > 1e-8: + sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2] + elif abs(direction[1]) > 1e-8: + sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1] + else: + sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0] + angle = math.atan2(sina, cosa) + return angle, direction, point + + +def scale_matrix(factor, origin=None, direction=None): + """Return matrix to scale by factor around origin in direction. + + Use factor -1 for point symmetry. + + >>> v = (numpy.random.rand(4, 5) - 0.5) * 20 + >>> v[3] = 1 + >>> S = scale_matrix(-1.234) + >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3]) + True + >>> factor = random.random() * 10 - 5 + >>> origin = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> S = scale_matrix(factor, origin) + >>> S = scale_matrix(factor, origin, direct) + + """ + if direction is None: + # uniform scaling + M = numpy.diag([factor, factor, factor, 1.0]) + if origin is not None: + M[:3, 3] = origin[:3] + M[:3, 3] *= 1.0 - factor + else: + # nonuniform scaling + direction = unit_vector(direction[:3]) + factor = 1.0 - factor + M = numpy.identity(4) + M[:3, :3] -= factor * numpy.outer(direction, direction) + if origin is not None: + M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction + return M + + +def scale_from_matrix(matrix): + """Return scaling factor, origin and direction from scaling matrix. 
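+
+ The direction is recovered as the eigenvector whose eigenvalue equals
+ trace(M33) - 2; if none exists the scaling is uniform and the factor
+ falls back to the mean of the diagonal.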
+ + >>> factor = random.random() * 10 - 5 + >>> origin = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> S0 = scale_matrix(factor, origin) + >>> factor, origin, direction = scale_from_matrix(S0) + >>> S1 = scale_matrix(factor, origin, direction) + >>> is_same_transform(S0, S1) + True + >>> S0 = scale_matrix(factor, origin, direct) + >>> factor, origin, direction = scale_from_matrix(S0) + >>> S1 = scale_matrix(factor, origin, direction) + >>> is_same_transform(S0, S1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + M33 = M[:3, :3] + factor = numpy.trace(M33) - 2.0 + try: + # direction: unit eigenvector corresponding to eigenvalue factor + w, V = numpy.linalg.eig(M33) + i = numpy.where(abs(numpy.real(w) - factor) < 1e-8)[0][0] + direction = numpy.real(V[:, i]).squeeze() + direction /= vector_norm(direction) + except IndexError: + # uniform scaling + factor = (factor + 2.0) / 3.0 + direction = None + # origin: any eigenvector corresponding to eigenvalue 1 + w, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError('no eigenvector corresponding to eigenvalue 1') + origin = numpy.real(V[:, i[-1]]).squeeze() + origin /= origin[3] + return factor, origin, direction + + +def projection_matrix(point, normal, direction=None, + perspective=None, pseudo=False): + """Return matrix to project onto plane defined by point and normal. + + Using either perspective point, projection direction, or none of both. + + If pseudo is True, perspective projections will preserve relative depth + such that Perspective = dot(Orthogonal, PseudoPerspective). + + >>> P = projection_matrix([0, 0, 0], [1, 0, 0]) + >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:]) + True + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> persp = numpy.random.random(3) - 0.5 + >>> P0 = projection_matrix(point, normal) + >>> P1 = projection_matrix(point, normal, direction=direct) + >>> P2 = projection_matrix(point, normal, perspective=persp) + >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) + >>> is_same_transform(P2, numpy.dot(P0, P3)) + True + >>> P = projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0]) + >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20 + >>> v0[3] = 1 + >>> v1 = numpy.dot(P, v0) + >>> numpy.allclose(v1[1], v0[1]) + True + >>> numpy.allclose(v1[0], 3-v1[1]) + True + + """ + M = numpy.identity(4) + point = numpy.array(point[:3], dtype=numpy.float64, copy=False) + normal = unit_vector(normal[:3]) + if perspective is not None: + # perspective projection + perspective = numpy.array(perspective[:3], dtype=numpy.float64, + copy=False) + M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal) + M[:3, :3] -= numpy.outer(perspective, normal) + if pseudo: + # preserve relative depth + M[:3, :3] -= numpy.outer(normal, normal) + M[:3, 3] = numpy.dot(point, normal) * (perspective+normal) + else: + M[:3, 3] = numpy.dot(point, normal) * perspective + M[3, :3] = -normal + M[3, 3] = numpy.dot(perspective, normal) + elif direction is not None: + # parallel projection + direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False) + scale = numpy.dot(direction, normal) + M[:3, :3] -= numpy.outer(direction, normal) / scale + M[:3, 3] = direction * (numpy.dot(point, normal) / scale) + else: + # orthogonal projection + M[:3, :3] -= numpy.outer(normal, normal) + M[:3, 3] = numpy.dot(point, normal) * 
normal + return M + + +def projection_from_matrix(matrix, pseudo=False): + """Return projection plane and perspective point from projection matrix. + + Return values are same as arguments for projection_matrix function: + point, normal, direction, perspective, and pseudo. + + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> persp = numpy.random.random(3) - 0.5 + >>> P0 = projection_matrix(point, normal) + >>> result = projection_from_matrix(P0) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + >>> P0 = projection_matrix(point, normal, direct) + >>> result = projection_from_matrix(P0) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False) + >>> result = projection_from_matrix(P0, pseudo=False) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True) + >>> result = projection_from_matrix(P0, pseudo=True) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + M33 = M[:3, :3] + w, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not pseudo and len(i): + # point: any eigenvector corresponding to eigenvalue 1 + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + # direction: unit eigenvector corresponding to eigenvalue 0 + w, V = numpy.linalg.eig(M33) + i = numpy.where(abs(numpy.real(w)) < 1e-8)[0] + if not len(i): + raise ValueError('no eigenvector corresponding to eigenvalue 0') + direction = numpy.real(V[:, i[0]]).squeeze() + direction /= vector_norm(direction) + # normal: unit eigenvector of M33.T corresponding to eigenvalue 0 + w, V = numpy.linalg.eig(M33.T) + i = numpy.where(abs(numpy.real(w)) < 1e-8)[0] + if len(i): + # parallel projection + normal = numpy.real(V[:, i[0]]).squeeze() + normal /= vector_norm(normal) + return point, normal, direction, None, False + else: + # orthogonal projection, where normal equals direction vector + return point, direction, None, None, False + else: + # perspective projection + i = numpy.where(abs(numpy.real(w)) > 1e-8)[0] + if not len(i): + raise ValueError( + 'no eigenvector not corresponding to eigenvalue 0') + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + normal = - M[3, :3] + perspective = M[:3, 3] / numpy.dot(point[:3], normal) + if pseudo: + perspective -= normal + return point, normal, None, perspective, pseudo + + +def clip_matrix(left, right, bottom, top, near, far, perspective=False): + """Return matrix to obtain normalized device coordinates from frustum. + + The frustum bounds are axis-aligned along x (left, right), + y (bottom, top) and z (near, far). + + Normalized device coordinates are in range [-1, 1] if coordinates are + inside the frustum. + + If perspective is True the frustum is a truncated pyramid with the + perspective point at origin and direction along z axis, otherwise an + orthographic canonical view volume (a box). + + Homogeneous coordinates transformed by the perspective clip matrix + need to be dehomogenized (divided by w coordinate). 
+ + >>> frustum = numpy.random.rand(6) + >>> frustum[1] += frustum[0] + >>> frustum[3] += frustum[2] + >>> frustum[5] += frustum[4] + >>> M = clip_matrix(perspective=False, *frustum) + >>> numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1]) + array([-1., -1., -1., 1.]) + >>> numpy.dot(M, [frustum[1], frustum[3], frustum[5], 1]) + array([ 1., 1., 1., 1.]) + >>> M = clip_matrix(perspective=True, *frustum) + >>> v = numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1]) + >>> v / v[3] + array([-1., -1., -1., 1.]) + >>> v = numpy.dot(M, [frustum[1], frustum[3], frustum[4], 1]) + >>> v / v[3] + array([ 1., 1., -1., 1.]) + + """ + if left >= right or bottom >= top or near >= far: + raise ValueError('invalid frustum') + if perspective: + if near <= _EPS: + raise ValueError('invalid frustum: near <= 0') + t = 2.0 * near + M = [[t/(left-right), 0.0, (right+left)/(right-left), 0.0], + [0.0, t/(bottom-top), (top+bottom)/(top-bottom), 0.0], + [0.0, 0.0, (far+near)/(near-far), t*far/(far-near)], + [0.0, 0.0, -1.0, 0.0]] + else: + M = [[2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)], + [0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)], + [0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)], + [0.0, 0.0, 0.0, 1.0]] + return numpy.array(M) + + +def shear_matrix(angle, direction, point, normal): + """Return matrix to shear by angle along direction vector on shear plane. + + The shear plane is defined by a point and normal vector. The direction + vector must be orthogonal to the plane's normal vector. + + A point P is transformed by the shear matrix into P" such that + the vector P-P" is parallel to the direction vector and its extent is + given by the angle of P-P'-P", where P' is the orthogonal projection + of P onto the shear plane. + + >>> angle = (random.random() - 0.5) * 4*math.pi + >>> direct = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.cross(direct, numpy.random.random(3)) + >>> S = shear_matrix(angle, direct, point, normal) + >>> numpy.allclose(1, numpy.linalg.det(S)) + True + + """ + normal = unit_vector(normal[:3]) + direction = unit_vector(direction[:3]) + if abs(numpy.dot(normal, direction)) > 1e-6: + raise ValueError('direction and normal vectors are not orthogonal') + angle = math.tan(angle) + M = numpy.identity(4) + M[:3, :3] += angle * numpy.outer(direction, normal) + M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction + return M + + +def shear_from_matrix(matrix): + """Return shear angle, direction and plane from shear matrix. 
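+
+ The plane normal is the cross product of two independent eigenvectors
+ of the 3x3 block for eigenvalue 1, and the shear direction is
+ (M33 - identity) applied to that normal.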
+ + >>> angle = (random.random() - 0.5) * 4*math.pi + >>> direct = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.cross(direct, numpy.random.random(3)) + >>> S0 = shear_matrix(angle, direct, point, normal) + >>> angle, direct, point, normal = shear_from_matrix(S0) + >>> S1 = shear_matrix(angle, direct, point, normal) + >>> is_same_transform(S0, S1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + M33 = M[:3, :3] + # normal: cross independent eigenvectors corresponding to the eigenvalue 1 + w, V = numpy.linalg.eig(M33) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-4)[0] + if len(i) < 2: + raise ValueError('no two linear independent eigenvectors found %s' % w) + V = numpy.real(V[:, i]).squeeze().T + lenorm = -1.0 + for i0, i1 in ((0, 1), (0, 2), (1, 2)): + n = numpy.cross(V[i0], V[i1]) + w = vector_norm(n) + if w > lenorm: + lenorm = w + normal = n + normal /= lenorm + # direction and angle + direction = numpy.dot(M33 - numpy.identity(3), normal) + angle = vector_norm(direction) + direction /= angle + angle = math.atan(angle) + # point: eigenvector corresponding to eigenvalue 1 + w, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError('no eigenvector corresponding to eigenvalue 1') + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + return angle, direction, point, normal + + +def decompose_matrix(matrix): + """Return sequence of transformations from transformation matrix. + + matrix : array_like + Non-degenerative homogeneous transformation matrix + + Return tuple of: + scale : vector of 3 scaling factors + shear : list of shear factors for x-y, x-z, y-z axes + angles : list of Euler angles about static x, y, z axes + translate : translation vector along x, y, z axes + perspective : perspective partition of matrix + + Raise ValueError if matrix is of wrong type or degenerative. 
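+
+ The upper 3x3 block is factored row by row, Gram-Schmidt style: each
+ row norm gives a scale factor, projections onto previous rows give the
+ shear terms, and the resulting orthonormal rows yield the Euler angles.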
+ + >>> T0 = translation_matrix([1, 2, 3]) + >>> scale, shear, angles, trans, persp = decompose_matrix(T0) + >>> T1 = translation_matrix(trans) + >>> numpy.allclose(T0, T1) + True + >>> S = scale_matrix(0.123) + >>> scale, shear, angles, trans, persp = decompose_matrix(S) + >>> scale[0] + 0.123 + >>> R0 = euler_matrix(1, 2, 3) + >>> scale, shear, angles, trans, persp = decompose_matrix(R0) + >>> R1 = euler_matrix(*angles) + >>> numpy.allclose(R0, R1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=True).T + if abs(M[3, 3]) < _EPS: + raise ValueError('M[3, 3] is zero') + M /= M[3, 3] + P = M.copy() + P[:, 3] = 0.0, 0.0, 0.0, 1.0 + if not numpy.linalg.det(P): + raise ValueError('matrix is singular') + + scale = numpy.zeros((3, )) + shear = [0.0, 0.0, 0.0] + angles = [0.0, 0.0, 0.0] + + if any(abs(M[:3, 3]) > _EPS): + perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T)) + M[:, 3] = 0.0, 0.0, 0.0, 1.0 + else: + perspective = numpy.array([0.0, 0.0, 0.0, 1.0]) + + translate = M[3, :3].copy() + M[3, :3] = 0.0 + + row = M[:3, :3].copy() + scale[0] = vector_norm(row[0]) + row[0] /= scale[0] + shear[0] = numpy.dot(row[0], row[1]) + row[1] -= row[0] * shear[0] + scale[1] = vector_norm(row[1]) + row[1] /= scale[1] + shear[0] /= scale[1] + shear[1] = numpy.dot(row[0], row[2]) + row[2] -= row[0] * shear[1] + shear[2] = numpy.dot(row[1], row[2]) + row[2] -= row[1] * shear[2] + scale[2] = vector_norm(row[2]) + row[2] /= scale[2] + shear[1:] /= scale[2] + + if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0: + numpy.negative(scale, scale) + numpy.negative(row, row) + + angles[1] = math.asin(-row[0, 2]) + if math.cos(angles[1]): + angles[0] = math.atan2(row[1, 2], row[2, 2]) + angles[2] = math.atan2(row[0, 1], row[0, 0]) + else: + # angles[0] = math.atan2(row[1, 0], row[1, 1]) + angles[0] = math.atan2(-row[2, 1], row[1, 1]) + angles[2] = 0.0 + + return scale, shear, angles, translate, perspective + + +def compose_matrix(scale=None, shear=None, angles=None, translate=None, + perspective=None): + """Return transformation matrix from sequence of transformations. + + This is the inverse of the decompose_matrix function. 
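+
+ The factors are multiplied as M = perspective @ translate @ rotate @
+ shear @ scale, so scaling acts first when M is applied to a vector.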
+ + Sequence of transformations: + scale : vector of 3 scaling factors + shear : list of shear factors for x-y, x-z, y-z axes + angles : list of Euler angles about static x, y, z axes + translate : translation vector along x, y, z axes + perspective : perspective partition of matrix + + >>> scale = numpy.random.random(3) - 0.5 + >>> shear = numpy.random.random(3) - 0.5 + >>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi) + >>> trans = numpy.random.random(3) - 0.5 + >>> persp = numpy.random.random(4) - 0.5 + >>> M0 = compose_matrix(scale, shear, angles, trans, persp) + >>> result = decompose_matrix(M0) + >>> M1 = compose_matrix(*result) + >>> is_same_transform(M0, M1) + True + + """ + M = numpy.identity(4) + if perspective is not None: + P = numpy.identity(4) + P[3, :] = perspective[:4] + M = numpy.dot(M, P) + if translate is not None: + T = numpy.identity(4) + T[:3, 3] = translate[:3] + M = numpy.dot(M, T) + if angles is not None: + R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz') + M = numpy.dot(M, R) + if shear is not None: + Z = numpy.identity(4) + Z[1, 2] = shear[2] + Z[0, 2] = shear[1] + Z[0, 1] = shear[0] + M = numpy.dot(M, Z) + if scale is not None: + S = numpy.identity(4) + S[0, 0] = scale[0] + S[1, 1] = scale[1] + S[2, 2] = scale[2] + M = numpy.dot(M, S) + M /= M[3, 3] + return M + + +def orthogonalization_matrix(lengths, angles): + """Return orthogonalization matrix for crystallographic cell coordinates. + + Angles are expected in degrees. + + The de-orthogonalization matrix is the inverse. + + >>> O = orthogonalization_matrix([10, 10, 10], [90, 90, 90]) + >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10) + True + >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7]) + >>> numpy.allclose(numpy.sum(O), 43.063229) + True + + """ + a, b, c = lengths + angles = numpy.radians(angles) + sina, sinb, _ = numpy.sin(angles) + cosa, cosb, cosg = numpy.cos(angles) + co = (cosa * cosb - cosg) / (sina * sinb) + return numpy.array([ + [ a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0], + [-a*sinb*co, b*sina, 0.0, 0.0], + [ a*cosb, b*cosa, c, 0.0], + [ 0.0, 0.0, 0.0, 1.0]]) + + +def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True): + """Return affine transform matrix to register two point sets. + + v0 and v1 are shape (ndims, \*) arrays of at least ndims non-homogeneous + coordinates, where ndims is the dimensionality of the coordinate space. + + If shear is False, a similarity transformation matrix is returned. + If also scale is False, a rigid/Euclidean transformation matrix + is returned. + + By default the algorithm by Hartley and Zissermann [15] is used. + If usesvd is True, similarity and Euclidean transformation matrices + are calculated by minimizing the weighted sum of squared deviations + (RMSD) according to the algorithm by Kabsch [8]. + Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9] + is used, which is slower when using this Python implementation. + + The returned matrix performs rotation, translation and uniform scaling + (if specified). + + >>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]] + >>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]] + >>> affine_matrix_from_points(v0, v1) + array([[ 0.14549, 0.00062, 675.50008], + [ 0.00048, 0.14094, 53.24971], + [ 0. , 0. , 1. 
]]) + >>> T = translation_matrix(numpy.random.random(3)-0.5) + >>> R = random_rotation_matrix(numpy.random.random(3)) + >>> S = scale_matrix(random.random()) + >>> M = concatenate_matrices(T, R, S) + >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20 + >>> v0[3] = 1 + >>> v1 = numpy.dot(M, v0) + >>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1) + >>> M = affine_matrix_from_points(v0[:3], v1[:3]) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + + More examples in superimposition_matrix() + + """ + v0 = numpy.array(v0, dtype=numpy.float64, copy=True) + v1 = numpy.array(v1, dtype=numpy.float64, copy=True) + + ndims = v0.shape[0] + if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape: + raise ValueError('input arrays are of wrong shape or type') + + # move centroids to origin + t0 = -numpy.mean(v0, axis=1) + M0 = numpy.identity(ndims+1) + M0[:ndims, ndims] = t0 + v0 += t0.reshape(ndims, 1) + t1 = -numpy.mean(v1, axis=1) + M1 = numpy.identity(ndims+1) + M1[:ndims, ndims] = t1 + v1 += t1.reshape(ndims, 1) + + if shear: + # Affine transformation + A = numpy.concatenate((v0, v1), axis=0) + u, s, vh = numpy.linalg.svd(A.T) + vh = vh[:ndims].T + B = vh[:ndims] + C = vh[ndims:2*ndims] + t = numpy.dot(C, numpy.linalg.pinv(B)) + t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1) + M = numpy.vstack((t, ((0.0,)*ndims) + (1.0,))) + elif usesvd or ndims != 3: + # Rigid transformation via SVD of covariance matrix + u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T)) + # rotation matrix from SVD orthonormal bases + R = numpy.dot(u, vh) + if numpy.linalg.det(R) < 0.0: + # R does not constitute right handed system + R -= numpy.outer(u[:, ndims-1], vh[ndims-1, :]*2.0) + s[-1] *= -1.0 + # homogeneous transformation matrix + M = numpy.identity(ndims+1) + M[:ndims, :ndims] = R + else: + # Rigid transformation matrix via quaternion + # compute symmetric matrix N + xx, yy, zz = numpy.sum(v0 * v1, axis=1) + xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1) + xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1) + N = [[xx+yy+zz, 0.0, 0.0, 0.0], + [yz-zy, xx-yy-zz, 0.0, 0.0], + [zx-xz, xy+yx, yy-xx-zz, 0.0], + [xy-yx, zx+xz, yz+zy, zz-xx-yy]] + # quaternion: eigenvector corresponding to most positive eigenvalue + w, V = numpy.linalg.eigh(N) + q = V[:, numpy.argmax(w)] + q /= vector_norm(q) # unit quaternion + # homogeneous transformation matrix + M = quaternion_matrix(q) + + if scale and not shear: + # Affine transformation; scale is ratio of RMS deviations from centroid + v0 *= v0 + v1 *= v1 + M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0)) + + # move centroids back + M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0)) + M /= M[ndims, ndims] + return M + + +def superimposition_matrix(v0, v1, scale=False, usesvd=True): + """Return matrix to transform given 3D point set into second point set. + + v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 points. + + The parameters scale and usesvd are explained in the more general + affine_matrix_from_points function. + + The returned matrix is a similarity or Euclidean transformation matrix. + This function has a fast C implementation in transformations.c. 
+ + >>> v0 = numpy.random.rand(3, 10) + >>> M = superimposition_matrix(v0, v0) + >>> numpy.allclose(M, numpy.identity(4)) + True + >>> R = random_rotation_matrix(numpy.random.random(3)) + >>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]] + >>> v1 = numpy.dot(R, v0) + >>> M = superimposition_matrix(v0, v1) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20 + >>> v0[3] = 1 + >>> v1 = numpy.dot(R, v0) + >>> M = superimposition_matrix(v0, v1) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> S = scale_matrix(random.random()) + >>> T = translation_matrix(numpy.random.random(3)-0.5) + >>> M = concatenate_matrices(T, R, S) + >>> v1 = numpy.dot(M, v0) + >>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1) + >>> M = superimposition_matrix(v0, v1, scale=True) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> v = numpy.empty((4, 100, 3)) + >>> v[:, :, 0] = v0 + >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False) + >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0])) + True + + """ + v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3] + v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3] + return affine_matrix_from_points(v0, v1, shear=False, + scale=scale, usesvd=usesvd) + + +def euler_matrix(ai, aj, ak, axes='sxyz'): + """Return homogeneous rotation matrix from Euler angles and axis sequence. + + ai, aj, ak : Euler's roll, pitch and yaw angles + axes : One of 24 axis sequences as string or encoded tuple + + >>> R = euler_matrix(1, 2, 3, 'syxz') + >>> numpy.allclose(numpy.sum(R[0]), -1.34786452) + True + >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1)) + >>> numpy.allclose(numpy.sum(R[0]), -0.383436184) + True + >>> ai, aj, ak = (4*math.pi) * (numpy.random.random(3) - 0.5) + >>> for axes in _AXES2TUPLE.keys(): + ... R = euler_matrix(ai, aj, ak, axes) + >>> for axes in _TUPLE2AXES.keys(): + ... R = euler_matrix(ai, aj, ak, axes) + + """ + try: + firstaxis, parity, repetition, frame = _AXES2TUPLE[axes] + except (AttributeError, KeyError): + _TUPLE2AXES[axes] # noqa: validation + firstaxis, parity, repetition, frame = axes + + i = firstaxis + j = _NEXT_AXIS[i+parity] + k = _NEXT_AXIS[i-parity+1] + + if frame: + ai, ak = ak, ai + if parity: + ai, aj, ak = -ai, -aj, -ak + + si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak) + ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak) + cc, cs = ci*ck, ci*sk + sc, ss = si*ck, si*sk + + M = numpy.identity(4) + if repetition: + M[i, i] = cj + M[i, j] = sj*si + M[i, k] = sj*ci + M[j, i] = sj*sk + M[j, j] = -cj*ss+cc + M[j, k] = -cj*cs-sc + M[k, i] = -sj*ck + M[k, j] = cj*sc+cs + M[k, k] = cj*cc-ss + else: + M[i, i] = cj*ck + M[i, j] = sj*sc-cs + M[i, k] = sj*cc+ss + M[j, i] = cj*sk + M[j, j] = sj*ss+cc + M[j, k] = sj*cs-sc + M[k, i] = -sj + M[k, j] = cj*si + M[k, k] = cj*ci + return M + + +def euler_from_matrix(matrix, axes='sxyz'): + """Return Euler angles from rotation matrix for specified axis sequence. + + axes : One of 24 axis sequences as string or encoded tuple + + Note that many Euler angle triplets can describe one matrix. + + >>> R0 = euler_matrix(1, 2, 3, 'syxz') + >>> al, be, ga = euler_from_matrix(R0, 'syxz') + >>> R1 = euler_matrix(al, be, ga, 'syxz') + >>> numpy.allclose(R0, R1) + True + >>> angles = (4*math.pi) * (numpy.random.random(3) - 0.5) + >>> for axes in _AXES2TUPLE.keys(): + ... R0 = euler_matrix(axes=axes, *angles) + ... 
R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes)) + ... if not numpy.allclose(R0, R1): print(axes, "failed") + + """ + try: + firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] + except (AttributeError, KeyError): + _TUPLE2AXES[axes] # noqa: validation + firstaxis, parity, repetition, frame = axes + + i = firstaxis + j = _NEXT_AXIS[i+parity] + k = _NEXT_AXIS[i-parity+1] + + M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3] + if repetition: + sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k]) + if sy > _EPS: + ax = math.atan2( M[i, j], M[i, k]) + ay = math.atan2( sy, M[i, i]) + az = math.atan2( M[j, i], -M[k, i]) + else: + ax = math.atan2(-M[j, k], M[j, j]) + ay = math.atan2( sy, M[i, i]) + az = 0.0 + else: + cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i]) + if cy > _EPS: + ax = math.atan2( M[k, j], M[k, k]) + ay = math.atan2(-M[k, i], cy) + az = math.atan2( M[j, i], M[i, i]) + else: + ax = math.atan2(-M[j, k], M[j, j]) + ay = math.atan2(-M[k, i], cy) + az = 0.0 + + if parity: + ax, ay, az = -ax, -ay, -az + if frame: + ax, az = az, ax + return ax, ay, az + + +def euler_from_quaternion(quaternion, axes='sxyz'): + """Return Euler angles from quaternion for specified axis sequence. + + >>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0]) + >>> numpy.allclose(angles, [0.123, 0, 0]) + True + + """ + return euler_from_matrix(quaternion_matrix(quaternion), axes) + + +def quaternion_from_euler(ai, aj, ak, axes='sxyz'): + """Return quaternion from Euler angles and axis sequence. + + ai, aj, ak : Euler's roll, pitch and yaw angles + axes : One of 24 axis sequences as string or encoded tuple + + >>> q = quaternion_from_euler(1, 2, 3, 'ryxz') + >>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435]) + True + + """ + try: + firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] + except (AttributeError, KeyError): + _TUPLE2AXES[axes] # noqa: validation + firstaxis, parity, repetition, frame = axes + + i = firstaxis + 1 + j = _NEXT_AXIS[i+parity-1] + 1 + k = _NEXT_AXIS[i-parity] + 1 + + if frame: + ai, ak = ak, ai + if parity: + aj = -aj + + ai /= 2.0 + aj /= 2.0 + ak /= 2.0 + ci = math.cos(ai) + si = math.sin(ai) + cj = math.cos(aj) + sj = math.sin(aj) + ck = math.cos(ak) + sk = math.sin(ak) + cc = ci*ck + cs = ci*sk + sc = si*ck + ss = si*sk + + q = numpy.empty((4, )) + if repetition: + q[0] = cj*(cc - ss) + q[i] = cj*(cs + sc) + q[j] = sj*(cc + ss) + q[k] = sj*(cs - sc) + else: + q[0] = cj*cc + sj*ss + q[i] = cj*sc - sj*cs + q[j] = cj*ss + sj*cc + q[k] = cj*cs - sj*sc + if parity: + q[j] *= -1.0 + + return q + + +def quaternion_about_axis(angle, axis): + """Return quaternion for rotation about axis. + + >>> q = quaternion_about_axis(0.123, [1, 0, 0]) + >>> numpy.allclose(q, [0.99810947, 0.06146124, 0, 0]) + True + + """ + q = numpy.array([0.0, axis[0], axis[1], axis[2]]) + qlen = vector_norm(q) + if qlen > _EPS: + q *= math.sin(angle/2.0) / qlen + q[0] = math.cos(angle/2.0) + return q + + +def quaternion_matrix(quaternion): + """Return homogeneous rotation matrix from quaternion. 
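+
+ Quaternions in this module are stored as [w, x, y, z], scalar part
+ first.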
+ + >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0]) + >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0])) + True + >>> M = quaternion_matrix([1, 0, 0, 0]) + >>> numpy.allclose(M, numpy.identity(4)) + True + >>> M = quaternion_matrix([0, 1, 0, 0]) + >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1])) + True + + """ + q = numpy.array(quaternion, dtype=numpy.float64, copy=True) + n = numpy.dot(q, q) + if n < _EPS: + return numpy.identity(4) + q *= math.sqrt(2.0 / n) + q = numpy.outer(q, q) + return numpy.array([ + [1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0], + [ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0], + [ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0], + [ 0.0, 0.0, 0.0, 1.0]]) + + +def quaternion_from_matrix(matrix, isprecise=False): + """Return quaternion from rotation matrix. + + If isprecise is True, the input matrix is assumed to be a precise rotation + matrix and a faster algorithm is used. + + >>> q = quaternion_from_matrix(numpy.identity(4), True) + >>> numpy.allclose(q, [1, 0, 0, 0]) + True + >>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1])) + >>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0]) + True + >>> R = rotation_matrix(0.123, (1, 2, 3)) + >>> q = quaternion_from_matrix(R, True) + >>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786]) + True + >>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0], + ... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]] + >>> q = quaternion_from_matrix(R) + >>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611]) + True + >>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0], + ... [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]] + >>> q = quaternion_from_matrix(R) + >>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603]) + True + >>> R = random_rotation_matrix() + >>> q = quaternion_from_matrix(R) + >>> is_same_transform(R, quaternion_matrix(q)) + True + >>> is_same_quaternion(quaternion_from_matrix(R, isprecise=False), + ... quaternion_from_matrix(R, isprecise=True)) + True + >>> R = euler_matrix(0.0, 0.0, numpy.pi/2.0) + >>> is_same_quaternion(quaternion_from_matrix(R, isprecise=False), + ... quaternion_from_matrix(R, isprecise=True)) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4] + if isprecise: + q = numpy.empty((4, )) + t = numpy.trace(M) + if t > M[3, 3]: + q[0] = t + q[3] = M[1, 0] - M[0, 1] + q[2] = M[0, 2] - M[2, 0] + q[1] = M[2, 1] - M[1, 2] + else: + i, j, k = 0, 1, 2 + if M[1, 1] > M[0, 0]: + i, j, k = 1, 2, 0 + if M[2, 2] > M[i, i]: + i, j, k = 2, 0, 1 + t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3] + q[i] = t + q[j] = M[i, j] + M[j, i] + q[k] = M[k, i] + M[i, k] + q[3] = M[k, j] - M[j, k] + q = q[[3, 0, 1, 2]] + q *= 0.5 / math.sqrt(t * M[3, 3]) + else: + m00 = M[0, 0] + m01 = M[0, 1] + m02 = M[0, 2] + m10 = M[1, 0] + m11 = M[1, 1] + m12 = M[1, 2] + m20 = M[2, 0] + m21 = M[2, 1] + m22 = M[2, 2] + # symmetric matrix K + K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0], + [m01+m10, m11-m00-m22, 0.0, 0.0], + [m02+m20, m12+m21, m22-m00-m11, 0.0], + [m21-m12, m02-m20, m10-m01, m00+m11+m22]]) + K /= 3.0 + # quaternion is eigenvector of K that corresponds to largest eigenvalue + w, V = numpy.linalg.eigh(K) + q = V[[3, 0, 1, 2], numpy.argmax(w)] + if q[0] < 0.0: + numpy.negative(q, q) + return q + + +def quaternion_multiply(quaternion1, quaternion0): + """Return multiplication of two quaternions. 
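+
+ The Hamilton product quaternion1 * quaternion0 is returned; as a
+ rotation it applies quaternion0 first, then quaternion1.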
+ + >>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7]) + >>> numpy.allclose(q, [28, -44, -14, 48]) + True + + """ + w0, x0, y0, z0 = quaternion0 + w1, x1, y1, z1 = quaternion1 + return numpy.array([ + -x1*x0 - y1*y0 - z1*z0 + w1*w0, + x1*w0 + y1*z0 - z1*y0 + w1*x0, + -x1*z0 + y1*w0 + z1*x0 + w1*y0, + x1*y0 - y1*x0 + z1*w0 + w1*z0], dtype=numpy.float64) + + +def quaternion_conjugate(quaternion): + """Return conjugate of quaternion. + + >>> q0 = random_quaternion() + >>> q1 = quaternion_conjugate(q0) + >>> q1[0] == q0[0] and all(q1[1:] == -q0[1:]) + True + + """ + q = numpy.array(quaternion, dtype=numpy.float64, copy=True) + numpy.negative(q[1:], q[1:]) + return q + + +def quaternion_inverse(quaternion): + """Return inverse of quaternion. + + >>> q0 = random_quaternion() + >>> q1 = quaternion_inverse(q0) + >>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0]) + True + + """ + q = numpy.array(quaternion, dtype=numpy.float64, copy=True) + numpy.negative(q[1:], q[1:]) + return q / numpy.dot(q, q) + + +def quaternion_real(quaternion): + """Return real part of quaternion. + + >>> quaternion_real([3, 0, 1, 2]) + 3.0 + + """ + return float(quaternion[0]) + + +def quaternion_imag(quaternion): + """Return imaginary part of quaternion. + + >>> quaternion_imag([3, 0, 1, 2]) + array([ 0., 1., 2.]) + + """ + return numpy.array(quaternion[1:4], dtype=numpy.float64, copy=True) + + +def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True): + """Return spherical linear interpolation between two quaternions. + + >>> q0 = random_quaternion() + >>> q1 = random_quaternion() + >>> q = quaternion_slerp(q0, q1, 0) + >>> numpy.allclose(q, q0) + True + >>> q = quaternion_slerp(q0, q1, 1, 1) + >>> numpy.allclose(q, q1) + True + >>> q = quaternion_slerp(q0, q1, 0.5) + >>> angle = math.acos(numpy.dot(q0, q)) + >>> numpy.allclose(2, math.acos(numpy.dot(q0, q1)) / angle) or \ + numpy.allclose(2, math.acos(-numpy.dot(q0, q1)) / angle) + True + + """ + q0 = unit_vector(quat0[:4]) + q1 = unit_vector(quat1[:4]) + if fraction == 0.0: + return q0 + elif fraction == 1.0: + return q1 + d = numpy.dot(q0, q1) + if abs(abs(d) - 1.0) < _EPS: + return q0 + if shortestpath and d < 0.0: + # invert rotation + d = -d + numpy.negative(q1, q1) + angle = math.acos(d) + spin * math.pi + if abs(angle) < _EPS: + return q0 + isin = 1.0 / math.sin(angle) + q0 *= math.sin((1.0 - fraction) * angle) * isin + q1 *= math.sin(fraction * angle) * isin + q0 += q1 + return q0 + + +def random_quaternion(rand=None): + """Return uniform random unit quaternion. + + rand: array like or None + Three independent random variables that are uniformly distributed + between 0 and 1. + + >>> q = random_quaternion() + >>> numpy.allclose(1, vector_norm(q)) + True + >>> q = random_quaternion(numpy.random.random(3)) + >>> len(q.shape), q.shape[0]==4 + (1, True) + + """ + if rand is None: + rand = numpy.random.rand(3) + else: + assert len(rand) == 3 + r1 = numpy.sqrt(1.0 - rand[0]) + r2 = numpy.sqrt(rand[0]) + pi2 = math.pi * 2.0 + t1 = pi2 * rand[1] + t2 = pi2 * rand[2] + return numpy.array([numpy.cos(t2)*r2, numpy.sin(t1)*r1, + numpy.cos(t1)*r1, numpy.sin(t2)*r2]) + + +def random_rotation_matrix(rand=None): + """Return uniform random rotation matrix. + + rand: array like + Three independent random variables that are uniformly distributed + between 0 and 1 for each returned quaternion. 
+ + >>> R = random_rotation_matrix() + >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4)) + True + + """ + return quaternion_matrix(random_quaternion(rand)) + + +# epsilon for testing whether a number is close to zero +_EPS = numpy.finfo(float).eps * 4.0 + +# axis sequences for Euler angles +_NEXT_AXIS = [1, 2, 0, 1] + +# map axes strings to/from tuples of inner axis, parity, repetition, frame +_AXES2TUPLE = { + 'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0), + 'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0), + 'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0), + 'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0), + 'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1), + 'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1), + 'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1), + 'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)} + +_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items()) + + +def vector_norm(data, axis=None, out=None): + """Return length, i.e. Euclidean norm, of ndarray along axis. + + >>> v = numpy.random.random(3) + >>> n = vector_norm(v) + >>> numpy.allclose(n, numpy.linalg.norm(v)) + True + >>> v = numpy.random.rand(6, 5, 3) + >>> n = vector_norm(v, axis=-1) + >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2))) + True + >>> n = vector_norm(v, axis=1) + >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1))) + True + >>> v = numpy.random.rand(5, 4, 3) + >>> n = numpy.empty((5, 3)) + >>> vector_norm(v, axis=1, out=n) + >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1))) + True + >>> vector_norm([]) + 0.0 + >>> vector_norm([1]) + 1.0 + + """ + data = numpy.array(data, dtype=numpy.float64, copy=True) + if out is None: + if data.ndim == 1: + return math.sqrt(numpy.dot(data, data)) + data *= data + out = numpy.atleast_1d(numpy.sum(data, axis=axis)) + numpy.sqrt(out, out) + return out + else: + data *= data + numpy.sum(data, axis=axis, out=out) + numpy.sqrt(out, out) + + +def unit_vector(data, axis=None, out=None): + """Return ndarray normalized by length, i.e. Euclidean norm, along axis. + + >>> v0 = numpy.random.random(3) + >>> v1 = unit_vector(v0) + >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) + True + >>> v0 = numpy.random.rand(5, 4, 3) + >>> v1 = unit_vector(v0, axis=-1) + >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) + >>> numpy.allclose(v1, v2) + True + >>> v1 = unit_vector(v0, axis=1) + >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) + >>> numpy.allclose(v1, v2) + True + >>> v1 = numpy.empty((5, 4, 3)) + >>> unit_vector(v0, axis=1, out=v1) + >>> numpy.allclose(v1, v2) + True + >>> list(unit_vector([])) + [] + >>> list(unit_vector([1])) + [1.0] + + """ + if out is None: + data = numpy.array(data, dtype=numpy.float64, copy=True) + if data.ndim == 1: + data /= math.sqrt(numpy.dot(data, data)) + return data + else: + if out is not data: + out[:] = numpy.array(data, copy=False) + data = out + length = numpy.atleast_1d(numpy.sum(data*data, axis)) + numpy.sqrt(length, length) + if axis is not None: + length = numpy.expand_dims(length, axis) + data /= length + if out is None: + return data + + +def random_vector(size): + """Return array of random doubles in the half-open interval [0.0, 1.0). 
+ + >>> v = random_vector(10000) + >>> numpy.all(v >= 0) and numpy.all(v < 1) + True + >>> v0 = random_vector(10) + >>> v1 = random_vector(10) + >>> numpy.any(v0 == v1) + False + + """ + return numpy.random.random(size) + + +def vector_product(v0, v1, axis=0): + """Return vector perpendicular to vectors. + + >>> v = vector_product([2, 0, 0], [0, 3, 0]) + >>> numpy.allclose(v, [0, 0, 6]) + True + >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]] + >>> v1 = [[3], [0], [0]] + >>> v = vector_product(v0, v1) + >>> numpy.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]]) + True + >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]] + >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]] + >>> v = vector_product(v0, v1, axis=1) + >>> numpy.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]]) + True + + """ + return numpy.cross(v0, v1, axis=axis) + + +def angle_between_vectors(v0, v1, directed=True, axis=0): + """Return angle between vectors. + + If directed is False, the input vectors are interpreted as undirected axes, + i.e. the maximum angle is pi/2. + + >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3]) + >>> numpy.allclose(a, math.pi) + True + >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False) + >>> numpy.allclose(a, 0) + True + >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]] + >>> v1 = [[3], [0], [0]] + >>> a = angle_between_vectors(v0, v1) + >>> numpy.allclose(a, [0, 1.5708, 1.5708, 0.95532]) + True + >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]] + >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]] + >>> a = angle_between_vectors(v0, v1, axis=1) + >>> numpy.allclose(a, [1.5708, 1.5708, 1.5708, 0.95532]) + True + + """ + v0 = numpy.array(v0, dtype=numpy.float64, copy=False) + v1 = numpy.array(v1, dtype=numpy.float64, copy=False) + dot = numpy.sum(v0 * v1, axis=axis) + dot /= vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis) + dot = numpy.clip(dot, -1.0, 1.0) + return numpy.arccos(dot if directed else numpy.fabs(dot)) + + +def inverse_matrix(matrix): + """Return inverse of square transformation matrix. + + >>> M0 = random_rotation_matrix() + >>> M1 = inverse_matrix(M0.T) + >>> numpy.allclose(M1, numpy.linalg.inv(M0.T)) + True + >>> for size in range(1, 7): + ... M0 = numpy.random.rand(size, size) + ... M1 = inverse_matrix(M0) + ... if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size) + + """ + return numpy.linalg.inv(matrix) + + +def concatenate_matrices(*matrices): + """Return concatenation of series of transformation matrices. + + >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5 + >>> numpy.allclose(M, concatenate_matrices(M)) + True + >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T)) + True + + """ + M = numpy.identity(4) + for i in matrices: + M = numpy.dot(M, i) + return M + + +def is_same_transform(matrix0, matrix1): + """Return True if two matrices perform same transformation. 
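+
+ Both matrices are normalized by their M[3, 3] element before
+ comparison, so matrices differing only by a homogeneous scale compare
+ equal.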
+ + >>> is_same_transform(numpy.identity(4), numpy.identity(4)) + True + >>> is_same_transform(numpy.identity(4), random_rotation_matrix()) + False + + """ + matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True) + matrix0 /= matrix0[3, 3] + matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True) + matrix1 /= matrix1[3, 3] + return numpy.allclose(matrix0, matrix1) + + +def is_same_quaternion(q0, q1): + """Return True if two quaternions are equal.""" + q0 = numpy.array(q0) + q1 = numpy.array(q1) + return numpy.allclose(q0, q1) or numpy.allclose(q0, -q1) + + +def _import_module(name, package=None, warn=True, postfix='_py', ignore='_'): + """Try import all public attributes from module into global namespace. + + Existing attributes with name clashes are renamed with prefix. + Attributes starting with underscore are ignored by default. + + Return True on successful import. + + """ + import warnings + from importlib import import_module + try: + if not package: + module = import_module(name) + else: + module = import_module('.' + name, package=package) + except ImportError as err: + if warn: + warnings.warn(str(err)) + else: + for attr in dir(module): + if ignore and attr.startswith(ignore): + continue + if postfix: + if attr in globals(): + globals()[attr + postfix] = globals()[attr] + elif warn: + warnings.warn('no Python implementation of ' + attr) + globals()[attr] = getattr(module, attr) + return True + + +# _import_module('_transformations', __package__) + + +# if __name__ == '__main__': +# import doctest +# import random # noqa: used in doctests +# try: +# numpy.set_printoptions(suppress=True, precision=5, legacy='1.13') +# except TypeError: +# numpy.set_printoptions(suppress=True, precision=5) +# doctest.testmod() \ No newline at end of file