add yapf style and apply yapf to format all Python files

This recreates pull request #2192
Author: Erwin Coumans
Date:   2019-04-27 07:31:15 -07:00
Parent: c591735042
Commit: ef9570c315
347 changed files with 70304 additions and 22752 deletions

View File

@@ -1,26 +1,28 @@
import numpy as np

from quaternion import qrot, qinverse


def normalize_screen_coordinates(X, w, h):
  assert X.shape[-1] == 2

  # Normalize so that [0, w] is mapped to [-1, 1], while preserving the aspect ratio
  return X / w * 2 - [1, h / w]


def image_coordinates(X, w, h):
  assert X.shape[-1] == 2

  # Reverse camera frame normalization
  return (X + [1, h / w]) * w / 2


def world_to_camera(X, R, t):
  Rt = qinverse(R)
  Q = np.tile(Rt, (*X.shape[:-1], 1))
  V = X - t
  return qrot(Q, V)


def camera_to_world(X, R, t):
  Q = np.tile(R, (*X.shape[:-1], 1))
  V = X
  return qrot(Q, V) + t
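A minimal round-trip sketch (not part of the commit; it assumes these helpers live in a module importable as camera, which the imports in the next file suggest):

import numpy as np
from camera import normalize_screen_coordinates, image_coordinates

# Hypothetical keypoints in a 1000x1002 image; normalization and its
# inverse should round-trip exactly.
X = np.array([[500.0, 501.0], [0.0, 0.0]])
Xn = normalize_screen_coordinates(X, w=1000, h=1002)
assert np.allclose(image_coordinates(Xn, w=1000, h=1002), X)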

View File

@@ -3,54 +3,63 @@ from h36m_dataset import Human36mDataset
from camera import *
import numpy as np

# In[2]:

joint_info = {
    'joint_name': [
        'root', 'right_hip', 'right_knee', 'right_ankle', 'left_hip', 'left_knee', 'left_ankle',
        'chest', 'neck', 'nose', 'eye', 'left_shoulder', 'left_elbow', 'left_wrist',
        'right_shoulder', 'right_elbow', 'right_wrist'
    ],
    'father': [0, 0, 1, 2, 0, 4, 5, 0, 7, 8, 9, 8, 11, 12, 8, 14, 15],
    'side': [
        'middle', 'right', 'right', 'right', 'left', 'left', 'left', 'middle', 'middle', 'middle',
        'middle', 'left', 'left', 'left', 'right', 'right', 'right'
    ]
}

# In[3]:


def init_fb_h36m_dataset(dataset_path):
  dataset = Human36mDataset(dataset_path)
  print('Preparing Facebook Human3.6M Dataset...')
  for subject in dataset.subjects():
    for action in dataset[subject].keys():
      anim = dataset[subject][action]
      positions_3d = []
      for cam in anim['cameras']:
        pos_3d = world_to_camera(anim['positions'], R=cam['orientation'], t=cam['translation'])
        # Remove global offset, but keep trajectory in first position
        pos_3d[:, 1:] -= pos_3d[:, :1]
        positions_3d.append(pos_3d)
      anim['positions_3d'] = positions_3d
  return dataset


def pose3D_from_fb_h36m(dataset, subject, action, shift):
  pose_seq = dataset[subject][action]['positions_3d'][0].copy()
  trajectory = pose_seq[:, :1]
  pose_seq[:, 1:] += trajectory
  # Invert camera transformation
  cam = dataset.cameras()[subject][0]
  pose_seq = camera_to_world(pose_seq, R=cam['orientation'], t=cam['translation'])
  x = pose_seq[:, :, 0:1]
  y = pose_seq[:, :, 1:2] * -1
  z = pose_seq[:, :, 2:3]
  pose_seq = np.concatenate((x, z, y), axis=2)
  # plus shift
  pose_seq += np.array([[shift for i in range(pose_seq.shape[1])] for j in range(pose_seq.shape[0])])
  return pose_seq


def rot_seq_to_deepmimic_json(rot_seq, loop, json_path):
  to_json = {"Loop": loop, "Frames": []}
  rot_seq = np.around(rot_seq, decimals=6)
  to_json["Frames"] = rot_seq.tolist()

  # In[14]:

  to_file = json.dumps(to_json)
  file = open(json_path, "w")
  file.write(to_file)
  file.close()
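A sketch of what the writer emits (not part of the commit; it assumes json is imported in the file's unshown first lines, and the path is hypothetical):

# Each frame produced by coord_to_rot below is a flat list of 44 values
# (duration, root position, then 12 joint rotations).
rot_seq = [[0.0417] + [0.0] * 43, [0.0417] + [0.0] * 43]
rot_seq_to_deepmimic_json(rot_seq=rot_seq, loop='wrap', json_path='/tmp/clip.json')
# /tmp/clip.json now contains {"Loop": "wrap", "Frames": [[...], [...]]}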

View File

@@ -3,11 +3,13 @@ import copy
from skeleton import Skeleton
from mocap_dataset import MocapDataset
from camera import normalize_screen_coordinates, image_coordinates
h36m_skeleton = Skeleton(parents=[
    -1, 0, 1, 2, 3, 4, 0, 6, 7, 8, 9, 0, 11, 12, 13, 14, 12, 16, 17, 18, 19, 20, 19, 22, 12, 24,
    25, 26, 27, 28, 27, 30
],
                         joints_left=[6, 7, 8, 9, 10, 16, 17, 18, 19, 20, 21, 22, 23],
                         joints_right=[1, 2, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31])
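The parents array encodes the kinematic tree: entry i is the parent of joint i, with -1 marking the root. A quick sketch (not part of the commit) walking joint 19 back to the root:

parents = [-1, 0, 1, 2, 3, 4, 0, 6, 7, 8, 9, 0, 11, 12, 13, 14, 12, 16, 17, 18, 19, 20, 19, 22,
           12, 24, 25, 26, 27, 28, 27, 30]
j, chain = 19, [19]
while parents[j] != -1:
  j = parents[j]
  chain.append(j)
print(chain)  # [19, 18, 17, 16, 12, 11, 0]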
h36m_cameras_intrinsic_params = [
    {
@@ -18,7 +20,7 @@ h36m_cameras_intrinsic_params = [
        'tangential_distortion': [-0.0009756988729350269, -0.00142447161488235],
        'res_w': 1000,
        'res_h': 1002,
        'azimuth': 70,  # Only used for visualization
    },
    {
        'id': '55011271',
@@ -28,7 +30,7 @@ h36m_cameras_intrinsic_params = [
        'tangential_distortion': [-0.0016190266469493508, -0.0027408944442868233],
        'res_w': 1000,
        'res_h': 1000,
        'azimuth': -70,  # Only used for visualization
    },
    {
        'id': '58860488',
@@ -38,7 +40,7 @@ h36m_cameras_intrinsic_params = [
        'tangential_distortion': [0.0014843869721516967, -0.0007599993259645998],
        'res_w': 1000,
        'res_h': 1000,
        'azimuth': 110,  # Only used for visualization
    },
    {
        'id': '60457274',
@@ -48,26 +50,34 @@ h36m_cameras_intrinsic_params = [
        'tangential_distortion': [-0.0005872055771760643, -0.0018133620033040643],
        'res_w': 1000,
        'res_h': 1002,
        'azimuth': -110,  # Only used for visualization
    },
]

h36m_cameras_extrinsic_params = {
    'S1': [
        {
            'orientation': [
                0.1407056450843811, -0.1500701755285263, -0.755240797996521, 0.6223280429840088
            ],
            'translation': [1841.1070556640625, 4955.28466796875, 1563.4454345703125],
        },
        {
            'orientation': [
                0.6157187819480896, -0.764836311340332, -0.14833825826644897, 0.11794740706682205
            ],
            'translation': [1761.278564453125, -5078.0068359375, 1606.2650146484375],
        },
        {
            'orientation': [
                0.14651472866535187, -0.14647851884365082, 0.7653023600578308, -0.6094175577163696
            ],
            'translation': [-1846.7777099609375, 5215.04638671875, 1491.972412109375],
        },
        {
            'orientation': [
                0.5834008455276489, -0.7853162288665771, 0.14548823237419128, -0.14749594032764435
            ],
            'translation': [-1794.7896728515625, -3722.698974609375, 1574.8927001953125],
        },
    ],
@@ -91,158 +101,206 @@ h36m_cameras_extrinsic_params = {
    ],
    'S5': [
        {
            'orientation': [
                0.1467377245426178, -0.162370964884758, -0.7551892995834351, 0.6178938746452332
            ],
            'translation': [2097.3916015625, 4880.94482421875, 1605.732421875],
        },
        {
            'orientation': [
                0.6159758567810059, -0.7626792192459106, -0.15728192031383514, 0.1189815029501915
            ],
            'translation': [2031.7008056640625, -5167.93310546875, 1612.923095703125],
        },
        {
            'orientation': [
                0.14291371405124664, -0.12907841801643372, 0.7678384780883789, -0.6110143065452576
            ],
            'translation': [-1620.5948486328125, 5171.65869140625, 1496.43701171875],
        },
        {
            'orientation': [
                0.5920479893684387, -0.7814217805862427, 0.1274748593568802, -0.15036417543888092
            ],
            'translation': [-1637.1737060546875, -3867.3173828125, 1547.033203125],
        },
    ],
    'S6': [
        {
            'orientation': [
                0.1337897777557373, -0.15692396461963654, -0.7571090459823608, 0.6198879480361938
            ],
            'translation': [1935.4517822265625, 4950.24560546875, 1618.0838623046875],
        },
        {
            'orientation': [
                0.6147197484970093, -0.7628812789916992, -0.16174767911434174, 0.11819244921207428
            ],
            'translation': [1969.803955078125, -5128.73876953125, 1632.77880859375],
        },
        {
            'orientation': [
                0.1529948115348816, -0.13529130816459656, 0.7646096348762512, -0.6112781167030334
            ],
            'translation': [-1769.596435546875, 5185.361328125, 1476.993408203125],
        },
        {
            'orientation': [
                0.5916101336479187, -0.7804774045944214, 0.12832270562648773, -0.1561593860387802
            ],
            'translation': [-1721.668701171875, -3884.13134765625, 1540.4879150390625],
        },
    ],
    'S7': [
        {
            'orientation': [
                0.1435241848230362, -0.1631336808204651, -0.7548328638076782, 0.6188824772834778
            ],
            'translation': [1974.512939453125, 4926.3544921875, 1597.8326416015625],
        },
        {
            'orientation': [
                0.6141672730445862, -0.7638262510299683, -0.1596645563840866, 0.1177929937839508
            ],
            'translation': [1937.0584716796875, -5119.7900390625, 1631.5665283203125],
        },
        {
            'orientation': [
                0.14550060033798218, -0.12874816358089447, 0.7660516500473022, -0.6127139329910278
            ],
            'translation': [-1741.8111572265625, 5208.24951171875, 1464.8245849609375],
        },
        {
            'orientation': [
                0.5912848114967346, -0.7821764349937439, 0.12445473670959473, -0.15196487307548523
            ],
            'translation': [-1734.7105712890625, -3832.42138671875, 1548.5830078125],
        },
    ],
    'S8': [
        {
            'orientation': [
                0.14110587537288666, -0.15589867532253265, -0.7561917304992676, 0.619644045829773
            ],
            'translation': [2150.65185546875, 4896.1611328125, 1611.9046630859375],
        },
        {
            'orientation': [
                0.6169601678848267, -0.7647668123245239, -0.14846350252628326, 0.11158157885074615
            ],
            'translation': [2219.965576171875, -5148.453125, 1613.0440673828125],
        },
        {
            'orientation': [
                0.1471444070339203, -0.13377119600772858, 0.7670128345489502, -0.6100369691848755
            ],
            'translation': [-1571.2215576171875, 5137.0185546875, 1498.1761474609375],
        },
        {
            'orientation': [
                0.5927824378013611, -0.7825870513916016, 0.12147816270589828, -0.14631995558738708
            ],
            'translation': [-1476.913330078125, -3896.7412109375, 1547.97216796875],
        },
    ],
    'S9': [
        {
            'orientation': [
                0.15540587902069092, -0.15548215806484222, -0.7532095313072205, 0.6199594736099243
            ],
            'translation': [2044.45849609375, 4935.1171875, 1481.2275390625],
        },
        {
            'orientation': [
                0.618784487247467, -0.7634735107421875, -0.14132238924503326, 0.11933968216180801
            ],
            'translation': [1990.959716796875, -5123.810546875, 1568.8048095703125],
        },
        {
            'orientation': [
                0.13357827067375183, -0.1367100477218628, 0.7689454555511475, -0.6100738644599915
            ],
            'translation': [-1670.9921875, 5211.98583984375, 1528.387939453125],
        },
        {
            'orientation': [
                0.5879399180412292, -0.7823407053947449, 0.1427614390850067, -0.14794869720935822
            ],
            'translation': [-1696.04345703125, -3827.099853515625, 1591.4127197265625],
        },
    ],
    'S11': [
        {
            'orientation': [
                0.15232472121715546, -0.15442320704460144, -0.7547563314437866, 0.6191070079803467
            ],
            'translation': [2098.440185546875, 4926.5546875, 1500.278564453125],
        },
        {
            'orientation': [
                0.6189449429512024, -0.7600917220115662, -0.15300633013248444, 0.1255258321762085
            ],
            'translation': [2083.182373046875, -4912.1728515625, 1561.07861328125],
        },
        {
            'orientation': [
                0.14943228662014008, -0.15650227665901184, 0.7681233882904053, -0.6026304364204407
            ],
            'translation': [-1609.8153076171875, 5177.3359375, 1537.896728515625],
        },
        {
            'orientation': [
                0.5894251465797424, -0.7818877100944519, 0.13991211354732513, -0.14715361595153809
            ],
            'translation': [-1590.738037109375, -3854.1689453125, 1578.017578125],
        },
    ],
}
class Human36mDataset(MocapDataset):

  def __init__(self, path, remove_static_joints=True):
    super().__init__(fps=50, skeleton=h36m_skeleton)

    self._cameras = copy.deepcopy(h36m_cameras_extrinsic_params)
    for cameras in self._cameras.values():
      for i, cam in enumerate(cameras):
        cam.update(h36m_cameras_intrinsic_params[i])
        for k, v in cam.items():
          if k not in ['id', 'res_w', 'res_h']:
            cam[k] = np.array(v, dtype='float32')

        # Normalize camera frame
        cam['center'] = normalize_screen_coordinates(cam['center'], w=cam['res_w'],
                                                     h=cam['res_h']).astype('float32')
        cam['focal_length'] = cam['focal_length'] / cam['res_w'] * 2
        if 'translation' in cam:
          cam['translation'] = cam['translation'] / 1000  # mm to meters

        # Add intrinsic parameters vector
        cam['intrinsic'] = np.concatenate((cam['focal_length'], cam['center'],
                                           cam['radial_distortion'], cam['tangential_distortion']))

    # Load serialized dataset
    data = np.load(path)['positions_3d'].item()

    self._data = {}
    for subject, actions in data.items():
      self._data[subject] = {}
      for action_name, positions in actions.items():
        self._data[subject][action_name] = {
            'positions': positions,
            'cameras': self._cameras[subject],
        }

    if remove_static_joints:
      # Bring the skeleton to 17 joints instead of the original 32
      self.remove_joints([4, 5, 9, 10, 11, 16, 20, 21, 22, 23, 24, 28, 29, 30, 31])

      # Rewire shoulders to the correct parents
      self._skeleton._parents[11] = 8
      self._skeleton._parents[14] = 8

  def supports_semi_supervised(self):
    return True
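A quick usage sketch (not part of the commit; the path and printed values are hypothetical, assuming a data_3d_h36m.npz file in the layout this loader expects):

dataset = Human36mDataset('data/data_3d_h36m.npz')
print(sorted(dataset.subjects()))       # subjects such as 'S1', 'S5', ..., 'S11'
print(dataset.skeleton().num_joints())  # 17 after the static joints are removed
cam = dataset.cameras()['S1'][0]
print(cam['intrinsic'])  # focal length, center, and distortion in one vector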

View File

@@ -9,142 +9,138 @@ from transformation import *
from pyquaternion import Quaternion
def get_angle(vec1, vec2):
  cos_theta = np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
  return acos(cos_theta)
def get_quaternion(ox, oy, oz, x, y, z):
  # given transformed axis in x-y-z order return a quaternion
  ox /= np.linalg.norm(ox)
  oy /= np.linalg.norm(oy)
  oz /= np.linalg.norm(oz)

  set1 = np.vstack((ox, oy, oz))

  x /= np.linalg.norm(x)
  y /= np.linalg.norm(y)
  z /= np.linalg.norm(z)

  set2 = np.vstack((x, y, z))
  rot_mat = superimposition_matrix(set1, set2, scale=False, usesvd=True)
  rot_qua = quaternion_from_matrix(rot_mat)

  return rot_qua
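A sanity-check sketch (not part of the commit; it assumes transformation.py provides the classic superimposition_matrix/quaternion_from_matrix helpers): mapping a frame onto itself should yield the identity quaternion.

x = np.array([1.0, 0, 0])
y = np.array([0, 1.0, 0])
z = np.array([0, 0, 1.0])
q = get_quaternion(x.copy(), y.copy(), z.copy(), x, y, z)
print(np.round(q, 6))  # ~ [1, 0, 0, 0] (w-first), up to sign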
# 3D coord to deepmimic rotations
def coord_to_rot(frameNum, frame, frame_duration):
  eps = 0.001
  axis_rotate_rate = 0.3

  frame = np.array(frame)
  tmp = [[] for i in range(15)]
  # duration of frame in seconds (1D),
  tmp[0] = [frame_duration]
  # root position (3D),
  tmp[1] = frame[0]
  # root rotation (4D),
  root_y = (frame[7] - frame[0])
  root_z = (frame[1] - frame[0])
  root_x = np.cross(root_y, root_z)

  x = np.array([1.0, 0, 0])
  y = np.array([0, 1.0, 0])
  z = np.array([0, 0, 1.0])

  rot_qua = get_quaternion(root_x, root_y, root_z, x, y, z)
  tmp[2] = list(rot_qua)

  # chest rotation (4D),
  chest_y = (frame[8] - frame[7])
  chest_z = (frame[14] - frame[8])
  chest_x = np.cross(chest_y, chest_z)
  rot_qua = get_quaternion(chest_x, chest_y, chest_z, root_x, root_y, root_z)
  tmp[3] = list(rot_qua)

  # neck rotation (4D),
  neck_y = (frame[10] - frame[8])
  neck_z = np.cross(frame[10] - frame[9], frame[8] - frame[9])
  neck_x = np.cross(neck_y, neck_z)
  rot_qua = get_quaternion(neck_x, neck_y, neck_z, chest_x, chest_y, chest_z)
  tmp[4] = list(rot_qua)

  # right hip rotation (4D),
  r_hip_y = (frame[1] - frame[2])
  r_hip_z = np.cross(frame[1] - frame[2], frame[3] - frame[2])
  r_hip_x = np.cross(r_hip_y, r_hip_z)
  rot_qua = get_quaternion(r_hip_x, r_hip_y, r_hip_z, root_x, root_y, root_z)
  tmp[5] = list(rot_qua)

  # right knee rotation (1D),
  vec1 = frame[1] - frame[2]
  vec2 = frame[3] - frame[2]
  angle1 = get_angle(vec1, vec2)
  tmp[6] = [angle1 - pi]

  # right ankle rotation (4D),
  tmp[7] = [1, 0, 0, 0]

  # right shoulder rotation (4D),
  r_shou_y = (frame[14] - frame[15])
  r_shou_z = np.cross(frame[16] - frame[15], frame[14] - frame[15])
  r_shou_x = np.cross(r_shou_y, r_shou_z)
  rot_qua = get_quaternion(r_shou_x, r_shou_y, r_shou_z, chest_x, chest_y, chest_z)
  tmp[8] = list(rot_qua)

  # right elbow rotation (1D),
  vec1 = frame[14] - frame[15]
  vec2 = frame[16] - frame[15]
  angle1 = get_angle(vec1, vec2)
  tmp[9] = [pi - angle1]

  # left hip rotation (4D),
  l_hip_y = (frame[4] - frame[5])
  l_hip_z = np.cross(frame[4] - frame[5], frame[6] - frame[5])
  l_hip_x = np.cross(l_hip_y, l_hip_z)
  rot_qua = get_quaternion(l_hip_x, l_hip_y, l_hip_z, root_x, root_y, root_z)
  tmp[10] = list(rot_qua)

  # left knee rotation (1D),
  vec1 = frame[4] - frame[5]
  vec2 = frame[6] - frame[5]
  angle1 = get_angle(vec1, vec2)
  tmp[11] = [angle1 - pi]

  # left ankle rotation (4D),
  tmp[12] = [1, 0, 0, 0]

  # left shoulder rotation (4D),
  l_shou_y = (frame[11] - frame[12])
  l_shou_z = np.cross(frame[13] - frame[12], frame[11] - frame[12])
  l_shou_x = np.cross(l_shou_y, l_shou_z)
  rot_qua = get_quaternion(l_shou_x, l_shou_y, l_shou_z, chest_x, chest_y, chest_z)
  tmp[13] = list(rot_qua)

  # left elbow rotation (1D)
  vec1 = frame[11] - frame[12]
  vec2 = frame[13] - frame[12]
  angle1 = get_angle(vec1, vec2)
  tmp[14] = [pi - angle1]

  ret = []
  for i in tmp:
    ret += list(i)
  return np.array(ret)
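The assembled frame concatenates 15 fields in DeepMimic's humanoid order; a quick count (not part of the commit) confirms 44 values per frame:

# duration(1) + root pos(3) + root/chest/neck rotations(4 each) +
# right hip(4), knee(1), ankle(4), shoulder(4), elbow(1) +
# left hip(4), knee(1), ankle(4), shoulder(4), elbow(1)
sizes = [1, 3, 4, 4, 4, 4, 1, 4, 4, 1, 4, 1, 4, 4, 1]
print(sum(sizes))  # 44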
# In[6]:
def coord_seq_to_rot_seq(coord_seq, frame_duration):
  ret = []
  for i in range(len(coord_seq)):
    tmp = coord_to_rot(i, coord_seq[i], frame_duration)
    ret.append(list(tmp))
  return ret

View File

@@ -1,36 +1,37 @@
import numpy as np
from skeleton import Skeleton
class MocapDataset:

  def __init__(self, fps, skeleton):
    self._skeleton = skeleton
    self._fps = fps
    self._data = None  # Must be filled by subclass
    self._cameras = None  # Must be filled by subclass

  def remove_joints(self, joints_to_remove):
    kept_joints = self._skeleton.remove_joints(joints_to_remove)
    for subject in self._data.keys():
      for action in self._data[subject].keys():
        s = self._data[subject][action]
        s['positions'] = s['positions'][:, kept_joints]

  def __getitem__(self, key):
    return self._data[key]

  def subjects(self):
    return self._data.keys()

  def fps(self):
    return self._fps

  def skeleton(self):
    return self._skeleton

  def cameras(self):
    return self._cameras

  def supports_semi_supervised(self):
    # This method can be overridden
    return False
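A minimal subclass sketch (not part of the commit; ToyDataset and its data are hypothetical) showing the contract the base class expects, with positions arrays shaped (frames, joints, 3):

class ToyDataset(MocapDataset):

  def __init__(self):
    super().__init__(fps=30, skeleton=Skeleton(parents=[-1, 0], joints_left=[], joints_right=[]))
    self._data = {'S1': {'Walk': {'positions': np.zeros((10, 2, 3))}}}
    self._cameras = {}

print(list(ToyDataset().subjects()))  # ['S1']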

View File

@@ -1,30 +1,31 @@
import numpy as np
def qrot(q, v):
"""
"""
Rotate vector(s) v about the rotation described by quaternion(s) q.
Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v,
where * denotes any number of dimensions.
Returns a tensor of shape (*, 3).
"""
assert q.shape[-1] == 4
assert v.shape[-1] == 3
assert q.shape[:-1] == v.shape[:-1]
assert q.shape[-1] == 4
assert v.shape[-1] == 3
assert q.shape[:-1] == v.shape[:-1]
qvec = q[..., 1:]
uv = np.cross(qvec, v)
uuv = np.cross(qvec, uv)
return (v + 2 * (q[..., :1] * uv + uuv))
qvec = q[..., 1:]
uv = np.cross(qvec, v)
uuv = np.cross(qvec, uv)
return (v + 2 * (q[..., :1] * uv + uuv))
def qinverse(q, inplace=False):
# We assume the quaternion to be normalized
if inplace:
q[..., 1:] *= -1
return q
else:
w = q[..., :1]
xyz = q[..., 1:]
return np.hstack((w, -xyz))
# We assume the quaternion to be normalized
if inplace:
q[..., 1:] *= -1
return q
else:
w = q[..., :1]
xyz = q[..., 1:]
return np.hstack((w, -xyz))
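A numeric check (not part of the commit): a 90-degree rotation about z should take the x axis to the y axis, and composing with qinverse should restore the input.

q = np.array([np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])  # w-first, rotation about z
v = np.array([1.0, 0.0, 0.0])
v_rot = qrot(q, v)
print(np.round(v_rot, 6))                     # ~ [0, 1, 0]
print(np.round(qrot(qinverse(q), v_rot), 6))  # back to [1, 0, 0]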

View File

@@ -1,7 +1,7 @@
import os, inspect

currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
print('parent:', parentdir)
@@ -10,7 +10,6 @@ import pybullet
import time
import random
from pybullet_utils.bullet_client import BulletClient
from deep_mimic.env.motion_capture_data import MotionCaptureData
@@ -26,21 +25,28 @@ import pybullet as p
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='Arguments for loading reference for learning.')
# General arguments
parser.add_argument('--dataset_path',
                    default='data/data_3d_h36m.npz',
                    type=str,
                    help='target dataset')  # h36m or humaneva
parser.add_argument(
    '--json_path',
    default='data/Walking.json',
    type=str,
    help='json file path for storing the deepmimic-format json created by inverse-kinect.')
parser.add_argument('--fps', default=24, type=int, help='frame per second')
parser.add_argument('--subject', default='S11', type=str, help='camera subject.')
parser.add_argument('--action', default='Walking', type=str, help='name of the action.')
parser.add_argument('--loop',
                    default='wrap',
                    type=str,
                    help='loop information in deepmimic, wrap or none.')
parser.add_argument('--draw_gt', action='store_true', help='draw ground truth or not.')
args = parser.parse_args()
dataset_path = args.dataset_path
json_path = args.json_path
@@ -52,93 +58,75 @@ draw_gt = args.draw_gt
def draw_ground_truth(coord_seq, frame, duration, shift):
  global joint_info
  joint = coord_seq[frame]
  shift = np.array(shift)
  for i in range(1, 17):
    # print(x[11], x[14])
    joint_fa = joint_info['father'][i]
    if joint_info['side'][i] == 'right':
      p.addUserDebugLine(lineFromXYZ=joint[i] + shift,
                         lineToXYZ=joint[joint_fa] + shift,
                         lineColorRGB=(255, 0, 0),
                         lineWidth=1,
                         lifeTime=duration)
    else:
      p.addUserDebugLine(lineFromXYZ=joint[i] + shift,
                         lineToXYZ=joint[joint_fa] + shift,
                         lineColorRGB=(0, 0, 0),
                         lineWidth=1,
                         lifeTime=duration)
dataset = init_fb_h36m_dataset(dataset_path)
ground_truth = pose3D_from_fb_h36m(dataset, subject=subject, action=action, shift=[1.0, 0.0, 0.0])
rot_seq = coord_seq_to_rot_seq(coord_seq=ground_truth, frame_duration=1 / fps)
rot_seq_to_deepmimic_json(rot_seq=rot_seq, loop=loop, json_path=json_path)
bc = BulletClient(connection_mode=pybullet.GUI)
bc.setAdditionalSearchPath(pybullet_data.getDataPath())
bc.configureDebugVisualizer(bc.COV_ENABLE_Y_AXIS_UP, 1)
bc.setGravity(0, -9.8, 0)

motion = MotionCaptureData()
motionPath = json_path
motion.Load(motionPath)
print("numFrames = ", motion.NumFrames())
simTimeId = bc.addUserDebugParameter("simTime", 0, motion.NumFrames() - 1.1, 0)
y2zOrn = bc.getQuaternionFromEuler([-1.57, 0, 0])
bc.loadURDF("plane.urdf", [0, -0.04, 0], y2zOrn)
simTimeId= bc.addUserDebugParameter("simTime",0,motion.NumFrames()-1.1,0)
y2zOrn = bc.getQuaternionFromEuler([-1.57,0,0])
bc.loadURDF("plane.urdf",[0,-0.04,0], y2zOrn)
humanoid = Humanoid(bc, motion, [0,0,0]) #这是初始位置的坐标
humanoid = Humanoid(bc, motion, [0, 0, 0]) #这是初始位置的坐标
print(p.getBasePositionAndOrientation(humanoid._humanoid))
simTime = 0
keyFrameDuration = motion.KeyFrameDuraction()
print("keyFrameDuration=",keyFrameDuration)
print("keyFrameDuration=", keyFrameDuration)
for utNum in range(motion.NumFrames()):
  bc.stepSimulation()
  humanoid.RenderReference(utNum * keyFrameDuration)
  if draw_gt:
    draw_ground_truth(coord_seq=ground_truth,
                      frame=utNum,
                      duration=keyFrameDuration,
                      shift=[-1.0, 0.0, 1.0])
  time.sleep(0.001)
stage = 0
def Reset(humanoid):
  global simTime
  humanoid.Reset()
  simTime = 0
  humanoid.SetSimTime(simTime)
  pose = humanoid.InitializePoseFromMotionData()
  humanoid.ApplyPose(pose, True, True, humanoid._humanoid, bc)
Reset(humanoid)
p.disconnect()

View File

@@ -1,81 +1,82 @@
import numpy as np
class Skeleton:

  def __init__(self, parents, joints_left, joints_right):
    assert len(joints_left) == len(joints_right)

    self._parents = np.array(parents)
    self._joints_left = joints_left
    self._joints_right = joints_right
    self._compute_metadata()

  def num_joints(self):
    return len(self._parents)

  def parents(self):
    return self._parents

  def has_children(self):
    return self._has_children

  def children(self):
    return self._children

  def remove_joints(self, joints_to_remove):
    """
    Remove the joints specified in 'joints_to_remove'.
    """
    valid_joints = []
    for joint in range(len(self._parents)):
      if joint not in joints_to_remove:
        valid_joints.append(joint)

    for i in range(len(self._parents)):
      while self._parents[i] in joints_to_remove:
        self._parents[i] = self._parents[self._parents[i]]

    index_offsets = np.zeros(len(self._parents), dtype=int)
    new_parents = []
    for i, parent in enumerate(self._parents):
      if i not in joints_to_remove:
        new_parents.append(parent - index_offsets[parent])
      else:
        index_offsets[i:] += 1
    self._parents = np.array(new_parents)

    if self._joints_left is not None:
      new_joints_left = []
      for joint in self._joints_left:
        if joint in valid_joints:
          new_joints_left.append(joint - index_offsets[joint])
      self._joints_left = new_joints_left
    if self._joints_right is not None:
      new_joints_right = []
      for joint in self._joints_right:
        if joint in valid_joints:
          new_joints_right.append(joint - index_offsets[joint])
      self._joints_right = new_joints_right

    self._compute_metadata()
    return valid_joints

  def joints_left(self):
    return self._joints_left

  def joints_right(self):
    return self._joints_right

  def _compute_metadata(self):
    self._has_children = np.zeros(len(self._parents)).astype(bool)
    for i, parent in enumerate(self._parents):
      if parent != -1:
        self._has_children[parent] = True

    self._children = []
    for i, parent in enumerate(self._parents):
      self._children.append([])
    for i, parent in enumerate(self._parents):
      if parent != -1:
        self._children[parent].append(i)
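A toy sketch (not part of the commit) of what remove_joints does to a 3-joint chain 0 -> 1 -> 2 when joint 1 is dropped: the child is re-parented to its grandparent and the indices are compacted.

sk = Skeleton(parents=[-1, 0, 1], joints_left=[], joints_right=[])
kept = sk.remove_joints([1])
print(kept)          # [0, 2]
print(sk.parents())  # [-1  0]: old joint 2 is now joint 1, parented to the root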