add yapf style and apply yapf to format all Python files
This recreates pull request #2192
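The style settings themselves are not part of this excerpt, so the lines below are only an assumed sketch of a .style.yapf and an invocation that would reproduce the layout visible in the hunks that follow (2-space indents, lines wrapped near 100 columns); the committed file may differ.

# assumed .style.yapf contents (inferred from the formatted output below, not taken from this diff)
[style]
based_on_style = google
column_limit = 100
indent_width = 2

# apply it in place to every Python file under the current tree
yapf -i -r --style=.style.yapf .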
@@ -1,124 +1,124 @@
import re as RE


class ArgParser(object):
  global_parser = None

  def __init__(self):
    self._table = dict()
    return

  def clear(self):
    self._table.clear()
    return

  def load_args(self, arg_strs):
    succ = True
    vals = []
    curr_key = ''

    for str in arg_strs:
      if not (self._is_comment(str)):
        is_key = self._is_key(str)
        if (is_key):
          if (curr_key != ''):
            if (curr_key not in self._table):
              self._table[curr_key] = vals

          vals = []
          curr_key = str[2::]
        else:
          vals.append(str)

    if (curr_key != ''):
      if (curr_key not in self._table):
        self._table[curr_key] = vals

    return succ

  def load_file(self, filename):
    succ = False
    with open(filename, 'r') as file:
      lines = RE.split(r'[\n\r]+', file.read())
      file.close()

      arg_strs = []
      for line in lines:
        if (len(line) > 0 and not self._is_comment(line)):
          arg_strs += line.split()

      succ = self.load_args(arg_strs)

    return succ

  def has_key(self, key):
    return key in self._table

  def parse_string(self, key, default=''):
    str = default
    if self.has_key(key):
      str = self._table[key][0]
    return str

  def parse_strings(self, key, default=[]):
    arr = default
    if self.has_key(key):
      arr = self._table[key]
    return arr

  def parse_int(self, key, default=0):
    val = default
    if self.has_key(key):
      val = int(self._table[key][0])
    return val

  def parse_ints(self, key, default=[]):
    arr = default
    if self.has_key(key):
      arr = [int(str) for str in self._table[key]]
    return arr

  def parse_float(self, key, default=0.0):
    val = default
    if self.has_key(key):
      val = float(self._table[key][0])
    return val

  def parse_floats(self, key, default=[]):
    arr = default
    if self.has_key(key):
      arr = [float(str) for str in self._table[key]]
    return arr

  def parse_bool(self, key, default=False):
    val = default
    if self.has_key(key):
      val = self._parse_bool(self._table[key][0])
    return val

  def parse_bools(self, key, default=[]):
    arr = default
    if self.has_key(key):
      arr = [self._parse_bool(str) for str in self._table[key]]
    return arr

  def _is_comment(self, str):
    is_comment = False
    if (len(str) > 0):
      is_comment = str[0] == '#'
    return is_comment

  def _is_key(self, str):
    is_key = False
    if (len(str) >= 3):
      is_key = str[0] == '-' and str[1] == '-'
    return is_key

  def _parse_bool(self, str):
    val = False
    if (str == 'true' or str == 'True' or str == '1' or str == 'T' or str == 't'):
      val = True
    return val
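The keyword syntax that load_args and _is_key implement ('--' marks a key, later tokens become its values, tokens starting with '#' are skipped) is easiest to see in a small usage sketch; the key names mirror the arg_parser example further down, the values are made up:

from pybullet_utils.arg_parser import ArgParser

parser = ArgParser()
parser.load_args(['--motion_file', 'walk.txt', '--fall_contact_bodies', '3', '5', '--enable_draw', 'true'])
print(parser.parse_string('motion_file'))        # 'walk.txt'
print(parser.parse_ints('fall_contact_bodies'))  # [3, 5]
print(parser.parse_bool('enable_draw'))          # True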
@@ -3,7 +3,7 @@ from __future__ import absolute_import
from __future__ import division
import functools
import inspect
import pybullet


class BulletClient(object):

@@ -42,9 +42,13 @@ class BulletClient(object):
    attribute = getattr(pybullet, name)
    if inspect.isbuiltin(attribute):
      if name not in [
          "invertTransform",
          "multiplyTransforms",
          "getMatrixFromQuaternion",
          "getEulerFromQuaternion",
          "computeViewMatrixFromYawPitchRoll",
          "computeProjectionMatrixFOV",
          "getQuaternionFromEuler",
      ]:  # A temporary hack for now.
        attribute = functools.partial(attribute, physicsClientId=self._client)
    return attribute
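The __getattr__ hook above is what lets the examples further down drive several simulations at once: any pybullet built-in looked up on a BulletClient instance is wrapped with functools.partial so that client's physicsClientId is passed implicitly. A minimal sketch; the bc import path is assumed from the pybullet_utils layout used elsewhere in this diff:

import pybullet
import pybullet_data
import pybullet_utils.bullet_client as bc  # assumed module path

p0 = bc.BulletClient(connection_mode=pybullet.DIRECT)
p0.setAdditionalSearchPath(pybullet_data.getDataPath())
# equivalent to pybullet.loadURDF("r2d2.urdf", physicsClientId=p0._client)
p0.loadURDF("r2d2.urdf")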
@@ -10,7 +10,6 @@ p0.setAdditionalSearchPath(pybullet_data.getDataPath())
p1 = bc.BulletClient(connection_mode=pybullet.DIRECT)
p1.setAdditionalSearchPath(pybullet_data.getDataPath())

#can also connect using different modes, GUI, SHARED_MEMORY, TCP, UDP, SHARED_MEMORY_SERVER, GUI_SERVER

husky = p1.loadURDF("husky/husky.urdf", flags=p0.URDF_USE_IMPLICIT_CYLINDER)

@@ -22,36 +21,34 @@ ed1 = ed.UrdfEditor()
ed1.initializeFromBulletBody(kuka, p0._client)
#ed1.saveUrdf("combined.urdf")

parentLinkIndex = 0

jointPivotXYZInParent = [0, 0, 0]
jointPivotRPYInParent = [0, 0, 0]

jointPivotXYZInChild = [0, 0, 0]
jointPivotRPYInChild = [0, 0, 0]

newjoint = ed0.joinUrdf(ed1, parentLinkIndex, jointPivotXYZInParent, jointPivotRPYInParent,
                        jointPivotXYZInChild, jointPivotRPYInChild, p0._client, p1._client)
newjoint.joint_type = p0.JOINT_FIXED

ed0.saveUrdf("combined.urdf")

print(p0._client)
print(p1._client)
print("p0.getNumBodies()=", p0.getNumBodies())
print("p1.getNumBodies()=", p1.getNumBodies())

pgui = bc.BulletClient(connection_mode=pybullet.GUI)
pgui.configureDebugVisualizer(pgui.COV_ENABLE_RENDERING, 0)

orn = [0, 0, 0, 1]
ed0.createMultiBody([0, 0, 0], orn, pgui._client)
pgui.setRealTimeSimulation(1)

pgui.configureDebugVisualizer(pgui.COV_ENABLE_RENDERING, 1)

while (pgui.isConnected()):
  pgui.getCameraImage(320, 200, renderer=pgui.ER_BULLET_HARDWARE_OPENGL)
  time.sleep(1. / 240.)
@@ -2,12 +2,10 @@ import pybullet as p
p.connect(p.GUI)
p.loadURDF("combined.urdf", useFixedBase=True)

#for j in range (p.getNumJoints(0)):
#  p.setJointMotorControl2(0,j,p.VELOCITY_CONTROL,targetVelocity=0.1)
p.setRealTimeSimulation(1)
while (p.isConnected()):
  p.getCameraImage(320, 200)
  import time
  time.sleep(1. / 240.)
@@ -6,7 +6,9 @@ import pybullet_data as pd
import pybullet_utils.urdfEditor as ed
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--mjcf',
                    help='MuJoCo xml file to be converted to URDF',
                    default='mjcf/humanoid.xml')
args = parser.parse_args()

p = bc.BulletClient()

@@ -14,15 +16,15 @@ p.setAdditionalSearchPath(pd.getDataPath())
objs = p.loadMJCF(args.mjcf, flags=p.URDF_USE_IMPLICIT_CYLINDER)

for o in objs:
  #print("o=",o, p.getBodyInfo(o), p.getNumJoints(o))
  humanoid = objs[o]
  ed0 = ed.UrdfEditor()
  ed0.initializeFromBulletBody(humanoid, p._client)
  robotName = str(p.getBodyInfo(o)[1], 'utf-8')
  partName = str(p.getBodyInfo(o)[0], 'utf-8')

  print("robotName=", robotName)
  print("partName=", partName)

  saveVisuals = False
  ed0.saveUrdf(robotName + "_" + partName + ".urdf", saveVisuals)
@@ -15,6 +15,5 @@ p0.loadURDF("r2d2.urdf")
p1.loadSDF("stadium.sdf")
print(p0._client)
print(p1._client)
print("p0.getNumBodies()=", p0.getNumBodies())
print("p1.getNumBodies()=", p1.getNumBodies())
@@ -1,26 +1,28 @@
import pybullet_data
from pybullet_utils.arg_parser import ArgParser
from pybullet_utils.logger import Logger
import sys


def build_arg_parser(args):
  arg_parser = ArgParser()
  arg_parser.load_args(args)

  arg_file = arg_parser.parse_string('arg_file', '')
  if (arg_file != ''):
    path = pybullet_data.getDataPath() + "/args/" + arg_file
    succ = arg_parser.load_file(path)
    Logger.print2(arg_file)
    assert succ, Logger.print2('Failed to load args from: ' + arg_file)

  return arg_parser


args = sys.argv[1:]
arg_parser = build_arg_parser(args)
motion_file = arg_parser.parse_string("motion_file")
print("motion_file=", motion_file)
bodies = arg_parser.parse_ints("fall_contact_bodies")
print("bodies=", bodies)
int_output_path = arg_parser.parse_string("int_output_path")
print("int_output_path=", int_output_path)
@@ -1,9 +1,9 @@
from pybullet_utils.logger import Logger
logger = Logger()
logger.configure_output_file("e:/mylog.txt")
for i in range(10):
  logger.log_tabular("Iteration", 1)
  Logger.print2("hello world")

  logger.print_tabular()
  logger.dump_tabular()
@@ -1,5 +1,4 @@
import pybullet_utils.mpi_util as MPIUtil
"""

Some simple logging functionality, inspired by rllab's logging.

@@ -17,112 +16,114 @@ A['EpRewMean']
import os.path as osp, shutil, time, atexit, os, subprocess


class Logger:

  def print2(str):
    if (MPIUtil.is_root_proc()):
      print(str)
    return

  def __init__(self):
    self.output_file = None
    self.first_row = True
    self.log_headers = []
    self.log_current_row = {}
    self._dump_str_template = ""
    return

  def reset(self):
    self.first_row = True
    self.log_headers = []
    self.log_current_row = {}
    if self.output_file is not None:
      self.output_file = open(output_path, 'w')
    return

  def configure_output_file(self, filename=None):
    """
    Set output directory to d, or to /tmp/somerandomnumber if d is None
    """
    self.first_row = True
    self.log_headers = []
    self.log_current_row = {}

    output_path = filename or "output/log_%i.txt" % int(time.time())

    out_dir = os.path.dirname(output_path)
    if not os.path.exists(out_dir) and MPIUtil.is_root_proc():
      os.makedirs(out_dir)

    if (MPIUtil.is_root_proc()):
      self.output_file = open(output_path, 'w')
      assert osp.exists(output_path)
      atexit.register(self.output_file.close)

      Logger.print2("Logging data to " + self.output_file.name)
    return

  def log_tabular(self, key, val):
    """
    Log a value of some diagnostic
    Call this once for each diagnostic quantity, each iteration
    """
    if self.first_row and key not in self.log_headers:
      self.log_headers.append(key)
    else:
      assert key in self.log_headers, "Trying to introduce a new key %s that you didn't include in the first iteration" % key
    self.log_current_row[key] = val
    return

  def get_num_keys(self):
    return len(self.log_headers)

  def print_tabular(self):
    """
    Print all of the diagnostics from the current iteration
    """
    if (MPIUtil.is_root_proc()):
      vals = []
      Logger.print2("-" * 37)
      for key in self.log_headers:
        val = self.log_current_row.get(key, "")
        if isinstance(val, float):
          valstr = "%8.3g" % val
        elif isinstance(val, int):
          valstr = str(val)
        else:
          valstr = val
        Logger.print2("| %15s | %15s |" % (key, valstr))
        vals.append(val)
      Logger.print2("-" * 37)
    return

  def dump_tabular(self):
    """
    Write all of the diagnostics from the current iteration
    """
    if (MPIUtil.is_root_proc()):
      if (self.first_row):
        self._dump_str_template = self._build_str_template()

      vals = []
      for key in self.log_headers:
        val = self.log_current_row.get(key, "")
        vals.append(val)

      if self.output_file is not None:
        if self.first_row:
          header_str = self._dump_str_template.format(*self.log_headers)
          self.output_file.write(header_str + "\n")

        val_str = self._dump_str_template.format(*map(str, vals))
        self.output_file.write(val_str + "\n")
        self.output_file.flush()

    self.log_current_row.clear()
    self.first_row = False
    return

  def _build_str_template(self):
    num_keys = self.get_num_keys()
    template = "{:<25}" * num_keys
    return template
@@ -4,15 +4,19 @@ RAD_TO_DEG = 57.2957795
DEG_TO_RAD = 1.0 / RAD_TO_DEG
INVALID_IDX = -1


def lerp(x, y, t):
  return (1 - t) * x + t * y


def log_lerp(x, y, t):
  return np.exp(lerp(np.log(x), np.log(y), t))


def flatten(arr_list):
  return np.concatenate([np.reshape(a, [-1]) for a in arr_list], axis=0)


def flip_coin(p):
  rand_num = np.random.binomial(1, p, 1)
  return rand_num[0] == 1
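A quick worked example of the two interpolators: lerp blends the values linearly, while log_lerp blends their logarithms, so the midpoint of 1e-4 and 1e-2 is 1e-3 rather than about 5e-3 (the import path is assumed, not shown in this diff):

from pybullet_utils.math_util import lerp, log_lerp  # assumed location of the module above

print(lerp(0.0, 10.0, 0.25))      # 2.5
print(log_lerp(1e-4, 1e-2, 0.5))  # 1e-3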
@@ -3,50 +3,61 @@ from mpi4py import MPI
ROOT_PROC_RANK = 0


def get_num_procs():
  return MPI.COMM_WORLD.Get_size()


def get_proc_rank():
  return MPI.COMM_WORLD.Get_rank()


def is_root_proc():
  rank = get_proc_rank()
  return rank == ROOT_PROC_RANK


def bcast(x):
  MPI.COMM_WORLD.Bcast(x, root=ROOT_PROC_RANK)
  return


def reduce_sum(x):
  return reduce_all(x, MPI.SUM)


def reduce_prod(x):
  return reduce_all(x, MPI.PROD)


def reduce_avg(x):
  buffer = reduce_sum(x)
  buffer /= get_num_procs()
  return buffer


def reduce_min(x):
  return reduce_all(x, MPI.MIN)


def reduce_max(x):
  return reduce_all(x, MPI.MAX)


def reduce_all(x, op):
  is_array = isinstance(x, np.ndarray)
  x_buf = x if is_array else np.array([x])
  buffer = np.zeros_like(x_buf)
  MPI.COMM_WORLD.Allreduce(x_buf, buffer, op=op)
  buffer = buffer if is_array else buffer[0]
  return buffer


def gather_all(x):
  is_array = isinstance(x, np.ndarray)
  x_buf = np.array([x])
  buffer = np.zeros_like(x_buf)
  buffer = np.repeat(buffer, get_num_procs(), axis=0)
  MPI.COMM_WORLD.Allgather(x_buf, buffer)
  buffer = list(buffer)
  return buffer
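These helpers wrap collective MPI calls so scalars and numpy arrays go through the same path (reduce_all boxes a scalar into a one-element array and unboxes the result). A sketch of typical use under mpirun; the launch line and script name are illustrative, not part of this commit:

# mpirun -np 4 python train.py   (assumed launch command)
import numpy as np
import pybullet_utils.mpi_util as MPIUtil

local_value = np.random.rand()                 # every rank computes its own value
mean_value = MPIUtil.reduce_avg(local_value)   # identical average on every rank
if MPIUtil.is_root_proc():
  print("mean over", MPIUtil.get_num_procs(), "procs =", mean_value)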
@@ -1,218 +1,232 @@
import numpy as np


class PDControllerStableMultiDof(object):

  def __init__(self, pb):
    self._pb = pb

  def computeAngVel(self, ornStart, ornEnd, deltaTime, bullet_client):
    dorn = bullet_client.getDifferenceQuaternion(ornStart, ornEnd)
    axis, angle = bullet_client.getAxisAngleFromQuaternion(dorn)
    angVel = [(axis[0] * angle) / deltaTime, (axis[1] * angle) / deltaTime,
              (axis[2] * angle) / deltaTime]
    return angVel

  def quatMul(self, q1, q2):
    return [
        q1[3] * q2[0] + q1[0] * q2[3] + q1[1] * q2[2] - q1[2] * q2[1],
        q1[3] * q2[1] + q1[1] * q2[3] + q1[2] * q2[0] - q1[0] * q2[2],
        q1[3] * q2[2] + q1[2] * q2[3] + q1[0] * q2[1] - q1[1] * q2[0],
        q1[3] * q2[3] - q1[0] * q2[0] - q1[1] * q2[1] - q1[2] * q2[2]
    ]

  def computeAngVelRel(self, ornStart, ornEnd, deltaTime, bullet_client):
    ornStartConjugate = [-ornStart[0], -ornStart[1], -ornStart[2], ornStart[3]]
    q_diff = self.quatMul(
        ornStartConjugate,
        ornEnd)  #bullet_client.multiplyTransforms([0,0,0], ornStartConjugate, [0,0,0], ornEnd)

    axis, angle = bullet_client.getAxisAngleFromQuaternion(q_diff)
    angVel = [(axis[0] * angle) / deltaTime, (axis[1] * angle) / deltaTime,
              (axis[2] * angle) / deltaTime]
    return angVel

  def computePD(self, bodyUniqueId, jointIndices, desiredPositions, desiredVelocities, kps, kds,
                maxForces, timeStep):

    numJoints = len(jointIndices)  #self._pb.getNumJoints(bodyUniqueId)
    curPos, curOrn = self._pb.getBasePositionAndOrientation(bodyUniqueId)
    q1 = [curPos[0], curPos[1], curPos[2], curOrn[0], curOrn[1], curOrn[2], curOrn[3]]
    #print("q1=",q1)

    #qdot1 = [0,0,0, 0,0,0,0]
    baseLinVel, baseAngVel = self._pb.getBaseVelocity(bodyUniqueId)
    #print("baseLinVel=",baseLinVel)
    qdot1 = [
        baseLinVel[0], baseLinVel[1], baseLinVel[2], baseAngVel[0], baseAngVel[1], baseAngVel[2], 0
    ]
    #qError = [0,0,0, 0,0,0,0]
    desiredOrn = [
        desiredPositions[3], desiredPositions[4], desiredPositions[5], desiredPositions[6]
    ]
    axis1 = self._pb.getAxisDifferenceQuaternion(desiredOrn, curOrn)
    angDiff = [0, 0, 0]  #self.computeAngVel(curOrn, desiredOrn, 1, self._pb)
    qError = [
        desiredPositions[0] - curPos[0], desiredPositions[1] - curPos[1],
        desiredPositions[2] - curPos[2], angDiff[0], angDiff[1], angDiff[2], 0
    ]
    target_pos = np.array(desiredPositions)
    #np.savetxt("pb_target_pos.csv", target_pos, delimiter=",")

    qIndex = 7
    qdotIndex = 7
    zeroAccelerations = [0, 0, 0, 0, 0, 0, 0]
    for i in range(numJoints):
      js = self._pb.getJointStateMultiDof(bodyUniqueId, jointIndices[i])

      jointPos = js[0]
      jointVel = js[1]
      q1 += jointPos

      if len(js[0]) == 1:
        desiredPos = desiredPositions[qIndex]

        qdiff = desiredPos - jointPos[0]
        qError.append(qdiff)
        zeroAccelerations.append(0.)
        qdot1 += jointVel
        qIndex += 1
        qdotIndex += 1
      if len(js[0]) == 4:
        desiredPos = [
            desiredPositions[qIndex], desiredPositions[qIndex + 1], desiredPositions[qIndex + 2],
            desiredPositions[qIndex + 3]
        ]
        #axis = self._pb.getAxisDifferenceQuaternion(desiredPos,jointPos)
        angDiff = self.computeAngVelRel(jointPos, desiredPos, 1, self._pb)
        #angDiff = self._pb.computeAngVelRel(jointPos, desiredPos, 1)

        jointVelNew = [jointVel[0], jointVel[1], jointVel[2], 0]
        qdot1 += jointVelNew
        qError.append(angDiff[0])
        qError.append(angDiff[1])
        qError.append(angDiff[2])
        qError.append(0)
        desiredVel = [
            desiredVelocities[qdotIndex], desiredVelocities[qdotIndex + 1],
            desiredVelocities[qdotIndex + 2]
        ]
        zeroAccelerations += [0., 0., 0., 0.]
        qIndex += 4
        qdotIndex += 4

    q = np.array(q1)

    qerr = np.array(qError)

    #np.savetxt("pb_qerro.csv",qerr,delimiter=",")

    #np.savetxt("pb_q.csv", q, delimiter=",")

    qdot = np.array(qdot1)
    #np.savetxt("qdot.csv", qdot, delimiter=",")

    qdotdesired = np.array(desiredVelocities)
    qdoterr = qdotdesired - qdot

    Kp = np.diagflat(kps)
    Kd = np.diagflat(kds)

    p = Kp.dot(qError)

    #np.savetxt("pb_qError.csv", qError, delimiter=",")
    #np.savetxt("pb_p.csv", p, delimiter=",")

    d = Kd.dot(qdoterr)

    #np.savetxt("pb_d.csv", d, delimiter=",")
    #np.savetxt("pbqdoterr.csv", qdoterr, delimiter=",")

    M1 = self._pb.calculateMassMatrix(bodyUniqueId, q1, flags=1)

    M2 = np.array(M1)
    #np.savetxt("M2.csv", M2, delimiter=",")

    M = (M2 + Kd * timeStep)

    #np.savetxt("pbM_tKd.csv",M, delimiter=",")

    c1 = self._pb.calculateInverseDynamics(bodyUniqueId, q1, qdot1, zeroAccelerations, flags=1)

    c = np.array(c1)
    #np.savetxt("pb_C.csv",c, delimiter=",")
    A = M
    #p = [0]*43
    #np.savetxt("pb_kp_dot_qError.csv", p)
    #np.savetxt("pb_kd_dot_vError.csv", d)

    b = p + d - c
    #np.savetxt("pb_b_acc.csv",b, delimiter=",")

    useNumpySolver = True
    if useNumpySolver:
      qddot = np.linalg.solve(A, b)
    else:
      dofCount = len(b)
      print(dofCount)
      qddot = self._pb.ldltSolve(bodyUniqueId, jointPositions=q1, b=b.tolist(), kd=kds, t=timeStep)

    tau = p + d - Kd.dot(qddot) * timeStep
    #print("len(tau)=",len(tau))
    #np.savetxt("pb_tau_not_clamped.csv", tau, delimiter=",")

    maxF = np.array(maxForces)
    #print("maxF",maxF)
    forces = np.clip(tau, -maxF, maxF)

    #np.savetxt("pb_tau_clamped.csv", tau, delimiter=",")
    return forces


class PDControllerStable(object):

  def __init__(self, pb):
    self._pb = pb

  def computePD(self, bodyUniqueId, jointIndices, desiredPositions, desiredVelocities, kps, kds,
                maxForces, timeStep):
    numBaseDofs = 0
    numPosBaseDofs = 0
    baseMass = self._pb.getDynamicsInfo(bodyUniqueId, -1)[0]
    curPos, curOrn = self._pb.getBasePositionAndOrientation(bodyUniqueId)
    q1 = []
    qdot1 = []
    zeroAccelerations = []
    qError = []
    if (baseMass > 0):
      numBaseDofs = 6
      numPosBaseDofs = 7
      q1 = [curPos[0], curPos[1], curPos[2], curOrn[0], curOrn[1], curOrn[2], curOrn[3]]
      qdot1 = [0] * numBaseDofs
      zeroAccelerations = [0] * numBaseDofs
      angDiff = [0, 0, 0]
      qError = [
          desiredPositions[0] - curPos[0], desiredPositions[1] - curPos[1],
          desiredPositions[2] - curPos[2], angDiff[0], angDiff[1], angDiff[2]
      ]
    numJoints = self._pb.getNumJoints(bodyUniqueId)
    jointStates = self._pb.getJointStates(bodyUniqueId, jointIndices)

    for i in range(numJoints):
      q1.append(jointStates[i][0])
      qdot1.append(jointStates[i][1])
      zeroAccelerations.append(0)
    q = np.array(q1)
    qdot = np.array(qdot1)
    qdes = np.array(desiredPositions)
    qdotdes = np.array(desiredVelocities)
    #qError = qdes - q
    for j in range(numJoints):
      qError.append(desiredPositions[j + numPosBaseDofs] - q1[j + numPosBaseDofs])
    #print("qError=",qError)
    qdotError = qdotdes - qdot
    Kp = np.diagflat(kps)
    Kd = np.diagflat(kds)
    p = Kp.dot(qError)
    d = Kd.dot(qdotError)
    forces = p + d

    M1 = self._pb.calculateMassMatrix(bodyUniqueId, q1)
    M2 = np.array(M1)
    M = (M2 + Kd * timeStep)
    c1 = self._pb.calculateInverseDynamics(bodyUniqueId, q1, qdot1, zeroAccelerations)
    c = np.array(c1)
    A = M
    b = -c + p + d
    qddot = np.linalg.solve(A, b)
    tau = p + d - Kd.dot(qddot) * timeStep
    maxF = np.array(maxForces)
    forces = np.clip(tau, -maxF, maxF)
    #print("c=",c)
    return tau
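PDControllerStable above implements the "stable PD" update: it solves (M + Kd*dt) * qddot = Kp*qError + Kd*qdotError - c for the joint accelerations and returns tau = Kp*qError + Kd*qdotError - Kd*qddot*dt. A hedged usage sketch for a fixed-base arm driven in torque mode; the robot model, gains, time step and import path are illustrative assumptions, not part of this commit:

import pybullet as p
import pybullet_data
from pybullet_utils.pd_controller_stable import PDControllerStable  # assumed module path

p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
robot = p.loadURDF("kuka_iiwa/model.urdf", useFixedBase=True)  # assumes all joints actuated, zero-mass base
joints = list(range(p.getNumJoints(robot)))
# release the default velocity motors so the computed torques actually act on the joints
p.setJointMotorControlArray(robot, joints, p.VELOCITY_CONTROL, forces=[0.] * len(joints))

pd = PDControllerStable(p)
timeStep = 1. / 240.
p.setTimeStep(timeStep)
targets = [0.5] * len(joints)
for _ in range(240):
  # gains and force limits below are arbitrary values chosen for the sketch
  taus = pd.computePD(robot, joints, targets, [0.] * len(joints), [400.] * len(joints),
                      [40.] * len(joints), [100.] * len(joints), timeStep)
  p.setJointMotorControlArray(robot, joints, p.TORQUE_CONTROL, forces=list(taus))
  p.stepSimulation()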
File diff suppressed because it is too large
@@ -1,13 +1,14 @@
import random
import numpy as np


def set_global_seeds(seed):
  try:
    import tensorflow as tf
  except ImportError:
    pass
  else:
    tf.set_random_seed(seed)
  np.random.seed(seed)
  random.seed(seed)
  return