add actuatornet code
@@ -0,0 +1,2 @@
(2-line file added; its content is not rendered in this view)
@@ -0,0 +1,70 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl import app
from absl import flags
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense
from keras.models import Sequential
import numpy

FLAGS = flags.FLAGS

flags.DEFINE_string(
    "input_filename", "data/minitaur_log_latency_0.01.csv",
    "The name of the input CSV file. Each line in the CSV file contains "
    "the motor position, the motor speed, the action and the torque.")


def main(unused_argv):
  # Fix the random seed for reproducibility.
  numpy.random.seed(7)
  # Load the minitaur motor log.
  dataset = numpy.loadtxt(FLAGS.input_filename, delimiter=",")
  # Split into inputs (x: position, speed, action) and output (y: torque).
  x = dataset[:, 0:3]
  y = dataset[:, 3]
  print("x=", x)
  print("y=", y)

  # Create the model.
  model = Sequential()
  model.add(Dense(12, input_dim=3, activation="relu"))
  model.add(Dense(8, activation="sigmoid"))
  model.add(Dense(1, activation="linear"))

  # Compile the model (use adam or sgd).
  model.compile(
      loss="mean_squared_error",
      optimizer="adam",
      metrics=["mean_squared_error"])

  # Checkpoint: keep the weights with the lowest validation loss.
  filepath = "/tmp/keras/weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5"
  checkpoint = ModelCheckpoint(
      filepath, monitor="val_loss", verbose=1, save_best_only=True, mode="min")
  callbacks_list = [checkpoint]

  # Fit the model.
  model.fit(
      x,
      y,
      validation_split=0.34,
      epochs=4500,
      batch_size=1024,
      callbacks=callbacks_list,
      verbose=0)

  # Evaluate the model on the full data set.
  scores = model.evaluate(x, y)
  print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))


if __name__ == "__main__":
  app.run(main)
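As a usage sketch (not part of the commit): since ModelCheckpoint saves the full model, a trained actuator net can be loaded back and queried for a torque prediction. The checkpoint filename and motor state below are hypothetical.

from keras.models import load_model
import numpy

# Load a checkpoint written by ModelCheckpoint above (hypothetical
# filename; pick the epoch with the lowest val_loss).
model = load_model("/tmp/keras/weights-improvement-100-0.02.hdf5")

# Predict the torque for one (position, speed, action) input; the
# values are made up for illustration.
motor_state = numpy.array([[0.5, 1.2, 0.7]])
print("predicted torque=", model.predict(motor_state)[0][0])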
@@ -0,0 +1,166 @@
"""See https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/"""

import matplotlib
matplotlib.use('TkAgg')
import numpy as np
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dropout
from keras.layers import Dense
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint


# convert a time series to a supervised learning frame
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
  n_vars = 1 if type(data) is list else data.shape[1]
  df = DataFrame(data)
  cols, names = list(), list()
  # input sequence (t-n, ... t-1)
  for i in range(n_in, 0, -1):
    cols.append(df.shift(i))
    names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
  # forecast sequence (t, t+1, ... t+n)
  for i in range(0, n_out):
    cols.append(df.shift(-i))
    if i == 0:
      names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
    else:
      names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
  # put it all together
  agg = concat(cols, axis=1)
  agg.columns = names
  # drop rows with NaN values
  if dropnan:
    agg.dropna(inplace=True)
  return agg


# sanity check on a toy sequence
values = [x for x in range(10)]
data = series_to_supervised(values, 2)
print(data)
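For reference (derived from the function above, not from the commit), the toy call prints a frame like the following: each row pairs two lagged values with the current one, and the first two rows are dropped because their lags are NaN.

   var1(t-2)  var1(t-1)  var1(t)
2        0.0        1.0        2
3        1.0        2.0        3
...
9        7.0        8.0        9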
# load dataset
#dataset = read_csv('./data/minitaur_log_latency_0.01.csv')
#dataset = read_csv('./data/minitaur_log_latency_0.003.csv')
dataset = read_csv('./data/minitaur_log_latency_0.006.csv')

values = dataset.values
# ensure all data is float
values = values.astype('float32')

# normalize features (optional)
useNormalization = False
if useNormalization:
  scaler = MinMaxScaler(feature_range=(0, 1))
  scaled = scaler.fit_transform(values)
else:
  scaled = values

# frame as supervised learning
lag_steps = 5
reframed = series_to_supervised(scaled, lag_steps, 1)
print("reframed before drop=", reframed)

# drop the columns we don't want to predict
reframed.drop(reframed.columns[[3, 7, 11, 15, 19]], axis=1, inplace=True)
print("after drop=", reframed.head())

# plot the first 25 samples of each input column (optional)
groups = [0, 1, 2, 3]
doPlot = False
if doPlot:
  pyplot.figure()
  i = 1
  for group in groups:
    pyplot.subplot(len(groups), 1, i)
    pyplot.plot(values[0:25, group])
    pyplot.title(dataset.columns[group], y=0.5, loc='right')
    i += 1
  pyplot.show()

# split into train and test sets
values = reframed.values
n_train_hours = 6000
train = values[:n_train_hours, :]
test = values[n_train_hours:, :]
# split into inputs and outputs
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]

print("train_X.shape[1]=", train_X.shape[1])

# design the network
useLSTM = True
if useLSTM:
  # reshape input to be 3D [samples, timesteps, features]
  train_X = train_X.reshape((train_X.shape[0], lag_steps + 1,
                             int(train_X.shape[1] / (lag_steps + 1))))
  test_X = test_X.reshape((test_X.shape[0], lag_steps + 1,
                           int(test_X.shape[1] / (lag_steps + 1))))
  model = Sequential()
  model.add(LSTM(40, input_shape=(train_X.shape[1], train_X.shape[2])))
  model.add(Dropout(0.05))
  model.add(Dense(8, activation='sigmoid'))
  model.add(Dense(8, activation='sigmoid'))
  model.add(Dropout(0.05))
  model.add(Dense(1, activation='linear'))
else:
  # plain feed-forward model
  model = Sequential()
  model.add(Dense(12, input_dim=train_X.shape[1], activation="relu"))
  model.add(Dense(8, activation="sigmoid"))
  model.add(Dense(1, activation="linear"))

model.compile(
    loss='mean_squared_error', optimizer='adam', metrics=['mean_squared_error'])

# checkpoint: keep the weights with the lowest validation loss
filepath = '/tmp/keras/weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5'
checkpoint = ModelCheckpoint(
    filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]

# fit network
history = model.fit(
    train_X,
    train_y,
    epochs=1500,
    batch_size=32,
    callbacks=callbacks_list,
    validation_data=(test_X, test_y),
    verbose=2,
    shuffle=False)

# example input for a single prediction
data = np.array([[[1.513535008329887299, 3.234624992847829894e-01,
                   1.731481043119239782, 1.741165415165205399,
                   1.534267104753672228e+00, 1.071354965017878635e+00,
                   1.712386127673626302e+00]]])
#prediction = model.predict(data)
#print("prediction=", prediction)

# plot the train and test loss history
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()
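Beyond the loss curves, a held-out error metric could be reported. This is a sketch in the spirit of the linked tutorial, not part of the commit; it assumes useNormalization stayed False, so no inverse scaling is needed.

from math import sqrt
from sklearn.metrics import mean_squared_error

# predict torques for the held-out test window and report the RMSE
yhat = model.predict(test_X)
rmse = sqrt(mean_squared_error(test_y, yhat[:, 0]))
print('Test RMSE: %.3f' % rmse)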
(Several large/binary data files are also added in this commit; the viewer suppresses their diffs and does not show binary content.)
@@ -0,0 +1,67 @@
# An example that runs the Raibert controller in a Minitaur gym env.
#
# blaze run :minitaur_raibert_controller_example -- --log_path=/tmp/logs

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from pybullet_envs.minitaur.envs import minitaur_raibert_controller
from pybullet_envs.minitaur.envs import minitaur_gym_env

flags = tf.app.flags
FLAGS = tf.app.flags.FLAGS

flags.DEFINE_float("motor_kp", 1.0, "The position gain of the motor.")
flags.DEFINE_float("motor_kd", 0.015, "The speed gain of the motor.")
flags.DEFINE_float(
    "control_latency", 0.006,
    "The latency between sensor measurement and action execution on the robot.")
flags.DEFINE_string("log_path", ".", "The directory to write the log file.")


def speed(t):
  # Ramp the desired forward speed up to max_speed over the first t1
  # seconds, then reverse direction.
  max_speed = 0.35
  t1 = 3
  if t < t1:
    return t / t1 * max_speed
  else:
    return -max_speed


def main(argv):
  del argv
  env = minitaur_gym_env.MinitaurGymEnv(
      urdf_version=minitaur_gym_env.RAINBOW_DASH_V0_URDF_VERSION,
      control_time_step=0.006,
      action_repeat=6,
      pd_latency=0.0,
      control_latency=FLAGS.control_latency,
      motor_kp=FLAGS.motor_kp,
      motor_kd=FLAGS.motor_kd,
      remove_default_joint_damping=True,
      leg_model_enabled=False,
      render=True,
      on_rack=False,
      accurate_motor_model_enabled=True,
      log_path=FLAGS.log_path)
  env.reset()

  controller = minitaur_raibert_controller.MinitaurRaibertTrottingController(
      env.minitaur)

  tstart = env.minitaur.GetTimeSinceReset()
  for _ in range(1000):
    t = env.minitaur.GetTimeSinceReset() - tstart
    controller.behavior_parameters = (
        minitaur_raibert_controller.BehaviorParameters(
            desired_forward_speed=speed(t)))
    controller.update(t)
    env.step(controller.get_action())

  #env.close()


if __name__ == "__main__":
  tf.app.run(main)
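As an assumed end-to-end recipe (paths and the timestamped log name are hypothetical, mirroring the comments in these scripts): run this example to record an episode, then convert the protobuf log with the proto2csv tool added below, producing training data for the actuator-net scripts above.

import subprocess

# Record an episode (writes a timestamped protobuf log under /tmp/logs).
subprocess.call(
    ["python", "minitaur_raibert_controller_example.py", "--log_path=/tmp/logs"])
# Convert the log to CSV (log filename is hypothetical).
subprocess.call(
    ["python", "proto2csv.py",
     "--proto_file=/tmp/logs/minitaur_log_2019-01-27-12-59-31",
     "--csv_file=data/minitaur_log_latency_0.006.csv"])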
@@ -0,0 +1,46 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# python proto2csv.py --proto_file=/tmp/logs/minitaur_log_2019-01-27-12-59-31 --csv_file=/tmp/logs/out.csv
# Each line in the CSV contains: angle, velocity, action, torque.

import tensorflow as tf
import numpy
from pybullet_envs.minitaur.envs import minitaur_logging

flags = tf.app.flags
FLAGS = tf.app.flags.FLAGS

flags.DEFINE_string("proto_file", "logs", "path to the protobuf input file")
flags.DEFINE_string("csv_file", "file.csv", "path to the CSV output file")


def main(argv):
  del argv

  logging = minitaur_logging.MinitaurLogging()
  episode = logging.restore_episode(FLAGS.proto_file)
  fields = episode.ListFields()

  recs = []

  # fields[0][1] is the episode's repeated state-action record.
  for rec in fields[0][1]:
    for motor_state in rec.motor_states:
      recs.append([motor_state.angle, motor_state.velocity,
                   motor_state.action, motor_state.torque])

  a = numpy.array(recs)
  numpy.savetxt(FLAGS.csv_file, a, delimiter=",")


if __name__ == "__main__":
  tf.app.run(main)
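A quick sanity check (my addition, not in the commit) that a converted log matches what the training scripts expect:

import numpy

# Reload the CSV and confirm the four expected columns are present.
data = numpy.loadtxt("/tmp/logs/out.csv", delimiter=",")
assert data.shape[1] == 4, "expected angle, velocity, action, torque"
print("rows:", data.shape[0])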
@@ -0,0 +1,6 @@
pybullet
tensorflow
gym
pandas
matplotlib
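Assuming this list is saved as requirements.txt, it installs with pip install -r requirements.txt. Note that the scripts above also import keras, numpy, and scikit-learn, which this list does not pin.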
@@ -249,7 +249,8 @@ class MinitaurGymEnv(gym.Env):
     self._hard_reset = hard_reset # This assignment need to be after reset()

   def close(self):
-    self.logging.save_episode(self._episode_proto)
+    if self._env_step_counter>0:
+      self.logging.save_episode(self._episode_proto)
     self.minitaur.Terminate()

   def add_env_randomizer(self, env_randomizer):
@@ -258,7 +259,8 @@ class MinitaurGymEnv(gym.Env):
   def reset(self, initial_motor_angles=None, reset_duration=1.0):
     self._pybullet_client.configureDebugVisualizer(
         self._pybullet_client.COV_ENABLE_RENDERING, 0)
-    self.logging.save_episode(self._episode_proto)
+    if self._env_step_counter>0:
+      self.logging.save_episode(self._episode_proto)
     self._episode_proto = minitaur_logging_pb2.MinitaurEpisode()
     minitaur_logging.preallocate_episode_proto(self._episode_proto,
                                                self._num_steps_to_log)
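Both hunks apply the same fix: save_episode previously ran unconditionally, so calling close() or reset() before any simulation step wrote an empty episode proto; the new step-counter guard skips those saves.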