Added the TRPO learning algorithm from rllab

This commit is contained in:
Jie Tan
2017-03-10 12:29:47 -08:00
parent 923fbe8588
commit 37a809f5d1
4 changed files with 100 additions and 33 deletions


@@ -0,0 +1,48 @@
from envs.bullet.cartpole_bullet import CartPoleBulletEnv
from sandbox.rocky.tf.algos.trpo import TRPO
from sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy
from sandbox.rocky.tf.envs.base import TfEnv
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.gym_env import GymEnv
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import stub, run_experiment_lite
stub(globals())

env = TfEnv(normalize(GymEnv("CartPoleBulletEnv-v0")))
policy = GaussianMLPPolicy(
    name="tf_gaussian_mlp",
    env_spec=env.spec,
    # The policy network has a single hidden layer with 8 units.
    hidden_sizes=(8,)
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(
    env=env,
    policy=policy,
    baseline=baseline,
    batch_size=5000,
    max_path_length=env.horizon,
    n_itr=50,
    discount=0.999,
    step_size=0.01,
    force_batch_sampler=True,
    # Uncomment both plot lines (this one and the one in
    # run_experiment_lite below) to enable plotting.
    # plot=True,
)
run_experiment_lite(
    algo.train(),
    # Number of parallel workers for sampling.
    n_parallel=1,
    # Only keep the snapshot parameters for the last iteration.
    snapshot_mode="last",
    # Seed for the experiment; if not provided, a random seed is used.
    seed=1,
    # plot=True,
)
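
Note: the script assumes that "CartPoleBulletEnv-v0" is already registered with Gym, otherwise GymEnv("CartPoleBulletEnv-v0") will fail with an unknown-environment error. A minimal registration sketch is below; the entry_point path mirrors the import at the top of the script, while the max_episode_steps and reward_threshold values are illustrative assumptions, not values from this commit.

from gym.envs.registration import register

# Hypothetical registration; run once (e.g. in an envs/__init__.py)
# before the training script imports GymEnv.
register(
    id='CartPoleBulletEnv-v0',
    entry_point='envs.bullet.cartpole_bullet:CartPoleBulletEnv',
    max_episode_steps=200,     # assumed episode cap, as in classic CartPole
    reward_threshold=190.0,    # assumed "solved" threshold
)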