apply yapf
@@ -20,17 +20,17 @@ import argparse
 
 # Setting the Hyper Parameters
 class Hp():
 
   def __init__(self):
     self.nb_steps = 10000
     self.episode_length = 1000
     self.learning_rate = 0.02
     self.nb_directions = 16
     self.nb_best_directions = 8
     assert self.nb_best_directions <= self.nb_directions
     self.noise = 0.03
     self.seed = 1
     self.env_name = 'HalfCheetahBulletEnv-v0'
 
 
 # Multiprocess Exploring the policy on one specific direction and over one episode
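
These hyperparameters map directly onto the basic ARS exploration step. Below is a minimal sketch of how nb_directions and noise come into play, assuming a linear policy held in a single weight matrix; the (6, 26) shape is an illustrative placeholder, not something taken from this file. nb_best_directions and learning_rate only enter later, in the update applied after the rollouts.

import numpy as np

nb_directions = 16  # hp.nb_directions
noise = 0.03        # hp.noise

# Hypothetical linear policy: one weight matrix of shape (action_dim, observation_dim).
theta = np.zeros((6, 26))

# One Gaussian perturbation per direction -- the role played by policy.sample_deltas().
deltas = [np.random.randn(*theta.shape) for _ in range(nb_directions)]

# Every direction is rolled out twice per training step, once with +noise*delta and once
# with -noise*delta, so a single step costs 2 * nb_directions episodes of up to
# episode_length environment steps.
theta_pos = theta + noise * deltas[0]
theta_neg = theta - noise * deltas[0]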
@@ -165,52 +165,54 @@ def explore(env, normalizer, policy, direction, delta, hp):
 
 
 def train(env, policy, normalizer, hp, parentPipes, args):
   for step in range(hp.nb_steps):
 
     # Initializing the perturbations deltas and the positive/negative rewards
     deltas = policy.sample_deltas()
     positive_rewards = [0] * hp.nb_directions
     negative_rewards = [0] * hp.nb_directions
 
     if parentPipes:
       for k in range(hp.nb_directions):
         parentPipe = parentPipes[k]
-        parentPipe.send([_EXPLORE,[normalizer, policy, hp, "positive", deltas[k]]])
+        parentPipe.send([_EXPLORE, [normalizer, policy, hp, "positive", deltas[k]]])
       for k in range(hp.nb_directions):
         positive_rewards[k] = parentPipes[k].recv()[0]
 
       for k in range(hp.nb_directions):
         parentPipe = parentPipes[k]
-        parentPipe.send([_EXPLORE,[normalizer, policy, hp, "negative", deltas[k]]])
+        parentPipe.send([_EXPLORE, [normalizer, policy, hp, "negative", deltas[k]]])
       for k in range(hp.nb_directions):
         negative_rewards[k] = parentPipes[k].recv()[0]
 
     else:
       # Getting the positive rewards in the positive directions
       for k in range(hp.nb_directions):
         positive_rewards[k] = explore(env, normalizer, policy, "positive", deltas[k], hp)
 
-
       # Getting the negative rewards in the negative/opposite directions
       for k in range(hp.nb_directions):
         negative_rewards[k] = explore(env, normalizer, policy, "negative", deltas[k], hp)
 
     # Gathering all the positive/negative rewards to compute the standard deviation of these rewards
     all_rewards = np.array(positive_rewards + negative_rewards)
     sigma_r = all_rewards.std()
 
     # Sorting the rollouts by the max(r_pos, r_neg) and selecting the best directions
-    scores = {k:max(r_pos, r_neg) for k,(r_pos,r_neg) in enumerate(zip(positive_rewards, negative_rewards))}
-    order = sorted(scores.keys(), key = lambda x:-scores[x])[:hp.nb_best_directions]
+    scores = {
+        k: max(r_pos, r_neg)
+        for k, (r_pos, r_neg) in enumerate(zip(positive_rewards, negative_rewards))
+    }
+    order = sorted(scores.keys(), key=lambda x: -scores[x])[:hp.nb_best_directions]
     rollouts = [(positive_rewards[k], negative_rewards[k], deltas[k]) for k in order]
 
     # Updating our policy
     policy.update(rollouts, sigma_r, args)
 
     # Printing the final reward of the policy after the update
     reward_evaluation = explore(env, normalizer, policy, None, None, hp)
     print('Step:', step, 'Reward:', reward_evaluation)
 
 
 # Running the main code
 
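
Each parentPipe.send([_EXPLORE, ...]) above is answered by a worker with a single-element list, which is why the reward comes back as parentPipes[k].recv()[0]. The weight change itself happens in policy.update(rollouts, sigma_r, args), which this hunk does not show; for reference, the standard Augmented Random Search step it presumably performs looks like the sketch below (the free function ars_update and the explicit theta argument are illustrative only, and args is ignored here).

import numpy as np


def ars_update(theta, rollouts, sigma_r, learning_rate, nb_best_directions):
  # rollouts is the list built above: (r_pos, r_neg, delta) for the best directions.
  # Move theta along the reward-weighted sum of the sampled deltas, scaled by the
  # learning rate and normalized by the standard deviation of all observed rewards.
  step = np.zeros_like(theta)
  for r_pos, r_neg, delta in rollouts:
    step += (r_pos - r_neg) * delta
  return theta + learning_rate / (nb_best_directions * sigma_r) * step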
@@ -226,19 +228,15 @@ if __name__ == "__main__":
   mp.freeze_support()
 
   parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-  parser.add_argument('--env',
-                      help='Gym environment name',
-                      type=str,
-                      default='HalfCheetahBulletEnv-v0')
+  parser.add_argument(
+      '--env', help='Gym environment name', type=str, default='HalfCheetahBulletEnv-v0')
   parser.add_argument('--seed', help='RNG seed', type=int, default=1)
   parser.add_argument('--render', help='OpenGL Visualizer', type=int, default=0)
   parser.add_argument('--movie', help='rgb_array gym movie', type=int, default=0)
   parser.add_argument('--steps', help='Number of steps', type=int, default=10000)
   parser.add_argument('--policy', help='Starting policy file (npy)', type=str, default='')
-  parser.add_argument('--logdir',
-                      help='Directory root to log policy files (npy)',
-                      type=str,
-                      default='.')
+  parser.add_argument(
+      '--logdir', help='Directory root to log policy files (npy)', type=str, default='.')
   parser.add_argument('--mp', help='Enable multiprocessing', type=int, default=1)
 
   args = parser.parse_args()
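
The parsed flags then configure the run. The sketch below is a hypothetical view of how they plausibly feed the objects defined earlier (the exact wiring in the script may differ); it assumes gym and pybullet_envs are installed so that HalfCheetahBulletEnv-v0 resolves.

import gym
import pybullet_envs  # registers the *BulletEnv-v0 environments with gym

hp = Hp()
hp.env_name = args.env    # '--env'
hp.seed = args.seed       # '--seed'
hp.nb_steps = args.steps  # '--steps'

env = gym.make(hp.env_name)
if args.render:
  env.render(mode='human')  # '--render': open the OpenGL visualizer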