add yapf style and apply yapf to format all Python files

This recreates pull request #2192
Author: Erwin Coumans
Date: 2019-04-27 07:31:15 -07:00
Parent: c591735042
Commit: ef9570c315
347 changed files with 70304 additions and 22752 deletions
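For context, a minimal sketch of how a reformat like this can be reproduced with yapf's Python API. The style values below (Google base style, 2-space indents, a roughly 100-column limit) are assumptions inferred from the wrapping visible in this diff, and the file path is a placeholder; they are not copied from the repository's actual yapf configuration.

# Hypothetical sketch: reformat a single file in-process with yapf.
# Style options are inferred from this diff, not read from the repo's config.
from yapf.yapflib.yapf_api import FormatCode

ASSUMED_STYLE = '{based_on_style: google, indent_width: 2, column_limit: 100}'

def reformat_file(path):
  """Rewrite the file at `path` in place if yapf would change its formatting."""
  with open(path) as handle:
    source = handle.read()
  formatted, changed = FormatCode(source, style_config=ASSUMED_STYLE)
  if changed:
    with open(path, 'w') as handle:
      handle.write(formatted)
  return changed

if __name__ == '__main__':
  reformat_file('example.py')  # placeholder path

In practice the same result comes from running yapf recursively and in place (yapf -i -r .) once a style file is present at the project root.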


@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Proximal Policy Optimization algorithm."""
from __future__ import absolute_import


@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Proximal Policy Optimization algorithm.
Based on John Schulman's implementation in Python and Theano:
@@ -49,51 +48,51 @@ class PPOAlgorithm(object):
self._is_training = is_training
self._should_log = should_log
self._config = config
self._observ_filter = normalize.StreamingNormalize(
self._batch_env.observ[0], center=True, scale=True, clip=5,
name='normalize_observ')
self._reward_filter = normalize.StreamingNormalize(
self._batch_env.reward[0], center=False, scale=True, clip=10,
name='normalize_reward')
self._observ_filter = normalize.StreamingNormalize(self._batch_env.observ[0],
center=True,
scale=True,
clip=5,
name='normalize_observ')
self._reward_filter = normalize.StreamingNormalize(self._batch_env.reward[0],
center=False,
scale=True,
clip=10,
name='normalize_reward')
# Memory stores tuple of observ, action, mean, logstd, reward.
template = (
self._batch_env.observ[0], self._batch_env.action[0],
self._batch_env.action[0], self._batch_env.action[0],
self._batch_env.reward[0])
self._memory = memory.EpisodeMemory(
template, config.update_every, config.max_length, 'memory')
template = (self._batch_env.observ[0], self._batch_env.action[0], self._batch_env.action[0],
self._batch_env.action[0], self._batch_env.reward[0])
self._memory = memory.EpisodeMemory(template, config.update_every, config.max_length, 'memory')
self._memory_index = tf.Variable(0, False)
use_gpu = self._config.use_gpu and utility.available_gpus()
with tf.device('/gpu:0' if use_gpu else '/cpu:0'):
# Create network variables for later calls to reuse.
action_size = self._batch_env.action.shape[1].value
self._network = tf.make_template(
'network', functools.partial(config.network, config, action_size))
self._network = tf.make_template('network',
functools.partial(config.network, config, action_size))
output = self._network(
tf.zeros_like(self._batch_env.observ)[:, None],
tf.ones(len(self._batch_env)))
tf.zeros_like(self._batch_env.observ)[:, None], tf.ones(len(self._batch_env)))
with tf.variable_scope('ppo_temporary'):
self._episodes = memory.EpisodeMemory(
template, len(batch_env), config.max_length, 'episodes')
self._episodes = memory.EpisodeMemory(template, len(batch_env), config.max_length,
'episodes')
if output.state is None:
self._last_state = None
else:
# Ensure the batch dimension is set.
tf.contrib.framework.nest.map_structure(
lambda x: x.set_shape([len(batch_env)] + x.shape.as_list()[1:]),
output.state)
lambda x: x.set_shape([len(batch_env)] + x.shape.as_list()[1:]), output.state)
# pylint: disable=undefined-variable
self._last_state = tf.contrib.framework.nest.map_structure(
lambda x: tf.Variable(lambda: tf.zeros_like(x), False),
output.state)
self._last_action = tf.Variable(
tf.zeros_like(self._batch_env.action), False, name='last_action')
self._last_mean = tf.Variable(
tf.zeros_like(self._batch_env.action), False, name='last_mean')
self._last_logstd = tf.Variable(
tf.zeros_like(self._batch_env.action), False, name='last_logstd')
self._penalty = tf.Variable(
self._config.kl_init_penalty, False, dtype=tf.float32)
lambda x: tf.Variable(lambda: tf.zeros_like(x), False), output.state)
self._last_action = tf.Variable(tf.zeros_like(self._batch_env.action),
False,
name='last_action')
self._last_mean = tf.Variable(tf.zeros_like(self._batch_env.action),
False,
name='last_mean')
self._last_logstd = tf.Variable(tf.zeros_like(self._batch_env.action),
False,
name='last_logstd')
self._penalty = tf.Variable(self._config.kl_init_penalty, False, dtype=tf.float32)
self._optimizer = self._config.optimizer(self._config.learning_rate)
def begin_episode(self, agent_indices):
@@ -109,8 +108,7 @@ class PPOAlgorithm(object):
if self._last_state is None:
reset_state = tf.no_op()
else:
reset_state = utility.reinit_nested_vars(
self._last_state, agent_indices)
reset_state = utility.reinit_nested_vars(self._last_state, agent_indices)
reset_buffer = self._episodes.clear(agent_indices)
with tf.control_dependencies([reset_state, reset_buffer]):
return tf.constant('')
@@ -130,36 +128,33 @@ class PPOAlgorithm(object):
if self._last_state is None:
state = None
else:
state = tf.contrib.framework.nest.map_structure(
lambda x: tf.gather(x, agent_indices), self._last_state)
state = tf.contrib.framework.nest.map_structure(lambda x: tf.gather(x, agent_indices),
self._last_state)
output = self._network(observ[:, None], tf.ones(observ.shape[0]), state)
action = tf.cond(
self._is_training, output.policy.sample, lambda: output.mean)
action = tf.cond(self._is_training, output.policy.sample, lambda: output.mean)
logprob = output.policy.log_prob(action)[:, 0]
# pylint: disable=g-long-lambda
summary = tf.cond(self._should_log, lambda: tf.summary.merge([
tf.summary.histogram('mean', output.mean[:, 0]),
tf.summary.histogram('std', tf.exp(output.logstd[:, 0])),
tf.summary.histogram('action', action[:, 0]),
tf.summary.histogram('logprob', logprob)]), str)
summary = tf.cond(
self._should_log, lambda: tf.summary.merge([
tf.summary.histogram('mean', output.mean[:, 0]),
tf.summary.histogram('std', tf.exp(output.logstd[:, 0])),
tf.summary.histogram('action', action[:, 0]),
tf.summary.histogram('logprob', logprob)
]), str)
# Remember current policy to append to memory in the experience callback.
if self._last_state is None:
assign_state = tf.no_op()
else:
assign_state = utility.assign_nested_vars(
self._last_state, output.state, agent_indices)
assign_state = utility.assign_nested_vars(self._last_state, output.state, agent_indices)
with tf.control_dependencies([
assign_state,
tf.scatter_update(
self._last_action, agent_indices, action[:, 0]),
tf.scatter_update(
self._last_mean, agent_indices, output.mean[:, 0]),
tf.scatter_update(
self._last_logstd, agent_indices, output.logstd[:, 0])]):
tf.scatter_update(self._last_action, agent_indices, action[:, 0]),
tf.scatter_update(self._last_mean, agent_indices, output.mean[:, 0]),
tf.scatter_update(self._last_logstd, agent_indices, output.logstd[:, 0])
]):
return tf.check_numerics(action[:, 0], 'action'), tf.identity(summary)
def experience(
self, agent_indices, observ, action, reward, unused_done, unused_nextob):
def experience(self, agent_indices, observ, action, reward, unused_done, unused_nextob):
"""Process the transition tuple of the current step.
When training, add the current transition tuple to the memory and update
@@ -181,34 +176,36 @@ class PPOAlgorithm(object):
return tf.cond(
self._is_training,
# pylint: disable=g-long-lambda
lambda: self._define_experience(
agent_indices, observ, action, reward), str)
lambda: self._define_experience(agent_indices, observ, action, reward),
str)
def _define_experience(self, agent_indices, observ, action, reward):
"""Implement the branch of experience() entered during training."""
update_filters = tf.summary.merge([
self._observ_filter.update(observ),
self._reward_filter.update(reward)])
update_filters = tf.summary.merge(
[self._observ_filter.update(observ),
self._reward_filter.update(reward)])
with tf.control_dependencies([update_filters]):
if self._config.train_on_agent_action:
# NOTE: Doesn't seem to change much.
action = self._last_action
batch = (
observ, action, tf.gather(self._last_mean, agent_indices),
tf.gather(self._last_logstd, agent_indices), reward)
batch = (observ, action, tf.gather(self._last_mean,
agent_indices), tf.gather(self._last_logstd,
agent_indices), reward)
append = self._episodes.append(batch, agent_indices)
with tf.control_dependencies([append]):
norm_observ = self._observ_filter.transform(observ)
norm_reward = tf.reduce_mean(self._reward_filter.transform(reward))
# pylint: disable=g-long-lambda
summary = tf.cond(self._should_log, lambda: tf.summary.merge([
update_filters,
self._observ_filter.summary(),
self._reward_filter.summary(),
tf.summary.scalar('memory_size', self._memory_index),
tf.summary.histogram('normalized_observ', norm_observ),
tf.summary.histogram('action', self._last_action),
tf.summary.scalar('normalized_reward', norm_reward)]), str)
summary = tf.cond(
self._should_log, lambda: tf.summary.merge([
update_filters,
self._observ_filter.summary(),
self._reward_filter.summary(),
tf.summary.scalar('memory_size', self._memory_index),
tf.summary.histogram('normalized_observ', norm_observ),
tf.summary.histogram('action', self._last_action),
tf.summary.scalar('normalized_reward', norm_reward)
]), str)
return summary
def end_episode(self, agent_indices):
@@ -226,20 +223,16 @@ class PPOAlgorithm(object):
Summary tensor.
"""
with tf.name_scope('end_episode/'):
return tf.cond(
self._is_training,
lambda: self._define_end_episode(agent_indices), str)
return tf.cond(self._is_training, lambda: self._define_end_episode(agent_indices), str)
def _define_end_episode(self, agent_indices):
"""Implement the branch of end_episode() entered during training."""
episodes, length = self._episodes.data(agent_indices)
space_left = self._config.update_every - self._memory_index
use_episodes = tf.range(tf.minimum(
tf.shape(agent_indices)[0], space_left))
use_episodes = tf.range(tf.minimum(tf.shape(agent_indices)[0], space_left))
episodes = [tf.gather(elem, use_episodes) for elem in episodes]
append = self._memory.replace(
episodes, tf.gather(length, use_episodes),
use_episodes + self._memory_index)
append = self._memory.replace(episodes, tf.gather(length, use_episodes),
use_episodes + self._memory_index)
with tf.control_dependencies([append]):
inc_index = self._memory_index.assign_add(tf.shape(use_episodes)[0])
with tf.control_dependencies([inc_index]):
@@ -256,8 +249,7 @@ class PPOAlgorithm(object):
Summary tensor.
"""
with tf.name_scope('training'):
assert_full = tf.assert_equal(
self._memory_index, self._config.update_every)
assert_full = tf.assert_equal(self._memory_index, self._config.update_every)
with tf.control_dependencies([assert_full]):
data = self._memory.data()
(observ, action, old_mean, old_logstd, reward), length = data
@@ -265,22 +257,18 @@ class PPOAlgorithm(object):
length = tf.identity(length)
observ = self._observ_filter.transform(observ)
reward = self._reward_filter.transform(reward)
update_summary = self._perform_update_steps(
observ, action, old_mean, old_logstd, reward, length)
update_summary = self._perform_update_steps(observ, action, old_mean, old_logstd, reward,
length)
with tf.control_dependencies([update_summary]):
penalty_summary = self._adjust_penalty(
observ, old_mean, old_logstd, length)
penalty_summary = self._adjust_penalty(observ, old_mean, old_logstd, length)
with tf.control_dependencies([penalty_summary]):
clear_memory = tf.group(
self._memory.clear(), self._memory_index.assign(0))
clear_memory = tf.group(self._memory.clear(), self._memory_index.assign(0))
with tf.control_dependencies([clear_memory]):
weight_summary = utility.variable_summaries(
tf.trainable_variables(), self._config.weight_summaries)
return tf.summary.merge([
update_summary, penalty_summary, weight_summary])
weight_summary = utility.variable_summaries(tf.trainable_variables(),
self._config.weight_summaries)
return tf.summary.merge([update_summary, penalty_summary, weight_summary])
def _perform_update_steps(
self, observ, action, old_mean, old_logstd, reward, length):
def _perform_update_steps(self, observ, action, old_mean, old_logstd, reward, length):
"""Perform multiple update steps of value function and policy.
The advantage is computed once at the beginning and shared across
@@ -298,37 +286,29 @@ class PPOAlgorithm(object):
Returns:
Summary tensor.
"""
return_ = utility.discounted_return(
reward, length, self._config.discount)
return_ = utility.discounted_return(reward, length, self._config.discount)
value = self._network(observ, length).value
if self._config.gae_lambda:
advantage = utility.lambda_return(
reward, value, length, self._config.discount,
self._config.gae_lambda)
advantage = utility.lambda_return(reward, value, length, self._config.discount,
self._config.gae_lambda)
else:
advantage = return_ - value
mean, variance = tf.nn.moments(advantage, axes=[0, 1], keep_dims=True)
advantage = (advantage - mean) / (tf.sqrt(variance) + 1e-8)
advantage = tf.Print(
advantage, [tf.reduce_mean(return_), tf.reduce_mean(value)],
'return and value: ')
advantage = tf.Print(
advantage, [tf.reduce_mean(advantage)],
'normalized advantage: ')
advantage = tf.Print(advantage,
[tf.reduce_mean(return_), tf.reduce_mean(value)], 'return and value: ')
advantage = tf.Print(advantage, [tf.reduce_mean(advantage)], 'normalized advantage: ')
# pylint: disable=g-long-lambda
value_loss, policy_loss, summary = tf.scan(
lambda _1, _2: self._update_step(
observ, action, old_mean, old_logstd, reward, advantage, length),
tf.range(self._config.update_epochs),
[0., 0., ''], parallel_iterations=1)
print_losses = tf.group(
tf.Print(0, [tf.reduce_mean(value_loss)], 'value loss: '),
tf.Print(0, [tf.reduce_mean(policy_loss)], 'policy loss: '))
value_loss, policy_loss, summary = tf.scan(lambda _1, _2: self._update_step(
observ, action, old_mean, old_logstd, reward, advantage, length),
tf.range(self._config.update_epochs), [0., 0., ''],
parallel_iterations=1)
print_losses = tf.group(tf.Print(0, [tf.reduce_mean(value_loss)], 'value loss: '),
tf.Print(0, [tf.reduce_mean(policy_loss)], 'policy loss: '))
with tf.control_dependencies([value_loss, policy_loss, print_losses]):
return summary[self._config.update_epochs // 2]
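As a side note on the block reformatted above: independent of the new line wrapping, the advantage fed into the update steps is the batch-and-time normalized quantity

\hat{A}_t = \frac{A_t - \operatorname{mean}(A)}{\sqrt{\operatorname{var}(A)} + 10^{-8}},
\qquad
A_t =
\begin{cases}
R_t^{\lambda} & \text{if \texttt{gae\_lambda} is set,} \\
R_t - V(s_t) & \text{otherwise,}
\end{cases}

where R_t is the discounted Monte-Carlo return and R_t^{\lambda} the value returned by utility.lambda_return.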
def _update_step(
self, observ, action, old_mean, old_logstd, reward, advantage, length):
def _update_step(self, observ, action, old_mean, old_logstd, reward, advantage, length):
"""Compute the current combined loss and perform a gradient update step.
Args:
@@ -345,27 +325,20 @@ class PPOAlgorithm(object):
"""
value_loss, value_summary = self._value_loss(observ, reward, length)
network = self._network(observ, length)
policy_loss, policy_summary = self._policy_loss(
network.mean, network.logstd, old_mean, old_logstd, action,
advantage, length)
value_gradients, value_variables = (
zip(*self._optimizer.compute_gradients(value_loss)))
policy_gradients, policy_variables = (
zip(*self._optimizer.compute_gradients(policy_loss)))
policy_loss, policy_summary = self._policy_loss(network.mean, network.logstd, old_mean,
old_logstd, action, advantage, length)
value_gradients, value_variables = (zip(*self._optimizer.compute_gradients(value_loss)))
policy_gradients, policy_variables = (zip(*self._optimizer.compute_gradients(policy_loss)))
all_gradients = value_gradients + policy_gradients
all_variables = value_variables + policy_variables
optimize = self._optimizer.apply_gradients(
zip(all_gradients, all_variables))
optimize = self._optimizer.apply_gradients(zip(all_gradients, all_variables))
summary = tf.summary.merge([
value_summary, policy_summary,
tf.summary.scalar(
'value_gradient_norm', tf.global_norm(value_gradients)),
tf.summary.scalar(
'policy_gradient_norm', tf.global_norm(policy_gradients)),
utility.gradient_summaries(
zip(value_gradients, value_variables), dict(value=r'.*')),
utility.gradient_summaries(
zip(policy_gradients, policy_variables), dict(policy=r'.*'))])
tf.summary.scalar('value_gradient_norm', tf.global_norm(value_gradients)),
tf.summary.scalar('policy_gradient_norm', tf.global_norm(policy_gradients)),
utility.gradient_summaries(zip(value_gradients, value_variables), dict(value=r'.*')),
utility.gradient_summaries(zip(policy_gradients, policy_variables), dict(policy=r'.*'))
])
with tf.control_dependencies([optimize]):
return [tf.identity(x) for x in (value_loss, policy_loss, summary)]
@@ -385,18 +358,17 @@ class PPOAlgorithm(object):
"""
with tf.name_scope('value_loss'):
value = self._network(observ, length).value
return_ = utility.discounted_return(
reward, length, self._config.discount)
return_ = utility.discounted_return(reward, length, self._config.discount)
advantage = return_ - value
value_loss = 0.5 * self._mask(advantage ** 2, length)
value_loss = 0.5 * self._mask(advantage**2, length)
summary = tf.summary.merge([
tf.summary.histogram('value_loss', value_loss),
tf.summary.scalar('avg_value_loss', tf.reduce_mean(value_loss))])
tf.summary.scalar('avg_value_loss', tf.reduce_mean(value_loss))
])
value_loss = tf.reduce_mean(value_loss)
return tf.check_numerics(value_loss, 'value_loss'), summary
def _policy_loss(
self, mean, logstd, old_mean, old_logstd, action, advantage, length):
def _policy_loss(self, mean, logstd, old_mean, old_logstd, action, advantage, length):
"""Compute the policy loss composed of multiple components.
1. The policy gradient loss is importance sampled from the data-collecting
@@ -420,24 +392,20 @@ class PPOAlgorithm(object):
"""
with tf.name_scope('policy_loss'):
entropy = utility.diag_normal_entropy(mean, logstd)
kl = tf.reduce_mean(self._mask(utility.diag_normal_kl(
old_mean, old_logstd, mean, logstd), length), 1)
kl = tf.reduce_mean(
self._mask(utility.diag_normal_kl(old_mean, old_logstd, mean, logstd), length), 1)
policy_gradient = tf.exp(
utility.diag_normal_logpdf(mean, logstd, action) -
utility.diag_normal_logpdf(old_mean, old_logstd, action))
surrogate_loss = -tf.reduce_mean(self._mask(
policy_gradient * tf.stop_gradient(advantage), length), 1)
surrogate_loss = -tf.reduce_mean(
self._mask(policy_gradient * tf.stop_gradient(advantage), length), 1)
kl_penalty = self._penalty * kl
cutoff_threshold = self._config.kl_target * self._config.kl_cutoff_factor
cutoff_count = tf.reduce_sum(
tf.cast(kl > cutoff_threshold, tf.int32))
with tf.control_dependencies([tf.cond(
cutoff_count > 0,
lambda: tf.Print(0, [cutoff_count], 'kl cutoff! '), int)]):
kl_cutoff = (
self._config.kl_cutoff_coef *
tf.cast(kl > cutoff_threshold, tf.float32) *
(kl - cutoff_threshold) ** 2)
cutoff_count = tf.reduce_sum(tf.cast(kl > cutoff_threshold, tf.int32))
with tf.control_dependencies(
[tf.cond(cutoff_count > 0, lambda: tf.Print(0, [cutoff_count], 'kl cutoff! '), int)]):
kl_cutoff = (self._config.kl_cutoff_coef * tf.cast(kl > cutoff_threshold, tf.float32) *
(kl - cutoff_threshold)**2)
policy_loss = surrogate_loss + kl_penalty + kl_cutoff
summary = tf.summary.merge([
tf.summary.histogram('entropy', entropy),
@@ -449,7 +417,8 @@ class PPOAlgorithm(object):
tf.summary.histogram('policy_loss', policy_loss),
tf.summary.scalar('avg_surr_loss', tf.reduce_mean(surrogate_loss)),
tf.summary.scalar('avg_kl_penalty', tf.reduce_mean(kl_penalty)),
tf.summary.scalar('avg_policy_loss', tf.reduce_mean(policy_loss))])
tf.summary.scalar('avg_policy_loss', tf.reduce_mean(policy_loss))
])
policy_loss = tf.reduce_mean(policy_loss, 0)
return tf.check_numerics(policy_loss, 'policy_loss'), summary
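For readers tracking the reformatted _policy_loss body above, the objective it assembles (unchanged by this commit) is, with \beta = self._penalty, \varepsilon = kl_cutoff_coef and d = kl_target * kl_cutoff_factor:

\rho_t = \exp\bigl(\log\pi(a_t \mid s_t) - \log\pi_{\text{old}}(a_t \mid s_t)\bigr),
\qquad
L_{\text{surr}} = -\operatorname{mean}_t\bigl[\rho_t\,\hat{A}_t\bigr],

L_{\text{policy}} = L_{\text{surr}} + \beta\,\mathrm{KL}(\pi_{\text{old}}\,\|\,\pi)
  + \varepsilon\,\mathbf{1}\bigl[\mathrm{KL} > d\bigr]\,\bigl(\mathrm{KL} - d\bigr)^{2}.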
@@ -471,30 +440,30 @@ class PPOAlgorithm(object):
"""
with tf.name_scope('adjust_penalty'):
network = self._network(observ, length)
assert_change = tf.assert_equal(
tf.reduce_all(tf.equal(network.mean, old_mean)), False,
message='policy should change')
assert_change = tf.assert_equal(tf.reduce_all(tf.equal(network.mean, old_mean)),
False,
message='policy should change')
print_penalty = tf.Print(0, [self._penalty], 'current penalty: ')
with tf.control_dependencies([assert_change, print_penalty]):
kl_change = tf.reduce_mean(self._mask(utility.diag_normal_kl(
old_mean, old_logstd, network.mean, network.logstd), length))
kl_change = tf.reduce_mean(
self._mask(utility.diag_normal_kl(old_mean, old_logstd, network.mean, network.logstd),
length))
kl_change = tf.Print(kl_change, [kl_change], 'kl change: ')
maybe_increase = tf.cond(
kl_change > 1.3 * self._config.kl_target,
# pylint: disable=g-long-lambda
lambda: tf.Print(self._penalty.assign(
self._penalty * 1.5), [0], 'increase penalty '),
lambda: tf.Print(self._penalty.assign(self._penalty * 1.5), [0], 'increase penalty '),
float)
maybe_decrease = tf.cond(
kl_change < 0.7 * self._config.kl_target,
# pylint: disable=g-long-lambda
lambda: tf.Print(self._penalty.assign(
self._penalty / 1.5), [0], 'decrease penalty '),
lambda: tf.Print(self._penalty.assign(self._penalty / 1.5), [0], 'decrease penalty '),
float)
with tf.control_dependencies([maybe_increase, maybe_decrease]):
return tf.summary.merge([
tf.summary.scalar('kl_change', kl_change),
tf.summary.scalar('penalty', self._penalty)])
tf.summary.scalar('penalty', self._penalty)
])
def _mask(self, tensor, length):
"""Set padding elements of a batch of sequences to zero.


@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Memory that stores episodes."""
from __future__ import absolute_import
@@ -43,10 +42,9 @@ class EpisodeMemory(object):
self._scope = var_scope
self._length = tf.Variable(tf.zeros(capacity, tf.int32), False)
self._buffers = [
tf.Variable(tf.zeros(
[capacity, max_length] + elem.shape.as_list(),
elem.dtype), False)
for elem in template]
tf.Variable(tf.zeros([capacity, max_length] + elem.shape.as_list(), elem.dtype), False)
for elem in template
]
def length(self, rows=None):
"""Tensor holding the current length of episodes.
@@ -72,13 +70,11 @@ class EpisodeMemory(object):
"""
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
assert_capacity = tf.assert_less(
rows, self._capacity,
message='capacity exceeded')
assert_capacity = tf.assert_less(rows, self._capacity, message='capacity exceeded')
with tf.control_dependencies([assert_capacity]):
assert_max_length = tf.assert_less(
tf.gather(self._length, rows), self._max_length,
message='max length exceeded')
assert_max_length = tf.assert_less(tf.gather(self._length, rows),
self._max_length,
message='max length exceeded')
append_ops = []
with tf.control_dependencies([assert_max_length]):
for buffer_, elements in zip(self._buffers, transitions):
@@ -86,8 +82,7 @@ class EpisodeMemory(object):
indices = tf.stack([rows, timestep], 1)
append_ops.append(tf.scatter_nd_update(buffer_, indices, elements))
with tf.control_dependencies(append_ops):
episode_mask = tf.reduce_sum(tf.one_hot(
rows, self._capacity, dtype=tf.int32), 0)
episode_mask = tf.reduce_sum(tf.one_hot(rows, self._capacity, dtype=tf.int32), 0)
return self._length.assign_add(episode_mask)
def replace(self, episodes, length, rows=None):
@@ -103,11 +98,11 @@ class EpisodeMemory(object):
"""
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
assert_capacity = tf.assert_less(
rows, self._capacity, message='capacity exceeded')
assert_capacity = tf.assert_less(rows, self._capacity, message='capacity exceeded')
with tf.control_dependencies([assert_capacity]):
assert_max_length = tf.assert_less_equal(
length, self._max_length, message='max length exceeded')
assert_max_length = tf.assert_less_equal(length,
self._max_length,
message='max length exceeded')
replace_ops = []
with tf.control_dependencies([assert_max_length]):
for buffer_, elements in zip(self._buffers, episodes):


@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Normalize tensors based on streaming estimates of mean and variance."""
from __future__ import absolute_import
@@ -24,8 +23,7 @@ import tensorflow as tf
class StreamingNormalize(object):
"""Normalize tensors based on streaming estimates of mean and variance."""
def __init__(
self, template, center=True, scale=True, clip=10, name='normalize'):
def __init__(self, template, center=True, scale=True, clip=10, name='normalize'):
"""Normalize tensors based on streaming estimates of mean and variance.
Centering the value, scaling it by the standard deviation, and clipping
@@ -69,8 +67,7 @@ class StreamingNormalize(object):
if self._scale:
# We cannot scale before seeing at least two samples.
value /= tf.cond(
self._count > 1, lambda: self._std() + 1e-8,
lambda: tf.ones_like(self._var_sum))[None]
self._count > 1, lambda: self._std() + 1e-8, lambda: tf.ones_like(self._var_sum))[None]
if self._clip:
value = tf.clip_by_value(value, -self._clip, self._clip)
# Remove batch dimension if necessary.
@@ -97,8 +94,7 @@ class StreamingNormalize(object):
mean_delta = tf.reduce_sum(value - self._mean[None, ...], 0)
new_mean = self._mean + mean_delta / step
new_mean = tf.cond(self._count > 1, lambda: new_mean, lambda: value[0])
var_delta = (
value - self._mean[None, ...]) * (value - new_mean[None, ...])
var_delta = (value - self._mean[None, ...]) * (value - new_mean[None, ...])
new_var_sum = self._var_sum + tf.reduce_sum(var_delta, 0)
with tf.control_dependencies([new_mean, new_var_sum]):
update = self._mean.assign(new_mean), self._var_sum.assign(new_var_sum)
@@ -116,10 +112,8 @@ class StreamingNormalize(object):
Operation.
"""
with tf.name_scope(self._name + '/reset'):
return tf.group(
self._count.assign(0),
self._mean.assign(tf.zeros_like(self._mean)),
self._var_sum.assign(tf.zeros_like(self._var_sum)))
return tf.group(self._count.assign(0), self._mean.assign(tf.zeros_like(self._mean)),
self._var_sum.assign(tf.zeros_like(self._var_sum)))
def summary(self):
"""Summary string of mean and standard deviation.
@@ -128,10 +122,8 @@ class StreamingNormalize(object):
Summary tensor.
"""
with tf.name_scope(self._name + '/summary'):
mean_summary = tf.cond(
self._count > 0, lambda: self._summary('mean', self._mean), str)
std_summary = tf.cond(
self._count > 1, lambda: self._summary('stddev', self._std()), str)
mean_summary = tf.cond(self._count > 0, lambda: self._summary('mean', self._mean), str)
std_summary = tf.cond(self._count > 1, lambda: self._summary('stddev', self._std()), str)
return tf.summary.merge([mean_summary, std_summary])
def _std(self):
@@ -143,10 +135,8 @@ class StreamingNormalize(object):
Returns:
Tensor of current variance.
"""
variance = tf.cond(
self._count > 1,
lambda: self._var_sum / tf.cast(self._count - 1, tf.float32),
lambda: tf.ones_like(self._var_sum) * float('nan'))
variance = tf.cond(self._count > 1, lambda: self._var_sum / tf.cast(
self._count - 1, tf.float32), lambda: tf.ones_like(self._var_sum) * float('nan'))
# The epsilon corrects for small negative variance values caused by
# the algorithm. It was empirically chosen to work with all environments
# tested.
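For reference, the two hunks above reformat a batched Welford-style estimator; with n = self._count after the update and M = self._var_sum, the quantities being maintained are

\mu \leftarrow \mu + \frac{1}{n}\sum_i (x_i - \mu), \qquad
M \leftarrow M + \sum_i (x_i - \mu_{\text{old}})(x_i - \mu_{\text{new}}), \qquad
\hat{\sigma}^2 = \frac{M}{n-1},

so the standard deviation used for scaling is the Bessel-corrected estimate once at least two samples have been seen.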


@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the PPO algorithm."""
from __future__ import absolute_import
@@ -37,8 +36,7 @@ def reinit_nested_vars(variables, indices=None):
Operation.
"""
if isinstance(variables, (tuple, list)):
return tf.group(*[
reinit_nested_vars(variable, indices) for variable in variables])
return tf.group(*[reinit_nested_vars(variable, indices) for variable in variables])
if indices is None:
return variables.assign(tf.zeros_like(variables))
else:
@@ -58,9 +56,8 @@ def assign_nested_vars(variables, tensors, indices=None):
Operation.
"""
if isinstance(variables, (tuple, list)):
return tf.group(*[
assign_nested_vars(variable, tensor)
for variable, tensor in zip(variables, tensors)])
return tf.group(
*[assign_nested_vars(variable, tensor) for variable, tensor in zip(variables, tensors)])
if indices is None:
return variables.assign(tensors)
else:
@@ -71,10 +68,11 @@ def discounted_return(reward, length, discount):
"""Discounted Monte-Carlo returns."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
return_ = tf.reverse(tf.transpose(tf.scan(
lambda agg, cur: cur + discount * agg,
tf.transpose(tf.reverse(mask * reward, [1]), [1, 0]),
tf.zeros_like(reward[:, -1]), 1, False), [1, 0]), [1])
return_ = tf.reverse(
tf.transpose(
tf.scan(lambda agg, cur: cur + discount * agg,
tf.transpose(tf.reverse(mask * reward, [1]), [1, 0]),
tf.zeros_like(reward[:, -1]), 1, False), [1, 0]), [1])
return tf.check_numerics(tf.stop_gradient(return_), 'return')
@@ -85,9 +83,8 @@ def fixed_step_return(reward, value, length, discount, window):
return_ = tf.zeros_like(reward)
for _ in range(window):
return_ += reward
reward = discount * tf.concat(
[reward[:, 1:], tf.zeros_like(reward[:, -1:])], 1)
return_ += discount ** window * tf.concat(
reward = discount * tf.concat([reward[:, 1:], tf.zeros_like(reward[:, -1:])], 1)
return_ += discount**window * tf.concat(
[value[:, window:], tf.zeros_like(value[:, -window:]), 1])
return tf.check_numerics(tf.stop_gradient(mask * return_), 'return')
@@ -99,10 +96,11 @@ def lambda_return(reward, value, length, discount, lambda_):
sequence = mask * reward + discount * value * (1 - lambda_)
discount = mask * discount * lambda_
sequence = tf.stack([sequence, discount], 2)
return_ = tf.reverse(tf.transpose(tf.scan(
lambda agg, cur: cur[0] + cur[1] * agg,
tf.transpose(tf.reverse(sequence, [1]), [1, 2, 0]),
tf.zeros_like(value[:, -1]), 1, False), [1, 0]), [1])
return_ = tf.reverse(
tf.transpose(
tf.scan(lambda agg, cur: cur[0] + cur[1] * agg,
tf.transpose(tf.reverse(sequence, [1]), [1, 2, 0]), tf.zeros_like(value[:, -1]),
1, False), [1, 0]), [1])
return tf.check_numerics(tf.stop_gradient(return_), 'return')
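Both return helpers above (discounted_return and lambda_return) wrap the same reversed tf.scan; written out, the recursions evaluated right-to-left over time, with mask m_t = \mathbf{1}[t < \text{length}] and the value after the last step initialized to zero, are

R_t = m_t\,r_t + \gamma\,R_{t+1}

for discounted_return, and

R_t^{\lambda} = \bigl(m_t\,r_t + \gamma(1-\lambda)\,v_t\bigr) + m_t\,\gamma\lambda\,R_{t+1}^{\lambda}

for lambda_return.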
@@ -112,27 +110,26 @@ def lambda_advantage(reward, value, length, discount):
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
next_value = tf.concat([value[:, 1:], tf.zeros_like(value[:, -1:])], 1)
delta = reward + discount * next_value - value
advantage = tf.reverse(tf.transpose(tf.scan(
lambda agg, cur: cur + discount * agg,
tf.transpose(tf.reverse(mask * delta, [1]), [1, 0]),
tf.zeros_like(delta[:, -1]), 1, False), [1, 0]), [1])
advantage = tf.reverse(
tf.transpose(
tf.scan(lambda agg, cur: cur + discount * agg,
tf.transpose(tf.reverse(mask * delta, [1]), [1, 0]), tf.zeros_like(delta[:, -1]),
1, False), [1, 0]), [1])
return tf.check_numerics(tf.stop_gradient(advantage), 'advantage')
def diag_normal_kl(mean0, logstd0, mean1, logstd1):
"""Epirical KL divergence of two normals with diagonal covariance."""
logstd0_2, logstd1_2 = 2 * logstd0, 2 * logstd1
return 0.5 * (
tf.reduce_sum(tf.exp(logstd0_2 - logstd1_2), -1) +
tf.reduce_sum((mean1 - mean0) ** 2 / tf.exp(logstd1_2), -1) +
tf.reduce_sum(logstd1_2, -1) - tf.reduce_sum(logstd0_2, -1) -
mean0.shape[-1].value)
return 0.5 * (tf.reduce_sum(tf.exp(logstd0_2 - logstd1_2), -1) + tf.reduce_sum(
(mean1 - mean0)**2 / tf.exp(logstd1_2), -1) + tf.reduce_sum(logstd1_2, -1) -
tf.reduce_sum(logstd0_2, -1) - mean0.shape[-1].value)
def diag_normal_logpdf(mean, logstd, loc):
"""Log density of a normal with diagonal covariance."""
constant = -0.5 * math.log(2 * math.pi) - logstd
value = -0.5 * ((loc - mean) / tf.exp(logstd)) ** 2
value = -0.5 * ((loc - mean) / tf.exp(logstd))**2
return tf.reduce_sum(constant + value, -1)
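For reference, with \sigma denoting logstd, the two density helpers reformatted above compute

\mathrm{KL}\bigl(\mathcal{N}(\mu_0, e^{2\sigma_0})\,\|\,\mathcal{N}(\mu_1, e^{2\sigma_1})\bigr)
  = \tfrac{1}{2}\sum_i\Bigl(e^{2\sigma_{0,i}-2\sigma_{1,i}} + \frac{(\mu_{1,i}-\mu_{0,i})^2}{e^{2\sigma_{1,i}}} + 2\sigma_{1,i} - 2\sigma_{0,i} - 1\Bigr)

and

\log p(x \mid \mu, \sigma) = \sum_i\Bigl(-\tfrac{1}{2}\log(2\pi) - \sigma_i - \tfrac{1}{2}\Bigl(\frac{x_i-\mu_i}{e^{\sigma_i}}\Bigr)^2\Bigr).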