add yapf style and apply yapf to format all Python files
This recreates pull request #2192
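The yapf style file the commit title refers to is not part of the hunks shown below. As a minimal, illustrative sketch only (assuming yapf is installed; the based_on_style and column_limit values are assumptions, not the project's actual configuration), yapf's Python API can reproduce the kind of rewrite visible in the hunks below, where a wrapped tools.simulate(...) call is joined onto a single line:

    # Illustrative sketch, not the project's actual yapf configuration.
    from yapf.yapflib.yapf_api import FormatCode

    snippet = (
        "done, score, summary = tools.simulate(\n"
        "    batch_env, algo, should_log, force_reset)\n"
    )

    # Recent yapf releases return a (formatted_source, changed) tuple.
    formatted, changed = FormatCode(
        snippet, style_config='{based_on_style: pep8, column_limit: 100}')
    print(formatted)  # with a wide column limit the call typically fits on one line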
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Utilities for using reinforcement learning algorithms."""
 
 from __future__ import absolute_import
@@ -46,8 +45,7 @@ def define_simulation_graph(batch_env, algo_cls, config):
   do_report = tf.placeholder(tf.bool, name='do_report')
   force_reset = tf.placeholder(tf.bool, name='force_reset')
   algo = algo_cls(batch_env, step, is_training, should_log, config)
-  done, score, summary = tools.simulate(
-      batch_env, algo, should_log, force_reset)
+  done, score, summary = tools.simulate(batch_env, algo, should_log, force_reset)
   message = 'Graph contains {} trainable variables.'
   tf.logging.info(message.format(tools.count_weights()))
   # pylint: enable=unused-variable
@@ -67,9 +65,7 @@ def define_batch_env(constructor, num_agents, env_processes):
   """
   with tf.variable_scope('environments'):
     if env_processes:
-      envs = [
-          tools.wrappers.ExternalProcess(constructor)
-          for _ in range(num_agents)]
+      envs = [tools.wrappers.ExternalProcess(constructor) for _ in range(num_agents)]
     else:
       envs = [constructor() for _ in range(num_agents)]
     batch_env = tools.BatchEnv(envs, blocking=not env_processes)
@@ -111,9 +107,7 @@ def initialize_variables(sess, saver, logdir, checkpoint=None, resume=None):
     ValueError: If resume expected but no log directory specified.
     RuntimeError: If no resume expected but a checkpoint was found.
   """
-  sess.run(tf.group(
-      tf.local_variables_initializer(),
-      tf.global_variables_initializer()))
+  sess.run(tf.group(tf.local_variables_initializer(), tf.global_variables_initializer()))
   if resume and not (logdir or checkpoint):
     raise ValueError('Need to specify logdir to resume a checkpoint.')
   if logdir:
@@ -152,9 +146,8 @@ def save_config(config, logdir=None):
     with tf.gfile.GFile(config_path, 'w') as file_:
       yaml.dump(config, file_, default_flow_style=False)
   else:
-    message = (
-        'Start a new run without storing summaries and checkpoints since no '
-        'logging directory was specified.')
+    message = ('Start a new run without storing summaries and checkpoints since no '
+               'logging directory was specified.')
     tf.logging.info(message)
   return config
 
@@ -173,9 +166,8 @@ def load_config(logdir):
   """
   config_path = logdir and os.path.join(logdir, 'config.yaml')
   if not config_path or not tf.gfile.Exists(config_path):
-    message = (
-        'Cannot resume an existing run since the logging directory does not '
-        'contain a configuration file.')
+    message = ('Cannot resume an existing run since the logging directory does not '
+               'contain a configuration file.')
     raise IOError(message)
   with tf.gfile.FastGFile(config_path, 'r') as file_:
     config = yaml.load(file_, Loader=yaml.Loader)