Fix gym deprecation warnings

Antonin RAFFIN
2018-12-28 14:30:05 +01:00
parent 8bc1c8e01b
commit 0df3527884
24 changed files with 229 additions and 244 deletions
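
Before gym 0.9.6, gym.Env dispatched its public API to underscore-prefixed hook methods, so environments overrode _step, _reset, _seed, _render, and _close. From 0.9.6 on, subclasses override the public names directly and the old hooks trigger deprecation warnings; this commit renames the overrides in the Minitaur environments accordingly. A minimal before/after sketch (SketchEnv and its bodies are illustrative, not code from this repository):

import gym
import numpy as np


class SketchEnv(gym.Env):

  # gym < 0.9.6: override the private hook, e.g.
  #   def _step(self, action): ...
  # gym >= 0.9.6: override the public method directly.

  def reset(self):  # was: def _reset(self)
    self._step_counter = 0
    return np.zeros(1)

  def step(self, action):  # was: def _step(self, action)
    self._step_counter += 1
    done = self._step_counter >= 10
    # 4-tuple (observation, reward, done, info) API of this gym era.
    return np.zeros(1), 0.0, done, {}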

View File

@@ -74,7 +74,7 @@ class MinitaurAlternatingLegsEnv(minitaur_gym_env.MinitaurGymEnv):
be running, but only first num_steps_to_log will be recorded in logging.
env_randomizer: An instance (or a list) of EnvRandomizer(s) that can
randomize the environment when env.reset() is called and add
-perturbations when env._step() is called.
+perturbations when env.step() is called.
log_path: The path to write out logs. For the details of logging, refer to
minitaur_logging.proto.
"""
@@ -107,7 +107,7 @@ class MinitaurAlternatingLegsEnv(minitaur_gym_env.MinitaurGymEnv):
self._cam_yaw = 30
self._cam_pitch = -30
-def _reset(self):
+def reset(self):
self.desired_pitch = DESIRED_PITCH
# In this environment, the actions are
# [swing leg 1, swing leg 2, swing leg 3, swing leg 4,
@@ -123,7 +123,7 @@ class MinitaurAlternatingLegsEnv(minitaur_gym_env.MinitaurGymEnv):
INIT_EXTENSION_POS + self._extension_offset[3]
]
initial_motor_angles = self._convert_from_leg_model(init_pose)
-super(MinitaurAlternatingLegsEnv, self)._reset(
+super(MinitaurAlternatingLegsEnv, self).reset(
initial_motor_angles=initial_motor_angles, reset_duration=0.5)
return self._get_observation()

View File

@@ -66,9 +66,9 @@ class MinitaurBallGymEnv(minitaur_gym_env.MinitaurGymEnv):
self.observation_space = spaces.Box(np.array([-math.pi, 0]),
np.array([math.pi, 100]))
-def _reset(self):
+def reset(self):
self._ball_id = 0
-super(MinitaurBallGymEnv, self)._reset()
+super(MinitaurBallGymEnv, self).reset()
self._init_ball_theta = random.uniform(-INIT_BALL_ANGLE, INIT_BALL_ANGLE)
self._init_ball_distance = INIT_BALL_DISTANCE
self._ball_pos = [self._init_ball_distance *

View File

@@ -85,7 +85,7 @@ class MinitaurFourLegStandEnv(minitaur_gym_env.MinitaurGymEnv):
render: Whether to render the simulation.
env_randomizer: An instance (or a list) of EnvRandomizer(s) that can
randomize the environment when env.reset() is called and add
-perturbations when env._step() is called.
+perturbations when env.step() is called.
use_angular_velocity_in_observation: Whether to include roll_dot and
pitch_dot of the base in the observation.
use_motor_angle_in_observation: Whether to include motor angles in the
@@ -132,7 +132,7 @@ class MinitaurFourLegStandEnv(minitaur_gym_env.MinitaurGymEnv):
self._cur_ori = [0, 0, 0, 1]
self._goal_ori = [0, 0, 0, 1]
-def _reset(self):
+def reset(self):
self.desired_pitch = DESIRED_PITCH
# In this environment, the actions are
# [swing leg 1, swing leg 2, swing leg 3, swing leg 4,
@@ -150,11 +150,11 @@ class MinitaurFourLegStandEnv(minitaur_gym_env.MinitaurGymEnv):
initial_motor_angles = self._convert_from_leg_model(init_pose)
self._pybullet_client.resetBasePositionAndOrientation(
0, [0, 0, 0], [0, 0, 0, 1])
-super(MinitaurFourLegStandEnv, self)._reset(
+super(MinitaurFourLegStandEnv, self).reset(
initial_motor_angles=initial_motor_angles, reset_duration=0.5)
return self._get_observation()
-def _step(self, action):
+def step(self, action):
"""Step forward the simulation, given the action.
Args:
@@ -187,9 +187,9 @@ class MinitaurFourLegStandEnv(minitaur_gym_env.MinitaurGymEnv):
action = self._transform_action_to_motor_command(action)
t = self._env_step_counter % MOVING_FLOOR_TOTAL_STEP
if t == 0:
-self._seed()
+self.seed()
orientation_x = random.uniform(-0.2, 0.2)
-self._seed()
+self.seed()
orientation_y = random.uniform(-0.2, 0.2)
_, self._cur_ori = self._pybullet_client.getBasePositionAndOrientation(0)
self._goal_ori = self._pybullet_client.getQuaternionFromEuler(

View File

@@ -237,7 +237,7 @@ class MinitaurGymEnv(gym.Env):
if self._urdf_version is None:
self._urdf_version = DEFAULT_URDF_VERSION
self._pybullet_client.setPhysicsEngineParameter(enableConeFriction=0)
-self._seed()
+self.seed()
self.reset()
observation_high = (self._get_observation_upper_bound() + OBSERVATION_EPS)
observation_low = (self._get_observation_lower_bound() - OBSERVATION_EPS)
@@ -248,14 +248,14 @@ class MinitaurGymEnv(gym.Env):
self.viewer = None
self._hard_reset = hard_reset # This assignment needs to be after reset()
-def _close(self):
+def close(self):
self.logging.save_episode(self._episode_proto)
self.minitaur.Terminate()
def add_env_randomizer(self, env_randomizer):
self._env_randomizers.append(env_randomizer)
-def _reset(self, initial_motor_angles=None, reset_duration=1.0):
+def reset(self, initial_motor_angles=None, reset_duration=1.0):
self._pybullet_client.configureDebugVisualizer(
self._pybullet_client.COV_ENABLE_RENDERING, 0)
self.logging.save_episode(self._episode_proto)
@@ -317,7 +317,7 @@ class MinitaurGymEnv(gym.Env):
self._pybullet_client.COV_ENABLE_RENDERING, 1)
return self._get_observation()
-def _seed(self, seed=None):
+def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
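
The renamed seed() above is gym's standard seeding hook: it stores a seeded RandomState and reports the seed back. A standalone sketch of the gym.utils.seeding helper it relies on (values here are illustrative):

from gym.utils import seeding

# np_random is a seeded numpy RandomState; the helper also returns the
# seed actually used (auto-generated when the argument is None).
np_random, seed = seeding.np_random(42)
print(np_random.uniform(-0.2, 0.2))  # reproducible for a fixed seed
print([seed])  # gym convention: Env.seed() returns a list of seeds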
@@ -331,7 +331,7 @@ class MinitaurGymEnv(gym.Env):
action = self.minitaur.ConvertFromLegModel(action)
return action
-def _step(self, action):
+def step(self, action):
"""Step forward the simulation, given the action.
Args:
@@ -379,7 +379,7 @@ class MinitaurGymEnv(gym.Env):
self.minitaur.Terminate()
return np.array(self._get_observation()), reward, done, {}
-def _render(self, mode="rgb_array", close=False):
+def render(self, mode="rgb_array", close=False):
if mode != "rgb_array":
return np.array([])
base_pos = self.minitaur.GetBasePosition()
@@ -569,12 +569,12 @@ class MinitaurGymEnv(gym.Env):
"""
return len(self._get_observation())
-if parse_version(gym.__version__)>=parse_version('0.9.6'):
-close = _close
-render = _render
-reset = _reset
-seed = _seed
-step = _step
+if parse_version(gym.__version__) < parse_version('0.9.6'):
+_render = render
+_reset = reset
+_seed = seed
+_step = step
def set_time_step(self, control_step, simulation_step=0.001):
"""Sets the time step of the environment.
@@ -617,5 +617,3 @@ class MinitaurGymEnv(gym.Env):
@property
def env_step_counter(self):
return self._env_step_counter
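
The version guard in this file now points the other way: instead of aliasing the public gym API onto the old underscore methods for gym >= 0.9.6, the class defines the public methods and aliases the underscore names back for gym older than 0.9.6. A sketch of that backward-compatibility pattern (CompatEnv is a hypothetical minimal environment, not code from this repository):

import gym
from pkg_resources import parse_version


class CompatEnv(gym.Env):

  def reset(self):
    return 0.0

  def step(self, action):
    return 0.0, 0.0, True, {}

  # gym < 0.9.6 dispatches to the underscore-prefixed hooks, so alias
  # them back to the new public implementations inside the class body.
  if parse_version(gym.__version__) < parse_version('0.9.6'):
    _reset = reset
    _step = step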

View File

@@ -27,7 +27,7 @@ class MinitaurRandomizeTerrainGymEnv(minitaur_gym_env.MinitaurGymEnv):
"""
-def _reset(self):
+def reset(self):
self._pybullet_client.resetSimulation()
self._pybullet_client.setPhysicsEngineParameter(
numSolverIterations=self._num_bullet_solver_iterations)

View File

@@ -90,7 +90,7 @@ class MinitaurReactiveEnv(minitaur_gym_env.MinitaurGymEnv):
and moved to the origin.
env_randomizer: An instance (or a list) of EnvRandomizer(s) that can
randomize the environment when env.reset() is called and add
-perturbations when env._step() is called.
+perturbations when env.step() is called.
log_path: The path to write out logs. For the details of logging, refer to
minitaur_logging.proto.
"""
@@ -123,7 +123,7 @@ class MinitaurReactiveEnv(minitaur_gym_env.MinitaurGymEnv):
self._cam_yaw = 30
self._cam_pitch = -30
-def _reset(self):
+def reset(self):
# TODO(b/73666007): Use composition instead of inheritance.
# (http://go/design-for-testability-no-inheritance).
init_pose = MinitaurPose(
@@ -137,7 +137,7 @@ class MinitaurReactiveEnv(minitaur_gym_env.MinitaurGymEnv):
extension_angle_4=INIT_EXTENSION_POS)
# TODO(b/73734502): Refactor input of _convert_from_leg_model to namedtuple.
initial_motor_angles = self._convert_from_leg_model(list(init_pose))
-super(MinitaurReactiveEnv, self)._reset(
+super(MinitaurReactiveEnv, self).reset(
initial_motor_angles=initial_motor_angles, reset_duration=0.5)
return self._get_observation()

View File

@@ -101,13 +101,13 @@ class MinitaurStandGymEnv(minitaur_gym_env.MinitaurGymEnv):
done = self._termination()
return np.array(self._get_observation()), reward, done, {}
-def _step(self, action):
+def step(self, action):
# At start, use policy_flip to lift the robot to its two legs. After the
# robot reaches the lift up stage, give control back to the agent by
# returning the current state and reward.
if self._env_step_counter < 1:
return self._stand_up()
-return super(MinitaurStandGymEnv, self)._step(action)
+return super(MinitaurStandGymEnv, self).step(action)
def _reward(self):
"""Reward function for standing up pose.

View File

@@ -85,7 +85,7 @@ class MinitaurTrottingEnv(minitaur_gym_env.MinitaurGymEnv):
reposition the robot.
env_randomizer: A list of EnvRandomizers that can randomize the
environment when env.reset() is called and add perturbation
-forces when env._step() is called.
+forces when env.step() is called.
log_path: The path to write out logs. For the details of logging, refer to
minitaur_logging.proto.
init_extension: The initial reset length of the leg.
@@ -139,12 +139,12 @@ class MinitaurTrottingEnv(minitaur_gym_env.MinitaurGymEnv):
self._cam_yaw = 30
self._cam_pitch = -30
-def _reset(self):
+def reset(self):
# In this environment, the actions are
# [swing leg 1, swing leg 2, swing leg 3, swing leg 4,
# extension leg 1, extension leg 2, extension leg 3, extension leg 4]
initial_motor_angles = self._convert_from_leg_model(self._init_pose)
-super(MinitaurTrottingEnv, self)._reset(
+super(MinitaurTrottingEnv, self).reset(
initial_motor_angles=initial_motor_angles, reset_duration=0.5)
return self._get_observation()