diff --git a/sim/humanoid_gym/envs/getup_env.py b/sim/humanoid_gym/envs/getup_env.py
index 755cab1f..0a4451af 100755
--- a/sim/humanoid_gym/envs/getup_env.py
+++ b/sim/humanoid_gym/envs/getup_env.py
@@ -319,13 +319,8 @@ def _reward_base_height(self):
         The reward is computed based on the height difference between the robot's base and the average height
         of its feet when they are in contact with the ground.
         """
-        # TODO pfb30 - adjust that based on the body
         base_height = self.root_states[:, 2] - default_feet_height
-        reward = torch.exp(
-            -torch.abs(base_height - self.cfg.rewards.base_height_target) * 100
-        )
-        # print(reward, base_height - self.cfg.rewards.base_height_target, base_height)
-        # print(base_height)
+        return base_height
 
     def _reward_base_acc(self):
         """
diff --git a/sim/humanoid_gym/envs/legs_env.py b/sim/humanoid_gym/envs/legs_env.py
index ac93e0e1..14dc8167 100755
--- a/sim/humanoid_gym/envs/legs_env.py
+++ b/sim/humanoid_gym/envs/legs_env.py
@@ -69,7 +69,6 @@ def __init__(
                 env_handle, actor_handle, joint
             )
             self.legs_joints["right_" + name] = joint_handle
-            breakpoint()
         self.compute_observations()
 
     def _push_robots(self):
diff --git a/sim/humanoid_gym/play.py b/sim/humanoid_gym/play.py
index 479b146a..73a4b267 100755
--- a/sim/humanoid_gym/play.py
+++ b/sim/humanoid_gym/play.py
@@ -47,7 +47,7 @@ def play(args: argparse.Namespace) -> None:
     logger = Logger(env.dt)
     robot_index = 0  # which robot is used for logging
     joint_index = 1  # which joint is used for logging
-    stop_state_log = 200  # number of steps before plotting states
+    stop_state_log = 1200  # number of steps before plotting states
 
     if RENDER:
         camera_properties = gymapi.CameraProperties()