Skip to content

Commit

Permalink
Merge pull request #68 from ll7/67-offensive-demo-is-not-working
Browse files Browse the repository at this point in the history
Fix demos + QoL change
  • Loading branch information
ll7 authored Oct 21, 2024
2 parents 4ab4db7 + a3e3dc3 commit abddb05
Show file tree
Hide file tree
Showing 7 changed files with 40 additions and 44 deletions.
6 changes: 3 additions & 3 deletions examples/demo_defensive.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,15 +50,15 @@ def obs_adapter(orig_obs):
ray_state = np.squeeze(ray_state)
return np.concatenate((ray_state, drive_state), axis=0)

obs = env.reset()
obs, _ = env.reset()
for _ in range(10000):
obs = obs_adapter(obs)
action, _ = model.predict(obs, deterministic=True)
obs, _, done, _ = env.step(action)
obs, _, done, _, _ = env.step(action)
env.render()

if done:
obs = env.reset()
obs, _ = env.reset()
env.render()
env.exit()

Expand Down
8 changes: 4 additions & 4 deletions examples/demo_offensive.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,17 +9,17 @@ def training():
env_config = EnvSettings(
sim_config=SimulationSettings(difficulty=0, ped_density_by_difficulty=[0.02]),
robot_config=BicycleDriveSettings(radius=0.5, max_accel=3.0, allow_backwards=True))
env = RobotEnv(env_config, debug=True, recording_enabled=True)
env = RobotEnv(env_config, debug=True, recording_enabled=False)
model = PPO.load("./model/run_043", env=env)

obs = env.reset()
obs, _ = env.reset()
for _ in range(10000):
action, _ = model.predict(obs, deterministic=True)
obs, _, done, _ = env.step(action)
obs, _, done, _, _ = env.step(action)
env.render()

if done:
obs = env.reset()
obs, _ = env.reset()
env.render()
env.exit()

Expand Down
6 changes: 3 additions & 3 deletions examples/ego_ped_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ def test_simulation(map_definition: MapDefinition):

env = PedestrianEnv(env_config, robot_model=robot_model, debug=True, recording_enabled=True)

obs = env.reset()
obs, _ = env.reset()

logger.info("Simulating the random policy.")
for _ in range(10000):
Expand All @@ -39,7 +39,7 @@ def test_simulation(map_definition: MapDefinition):
env.render()

if done:
obs = env.reset()
obs, _ = env.reset()
env.render()

env.reset()
Expand All @@ -56,7 +56,7 @@ def get_file():

def main():

map_def = convert_map("maps/svg_maps/debug_03.svg")
map_def = convert_map("maps/svg_maps/debug_06.svg")

test_simulation(map_def)

Expand Down
10 changes: 4 additions & 6 deletions examples/simulate_with_svg_map.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,16 +40,16 @@ def obs_adapter(orig_obs):
ray_state = np.squeeze(ray_state)
return np.concatenate((ray_state, drive_state), axis=0)

obs = env.reset()
obs, _ = env.reset()
logger.info("Simulating the random policy.")
for _ in range(10000):
obs = obs_adapter(obs)
action = env.action_space.sample()
obs, _, done, _ = env.step(action)
obs, _, done, _, _ = env.step(action)
env.render()

if done:
obs = env.reset()
obs, _ = env.reset()
env.render()
env.exit()

Expand All @@ -58,9 +58,7 @@ def main():
"""Simulate a random policy with a map defined in SVG format."""
logger.info("Simulating a random policy with the map.")

# svg_file = "maps/svg_maps/02_simple_maps.svg"
# svg_file = "maps/svg_maps/03_mid_object.svg"
svg_file = "maps/svg_maps/04_small_mid_object.svg"
svg_file = "maps/svg_maps/debug_06.svg"

logger.info("Converting SVG map to MapDefinition object.")
logger.info(f"SVG file: {svg_file}")
Expand Down
5 changes: 1 addition & 4 deletions examples/view_recording.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,15 +2,12 @@
import os
from pathlib import Path

import numpy as np
from loguru import logger

from robot_sf.nav.svg_map_parser import SvgMapConverter
from robot_sf.nav.map_config import MapDefinition, MapDefinitionPool
from robot_sf.gym_env.env_config import SimulationSettings, EnvSettings
from robot_sf.gym_env.env_config import EnvSettings
from robot_sf.gym_env.robot_env import RobotEnv
from robot_sf.sensor.sensor_fusion import OBS_RAYS, OBS_DRIVE_STATE
from robot_sf.robot.differential_drive import DifferentialDriveSettings
from robot_sf.render.playback_recording import load_states_and_visualize


Expand Down
3 changes: 3 additions & 0 deletions robot_sf/gym_env/pedestrian_env.py
Original file line number Diff line number Diff line change
Expand Up @@ -205,6 +205,9 @@ def reset(self, seed=None, options=None):
- obs: The initial observation after resetting the environment.
"""
super().reset(seed=seed, options=options)
# Reset last actions
self.last_action_robot = None
self.last_action_ped = None
# Reset internal simulator state
self.simulator.reset_state()
# Reset the environment's state and return the initial observation
Expand Down
46 changes: 22 additions & 24 deletions robot_sf/render/sim_view.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ class VisualizableSimState:
ped_actions: np.ndarray
ego_ped_pose: PedPose = None
ego_ped_ray_vecs: np.ndarray = None
ego_ped_actions: Union[VisualizableAction, None] = None
ego_ped_action: Union[VisualizableAction, None] = None
# obstacles: List[Obstacle]


Expand All @@ -91,7 +91,7 @@ class SimulationView:
focus_on_robot: bool = False
focus_on_ego_ped: bool = False
display_help: bool = False
display_robot_info: bool = False
display_robot_info: int = 0
"""The offset already uses `scaling` as a factor."""

@property
Expand Down Expand Up @@ -172,7 +172,8 @@ def _handle_keydown(self, e):
# display help
pygame.K_h: lambda: setattr(self, 'display_help', not self.display_help),
# display robot info
pygame.K_q: lambda: setattr(self, 'display_robot_info', not self.display_robot_info),
pygame.K_q: lambda: setattr(self, 'display_robot_info',
(self.display_robot_info + 1) % 3),
}

if e.key in key_action_map:
Expand Down Expand Up @@ -204,7 +205,6 @@ def clear(self):
adds text at position 0, and updates the display.
"""
self.screen.fill(BACKGROUND_COLOR)
self._augment_timestep(0)
pygame.display.update()

def render(self, state: VisualizableSimState):
Expand Down Expand Up @@ -255,11 +255,11 @@ def render(self, state: VisualizableSimState):
self._draw_robot(state.robot_pose)
if state.ego_ped_pose:
self._augment_lidar(state.ego_ped_ray_vecs)
self._augment_action(state.ego_ped_actions, EGO_PED_ACTION_COLOR)
if state.ego_ped_action:
self._augment_action(state.ego_ped_action, EGO_PED_ACTION_COLOR)
self._draw_ego_ped(state.ego_ped_pose)

# information
self._augment_timestep(state.timestep)
self._add_text(state.timestep, state)
if self.display_help:
self._add_help_text()
Expand All @@ -280,7 +280,7 @@ def _move_camera(self, state: VisualizableSimState):
r_x, r_y = state.robot_pose[0]
self.offset[0] = int(r_x * self.scaling - self.width / 2) * -1
self.offset[1] = int(r_y * self.scaling - self.height / 2) * -1
if self.focus_on_ego_ped:
if self.focus_on_ego_ped and state.ego_ped_pose:
r_x, r_y = state.ego_ped_pose[0]
self.offset[0] = int(r_x * self.scaling - self.width / 2) * -1
self.offset[1] = int(r_y * self.scaling - self.height / 2) * -1
Expand Down Expand Up @@ -427,25 +427,23 @@ def _draw_coordinates(self, x, y):
text = self.font.render(f'({x}, {y})', False, TEXT_COLOR)
self.screen.blit(text, (x, y))

def _augment_timestep(self, timestep: int):
# TODO: show map name as well
text = f'step: {timestep}'
text_surface = self.font.render(text, False, TEXT_COLOR)
self.screen.blit(text_surface, self._timestep_text_pos)

def _add_text(self, timestep: int, state: VisualizableSimState):
if self.display_robot_info:
lines = [
lines = []
if self.display_robot_info == 1 and state.robot_action:
lines += [
f'RobotPose: {state.robot_pose}',
f'RobotAction: {state.robot_action.action}',
f'RobotGoal: {state.robot_action.goal}']
else:
distance_to_robot = euclid_dist(state.ego_ped_pose[0], state.robot_pose[0])
lines = [
f'PedestrianPose: {state.ego_ped_pose}',
f'PedestrianAction: {state.ego_ped_actions.action}',
f'PedestrianGoal: {state.ego_ped_actions.goal}',
f'DistanceRobot: {distance_to_robot:.2f}',]
f'RobotAction: {state.robot_action.action if state.robot_action else None}',
f'RobotGoal: {state.robot_action.goal if state.robot_action else None}',]
elif self.display_robot_info == 2:
if state.ego_ped_pose and state.ego_ped_action:
distance_to_robot = euclid_dist(state.ego_ped_pose[0], state.robot_pose[0])
lines += [
f'PedestrianPose: {state.ego_ped_pose}',
f'PedestrianAction: {state.ego_ped_action.action}',
f'PedestrianGoal: {state.ego_ped_action.goal}',
f'DistanceRobot: {distance_to_robot:.2f}',]
else:
self.display_robot_info = 0
text_lines = [
f'step: {timestep}',
f'scaling: {self.scaling}',
Expand Down

0 comments on commit abddb05

Please sign in to comment.