diff --git a/.cspell.json b/.cspell.json
index 3c1715b8..7978a84c 100644
--- a/.cspell.json
+++ b/.cspell.json
@@ -104,6 +104,28 @@
"wpedantic",
"xacro",
"xyzrpy",
- "zcvf"
+ "zcvf",
+ "mppi",
+ "Qdin",
+ "Kohei",
+ "MPPI",
+ "Savitzky",
+ "Golay",
+ "Vandermonde",
+ "michikuni",
+ "eguchi",
+ "vander",
+ "pinv",
+ "dtheta",
+ "cind",
+ "dind",
+ "ncourse",
+ "argmin",
+ "rsample",
+ "coeffs",
+ "softplus"
]
}
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 379992ee..13913d57 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -12,3 +12,4 @@ aichallenge/workspace/src/aichallenge_submit/racing_kart_description/** @Autumn6
aichallenge/workspace/src/aichallenge_submit/racing_kart_sensor_kit_description/** @booars/aic2024-developers
aichallenge/workspace/src/aichallenge_submit/simple_pure_pursuit/** @sitahara
aichallenge/workspace/src/aichallenge_system/aichallenge_system_launch/** @booars/aic2024-developers
+aichallenge/workspace/src/aichallenge_submit/mppi_controller/** @tamago117
diff --git a/aichallenge/pkill_all.sh b/aichallenge/pkill_all.sh
old mode 100644
new mode 100755
diff --git a/aichallenge/workspace/src/aichallenge_submit/booars_launch/config/planning/costmap_generator.param.yaml b/aichallenge/workspace/src/aichallenge_submit/booars_launch/config/planning/costmap_generator.param.yaml
index 761d16e7..c5c54ba6 100644
--- a/aichallenge/workspace/src/aichallenge_submit/booars_launch/config/planning/costmap_generator.param.yaml
+++ b/aichallenge/workspace/src/aichallenge_submit/booars_launch/config/planning/costmap_generator.param.yaml
@@ -15,4 +15,4 @@
predicted_object_layer:
type: "predicted_object"
predicted_objects_topic: "/perception/object_recognition/objects"
- distance_threshold: 1.0
+ distance_threshold: 1.2
diff --git a/aichallenge/workspace/src/aichallenge_submit/booars_launch/launch/components/control.launch.xml b/aichallenge/workspace/src/aichallenge_submit/booars_launch/launch/components/control.launch.xml
index f23f0812..d39ec7b6 100644
--- a/aichallenge/workspace/src/aichallenge_submit/booars_launch/launch/components/control.launch.xml
+++ b/aichallenge/workspace/src/aichallenge_submit/booars_launch/launch/components/control.launch.xml
@@ -5,12 +5,13 @@
-
-
+
+
-
+
+
diff --git a/aichallenge/workspace/src/aichallenge_submit/booars_launch/launch/components/planning.launch.xml b/aichallenge/workspace/src/aichallenge_submit/booars_launch/launch/components/planning.launch.xml
index 483d4cd0..24425f1b 100644
--- a/aichallenge/workspace/src/aichallenge_submit/booars_launch/launch/components/planning.launch.xml
+++ b/aichallenge/workspace/src/aichallenge_submit/booars_launch/launch/components/planning.launch.xml
@@ -111,5 +111,18 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/aichallenge/workspace/src/aichallenge_submit/mppi_controller/.gitignore b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/.gitignore
new file mode 100644
index 00000000..7e99e367
--- /dev/null
+++ b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/.gitignore
@@ -0,0 +1 @@
+*.pyc
\ No newline at end of file
diff --git a/aichallenge/workspace/src/aichallenge_submit/mppi_controller/config/mppi_controller.param.yaml b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/config/mppi_controller.param.yaml
new file mode 100644
index 00000000..8f0d35a0
--- /dev/null
+++ b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/config/mppi_controller.param.yaml
@@ -0,0 +1,26 @@
+/**:
+ ros__parameters:
+ # mppi
+ horizon : 25
+ num_samples : 3000
+ u_min : [-2.0, -0.25] # accel(m/s2), steer angle(rad)
+ u_max : [2.0, 0.25]
+ sigmas : [0.5, 0.15] # sample range
+ lambda : 1.0
+ auto_lambda : false
+ # reference path
+ DL : 0.1
+ lookahead_distance : 0.3
+ reference_path_interval : 0.85
+ # cost weights
+ Qc : 20.0
+ Ql : 1.0
+ Qv : 2.0
+ Qo : 10000.0
+ Qin : 0.01
+ Qdin : 0.5
+ # model param
+ delta_t : 0.1
+ vehicle_L : 1.0
+ V_MAX : 8.0
+
diff --git a/aichallenge/workspace/src/aichallenge_submit/mppi_controller/mppi_controller/MPPI.py b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/mppi_controller/MPPI.py
new file mode 100644
index 00000000..90f52793
--- /dev/null
+++ b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/mppi_controller/MPPI.py
@@ -0,0 +1,463 @@
+"""
+Kohei Honda, 2023.
+"""
+
+from __future__ import annotations
+
+from typing import Callable, Tuple, Dict
+import torch
+import torch.nn as nn
+from torch.distributions.multivariate_normal import MultivariateNormal
+
+
+class MPPI(nn.Module):
+ """
+ Model Predictive Path Integral Control,
+ J. Williams et al., T-RO, 2017.
+ """
+
+ def __init__(
+ self,
+ horizon: int,
+ num_samples: int,
+ dim_state: int,
+ dim_control: int,
+ dynamics: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
+ cost_func: Callable[[torch.Tensor, torch.Tensor, Dict], torch.Tensor],
+ u_min: torch.Tensor,
+ u_max: torch.Tensor,
+ sigmas: torch.Tensor,
+ lambda_: float,
+ auto_lambda: bool = False,
+ exploration: float = 0.0,
+ use_sg_filter: bool = False,
+ sg_window_size: int = 5,
+ sg_poly_order: int = 3,
+ device=torch.device("cuda"),
+ dtype=torch.float32,
+ seed: int = 42,
+ ) -> None:
+ """
+ :param horizon: Predictive horizon length.
+ :param num_samples: Number of samples.
+ :param dim_state: Dimension of state.
+ :param dim_control: Dimension of control.
+ :param dynamics: Dynamics model.
+ :param cost_func: Cost function.
+ :param u_min: Minimum control.
+ :param u_max: Maximum control.
+ :param sigmas: Noise standard deviation for each control dimension.
+        :param lambda_: Temperature parameter of the softmax weighting.
+        :param auto_lambda: Automatically tune the temperature (MPO-style dual update) if True.
+        :param exploration: Exploration rate when sampling.
+        :param use_sg_filter: Use Savitzky-Golay filter.
+        :param sg_window_size: Window size for the Savitzky-Golay filter. Larger is smoother. Must be odd.
+        :param sg_poly_order: Polynomial order for the Savitzky-Golay filter. Smaller is smoother.
+ :param device: Device to run the solver.
+ :param dtype: Data type to run the solver.
+ :param seed: Seed for torch.
+ """
+
+ super().__init__()
+
+ # torch seed
+ torch.manual_seed(seed)
+
+ # check dimensions
+ assert u_min.shape == (dim_control,)
+ assert u_max.shape == (dim_control,)
+ assert sigmas.shape == (dim_control,)
+ # assert num_samples % batch_size == 0 and num_samples >= batch_size
+
+ # device and dtype
+ if torch.cuda.is_available() and device == torch.device("cuda"):
+ self._device = torch.device("cuda")
+ else:
+ self._device = torch.device("cpu")
+ print(f"Device: {self._device}")
+ self._dtype = dtype
+
+ # set parameters
+ self._horizon = horizon
+ self._num_samples = num_samples
+ self._dim_state = dim_state
+ self._dim_control = dim_control
+ self._dynamics = dynamics
+ self._cost_func = cost_func
+ self._u_min = u_min.clone().detach().to(self._device, self._dtype)
+ self._u_max = u_max.clone().detach().to(self._device, self._dtype)
+ self._sigmas = sigmas.clone().detach().to(self._device, self._dtype)
+ self._lambda = lambda_
+ self._exploration = exploration
+ self._use_sg_filter = use_sg_filter
+ self._sg_window_size = sg_window_size
+ self._sg_poly_order = sg_poly_order
+
+ # noise distribution
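+        # one diagonal covariance diag(sigmas^2) per horizon step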
+ self._covariance = torch.zeros(
+ self._horizon,
+ self._dim_control,
+ self._dim_control,
+ device=self._device,
+ dtype=self._dtype,
+ )
+ self._covariance[:, :, :] = torch.diag(sigmas**2).to(self._device, self._dtype)
+ self._inv_covariance = torch.zeros_like(
+ self._covariance, device=self._device, dtype=self._dtype
+ )
+ for t in range(1, self._horizon):
+ self._inv_covariance[t] = torch.inverse(self._covariance[t])
+
+ zero_mean = torch.zeros(dim_control, device=self._device, dtype=self._dtype)
+ self._noise_distribution = MultivariateNormal(
+ loc=zero_mean, covariance_matrix=self._covariance
+ )
+
+ self._sample_shape = torch.Size([self._num_samples])
+
+        # sampling with the reparametrization trick
+ self._action_noises = self._noise_distribution.rsample(
+ sample_shape=self._sample_shape
+ )
+
+ zero_mean_seq = torch.zeros(
+ self._horizon, self._dim_control, device=self._device, dtype=self._dtype
+ )
+ self._perturbed_action_seqs = torch.clamp(
+ zero_mean_seq + self._action_noises, self._u_min, self._u_max
+ )
+
+ self._previous_action_seq = zero_mean_seq
+
+        # init Savitzky-Golay filter coefficients
+ self._coeffs = self._savitzky_golay_coeffs(
+ self._sg_window_size, self._sg_poly_order
+ )
+ self._actions_history_for_sg = torch.zeros(
+ self._horizon - 1, self._dim_control, device=self._device, dtype=self._dtype
+        )  # previously applied actions, kept for the SG filter
+
+ # inner variables
+ self._state_seq_batch = torch.zeros(
+ self._num_samples,
+ self._horizon + 1,
+ self._dim_state,
+ device=self._device,
+ dtype=self._dtype,
+ )
+ self._weights = torch.zeros(
+ self._num_samples, device=self._device, dtype=self._dtype
+ )
+ self._optimal_state_seq = torch.zeros(
+ self._horizon + 1, self._dim_state, device=self._device, dtype=self._dtype
+ )
+
+ # auto lambda tuning
+ self._auto_lambda = auto_lambda
+ if auto_lambda:
+            self.log_temperature = torch.nn.Parameter(
+                torch.log(
+                    torch.tensor([self._lambda], device=self._device, dtype=self._dtype)
+                )
+            )
+            self.optimizer = torch.optim.Adam([self.log_temperature], lr=1e-2)
+
+ def reset(self):
+ """
+ Reset the previous action sequence.
+ """
+ self._previous_action_seq = torch.zeros(
+ self._horizon, self._dim_control, device=self._device, dtype=self._dtype
+ )
+ self._actions_history_for_sg = torch.zeros(
+ self._horizon - 1, self._dim_control, device=self._device, dtype=self._dtype
+        )  # previously applied actions, kept for the SG filter
+
+ def forward(
+ self, state: torch.Tensor, info: Dict = {}
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Solve the optimal control problem.
+ Args:
+ state (torch.Tensor): Current state.
+ Returns:
+ Tuple[torch.Tensor, torch.Tensor]: Tuple of predictive control and state sequence.
+ """
+ assert state.shape == (self._dim_state,)
+
+ if not torch.is_tensor(state):
+ state = torch.tensor(state, device=self._device, dtype=self._dtype)
+ else:
+ if state.device != self._device or state.dtype != self._dtype:
+ state = state.to(self._device, self._dtype)
+
+ mean_action_seq = self._previous_action_seq.clone().detach()
+
+ # random sampling with reparametrization trick
+ self._action_noises = self._noise_distribution.rsample(
+ sample_shape=self._sample_shape
+ )
+
+ # noise injection with exploration
+ threshold = int(self._num_samples * (1 - self._exploration))
+ inherited_samples = mean_action_seq + self._action_noises[:threshold]
+ self._perturbed_action_seqs = torch.cat(
+ [inherited_samples, self._action_noises[threshold:]]
+ )
+
+ # clamp actions
+ self._perturbed_action_seqs = torch.clamp(
+ self._perturbed_action_seqs, self._u_min, self._u_max
+ )
+
+ # rollout samples in parallel
+ self._state_seq_batch[:, 0, :] = state.repeat(self._num_samples, 1)
+
+ for t in range(self._horizon):
+ self._state_seq_batch[:, t + 1, :] = self._dynamics(
+ self._state_seq_batch[:, t, :],
+ self._perturbed_action_seqs[:, t, :],
+ )
+
+ # compute sample costs
+ costs = torch.zeros(
+ self._num_samples, self._horizon, device=self._device, dtype=self._dtype
+ )
+ action_costs = torch.zeros(
+ self._num_samples, self._horizon, device=self._device, dtype=self._dtype
+ )
+ initial_state = self._state_seq_batch[:, 0, :]
+ for t in range(self._horizon):
+ prev_index = t - 1 if t > 0 else 0
+ prev_state = self._state_seq_batch[:, prev_index, :]
+ prev_action = self._perturbed_action_seqs[:, prev_index, :]
+ # info update
+ info["prev_state"] = prev_state
+ info["prev_action"] = prev_action
+ info["initial_state"] = initial_state
+ info["t"] = t
+ costs[:, t] = self._cost_func(
+ self._state_seq_batch[:, t, :],
+ self._perturbed_action_seqs[:, t, :],
+ info,
+ )
+ action_costs[:, t] = (
+ mean_action_seq[t]
+ @ self._inv_covariance[t]
+ @ self._perturbed_action_seqs[:, t].T
+ )
+
+ prev_state = self._state_seq_batch[:, -2, :]
+ info["prev_state"] = prev_state
+ zero_action = torch.zeros(
+ self._num_samples,
+ self._dim_control,
+ device=self._device,
+ dtype=self._dtype,
+ )
+ terminal_costs = self._cost_func(
+ self._state_seq_batch[:, -1, :], zero_action, info
+ )
+
+        # In the original paper, an action cost is added to account for the KL-divergence
+        # penalty, but the controller is easier to tune without it
+ costs = (
+ torch.sum(costs, dim=1)
+ + terminal_costs
+ # + torch.sum(self._lambda * action_costs, dim=1)
+ )
+
+ # calculate weights
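+        # w_i = softmax(-S_i / lambda): lower-cost rollouts receive exponentially larger weights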
+ self._weights = torch.softmax(-costs / self._lambda, dim=0)
+
+ # find optimal control by weighted average
+ optimal_action_seq = torch.sum(
+ self._weights.view(self._num_samples, 1, 1) * self._perturbed_action_seqs,
+ dim=0,
+ )
+
+ mean_action_seq = optimal_action_seq
+
+ # auto-tune temperature parameter
+        # Refer to the E-step of the MPO algorithm:
+ # https://arxiv.org/pdf/1806.06920
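+        # The loss below is the dual objective lambda * (epsilon + logsumexp(-S / lambda)),
+        # whose minimizer keeps the implied KL divergence between the weighted sample
+        # distribution and the sampling distribution near the tolerance epsilon.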
+ if self._auto_lambda:
+ for _ in range(1):
+ self.optimizer.zero_grad()
+                temperature = torch.nn.functional.softplus(self.log_temperature)
+                cost_logsumexp = torch.logsumexp(-costs / temperature, dim=0)
+                epsilon = 0.1  # tolerance hyperparameter for KL divergence
+                loss = temperature * (epsilon + torch.mean(cost_logsumexp))
+                loss.backward()
+                self.optimizer.step()
+            # use the same softplus transform as the loss so the stored lambda stays consistent
+            self._lambda = torch.nn.functional.softplus(self.log_temperature).item()
+
+ # calculate new covariance
+ # https://arxiv.org/pdf/2104.00241
+ # covariance = torch.sum(
+ # self._weights.view(self._num_samples, 1, 1)
+ # * (self._perturbed_action_seqs - optimal_action_seq) ** 2,
+ # dim=0,
+ # ) # T x dim_control
+
+ # small_cov = 1e-6 * torch.eye(
+ # self._dim_control, device=self._device, dtype=self._dtype
+ # )
+ # self._covariance = torch.diag_embed(covariance) + small_cov
+
+ # for t in range(1, self._horizon):
+ # self._inv_covariance[t] = torch.inverse(self._covariance[t])
+ # zero_mean = torch.zeros(self._dim_control, device=self._device, dtype=self._dtype)
+ # self._noise_distribution = MultivariateNormal(
+ # loc=zero_mean, covariance_matrix=self._covariance
+ # )
+
+ if self._use_sg_filter:
+ # apply savitzky-golay filter to N-1 previous action history + N optimal action seq
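+            # prepending the already-executed actions keeps the smoothed sequence continuous
+            # with past commands and avoids filter edge effects at the start of the horizon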
+ prolonged_action_seq = torch.cat(
+ [
+ self._actions_history_for_sg,
+ optimal_action_seq,
+ ],
+ dim=0,
+ )
+
+ # apply sg filter for each control dimension
+ filtered_action_seq = torch.zeros_like(
+ prolonged_action_seq, device=self._device, dtype=self._dtype
+ )
+ for i in range(self._dim_control):
+ filtered_action_seq[:, i] = self._apply_savitzky_golay(
+ prolonged_action_seq[:, i], self._coeffs
+ )
+
+ # use only N step optimal action seq
+ optimal_action_seq = filtered_action_seq[-self._horizon :]
+
+ # predictive state seq
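+        # repeat(1, 1, 1) prepends a batch dimension: (horizon, dim_control) -> (1, horizon, dim_control)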
+ expanded_optimal_action_seq = optimal_action_seq.repeat(1, 1, 1)
+ optimal_state_seq = self._states_prediction(state, expanded_optimal_action_seq)
+
+ # update previous actions
+ self._previous_action_seq = optimal_action_seq
+
+ # stuck previous actions for sg filter
+ optimal_action = optimal_action_seq[0]
+ self._actions_history_for_sg = torch.cat(
+ [self._actions_history_for_sg[1:], optimal_action.view(1, -1)]
+ )
+
+ return optimal_action_seq, optimal_state_seq
+
+ def get_top_samples(self, num_samples: int) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Get top samples.
+ Args:
+ num_samples (int): Number of state samples to get.
+ Returns:
+ Tuple[torch.Tensor, torch.Tensor]: Tuple of top samples and their weights.
+ """
+ assert num_samples <= self._num_samples
+
+ # large weights are better
+ top_indices = torch.topk(self._weights, num_samples).indices
+
+ top_samples = self._state_seq_batch[top_indices]
+ top_weights = self._weights[top_indices]
+
+ top_samples = top_samples[torch.argsort(top_weights, descending=True)]
+ top_weights = top_weights[torch.argsort(top_weights, descending=True)]
+
+ return top_samples, top_weights
+
+ def get_samples_from_posterior(
+ self, optimal_solution: torch.Tensor, state: torch.Tensor, num_samples: int
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+ assert num_samples <= self._num_samples
+
+ # posterior distribution of MPPI
+ # covariance is the same as noise distribution
+ posterior_distribution = MultivariateNormal(
+ loc=optimal_solution, covariance_matrix=self._covariance
+ )
+
+ # sampling control input sequence from posterior
+ samples = posterior_distribution.sample(sample_shape=torch.Size([num_samples]))
+
+ # get state sequence from sampled control input sequence
+ predictive_states = self._states_prediction(state, samples)
+
+ return samples, predictive_states
+
+ def _states_prediction(
+ self, state: torch.Tensor, action_seqs: torch.Tensor
+ ) -> torch.Tensor:
+ state_seqs = torch.zeros(
+ action_seqs.shape[0],
+ self._horizon + 1,
+ self._dim_state,
+ device=self._device,
+ dtype=self._dtype,
+ )
+ state_seqs[:, 0, :] = state
+ # expanded_optimal_action_seq = action_seq.repeat(1, 1, 1)
+ for t in range(self._horizon):
+ state_seqs[:, t + 1, :] = self._dynamics(
+ state_seqs[:, t, :], action_seqs[:, t, :]
+ )
+ return state_seqs
+
+ def _savitzky_golay_coeffs(self, window_size: int, poly_order: int) -> torch.Tensor:
+ """
+ Compute the Savitzky-Golay filter coefficients using PyTorch.
+
+ Parameters:
+ - window_size: The size of the window (must be odd).
+ - poly_order: The order of the polynomial to fit.
+
+ Returns:
+ - coeffs: The filter coefficients as a PyTorch tensor.
+ """
+ # Ensure the window size is odd and greater than the polynomial order
+ if window_size % 2 == 0 or window_size <= poly_order:
+ raise ValueError("window_size must be odd and greater than poly_order.")
+
+ # Generate the Vandermonde matrix of powers for the polynomial fit
+ half_window = (window_size - 1) // 2
+ indices = torch.arange(
+ -half_window, half_window + 1, dtype=self._dtype, device=self._device
+ )
+ A = torch.vander(indices, N=poly_order + 1, increasing=True)
+
+ # Compute the pseudo-inverse of the matrix
+ pseudo_inverse = torch.linalg.pinv(A)
+
+ # The filter coefficients are given by the first row of the pseudo-inverse
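+        # (that row evaluates the least-squares polynomial fit at the window center,
+        # which is exactly the smoothed value Savitzky-Golay assigns to that sample)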
+ coeffs = pseudo_inverse[0]
+
+ return coeffs
+
+ def _apply_savitzky_golay(
+ self, y: torch.Tensor, coeffs: torch.Tensor
+ ) -> torch.Tensor:
+ """
+ Apply the Savitzky-Golay filter to a 1D signal using the provided coefficients.
+
+ Parameters:
+ - y: The input signal as a PyTorch tensor.
+ - coeffs: The filter coefficients as a PyTorch tensor.
+
+ Returns:
+ - y_filtered: The filtered signal.
+ """
+ # Pad the signal at both ends to handle the borders
+ pad_size = len(coeffs) // 2
+ y_padded = torch.cat([y[:pad_size].flip(0), y, y[-pad_size:].flip(0)])
+
+ # Apply convolution
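+        # torch.conv1d actually computes cross-correlation; the smoothing coefficients are
+        # symmetric about the window center, so no explicit flip of coeffs is required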
+ y_filtered = torch.conv1d(
+ y_padded.view(1, 1, -1), coeffs.view(1, 1, -1), padding="valid"
+ )
+
+ return y_filtered.view(-1)
\ No newline at end of file
diff --git a/aichallenge/workspace/src/aichallenge_submit/mppi_controller/mppi_controller/__init__.py b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/mppi_controller/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/aichallenge/workspace/src/aichallenge_submit/mppi_controller/mppi_controller/cost_map_tensor.py b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/mppi_controller/cost_map_tensor.py
new file mode 100644
index 00000000..747bc749
--- /dev/null
+++ b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/mppi_controller/cost_map_tensor.py
@@ -0,0 +1,49 @@
+import torch
+from typing import Tuple
+
+class CostMapTensor:
+ def __init__(
+ self,
+ cost_map: torch.Tensor,
+ cell_size: float,
+ origin: Tuple[float, float] = (0.0, 0.0),
+ device=torch.device("cuda"),
+ dtype=torch.float32,
+ ) -> None:
+ """
+ cost map tensor for collision checking.
+ input:
+ cost_map (torch.Tensor): cost map tensor.
+ cell_size (float): size(m) of each cell in the cost map.
+ origin (Tuple[float, float]): origin of the cost map. (m, m)
+ device: device to run the computation.
+ dtype: data type of the tensor.
+ """
+ self.cost_map: torch.Tensor = cost_map
+ self.cell_size: float = cell_size
+        self.origin: Tuple[float, float] = origin
+ self.origin_tensor = torch.tensor(origin, device=device, dtype=dtype)
+ self.device = device
+ self.dtype = dtype
+
+ def compute_cost(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Check collision in a batch of trajectories.
+ :param x: Tensor of shape (batch_size, traj_length, position_dim).
+ :return: collision costs on the trajectories.
+ """
+ assert self.cost_map is not None
+ if x.device != self.device or x.dtype != self.dtype:
+ x = x.to(self.device, self.dtype)
+
+ # project to cell map
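+        # world (x, y) -> grid indices: shift by the map origin and divide by the cell size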
+ x_occ = (x - self.origin_tensor) / self.cell_size
+ x_occ = torch.round(x_occ).long().to(self.device)
+
+        # the map is indexed as [row, col] = [y, x]: clamp x by the width and y by the height
+        x_occ[..., 0] = torch.clamp(x_occ[..., 0], 0, self.cost_map.shape[1] - 1)
+        x_occ[..., 1] = torch.clamp(x_occ[..., 1], 0, self.cost_map.shape[0] - 1)
+
+ # collision check
+ collisions = self.cost_map[x_occ[..., 1], x_occ[..., 0]]
+
+ return collisions
\ No newline at end of file
diff --git a/aichallenge/workspace/src/aichallenge_submit/mppi_controller/mppi_controller/mppi_controller.py b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/mppi_controller/mppi_controller.py
new file mode 100644
index 00000000..7a65192f
--- /dev/null
+++ b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/mppi_controller/mppi_controller.py
@@ -0,0 +1,244 @@
+import torch
+import numpy as np
+from typing import Tuple
+from mppi_controller.MPPI import MPPI
+from mppi_controller.cost_map_tensor import CostMapTensor
+
+import time
+
+@torch.jit.script
+def angle_normalize(x) -> torch.Tensor:
+ return ((x + torch.pi) % (2 * torch.pi)) - torch.pi
+
+class mppi_controller:
+ def __init__(self, config, debug=False, device=torch.device("cuda"), dtype=torch.float32):
+ self.config = config
+ self.debug = debug
+ # device and dtype
+ if torch.cuda.is_available() and device == torch.device("cuda"):
+ self._device = torch.device("cuda")
+ else:
+ self._device = torch.device("cpu")
+ self._dtype = dtype
+
+ self.u_min = torch.tensor(self.config["u_min"], device=self._device, dtype=self._dtype)
+ self.u_max = torch.tensor(self.config["u_max"], device=self._device, dtype=self._dtype)
+ self.sigmas = torch.tensor(self.config["sigmas"], device=self._device, dtype=self._dtype)
+
+ # solver
+ self.solver = MPPI(
+ horizon=self.config["horizon"],
+ num_samples=self.config["num_samples"],
+ dim_state=4,
+ dim_control=2,
+ dynamics=self.dynamics,
+ cost_func=self.cost_function,
+ u_min=self.u_min,
+ u_max=self.u_max,
+ sigmas=self.sigmas,
+ lambda_=self.config["lambda"],
+ auto_lambda=self.config["auto_lambda"],
+ )
+
+ # model parameter
+ self.delta_t = torch.tensor(self.config["delta_t"], device=self._device, dtype=self._dtype)
+ self.vehicle_L = torch.tensor(self.config["vehicle_L"], device=self._device, dtype=self._dtype)
+ self.V_MAX = torch.tensor(self.config["V_MAX"], device=self._device, dtype=self._dtype)
+
+ # cost weights
+ self.Qc = self.config["Qc"] # contouring error cost
+ self.Ql = self.config["Ql"] # lag error cost
+ self.Qv = self.config["Qv"] # velocity cost
+ self.Qo = self.config["Qo"] # obstacle cost
+ self.Qin = self.config["Qin"] # input cost
+ self.Qdin = self.config["Qdin"] # input rate cost
+
+ self.current_path_index = 0
+
+ # reference information (tensor)
+ self.reference_path: torch.Tensor = None
+ self.cost_map: CostMapTensor = None
+
+ def update(self, state: torch.Tensor, racing_center_path: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Update the controller with the current state and reference path.
+ Args:
+ state (torch.Tensor): current state of the vehicle, shape (4,) [x, y, yaw, v]
+            racing_center_path (torch.Tensor): racing center path, shape (N, 4) [x, y, yaw, target_v]
+        Returns:
+            Tuple[torch.Tensor, torch.Tensor]: action sequence tensor of shape (horizon, 2) [accel, steer]
+                and state sequence tensor of shape (1, horizon + 1, 4) [x, y, yaw, v]
+ """
+
+ # reference
+ self.reference_path, self.current_path_index = self.calc_ref_trajectory(
+ state, racing_center_path, self.current_path_index, self.solver._horizon, DL=self.config["DL"], lookahead_distance=self.config["lookahead_distance"], reference_path_interval=self.config["reference_path_interval"]
+ )
+
+ if self.reference_path is None:
+            raise ValueError("reference path must be set before solving.")
+
+ # solve
+ start = time.time()
+ action_seq, state_seq = self.solver.forward(state=state)
+ end = time.time()
+ solve_time = end - start
+
+ if self.debug:
+ print("solve time: {}".format(round(solve_time * 1000, 2)), " [ms]")
+
+ return action_seq, state_seq
+
+ def get_top_samples(self, num_samples = 300) -> Tuple[torch.Tensor, torch.Tensor]:
+ return self.solver.get_top_samples(num_samples=num_samples)
+
+ def set_cost_map(self, cost_map: CostMapTensor) -> None:
+ self.cost_map = cost_map
+
+ def dynamics(
+ self, state: torch.Tensor, action: torch.Tensor
+ ) -> torch.Tensor:
+ """
+        Propagate the vehicle state one step with a kinematic bicycle model.
+        Args:
+            state (torch.Tensor): state batch tensor, shape (batch_size, 4) [x, y, theta, v]
+            action (torch.Tensor): control batch tensor, shape (batch_size, 2) [accel, steer]
+        Returns:
+            torch.Tensor: next state batch tensor, shape (batch_size, 4) [x, y, theta, v]
+ """
+
+        # unpack the state and clamp the control inputs to their bounds
+ x = state[:, 0].view(-1, 1)
+ y = state[:, 1].view(-1, 1)
+ theta = state[:, 2].view(-1, 1)
+ v = state[:, 3].view(-1, 1)
+ accel = torch.clamp(action[:, 0].view(-1, 1), self.u_min[0], self.u_max[0])
+ steer = torch.clamp(action[:, 1].view(-1, 1), self.u_min[1], self.u_max[1])
+ theta = angle_normalize(theta)
+
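+        # kinematic bicycle model:
+        #   dx = v cos(theta), dy = v sin(theta), dtheta = v tan(steer) / L, dv = accel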
+ dx = v * torch.cos(theta)
+ dy = v * torch.sin(theta)
+ dv = accel
+ dtheta = v * torch.tan(steer) / self.vehicle_L
+
+ new_x = x + dx * self.delta_t
+ new_y = y + dy * self.delta_t
+ new_theta = angle_normalize(theta + dtheta * self.delta_t)
+ new_v = v + dv * self.delta_t
+
+ # Clamp velocity
+ new_v = torch.clamp(new_v, -self.V_MAX, self.V_MAX)
+
+ result = torch.cat([new_x, new_y, new_theta, new_v], dim=1)
+
+ return result
+
+
+ def cost_function(self, state: torch.Tensor, action: torch.Tensor, info: dict) -> torch.Tensor:
+ """
+ Calculate cost function
+ Args:
+ state (torch.Tensor): state batch tensor, shape (batch_size, 4) [x, y, theta, v]
+ action (torch.Tensor): control batch tensor, shape (batch_size, 2) [accel, steer]
+ Returns:
+ torch.Tensor: shape (batch_size,)
+ """
+ # info
+ prev_action = info["prev_action"]
+ t = info["t"] # horizon number
+
+ # path cost
+ # contouring and lag error of path
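+        # ec (contouring) and el (lag) are the lateral and longitudinal components of the
+        # position error, obtained by projecting it onto the reference heading at step t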
+ ec = torch.sin(self.reference_path[t, 2]) * (state[:, 0] - self.reference_path[t, 0]) \
+ -torch.cos(self.reference_path[t, 2]) * (state[:, 1] - self.reference_path[t, 1])
+ el = -torch.cos(self.reference_path[t, 2]) * (state[:, 0] - self.reference_path[t, 0]) \
+ -torch.sin(self.reference_path[t, 2]) * (state[:, 1] - self.reference_path[t, 1])
+
+ path_cost = self.Qc * ec.pow(2) + self.Ql * el.pow(2)
+
+ # velocity cost
+ v = state[:, 3]
+ v_target = self.reference_path[t, 3]
+ velocity_cost = self.Qv * (v - v_target).pow(2)
+
+ # compute obstacle cost from cost map
+ pos_batch = state[:, :2].unsqueeze(1) # (batch_size, 1, 2)
+ obstacle_cost = self.cost_map.compute_cost(pos_batch).squeeze(1) # (batch_size,)
+ obstacle_cost = self.Qo * obstacle_cost
+
+ # input cost
+ input_cost = self.Qin * action.pow(2).sum(dim=1)
+ input_cost += self.Qdin * (action - prev_action).pow(2).sum(dim=1)
+
+ cost = path_cost + velocity_cost + obstacle_cost + input_cost
+
+ return cost
+
+ def calc_ref_trajectory(self, state: torch.Tensor, path: torch.Tensor,
+ cind: int, horizon: int, DL=0.1, lookahead_distance=1.0, reference_path_interval=0.5
+ ) -> Tuple[torch.Tensor, int]:
+ """
+ Calculate the reference trajectory for the vehicle.
+
+ Args:
+ state (torch.Tensor): current state of the vehicle, shape (4,) [x, y, yaw, v]
+ path (torch.Tensor): reference path, shape (N, 4) [x, y, yaw, target_v]
+ cind (int): current index of the vehicle on the path
+ horizon (int): prediction horizon
+ DL (float): resolution of the path
+ lookahead_distance (float): distance to look ahead
+ reference_path_interval (float): interval of the reference path
+
+ Returns:
+ Tuple[torch.Tensor, int]: reference trajectory tensor, shape (horizon + 1, 4) [x, y, yaw, target_v], index of the vehicle on the path
+ """
+
+ def resample_path(path, DL):
+ # Calculate the number of segments needed for each pair of points
+ distances = torch.norm(path[1:, :2] - path[:-1, :2], dim=1)
+ num_points = torch.ceil(distances / DL).to(torch.int64)
+
+ # Create a tensor to store the new resampled path points
+ total_points = num_points.sum() + 1 # Include the first point
+ new_path = torch.zeros((total_points, path.shape[1]), dtype=path.dtype, device=path.device)
+
+ # Initialize the first point
+ new_path[0] = path[0]
+
+ # Generate all new points at once
+ start_idx = 1
+ for i in range(len(path) - 1):
+ segment_length = num_points[i].item()
+ if segment_length == 0:
+ continue
+
+ t = torch.linspace(0, 1, segment_length + 1, device=path.device)[1:] # Skip the first point to avoid duplication
+ interpolated_points = (1 - t).unsqueeze(1) * path[i] + t.unsqueeze(1) * path[i + 1]
+ new_path[start_idx:start_idx + segment_length] = interpolated_points
+ start_idx += segment_length
+
+ return new_path
+
+ # Resample the path with the specified DL
+ path = resample_path(path, DL)
+ ncourse = len(path)
+ xref = torch.zeros((horizon + 1, state.shape[0]), dtype=state.dtype, device=state.device)
+
+ # Calculate the nearest index to the vehicle
+ ind = torch.argmin(torch.norm(path[:, :2] - state[:2], dim=1)).item()
+ # Ensure the index is not less than the current index
+ ind = max(cind, ind)
+
+ # Generate the rest of the reference trajectory
+ travel = lookahead_distance
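+        # reference poses are spaced reference_path_interval metres apart along the
+        # resampled path, starting roughly lookahead_distance ahead of the nearest path point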
+
+ for i in range(horizon + 1):
+ travel += reference_path_interval
+ dind = int(round(travel / DL))
+
+ if (ind + dind) < ncourse:
+ xref[i] = path[ind + dind]
+ else:
+ xref[i] = path[-1]
+
+ return xref, ind
\ No newline at end of file
diff --git a/aichallenge/workspace/src/aichallenge_submit/mppi_controller/mppi_controller/mppi_controller_node.py b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/mppi_controller/mppi_controller_node.py
new file mode 100644
index 00000000..a0153179
--- /dev/null
+++ b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/mppi_controller/mppi_controller_node.py
@@ -0,0 +1,250 @@
+import rclpy
+from rclpy.node import Node
+from nav_msgs.msg import Odometry
+from geometry_msgs.msg import Pose, Point, Quaternion
+from autoware_auto_planning_msgs.msg import Trajectory, TrajectoryPoint
+from autoware_auto_control_msgs.msg import AckermannControlCommand
+from nav_msgs.msg import OccupancyGrid
+from rclpy.qos import QoSProfile
+import math
+import torch
+import numpy as np
+import tf_transformations
+from mppi_controller.mppi_controller import mppi_controller
+from mppi_controller.cost_map_tensor import CostMapTensor
+
+class MppiControllerNode(Node):
+
+ def __init__(self):
+ super().__init__('mppi_controller_node')
+
+ # parameters
+ # declare
+ self.declare_parameter('horizon', 25)
+ self.declare_parameter('num_samples', 4000)
+ self.declare_parameter('u_min', [-2.0, -0.25])
+ self.declare_parameter('u_max', [2.0, 0.25])
+ self.declare_parameter('sigmas', [0.5, 0.1])
+ self.declare_parameter('lambda', 1.0)
+ self.declare_parameter('auto_lambda', False)
+ self.declare_parameter('DL', 0.1)
+ self.declare_parameter('lookahead_distance', 3.0)
+ self.declare_parameter('reference_path_interval', 0.85)
+ self.declare_parameter('Qc', 2.0)
+ self.declare_parameter('Ql', 3.0)
+ self.declare_parameter('Qv', 2.0)
+ self.declare_parameter('Qo', 10000.0)
+ self.declare_parameter('Qin', 0.01)
+ self.declare_parameter('Qdin', 0.5)
+ self.declare_parameter('delta_t', 0.1)
+ self.declare_parameter('vehicle_L', 1.0)
+ self.declare_parameter('V_MAX', 8.0)
+ # get
+ config = {
+ "horizon": self.get_parameter('horizon').get_parameter_value().integer_value,
+ "num_samples": self.get_parameter('num_samples').get_parameter_value().integer_value,
+ "u_min": self.get_parameter('u_min').get_parameter_value().double_array_value,
+ "u_max": self.get_parameter('u_max').get_parameter_value().double_array_value,
+ "sigmas": self.get_parameter('sigmas').get_parameter_value().double_array_value,
+ "lambda": self.get_parameter('lambda').get_parameter_value().double_value,
+ "auto_lambda": self.get_parameter('auto_lambda').get_parameter_value().bool_value,
+ "DL": self.get_parameter('DL').get_parameter_value().double_value,
+ "lookahead_distance": self.get_parameter('lookahead_distance').get_parameter_value().double_value,
+ "reference_path_interval": self.get_parameter('reference_path_interval').get_parameter_value().double_value,
+ "Qc": self.get_parameter('Qc').get_parameter_value().double_value,
+ "Ql": self.get_parameter('Ql').get_parameter_value().double_value,
+ "Qv": self.get_parameter('Qv').get_parameter_value().double_value,
+ "Qo": self.get_parameter('Qo').get_parameter_value().double_value,
+ "Qin": self.get_parameter('Qin').get_parameter_value().double_value,
+ "Qdin": self.get_parameter('Qdin').get_parameter_value().double_value,
+ "delta_t": self.get_parameter('delta_t').get_parameter_value().double_value,
+ "vehicle_L": self.get_parameter('vehicle_L').get_parameter_value().double_value,
+ "V_MAX": self.get_parameter('V_MAX').get_parameter_value().double_value,
+ }
+
+ self.get_logger().info(f'config: {config}')
+
+ # publisher
+ # control command
+ self.pub_cmd = self.create_publisher(AckermannControlCommand, 'output/control_cmd', 1)
+ # planned path
+ self.pub_planned_path = self.create_publisher(Trajectory, 'output/planned_path', 1)
+ # debug path
+ self.pub_debug_path = self.create_publisher(Trajectory, 'debug/path', 1)
+
+ # subscriber
+ # state
+ self.sub_kinematics = self.create_subscription(
+ Odometry,
+ 'input/kinematics',
+ self.kinematics_callback,
+ 1
+ )
+ # reference trajectory
+ self.sub_trajectory = self.create_subscription(
+ Trajectory,
+ 'input/reference_trajectory',
+ self.trajectory_callback,
+ 1
+ )
+ # costmap
+ self.sub_costmap = self.create_subscription(
+ OccupancyGrid,
+ 'input/costmap',
+ self.costmap_callback,
+ 1
+ )
+
+ # device and dtype
+ self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+ self.dtype = torch.float32
+
+ # mppi controller
+ self.controller = mppi_controller(config=config, debug=True, device=self.device, dtype=self.dtype)
+
+ self.odometry: Odometry = None
+ self.trajectory: Trajectory = None
+ self.costmap: OccupancyGrid = None
+
+ self.timer = self.create_timer(0.03, self.on_timer)
+
+ def kinematics_callback(self, msg : Odometry):
+ self.odometry = msg
+
+ def trajectory_callback(self, msg : Trajectory):
+ self.trajectory = msg
+
+ def costmap_callback(self, msg : OccupancyGrid):
+ self.costmap = msg
+
+ def zero_ackermann_control_command(self):
+ cmd = AckermannControlCommand()
+ now = self.get_clock().now().to_msg()
+ cmd.stamp = now
+ cmd.longitudinal.stamp = now
+ cmd.longitudinal.speed = 0.0
+ cmd.longitudinal.acceleration = 0.0
+ cmd.lateral.stamp = now
+ cmd.lateral.steering_tire_angle = 0.0
+ return cmd
+
+ def on_timer(self):
+ if not self.subscribe_message_available():
+ return
+
+ cmd = self.zero_ackermann_control_command()
+
+ # convert tensor
+ # state
+ state_tensor = torch.tensor([
+ self.odometry.pose.pose.position.x,
+ self.odometry.pose.pose.position.y,
+ tf_transformations.euler_from_quaternion([
+ self.odometry.pose.pose.orientation.x,
+ self.odometry.pose.pose.orientation.y,
+ self.odometry.pose.pose.orientation.z,
+ self.odometry.pose.pose.orientation.w
+ ])[2],
+ self.odometry.twist.twist.linear.x
+ ], dtype=self.dtype, device=self.device)
+ # reference path
+ reference_path_tensor = torch.tensor([
+ [point.pose.position.x,
+ point.pose.position.y,
+ tf_transformations.euler_from_quaternion([
+ point.pose.orientation.x,
+ point.pose.orientation.y,
+ point.pose.orientation.z,
+ point.pose.orientation.w
+ ])[2],
+ point.longitudinal_velocity_mps] for point in self.trajectory.points
+ ], dtype=self.dtype, device=self.device)
+ # convert OccupancyGrid to tensor
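+        # OccupancyGrid.data is row-major (index = y * width + x), so reshape to (height, width)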
+ costmap_tensor: CostMapTensor = CostMapTensor(
+ cost_map=torch.tensor(self.costmap.data, dtype=self.dtype, device=self.device).reshape(self.costmap.info.height, self.costmap.info.width),
+ cell_size=self.costmap.info.resolution,
+ origin=(self.costmap.info.origin.position.x,
+ self.costmap.info.origin.position.y),
+ device=self.device,
+ dtype=self.dtype
+ )
+ # set cost map
+ self.controller.set_cost_map(costmap_tensor)
+
+ # update controller
+ action_seq, state_seq = self.controller.update(state_tensor, reference_path_tensor)
+        # get top samples (currently unused; kept for debugging / future visualization)
+ top_samples = self.controller.get_top_samples(num_samples=300)
+
+        # convert to numpy: state_seq has a leading batch dim -> take batch 0 (shape (horizon+1, 4));
+        # action_seq has no batch dim -> take the first action (shape (2,))
+ state = state_seq.cpu().numpy()[0]
+ action = action_seq.cpu().numpy()[0]
+
+ # publish control command
+ cmd.longitudinal.speed = float(state[0, 3])
+ cmd.longitudinal.acceleration = float(action[0])
+ cmd.lateral.steering_tire_angle = float(action[1])
+ self.pub_cmd.publish(cmd)
+
+ # publish planned path
+ planned_path = Trajectory()
+ planned_path.header.stamp = self.get_clock().now().to_msg()
+ planned_path.header.frame_id = 'map'
+ planned_path.points = [
+ TrajectoryPoint(
+ pose=Pose(
+ position=Point(x=float(point[0]), y=float(point[1])),
+ orientation=Quaternion(
+ x=tf_transformations.quaternion_from_euler(0.0, 0.0, float(point[2]))[0],
+ y=tf_transformations.quaternion_from_euler(0.0, 0.0, float(point[2]))[1],
+ z=tf_transformations.quaternion_from_euler(0.0, 0.0, float(point[2]))[2],
+ w=tf_transformations.quaternion_from_euler(0.0, 0.0, float(point[2]))[3]
+ )
+ ),
+ longitudinal_velocity_mps=float(point[3])
+ )
+ for point in state]
+
+ self.pub_planned_path.publish(planned_path)
+
+ # publish debug path
+ debug_path = Trajectory()
+ debug_path.header.stamp = self.get_clock().now().to_msg()
+ debug_path.header.frame_id = 'map'
+ debug_path.points = [
+ TrajectoryPoint(
+ pose=Pose(
+ position=Point(x=float(point[0]), y=float(point[1])),
+ orientation=Quaternion(
+ x=tf_transformations.quaternion_from_euler(0.0, 0.0, float(point[2]))[0],
+ y=tf_transformations.quaternion_from_euler(0.0, 0.0, float(point[2]))[1],
+ z=tf_transformations.quaternion_from_euler(0.0, 0.0, float(point[2]))[2],
+ w=tf_transformations.quaternion_from_euler(0.0, 0.0, float(point[2]))[3]
+ )
+ ),
+ longitudinal_velocity_mps=float(point[3])
+ )
+ for point in self.controller.reference_path.cpu().numpy()]
+ self.pub_debug_path.publish(debug_path)
+
+
+ def subscribe_message_available(self):
+ if not self.odometry:
+ self.get_logger().info('odometry is not available', throttle_duration_sec=1)
+ return False
+ if not self.trajectory:
+ self.get_logger().info('trajectory is not available', throttle_duration_sec=1)
+ return False
+ if not self.costmap:
+ self.get_logger().info('costmap is not available', throttle_duration_sec=1)
+ return False
+ return True
+
+def main(args=None):
+ rclpy.init(args=args)
+ node = MppiControllerNode()
+ rclpy.spin(node)
+ rclpy.shutdown()
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/aichallenge/workspace/src/aichallenge_submit/mppi_controller/package.xml b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/package.xml
new file mode 100644
index 00000000..e35b459d
--- /dev/null
+++ b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/package.xml
@@ -0,0 +1,16 @@
+<?xml version="1.0"?>
+<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
+<package format="3">
+  <name>mppi_controller</name>
+  <version>0.0.0</version>
+  <description>TODO: Package description</description>
+  <maintainer email="egrt117@gmail.com">eguchi</maintainer>
+  <license>TODO: License declaration</license>
+
+  <depend>rclpy</depend>
+  <exec_depend>rclpy</exec_depend>
+
+  <export>
+    <build_type>ament_python</build_type>
+  </export>
+</package>
\ No newline at end of file
diff --git a/aichallenge/workspace/src/aichallenge_submit/mppi_controller/resource/mppi_controller b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/resource/mppi_controller
new file mode 100644
index 00000000..e69de29b
diff --git a/aichallenge/workspace/src/aichallenge_submit/mppi_controller/setup.cfg b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/setup.cfg
new file mode 100644
index 00000000..7d2409e1
--- /dev/null
+++ b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script_dir=$base/lib/mppi_controller
+[install]
+install_scripts=$base/lib/mppi_controller
\ No newline at end of file
diff --git a/aichallenge/workspace/src/aichallenge_submit/mppi_controller/setup.py b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/setup.py
new file mode 100644
index 00000000..37ccad01
--- /dev/null
+++ b/aichallenge/workspace/src/aichallenge_submit/mppi_controller/setup.py
@@ -0,0 +1,33 @@
+import subprocess
+import sys
+
+# dependency: install Python packages at build time (this setup.py runs pip during the colcon build)
+subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'setuptools==58.0.4', '--quiet'])
+subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'torch', '--index-url', 'https://download.pytorch.org/whl/cpu', '--quiet'])
+subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'transforms3d', '--quiet'])
+
+from setuptools import setup, find_packages
+package_name = 'mppi_controller'
+
+setup(
+ name=package_name,
+ version='0.0.0',
+ packages=find_packages(),
+ data_files=[
+ ('share/ament_index/resource_index/packages', ['resource/' + package_name]),
+ ('share/' + package_name, ['package.xml']),
+ ('share/' + package_name + '/config', ['config/mppi_controller.param.yaml']),
+ ],
+ install_requires=['setuptools', 'scipy'],
+ zip_safe=True,
+ maintainer='michikuni eguchi',
+ maintainer_email='egrt117@gmail.com',
+ description='MPPI controller',
+ license='License declaration',
+ tests_require=['pytest'],
+ entry_points={
+ 'console_scripts': [
+ 'mppi_controller_node = mppi_controller.mppi_controller_node:main',
+ ],
+ },
+)
\ No newline at end of file
diff --git a/aichallenge/workspace/src/aichallenge_system/aichallenge_system_launch/config/autoware.rviz b/aichallenge/workspace/src/aichallenge_system/aichallenge_system_launch/config/autoware.rviz
index 2d248d76..9e75c143 100644
--- a/aichallenge/workspace/src/aichallenge_system/aichallenge_system_launch/config/autoware.rviz
+++ b/aichallenge/workspace/src/aichallenge_system/aichallenge_system_launch/config/autoware.rviz
@@ -5,9 +5,13 @@ Panels:
Property Tree Widget:
Expanded:
- /Sensing1/LiDAR1/ConcatenatePointCloud1/Autocompute Value Bounds1
- - /Planning1
+ - /Planning1/ScenarioPlanning1/ScenarioTrajectory1
+ - /Planning1/ScenarioPlanning1/ScenarioTrajectory1/View Path1
+ - /Planning1/Costmap1/Map1
+ - /Planning1/MPPI1
+ - /Planning1/MPPI1/Trajectory1
Splitter Ratio: 0.557669460773468
- Tree Height: 242
+ Tree Height: 158
- Class: rviz_common/Selection
Name: Selection
- Class: rviz_common/Tool Properties
@@ -865,7 +869,7 @@ Visualization Manager:
- Class: rviz_common/Group
Displays:
- Class: rviz_plugins/Trajectory
- Color Border Vel Max: 3
+ Color Border Vel Max: 8
Enabled: true
Name: ScenarioTrajectory
Topic:
@@ -889,7 +893,7 @@ Visualization Manager:
Color: 0; 0; 0
Constant Color: false
Value: true
- Width: 2
+ Width: 0.5
View Point:
Alpha: 1
Color: 0; 60; 255
@@ -1941,7 +1945,7 @@ Visualization Manager:
Displays:
- Alpha: 0.699999988079071
Class: rviz_default_plugins/Map
- Color Scheme: map
+ Color Scheme: costmap
Draw Behind: false
Enabled: true
Name: Map
@@ -1962,6 +1966,51 @@ Visualization Manager:
Value: true
Enabled: true
Name: Costmap
+ - Class: rviz_common/Group
+ Displays:
+ - Class: rviz_plugins/Trajectory
+ Color Border Vel Max: 8
+ Enabled: true
+ Name: Trajectory
+ Topic:
+ Depth: 1
+ Durability Policy: Volatile
+ Filter size: 11
+ History Policy: Keep Last
+ Reliability Policy: Reliable
+ Value: /planning/output/mppi_planned_path
+ Value: true
+ View Footprint:
+ Alpha: 1
+ Color: 230; 230; 50
+ Offset from BaseLink: 0
+ Rear Overhang: 1.0299999713897705
+ Value: false
+ Vehicle Length: 4.769999980926514
+ Vehicle Width: 1.8300000429153442
+ View Path:
+ Alpha: 1
+ Color: 0; 0; 0
+ Constant Color: false
+ Value: true
+ Width: 0.5
+ View Point:
+ Alpha: 1
+ Color: 0; 60; 255
+ Offset: 0
+ Radius: 0.10000000149011612
+ Value: false
+ View Text Velocity:
+ Scale: 0.30000001192092896
+ Value: false
+ View Velocity:
+ Alpha: 1
+ Color: 0; 0; 0
+ Constant Color: false
+ Scale: 0.30000001192092896
+ Value: true
+ Enabled: true
+ Name: MPPI
Enabled: true
Name: Planning
- Class: rviz_common/Group
@@ -2170,11 +2219,11 @@ Visualization Manager:
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
- Scale: 10.328941345214844
+ Scale: 7.122405529022217
Target Frame: viewer
Value: TopDownOrtho (rviz_default_plugins)
- X: 0
- Y: 0
+ X: 2.5461606979370117
+ Y: -8.487200736999512
Saved:
- Class: rviz_default_plugins/ThirdPersonFollower
Distance: 18
@@ -2220,12 +2269,12 @@ Window Geometry:
collapsed: false
Displays:
collapsed: false
- Height: 1376
+ Height: 1043
Hide Left Dock: false
Hide Right Dock: false
InitialPoseButtonPanel:
collapsed: false
- QMainWindow State: 000000ff00000000fd00000004000000000000029700000531fc0200000010fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c00610079007301000000140000012d000000c700fffffffc000001470000010b000000bb0100001afa000000000100000002fb0000000a0056006900650077007301000000000000033c0000010000fffffffb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000000ffffffff0000008c00fffffffb00000024004100750074006f00770061007200650053007400610074006500500061006e0065006c0100000258000001fd0000015e00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000c00430061006d0065007200610100000682000000eb0000000000000000fb0000000a0049006d0061006700650100000505000002680000000000000000fb0000002c0049006e0069007400690061006c0050006f007300650042007500740074006f006e00500061006e0065006c000000068f000000de0000007100fffffffb0000002c0049006e0069007400690061006c0050006f007300650042007500740074006f006e00500061006e0065006c000000068f000000de0000000000000000fb00000030005200650063006f0067006e006900740069006f006e0052006500730075006c0074004f006e0049006d006100670065000000038a0000010b0000002800fffffffb0000002a004100750074006f0077006100720065004400610074006500540069006d006500500061006e0065006c010000045b0000005e0000003f00fffffffb00000034004100750074006f007700610072006500530063007200650065006e004300610070007400750072006500500061006e0065006c01000004bf000000860000005d00ffffff000000010000015f000006fffc0200000002fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000e7a0000005afc0100000001fb0000000a00560069006500770073030000004e00000080000002e1000001970000000300000e7a0000005afc0100000002fb0000000800540069006d0065010000000000000e7a0000000000000000fb0000000800540069006d00650100000000000004500000000000000000000009df0000053100000004000000040000000800000008fc0000000100000000000000010000000a0054006f006f006c00730300000000ffffffff0000000000000000
+ QMainWindow State: 000000ff00000000fd000000040000000000000297000003e4fc0200000010fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c0061007900730100000014000000d9000000c700fffffffc000000f3000000cc000000bb0100001afa000000000100000002fb0000000a0056006900650077007301000000000000033c0000010000fffffffb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000000ffffffff0000008c00fffffffb00000024004100750074006f00770061007200650053007400610074006500500061006e0065006c01000001c50000017d0000015e00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000c00430061006d0065007200610100000682000000eb0000000000000000fb0000000a0049006d0061006700650100000505000002680000000000000000fb0000002c0049006e0069007400690061006c0050006f007300650042007500740074006f006e00500061006e0065006c000000068f000000de0000007100fffffffb0000002c0049006e0069007400690061006c0050006f007300650042007500740074006f006e00500061006e0065006c000000068f000000de0000000000000000fb00000030005200650063006f0067006e006900740069006f006e0052006500730075006c0074004f006e0049006d006100670065000000038a0000010b0000002800fffffffb0000002a004100750074006f0077006100720065004400610074006500540069006d006500500061006e0065006c0100000348000000450000003f00fffffffb00000034004100750074006f007700610072006500530063007200650065006e004300610070007400750072006500500061006e0065006c0100000393000000650000005d00ffffff000000010000015f000006fffc0200000002fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000e7a0000005afc0100000001fb0000000a00560069006500770073030000004e00000080000002e1000001970000000300000e7a0000005afc0100000002fb0000000800540069006d0065010000000000000e7a0000000000000000fb0000000800540069006d0065010000000000000450000000000000000000000435000003e400000004000000040000000800000008fc0000000100000000000000010000000a0054006f006f006c00730300000000ffffffff0000000000000000
RecognitionResultOnImage:
collapsed: false
Selection:
@@ -2234,6 +2283,6 @@ Window Geometry:
collapsed: false
Views:
collapsed: false
- Width: 3370
- X: 70
- Y: 27
+ Width: 1920
+ X: 0
+ Y: 32
diff --git a/aichallenge/workspace/src/aichallenge_system/aichallenge_system_launch/launch/aichallenge_system.launch.xml b/aichallenge/workspace/src/aichallenge_system/aichallenge_system_launch/launch/aichallenge_system.launch.xml
index eda86a3a..9969163f 100644
--- a/aichallenge/workspace/src/aichallenge_system/aichallenge_system_launch/launch/aichallenge_system.launch.xml
+++ b/aichallenge/workspace/src/aichallenge_system/aichallenge_system_launch/launch/aichallenge_system.launch.xml
@@ -9,7 +9,7 @@
-
+