From 2e262df1f5ecec3695ab3f2bbd64006101270589 Mon Sep 17 00:00:00 2001
From: Timothy Glover
Date: Wed, 19 Jun 2024 09:40:18 +0100
Subject: [PATCH] Add return_tracks to UncertaintyRewardFunction and a
 property for adding measurement noise in sensor management

---
 ...h_for_Autonomous_Source_Term_Estimation.py |  7 +--
 stonesoup/sensormanager/reward.py             | 46 +++++++++++++++----
 .../sensormanager/tests/test_sensormanager.py | 29 +++++++++++++++++-
 3 files changed, 69 insertions(+), 13 deletions(-)

diff --git a/docs/examples/sensormanagement/Monte_Carlo_Tree_Search_for_Autonomous_Source_Term_Estimation.py b/docs/examples/sensormanagement/Monte_Carlo_Tree_Search_for_Autonomous_Source_Term_Estimation.py
index bf397e441..38050953c 100644
--- a/docs/examples/sensormanagement/Monte_Carlo_Tree_Search_for_Autonomous_Source_Term_Estimation.py
+++ b/docs/examples/sensormanagement/Monte_Carlo_Tree_Search_for_Autonomous_Source_Term_Estimation.py
@@ -73,7 +73,6 @@
 # General imports and environment setup
 import numpy as np
 from datetime import datetime, timedelta
-import random
 
 np.random.seed(1991)
 
@@ -253,13 +252,15 @@ def constraint_function(particle_state):
 reward_updater = ParticleUpdater(measurement_model=None)
 
 # Myopic benchmark approach
-reward_funcA = ExpectedKLDivergence(updater=reward_updater)
+reward_funcA = ExpectedKLDivergence(updater=reward_updater, measurement_noise=True)
 sensormanagerA = BruteForceSensorManager(sensors={gas_sensorA},
                                          platforms={sensor_platformA},
                                          reward_function=reward_funcA)
 
 # MCTS with rollout approach
-reward_funcB = ExpectedKLDivergence(updater=reward_updater, return_tracks=True)
+reward_funcB = ExpectedKLDivergence(updater=reward_updater,
+                                    measurement_noise=True,
+                                    return_tracks=True)
 sensormanagerB = MCTSRolloutSensorManager(sensors={gas_sensorB},
                                           platforms={sensor_platformB},
                                           reward_function=reward_funcB,
diff --git a/stonesoup/sensormanager/reward.py b/stonesoup/sensormanager/reward.py
index dd34d4644..111a234b6 100644
--- a/stonesoup/sensormanager/reward.py
+++ b/stonesoup/sensormanager/reward.py
@@ -22,7 +22,7 @@
 from ..updater.base import Updater
 from ..updater.particle import ParticleUpdater
 from ..resampler.particle import SystematicResampler
-from ..types.state import State
+from ..types.groundtruth import GroundTruthState
 from ..dataassociator.base import DataAssociator
 
 
@@ -71,6 +71,14 @@ class UncertaintyRewardFunction(RewardFunction):
     method_sum: bool = Property(default=True, doc="Determines method of calculating reward."
                                                   "Default calculates sum across all targets."
"Otherwise calculates mean of all targets.") + return_tracks: bool = Property(default=False, + doc="A flag for allowing the predicted track, " + "used to calculate the reward, to be " + "returned.") + measurement_noise: bool = Property(default=False, + doc="Decide whether or not to apply measurement model " + "noise to the predicted measurements for sensor " + "management.") def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track], metric_time: datetime.datetime, *args, **kwargs): @@ -116,8 +124,13 @@ def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track] for sensor in predicted_sensors: # Assumes one detection per track - detections = {detection.groundtruth_path: detection - for detection in sensor.measure(predicted_tracks, noise=False) + detections = {predicted_track: detection + for detection in + sensor.measure({GroundTruthState(predicted_track.mean, + timestamp=predicted_track.timestamp, + metadata=predicted_track.metadata)}, + noise=self.measurement_noise) + for predicted_track in predicted_tracks if isinstance(detection, TrueDetection)} for predicted_track, detection in detections.items(): @@ -143,7 +156,10 @@ def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track] config_metric /= len(detections) # Return value of configuration metric - return config_metric + if self.return_tracks: + return config_metric, predicted_tracks + else: + return config_metric class ExpectedKLDivergence(RewardFunction): @@ -183,6 +199,11 @@ class ExpectedKLDivergence(RewardFunction): "used to calculate the reward, to be " "returned.") + measurement_noise: bool = Property(default=False, + doc="Decide whether or not to apply measurement model " + "noise to the predicted measurements for sensor " + "management.") + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.KLD = KLDivergence() @@ -281,9 +302,11 @@ def _generate_detections(self, predicted_tracks, sensors, timestamp=None): for sensor in sensors: detections = {} for predicted_track in predicted_tracks: - tmp_detection = sensor.measure({State(predicted_track.mean, - timestamp=predicted_track.timestamp)}, - noise=True) + tmp_detection = sensor.measure( + {GroundTruthState(predicted_track.mean, + timestamp=predicted_track.timestamp, + metadata=predicted_track.metadata)}, + noise=self.measurement_noise) detections.update({predicted_track: tmp_detection}) if self.data_associator: @@ -327,6 +350,8 @@ class MultiUpdateExpectedKLDivergence(ExpectedKLDivergence): doc="Number of measurements to generate from each " "track prediction. 
 
+    measurement_noise: bool = Property(default=True)
+
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.KLD = KLDivergence()
@@ -353,8 +378,11 @@ def _generate_detections(self, predicted_tracks, sensors, timestamp=None):
                                                               nparts=self.updates_per_track)
                 tmp_detections = set()
                 for state in measurement_sources.state_vector:
-                    tmp_detections.update(sensor.measure({State(state, timestamp=timestamp)},
-                                                         noise=True))
+                    tmp_detections.update(
+                        sensor.measure({GroundTruthState(state,
+                                                         timestamp=timestamp,
+                                                         metadata=predicted_track.metadata)},
+                                       noise=self.measurement_noise))
                 detections.update({predicted_track: tmp_detections})
             all_detections.update({sensor: detections})
 
diff --git a/stonesoup/sensormanager/tests/test_sensormanager.py b/stonesoup/sensormanager/tests/test_sensormanager.py
index e1a1fbe0f..8db97a282 100644
--- a/stonesoup/sensormanager/tests/test_sensormanager.py
+++ b/stonesoup/sensormanager/tests/test_sensormanager.py
@@ -694,11 +694,38 @@ def test_sensor_manager_with_platform(params):
         np.diag([1.5, 0.25, 1.5, 0.25] +
                 np.random.normal(0, 5e-4, 4))),  # track2_state2
         MCTSBestChildPolicyEnum.MAXCREWARD,  # best_child_policy
+    ), (
+        ParticlePredictor,  # predictor_obj
+        ParticleUpdater,  # updater_obj
+        None,  # hypothesiser
+        None,  # associator
+        UncertaintyRewardFunction,  # reward_function_obj
+        ParticleState(state_vector=StateVectors(np.random.multivariate_normal(
+            mean=np.array([1, 1, 1, 1]),
+            cov=np.diag([1.5, 0.25, 1.5, 0.25]),
+            size=100).T),
+            weight=np.array([1/100]*100)),  # track1_state1
+        ParticleState(state_vector=StateVectors(np.random.multivariate_normal(
+            mean=np.array([2, 1.5, 2, 1.5]),
+            cov=np.diag([3, 0.5, 3, 0.5]),
+            size=100).T),
+            weight=np.array([1/100]*100)),  # track1_state2
+        ParticleState(state_vector=StateVectors(np.random.multivariate_normal(
+            mean=np.array([-1, 1, -1, 1]),
+            cov=np.diag([3, 0.5, 3, 0.5]),
+            size=100).T),
+            weight=np.array([1/100]*100)),  # track2_state1
+        ParticleState(state_vector=StateVectors(np.random.multivariate_normal(
+            mean=np.array([2, 1.5, 2, 1.5]),
+            cov=np.diag([1.5, 0.25, 1.5, 0.25]),
+            size=100).T),
+            weight=np.array([1/100]*100)),  # track2_state2
+        'max_cumulative_reward',  # best_child_policy
     )
 ],
     ids=['KLDivergenceMCTSNoAssociation', 'KLDivergenceMCTSAssociation',
          'KLDivergenceMCTSGaussianTest', 'KLDMCTSGaussianPolicy1', 'KLDMCTSGaussianPolicy2',
-         'KLDMCTSGaussianEnum']
+         'KLDMCTSGaussianEnum', 'UncertaintyMCTSTest']
 )
 def test_mcts_sensor_managers(predictor_obj, updater_obj, hypothesiser_obj, associator_obj,
                               reward_function_obj, track1_state1, track1_state2, track2_state1,
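
Usage sketch (not part of the patch): a minimal example of how the two new
flags compose, following the reward-function call signature shown above. It
assumes reward_updater, config, tracks and metric_time are already defined,
as in the source term estimation example script modified by this patch.

    from stonesoup.sensormanager.reward import ExpectedKLDivergence

    # measurement_noise=True draws noisy predicted measurements rather than
    # noiseless ones; return_tracks=True additionally returns the predicted
    # tracks used to compute the reward.
    reward_func = ExpectedKLDivergence(updater=reward_updater,
                                       measurement_noise=True,
                                       return_tracks=True)

    # With return_tracks=True the call yields a (reward, predicted_tracks)
    # tuple instead of a bare reward value.
    reward, predicted_tracks = reward_func(config, tracks, metric_time)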