From 935a7ebe11ee7cac5e79f3a4a3a7d8d7e18cff38 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 15 Aug 2022 11:30:41 +0100 Subject: [PATCH 01/87] Added SMCPHD implementation --- stonesoup/custom/smcphd.py | 156 +++++++++++++++++++++++++++++++++++++ 1 file changed, 156 insertions(+) create mode 100644 stonesoup/custom/smcphd.py diff --git a/stonesoup/custom/smcphd.py b/stonesoup/custom/smcphd.py new file mode 100644 index 000000000..269c356b8 --- /dev/null +++ b/stonesoup/custom/smcphd.py @@ -0,0 +1,156 @@ +import copy +from typing import List + +import numpy as np +from scipy.stats import multivariate_normal + +from stonesoup.base import Base, Property +from stonesoup.models.measurement import MeasurementModel +from stonesoup.models.transition import TransitionModel +from stonesoup.resampler import Resampler +from stonesoup.types.array import StateVectors +from stonesoup.types.detection import Detection, MissedDetection +from stonesoup.types.hypothesis import SingleProbabilityHypothesis +from stonesoup.types.multihypothesis import MultipleHypothesis +from stonesoup.types.numeric import Probability +from stonesoup.types.prediction import Prediction +from stonesoup.types.state import State +from stonesoup.types.update import Update + + +class SMCPHDFilter(Base): + """ + Sequential Monte-Carlo (SMC) PHD filter implementation, based on [1]_ + + .. [1] Ba-Ngu Vo, S. Singh and A. Doucet, "Sequential Monte Carlo Implementation of the + PHD Filter for Multi-target Tracking," Sixth International Conference of Information + Fusion, 2003. Proceedings of the, 2003, pp. 792-799, doi: 10.1109/ICIF.2003.177320. + .. [2] P. Horridge and S. Maskell, “Using a probabilistic hypothesis density filter to + confirm tracks in a multi-target environment,” in 2011 Jahrestagung der Gesellschaft + fr Informatik, October 2011. 
+ """ + + transition_model: TransitionModel = Property(doc='The transition model') + measurement_model: MeasurementModel = Property(doc='The measurement model') + prob_detect: Probability = Property(doc='The probability of detection') + prob_death: Probability = Property(doc='The probability of death') + prob_birth: Probability = Property(doc='The probability of birth') + birth_rate: float = Property(doc='The birth rate (i.e. number of new/born targets at each iteration(') + birth_density: State = Property(doc='The birth density (i.e. density from which we sample birth particles)') + clutter_intensity: float = Property(doc='The clutter intensity per unit volume') + resampler: Resampler = Property(default=None, doc='Resampler to prevent particle degeneracy') + num_samples: int = Property(doc='The number of samples. Default is 1024', default=1024) + birth_scheme: str = Property( + doc='The scheme for birth particles. Options are "expansion" | "mixture". ' + 'Default is "expansion"', + default='expansion' + ) + + def iterate(self, state, detections: List[Detection], timestamp): + prior_weights = state.weight + time_interval = timestamp - state.timestamp + detections_list = list(detections) + + # 1) Predict + # =======================================================================================> + + # Predict particles forward + pred_particles_sv = self.transition_model.function(state, + time_interval=time_interval, + noise=True) + + if self.birth_scheme == 'expansion': + # Expansion birth scheme, as described in [1] + # Compute number of birth particles (J_k) as a fraction of the number of particles + num_birth = round(float(self.prob_birth * self.num_samples)) + + total_samples = self.num_samples + num_birth # L_{k-1} + Jk + + # Sample birth particles + birth_particles = multivariate_normal.rvs(self.birth_density.mean.ravel(), + self.birth_density.covar, + num_birth) + birth_weights = np.ones((num_birth,)) * Probability(self.birth_rate / num_birth) + + # Surviving 
particle weights + pred_weights = (1 - self.prob_death) * prior_weights + + # Append birth particles to predicted ones + pred_particles_sv = StateVectors( + np.concatenate((pred_particles_sv, birth_particles.T), axis=1)) + pred_weights = np.concatenate((pred_weights, birth_weights)) + else: + # Mixture based birth scheme + total_samples = self.num_samples + + # Flip a coin for each particle to decide if it gets replaced by a birth particle + birth_inds = np.flatnonzero(np.random.binomial(1, self.prob_birth, self.num_samples)) + + # Sample birth particles and replace in original state vector matrix + birth_particles = multivariate_normal.rvs(self.birth_density.mean.ravel(), + self.birth_density.covar, + len(birth_inds)) + pred_particles_sv[:, birth_inds] = birth_particles.T + + # Process weights + pred_weights = ((1 - self.prob_death) + Probability(self.birth_rate / total_samples)) * prior_weights + + prediction = Prediction.from_state(state, state_vector=pred_particles_sv, + weight=pred_weights, + timestamp=timestamp, particle_list=None, + transition_model=self.transition_model) + + # 2) Update + # =======================================================================================> + + # Compute g(z|x) likelihood matrix as in [1] + g = np.zeros((total_samples, len(detections)), dtype=Probability) + for i, detection in enumerate(detections_list): + g[:, i] = detection.measurement_model.pdf(detection, prediction, + noise=True) + + # Calculate w^{n,i} Eq. (20) of [2] + # (i.e. the individual sum terms inside the square brackets in Eq. (8) of [1], multiplied + # by the corresponding predicted weight w_{k|k-1}^(i)) + weights_per_hyp = np.zeros((total_samples, len(detections) + 1), dtype=Probability) + weights_per_hyp[:, 0] = (1 - self.prob_detect) * pred_weights # Null hypothesis + if len(detections): + # C = \psi_{k,z}(x_k^(i)) * w_{k|k-1}^(i) in Eq. (8) of [1] + C = self.prob_detect * g * pred_weights[:, np.newaxis] + Ck = np.sum(C, axis=0) # C_k(z) (Eq. 
(9) of [1]) + C_plus = Ck + self.clutter_intensity # \kappa_{k}(z) + Ck(z) term in Eq. (8) of [1] + weights_per_hyp[:, 1:] = C / C_plus # True-detection hypotheses + + # Construct hypothesis objects (StoneSoup specific) + intensity_per_hyp = np.sum(weights_per_hyp, axis=0) + single_hypotheses = [ + SingleProbabilityHypothesis(prediction, + measurement=MissedDetection(timestamp=timestamp), + probability=Probability(intensity_per_hyp[0]))] + for i, detection in enumerate(detections_list): + single_hypotheses.append( + SingleProbabilityHypothesis(prediction, + measurement=detection, + probability=Probability(intensity_per_hyp[i + 1])) + ) + hypothesis = MultipleHypothesis(single_hypotheses, normalise=False) + + # Update weights Eq. (8) of [1] + # w_k^i = \sum_{z \in Z_k}{w^{n,i}}, where i is the index of z in Z_k + post_weights = np.sum(weights_per_hyp, axis=1) + + # Resample + num_targets = np.sum(post_weights) # N_{k|k} + update = copy.copy(prediction) + update.weight = post_weights / num_targets # Normalize weights + if self.resampler is not None: + update = self.resampler.resample(update, self.num_samples) # Resample + update.weight = np.array(update.weight) * num_targets # De-normalize + + return Update.from_state( + state=prediction, + state_vector=update.state_vector, + weight=update.weight, + particle_list=None, + hypothesis=hypothesis, + timestamp=timestamp) \ No newline at end of file From 0b90c501cdc207d4582623948f787c9ac81134c0 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 15 Aug 2022 17:27:02 +0100 Subject: [PATCH 02/87] Add n_particles as optional argument to SystematicResampler --- stonesoup/resampler/particle.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/stonesoup/resampler/particle.py b/stonesoup/resampler/particle.py index dc35e575c..427948175 100644 --- a/stonesoup/resampler/particle.py +++ b/stonesoup/resampler/particle.py @@ -8,7 +8,7 @@ class SystematicResampler(Resampler): - def resample(self, particles): + def 
resample(self, particles, n_particles=None): """Resample the particles Parameters @@ -24,7 +24,8 @@ def resample(self, particles): if not isinstance(particles, ParticleState): particles = ParticleState(None, particle_list=particles) - n_particles = len(particles) + if n_particles is None: + n_particles = len(particles) weight = Probability(1 / n_particles) log_weights = np.asfarray(np.log(particles.weight)) From 9ea0ce89a70bf1706d937d3681eae66aebb7428c Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 15 Aug 2022 17:27:55 +0100 Subject: [PATCH 03/87] Add SMCPHD example script --- examples/smcphd-example.py | 160 +++++++++++++++++++++++++++++++++++++ 1 file changed, 160 insertions(+) create mode 100644 examples/smcphd-example.py diff --git a/examples/smcphd-example.py b/examples/smcphd-example.py new file mode 100644 index 000000000..8bd824389 --- /dev/null +++ b/examples/smcphd-example.py @@ -0,0 +1,160 @@ +from matplotlib import pyplot as plt +from stonesoup.resampler.particle import SystematicResampler +from stonesoup.types.array import StateVector, StateVectors +from stonesoup.types.numeric import Probability +from stonesoup.types.particle import Particle +from stonesoup.types.state import GaussianState, ParticleState +from stonesoup.custom.smcphd import SMCPHDFilter + +from datetime import datetime +from datetime import timedelta +import numpy as np +from scipy.stats import uniform, multivariate_normal + +from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \ + ConstantVelocity +from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState +from stonesoup.types.detection import TrueDetection +from stonesoup.types.detection import Clutter +from stonesoup.models.measurement.linear import LinearGaussian + +np.random.seed(1991) + +# Parameters +# ========== +start_time = datetime.now() # Simulation start time +prob_detect = Probability(.9) # 90% chance of detection. 
+prob_death = Probability(0.01) # Probability of death +prob_birth = Probability(0.1) # Probability of birth +birth_rate = 0.05 # Birth-rate (Mean number of new targets per scan) +clutter_rate = .01 # Clutter-rate (Mean number of clutter measurements per scan) +surveillance_region = [[-10, 30], [0, 30]] # The surveillance region x=[-10, 30], y=[0, 30] +surveillance_area = (surveillance_region[0][1] - surveillance_region[0][0]) \ + * (surveillance_region[1][1] - surveillance_region[1][0]) +clutter_intensity = clutter_rate / surveillance_area # Clutter intensity per unit volume/area +birth_density = GaussianState(StateVector(np.array([10., 0.0, 10., 0.0])), + np.diag([10. ** 2, 1. ** 2, 10. ** 2, 1. ** 2])) # Birth density +birth_scheme = 'mixture' # Birth scheme. Possible values are 'expansion' and 'mixture' +num_particles = 2 ** 12 # Number of particles used by the PHD filter +num_iter = 100 # Number of simulation steps +PLOT = True # Set [True | False] to turn plotting [ON | OFF] + +# Models +# ====== +# Transition model +transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.05), + ConstantVelocity(0.05)]) +# Measurement model +measurement_model = LinearGaussian(ndim_state=4, + mapping=(0, 2), + noise_covar=np.array([[0.02, 0], + [0, 0.02]])) + +# Simulate Groundtruth +# ==================== +gnd_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), + ConstantVelocity(0.)]) +truths = set() +truth = GroundTruthPath([GroundTruthState([0, 0.2, 0, 0.2], timestamp=start_time)]) +for k in range(1, num_iter + 1): + truth.append(GroundTruthState( + gnd_transition_model.function(truth[k - 1], noise=True, time_interval=timedelta(seconds=1)), + timestamp=start_time + timedelta(seconds=k))) +truths.add(truth) + +truth = GroundTruthPath([GroundTruthState([0, 0.2, 20, -0.2], timestamp=start_time)]) +for k in range(1, num_iter + 1): + truth.append(GroundTruthState( + gnd_transition_model.function(truth[k - 1], noise=True, 
time_interval=timedelta(seconds=1)), + timestamp=start_time + timedelta(seconds=k))) +truths.add(truth) + +timestamps = [] +for k in range(1, num_iter + 1): + timestamps.append(start_time + timedelta(seconds=k)) + +# Plot ground truth. +if PLOT: + from stonesoup.plotter import Plotter + + plotter = Plotter() + plotter.ax.set_ylim(0, 25) + plotter.plot_ground_truths(truths, [0, 2]) + +# Simulate measurements +# ===================== +scans = [] + +for k in range(num_iter): + measurement_set = set() + + # True detections + for truth in truths: + # Generate actual detection from the state with a 10% chance that no detection is received. + if np.random.rand() <= prob_detect: + measurement = measurement_model.function(truth[k], noise=True) + measurement_set.add(TrueDetection(state_vector=measurement, + groundtruth_path=truth, + timestamp=truth[k].timestamp, + measurement_model=measurement_model)) + + # Generate clutter at this time-step + truth_x = truth[k].state_vector[0] + truth_y = truth[k].state_vector[2] + + # Clutter detections + for _ in range(np.random.poisson(clutter_rate)): + x = uniform.rvs(-10, 30) + y = uniform.rvs(0, 25) + measurement_set.add(Clutter(np.array([[x], [y]]), timestamp=truth[k].timestamp, + measurement_model=measurement_model)) + scans.append((timestamps[k], measurement_set)) + +# Initialise PHD Filter +# ===================== +resampler = SystematicResampler() +phd_filter = SMCPHDFilter(birth_density=birth_density, transition_model=transition_model, + measurement_model=None, prob_detect=prob_detect, + prob_death=prob_death, prob_birth=prob_birth, + birth_rate=birth_rate, clutter_intensity=clutter_intensity + 0.001, + num_samples=num_particles, resampler=resampler, + birth_scheme=birth_scheme) + +# Estimate +# ======== + +# Sample prior state from birth density +state_vector = StateVectors(multivariate_normal.rvs(birth_density.state_vector.ravel(), + birth_density.covar, + size=num_particles).T) +weight = np.ones((num_particles,)) * 
Probability(1 / num_particles) +state = ParticleState(state_vector=state_vector, weight=weight, timestamp=start_time) + +# Plot the prior +if PLOT: + fig1 = plt.figure(figsize=(13, 7)) + ax1 = plt.gca() + ax1.plot(state.state_vector[0, :], state.state_vector[2, :], 'r.') + +# Main tracking loop +for k, (timestamp, detections) in enumerate(scans): + + new_state = phd_filter.iterate(state, detections, timestamp) + state = new_state + + print('Num targets: ', np.sum(state.weight)) + + # Plot resulting density + if PLOT: + ax1.cla() + for i, truth in enumerate(truths): + data = np.array([s.state_vector for s in truth[:k + 1]]) + ax1.plot(data[:, 0], data[:, 2], '--', label=f'Groundtruth Track {i+1}') + if len(detections): + det_data = np.array([det.state_vector for det in detections]) + ax1.plot(det_data[:, 0], det_data[:, 1], '*g', label='Detections') + ax1.plot(new_state.state_vector[0, :], new_state.state_vector[2, :], + 'r.', label='Particles') + plt.axis([*surveillance_region[0], *surveillance_region[1]]) + plt.legend(loc='center right') + plt.pause(0.01) From 963555613cc39db4c2608421dd344670c4512981 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Fri, 23 Sep 2022 13:08:29 +0100 Subject: [PATCH 04/87] Add metadata property to SingleHypothesis --- stonesoup/types/hypothesis.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stonesoup/types/hypothesis.py b/stonesoup/types/hypothesis.py index 12834bd14..ba1219ba3 100644 --- a/stonesoup/types/hypothesis.py +++ b/stonesoup/types/hypothesis.py @@ -1,6 +1,6 @@ from abc import abstractmethod from collections import UserDict -from typing import Sequence +from typing import Sequence, Dict import numpy as np @@ -58,6 +58,7 @@ class SingleHypothesis(Hypothesis): measurement: Detection = Property(doc="Detection used for hypothesis and updating") measurement_prediction: MeasurementPrediction = Property( default=None, doc="Optional track prediction in measurement space") + metadata: Dict = 
Property(default=None, doc="Optional metadata") def __bool__(self): return (not isinstance(self.measurement, MissedDetection)) and \ From 41d618453523dba674ae3d2203cf8628e649d1a4 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Fri, 23 Sep 2022 13:09:10 +0100 Subject: [PATCH 05/87] Added IPDA and JIPDAWithEHM2 classes --- stonesoup/custom/jipda.py | 66 +++++++++++++++++++++++++++ stonesoup/hypothesiser/probability.py | 55 ++++++++++++++++++++++ 2 files changed, 121 insertions(+) create mode 100644 stonesoup/custom/jipda.py diff --git a/stonesoup/custom/jipda.py b/stonesoup/custom/jipda.py new file mode 100644 index 000000000..70c93cc97 --- /dev/null +++ b/stonesoup/custom/jipda.py @@ -0,0 +1,66 @@ +from pyehm.plugins.stonesoup import JPDAWithEHM2 + +from stonesoup.types.detection import MissedDetection +from stonesoup.types.hypothesis import SingleProbabilityHypothesis +from stonesoup.types.multihypothesis import MultipleHypothesis +from stonesoup.types.numeric import Probability + + +class JIPDAWithEHM2(JPDAWithEHM2): + + @classmethod + def _compute_multi_hypotheses(cls, tracks, detections, hypotheses, time): + + # Tracks and detections must be in a list so we can keep track of their order + track_list = list(tracks) + detection_list = list(detections) + + # Calculate validation and likelihood matrices + validation_matrix, likelihood_matrix = \ + cls._calc_validation_and_likelihood_matrices(track_list, detection_list, hypotheses) + + # Run EHM + assoc_prob_matrix = cls._run_ehm(validation_matrix, likelihood_matrix) + + # Calculate MultiMeasurementHypothesis for each Track over all + # available Detections with probabilities drawn from the association matrix + new_hypotheses = dict() + + for i, track in enumerate(track_list): + + single_measurement_hypotheses = list() + + # Null measurement hypothesis + null_hypothesis = next((hyp for hyp in hypotheses[track] if not hyp), None) + w = null_hypothesis.metadata['w'] + prob_misdetect = Probability(assoc_prob_matrix[i, 
0])/(1+w) + single_measurement_hypotheses.append( + SingleProbabilityHypothesis( + hypotheses[track][0].prediction, + MissedDetection(timestamp=time), + measurement_prediction=null_hypothesis.measurement_prediction, + probability=prob_misdetect)) + + # True hypotheses + for hypothesis in hypotheses[track]: + if not hypothesis: + continue + + # Get the detection index + j = next(d_i + 1 for d_i, detection in enumerate(detection_list) + if hypothesis.measurement == detection) + + pro_detect_assoc = Probability(assoc_prob_matrix[i, j]) + single_measurement_hypotheses.append( + SingleProbabilityHypothesis( + hypothesis.prediction, + hypothesis.measurement, + measurement_prediction=hypothesis.measurement_prediction, + probability=pro_detect_assoc)) + + track.exist_prob = Probability.sum(hyp.probability + for hyp in single_measurement_hypotheses) + + new_hypotheses[track] = MultipleHypothesis(single_measurement_hypotheses, True, 1) + + return new_hypotheses \ No newline at end of file diff --git a/stonesoup/hypothesiser/probability.py b/stonesoup/hypothesiser/probability.py index b95049542..51e6515f3 100644 --- a/stonesoup/hypothesiser/probability.py +++ b/stonesoup/hypothesiser/probability.py @@ -192,3 +192,58 @@ def _validation_region_volume(cls, prob_gate, meas_pred): @lru_cache() def _gate_threshold(prob_gate, n): return chi2.ppf(float(prob_gate), n) + + +class IPDAHypothesiser(PDAHypothesiser): + """ Integrated PDA Hypothesiser """ + prob_survive: Probability = Property(doc="Probability of survival", default=Probability(0.99)) + + def hypothesise(self, track, detections, timestamp, **kwargs): + r"""Evaluate and return all track association hypotheses. 
+ """ + + hypotheses = list() + + # Common state & measurement prediction + prediction = self.predictor.predict(track, timestamp=timestamp, **kwargs) + # Compute predicted existence + time_interval = timestamp - track.timestamp + prob_survive = np.exp(-(1-self.prob_survive)*time_interval.total_seconds()) + track.exist_prob = prob_survive * track.exist_prob + # Missed detection hypothesis + probability = Probability(1 - self.prob_detect * self.prob_gate * track.exist_prob) + w = (1 - track.exist_prob) / ((1 - self.prob_detect * self.prob_gate) * track.exist_prob) + hypotheses.append( + SingleProbabilityHypothesis( + prediction, + MissedDetection(timestamp=timestamp), + probability, + metadata={"w": w} + )) + + # True detection hypotheses + for detection in detections: + # Re-evaluate prediction + prediction = self.predictor.predict( + track.state, timestamp=detection.timestamp) + # Compute measurement prediction and probability measure + measurement_prediction = self.updater.predict_measurement( + prediction, detection.measurement_model, **kwargs) + # Calculate difference before to handle custom types (mean defaults to zero) + # This is required as log pdf coverts arrays to floats + log_pdf = multivariate_normal.logpdf( + (detection.state_vector - measurement_prediction.state_vector).ravel(), + cov=measurement_prediction.covar) + pdf = Probability(log_pdf, log_value=True) + probability = (pdf * self.prob_detect * track.exist_prob)/self.clutter_spatial_density + + # True detection hypothesis + hypotheses.append( + SingleProbabilityHypothesis( + prediction, + detection, + probability, + measurement_prediction)) + + return MultipleHypothesis(hypotheses, normalise=True, total_weight=1) + From 8f78a393c96085ffebf772c59b04d58caaac1035 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Fri, 23 Sep 2022 13:09:53 +0100 Subject: [PATCH 06/87] SMCPHD update + Added SMCPHDInitiator --- stonesoup/custom/smcphd.py | 196 ++++++++++++++++++++++++++++++------- 1 file changed, 160 
insertions(+), 36 deletions(-) diff --git a/stonesoup/custom/smcphd.py b/stonesoup/custom/smcphd.py index 269c356b8..9fe6d502a 100644 --- a/stonesoup/custom/smcphd.py +++ b/stonesoup/custom/smcphd.py @@ -1,10 +1,11 @@ -import copy -from typing import List +from copy import copy +from typing import List, Any import numpy as np from scipy.stats import multivariate_normal from stonesoup.base import Base, Property +from stonesoup.initiator import Initiator from stonesoup.models.measurement import MeasurementModel from stonesoup.models.transition import TransitionModel from stonesoup.resampler import Resampler @@ -15,7 +16,8 @@ from stonesoup.types.numeric import Probability from stonesoup.types.prediction import Prediction from stonesoup.types.state import State -from stonesoup.types.update import Update +from stonesoup.types.track import Track +from stonesoup.types.update import Update, GaussianStateUpdate class SMCPHDFilter(Base): @@ -46,13 +48,25 @@ class SMCPHDFilter(Base): default='expansion' ) - def iterate(self, state, detections: List[Detection], timestamp): + def predict(self, state, timestamp): + """ + Predict the next state of the target density + + Parameters + ---------- + state: :class:`~.State` + The current state of the target + timestamp: :class:`datetime.datetime` + The time at which the state is valid + + Returns + ------- + : :class:`~.State` + The predicted next state of the target + """ + prior_weights = state.weight time_interval = timestamp - state.timestamp - detections_list = list(detections) - - # 1) Predict - # =======================================================================================> # Predict particles forward pred_particles_sv = self.transition_model.function(state, @@ -64,8 +78,6 @@ def iterate(self, state, detections: List[Detection], timestamp): # Compute number of birth particles (J_k) as a fraction of the number of particles num_birth = round(float(self.prob_birth * self.num_samples)) - total_samples = self.num_samples 
+ num_birth # L_{k-1} + Jk - # Sample birth particles birth_particles = multivariate_normal.rvs(self.birth_density.mean.ravel(), self.birth_density.covar, @@ -73,7 +85,8 @@ def iterate(self, state, detections: List[Detection], timestamp): birth_weights = np.ones((num_birth,)) * Probability(self.birth_rate / num_birth) # Surviving particle weights - pred_weights = (1 - self.prob_death) * prior_weights + prob_survive = np.exp(-self.prob_death*time_interval.total_seconds()) + pred_weights = prob_survive * prior_weights # Append birth particles to predicted ones pred_particles_sv = StateVectors( @@ -100,38 +113,41 @@ def iterate(self, state, detections: List[Detection], timestamp): timestamp=timestamp, particle_list=None, transition_model=self.transition_model) - # 2) Update - # =======================================================================================> + return prediction - # Compute g(z|x) likelihood matrix as in [1] - g = np.zeros((total_samples, len(detections)), dtype=Probability) - for i, detection in enumerate(detections_list): - g[:, i] = detection.measurement_model.pdf(detection, prediction, - noise=True) + def update(self, prediction, detections, timestamp, meas_weights=None): + """ + Update the predicted state of the target density with the given detections - # Calculate w^{n,i} Eq. (20) of [2] - # (i.e. the individual sum terms inside the square brackets in Eq. (8) of [1], multiplied - # by the corresponding predicted weight w_{k|k-1}^(i)) - weights_per_hyp = np.zeros((total_samples, len(detections) + 1), dtype=Probability) - weights_per_hyp[:, 0] = (1 - self.prob_detect) * pred_weights # Null hypothesis - if len(detections): - # C = \psi_{k,z}(x_k^(i)) * w_{k|k-1}^(i) in Eq. (8) of [1] - C = self.prob_detect * g * pred_weights[:, np.newaxis] - Ck = np.sum(C, axis=0) # C_k(z) (Eq. (9) of [1]) - C_plus = Ck + self.clutter_intensity # \kappa_{k}(z) + Ck(z) term in Eq. 
(8) of [1] - weights_per_hyp[:, 1:] = C / C_plus # True-detection hypotheses + Parameters + ---------- + prediction: :class:`~.State` + The predicted state of the target + detections: :class:`~.Detection` + The detections at the current time step + timestamp: :class:`datetime.datetime` + The time at which the update is valid + meas_weights: :class:`np.ndarray` + The weights of the measurements + + Returns + ------- + : :class:`~.State` + The updated state of the target + """ + + weights_per_hyp = self.get_weights_per_hypothesis(prediction, detections, meas_weights) # Construct hypothesis objects (StoneSoup specific) - intensity_per_hyp = np.sum(weights_per_hyp, axis=0) single_hypotheses = [ SingleProbabilityHypothesis(prediction, measurement=MissedDetection(timestamp=timestamp), - probability=Probability(intensity_per_hyp[0]))] - for i, detection in enumerate(detections_list): + probability=weights_per_hyp[:, 0])] + for i, detection in enumerate(detections): single_hypotheses.append( SingleProbabilityHypothesis(prediction, measurement=detection, - probability=Probability(intensity_per_hyp[i + 1])) + probability=weights_per_hyp[:, i + 1]) ) hypothesis = MultipleHypothesis(single_hypotheses, normalise=False) @@ -141,11 +157,11 @@ def iterate(self, state, detections: List[Detection], timestamp): # Resample num_targets = np.sum(post_weights) # N_{k|k} - update = copy.copy(prediction) + update = copy(prediction) update.weight = post_weights / num_targets # Normalize weights if self.resampler is not None: update = self.resampler.resample(update, self.num_samples) # Resample - update.weight = np.array(update.weight) * num_targets # De-normalize + update.weight = np.array(update.weight) * num_targets # De-normalize return Update.from_state( state=prediction, @@ -153,4 +169,112 @@ def iterate(self, state, detections: List[Detection], timestamp): weight=update.weight, particle_list=None, hypothesis=hypothesis, - timestamp=timestamp) \ No newline at end of file + 
timestamp=timestamp) + + def iterate(self, state, detections: List[Detection], timestamp): + """ + Iterate the filter over the given state and detections + + Parameters + ---------- + state: :class:`~.State` + The predicted state of the target + detections: :class:`~.Detection` + The detections at the current time step + timestamp: :class:`datetime.datetime` + The time at which the update is valid + + Returns + ------- + : :class:`~.State` + The updated state of the target + """ + prediction = self.predict(state, timestamp) + update = self.update(prediction, detections, timestamp) + return update + + def get_measurement_likelihoods(self, prediction, detections, meas_weights): + num_samples = prediction.state_vector.shape[1] + # Compute g(z|x) matrix as in [1] + g = np.zeros((num_samples, len(detections)), dtype=Probability) + for i, detection in enumerate(detections): + if not meas_weights[i]: + g[:, i] = Probability(0) + continue + g[:, i] = detection.measurement_model.pdf(detection, prediction, + noise=True) + return g + + def get_weights_per_hypothesis(self, prediction, detections, meas_weights): + num_samples = prediction.state_vector.shape[1] + if meas_weights is None: + meas_weights = np.array([Probability(1) for _ in range(len(detections))]) + + # Compute g(z|x) matrix as in [1] + g = self.get_measurement_likelihoods(prediction, detections, meas_weights) + + # Calculate w^{n,i} Eq. 
(20) of [2] + Ck = meas_weights * self.prob_detect * g * prediction.weight[:, np.newaxis] + C = np.sum(Ck, axis=0) + k = np.array([detection.metadata['clutter_density'] + if 'clutter_density' in detection.metadata else self.clutter_intensity + for detection in detections]) + C_plus = C + k + + weights_per_hyp = np.zeros((num_samples, len(detections) + 1), dtype=Probability) + weights_per_hyp[:, 0] = (1 - self.prob_detect) * prediction.weight + if len(detections): + weights_per_hyp[:, 1:] = Ck / C_plus + + return weights_per_hyp + + +class SMCPHDInitiator(Initiator): + filter: SMCPHDFilter = Property(doc='The phd filter') + prior: Any = Property(doc='The prior state') + threshold: Probability = Property(doc='The thrshold probability for initiation', + default=Probability(0.9)) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._state = self.prior + + def initiate(self, detections, timestamp, weights=None, **kwargs): + tracks = set() + + # Predict forward + prediction = self.filter.predict(self._state, timestamp) + + weights_per_hyp = self.filter.get_weights_per_hypothesis(prediction, detections, weights) + + intensity_per_hyp = np.sum(weights_per_hyp, axis=0) + valid_inds = np.flatnonzero(intensity_per_hyp > self.threshold) + for idx in valid_inds: + if not idx: + continue + + particles_sv = copy(prediction.state_vector) + weight = weights_per_hyp[:, idx] / intensity_per_hyp[idx] + + mu = np.average(particles_sv, + axis=1, + weights=weight) + cov = np.cov(particles_sv, ddof=0, aweights=np.array(weight)) + + hypothesis = SingleProbabilityHypothesis(prediction, + measurement=detections[idx-1], + probability=weights_per_hyp[:, idx]) + + track_state = GaussianStateUpdate(mu, cov, hypothesis=hypothesis, + timestamp=timestamp) + + # if np.trace(track_state.covar) < 10: + weights_per_hyp[:, idx] = Probability(0) + track = Track([track_state]) + track.exist_prob = intensity_per_hyp[idx] + tracks.add(track) + + weights[idx-1] = 0 + + 
self._state = self.filter.update(prediction, detections, timestamp, weights) + return tracks From 1561a38733e663705c699836bf1d0c88800818fb Mon Sep 17 00:00:00 2001 From: sglvladi Date: Fri, 23 Sep 2022 17:34:07 +0100 Subject: [PATCH 07/87] Added SMCPHD initiator example --- examples/reactive-isr/smcphd_init-example.py | 297 +++++++++++++++++++ 1 file changed, 297 insertions(+) create mode 100644 examples/reactive-isr/smcphd_init-example.py diff --git a/examples/reactive-isr/smcphd_init-example.py b/examples/reactive-isr/smcphd_init-example.py new file mode 100644 index 000000000..1db9b4025 --- /dev/null +++ b/examples/reactive-isr/smcphd_init-example.py @@ -0,0 +1,297 @@ +from matplotlib import pyplot as plt +from matplotlib.patches import Ellipse +from pyehm.plugins.stonesoup import JPDAWithEHM2 + +from stonesoup.custom.jipda import JIPDAWithEHM2 +from stonesoup.deleter.time import UpdateTimeDeleter +from stonesoup.functions import gm_reduce_single +from stonesoup.gater.distance import DistanceGater +from stonesoup.hypothesiser.probability import IPDAHypothesiser, PDAHypothesiser +from stonesoup.measures import Mahalanobis +from stonesoup.predictor.kalman import KalmanPredictor +from stonesoup.resampler.particle import SystematicResampler +from stonesoup.types.array import StateVector, StateVectors +from stonesoup.types.numeric import Probability +from stonesoup.types.particle import Particle +from stonesoup.types.state import GaussianState, ParticleState +from stonesoup.custom.smcphd import SMCPHDFilter, SMCPHDInitiator + +from datetime import datetime +from datetime import timedelta +import numpy as np +from scipy.stats import uniform, multivariate_normal + +from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \ + ConstantVelocity +from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState +from stonesoup.types.detection import TrueDetection +from stonesoup.types.detection import Clutter +from 
stonesoup.models.measurement.linear import LinearGaussian +from stonesoup.types.update import GaussianStateUpdate +from stonesoup.updater.kalman import KalmanUpdater + + +def plot_cov_ellipse(cov, pos, nstd=1, ax=None, **kwargs): + """ + Plots an `nstd` sigma error ellipse based on the specified covariance + matrix (`cov`). Additional keyword arguments are passed on to the + ellipse patch artist. + Parameters + ---------- + cov : The 2x2 covariance matrix to base the ellipse on + pos : The location of the center of the ellipse. Expects a 2-element + sequence of [x0, y0]. + nstd : The radius of the ellipse in numbers of standard deviations. + Defaults to 2 standard deviations. + ax : The axis that the ellipse will be plotted on. Defaults to the + current axis. + Additional keyword arguments are pass on to the ellipse patch. + Returns + ------- + A matplotlib ellipse artist + """ + + def eigsorted(cov): + vals, vecs = np.linalg.eigh(cov) + order = vals.argsort()[::-1] + return vals[order], vecs[:, order] + + if ax is None: + ax = plt.gca() + + vals, vecs = eigsorted(cov) + theta = np.degrees(np.arctan2(*vecs[:, 0][::-1])) + + # Width and height are "full" widths, not radius + width, height = 2 * nstd * np.sqrt(vals) + ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, + alpha=0.4, **kwargs) + + ax.add_artist(ellip) + return ellip + +# np.random.seed(1991) + +# Parameters +# ========== +start_time = datetime.now() # Simulation start time +prob_detect = Probability(.9) # 90% chance of detection. 
+prob_death = Probability(0.01) # Probability of death +prob_birth = Probability(0.1) # Probability of birth +prob_survive = Probability(0.99) # Probability of survival +birth_rate = 0.02 # Birth-rate (Mean number of new targets per scan) +clutter_rate = 2 # Clutter-rate (Mean number of clutter measurements per scan) +surveillance_region = [[-10, 30], [0, 30]] # The surveillance region x=[-10, 30], y=[0, 30] +surveillance_area = (surveillance_region[0][1] - surveillance_region[0][0]) \ + * (surveillance_region[1][1] - surveillance_region[1][0]) +clutter_intensity = clutter_rate / surveillance_area # Clutter intensity per unit volume/area +birth_density = GaussianState(StateVector(np.array([10., 0.0, 10., 0.0])), + np.diag([10. ** 2, 1. ** 2, 10. ** 2, 1. ** 2])) # Birth density +birth_scheme = 'mixture' # Birth scheme. Possible values are 'expansion' and 'mixture' +num_particles = 2 ** 12 # Number of particles used by the PHD filter +num_iter = 100 # Number of simulation steps +PLOT = True # Set [True | False] to turn plotting [ON | OFF] + +# Models +# ====== +# Transition model +transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.01), + ConstantVelocity(0.01)]) +# Measurement model +measurement_model = LinearGaussian(ndim_state=4, + mapping=(0, 2), + noise_covar=np.array([[0.1, 0], + [0, 0.1]])) + +# Simulate Groundtruth +# ==================== +gnd_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), + ConstantVelocity(0.)]) +truths = set() +truth = GroundTruthPath([GroundTruthState([0, 0.2, 0, 0.2], timestamp=start_time)]) +for k in range(1, num_iter + 1): + truth.append(GroundTruthState( + gnd_transition_model.function(truth[k - 1], noise=True, time_interval=timedelta(seconds=1)), + timestamp=start_time + timedelta(seconds=k))) +truths.add(truth) + +truth = GroundTruthPath([GroundTruthState([0, 0.2, 20, -0.2], timestamp=start_time)]) +for k in range(1, num_iter + 1): + truth.append(GroundTruthState( + 
gnd_transition_model.function(truth[k - 1], noise=True, time_interval=timedelta(seconds=1)), + timestamp=start_time + timedelta(seconds=k))) +truths.add(truth) + +timestamps = [] +for k in range(1, num_iter + 1): + timestamps.append(start_time + timedelta(seconds=k)) + +# Simulate measurements +# ===================== +scans = [] + +for k in range(num_iter): + measurement_set = set() + + # True detections + for truth in truths: + # Generate actual detection from the state with a 10% chance that no detection is received. + if np.random.rand() <= prob_detect: + measurement = measurement_model.function(truth[k], noise=True) + measurement_set.add(TrueDetection(state_vector=measurement, + groundtruth_path=truth, + timestamp=truth[k].timestamp, + measurement_model=measurement_model)) + + # Generate clutter at this time-step + truth_x = truth[k].state_vector[0] + truth_y = truth[k].state_vector[2] + + # Clutter detections + for _ in range(np.random.poisson(clutter_rate)): + x = uniform.rvs(-10, 30) + y = uniform.rvs(0, 25) + measurement_set.add(Clutter(np.array([[x], [y]]), timestamp=truth[k].timestamp, + measurement_model=measurement_model)) + scans.append((timestamps[k], measurement_set)) + +# Predictor & Updater +# =================== +predictor = KalmanPredictor(transition_model) +updater = KalmanUpdater(measurement_model) + +# Hypothesiser & Data Associator +# ============================== +hypothesiser = IPDAHypothesiser(predictor, updater, clutter_intensity, prob_detect=prob_detect, + prob_survive=prob_survive) +# hypothesiser = PDAHypothesiser(predictor, updater, clutter_intensity, prob_detect=prob_detect) +hypothesiser = DistanceGater(hypothesiser, Mahalanobis(), 10) +associator = JIPDAWithEHM2(hypothesiser) + +# Track Deleter +# ============= +deleter = UpdateTimeDeleter(time_since_update=timedelta(minutes=5)) + +# Initiator +# ========= +# Initialise PHD Filter +resampler = SystematicResampler() +phd_filter = SMCPHDFilter(birth_density=birth_density, 
transition_model=transition_model, + measurement_model=None, prob_detect=prob_detect, + prob_death=prob_death, prob_birth=prob_birth, + birth_rate=birth_rate, clutter_intensity=clutter_intensity, + num_samples=num_particles, resampler=resampler, + birth_scheme=birth_scheme) + +# Sample prior state from birth density +state_vector = StateVectors(multivariate_normal.rvs(birth_density.state_vector.ravel(), + birth_density.covar, + size=num_particles).T) +weight = np.ones((num_particles,)) * Probability(1 / num_particles) +state = ParticleState(state_vector=state_vector, weight=weight, timestamp=start_time) + + +initiator = SMCPHDInitiator(filter=phd_filter, prior=state) + +# Estimate +# ======== + +# Plot the prior +if PLOT: + fig1 = plt.figure(figsize=(13, 7)) + ax1 = plt.gca() + ax1.plot(state.state_vector[0, :], state.state_vector[2, :], 'r.') + +# Main tracking loop +tracks = set() +for k, (timestamp, detections) in enumerate(scans): + + tracks = list(tracks) + detections = list(detections) + num_tracks = len(tracks) + num_detections = len(detections) + + # Perform data association + associations = associator.associate(tracks, detections, timestamp) + + assoc_prob_matrix = np.zeros((num_tracks, num_detections + 1)) + for i, track in enumerate(tracks): + for hyp in associations[track]: + if not hyp: + assoc_prob_matrix[i, 0] = hyp.weight + else: + j = next(d_i for d_i, detection in enumerate(detections) + if hyp.measurement == detection) + assoc_prob_matrix[i, j + 1] = hyp.weight + + rho = np.zeros((len(detections))) + for j, detection in enumerate(detections): + rho_tmp = 1 + if len(assoc_prob_matrix): + for i, track in enumerate(tracks): + rho_tmp *= 1 - assoc_prob_matrix[i, j + 1] + rho[j] = rho_tmp + + for track, multihypothesis in associations.items(): + + # calculate each Track's state as a Gaussian Mixture of + # its possible associations with each detection, then + # reduce the Mixture to a single Gaussian State + posterior_states = [] + 
posterior_state_weights = [] + for hypothesis in multihypothesis: + posterior_state_weights.append(hypothesis.probability) + if hypothesis: + posterior_states.append(updater.update(hypothesis)) + else: + posterior_states.append(hypothesis.prediction) + + # Merge/Collapse to single Gaussian + means = StateVectors([state.state_vector for state in posterior_states]) + covars = np.stack([state.covar for state in posterior_states], axis=2) + weights = np.asarray(posterior_state_weights) + + post_mean, post_covar = gm_reduce_single(means, covars, weights) + + track.append(GaussianStateUpdate( + np.array(post_mean), np.array(post_covar), + multihypothesis, + multihypothesis[0].prediction.timestamp)) + + tracks = set(tracks) + new_tracks = initiator.initiate(detections, timestamp, weights=rho) + tracks |= new_tracks + state = initiator._state + + # Delete tracks that have not been updated for a while + del_tracks = set() + for track in tracks: + if track.exist_prob < 0.1: + del_tracks.add(track) + tracks -= del_tracks + + print('\n===========================================') + print(f'Num targets: {np.sum(state.weight)} - Num new targets: {len(new_tracks)}') + for track in tracks: + print(f'Track {track.id} - Exist prob: {track.exist_prob}') + + # Plot resulting density + if PLOT: + ax1.cla() + for i, truth in enumerate(truths): + data = np.array([s.state_vector for s in truth[:k + 1]]) + ax1.plot(data[:, 0], data[:, 2], '--', label=f'Groundtruth Track {i+1}') + if len(detections): + det_data = np.array([det.state_vector for det in detections]) + ax1.plot(det_data[:, 0], det_data[:, 1], '*g', label='Detections') + # ax1.plot(state.state_vector[0, :], state.state_vector[2, :], + # 'r.', label='Particles') + + for track in tracks: + data = np.array([s.state_vector for s in track]) + ax1.plot(data[:, 0], data[:, 2], label=f'Track {track.id}') + plot_cov_ellipse(track.covar[[0, 2], :][:, [0, 2]], track.state_vector[[0, 2], :], + edgecolor='r', facecolor='none', ax=ax1) + 
plt.axis([*surveillance_region[0], *surveillance_region[1]]) + plt.legend(loc='center right') + plt.pause(0.01) From aac33f4bdb8b9c5edfd8a2a73208eb975f6231a5 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 26 Sep 2022 13:14:10 +0100 Subject: [PATCH 08/87] Added actionable pan-tilt camera sensors --- stonesoup/custom/sensor.py | 423 +++++++++++++++++++++++++++++++++++++ 1 file changed, 423 insertions(+) create mode 100644 stonesoup/custom/sensor.py diff --git a/stonesoup/custom/sensor.py b/stonesoup/custom/sensor.py new file mode 100644 index 000000000..8a377d52e --- /dev/null +++ b/stonesoup/custom/sensor.py @@ -0,0 +1,423 @@ +import datetime +from copy import copy +from typing import Sequence, Iterator, Set, Union +from itertools import product + +import numpy as np + +from stonesoup.base import Property +from stonesoup.functions import cart2sphere +from stonesoup.models.clutter import ClutterModel +from stonesoup.models.measurement.linear import LinearGaussian +from stonesoup.sensor.action import Action, RealNumberActionGenerator +from stonesoup.sensor.actionable import ActionableProperty +from stonesoup.sensor.passive import PassiveElevationBearing +from stonesoup.sensor.sensor import Sensor +from stonesoup.types.angle import Angle, Bearing, Elevation +from stonesoup.types.array import StateVector, CovarianceMatrix +from stonesoup.types.detection import TrueDetection +from stonesoup.types.groundtruth import GroundTruthState +from stonesoup.functions import build_rotation_matrix + + +class ChangePanTiltAction(Action): + """The action of changing the dwell centre of sensors where `dwell_centre` is an + :class:`~.ActionableProperty`""" + + rotation_end_time: datetime.datetime = Property(readonly=True, + doc="End time of rotation.") + increasing_angle: Sequence[bool] = Property( + default=None, readonly=True, + doc="Indicated the direction of change in the dwell centre angle. 
The first element " + "relates to bearing, the second to elevation.") + + def act(self, current_time, timestamp, init_value): + """Assumes that duration keeps within the action end time + + Parameters + ---------- + current_time: datetime.datetime + Current time + timestamp: datetime.datetime + Modification of attribute ends at this time stamp + init_value: Any + Current value of the dwell centre + + Returns + ------- + Any + The new value of the dwell centre""" + + if timestamp >= self.end_time: + return self.target_value # target direction + else: + return init_value # same direction + + +class PanTiltActionsGenerator(RealNumberActionGenerator): + """Generates possible actions for changing the dwell centre of a sensor in a given + time period.""" + + owner: object = Property(doc="Object with `timestamp`, `rpm` (revolutions per minute) and " + "dwell-centre attributes") + resolution: Angle = Property(default=np.radians(1), doc="Resolution of action space") + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.epsilon = Angle(np.radians(1e-6)) + + @property + def default_action(self): + return ChangePanTiltAction(rotation_end_time=self.end_time, + generator=self, + end_time=self.end_time, + target_value=self.current_value, + increasing_angle=None) + + def __call__(self, resolution=None, epsilon=None): + """ + Parameters + ---------- + resolution : Angle + Resolution of yielded action target values + epsilon: float + Tolerance of equality check in iteration + """ + if resolution is not None: + self.resolution = resolution + if epsilon is not None: + self.epsilon = epsilon + + @property + def initial_value(self): + return self.current_value + + @property + def duration(self): + return self.end_time - self.start_time + + @property + def rps(self): + return self.owner.rpm / 60 + + @property + def angle_delta(self): + return StateVector([Angle(0), + Angle(self.duration.total_seconds() * self.rps[0] * 2 * np.pi), + 
Angle(self.duration.total_seconds() * self.rps[1] * 2 * np.pi)]) + + @property + def min(self): + min = self.initial_value.astype(float) - self.angle_delta + min[1] = np.maximum(Angle(self.initial_value[1]) - self.angle_delta[1], + Angle(-np.pi / 2)) + return min + + @property + def max(self): + max = self.initial_value.astype(float) + self.angle_delta + max[1] = np.minimum(Angle(self.initial_value[1]) + self.angle_delta[1], + Angle(np.pi / 2)) + return max + + def __contains__(self, item): + + if self.angle_delta[2] >= np.pi or self.angle_delta[1] >= np.pi / 2: + # Left turn and right turn are > 180, so all angles hit + return True + + if isinstance(item, ChangePanTiltAction): + item = item.target_value + + return self.min[1] <= item[1] <= self.max[1] and self.min[2] <= item[2] <= self.max[2] + + def _end_time_direction_pan(self, angle): + """Given a target bearing, should the dwell centre rotate so as to increase its angle + value, or decrease? And how long until it reaches the target.""" + + angle = Angle(angle) + + if self.initial_value[2] - self.epsilon \ + <= angle \ + <= self.initial_value[2] + self.epsilon: + return self.start_time, None # no rotation, target bearing achieved + + angle_delta = np.abs(angle - self.initial_value[2]) + + return ( + self.start_time + datetime.timedelta(seconds=angle_delta / (self.rps[1] * 2 * np.pi)), + angle > self.initial_value[2] + ) + + def _end_time_direction_tilt(self, angle): + """Given a target bearing, should the dwell centre rotate so as to increase its angle + value, or decrease? 
And how long until it reaches the target.""" + + angle = Angle(angle) + + if self.initial_value[1] - self.epsilon \ + <= angle \ + <= self.initial_value[1] + self.epsilon: + return self.start_time, None # no rotation, target bearing achieved + + angle_delta = np.abs(angle - self.initial_value[1]) + + return ( + self.start_time + datetime.timedelta(seconds=angle_delta / (self.rps[1] * 2 * np.pi)), + angle > self.initial_value[1] + ) + + def __iter__(self) -> Iterator[ChangePanTiltAction]: + """Returns ChangePanTiltAction types, where the value is a possible value of the [0, 0] + element of the dwell centre's state vector.""" + + possible_elevations = np.arange(self.min[1], self.max[1], self.resolution) + for elevation in possible_elevations: + elevation_end_time, increasing_e = self._end_time_direction_tilt(elevation) + bearing = self.min[2] + while bearing <= self.max[2] + self.epsilon: + bearing_end_time, increasing_b = self._end_time_direction_pan(bearing) + yield ChangePanTiltAction(rotation_end_time=max(bearing_end_time, + elevation_end_time), + generator=self, + end_time=self.end_time, + target_value=StateVector([Angle(0), + Elevation(elevation), + Bearing(bearing)]), + increasing_angle=[increasing_e, increasing_b]) + bearing += self.resolution + + def action_from_value(self, value): + raise NotImplementedError + + +class PanTiltUAVActionsGenerator(PanTiltActionsGenerator): + + @property + def min(self): + min = self.initial_value.astype(float) - self.angle_delta + min[0] = np.maximum(Angle(self.initial_value[0]) - self.angle_delta[0], + Angle(-np.pi / 2)) + min[1] = np.maximum(Angle(self.initial_value[1]) - self.angle_delta[1], + Angle(-np.pi / 2)) + return min + + @property + def max(self): + max = self.initial_value.astype(float) + self.angle_delta + max[0] = np.minimum(Angle(self.initial_value[0]) + self.angle_delta[0], + Angle(np.pi / 2)) + max[1] = np.minimum(Angle(self.initial_value[1]) + self.angle_delta[1], + Angle(np.pi / 2)) + return max + + def 
__contains__(self, item): + + if self.angle_delta[2] >= np.pi / 2 or self.angle_delta[1] >= np.pi / 2: + # Left turn and right turn are > 180, so all angles hit + return True + + if isinstance(item, ChangePanTiltAction): + item = item.target_value + + return self.min[1] <= item[1] <= self.max[1] and self.min[2] <= item[2] <= self.max[2] + + def _end_time_direction_pan(self, angle): + """Given a target bearing, should the dwell centre rotate so as to increase its angle + value, or decrease? And how long until it reaches the target.""" + + angle = Angle(angle) + + if self.initial_value[2] - self.epsilon \ + <= angle \ + <= self.initial_value[2] + self.epsilon: + return self.start_time, None # no rotation, target bearing achieved + + angle_delta = np.abs(angle - self.initial_value[2]) + + return ( + self.start_time + datetime.timedelta(seconds=angle_delta / (self.rps[1] * 2 * np.pi)), + angle > self.initial_value[2] + ) + + def _end_time_direction_tilt(self, angle): + """Given a target bearing, should the dwell centre rotate so as to increase its angle + value, or decrease? 
And how long until it reaches the target.""" + + angle = Angle(angle) + + if self.initial_value[1] - self.epsilon \ + <= angle \ + <= self.initial_value[1] + self.epsilon: + return self.start_time, None # no rotation, target bearing achieved + + angle_delta = np.abs(angle - self.initial_value[1]) + + return ( + self.start_time + datetime.timedelta(seconds=angle_delta / (self.rps[1] * 2 * np.pi)), + angle > self.initial_value[1] + ) + + def __iter__(self) -> Iterator[ChangePanTiltAction]: + """Returns ChangePanTiltAction types, where the value is a possible value of the [0, 0] + element of the dwell centre's state vector.""" + + possible_tilt_angles = np.arange(self.min[1], self.max[1], self.resolution) + possible_pan_angles = np.arange(self.min[2], self.max[2], self.resolution) + for (pan_angle, tilt_angle) in product(possible_pan_angles, possible_tilt_angles): + pan_end_time, increasing_p = self._end_time_direction_pan(pan_angle) + tilt_end_time, increasing_t = self._end_time_direction_tilt(tilt_angle) + yield ChangePanTiltAction(rotation_end_time=max(pan_end_time, tilt_end_time), + generator=self, + end_time=self.end_time, + target_value=StateVector([Angle(0), + Elevation(tilt_angle), + Bearing(pan_angle)]), + increasing_angle=[increasing_t, increasing_p]) + + def action_from_value(self, value): + raise NotImplementedError + + +class PanTiltCamera(PassiveElevationBearing): + """A camera that can pan and tilt.""" + + rotation_offset: StateVector = ActionableProperty( + doc="A StateVector containing the sensor rotation " + "offsets from the platform's primary axis (defined as the " + "direction of motion). 
Defaults to a zero vector with the " + "same length as the Platform's :attr:`velocity_mapping`", + default=None, + generator_cls=PanTiltActionsGenerator) + rpm: float = Property( + doc="The number of rotations per minute (RPM)") + fov_angle: float = Property( + doc="The field of view (FOV) angle (in radians).") + clutter_model: ClutterModel = Property( + default=None, + doc="An optional clutter generator that adds a set of simulated " + ":class:`Clutter` objects to the measurements at each time step. " + "The clutter is simulated according to the provided distribution.") + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True, + **kwargs) -> Set[TrueDetection]: + detections = set() + measurement_model = self.measurement_model + + for truth in ground_truths: + # Transform state to measurement space and generate + # random noise + measurement_vector = measurement_model.function(truth, noise=noise, **kwargs) + + # Check if state falls within sensor's FOV + fov_min = -self.fov_angle / 2 + fov_max = +self.fov_angle / 2 + bearing_t = measurement_vector[1, 0] + elevation_t = measurement_vector[0, 0] + + # Do not measure if state not in FOV + if (not fov_min <= bearing_t <= fov_max) or (not fov_min <= elevation_t <= fov_max): + continue + + detection = TrueDetection(measurement_vector, + measurement_model=measurement_model, + timestamp=truth.timestamp, + groundtruth_path=truth) + detections.add(detection) + + # Generate clutter at this time step + if self.clutter_model is not None: + self.clutter_model.measurement_model = measurement_model + clutter = self.clutter_model.function(ground_truths) + detections |= clutter + + return detections + + +class PanTiltUAVCamera(Sensor): + """A camera that can pan and tilt.""" + ndim_state: int = Property( + doc="Number of state dimensions. 
This is utilised by (and follows in\ + format) the underlying :class:`~.CartesianToElevationBearing`\ + model") + mapping: np.ndarray = Property( + doc="Mapping between the targets state space and the sensors\ + measurement capability") + noise_covar: CovarianceMatrix = Property( + doc="The sensor noise covariance matrix. This is utilised by\ + (and follow in format) the underlying \ + :class:`~.CartesianToElevationBearing` model") + pan_tilt: StateVector = ActionableProperty( + doc="A StateVector containing the sensor rotation " + "offsets from the platform's primary axis (defined as the " + "direction of motion). Defaults to a zero vector with the " + "same length as the Platform's :attr:`velocity_mapping`", + default=None, + generator_cls=PanTiltUAVActionsGenerator) + rpm: float = Property( + doc="The number of rotations per minute (RPM)") + fov_angle: float = Property( + doc="The field of view (FOV) angle (in radians).") + clutter_model: ClutterModel = Property( + default=None, + doc="An optional clutter generator that adds a set of simulated " + ":class:`Clutter` objects to the measurements at each time step. 
" + "The clutter is simulated according to the provided distribution.") + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def measurement_model(self): + return LinearGaussian( + ndim_state=self.ndim_state, + mapping=self.mapping, + noise_covar=self.noise_covar) + + def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True, + **kwargs) -> Set[TrueDetection]: + + detections = set() + measurement_model = self.measurement_model + + for truth in ground_truths: + # Transform state to measurement space and generate + # random noise + measurement_vector = measurement_model.function(truth, noise=noise, **kwargs) + + # Normalise measurement vector relative to sensor position + norm_measurement_vector = measurement_vector.astype(float) - self.position.astype(float) + + # Rotate measurement vector relative to sensor orientation + rotation_matrix = build_rotation_matrix(self.orientation) + norm_rotated_measurement_vector = rotation_matrix @ norm_measurement_vector + + # Convert to spherical coordinates + _, bearing_t, elevation_t = cart2sphere(*norm_rotated_measurement_vector) + + # Check if state falls within sensor's FOV + fov_min = -self.fov_angle / 2 + fov_max = +self.fov_angle / 2 + bearing_t = bearing_t + elevation_t = elevation_t + + # Do not measure if state not in FOV + if (not fov_min <= bearing_t <= fov_max) or (not fov_min <= elevation_t <= fov_max): + continue + + detection = TrueDetection(measurement_vector, + measurement_model=measurement_model, + timestamp=truth.timestamp, + groundtruth_path=truth) + detections.add(detection) + + # Generate clutter at this time step + if self.clutter_model is not None: + self.clutter_model.measurement_model = measurement_model + clutter = self.clutter_model.function(ground_truths) + detections |= clutter + + return detections \ No newline at end of file From 72aeb256e2948d6e5ffa9a9c4858f3c95eca069f Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 26 
Sep 2022 13:15:27 +0100 Subject: [PATCH 09/87] Added UAV camera example --- examples/reactive-isr/camera_lat_lon_uav.py | 122 ++++++++++++++++++++ 1 file changed, 122 insertions(+) create mode 100644 examples/reactive-isr/camera_lat_lon_uav.py diff --git a/examples/reactive-isr/camera_lat_lon_uav.py b/examples/reactive-isr/camera_lat_lon_uav.py new file mode 100644 index 000000000..22cb2d8c6 --- /dev/null +++ b/examples/reactive-isr/camera_lat_lon_uav.py @@ -0,0 +1,122 @@ +from datetime import datetime, timedelta + +import numpy as np +from matplotlib import pyplot as plt +from matplotlib.patches import Rectangle + +from stonesoup.custom.sensor import PanTiltCamera, ChangePanTiltAction, PanTiltCameraLatLong, \ + PanTiltUAVCamera +from stonesoup.functions import pol2cart +from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \ + ConstantVelocity +from stonesoup.platform import FixedPlatform +from stonesoup.types.angle import Bearing, Elevation, Angle +from stonesoup.types.array import StateVector +from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState +from stonesoup.types.state import State + +# Parameters +# ========== +start_time = datetime.now() # Simulation start time +num_iter = 100 # Number of simulation steps +rotation_offset = StateVector([Angle(0), Angle(-np.pi/32), Angle(0)]) # Camera rotation offset +camera = PanTiltUAVCamera(ndim_state=6, mapping=[0, 2, 4], noise_covar=np.diag([0.001, 0.001, 0.001]), + fov_angle=np.radians(10), rpm=np.array([10, 10]), rotation_offset=rotation_offset) +platform = FixedPlatform(position_mapping=(0, 2, 4), orientation=StateVector([0, -np.pi/2, 0]), + states=[State([10., 0., 10., 0., 100., 0], timestamp=start_time)], + sensors=[camera]) + + + +# Models +# ====== +# Transition model +transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.01), + ConstantVelocity(0.01), + ConstantVelocity(0.0)]) + +# Simulate Groundtruth +# ==================== 
+gnd_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), + ConstantVelocity(0.), + ConstantVelocity(0.)]) +truths = set() +truth = GroundTruthPath([GroundTruthState([0, 0.2, 0, 0.2, 0, 0], timestamp=start_time)]) +for k in range(1, num_iter + 1): + truth.append(GroundTruthState( + gnd_transition_model.function(truth[k - 1], noise=True, time_interval=timedelta(seconds=1)), + timestamp=start_time + timedelta(seconds=k))) +truths.add(truth) + +truth = GroundTruthPath([GroundTruthState([0, 0.2, 20, -0.2, 0, 0], timestamp=start_time)]) +for k in range(1, num_iter + 1): + truth.append(GroundTruthState( + gnd_transition_model.function(truth[k - 1], noise=True, time_interval=timedelta(seconds=1)), + timestamp=start_time + timedelta(seconds=k))) +truths.add(truth) + +timestamps = [] +for k in range(1, num_iter + 1): + timestamps.append(start_time + timedelta(seconds=k)) + +# Simulate measurements +# ===================== +scans = [] + +generator = next(g for g in camera.actions(start_time + timedelta(seconds=10), start_timestamp=start_time)) +action = ChangePanTiltAction(rotation_end_time=start_time + timedelta(seconds=15), + generator=generator, + end_time=start_time + timedelta(seconds=15), + target_value=StateVector([Angle(0), + Angle(0), + Angle(0)]), + increasing_angle=[True, False]) + +camera.add_actions([action]) +fig = plt.figure(figsize=(10, 6)) +ax = fig.add_subplot(1, 1, 1) +for k in range(num_iter): + timestamp = timestamps[k] + camera.act(timestamp) + truth_states = [truth[k] for truth in truths] + measurement_set = camera.measure(truth_states, timestamp=timestamp) + scan = (timestamp, measurement_set) + scans.append(scan) + ax.cla() + ax.set_xlabel("$x$") + ax.set_ylabel("$y$") + ax.set_xlim(-10, 30) + ax.set_ylim(-10, 30) + ax.set_aspect('equal') + + # Fov ranges (min, center, max) + fov_range_tilt = (camera.rotation_offset[1]-camera.fov_angle/2, camera.rotation_offset[1], camera.rotation_offset[1]+camera.fov_angle/2) + 
fov_range_pan = (camera.rotation_offset[2]-camera.fov_angle/2, camera.rotation_offset[2], camera.rotation_offset[2]+camera.fov_angle/2) + + altitude = camera.position[2] + x_min = altitude * np.tan(fov_range_tilt[0]) + camera.position[0] + x_max = altitude * np.tan(fov_range_tilt[2]) + camera.position[0] + y_min = altitude * np.tan(fov_range_pan[0]) + camera.position[1] + y_max = altitude * np.tan(fov_range_pan[2]) + camera.position[1] + + ax.add_patch(Rectangle((x_min, y_min), x_max-x_min, y_max-y_min, facecolor='none', edgecolor='r')) + # x, y = pol2cart(100, camera.orientation[2] - camera.fov_angle / 2) + # ax.plot([0, x], [0, y], 'r-', label="Camera FOV") + # x, y = pol2cart(100, camera.orientation[2] + camera.fov_angle / 2) + # ax.plot([0, x], [0, y], 'r-') + for truth in truths: + data = np.array([state.state_vector for state in truth[:k + 1]]) + ax.plot(data[:, 0], data[:, 2], '--', label="Ground truth") + detections = scan[1] + for detection in detections: + # x, y = pol2cart(100, detection.state_vector[1] + camera.orientation[2]) + # ax.plot([0, x], [0, y], 'b-') + ax.plot(detection.state_vector[0], detection.state_vector[1], 'bx') + plt.pause(0.1) + a=2 + +# # Plot results +# # ============ +# for k, scan in enumerate(scans): +# +# a = 2 \ No newline at end of file From 65821a6f001b6a518f5508bf6bd6e72d543b560f Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 26 Sep 2022 14:28:37 +0100 Subject: [PATCH 10/87] Restructure custom module and tidy up defined actionable sensors --- examples/reactive-isr/camera_lat_lon_uav.py | 38 +- stonesoup/custom/functions/__init__.py | 20 + stonesoup/custom/sensor.py | 423 -------------------- stonesoup/custom/sensor/__init__.py | 0 stonesoup/custom/sensor/action/__init__.py | 0 stonesoup/custom/sensor/action/pan_tilt.py | 149 +++++++ stonesoup/custom/sensor/pan_tilt.py | 177 ++++++++ 7 files changed, 360 insertions(+), 447 deletions(-) create mode 100644 stonesoup/custom/functions/__init__.py delete mode 100644 
stonesoup/custom/sensor.py create mode 100644 stonesoup/custom/sensor/__init__.py create mode 100644 stonesoup/custom/sensor/action/__init__.py create mode 100644 stonesoup/custom/sensor/action/pan_tilt.py create mode 100644 stonesoup/custom/sensor/pan_tilt.py diff --git a/examples/reactive-isr/camera_lat_lon_uav.py b/examples/reactive-isr/camera_lat_lon_uav.py index 22cb2d8c6..4ba12e4ac 100644 --- a/examples/reactive-isr/camera_lat_lon_uav.py +++ b/examples/reactive-isr/camera_lat_lon_uav.py @@ -4,9 +4,9 @@ from matplotlib import pyplot as plt from matplotlib.patches import Rectangle -from stonesoup.custom.sensor import PanTiltCamera, ChangePanTiltAction, PanTiltCameraLatLong, \ - PanTiltUAVCamera -from stonesoup.functions import pol2cart +from stonesoup.custom.functions import get_camera_footprint +from stonesoup.custom.sensor.action.pan_tilt import ChangePanTiltAction +from stonesoup.custom.sensor.pan_tilt import PanTiltUAVCamera from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \ ConstantVelocity from stonesoup.platform import FixedPlatform @@ -15,14 +15,17 @@ from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState from stonesoup.types.state import State + + # Parameters # ========== start_time = datetime.now() # Simulation start time num_iter = 100 # Number of simulation steps -rotation_offset = StateVector([Angle(0), Angle(-np.pi/32), Angle(0)]) # Camera rotation offset +rotation_offset = StateVector([Angle(0), Angle(-np.pi/2), Angle(0)]) # Camera rotation offset +pan_tilt = StateVector([Angle(0), Angle(-np.pi/32)]) # Camera pan and tilt camera = PanTiltUAVCamera(ndim_state=6, mapping=[0, 2, 4], noise_covar=np.diag([0.001, 0.001, 0.001]), - fov_angle=np.radians(10), rpm=np.array([10, 10]), rotation_offset=rotation_offset) -platform = FixedPlatform(position_mapping=(0, 2, 4), orientation=StateVector([0, -np.pi/2, 0]), + fov_angle=np.radians(10), rotation_offset=rotation_offset, pan_tilt=pan_tilt) +platform 
= FixedPlatform(position_mapping=(0, 2, 4), orientation=StateVector([0, 0, 0]), states=[State([10., 0., 10., 0., 100., 0], timestamp=start_time)], sensors=[camera]) @@ -63,14 +66,9 @@ # ===================== scans = [] -generator = next(g for g in camera.actions(start_time + timedelta(seconds=10), start_timestamp=start_time)) -action = ChangePanTiltAction(rotation_end_time=start_time + timedelta(seconds=15), - generator=generator, - end_time=start_time + timedelta(seconds=15), - target_value=StateVector([Angle(0), - Angle(0), - Angle(0)]), - increasing_angle=[True, False]) +# Schedule an action to change the pan and tilt of the camera after 30 seconds +generator = next(g for g in camera.actions(start_time + timedelta(seconds=30))) +action = generator.action_from_value(StateVector([Angle(0), Angle(0)])) camera.add_actions([action]) fig = plt.figure(figsize=(10, 6)) @@ -90,16 +88,9 @@ ax.set_aspect('equal') # Fov ranges (min, center, max) - fov_range_tilt = (camera.rotation_offset[1]-camera.fov_angle/2, camera.rotation_offset[1], camera.rotation_offset[1]+camera.fov_angle/2) - fov_range_pan = (camera.rotation_offset[2]-camera.fov_angle/2, camera.rotation_offset[2], camera.rotation_offset[2]+camera.fov_angle/2) - - altitude = camera.position[2] - x_min = altitude * np.tan(fov_range_tilt[0]) + camera.position[0] - x_max = altitude * np.tan(fov_range_tilt[2]) + camera.position[0] - y_min = altitude * np.tan(fov_range_pan[0]) + camera.position[1] - y_max = altitude * np.tan(fov_range_pan[2]) + camera.position[1] + xmin, xmax, ymin, ymax = get_camera_footprint(camera) - ax.add_patch(Rectangle((x_min, y_min), x_max-x_min, y_max-y_min, facecolor='none', edgecolor='r')) + ax.add_patch(Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, facecolor='none', edgecolor='r')) # x, y = pol2cart(100, camera.orientation[2] - camera.fov_angle / 2) # ax.plot([0, x], [0, y], 'r-', label="Camera FOV") # x, y = pol2cart(100, camera.orientation[2] + camera.fov_angle / 2) @@ -113,7 +104,6 @@ # 
ax.plot([0, x], [0, y], 'b-') ax.plot(detection.state_vector[0], detection.state_vector[1], 'bx') plt.pause(0.1) - a=2 # # Plot results # # ============ diff --git a/stonesoup/custom/functions/__init__.py b/stonesoup/custom/functions/__init__.py new file mode 100644 index 000000000..a9e739ae6 --- /dev/null +++ b/stonesoup/custom/functions/__init__.py @@ -0,0 +1,20 @@ +import numpy as np + + +def get_camera_footprint(camera): + pan, tilt = camera.pan_tilt + altitude = camera.position[2] + + fov_range_pan = (pan-camera.fov_angle[0]/2, pan, pan+camera.fov_angle[0]/2) + fov_range_tilt = (tilt-camera.fov_angle[1]/2, tilt, tilt+camera.fov_angle[1]/2) + x_min = altitude * np.tan(fov_range_tilt[0]) + camera.position[0] + x_max = altitude * np.tan(fov_range_tilt[2]) + camera.position[0] + y_min = altitude * np.tan(fov_range_pan[0]) + camera.position[1] + y_max = altitude * np.tan(fov_range_pan[2]) + camera.position[1] + return x_min, x_max, y_min, y_max + + +def get_nearest(array, value): + array = np.asarray(array) + idx = (np.abs(array - value)).argmin() + return array[idx] \ No newline at end of file diff --git a/stonesoup/custom/sensor.py b/stonesoup/custom/sensor.py deleted file mode 100644 index 8a377d52e..000000000 --- a/stonesoup/custom/sensor.py +++ /dev/null @@ -1,423 +0,0 @@ -import datetime -from copy import copy -from typing import Sequence, Iterator, Set, Union -from itertools import product - -import numpy as np - -from stonesoup.base import Property -from stonesoup.functions import cart2sphere -from stonesoup.models.clutter import ClutterModel -from stonesoup.models.measurement.linear import LinearGaussian -from stonesoup.sensor.action import Action, RealNumberActionGenerator -from stonesoup.sensor.actionable import ActionableProperty -from stonesoup.sensor.passive import PassiveElevationBearing -from stonesoup.sensor.sensor import Sensor -from stonesoup.types.angle import Angle, Bearing, Elevation -from stonesoup.types.array import StateVector, 
CovarianceMatrix -from stonesoup.types.detection import TrueDetection -from stonesoup.types.groundtruth import GroundTruthState -from stonesoup.functions import build_rotation_matrix - - -class ChangePanTiltAction(Action): - """The action of changing the dwell centre of sensors where `dwell_centre` is an - :class:`~.ActionableProperty`""" - - rotation_end_time: datetime.datetime = Property(readonly=True, - doc="End time of rotation.") - increasing_angle: Sequence[bool] = Property( - default=None, readonly=True, - doc="Indicated the direction of change in the dwell centre angle. The first element " - "relates to bearing, the second to elevation.") - - def act(self, current_time, timestamp, init_value): - """Assumes that duration keeps within the action end time - - Parameters - ---------- - current_time: datetime.datetime - Current time - timestamp: datetime.datetime - Modification of attribute ends at this time stamp - init_value: Any - Current value of the dwell centre - - Returns - ------- - Any - The new value of the dwell centre""" - - if timestamp >= self.end_time: - return self.target_value # target direction - else: - return init_value # same direction - - -class PanTiltActionsGenerator(RealNumberActionGenerator): - """Generates possible actions for changing the dwell centre of a sensor in a given - time period.""" - - owner: object = Property(doc="Object with `timestamp`, `rpm` (revolutions per minute) and " - "dwell-centre attributes") - resolution: Angle = Property(default=np.radians(1), doc="Resolution of action space") - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.epsilon = Angle(np.radians(1e-6)) - - @property - def default_action(self): - return ChangePanTiltAction(rotation_end_time=self.end_time, - generator=self, - end_time=self.end_time, - target_value=self.current_value, - increasing_angle=None) - - def __call__(self, resolution=None, epsilon=None): - """ - Parameters - ---------- - resolution : Angle - 
Resolution of yielded action target values - epsilon: float - Tolerance of equality check in iteration - """ - if resolution is not None: - self.resolution = resolution - if epsilon is not None: - self.epsilon = epsilon - - @property - def initial_value(self): - return self.current_value - - @property - def duration(self): - return self.end_time - self.start_time - - @property - def rps(self): - return self.owner.rpm / 60 - - @property - def angle_delta(self): - return StateVector([Angle(0), - Angle(self.duration.total_seconds() * self.rps[0] * 2 * np.pi), - Angle(self.duration.total_seconds() * self.rps[1] * 2 * np.pi)]) - - @property - def min(self): - min = self.initial_value.astype(float) - self.angle_delta - min[1] = np.maximum(Angle(self.initial_value[1]) - self.angle_delta[1], - Angle(-np.pi / 2)) - return min - - @property - def max(self): - max = self.initial_value.astype(float) + self.angle_delta - max[1] = np.minimum(Angle(self.initial_value[1]) + self.angle_delta[1], - Angle(np.pi / 2)) - return max - - def __contains__(self, item): - - if self.angle_delta[2] >= np.pi or self.angle_delta[1] >= np.pi / 2: - # Left turn and right turn are > 180, so all angles hit - return True - - if isinstance(item, ChangePanTiltAction): - item = item.target_value - - return self.min[1] <= item[1] <= self.max[1] and self.min[2] <= item[2] <= self.max[2] - - def _end_time_direction_pan(self, angle): - """Given a target bearing, should the dwell centre rotate so as to increase its angle - value, or decrease? 
And how long until it reaches the target.""" - - angle = Angle(angle) - - if self.initial_value[2] - self.epsilon \ - <= angle \ - <= self.initial_value[2] + self.epsilon: - return self.start_time, None # no rotation, target bearing achieved - - angle_delta = np.abs(angle - self.initial_value[2]) - - return ( - self.start_time + datetime.timedelta(seconds=angle_delta / (self.rps[1] * 2 * np.pi)), - angle > self.initial_value[2] - ) - - def _end_time_direction_tilt(self, angle): - """Given a target bearing, should the dwell centre rotate so as to increase its angle - value, or decrease? And how long until it reaches the target.""" - - angle = Angle(angle) - - if self.initial_value[1] - self.epsilon \ - <= angle \ - <= self.initial_value[1] + self.epsilon: - return self.start_time, None # no rotation, target bearing achieved - - angle_delta = np.abs(angle - self.initial_value[1]) - - return ( - self.start_time + datetime.timedelta(seconds=angle_delta / (self.rps[1] * 2 * np.pi)), - angle > self.initial_value[1] - ) - - def __iter__(self) -> Iterator[ChangePanTiltAction]: - """Returns ChangePanTiltAction types, where the value is a possible value of the [0, 0] - element of the dwell centre's state vector.""" - - possible_elevations = np.arange(self.min[1], self.max[1], self.resolution) - for elevation in possible_elevations: - elevation_end_time, increasing_e = self._end_time_direction_tilt(elevation) - bearing = self.min[2] - while bearing <= self.max[2] + self.epsilon: - bearing_end_time, increasing_b = self._end_time_direction_pan(bearing) - yield ChangePanTiltAction(rotation_end_time=max(bearing_end_time, - elevation_end_time), - generator=self, - end_time=self.end_time, - target_value=StateVector([Angle(0), - Elevation(elevation), - Bearing(bearing)]), - increasing_angle=[increasing_e, increasing_b]) - bearing += self.resolution - - def action_from_value(self, value): - raise NotImplementedError - - -class PanTiltUAVActionsGenerator(PanTiltActionsGenerator): - - 
@property - def min(self): - min = self.initial_value.astype(float) - self.angle_delta - min[0] = np.maximum(Angle(self.initial_value[0]) - self.angle_delta[0], - Angle(-np.pi / 2)) - min[1] = np.maximum(Angle(self.initial_value[1]) - self.angle_delta[1], - Angle(-np.pi / 2)) - return min - - @property - def max(self): - max = self.initial_value.astype(float) + self.angle_delta - max[0] = np.minimum(Angle(self.initial_value[0]) + self.angle_delta[0], - Angle(np.pi / 2)) - max[1] = np.minimum(Angle(self.initial_value[1]) + self.angle_delta[1], - Angle(np.pi / 2)) - return max - - def __contains__(self, item): - - if self.angle_delta[2] >= np.pi / 2 or self.angle_delta[1] >= np.pi / 2: - # Left turn and right turn are > 180, so all angles hit - return True - - if isinstance(item, ChangePanTiltAction): - item = item.target_value - - return self.min[1] <= item[1] <= self.max[1] and self.min[2] <= item[2] <= self.max[2] - - def _end_time_direction_pan(self, angle): - """Given a target bearing, should the dwell centre rotate so as to increase its angle - value, or decrease? And how long until it reaches the target.""" - - angle = Angle(angle) - - if self.initial_value[2] - self.epsilon \ - <= angle \ - <= self.initial_value[2] + self.epsilon: - return self.start_time, None # no rotation, target bearing achieved - - angle_delta = np.abs(angle - self.initial_value[2]) - - return ( - self.start_time + datetime.timedelta(seconds=angle_delta / (self.rps[1] * 2 * np.pi)), - angle > self.initial_value[2] - ) - - def _end_time_direction_tilt(self, angle): - """Given a target bearing, should the dwell centre rotate so as to increase its angle - value, or decrease? 
And how long until it reaches the target.""" - - angle = Angle(angle) - - if self.initial_value[1] - self.epsilon \ - <= angle \ - <= self.initial_value[1] + self.epsilon: - return self.start_time, None # no rotation, target bearing achieved - - angle_delta = np.abs(angle - self.initial_value[1]) - - return ( - self.start_time + datetime.timedelta(seconds=angle_delta / (self.rps[1] * 2 * np.pi)), - angle > self.initial_value[1] - ) - - def __iter__(self) -> Iterator[ChangePanTiltAction]: - """Returns ChangePanTiltAction types, where the value is a possible value of the [0, 0] - element of the dwell centre's state vector.""" - - possible_tilt_angles = np.arange(self.min[1], self.max[1], self.resolution) - possible_pan_angles = np.arange(self.min[2], self.max[2], self.resolution) - for (pan_angle, tilt_angle) in product(possible_pan_angles, possible_tilt_angles): - pan_end_time, increasing_p = self._end_time_direction_pan(pan_angle) - tilt_end_time, increasing_t = self._end_time_direction_tilt(tilt_angle) - yield ChangePanTiltAction(rotation_end_time=max(pan_end_time, tilt_end_time), - generator=self, - end_time=self.end_time, - target_value=StateVector([Angle(0), - Elevation(tilt_angle), - Bearing(pan_angle)]), - increasing_angle=[increasing_t, increasing_p]) - - def action_from_value(self, value): - raise NotImplementedError - - -class PanTiltCamera(PassiveElevationBearing): - """A camera that can pan and tilt.""" - - rotation_offset: StateVector = ActionableProperty( - doc="A StateVector containing the sensor rotation " - "offsets from the platform's primary axis (defined as the " - "direction of motion). 
Defaults to a zero vector with the " - "same length as the Platform's :attr:`velocity_mapping`", - default=None, - generator_cls=PanTiltActionsGenerator) - rpm: float = Property( - doc="The number of rotations per minute (RPM)") - fov_angle: float = Property( - doc="The field of view (FOV) angle (in radians).") - clutter_model: ClutterModel = Property( - default=None, - doc="An optional clutter generator that adds a set of simulated " - ":class:`Clutter` objects to the measurements at each time step. " - "The clutter is simulated according to the provided distribution.") - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True, - **kwargs) -> Set[TrueDetection]: - detections = set() - measurement_model = self.measurement_model - - for truth in ground_truths: - # Transform state to measurement space and generate - # random noise - measurement_vector = measurement_model.function(truth, noise=noise, **kwargs) - - # Check if state falls within sensor's FOV - fov_min = -self.fov_angle / 2 - fov_max = +self.fov_angle / 2 - bearing_t = measurement_vector[1, 0] - elevation_t = measurement_vector[0, 0] - - # Do not measure if state not in FOV - if (not fov_min <= bearing_t <= fov_max) or (not fov_min <= elevation_t <= fov_max): - continue - - detection = TrueDetection(measurement_vector, - measurement_model=measurement_model, - timestamp=truth.timestamp, - groundtruth_path=truth) - detections.add(detection) - - # Generate clutter at this time step - if self.clutter_model is not None: - self.clutter_model.measurement_model = measurement_model - clutter = self.clutter_model.function(ground_truths) - detections |= clutter - - return detections - - -class PanTiltUAVCamera(Sensor): - """A camera that can pan and tilt.""" - ndim_state: int = Property( - doc="Number of state dimensions. 
This is utilised by (and follows in\ - format) the underlying :class:`~.CartesianToElevationBearing`\ - model") - mapping: np.ndarray = Property( - doc="Mapping between the targets state space and the sensors\ - measurement capability") - noise_covar: CovarianceMatrix = Property( - doc="The sensor noise covariance matrix. This is utilised by\ - (and follow in format) the underlying \ - :class:`~.CartesianToElevationBearing` model") - pan_tilt: StateVector = ActionableProperty( - doc="A StateVector containing the sensor rotation " - "offsets from the platform's primary axis (defined as the " - "direction of motion). Defaults to a zero vector with the " - "same length as the Platform's :attr:`velocity_mapping`", - default=None, - generator_cls=PanTiltUAVActionsGenerator) - rpm: float = Property( - doc="The number of rotations per minute (RPM)") - fov_angle: float = Property( - doc="The field of view (FOV) angle (in radians).") - clutter_model: ClutterModel = Property( - default=None, - doc="An optional clutter generator that adds a set of simulated " - ":class:`Clutter` objects to the measurements at each time step. 
" - "The clutter is simulated according to the provided distribution.") - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - @property - def measurement_model(self): - return LinearGaussian( - ndim_state=self.ndim_state, - mapping=self.mapping, - noise_covar=self.noise_covar) - - def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True, - **kwargs) -> Set[TrueDetection]: - - detections = set() - measurement_model = self.measurement_model - - for truth in ground_truths: - # Transform state to measurement space and generate - # random noise - measurement_vector = measurement_model.function(truth, noise=noise, **kwargs) - - # Normalise measurement vector relative to sensor position - norm_measurement_vector = measurement_vector.astype(float) - self.position.astype(float) - - # Rotate measurement vector relative to sensor orientation - rotation_matrix = build_rotation_matrix(self.orientation) - norm_rotated_measurement_vector = rotation_matrix @ norm_measurement_vector - - # Convert to spherical coordinates - _, bearing_t, elevation_t = cart2sphere(*norm_rotated_measurement_vector) - - # Check if state falls within sensor's FOV - fov_min = -self.fov_angle / 2 - fov_max = +self.fov_angle / 2 - bearing_t = bearing_t - elevation_t = elevation_t - - # Do not measure if state not in FOV - if (not fov_min <= bearing_t <= fov_max) or (not fov_min <= elevation_t <= fov_max): - continue - - detection = TrueDetection(measurement_vector, - measurement_model=measurement_model, - timestamp=truth.timestamp, - groundtruth_path=truth) - detections.add(detection) - - # Generate clutter at this time step - if self.clutter_model is not None: - self.clutter_model.measurement_model = measurement_model - clutter = self.clutter_model.function(ground_truths) - detections |= clutter - - return detections \ No newline at end of file diff --git a/stonesoup/custom/sensor/__init__.py b/stonesoup/custom/sensor/__init__.py new file 
mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/sensor/action/__init__.py b/stonesoup/custom/sensor/action/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/sensor/action/pan_tilt.py b/stonesoup/custom/sensor/action/pan_tilt.py new file mode 100644 index 000000000..cf20d9fda --- /dev/null +++ b/stonesoup/custom/sensor/action/pan_tilt.py @@ -0,0 +1,149 @@ +import datetime +from itertools import product +from typing import Sequence, Iterator + +import numpy as np + +from stonesoup.base import Property +from stonesoup.custom.functions import get_nearest +from stonesoup.sensor.action import Action, RealNumberActionGenerator +from stonesoup.types.angle import Angle, Elevation, Bearing +from stonesoup.types.array import StateVector + + +class ChangePanTiltAction(Action): + """The action of changing the pan & tilt of sensors where `pan_tilt` is an + :class:`~.ActionableProperty`""" + + increasing_angle: Sequence[bool] = Property( + default=[None, None], readonly=True, + doc="Indicated the direction of change in the dwell centre angle. 
The first element " + "relates to bearing, the second to elevation.") + + def act(self, current_time, timestamp, init_value): + """Assumes that duration keeps within the action end time + + Parameters + ---------- + current_time: datetime.datetime + Current time + timestamp: datetime.datetime + Modification of attribute ends at this time stamp + init_value: Any + Current value of the dwell centre + + Returns + ------- + Any + The new value of the dwell centre""" + + if timestamp >= self.end_time: + return self.target_value # target direction + else: + return init_value # same direction + + +class PanTiltActionsGenerator(RealNumberActionGenerator): + """Generates possible actions for changing the dwell centre of a sensor in a given + time period.""" + + owner: object = Property(doc="Object with `timestamp`, `rpm` (revolutions per minute) and " + "dwell-centre attributes") + resolution: Angle = Property(default=np.radians(1), doc="Resolution of action space") + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def default_action(self): + return ChangePanTiltAction(generator=self, + end_time=self.end_time, + target_value=self.current_value, + increasing_angle=None) + + def __call__(self, resolution=None, epsilon=None): + """ + Parameters + ---------- + resolution : Angle + Resolution of yielded action target values + epsilon: float + Tolerance of equality check in iteration + """ + if resolution is not None: + self.resolution = resolution + if epsilon is not None: + self.epsilon = epsilon + + @property + def initial_value(self): + return self.current_value + + @property + def min(self): + # Pan can rotate freely, while tilt is limited to +/- 90 degrees + return np.array([Angle(-2*np.pi), Angle(-np.pi/2)]) + + @property + def max(self): + # Pan can rotate freely, while tilt is limited to +/- 90 degrees + return np.array([Angle(2*np.pi), Angle(np.pi/2)]) + + def __contains__(self, item): + + if isinstance(item, 
ChangePanTiltAction): + item = item.target_value + + return self.min[0] <= item[0] <= self.max[0] and self.min[1] <= item[1] <= self.max[1] + + def _get_direction(self, angle, idx): + angle = Angle(angle) + + if self.initial_value[idx] - self.resolution/2 \ + <= angle \ + <= self.initial_value[idx] + self.resolution/2: + return None # no rotation, target angle achieved + + return angle > self.initial_value[idx] + + def __iter__(self) -> Iterator[ChangePanTiltAction]: + """Returns all possible ChangePanTiltAction types""" + possible_pan_angles = np.arange(self.min[0], self.max[0], self.resolution) + possible_tilt_angles = np.arange(self.min[1], self.max[1], self.resolution) + for (pan_angle, tilt_angle) in product(possible_pan_angles, possible_tilt_angles): + increasing_p = self._get_direction(pan_angle, 0) + increasing_t = self._get_direction(tilt_angle, 1) + yield ChangePanTiltAction(generator=self, + end_time=self.end_time, + target_value=StateVector([Angle(pan_angle), + Angle(tilt_angle)]), + increasing_angle=[increasing_p, increasing_t]) + + def action_from_value(self, value): + if value not in self: + return None + pan_angle = Angle(value[0]) + tilt_angle = Angle(value[1]) + possible_pan_angles = np.arange(self.min[0], self.max[0], self.resolution) + possible_tilt_angles = np.arange(self.min[1], self.max[1], self.resolution) + pan_angle = get_nearest(possible_pan_angles, pan_angle) + tilt_angle = get_nearest(possible_tilt_angles, tilt_angle) + increasing_p = self._get_direction(pan_angle, 0) + increasing_t = self._get_direction(tilt_angle, 1) + return ChangePanTiltAction(generator=self, + end_time=self.end_time, + target_value=StateVector([pan_angle, tilt_angle]), + increasing_angle=[increasing_p, increasing_t]) + + +class PanTiltUAVActionsGenerator(PanTiltActionsGenerator): + + @property + def min(self): + # Pan and tilt are limited to +/- 90 degrees + return np.array([Angle(-np.pi / 2), Angle(-np.pi / 2)]) + + @property + def max(self): + # Pan and tilt are 
limited to +/- 90 degrees + return np.array([Angle(np.pi / 2), Angle(np.pi / 2)]) \ No newline at end of file diff --git a/stonesoup/custom/sensor/pan_tilt.py b/stonesoup/custom/sensor/pan_tilt.py new file mode 100644 index 000000000..06443a292 --- /dev/null +++ b/stonesoup/custom/sensor/pan_tilt.py @@ -0,0 +1,177 @@ +import datetime +from copy import copy +from typing import Sequence, Iterator, Set, Union, Optional, List +from itertools import product + +import numpy as np + +from stonesoup.base import Property +from stonesoup.custom.sensor.action.pan_tilt import PanTiltActionsGenerator, \ + PanTiltUAVActionsGenerator +from stonesoup.functions import cart2sphere +from stonesoup.models.clutter import ClutterModel +from stonesoup.models.measurement.linear import LinearGaussian +from stonesoup.sensor.action import Action, RealNumberActionGenerator +from stonesoup.sensor.actionable import ActionableProperty +from stonesoup.sensor.passive import PassiveElevationBearing +from stonesoup.sensor.sensor import Sensor +from stonesoup.types.angle import Angle, Bearing, Elevation +from stonesoup.types.array import StateVector, CovarianceMatrix +from stonesoup.types.detection import TrueDetection +from stonesoup.types.groundtruth import GroundTruthState +from stonesoup.functions import build_rotation_matrix + + +class PanTiltCamera(PassiveElevationBearing): + """A camera that can pan and tilt.""" + + pan_tilt: StateVector = ActionableProperty( + doc="A StateVector containing the sensor pan and tilt angles. Defaults to a zero vector", + default=None, + generator_cls=PanTiltActionsGenerator) + fov_angle: float = Property( + doc="The field of view (FOV) angle (in radians).") + clutter_model: ClutterModel = Property( + default=None, + doc="An optional clutter generator that adds a set of simulated " + ":class:`Clutter` objects to the measurements at each time step. 
" + "The clutter is simulated according to the provided distribution.") + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True, + **kwargs) -> Set[TrueDetection]: + detections = set() + measurement_model = self.measurement_model + + for truth in ground_truths: + # Transform state to measurement space and generate + # random noise + measurement_vector = measurement_model.function(truth, noise=noise, **kwargs) + + # Check if state falls within sensor's FOV + fov_min = -self.fov_angle / 2 + fov_max = +self.fov_angle / 2 + bearing_t = measurement_vector[1, 0] + elevation_t = measurement_vector[0, 0] + + # Do not measure if state not in FOV + if (not fov_min <= bearing_t <= fov_max) or (not fov_min <= elevation_t <= fov_max): + continue + + detection = TrueDetection(measurement_vector, + measurement_model=measurement_model, + timestamp=truth.timestamp, + groundtruth_path=truth) + detections.add(detection) + + # Generate clutter at this time step + if self.clutter_model is not None: + self.clutter_model.measurement_model = measurement_model + clutter = self.clutter_model.function(ground_truths) + detections |= clutter + + return detections + + +class PanTiltUAVCamera(Sensor): + """A camera that can pan and tilt.""" + ndim_state: int = Property( + doc="Number of state dimensions. This is utilised by (and follows in\ + format) the underlying :class:`~.CartesianToElevationBearing`\ + model") + mapping: np.ndarray = Property( + doc="Mapping between the targets state space and the sensors\ + measurement capability") + noise_covar: CovarianceMatrix = Property( + doc="The sensor noise covariance matrix. 
This is utilised by\ + (and follow in format) the underlying \ + :class:`~.CartesianToElevationBearing` model") + fov_angle: Union[float, List[float]] = Property( + doc="The field of view (FOV) angle (in radians).") + pan_tilt: StateVector = ActionableProperty( + doc="A StateVector containing the sensor pan and tilt angles. Defaults to a zero vector", + default=None, + generator_cls=PanTiltUAVActionsGenerator) + clutter_model: ClutterModel = Property( + default=None, + doc="An optional clutter generator that adds a set of simulated " + ":class:`Clutter` objects to the measurements at each time step. " + "The clutter is simulated according to the provided distribution.") + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if isinstance(self.fov_angle, float): + self.fov_angle = [self.fov_angle, self.fov_angle] + if self.pan_tilt is None: + self.pan_tilt = StateVector([Angle(0), Angle(0)]) + + @property + def measurement_model(self): + return LinearGaussian( + ndim_state=self.ndim_state, + mapping=self.mapping, + noise_covar=self.noise_covar) + + def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True, + **kwargs) -> Set[TrueDetection]: + + detections = set() + measurement_model = self.measurement_model + + for truth in ground_truths: + # Transform state to measurement space and generate random noise + measurement_vector = measurement_model.function(truth, noise=noise, **kwargs) + + # Normalise measurement vector relative to sensor position + norm_measurement_vector = measurement_vector.astype(float) - self.position.astype(float) + + # Rotate measurement vector relative to sensor orientation + rotation_matrix = build_rotation_matrix(self.orientation) + norm_rotated_measurement_vector = rotation_matrix @ norm_measurement_vector + + # Convert to spherical coordinates + _, bearing_t, elevation_t = cart2sphere(*norm_rotated_measurement_vector) + + # Check if state falls within sensor's FOV + fov_min = 
-np.array(self.fov_angle) / 2 + fov_max = +np.array(self.fov_angle) / 2 + + # Do not measure if state not in FOV + if (not fov_min[0] <= bearing_t <= fov_max[0]) \ + or (not fov_min[1] <= elevation_t <= fov_max[1]): + continue + + detection = TrueDetection(measurement_vector, + measurement_model=measurement_model, + timestamp=truth.timestamp, + groundtruth_path=truth) + detections.add(detection) + + # Generate clutter at this time step + if self.clutter_model is not None: + self.clutter_model.measurement_model = measurement_model + clutter = self.clutter_model.function(ground_truths) + detections |= clutter + + return detections + + @property + def orientation(self) -> Optional[StateVector]: + """A 3x1 StateVector of angles (rad), specifying the sensor orientation in terms of the + counter-clockwise rotation around each Cartesian axis in the order :math:`x,y,z`. + The rotation angles are positive if the rotation is in the counter-clockwise + direction when viewed by an observer looking along the respective rotation axis, + towards the origin. + + .. note:: + This property delegates the actual calculation of orientation to the Sensor's + :attr:`movement_controller` + + It is settable if, and only if, the sensor holds its own internal movement_controller. 
+ """ + if self.movement_controller is None: + return None + return self.movement_controller.orientation + self.rotation_offset \ + StateVector([0, self.pan_tilt[1], self.pan_tilt[0]]) \ No newline at end of file From a195405e69a59cb67bc0ed69f060549ea1a38eec Mon Sep 17 00:00:00 2001 From: sglvladi Date: Tue, 27 Sep 2022 12:52:14 +0100 Subject: [PATCH 11/87] Ensure default action is also returned when iterating generator in PanTiltActionsGenerator --- stonesoup/custom/sensor/action/pan_tilt.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stonesoup/custom/sensor/action/pan_tilt.py b/stonesoup/custom/sensor/action/pan_tilt.py index cf20d9fda..797ae4175 100644 --- a/stonesoup/custom/sensor/action/pan_tilt.py +++ b/stonesoup/custom/sensor/action/pan_tilt.py @@ -49,7 +49,7 @@ class PanTiltActionsGenerator(RealNumberActionGenerator): owner: object = Property(doc="Object with `timestamp`, `rpm` (revolutions per minute) and " "dwell-centre attributes") - resolution: Angle = Property(default=np.radians(1), doc="Resolution of action space") + resolution: Angle = Property(default=np.radians(5), doc="Resolution of action space") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @@ -110,6 +110,7 @@ def __iter__(self) -> Iterator[ChangePanTiltAction]: """Returns all possible ChangePanTiltAction types""" possible_pan_angles = np.arange(self.min[0], self.max[0], self.resolution) possible_tilt_angles = np.arange(self.min[1], self.max[1], self.resolution) + yield self.default_action for (pan_angle, tilt_angle) in product(possible_pan_angles, possible_tilt_angles): increasing_p = self._get_direction(pan_angle, 0) increasing_t = self._get_direction(tilt_angle, 1) From bb0e4933dd1a08c476bd4376cdfb3ef532d6ddbe Mon Sep 17 00:00:00 2001 From: sglvladi Date: Tue, 27 Sep 2022 12:53:07 +0100 Subject: [PATCH 12/87] Doc update for fov_angle in PanTiltUAVCamera --- stonesoup/custom/sensor/pan_tilt.py | 4 +++- 1 file changed, 3 insertions(+), 1 
deletion(-) diff --git a/stonesoup/custom/sensor/pan_tilt.py b/stonesoup/custom/sensor/pan_tilt.py index 06443a292..6eeb7c618 100644 --- a/stonesoup/custom/sensor/pan_tilt.py +++ b/stonesoup/custom/sensor/pan_tilt.py @@ -89,7 +89,9 @@ class PanTiltUAVCamera(Sensor): (and follow in format) the underlying \ :class:`~.CartesianToElevationBearing` model") fov_angle: Union[float, List[float]] = Property( - doc="The field of view (FOV) angle (in radians).") + doc="The field of view (FOV) angle (in radians). If provided in a list, the first element " + "is the pan FOV angle and the second element is the tilt FOV angle. Else, the same " + "FOV angle is used for both pan and tilt.") pan_tilt: StateVector = ActionableProperty( doc="A StateVector containing the sensor pan and tilt angles. Defaults to a zero vector", default=None, From 25737b2c60e82a59ec97648014c81881624130b5 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Tue, 27 Sep 2022 17:10:01 +0100 Subject: [PATCH 13/87] Split pan and tilt into separate actionables --- stonesoup/custom/functions/__init__.py | 5 +- stonesoup/custom/sensor/action/angle.py | 148 ++++++++++++++++++++ stonesoup/custom/sensor/action/pan_tilt.py | 150 --------------------- stonesoup/custom/sensor/pan_tilt.py | 71 ++-------- 4 files changed, 162 insertions(+), 212 deletions(-) create mode 100644 stonesoup/custom/sensor/action/angle.py delete mode 100644 stonesoup/custom/sensor/action/pan_tilt.py diff --git a/stonesoup/custom/functions/__init__.py b/stonesoup/custom/functions/__init__.py index a9e739ae6..048914c92 100644 --- a/stonesoup/custom/functions/__init__.py +++ b/stonesoup/custom/functions/__init__.py @@ -2,7 +2,10 @@ def get_camera_footprint(camera): - pan, tilt = camera.pan_tilt + try: + pan, tilt = camera.pan_tilt + except: + pan, tilt = camera.pan, camera.tilt altitude = camera.position[2] fov_range_pan = (pan-camera.fov_angle[0]/2, pan, pan+camera.fov_angle[0]/2) diff --git a/stonesoup/custom/sensor/action/angle.py 
b/stonesoup/custom/sensor/action/angle.py new file mode 100644 index 000000000..4df8c5802 --- /dev/null +++ b/stonesoup/custom/sensor/action/angle.py @@ -0,0 +1,148 @@ +from typing import Iterator + +import numpy as np + +from stonesoup.base import Property +from stonesoup.custom.functions import get_nearest +from stonesoup.sensor.action import Action, RealNumberActionGenerator +from stonesoup.types.angle import Angle + + +class ChangeAngleAction(Action): + """The action of changing the pan & tilt of sensors where `pan_tilt` is an + :class:`~.ActionableProperty`""" + + increasing_angle: bool = Property( + default=None, readonly=True, + doc="Indicated the direction of change in the dwell centre angle. The first element " + "relates to bearing, the second to elevation.") + + def act(self, current_time, timestamp, init_value): + """Assumes that duration keeps within the action end time + + Parameters + ---------- + current_time: datetime.datetime + Current time + timestamp: datetime.datetime + Modification of attribute ends at this time stamp + init_value: Any + Current value of the dwell centre + + Returns + ------- + Any + The new value of the dwell centre""" + + if timestamp >= self.end_time: + return self.target_value # target direction + else: + return init_value # same direction + + +class ChangePanAction(ChangeAngleAction): + pass + + +class ChangeTiltAction(ChangeAngleAction): + pass + + +class AngleUAVActionsGenerator(RealNumberActionGenerator): + """Generates possible actions for changing the dwell centre of a sensor in a given + time period.""" + + owner: object = Property(doc="Object with `timestamp`, `rpm` (revolutions per minute) and " + "dwell-centre attributes") + resolution: Angle = Property(default=np.radians(5), doc="Resolution of action space") + + _action_cls = ChangeAngleAction + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def default_action(self): + return ChangeAngleAction(generator=self, + 
end_time=self.end_time, + target_value=self.current_value, + increasing_angle=None) + + def __call__(self, resolution=None, epsilon=None): + """ + Parameters + ---------- + resolution : Angle + Resolution of yielded action target values + epsilon: float + Tolerance of equality check in iteration + """ + if resolution is not None: + self.resolution = resolution + if epsilon is not None: + self.epsilon = epsilon + + @property + def initial_value(self): + return self.current_value + + @property + def min(self): + # Pan can rotate freely, while tilt is limited to +/- 90 degrees + return Angle(-np.pi / 2) + + @property + def max(self): + # Pan can rotate freely, while tilt is limited to +/- 90 degrees + return Angle(np.pi / 2) + + def __contains__(self, item): + + if isinstance(item, self._action_cls): + item = item.target_value + + return self.min <= item <= self.max + + def _get_direction(self, angle): + angle = Angle(angle) + + if self.initial_value - self.resolution / 2 \ + <= angle \ + <= self.initial_value + self.resolution / 2: + return None # no rotation, target angle achieved + + return angle > self.initial_value + + def __iter__(self) -> Iterator[ChangeAngleAction]: + """Returns all possible ChangePanTiltAction types""" + possible_angles = np.arange(self.min, self.max, self.resolution) + + yield self.default_action + for angle in possible_angles: + increasing = self._get_direction(angle) + yield self._action_cls(generator=self, + end_time=self.end_time, + target_value=angle, + increasing_angle=increasing) + + def action_from_value(self, value): + if value not in self: + return None + possible_angles = np.arange(self.min, self.max, self.resolution) + angle = get_nearest(possible_angles, value) + increasing = self._get_direction(angle) + return self._action_cls(generator=self, + end_time=self.end_time, + target_value=angle, + increasing_angle=increasing) + + +class PanUAVActionsGenerator(AngleUAVActionsGenerator): + _action_cls = ChangePanAction + pass + + 
+class TiltUAVActionsGenerator(AngleUAVActionsGenerator): + _action_cls = ChangeTiltAction + pass + diff --git a/stonesoup/custom/sensor/action/pan_tilt.py b/stonesoup/custom/sensor/action/pan_tilt.py deleted file mode 100644 index 797ae4175..000000000 --- a/stonesoup/custom/sensor/action/pan_tilt.py +++ /dev/null @@ -1,150 +0,0 @@ -import datetime -from itertools import product -from typing import Sequence, Iterator - -import numpy as np - -from stonesoup.base import Property -from stonesoup.custom.functions import get_nearest -from stonesoup.sensor.action import Action, RealNumberActionGenerator -from stonesoup.types.angle import Angle, Elevation, Bearing -from stonesoup.types.array import StateVector - - -class ChangePanTiltAction(Action): - """The action of changing the pan & tilt of sensors where `pan_tilt` is an - :class:`~.ActionableProperty`""" - - increasing_angle: Sequence[bool] = Property( - default=[None, None], readonly=True, - doc="Indicated the direction of change in the dwell centre angle. 
The first element " - "relates to bearing, the second to elevation.") - - def act(self, current_time, timestamp, init_value): - """Assumes that duration keeps within the action end time - - Parameters - ---------- - current_time: datetime.datetime - Current time - timestamp: datetime.datetime - Modification of attribute ends at this time stamp - init_value: Any - Current value of the dwell centre - - Returns - ------- - Any - The new value of the dwell centre""" - - if timestamp >= self.end_time: - return self.target_value # target direction - else: - return init_value # same direction - - -class PanTiltActionsGenerator(RealNumberActionGenerator): - """Generates possible actions for changing the dwell centre of a sensor in a given - time period.""" - - owner: object = Property(doc="Object with `timestamp`, `rpm` (revolutions per minute) and " - "dwell-centre attributes") - resolution: Angle = Property(default=np.radians(5), doc="Resolution of action space") - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - @property - def default_action(self): - return ChangePanTiltAction(generator=self, - end_time=self.end_time, - target_value=self.current_value, - increasing_angle=None) - - def __call__(self, resolution=None, epsilon=None): - """ - Parameters - ---------- - resolution : Angle - Resolution of yielded action target values - epsilon: float - Tolerance of equality check in iteration - """ - if resolution is not None: - self.resolution = resolution - if epsilon is not None: - self.epsilon = epsilon - - @property - def initial_value(self): - return self.current_value - - @property - def min(self): - # Pan can rotate freely, while tilt is limited to +/- 90 degrees - return np.array([Angle(-2*np.pi), Angle(-np.pi/2)]) - - @property - def max(self): - # Pan can rotate freely, while tilt is limited to +/- 90 degrees - return np.array([Angle(2*np.pi), Angle(np.pi/2)]) - - def __contains__(self, item): - - if isinstance(item, 
ChangePanTiltAction): - item = item.target_value - - return self.min[0] <= item[0] <= self.max[0] and self.min[1] <= item[1] <= self.max[1] - - def _get_direction(self, angle, idx): - angle = Angle(angle) - - if self.initial_value[idx] - self.resolution/2 \ - <= angle \ - <= self.initial_value[idx] + self.resolution/2: - return None # no rotation, target angle achieved - - return angle > self.initial_value[idx] - - def __iter__(self) -> Iterator[ChangePanTiltAction]: - """Returns all possible ChangePanTiltAction types""" - possible_pan_angles = np.arange(self.min[0], self.max[0], self.resolution) - possible_tilt_angles = np.arange(self.min[1], self.max[1], self.resolution) - yield self.default_action - for (pan_angle, tilt_angle) in product(possible_pan_angles, possible_tilt_angles): - increasing_p = self._get_direction(pan_angle, 0) - increasing_t = self._get_direction(tilt_angle, 1) - yield ChangePanTiltAction(generator=self, - end_time=self.end_time, - target_value=StateVector([Angle(pan_angle), - Angle(tilt_angle)]), - increasing_angle=[increasing_p, increasing_t]) - - def action_from_value(self, value): - if value not in self: - return None - pan_angle = Angle(value[0]) - tilt_angle = Angle(value[1]) - possible_pan_angles = np.arange(self.min[0], self.max[0], self.resolution) - possible_tilt_angles = np.arange(self.min[1], self.max[1], self.resolution) - pan_angle = get_nearest(possible_pan_angles, pan_angle) - tilt_angle = get_nearest(possible_tilt_angles, tilt_angle) - increasing_p = self._get_direction(pan_angle, 0) - increasing_t = self._get_direction(tilt_angle, 1) - return ChangePanTiltAction(generator=self, - end_time=self.end_time, - target_value=StateVector([pan_angle, tilt_angle]), - increasing_angle=[increasing_p, increasing_t]) - - -class PanTiltUAVActionsGenerator(PanTiltActionsGenerator): - - @property - def min(self): - # Pan and tilt are limited to +/- 90 degrees - return np.array([Angle(-np.pi / 2), Angle(-np.pi / 2)]) - - @property - def 
max(self): - # Pan and tilt are limited to +/- 90 degrees - return np.array([Angle(np.pi / 2), Angle(np.pi / 2)]) \ No newline at end of file diff --git a/stonesoup/custom/sensor/pan_tilt.py b/stonesoup/custom/sensor/pan_tilt.py index 6eeb7c618..2a14ccbd6 100644 --- a/stonesoup/custom/sensor/pan_tilt.py +++ b/stonesoup/custom/sensor/pan_tilt.py @@ -6,8 +6,7 @@ import numpy as np from stonesoup.base import Property -from stonesoup.custom.sensor.action.pan_tilt import PanTiltActionsGenerator, \ - PanTiltUAVActionsGenerator +from stonesoup.custom.sensor.action.angle import PanUAVActionsGenerator, TiltUAVActionsGenerator from stonesoup.functions import cart2sphere from stonesoup.models.clutter import ClutterModel from stonesoup.models.measurement.linear import LinearGaussian @@ -22,58 +21,6 @@ from stonesoup.functions import build_rotation_matrix -class PanTiltCamera(PassiveElevationBearing): - """A camera that can pan and tilt.""" - - pan_tilt: StateVector = ActionableProperty( - doc="A StateVector containing the sensor pan and tilt angles. Defaults to a zero vector", - default=None, - generator_cls=PanTiltActionsGenerator) - fov_angle: float = Property( - doc="The field of view (FOV) angle (in radians).") - clutter_model: ClutterModel = Property( - default=None, - doc="An optional clutter generator that adds a set of simulated " - ":class:`Clutter` objects to the measurements at each time step. 
" - "The clutter is simulated according to the provided distribution.") - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True, - **kwargs) -> Set[TrueDetection]: - detections = set() - measurement_model = self.measurement_model - - for truth in ground_truths: - # Transform state to measurement space and generate - # random noise - measurement_vector = measurement_model.function(truth, noise=noise, **kwargs) - - # Check if state falls within sensor's FOV - fov_min = -self.fov_angle / 2 - fov_max = +self.fov_angle / 2 - bearing_t = measurement_vector[1, 0] - elevation_t = measurement_vector[0, 0] - - # Do not measure if state not in FOV - if (not fov_min <= bearing_t <= fov_max) or (not fov_min <= elevation_t <= fov_max): - continue - - detection = TrueDetection(measurement_vector, - measurement_model=measurement_model, - timestamp=truth.timestamp, - groundtruth_path=truth) - detections.add(detection) - - # Generate clutter at this time step - if self.clutter_model is not None: - self.clutter_model.measurement_model = measurement_model - clutter = self.clutter_model.function(ground_truths) - detections |= clutter - - return detections - class PanTiltUAVCamera(Sensor): """A camera that can pan and tilt.""" @@ -92,10 +39,14 @@ class PanTiltUAVCamera(Sensor): doc="The field of view (FOV) angle (in radians). If provided in a list, the first element " "is the pan FOV angle and the second element is the tilt FOV angle. Else, the same " "FOV angle is used for both pan and tilt.") - pan_tilt: StateVector = ActionableProperty( - doc="A StateVector containing the sensor pan and tilt angles. Defaults to a zero vector", - default=None, - generator_cls=PanTiltUAVActionsGenerator) + pan: Angle = ActionableProperty( + doc="The sensor pan. 
Defaults to zero", + default=Angle(0), + generator_cls=PanUAVActionsGenerator) + tilt: Angle = ActionableProperty( + doc="The sensor tilt. Defaults to zero", + default=Angle(0), + generator_cls=TiltUAVActionsGenerator) clutter_model: ClutterModel = Property( default=None, doc="An optional clutter generator that adds a set of simulated " @@ -106,8 +57,6 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if isinstance(self.fov_angle, float): self.fov_angle = [self.fov_angle, self.fov_angle] - if self.pan_tilt is None: - self.pan_tilt = StateVector([Angle(0), Angle(0)]) @property def measurement_model(self): @@ -176,4 +125,4 @@ def orientation(self) -> Optional[StateVector]: if self.movement_controller is None: return None return self.movement_controller.orientation + self.rotation_offset \ - + StateVector([0, self.pan_tilt[1], self.pan_tilt[0]]) \ No newline at end of file + + StateVector([0, self.tilt, self.pan]) \ No newline at end of file From 7a73825a2b97a5f86315f7f94916cedb9b7a48f1 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Tue, 27 Sep 2022 17:13:00 +0100 Subject: [PATCH 14/87] Added SMCPHD_JIPDA tracker component --- stonesoup/custom/tracker.py | 152 ++++++++++++++++++++++++++++++++++++ 1 file changed, 152 insertions(+) create mode 100644 stonesoup/custom/tracker.py diff --git a/stonesoup/custom/tracker.py b/stonesoup/custom/tracker.py new file mode 100644 index 000000000..64e830402 --- /dev/null +++ b/stonesoup/custom/tracker.py @@ -0,0 +1,152 @@ +from datetime import datetime + +import numpy as np +from scipy.stats import multivariate_normal + +from stonesoup.base import Property, Base +from stonesoup.custom.jipda import JIPDAWithEHM2 +from stonesoup.custom.smcphd import SMCPHDFilter, SMCPHDInitiator +from stonesoup.functions import gm_reduce_single +from stonesoup.gater.distance import DistanceGater +from stonesoup.hypothesiser.probability import IPDAHypothesiser +from stonesoup.measures import Mahalanobis +from 
stonesoup.models.measurement import MeasurementModel +from stonesoup.models.transition import TransitionModel +from stonesoup.predictor.kalman import KalmanPredictor +from stonesoup.resampler import Resampler +from stonesoup.resampler.particle import SystematicResampler +from stonesoup.tracker import Tracker +from stonesoup.types.array import StateVectors +from stonesoup.types.numeric import Probability +from stonesoup.types.state import State, ParticleState +from stonesoup.types.update import GaussianStateUpdate +from stonesoup.updater.kalman import KalmanUpdater + + +class SMCPHD_JIPDA(Base): + transition_model: TransitionModel = Property(doc='The transition model') + measurement_model: MeasurementModel = Property(doc='The measurement model') + prob_detect: Probability = Property(doc='The probability of detection') + prob_death: Probability = Property(doc='The probability of death') + prob_birth: Probability = Property(doc='The probability of birth') + birth_rate: float = Property( + doc='The birth rate (i.e. number of new/born targets at each iteration(') + birth_density: State = Property( + doc='The birth density (i.e. density from which we sample birth particles)') + clutter_intensity: float = Property(doc='The clutter intensity per unit volume') + num_samples: int = Property(doc='The number of samples. Default is 1024', default=1024) + birth_scheme: str = Property( + doc='The scheme for birth particles. Options are "expansion" | "mixture". 
' + 'Default is "expansion"', + default='expansion' + ) + start_time: datetime = Property(doc='Start time of the tracker', default=None) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + if self.start_time is None: + self.start_time = datetime.now() + + self._tracks = set() + self._predictor = KalmanPredictor(self.transition_model) + self._updater = KalmanUpdater(self.measurement_model) + self._hypothesiser = IPDAHypothesiser(self._predictor, self._updater, + self.clutter_intensity, + prob_detect=self.prob_detect, + prob_survive=1-self.prob_death) + self._hypothesiser = DistanceGater(self._hypothesiser, Mahalanobis(), 10) + self._associator = JIPDAWithEHM2(self._hypothesiser) + + resampler = SystematicResampler() + phd_filter = SMCPHDFilter(birth_density=self.birth_density, + transition_model=self.transition_model, + measurement_model=self.measurement_model, + prob_detect=self.prob_detect, + prob_death=self.prob_death, + prob_birth=self.prob_birth, + birth_rate=self.birth_rate, + clutter_intensity=self.clutter_intensity, + num_samples=self.num_samples, + resampler=resampler, + birth_scheme=self.birth_scheme) + # Sample prior state from birth density + state_vector = StateVectors(multivariate_normal.rvs(self.birth_density.state_vector.ravel(), + self.birth_density.covar, + size=self.num_samples).T) + weight = np.full((self.num_samples,), Probability(1 / self.num_samples)) + state = ParticleState(state_vector=state_vector, weight=weight, timestamp=self.start_time) + + self._initiator = SMCPHDInitiator(filter=phd_filter, prior=state) + + @property + def tracks(self): + return self._tracks + + def track(self, detections, timestamp): + + tracks = list(self.tracks) + detections = list(detections) + num_tracks = len(tracks) + num_detections = len(detections) + + # Perform data association + associations = self._associator.associate(tracks, detections, timestamp) + + assoc_prob_matrix = np.zeros((num_tracks, num_detections + 1)) + for i, 
track in enumerate(tracks): + for hyp in associations[track]: + if not hyp: + assoc_prob_matrix[i, 0] = hyp.weight + else: + j = next(d_i for d_i, detection in enumerate(detections) + if hyp.measurement == detection) + assoc_prob_matrix[i, j + 1] = hyp.weight + + rho = np.zeros((len(detections))) + for j, detection in enumerate(detections): + rho_tmp = 1 + if len(assoc_prob_matrix): + for i, track in enumerate(tracks): + rho_tmp *= 1 - assoc_prob_matrix[i, j + 1] + rho[j] = rho_tmp + + for track, multihypothesis in associations.items(): + + # calculate each Track's state as a Gaussian Mixture of + # its possible associations with each detection, then + # reduce the Mixture to a single Gaussian State + posterior_states = [] + posterior_state_weights = [] + for hypothesis in multihypothesis: + posterior_state_weights.append(hypothesis.probability) + if hypothesis: + posterior_states.append(self._updater.update(hypothesis)) + else: + posterior_states.append(hypothesis.prediction) + + # Merge/Collapse to single Gaussian + means = StateVectors([state.state_vector for state in posterior_states]) + covars = np.stack([state.covar for state in posterior_states], axis=2) + weights = np.asarray(posterior_state_weights) + + post_mean, post_covar = gm_reduce_single(means, covars, weights) + + track.append(GaussianStateUpdate( + np.array(post_mean), np.array(post_covar), + multihypothesis, + multihypothesis[0].prediction.timestamp)) + + tracks = set(tracks) + new_tracks = self._initiator.initiate(detections, timestamp, weights=rho) + tracks |= new_tracks + + # Delete tracks that have not been updated for a while + del_tracks = set() + for track in tracks: + if track.exist_prob < 0.1: + del_tracks.add(track) + tracks -= del_tracks + + self._tracks = set(tracks) + return self._tracks \ No newline at end of file From cac2398bf0fdb1ecd4c21969fc9ee75fe76f6390 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Tue, 27 Sep 2022 17:13:36 +0100 Subject: [PATCH 15/87] Added sensor management 
examples --- examples/reactive-isr/sensor_management.py | 240 +++++++++++++ .../reactive-isr/smcphd_init-sm-example.py | 331 ++++++++++++++++++ 2 files changed, 571 insertions(+) create mode 100644 examples/reactive-isr/sensor_management.py create mode 100644 examples/reactive-isr/smcphd_init-sm-example.py diff --git a/examples/reactive-isr/sensor_management.py b/examples/reactive-isr/sensor_management.py new file mode 100644 index 000000000..a73421f78 --- /dev/null +++ b/examples/reactive-isr/sensor_management.py @@ -0,0 +1,240 @@ +#!/usr/bin/env python +# coding: utf-8 + + +import numpy as np +import random +from datetime import datetime, timedelta +import time + +import matplotlib.pyplot as plt +from matplotlib.patches import Rectangle +from ordered_set import OrderedSet + +from stonesoup.custom.functions import get_camera_footprint +from stonesoup.plotter import Plotter +from stonesoup.types.state import StateVector +from stonesoup.custom.sensor.pan_tilt import PanTiltUAVCamera +from stonesoup.types.angle import Angle +from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \ + ConstantVelocity +from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState +from stonesoup.predictor.kalman import KalmanPredictor +from stonesoup.updater.kalman import ExtendedKalmanUpdater +from stonesoup.types.state import GaussianState +from stonesoup.types.track import Track +from stonesoup.sensormanager.reward import UncertaintyRewardFunction +from stonesoup.sensormanager import BruteForceSensorManager, OptimizeBruteSensorManager +from stonesoup.hypothesiser.distance import DistanceHypothesiser +from stonesoup.measures import Mahalanobis +from stonesoup.dataassociator.neighbour import GNNWith2DAssignment +from stonesoup.metricgenerator.tracktotruthmetrics import SIAPMetrics +from stonesoup.measures import Euclidean +from stonesoup.dataassociator.tracktotrack import TrackToTruth +from stonesoup.metricgenerator.uncertaintymetric import 
SumofCovarianceNormsMetric +from stonesoup.metricgenerator.manager import SimpleManager + +np.random.seed(1990) +random.seed(1990) + +# Parameters +# ========== # Simulation start time +num_iter = 100 # Number of simulation steps +ntruths = 2 # Number of ground truths +total_no_sensors = 1 +start_time = datetime.now() + +# Models +# ======l +transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.005), + ConstantVelocity(0.005), + ConstantVelocity(0)]) + +# Simulate Groundtruth +# ==================== +gnd_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), + ConstantVelocity(0.), + ConstantVelocity(0.)]) +truths = set() +truth = GroundTruthPath([GroundTruthState([0, 0.2, 0, 0.2, 0, 0], timestamp=start_time)]) +for k in range(1, num_iter + 1): + truth.append(GroundTruthState( + gnd_transition_model.function(truth[k - 1], noise=True, + time_interval=timedelta(seconds=1)), + timestamp=start_time + timedelta(seconds=k))) +truths.add(truth) + +truth = GroundTruthPath([GroundTruthState([0, 0.2, 20, -0.2, 0, 0], timestamp=start_time)]) +for k in range(1, num_iter + 1): + truth.append(GroundTruthState( + gnd_transition_model.function(truth[k - 1], noise=True, + time_interval=timedelta(seconds=1)), + timestamp=start_time + timedelta(seconds=k))) +truths.add(truth) + +# Create sensors +# ============== +sensors = set() +for n in range(0, total_no_sensors): + rotation_offset = StateVector( + [Angle(0), Angle(-np.pi / 2), Angle(0)]) # Camera rotation offset + pan_tilt = StateVector([Angle(0), Angle(-np.pi / 32)]) # Camera pan and tilt + + sensor = PanTiltUAVCamera(ndim_state=6, mapping=[0, 2, 4], + noise_covar=np.diag([0.001, 0.001, 0.001]), + fov_angle=[np.radians(15), np.radians(10)], + rotation_offset=rotation_offset, + pan=pan_tilt[0], tilt=pan_tilt[1], + position=StateVector([10., 10., 100.])) + sensors.add(sensor) +for sensor in sensors: + sensor.timestamp = start_time + +# Predctor and Updater +# ==================== 
+predictor = KalmanPredictor(transition_model) +updater = ExtendedKalmanUpdater(measurement_model=None) +# measurement model is added to detections by the sensor + +# Initialise tracks +# ================= +tracks = [] +for truth in truths: + sv = truth[0].state_vector + prior = GaussianState(sv, np.diag([0.5, 0.5, 0.5, 0.5, 0.5, 0.5]), timestamp=start_time) + tracks.append(Track([prior])) + +# Initialise sensor manager +# ========================= +reward_function = UncertaintyRewardFunction(predictor, updater) +sensor_manager = BruteForceSensorManager(sensors, reward_function) +# sensor_manager = OptimizeBruteSensorManager(sensors, +# reward_function=reward_function, +# n_grid_points=15, +# finish=True) + +# Hypothesiser and Data Associator +# ================================ +hypothesiser = DistanceHypothesiser(predictor, updater, measure=Mahalanobis(), missed_distance=5) +data_associator = GNNWith2DAssignment(hypothesiser) + +# Run the sensor manager +# ====================== + +# Start timer for cell execution time +cell_start_time2 = time.time() +timestamps = [] +for k in range(1, num_iter + 1): + timestamps.append(start_time + timedelta(seconds=k)) + +fig = plt.figure(figsize=(10, 6)) +ax = fig.add_subplot(1, 1, 1) +plt.ion() +for k, timestep in enumerate(timestamps): + + print(timestep) + # Generate chosen configuration + chosen_actions = sensor_manager.choose_actions(tracks, timestep) + + # Create empty dictionary for measurements + measurements = [] + + for chosen_action in chosen_actions: + for sensor, actions in chosen_action.items(): + sensor.add_actions(actions) + + ax.cla() + ax.set_xlabel("$x$") + ax.set_ylabel("$y$") + ax.set_xlim(-10, 30) + ax.set_ylim(-10, 30) + ax.set_aspect('equal') + + # Fov ranges (min, center, max) + xmin, xmax, ymin, ymax = get_camera_footprint(sensor) + + ax.add_patch( + Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, facecolor='none', edgecolor='b')) + + truth_states = OrderedSet(truth[timestep] for truth in truths) + 
for sensor in sensors: + sensor.act(timestep) + + # Observe this ground truth + sensor_measurements = sensor.measure(truth_states, noise=True) + measurements.extend(sensor_measurements) + + # Fov ranges (min, center, max) + xmin, xmax, ymin, ymax = get_camera_footprint(sensor) + + ax.add_patch( + Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, facecolor='none', edgecolor='r')) + for truth in truths: + data = np.array([state.state_vector for state in truth[:k + 2]]) + ax.plot(data[:, 0], data[:, 2], '--', label="Ground truth") + + hypotheses = data_associator.associate(tracks, + measurements, + timestep) + for track in tracks: + hypothesis = hypotheses[track] + if hypothesis.measurement: + post = updater.update(hypothesis) + track.append(post) + else: # When data associator says no detections are good enough, we'll keep the prediction + track.append(hypothesis.prediction) + + for track in tracks: + data = np.array([state.state_vector for state in track]) + ax.plot(data[:, 0], data[:, 2], '-', label="Ground truth") + + plt.pause(0.1) +cell_run_time2 = round(time.time() - cell_start_time2, 2) + +# Plot the results +# ================ + +# Plot ground truths, tracks and uncertainty ellipses for each target. 
+plotter = Plotter() +plotter.plot_sensors(sensors) +plotter.plot_ground_truths(truths, [0, 2]) +plotter.plot_tracks(set(tracks), [0, 2], uncertainty=True) + +# Metrics +siap_generator = SIAPMetrics(position_measure=Euclidean((0, 2)), + velocity_measure=Euclidean((1, 3))) +associator = TrackToTruth(association_threshold=30) +uncertainty_generator = SumofCovarianceNormsMetric() + +metric_manager = SimpleManager([siap_generator, uncertainty_generator], + associator=associator) +metric_manager.add_data(truths, tracks) +metricsA = metric_manager.generate_metrics() + +# SIAP metrics +fig, axes = plt.subplots(2) +times = metric_manager.list_timestamps() +pa_metricA = metricsA['SIAP Position Accuracy at times'] +va_metricA = metricsA['SIAP Velocity Accuracy at times'] +axes[0].set(title='Positional Accuracy', xlabel='Time', ylabel='PA') +axes[0].plot(times, [metric.value for metric in pa_metricA.value], + label='BruteForceSensorManager') +axes[0].legend() +axes[1].set(title='Velocity Accuracy', xlabel='Time', ylabel='VA') +axes[1].plot(times, [metric.value for metric in va_metricA.value], + label='BruteForceSensorManager') +axes[1].legend() + +# Uncertainty metrics +uncertainty_metricA = metricsA['Sum of Covariance Norms Metric'] +fig = plt.figure() +ax = fig.add_subplot(1, 1, 1) +ax.plot([i.timestamp for i in uncertainty_metricA.value], + [i.value for i in uncertainty_metricA.value], + label='BruteForceSensorManager') +ax.set_ylabel("Sum of covariance matrix norms") +ax.set_xlabel("Time") +ax.legend() + +# Print run time +print(f'Optimised Brute Force: {cell_run_time2} s') diff --git a/examples/reactive-isr/smcphd_init-sm-example.py b/examples/reactive-isr/smcphd_init-sm-example.py new file mode 100644 index 000000000..02e7bcca1 --- /dev/null +++ b/examples/reactive-isr/smcphd_init-sm-example.py @@ -0,0 +1,331 @@ +from matplotlib import pyplot as plt +from matplotlib.patches import Ellipse, Rectangle +from ordered_set import OrderedSet + +from stonesoup.custom.functions 
import get_camera_footprint +from stonesoup.custom.jipda import JIPDAWithEHM2 +from stonesoup.custom.sensor.pan_tilt import PanTiltUAVCamera +from stonesoup.deleter.time import UpdateTimeDeleter +from stonesoup.functions import gm_reduce_single +from stonesoup.gater.distance import DistanceGater +from stonesoup.hypothesiser.probability import IPDAHypothesiser +from stonesoup.measures import Mahalanobis +from stonesoup.predictor.kalman import KalmanPredictor +from stonesoup.resampler.particle import SystematicResampler +from stonesoup.sensormanager import BruteForceSensorManager +from stonesoup.sensormanager.reward import UncertaintyRewardFunction +from stonesoup.types.angle import Angle +from stonesoup.types.array import StateVector, StateVectors +from stonesoup.types.numeric import Probability +from stonesoup.types.state import GaussianState, ParticleState +from stonesoup.custom.smcphd import SMCPHDFilter, SMCPHDInitiator +from stonesoup.custom.tracker import SMCPHD_JIPDA + +from datetime import datetime +from datetime import timedelta +import numpy as np +from scipy.stats import uniform, multivariate_normal + +from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \ + ConstantVelocity +from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState +from stonesoup.types.update import GaussianStateUpdate +from stonesoup.updater.kalman import KalmanUpdater + + +def plot_cov_ellipse(cov, pos, nstd=1, ax=None, **kwargs): + """ + Plots an `nstd` sigma error ellipse based on the specified covariance + matrix (`cov`). Additional keyword arguments are passed on to the + ellipse patch artist. + Parameters + ---------- + cov : The 2x2 covariance matrix to base the ellipse on + pos : The location of the center of the ellipse. Expects a 2-element + sequence of [x0, y0]. + nstd : The radius of the ellipse in numbers of standard deviations. + Defaults to 2 standard deviations. + ax : The axis that the ellipse will be plotted on. 
Defaults to the + current axis. + Additional keyword arguments are pass on to the ellipse patch. + Returns + ------- + A matplotlib ellipse artist + """ + + def eigsorted(cov): + vals, vecs = np.linalg.eigh(cov) + order = vals.argsort()[::-1] + return vals[order], vecs[:, order] + + if ax is None: + ax = plt.gca() + + vals, vecs = eigsorted(cov) + theta = np.degrees(np.arctan2(*vecs[:, 0][::-1])) + + # Width and height are "full" widths, not radius + width, height = 2 * nstd * np.sqrt(vals) + ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, + alpha=0.4, **kwargs) + + ax.add_artist(ellip) + return ellip + +# np.random.seed(1991) + + +# Parameters +# ========== +start_time = datetime.now() # Simulation start time +prob_detect = Probability(.9) # 90% chance of detection. +prob_death = Probability(0.01) # Probability of death +prob_birth = Probability(0.1) # Probability of birth +prob_survive = Probability(0.99) # Probability of survival +birth_rate = 0.02 # Birth-rate (Mean number of new targets per scan) +clutter_rate = 2 # Clutter-rate (Mean number of clutter measurements per scan) +surveillance_region = [[-10, 30], [0, 30]] # The surveillance region x=[-10, 30], y=[0, 30] +surveillance_area = (surveillance_region[0][1] - surveillance_region[0][0]) \ + * (surveillance_region[1][1] - surveillance_region[1][0]) +clutter_intensity = clutter_rate / surveillance_area # Clutter intensity per unit volume/area +birth_density = GaussianState(StateVector(np.array([10., 0.0, 10., 0.0, 0.0, 0.0])), + np.diag([10. ** 2, 1. ** 2, 10. ** 2, 1. ** 2, .0, .0])) # Birth density +birth_scheme = 'mixture' # Birth scheme. 
Possible values are 'expansion' and 'mixture' +num_particles = 2 ** 13 # Number of particles used by the PHD filter +num_iter = 100 # Number of simulation steps +total_no_sensors = 1 +PLOT = True # Set [True | False] to turn plotting [ON | OFF] + +# Models +# ====== +# Transition model +transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.01), + ConstantVelocity(0.01), + ConstantVelocity(0.01)]) + +# Simulate Groundtruth +# ==================== +gnd_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), + ConstantVelocity(0.), + ConstantVelocity(0.)]) +truths = set() +truth = GroundTruthPath([GroundTruthState([0, 0.2, 0, 0.2, 0, 0], timestamp=start_time)]) +for k in range(1, num_iter + 1): + truth.append(GroundTruthState( + gnd_transition_model.function(truth[k - 1], noise=True, time_interval=timedelta(seconds=1)), + timestamp=start_time + timedelta(seconds=k))) +truths.add(truth) + +truth = GroundTruthPath([GroundTruthState([0, 0.2, 20, -0.2, 0, 0], timestamp=start_time)]) +for k in range(1, num_iter + 1): + truth.append(GroundTruthState( + gnd_transition_model.function(truth[k - 1], noise=True, time_interval=timedelta(seconds=1)), + timestamp=start_time + timedelta(seconds=k))) +truths.add(truth) + +timestamps = [] +for k in range(1, num_iter + 1): + timestamps.append(start_time + timedelta(seconds=k)) + + +# Create sensors +# ============== +sensors = set() +for n in range(0, total_no_sensors): + rotation_offset = StateVector( + [Angle(0), Angle(-np.pi / 2), Angle(0)]) # Camera rotation offset + pan_tilt = StateVector([Angle(0), Angle(-np.pi / 32)]) # Camera pan and tilt + + sensor = PanTiltUAVCamera(ndim_state=6, mapping=[0, 2, 4], + noise_covar=np.diag([0.05, 0.05, 0.05]), + fov_angle=[np.radians(15), np.radians(10)], + rotation_offset=rotation_offset, + pan=pan_tilt[0], tilt=pan_tilt[1], + position=StateVector([10., 10., 100.])) + sensors.add(sensor) +for sensor in sensors: + sensor.timestamp = start_time + +# 
Predictor & Updater +# =================== +predictor = KalmanPredictor(transition_model) +updater = KalmanUpdater(None) + +# Hypothesiser & Data Associator +# ============================== +hypothesiser = IPDAHypothesiser(predictor, updater, clutter_intensity, prob_detect=prob_detect, + prob_survive=prob_survive) +# hypothesiser = PDAHypothesiser(predictor, updater, clutter_intensity, prob_detect=prob_detect) +hypothesiser = DistanceGater(hypothesiser, Mahalanobis(), 10) +associator = JIPDAWithEHM2(hypothesiser) + +# Track Deleter +# ============= +deleter = UpdateTimeDeleter(time_since_update=timedelta(minutes=5)) + +# Initiator +# ========= +# Initialise PHD Filter +resampler = SystematicResampler() +phd_filter = SMCPHDFilter(birth_density=birth_density, transition_model=transition_model, + measurement_model=None, prob_detect=prob_detect, + prob_death=prob_death, prob_birth=prob_birth, + birth_rate=birth_rate, clutter_intensity=clutter_intensity, + num_samples=num_particles, resampler=resampler, + birth_scheme=birth_scheme) + +# Sample prior state from birth density +state_vector = StateVectors(multivariate_normal.rvs(birth_density.state_vector.ravel(), + birth_density.covar, + size=num_particles).T) +weight = np.ones((num_particles,)) * Probability(1 / num_particles) +state = ParticleState(state_vector=state_vector, weight=weight, timestamp=start_time) + + +initiator = SMCPHDInitiator(filter=phd_filter, prior=state) + +tracker = SMCPHD_JIPDA(birth_density=birth_density, transition_model=transition_model, + measurement_model=None, prob_detect=prob_detect, + prob_death=prob_death, prob_birth=prob_birth, + birth_rate=birth_rate, clutter_intensity=clutter_intensity, + num_samples=num_particles, birth_scheme=birth_scheme, + start_time=start_time) + +# Initialise sensor manager +# ========================= +reward_function = UncertaintyRewardFunction(tracker._predictor, tracker._updater) +sensor_manager = BruteForceSensorManager(sensors, reward_function) + +# 
Estimate +# ======== + +# Plot the prior +if PLOT: + fig1 = plt.figure(figsize=(10, 6)) + ax1 = plt.gca() + # ax1.plot(state.state_vector[0, :], state.state_vector[2, :], 'r.') + +# Main tracking loop +tracks = set() +for k, timestamp in enumerate(timestamps): + + tracks = list(tracks) + + # Generate chosen configuration + chosen_actions = sensor_manager.choose_actions(tracks, timestamp) + + # Create empty dictionary for measurements + detections = [] + + for chosen_action in chosen_actions: + for sensor, actions in chosen_action.items(): + sensor.add_actions(actions) + + # Fov ranges (min, center, max) + foot1 = get_camera_footprint(sensor) + + truth_states = OrderedSet(truth[timestamp] for truth in truths) + for sensor in sensors: + sensor.act(timestamp) + + # Observe this ground truth + sensor_measurements = sensor.measure(truth_states, noise=True) + detections.extend(sensor_measurements) + + foot2 = get_camera_footprint(sensor) + + # tracks = tracker.track(detections, timestamp) + detections = list(detections) + num_tracks = len(tracks) + num_detections = len(detections) + + # Perform data association + associations = associator.associate(tracks, detections, timestamp) + + assoc_prob_matrix = np.zeros((num_tracks, num_detections + 1)) + for i, track in enumerate(tracks): + for hyp in associations[track]: + if not hyp: + assoc_prob_matrix[i, 0] = hyp.weight + else: + j = next(d_i for d_i, detection in enumerate(detections) + if hyp.measurement == detection) + assoc_prob_matrix[i, j + 1] = hyp.weight + + rho = np.zeros((len(detections))) + for j, detection in enumerate(detections): + rho_tmp = 1 + if len(assoc_prob_matrix): + for i, track in enumerate(tracks): + rho_tmp *= 1 - assoc_prob_matrix[i, j + 1] + rho[j] = rho_tmp + + for track, multihypothesis in associations.items(): + + # calculate each Track's state as a Gaussian Mixture of + # its possible associations with each detection, then + # reduce the Mixture to a single Gaussian State + posterior_states = 
[] + posterior_state_weights = [] + for hypothesis in multihypothesis: + posterior_state_weights.append(hypothesis.probability) + if hypothesis: + posterior_states.append(updater.update(hypothesis)) + else: + posterior_states.append(hypothesis.prediction) + + # Merge/Collapse to single Gaussian + means = StateVectors([state.state_vector for state in posterior_states]) + covars = np.stack([state.covar for state in posterior_states], axis=2) + weights = np.asarray(posterior_state_weights) + + post_mean, post_covar = gm_reduce_single(means, covars, weights) + + track.append(GaussianStateUpdate( + np.array(post_mean), np.array(post_covar), + multihypothesis, + multihypothesis[0].prediction.timestamp)) + + tracks = set(tracks) + new_tracks = initiator.initiate(detections, timestamp, weights=rho) + tracks |= new_tracks + state = initiator._state + + # Delete tracks that have not been updated for a while + del_tracks = set() + for track in tracks: + if track.exist_prob < 0.1: + del_tracks.add(track) + tracks -= del_tracks + + print('\n===========================================') + # print(f'Num targets: {np.sum(state.weight)} - Num new targets: {len(new_tracks)}') + for track in tracks: + print(f'Track {track.id} - Exist prob: {track.exist_prob}') + + # Plot resulting density + if PLOT: + ax1.cla() + xmin, xmax, ymin, ymax = foot1 + ax1.add_patch( + Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, facecolor='none', edgecolor='b')) + xmin, xmax, ymin, ymax = foot2 + ax1.add_patch( + Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, facecolor='none', edgecolor='r')) + for i, truth in enumerate(truths): + data = np.array([s.state_vector for s in truth[:k + 1]]) + ax1.plot(data[:, 0], data[:, 2], '--', label=f'Groundtruth Track {i+1}') + if len(detections): + det_data = np.array([det.state_vector for det in detections]) + ax1.plot(det_data[:, 0], det_data[:, 1], '*g', label='Detections') + # ax1.plot(state.state_vector[0, :], state.state_vector[2, :], + # 'r.', 
label='Particles') + + for track in tracks: + data = np.array([s.state_vector for s in track]) + ax1.plot(data[:, 0], data[:, 2], label=f'Track {track.id}') + plot_cov_ellipse(track.covar[[0, 2], :][:, [0, 2]], track.state_vector[[0, 2], :], + edgecolor='r', facecolor='none', ax=ax1) + plt.axis([*surveillance_region[0], *surveillance_region[1]]) + plt.legend(loc='upper right') + plt.pause(0.01) From dcfc30acf9b1e341d8029446138d840dcc63efe1 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Thu, 29 Sep 2022 10:54:52 +0100 Subject: [PATCH 16/87] Add position dependent probability of detection capability --- stonesoup/custom/hypothesiser/probability.py | 76 ++++++++++++++++++++ stonesoup/custom/smcphd.py | 18 +++-- stonesoup/custom/tracker.py | 25 ++++++- stonesoup/hypothesiser/probability.py | 1 - 4 files changed, 113 insertions(+), 7 deletions(-) create mode 100644 stonesoup/custom/hypothesiser/probability.py diff --git a/stonesoup/custom/hypothesiser/probability.py b/stonesoup/custom/hypothesiser/probability.py new file mode 100644 index 000000000..bd860f1ce --- /dev/null +++ b/stonesoup/custom/hypothesiser/probability.py @@ -0,0 +1,76 @@ +from copy import copy +from typing import Union, Callable + +import numpy as np +from scipy.stats import multivariate_normal as mn + +from stonesoup.base import Property +from stonesoup.hypothesiser.probability import PDAHypothesiser +from stonesoup.types.detection import MissedDetection +from stonesoup.types.hypothesis import SingleProbabilityHypothesis +from stonesoup.types.multihypothesis import MultipleHypothesis +from stonesoup.types.numeric import Probability +from stonesoup.types.state import State + + +class IPDAHypothesiser(PDAHypothesiser): + """ Integrated PDA Hypothesiser """ + prob_detect: Union[Probability, Callable[[State], Probability]] = Property( + default=Probability(0.85), + doc="Target Detection Probability") + prob_survive: Probability = Property(doc="Probability of survival", default=Probability(0.99)) + + def 
__init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + a = 2 + + def hypothesise(self, track, detections, timestamp, **kwargs): + r"""Evaluate and return all track association hypotheses. + """ + + hypotheses = list() + + # Common state & measurement prediction + prediction = self.predictor.predict(track, timestamp=timestamp, **kwargs) + # Compute predicted existence + time_interval = timestamp - track.timestamp + prob_survive = np.exp(-(1-self.prob_survive)*time_interval.total_seconds()) + track.exist_prob = prob_survive * track.exist_prob + # Missed detection hypothesis + prob_detect = self.prob_detect(prediction.state_vector) + probability = Probability(1 - prob_detect * self.prob_gate * track.exist_prob) + w = (1 - track.exist_prob) / ((1 - prob_detect * self.prob_gate) * track.exist_prob) + hypotheses.append( + SingleProbabilityHypothesis( + prediction, + MissedDetection(timestamp=timestamp), + probability, + metadata={"w": w} + )) + + # True detection hypotheses + for detection in detections: + # Re-evaluate prediction + prediction = self.predictor.predict( + track.state, timestamp=detection.timestamp) + # Compute measurement prediction and probability measure + measurement_prediction = self.updater.predict_measurement( + prediction, detection.measurement_model, **kwargs) + prob_detect = self.prob_detect(prediction.state_vector) + # Calculate difference before to handle custom types (mean defaults to zero) + # This is required as log pdf coverts arrays to floats + log_pdf = mn.logpdf( + (detection.state_vector - measurement_prediction.state_vector).ravel(), + cov=measurement_prediction.covar) + pdf = Probability(log_pdf, log_value=True) + probability = (pdf * prob_detect * track.exist_prob)/self.clutter_spatial_density + + # True detection hypothesis + hypotheses.append( + SingleProbabilityHypothesis( + prediction, + detection, + probability, + measurement_prediction)) + + return MultipleHypothesis(hypotheses, normalise=True, total_weight=1) \ 
No newline at end of file diff --git a/stonesoup/custom/smcphd.py b/stonesoup/custom/smcphd.py index 9fe6d502a..a48eecd78 100644 --- a/stonesoup/custom/smcphd.py +++ b/stonesoup/custom/smcphd.py @@ -1,5 +1,5 @@ from copy import copy -from typing import List, Any +from typing import List, Any, Union, Callable import numpy as np from scipy.stats import multivariate_normal @@ -34,7 +34,9 @@ class SMCPHDFilter(Base): transition_model: TransitionModel = Property(doc='The transition model') measurement_model: MeasurementModel = Property(doc='The measurement model') - prob_detect: Probability = Property(doc='The probability of detection') + prob_detect: Union[Probability, Callable[[State], Probability]] = Property( + default=Probability(0.85), + doc="Target Detection Probability") prob_death: Probability = Property(doc='The probability of death') prob_birth: Probability = Property(doc='The probability of birth') birth_rate: float = Property(doc='The birth rate (i.e. number of new/born targets at each iteration(') @@ -48,6 +50,12 @@ class SMCPHDFilter(Base): default='expansion' ) + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if not callable(self.prob_detect): + prob_detect = copy(self.prob_detect) + self.prob_detect = lambda state: prob_detect + def predict(self, state, timestamp): """ Predict the next state of the target density @@ -213,8 +221,10 @@ def get_weights_per_hypothesis(self, prediction, detections, meas_weights): # Compute g(z|x) matrix as in [1] g = self.get_measurement_likelihoods(prediction, detections, meas_weights) + prob_detect = self.prob_detect(prediction) + # Calculate w^{n,i} Eq. 
(20) of [2] - Ck = meas_weights * self.prob_detect * g * prediction.weight[:, np.newaxis] + Ck = meas_weights * prob_detect * g * prediction.weight[:, np.newaxis] C = np.sum(Ck, axis=0) k = np.array([detection.metadata['clutter_density'] if 'clutter_density' in detection.metadata else self.clutter_intensity @@ -222,7 +232,7 @@ def get_weights_per_hypothesis(self, prediction, detections, meas_weights): C_plus = C + k weights_per_hyp = np.zeros((num_samples, len(detections) + 1), dtype=Probability) - weights_per_hyp[:, 0] = (1 - self.prob_detect) * prediction.weight + weights_per_hyp[:, 0] = (1 - prob_detect) * prediction.weight if len(detections): weights_per_hyp[:, 1:] = Ck / C_plus diff --git a/stonesoup/custom/tracker.py b/stonesoup/custom/tracker.py index 64e830402..cddae9aee 100644 --- a/stonesoup/custom/tracker.py +++ b/stonesoup/custom/tracker.py @@ -1,3 +1,4 @@ +from copy import copy from datetime import datetime import numpy as np @@ -8,7 +9,7 @@ from stonesoup.custom.smcphd import SMCPHDFilter, SMCPHDInitiator from stonesoup.functions import gm_reduce_single from stonesoup.gater.distance import DistanceGater -from stonesoup.hypothesiser.probability import IPDAHypothesiser +from stonesoup.custom.hypothesiser.probability import IPDAHypothesiser from stonesoup.measures import Mahalanobis from stonesoup.models.measurement import MeasurementModel from stonesoup.models.transition import TransitionModel @@ -26,7 +27,7 @@ class SMCPHD_JIPDA(Base): transition_model: TransitionModel = Property(doc='The transition model') measurement_model: MeasurementModel = Property(doc='The measurement model') - prob_detect: Probability = Property(doc='The probability of detection') + prob_detection: Probability = Property(doc='The probability of detection') prob_death: Probability = Property(doc='The probability of death') prob_birth: Probability = Property(doc='The probability of birth') birth_rate: float = Property( @@ -45,6 +46,8 @@ class SMCPHD_JIPDA(Base): def __init__(self, 
*args, **kwargs): super().__init__(*args, **kwargs) + self.prob_detect = self.prob_detection + if self.start_time is None: self.start_time = datetime.now() @@ -79,10 +82,25 @@ def __init__(self, *args, **kwargs): self._initiator = SMCPHDInitiator(filter=phd_filter, prior=state) + @property def tracks(self): return self._tracks + @property + def prob_detect(self): + return self._prob_detect + + @prob_detect.setter + def prob_detect(self, prob_detect): + if not callable(prob_detect): + prob_detect = copy(prob_detect) + self._prob_detect = lambda state: prob_detect + if hasattr(self, '_hypothesiser'): + self._hypothesiser.prob_detect = prob_detect + if hasattr(self, '_initiator'): + self._initiator.filter.prob_detect = self._prob_detect + def track(self, detections, timestamp): tracks = list(self.tracks) @@ -90,6 +108,9 @@ def track(self, detections, timestamp): num_tracks = len(tracks) num_detections = len(detections) + if not len(detections): + return self.tracks + # Perform data association associations = self._associator.associate(tracks, detections, timestamp) diff --git a/stonesoup/hypothesiser/probability.py b/stonesoup/hypothesiser/probability.py index 51e6515f3..f54b3da62 100644 --- a/stonesoup/hypothesiser/probability.py +++ b/stonesoup/hypothesiser/probability.py @@ -246,4 +246,3 @@ def hypothesise(self, track, detections, timestamp, **kwargs): measurement_prediction)) return MultipleHypothesis(hypotheses, normalise=True, total_weight=1) - From bbcd7faf888842b5fdc24e92866485e500dc29ba Mon Sep 17 00:00:00 2001 From: sglvladi Date: Thu, 29 Sep 2022 11:40:16 +0100 Subject: [PATCH 17/87] Add missing __init__.py files --- stonesoup/custom/__init__.py | 0 stonesoup/custom/hypothesiser/__init__.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 stonesoup/custom/__init__.py create mode 100644 stonesoup/custom/hypothesiser/__init__.py diff --git a/stonesoup/custom/__init__.py b/stonesoup/custom/__init__.py new file mode 100644 index 
000000000..e69de29bb diff --git a/stonesoup/custom/hypothesiser/__init__.py b/stonesoup/custom/hypothesiser/__init__.py new file mode 100644 index 000000000..e69de29bb From 4ad2b745d116a18873ee779f0a99ef8df2250199 Mon Sep 17 00:00:00 2001 From: Lyudmil Vladimirov Date: Wed, 26 Oct 2022 18:27:46 +0100 Subject: [PATCH 18/87] Position dependent probability of detection fix --- stonesoup/custom/hypothesiser/probability.py | 4 ++-- stonesoup/custom/smcphd.py | 2 +- stonesoup/custom/tracker.py | 13 +++++++++---- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/stonesoup/custom/hypothesiser/probability.py b/stonesoup/custom/hypothesiser/probability.py index bd860f1ce..2f390388b 100644 --- a/stonesoup/custom/hypothesiser/probability.py +++ b/stonesoup/custom/hypothesiser/probability.py @@ -37,7 +37,7 @@ def hypothesise(self, track, detections, timestamp, **kwargs): prob_survive = np.exp(-(1-self.prob_survive)*time_interval.total_seconds()) track.exist_prob = prob_survive * track.exist_prob # Missed detection hypothesis - prob_detect = self.prob_detect(prediction.state_vector) + prob_detect = self.prob_detect(prediction) probability = Probability(1 - prob_detect * self.prob_gate * track.exist_prob) w = (1 - track.exist_prob) / ((1 - prob_detect * self.prob_gate) * track.exist_prob) hypotheses.append( @@ -56,7 +56,7 @@ def hypothesise(self, track, detections, timestamp, **kwargs): # Compute measurement prediction and probability measure measurement_prediction = self.updater.predict_measurement( prediction, detection.measurement_model, **kwargs) - prob_detect = self.prob_detect(prediction.state_vector) + prob_detect = self.prob_detect(prediction) # Calculate difference before to handle custom types (mean defaults to zero) # This is required as log pdf coverts arrays to floats log_pdf = mn.logpdf( diff --git a/stonesoup/custom/smcphd.py b/stonesoup/custom/smcphd.py index a48eecd78..ce4177285 100644 --- a/stonesoup/custom/smcphd.py +++ b/stonesoup/custom/smcphd.py 
@@ -224,7 +224,7 @@ def get_weights_per_hypothesis(self, prediction, detections, meas_weights): prob_detect = self.prob_detect(prediction) # Calculate w^{n,i} Eq. (20) of [2] - Ck = meas_weights * prob_detect * g * prediction.weight[:, np.newaxis] + Ck = meas_weights * prob_detect[:, np.newaxis] * g * prediction.weight[:, np.newaxis] C = np.sum(Ck, axis=0) k = np.array([detection.metadata['clutter_density'] if 'clutter_density' in detection.metadata else self.clutter_intensity diff --git a/stonesoup/custom/tracker.py b/stonesoup/custom/tracker.py index cddae9aee..f7c663d28 100644 --- a/stonesoup/custom/tracker.py +++ b/stonesoup/custom/tracker.py @@ -96,10 +96,15 @@ def prob_detect(self, prob_detect): if not callable(prob_detect): prob_detect = copy(prob_detect) self._prob_detect = lambda state: prob_detect - if hasattr(self, '_hypothesiser'): - self._hypothesiser.prob_detect = prob_detect - if hasattr(self, '_initiator'): - self._initiator.filter.prob_detect = self._prob_detect + else: + self._prob_detect = copy(prob_detect) + if hasattr(self, '_hypothesiser'): + if hasattr(self._hypothesiser, 'hypothesiser'): + self._hypothesiser.hypothesiser.prob_detect = self._prob_detect + else: + self._hypothesiser.prob_detect = self._prob_detect + if hasattr(self, '_initiator'): + self._initiator.filter.prob_detect = self._prob_detect def track(self, detections, timestamp): From 4697fa5b8cd113633c63f5ca3d08e545bc7cb1ba Mon Sep 17 00:00:00 2001 From: Lyudmil Vladimirov Date: Mon, 31 Oct 2022 13:15:33 +0000 Subject: [PATCH 19/87] Fix wrong action class in AngleUAVActionsGenerator default_action() overloads --- stonesoup/custom/sensor/action/angle.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stonesoup/custom/sensor/action/angle.py b/stonesoup/custom/sensor/action/angle.py index 4df8c5802..521184f45 100644 --- a/stonesoup/custom/sensor/action/angle.py +++ b/stonesoup/custom/sensor/action/angle.py @@ -63,10 +63,10 @@ def __init__(self, *args, 
**kwargs): @property def default_action(self): - return ChangeAngleAction(generator=self, - end_time=self.end_time, - target_value=self.current_value, - increasing_angle=None) + return self._action_cls(generator=self, + end_time=self.end_time, + target_value=self.current_value, + increasing_angle=None) def __call__(self, resolution=None, epsilon=None): """ From 5c742ee22f84ac46c7eced6e2128c6c1ac7723ce Mon Sep 17 00:00:00 2001 From: Lyudmil Vladimirov Date: Sun, 6 Nov 2022 23:38:59 +0000 Subject: [PATCH 20/87] WIP: Correct implementation of calculating camera footprint --- stonesoup/custom/functions/__init__.py | 389 ++++++++++++++++++++++++- 1 file changed, 376 insertions(+), 13 deletions(-) diff --git a/stonesoup/custom/functions/__init__.py b/stonesoup/custom/functions/__init__.py index 048914c92..a1d772156 100644 --- a/stonesoup/custom/functions/__init__.py +++ b/stonesoup/custom/functions/__init__.py @@ -1,23 +1,386 @@ +import math import numpy as np +# pip install vector3d +from vector3d.vector import Vector + +from stonesoup.functions import cart2sphere, sphere2cart +from stonesoup.types.angle import Angle + + +class CameraCalculator: + """Ported and modified from https://gist.github.com/luipir/dc33864b53cf6634f9cdd2bce712d3d9""" + + @staticmethod + def getBoundingPolygon(FOVh, FOVv, altitude, roll, pitch, heading): + """Get corners of the polygon captured by the camera on the ground. + The calculations are performed in the axes origin (0, 0, altitude) + and the points are not yet translated to camera's X-Y coordinates. 
+ Parameters: + FOVh (float): Horizontal field of view in radians + FOVv (float): Vertical field of view in radians + altitude (float): Altitude of the camera in meters + heading (float): Heading of the camera (z axis) in radians + roll (float): Roll of the camera (x axis) in radians + pitch (float): Pitch of the camera (y axis) in radians + Returns: + vector3d.vector.Vector: Array with 4 points defining a polygon + """ + # import ipdb; ipdb.set_trace() + ray11 = CameraCalculator.ray1(FOVh, FOVv) + ray22 = CameraCalculator.ray2(FOVh, FOVv) + ray33 = CameraCalculator.ray3(FOVh, FOVv) + ray44 = CameraCalculator.ray4(FOVh, FOVv) + + rotatedVectors = CameraCalculator.rotateRays(ray11, ray22, ray33, ray44, + roll, pitch, heading) + + origin = Vector(0, 0, altitude) + intersections = CameraCalculator.getRayGroundIntersections(rotatedVectors, origin) + + roll2, pitch2, heading2, FOVv2, FOVh2 = CameraCalculator.getFovRPH(intersections, altitude) + return intersections + + @staticmethod + def getFovRPH(intersections, altitude): + + # Calculate unit vectors to the ground, assuming camera is at the origin + rotVecs = [Vector(i.x, i.y, -altitude).normalize() for i in intersections] + + # First rotation aligns the centroid of the polygon with the negative z axis + centroidVec = (rotVecs[0] + rotVecs[1] + rotVecs[2] + rotVecs[3]).normalize() + rot1 = rotation_matrix_from_vectors(Vector(z=-1), centroidVec).T + + # Get vectors after first rotation + rotVecs1 = [Vector(*(rot1@np.array([[r.x], [r.y], [r.z]])).flatten()) for r in rotVecs] + + # Second rotation aligns the centroid of the polygon with the negative y axis + rot2 = rotation_matrix_from_vectors(Vector(y=1), (rotVecs1[0]-rotVecs1[1]).normalize()).T + + # Get final rotation matrix + R = rot2@rot1 + + # Calculate roll, pitch and heading + roll, pitch, heading = roll_pitch_yaw_from_matrix(R.T) + + # Calculate FOVv and FOVh + rays2 = CameraCalculator.unrotateRays(*rotVecs, roll, pitch, heading) + rays2norm = [Vector(ray.x / 
-ray.z, ray.y / -ray.z, -1) for i, ray in enumerate(rays2)] + FOVv = np.arctan2(rays2norm[0].x, 1) * 2 + FOVh = np.arctan2(rays2norm[0].y, 1) * 2 + return roll, pitch, heading, FOVv, FOVh + + # Ray-vectors defining the camera's field of view. FOVh and FOVv are interchangeable + # depending on the camera's orientation + @staticmethod + def ray1(FOVh, FOVv): + """ + Parameters: + FOVh (float): Horizontal field of view in radians + FOVv (float): Vertical field of view in radians + Returns: + vector3d.vector.Vector: normalised vector + """ + ray = Vector(math.tan(FOVv / 2), math.tan(FOVh / 2), -1) + return ray.normalize() + + @staticmethod + def ray2(FOVh, FOVv): + """ + Parameters: + FOVh (float): Horizontal field of view in radians + FOVv (float): Vertical field of view in radians + Returns: + vector3d.vector.Vector: normalised vector + """ + ray = Vector(math.tan(FOVv / 2), -math.tan(FOVh / 2), -1) + return ray.normalize() + + @staticmethod + def ray3(FOVh, FOVv): + """ + Parameters: + FOVh (float): Horizontal field of view in radians + FOVv (float): Vertical field of view in radians + Returns: + vector3d.vector.Vector: normalised vector + """ + ray = Vector(-math.tan(FOVv / 2), -math.tan(FOVh / 2), -1) + return ray.normalize() + + @staticmethod + def ray4(FOVh, FOVv): + """ + Parameters: + FOVh (float): Horizontal field of view in radians + FOVv (float): Vertical field of view in radians + Returns: + vector3d.vector.Vector: normalised vector + """ + ray = Vector(-math.tan(FOVv / 2), math.tan(FOVh / 2), -1) + return ray.normalize() + + @staticmethod + def rotationMatrix(roll, pitch, yaw): + """Calculate rotation matrix + Parameters: + roll float: Roll rotation + pitch float: Pitch rotation + yaw float: Yaw rotation + Returns: + Returns new rotated ray-vectors + """ + sinAlpha = math.sin(yaw) + sinBeta = math.sin(pitch) + sinGamma = math.sin(roll) + cosAlpha = math.cos(yaw) + cosBeta = math.cos(pitch) + cosGamma = math.cos(roll) + m00 = cosAlpha * cosBeta + m01 = 
cosAlpha * sinBeta * sinGamma - sinAlpha * cosGamma + m02 = cosAlpha * sinBeta * cosGamma + sinAlpha * sinGamma + m10 = sinAlpha * cosBeta + m11 = sinAlpha * sinBeta * sinGamma + cosAlpha * cosGamma + m12 = sinAlpha * sinBeta * cosGamma - cosAlpha * sinGamma + m20 = -sinBeta + m21 = cosBeta * sinGamma + m22 = cosBeta * cosGamma + + return np.array([[m00, m01, m02], + [m10, m11, m12], + [m20, m21, m22]]) + + @staticmethod + def rotateRays(ray1, ray2, ray3, ray4, roll, pitch, yaw): + """Rotates the four ray-vectors around all 3 axes + Parameters: + ray1 (vector3d.vector.Vector): First ray-vector + ray2 (vector3d.vector.Vector): Second ray-vector + ray3 (vector3d.vector.Vector): Third ray-vector + ray4 (vector3d.vector.Vector): Fourth ray-vector + roll float: Roll rotation + pitch float: Pitch rotation + yaw float: Yaw rotation + Returns: + Returns new rotated ray-vectors + """ + rotationMatrix = CameraCalculator.rotationMatrix(roll, pitch, yaw) + rayMatrix = np.array([[ray.x, ray.y, ray.z] for ray in [ray1, ray2, ray3, ray4]]).T + rotatedRayMatrix = (rotationMatrix @ rayMatrix).T + rayArray = [Vector(*rayMatrix.flatten()) for rayMatrix in rotatedRayMatrix] + return rayArray + + @staticmethod + def unrotateRays(ray1, ray2, ray3, ray4, roll, pitch, yaw): + """Unrotates the four ray-vectors around all 3 axes + Parameters: + ray1 (vector3d.vector.Vector): First ray-vector + ray2 (vector3d.vector.Vector): Second ray-vector + ray3 (vector3d.vector.Vector): Third ray-vector + ray4 (vector3d.vector.Vector): Fourth ray-vector + roll float: Roll rotation + pitch float: Pitch rotation + yaw float: Yaw rotation + Returns: + Returns new rotated ray-vectors + """ + rotationMatrix = CameraCalculator.rotationMatrix(roll, pitch, yaw).T + rayMatrix = np.array([[ray.x, ray.y, ray.z] for ray in [ray1, ray2, ray3, ray4]]).T + rotatedRayMatrix = (rotationMatrix @ rayMatrix).T + rayArray = [Vector(*rayMatrix.flatten()) for rayMatrix in rotatedRayMatrix] + return rayArray + + @staticmethod 
+ def getRayGroundIntersections(rays, origin): + """ + Finds the intersections of the camera's ray-vectors + and the ground approximated by a horizontal plane + Parameters: + rays (vector3d.vector.Vector[]): Array of 4 ray-vectors + origin (vector3d.vector.Vector): Position of the camera. The computation were developed + assuming the camera was at the axes origin (0, 0, altitude) and the + results translated by the camera's real position afterwards. + Returns: + vector3d.vector.Vector + """ + # Vector3d [] intersections = new Vector3d[rays.length]; + # for (int i = 0; i < rays.length; i ++) { + # intersections[i] = CameraCalculator.findRayGroundIntersection(rays[i], origin); + # } + # return intersections + + # 1to1 translation without python syntax optimisation + # intersections = [] + # for i in range(len(rays)): + # intersections.append(CameraCalculator.findRayGroundIntersection(rays[i], origin)) + return [CameraCalculator.findRayGroundIntersection(ray, origin) for ray in rays] + + @staticmethod + def findRayGroundIntersection(ray, origin): + """ + Finds a ray-vector's intersection with the ground approximated by a planeç + Parameters: + ray (vector3d.vector.Vector): Ray-vector + origin (vector3d.vector.Vector): Camera's position + Returns: + vector3d.vector.Vector + """ + # Parametric form of an equation + # P = origin + vector * t + x = Vector(origin.x, ray.x) + y = Vector(origin.y, ray.y) + z = Vector(origin.z, ray.z) + + # Equation of the horizontal plane (ground) + # -z = 0 + + # Calculate t by substituting z + t = - (z.x / z.y) + + # Substitute t in the original parametric equations to get points of intersection + return Vector(x.x + x.y * t, y.x + y.y * t, z.x + z.y * t) + def get_camera_footprint(camera): - try: - pan, tilt = camera.pan_tilt - except: - pan, tilt = camera.pan, camera.tilt - altitude = camera.position[2] - fov_range_pan = (pan-camera.fov_angle[0]/2, pan, pan+camera.fov_angle[0]/2) - fov_range_tilt = (tilt-camera.fov_angle[1]/2, tilt, 
tilt+camera.fov_angle[1]/2) - x_min = altitude * np.tan(fov_range_tilt[0]) + camera.position[0] - x_max = altitude * np.tan(fov_range_tilt[2]) + camera.position[0] - y_min = altitude * np.tan(fov_range_pan[0]) + camera.position[1] - y_max = altitude * np.tan(fov_range_pan[2]) + camera.position[1] - return x_min, x_max, y_min, y_max + # altitude = camera.position[2] + # try: + # pan, tilt = camera.pan_tilt + # except: + # pan, tilt = camera.pan, camera.tilt + # + # fov_range_pan = (pan - camera.fov_angle[0] / 2, pan, pan + camera.fov_angle[0] / 2) + # fov_range_tilt = (tilt - camera.fov_angle[1] / 2, tilt, tilt + camera.fov_angle[1] / 2) + # x_min = altitude * np.tan(fov_range_tilt[0]) + camera.position[0] + # x_max = altitude * np.tan(fov_range_tilt[2]) + camera.position[0] + # y_min = altitude * np.tan(fov_range_pan[0]) + camera.position[1] + # y_max = altitude * np.tan(fov_range_pan[2]) + camera.position[1] + + # Once the camera is rotated, the z axis becomes the x axis, and the x axis becomes the z axis + # TODO: More testing is needed to make sure this is correct + roll, pitch, heading = (camera.orientation[2], + camera.orientation[1] + np.pi/2, + camera.orientation[0]) + + xmin, xmax, ymin, ymax = get_camera_footprint_low(camera.position, roll, pitch, heading, + camera.fov_angle) + return xmin, xmax, ymin, ymax + + +def get_camera_footprint_low(position, roll, pitch, heading, fov_angle): + bpol = CameraCalculator.getBoundingPolygon(fov_angle[0], + fov_angle[1], + position[2], + roll, # Tested, works + -pitch, # Tested, works + -heading) + xvals = np.sort(np.unique(np.round([v.x + position[0] for v in bpol], 2))) + yvals = np.sort(np.unique(np.round([v.y + position[1] for v in bpol], 2))) + + xmin = xvals[0] + xmax = xvals[-1] + ymin = yvals[0] + ymax = yvals[-1] + + return xmin, xmax, ymin, ymax + + +def get_roll_pitch_yaw_fov(x_min, x_max, y_min, y_max, altitude): + intersections = [Vector(x_max, y_max), Vector(x_max, y_min), Vector(x_min, y_min), + 
Vector(x_min, y_max)] + + roll, pitch, heading, fov_tilt, fov_pan = CameraCalculator.getFovRPH(intersections, altitude) + # Pitch needs to be inverted to get the tilt angle + pitch = -pitch + + return Angle(roll), Angle(pitch), Angle(heading), Angle(fov_tilt), Angle(fov_pan) + + +def lla_to_pan_tilt_fov(pos, x_min, x_max, y_min, y_max): + """Converts lat, lon, alt to az, el""" + + # We assume that the camera is looking at the ground, with heading pointing east + # Hence, panning is along the latitude axis, and tilting is along the longitude axis + + alt = pos[2] + phi1 = np.arctan2(x_min - pos[0], alt) + phi2 = np.arctan2(x_max - pos[0], alt) + + theta1 = np.arctan2(y_min - pos[1], alt) + theta2 = np.arctan2(y_max - pos[1], alt) + + fov_angle = (theta2 - theta1, phi2 - phi1) + pan, tilt = (fov_angle[0]) / 2 + theta1, (fov_angle[1]) / 2 + phi1 + + return pan, tilt, fov_angle def get_nearest(array, value): array = np.asarray(array) idx = (np.abs(array - value)).argmin() - return array[idx] \ No newline at end of file + return array[idx] + + +def rotation_matrix_from_vectors(vec1, vec2): + """ Find the rotation matrix that aligns vec1 to vec2 + :param vec1: A 3d "source" vector + :param vec2: A 3d "destination" vector + :return mat: A transform matrix (3x3) which when applied to vec1, aligns it with vec2. 
+ """ + if isinstance(vec1, Vector): + vec1 = np.array([vec1.x, vec1.y, vec1.z]) + if isinstance(vec2, Vector): + vec2 = np.array([vec2.x, vec2.y, vec2.z]) + if np.allclose(vec1, vec2): + return np.eye(3) + a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3) + v = np.cross(a, b) + c = np.dot(a, b) + s = np.linalg.norm(v) + kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]]) + rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2)) + return rotation_matrix + + +def roll_pitch_yaw_from_matrix(matrix): + """ Returns the Euler angles from a rotation matrix + :param matrix: A transform matrix (3x3) + :return: Euler angles in the form of a tuple (roll, pitch, yaw) + """ + roll = np.arctan2(matrix[2, 1], matrix[2, 2]) + pitch = np.arctan2(-matrix[2, 0], np.sqrt(matrix[2, 1] ** 2 + matrix[2, 2] ** 2)) + yaw = np.arctan2(matrix[1, 0], matrix[0, 0]) + return roll, pitch, yaw + + +def rigid_transform_3D(A, B): + assert A.shape == B.shape + num_rows, num_cols = A.shape + if num_rows != 3: + raise Exception(f"matrix A is not 3xN, it is {num_rows}x{num_cols}") + num_rows, num_cols = B.shape + if num_rows != 3: + raise Exception(f"matrix B is not 3xN, it is {num_rows}x{num_cols}") + # find mean column wise + centroid_A = np.mean(A, axis=1) + centroid_B = np.mean(B, axis=1) + # ensure centroids are 3x1 + centroid_A = centroid_A.reshape(-1, 1) + centroid_B = centroid_B.reshape(-1, 1) + # subtract mean + Am = A - centroid_A + Bm = B - centroid_B + H = Am @ np.transpose(Bm) + # sanity check + #if linalg.matrix_rank(H) < 3: + # raise ValueError("rank of H = {}, expecting 3".format(linalg.matrix_rank(H))) + # find rotation + U, S, Vt = np.linalg.svd(H) + R = Vt.T @ U.T + # special reflection case + if np.linalg.det(R) < 0: + print("det(R) < R, reflection detected!, correcting for it ...") + Vt[2,:] *= -1 + R = Vt.T @ U.T + t = -R @ centroid_A + centroid_B + return R, t \ No newline at end of file From 
a99de2a6700f400178d9ae84b1677a8d837aca22 Mon Sep 17 00:00:00 2001 From: Lyudmil Vladimirov Date: Wed, 9 Nov 2022 15:07:36 +0000 Subject: [PATCH 21/87] Increase default resolution of AngleUAVActionsGenerator --- stonesoup/custom/sensor/action/angle.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stonesoup/custom/sensor/action/angle.py b/stonesoup/custom/sensor/action/angle.py index 521184f45..68331672c 100644 --- a/stonesoup/custom/sensor/action/angle.py +++ b/stonesoup/custom/sensor/action/angle.py @@ -54,7 +54,7 @@ class AngleUAVActionsGenerator(RealNumberActionGenerator): owner: object = Property(doc="Object with `timestamp`, `rpm` (revolutions per minute) and " "dwell-centre attributes") - resolution: Angle = Property(default=np.radians(5), doc="Resolution of action space") + resolution: Angle = Property(default=np.radians(10), doc="Resolution of action space") _action_cls = ChangeAngleAction From 5aaee5724740688924cf3aa6fd00ebb52cccf58c Mon Sep 17 00:00:00 2001 From: Lyudmil Vladimirov Date: Tue, 15 Nov 2022 18:00:41 +0000 Subject: [PATCH 22/87] Added pyehm and vector3d to dependencies --- setup.cfg | 2 ++ 1 file changed, 2 insertions(+) diff --git a/setup.cfg b/setup.cfg index fd186fa7c..92f9ec5fe 100644 --- a/setup.cfg +++ b/setup.cfg @@ -27,6 +27,8 @@ install_requires = rtree scipy utm + pyehm + vector3d [options.extras_require] dev = From f5c0cf6a82ee0b105ff3bc29e729369aa0c152a9 Mon Sep 17 00:00:00 2001 From: Lyudmil Vladimirov Date: Tue, 15 Nov 2022 18:01:21 +0000 Subject: [PATCH 23/87] Use log-probability calculations in SMCPHDFilter --- stonesoup/custom/smcphd.py | 80 +++++++++++++++++++++++--------------- 1 file changed, 49 insertions(+), 31 deletions(-) diff --git a/stonesoup/custom/smcphd.py b/stonesoup/custom/smcphd.py index ce4177285..a745f81df 100644 --- a/stonesoup/custom/smcphd.py +++ b/stonesoup/custom/smcphd.py @@ -2,6 +2,7 @@ from typing import List, Any, Union, Callable import numpy as np +from scipy.special import 
logsumexp from scipy.stats import multivariate_normal from stonesoup.base import Base, Property @@ -39,8 +40,10 @@ class SMCPHDFilter(Base): doc="Target Detection Probability") prob_death: Probability = Property(doc='The probability of death') prob_birth: Probability = Property(doc='The probability of birth') - birth_rate: float = Property(doc='The birth rate (i.e. number of new/born targets at each iteration(') - birth_density: State = Property(doc='The birth density (i.e. density from which we sample birth particles)') + birth_rate: float = Property( + doc='The birth rate (i.e. number of new/born targets at each iteration(') + birth_density: State = Property( + doc='The birth density (i.e. density from which we sample birth particles)') clutter_intensity: float = Property(doc='The clutter intensity per unit volume') resampler: Resampler = Property(default=None, doc='Resampler to prevent particle degeneracy') num_samples: int = Property(doc='The number of samples. Default is 1024', default=1024) @@ -84,16 +87,16 @@ def predict(self, state, timestamp): if self.birth_scheme == 'expansion': # Expansion birth scheme, as described in [1] # Compute number of birth particles (J_k) as a fraction of the number of particles - num_birth = round(float(self.prob_birth * self.num_samples)) + num_birth = round(float(self.prob_birth) * self.num_samples) # Sample birth particles birth_particles = multivariate_normal.rvs(self.birth_density.mean.ravel(), self.birth_density.covar, num_birth) - birth_weights = np.ones((num_birth,)) * Probability(self.birth_rate / num_birth) + birth_weights = np.full((num_birth,), Probability(self.birth_rate / num_birth)) # Surviving particle weights - prob_survive = np.exp(-self.prob_death*time_interval.total_seconds()) + prob_survive = np.exp(-float(self.prob_death)*time_interval.total_seconds()) pred_weights = prob_survive * prior_weights # Append birth particles to predicted ones @@ -161,15 +164,18 @@ def update(self, prediction, detections, 
timestamp, meas_weights=None): # Update weights Eq. (8) of [1] # w_k^i = \sum_{z \in Z_k}{w^{n,i}}, where i is the index of z in Z_k - post_weights = np.sum(weights_per_hyp, axis=1) + log_post_weights = logsumexp(np.log(weights_per_hyp).astype(float), axis=1) # Resample - num_targets = np.sum(post_weights) # N_{k|k} + log_num_targets = logsumexp(log_post_weights) # N_{k|k} update = copy(prediction) - update.weight = post_weights / num_targets # Normalize weights + # Normalize weights + update.weight = Probability.from_log_ufunc(log_post_weights - log_num_targets) if self.resampler is not None: update = self.resampler.resample(update, self.num_samples) # Resample - update.weight = np.array(update.weight) * num_targets # De-normalize + # De-normalize + update.weight = Probability.from_log_ufunc(np.log(update.weight).astype(float) + + log_num_targets) return Update.from_state( state=prediction, @@ -201,16 +207,16 @@ def iterate(self, state, detections: List[Detection], timestamp): update = self.update(prediction, detections, timestamp) return update - def get_measurement_likelihoods(self, prediction, detections, meas_weights): + def get_measurement_loglikelihoods(self, prediction, detections, meas_weights): num_samples = prediction.state_vector.shape[1] # Compute g(z|x) matrix as in [1] g = np.zeros((num_samples, len(detections)), dtype=Probability) for i, detection in enumerate(detections): if not meas_weights[i]: - g[:, i] = Probability(0) + g[:, i] = -np.inf continue - g[:, i] = detection.measurement_model.pdf(detection, prediction, - noise=True) + g[:, i] = detection.measurement_model.logpdf(detection, prediction, + noise=True) return g def get_weights_per_hypothesis(self, prediction, detections, meas_weights): @@ -219,24 +225,29 @@ def get_weights_per_hypothesis(self, prediction, detections, meas_weights): meas_weights = np.array([Probability(1) for _ in range(len(detections))]) # Compute g(z|x) matrix as in [1] - g = self.get_measurement_likelihoods(prediction, 
detections, meas_weights) + g = self.get_measurement_loglikelihoods(prediction, detections, meas_weights) - prob_detect = self.prob_detect(prediction) + # Get probability of detection + prob_detect = np.asfarray(self.prob_detect(prediction)) # Calculate w^{n,i} Eq. (20) of [2] - Ck = meas_weights * prob_detect[:, np.newaxis] * g * prediction.weight[:, np.newaxis] - C = np.sum(Ck, axis=0) - k = np.array([detection.metadata['clutter_density'] - if 'clutter_density' in detection.metadata else self.clutter_intensity - for detection in detections]) - C_plus = C + k - - weights_per_hyp = np.zeros((num_samples, len(detections) + 1), dtype=Probability) - weights_per_hyp[:, 0] = (1 - prob_detect) * prediction.weight + try: + Ck = np.log(meas_weights) + np.log(prob_detect[:, np.newaxis]) + g \ + + np.log(prediction.weight[:, np.newaxis]) + except IndexError: + Ck = np.log(meas_weights) + np.log(prob_detect) + g \ + + np.log(prediction.weight[:, np.newaxis]) + C = logsumexp(np.asfarray(Ck), axis=0) + k = np.log([detection.metadata['clutter_density'] + if 'clutter_density' in detection.metadata else self.clutter_intensity + for detection in detections]) + C_plus = np.logaddexp(C, k) + weights_per_hyp = np.full((num_samples, len(detections) + 1), -np.inf) + weights_per_hyp[:, 0] = np.log(1 - prob_detect) + np.log(prediction.weight) if len(detections): - weights_per_hyp[:, 1:] = Ck / C_plus + weights_per_hyp[:, 1:] = Ck - C_plus - return weights_per_hyp + return Probability.from_log_ufunc(weights_per_hyp) class SMCPHDInitiator(Initiator): @@ -255,21 +266,26 @@ def initiate(self, detections, timestamp, weights=None, **kwargs): # Predict forward prediction = self.filter.predict(self._state, timestamp) + # Calculate weights per hypothesis weights_per_hyp = self.filter.get_weights_per_hypothesis(prediction, detections, weights) + log_weights_per_hyp = np.log(weights_per_hyp).astype(float) - intensity_per_hyp = np.sum(weights_per_hyp, axis=0) - valid_inds = 
np.flatnonzero(intensity_per_hyp > self.threshold) + # Calculate intensity per hypothesis + log_intensity_per_hyp = logsumexp(log_weights_per_hyp, axis=0) + + # Find detections with intensity above threshold and initiate + valid_inds = np.flatnonzero(np.exp(log_intensity_per_hyp) > self.threshold) for idx in valid_inds: if not idx: continue particles_sv = copy(prediction.state_vector) - weight = weights_per_hyp[:, idx] / intensity_per_hyp[idx] + weight = np.exp(log_weights_per_hyp[:, idx] - log_intensity_per_hyp[idx]) mu = np.average(particles_sv, axis=1, weights=weight) - cov = np.cov(particles_sv, ddof=0, aweights=np.array(weight)) + cov = np.cov(particles_sv, ddof=0, aweights=weight) hypothesis = SingleProbabilityHypothesis(prediction, measurement=detections[idx-1], @@ -281,10 +297,12 @@ def initiate(self, detections, timestamp, weights=None, **kwargs): # if np.trace(track_state.covar) < 10: weights_per_hyp[:, idx] = Probability(0) track = Track([track_state]) - track.exist_prob = intensity_per_hyp[idx] + track.exist_prob = Probability(log_intensity_per_hyp[idx], log_value=True) tracks.add(track) weights[idx-1] = 0 + # Update filter self._state = self.filter.update(prediction, detections, timestamp, weights) + return tracks From a1111a288301a832b22e9666386df31fdc80f65d Mon Sep 17 00:00:00 2001 From: Lyudmil Vladimirov Date: Mon, 21 Nov 2022 11:15:40 +0000 Subject: [PATCH 24/87] Added camera actions example --- examples/reactive-isr/camera_actions.py | 85 +++++++++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100644 examples/reactive-isr/camera_actions.py diff --git a/examples/reactive-isr/camera_actions.py b/examples/reactive-isr/camera_actions.py new file mode 100644 index 000000000..cbffb0d39 --- /dev/null +++ b/examples/reactive-isr/camera_actions.py @@ -0,0 +1,85 @@ +import itertools +from datetime import datetime + +import numpy as np + +from stonesoup.custom.sensor.pan_tilt import PanTiltUAVCamera +from stonesoup.types.angle import Angle 
+from stonesoup.types.array import StateVector + +# Specify the rotation offset of the camera +# In this case we rotate the camera around the Y axis by 90 degrees, meaning that the camera is +# pointing downwards +# NOTE: Panning moves the footprint of the camera along the Y axis, and tilting moves the +# footprint along the X axis +rotation_offset = StateVector([Angle(0), Angle(-np.pi / 2), Angle(0)]) # Camera rotation offset + +# Specify the initial pan and tilt of the camera +pan = Angle(0) +tilt = Angle(0) + +# The camera is positioned at x=10, y=10, z=100 +position = StateVector([10., 10., 100.]) + +# We can also set the resolution of each actionable property. The resolution is used when +# discretising the action space. In this case, we set the resolution of both the pan and tilt to +# 10 degrees, meaning that the action space will contain values in the range [-pi/2, pi/2] with +# a step size of 10 degrees for each property. +# NOTE: Currently, the current state of each property is appended to the action space, meaning +# that the action space will contain 19 values for each property (not 18). In the current example, +# this means that the action for 0 degrees will be duplicated. This is a known "feature" and will +# be fixed in a future release. +resolutions = {'pan': np.radians(10), 'tilt': np.radians(10)} + +# Create a camera object +sensor = PanTiltUAVCamera(ndim_state=6, mapping=[0, 2, 4], + noise_covar=np.diag([0.05, 0.05, 0.05]), + fov_angle=[np.radians(15), np.radians(10)], + rotation_offset=rotation_offset, + pan=pan, tilt=pan, + resolutions=resolutions, + position=position) + +# Set a query time +timestamp = datetime.now() + +# Calling sensor.actions() will return a set of action generators. Each action generator is an +# object that contains all the actions that can be performed by the sensor at a given time. In this +# case, the sensor can perform two actions: pan and tilt. 
Hence, the result of sensor.actions() is +# a set of two action generators: one for panning and one for tilting. +action_generators = sensor.actions(timestamp) + +# Let's look at the action generators +# The first action generator is for panning. We can extract the action generator by searching for +# the action generator that controls the 'pan'. So, the following line of code simply filters the +# action generators that control the 'pan' of the camera (the for-if statement) and then selects +# the first action generator (since there is only one), via the next() statement. +pan_action_generator = next(ag for ag in action_generators if ag.attribute == 'pan') +# The second action generator is for tilting. We can extract the action generator by searching for +# the action generator that controls the 'tilt'. +tilt_action_generator = next(ag for ag in action_generators if ag.attribute == 'tilt') + +# We can now look at the actions that can be performed by the action generators. The action +# generators provide a Python "iterator" interface. This means that we can iterate over the action +# generators to get the actions that can be performed (e.g. with a "for" loop). Instead, we can +# also use the list() function to get a list of all the actions that can be performed. +possible_pan_actions = list(pan_action_generator) +possible_tilt_actions = list(tilt_action_generator) + +# Each action has a "target_value" property that specifies the value that the property will be +# set to if the action is performed. The following line of code prints the target values of the +# 10th action for pan and tilt. +print(possible_pan_actions[9].target_value) +print(possible_tilt_actions[9].target_value) + +# To get all the possible combinations of actions, we can use the itertools.product() function. 
+possible_action_combinations = list(itertools.product(possible_pan_actions, possible_tilt_actions)) + +# Let us now select the 10th action combination and task the sensor to perform the action. +chosen_action_combination = possible_action_combinations[9] +sensor.add_actions(chosen_action_combination) +sensor.act(timestamp) + +# The statement below is just an extra statement to allow us to breakpoint the code and inspect +# the possible actions. +end = True \ No newline at end of file From 02c79079d59a576bb0d25df2ac880406b94f0534 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 5 Dec 2022 15:01:40 +0000 Subject: [PATCH 25/87] Added MoveableUAVCamera --- stonesoup/custom/sensor/action/location.py | 116 ++++++++++++++ stonesoup/custom/sensor/moveable.py | 173 +++++++++++++++++++++ 2 files changed, 289 insertions(+) create mode 100644 stonesoup/custom/sensor/action/location.py create mode 100644 stonesoup/custom/sensor/moveable.py diff --git a/stonesoup/custom/sensor/action/location.py b/stonesoup/custom/sensor/action/location.py new file mode 100644 index 000000000..67ce34c27 --- /dev/null +++ b/stonesoup/custom/sensor/action/location.py @@ -0,0 +1,116 @@ +import itertools + +import numpy as np +from typing import Iterator + +from stonesoup.custom.functions import get_nearest +from stonesoup.types.array import StateVector + +from stonesoup.base import Property + +from stonesoup.sensor.action import Action, RealNumberActionGenerator + + +class ChangeLocationAction(Action): + def act(self, current_time, timestamp, init_value): + """Assumes that duration keeps within the action end time + + Parameters + ---------- + current_time: datetime.datetime + Current time + timestamp: datetime.datetime + Modification of attribute ends at this time stamp + init_value: Any + Current value of the dwell centre + + Returns + ------- + Any + The new value of the dwell centre""" + + if timestamp >= self.end_time: + return self.target_value # target direction + else: + return 
init_value # same direction + + +class LocationActionGenerator(RealNumberActionGenerator): + """Generates possible actions for changing the dwell centre of a sensor in a given + time period.""" + + owner: object = Property(doc="Object with `timestamp`, `rpm` (revolutions per minute) and " + "dwell-centre attributes") + resolution: float = Property(default=10, doc="Resolution of action space") + minmax: StateVector = Property(doc="Min and max values of the action space", + default=StateVector([-100, 100])) + + _action_cls = ChangeLocationAction + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def default_action(self): + return self._action_cls(generator=self, + end_time=self.end_time, + target_value=self.current_value) + + def __call__(self, resolution=None, epsilon=None): + """ + Parameters + ---------- + resolution : float + Resolution of yielded action target values + epsilon: float + Epsilon value for action target values + + Returns + ------- + :class:`.Action` + Action with target value + """ + if resolution is not None: + self.resolution = resolution + if epsilon is not None: + self.epsilon = epsilon + + @property + def initial_value(self): + return self.current_value + + @property + def min(self): + # Pan can rotate freely, while tilt is limited to +/- 90 degrees + return self.minmax[0] + + @property + def max(self): + # Pan can rotate freely, while tilt is limited to +/- 90 degrees + return self.minmax[1] + + def __contains__(self, item): + + if isinstance(item, self._action_cls): + item = item.target_value + + return self.min <= item <= self.max + + def __iter__(self) -> Iterator[ChangeLocationAction]: + """Returns all possible ChangePanTiltAction types""" + possible_values = np.arange(self.min, self.max, self.resolution, dtype=float) + + yield self.default_action + for angle in possible_values: + yield self._action_cls(generator=self, + end_time=self.end_time, + target_value=angle) + + def 
action_from_value(self, value): + if value not in self: + return None + possible_values = np.arange(self.min, self.max, self.resolution, dtype=float) + angle = get_nearest(possible_values, value) + return self._action_cls(generator=self, + end_time=self.end_time, + target_value=angle) \ No newline at end of file diff --git a/stonesoup/custom/sensor/moveable.py b/stonesoup/custom/sensor/moveable.py new file mode 100644 index 000000000..622b4cb22 --- /dev/null +++ b/stonesoup/custom/sensor/moveable.py @@ -0,0 +1,173 @@ +import datetime +from typing import Union, List, Set + +import numpy as np + +from stonesoup.base import Property +from stonesoup.custom.sensor.action.location import LocationActionGenerator +from stonesoup.models.clutter import ClutterModel +from stonesoup.models.measurement.linear import LinearGaussian +from stonesoup.sensor.action import ActionGenerator +from stonesoup.sensor.actionable import ActionableProperty +from stonesoup.sensor.sensor import Sensor +from stonesoup.types.array import CovarianceMatrix, StateVector +from stonesoup.types.detection import TrueDetection +from stonesoup.types.groundtruth import GroundTruthState + + +class MoveableUAVCamera(Sensor): + """A camera that can pan and tilt.""" + ndim_state: int = Property( + doc="Number of state dimensions. This is utilised by (and follows in\ + format) the underlying :class:`~.CartesianToElevationBearing`\ + model") + mapping: np.ndarray = Property( + doc="Mapping between the targets state space and the sensors\ + measurement capability") + noise_covar: CovarianceMatrix = Property( + doc="The sensor noise covariance matrix. This is utilised by\ + (and follow in format) the underlying \ + :class:`~.CartesianToElevationBearing` model") + radius: Union[float, List[float]] = Property( + doc="The field of view (FOV) angle (in radians). If provided in a list, the first element " + "is the pan FOV angle and the second element is the tilt FOV angle. 
Else, the same " + "FOV angle is used for both pan and tilt.") + clutter_model: ClutterModel = Property( + default=None, + doc="An optional clutter generator that adds a set of simulated " + ":class:`Clutter` objects to the measurements at each time step. " + "The clutter is simulated according to the provided distribution.") + location_x: float = ActionableProperty( + doc="The sensor x location. Defaults to zero", + default=0, + generator_cls=LocationActionGenerator + ) + location_y: float = ActionableProperty( + doc="The sensor y location. Defaults to zero", + default=0, + generator_cls=LocationActionGenerator + ) + minmax: StateVector = Property( + doc="The sensor min max location", + default=StateVector([-100, 100]) + ) + + @location_x.setter + def location_x(self, value): + self._property_location_x = value + if not self.movement_controller: + return + new_position = self.movement_controller.position.copy() + new_position[0] = value + self.movement_controller.position = new_position + + @location_y.setter + def location_y(self, value): + self._property_location_y = value + if not self.movement_controller: + return + new_position = self.movement_controller.position.copy() + new_position[1] = value + self.movement_controller.position = new_position + + @property + def measurement_model(self): + return LinearGaussian( + ndim_state=self.ndim_state, + mapping=self.mapping, + noise_covar=self.noise_covar) + + def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True, + **kwargs) -> Set[TrueDetection]: + + detections = set() + measurement_model = self.measurement_model + + for truth in ground_truths: + # Transform state to measurement space and generate random noise + measurement_vector = measurement_model.function(truth, noise=noise, **kwargs) + + # Normalise measurement vector relative to sensor position + norm_measurement_vector = measurement_vector.astype(float) - self.position.astype( + float) + + distance = 
np.linalg.norm(norm_measurement_vector[0:2]) + + # Do not measure if state not in FOV + if distance > self.radius: + continue + + detection = TrueDetection(measurement_vector, + measurement_model=measurement_model, + timestamp=truth.timestamp, + groundtruth_path=truth) + detections.add(detection) + + # Generate clutter at this time step + if self.clutter_model is not None: + self.clutter_model.measurement_model = measurement_model + clutter = self.clutter_model.function(ground_truths) + detections |= clutter + + return detections + + def _default_action(self, name, property_, timestamp): + """Returns the default action of the action generator associated with the property + (assumes the property is an :class:`~.ActionableProperty`.""" + + if self.resolutions and name in self.resolutions.keys(): + generator = property_.generator_cls(owner=self, + attribute=name, + start_time=self.timestamp, + end_time=timestamp, + resolution=self.resolutions[name], + minmax=self.minmax) + else: + generator = property_.generator_cls(owner=self, + attribute=name, + start_time=self.timestamp, + end_time=timestamp, + minmax=self.minmax) + return generator.default_action + + def actions(self, timestamp: datetime.datetime, start_timestamp: datetime.datetime = None + ) -> Set[ActionGenerator]: + """Method to return a set of action generators available up to a provided timestamp. + + A generator is returned for each actionable property that the sensor has. + + Parameters + ---------- + timestamp: datetime.datetime + Time of action finish. + start_timestamp: datetime.datetime, optional + Time of action start. + + Returns + ------- + : set of :class:`~.ActionGenerator` + Set of action generators, that describe the bounds of each action space. 
+ """ + + if not self.validate_timestamp(): + self.timestamp = timestamp + + if start_timestamp is None: + start_timestamp = self.timestamp + + generators = set() + for name, property_ in self._actionable_properties.items(): + if self.resolutions and name in self.resolutions.keys(): + generators.add(property_.generator_cls(owner=self, + attribute=name, + start_time=start_timestamp, + end_time=timestamp, + resolution=self.resolutions[name], + minmax=self.minmax)) + else: + generators.add(property_.generator_cls(owner=self, + attribute=name, + start_time=start_timestamp, + end_time=timestamp, + minmax=self.minmax)) + return generators \ No newline at end of file From 152711102a18238f48a27aeb14aa7f1b8f253760 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 5 Dec 2022 15:02:14 +0000 Subject: [PATCH 26/87] Added Sensor Management example with MoveableUAVCamera --- .../smcphd_init-sm-example-movable.py | 297 ++++++++++++++++++ stonesoup/custom/sensor/moveable.py | 2 +- 2 files changed, 298 insertions(+), 1 deletion(-) create mode 100644 examples/reactive-isr/smcphd_init-sm-example-movable.py diff --git a/examples/reactive-isr/smcphd_init-sm-example-movable.py b/examples/reactive-isr/smcphd_init-sm-example-movable.py new file mode 100644 index 000000000..2bff1b6c7 --- /dev/null +++ b/examples/reactive-isr/smcphd_init-sm-example-movable.py @@ -0,0 +1,297 @@ +from matplotlib import pyplot as plt +from matplotlib.patches import Ellipse, Rectangle +from ordered_set import OrderedSet + +from shapely.geometry import Point +from shapely.ops import unary_union + +from stonesoup.custom.sensor.moveable import MovableUAVCamera +from stonesoup.sensormanager import BruteForceSensorManager +from stonesoup.sensormanager.reward import UncertaintyRewardFunction +from stonesoup.types.angle import Angle +from stonesoup.types.array import StateVector +from stonesoup.types.numeric import Probability +from stonesoup.types.state import GaussianState, ParticleState +from 
stonesoup.custom.tracker import SMCPHD_JIPDA
+from matplotlib.path import Path
+
+from datetime import datetime
+from datetime import timedelta
+import numpy as np
+
+from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \
+    ConstantVelocity
+from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState
+from stonesoup.types.update import GaussianStateUpdate
+from stonesoup.updater.kalman import KalmanUpdater
+
+
+def plot_cov_ellipse(cov, pos, nstd=1, ax=None, **kwargs):
+    """
+    Plots an `nstd` sigma error ellipse based on the specified covariance
+    matrix (`cov`). Additional keyword arguments are passed on to the
+    ellipse patch artist.
+    Parameters
+    ----------
+    cov : The 2x2 covariance matrix to base the ellipse on
+    pos : The location of the center of the ellipse. Expects a 2-element
+        sequence of [x0, y0].
+    nstd : The radius of the ellipse in numbers of standard deviations.
+        Defaults to 1 standard deviation.
+    ax : The axis that the ellipse will be plotted on. Defaults to the
+        current axis.
+    Additional keyword arguments are passed on to the ellipse patch.
+ Returns + ------- + A matplotlib ellipse artist + """ + + def eigsorted(cov): + vals, vecs = np.linalg.eigh(cov) + order = vals.argsort()[::-1] + return vals[order], vecs[:, order] + + if ax is None: + ax = plt.gca() + + vals, vecs = eigsorted(cov) + theta = np.degrees(np.arctan2(*vecs[:, 0][::-1])) + + # Width and height are "full" widths, not radius + width, height = 2 * nstd * np.sqrt(vals) + ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, + alpha=0.4, **kwargs) + + ax.add_artist(ellip) + return ellip + +# np.random.seed(1991) + +def _prob_detect_func(fovs): + """Closure to return the probability of detection function for a given environment scan""" + + # Get the union of all field of views + fovs_union = unary_union(fovs) + if fovs_union.geom_type == 'MultiPolygon': + fovs = [poly for poly in fovs_union] + else: + fovs = [fovs_union] + + # Probability of detection nested function + def prob_detect_func(state): + for poly in fovs: + if isinstance(state, ParticleState): + prob_detect_arr = np.full((len(state),), Probability(0)) + path_p = Path(poly.boundary) + points = state.state_vector[[0, 2], :].T + inside_points = path_p.contains_points(points) + prob_detect_arr[inside_points] = prob_detect + return prob_detect_arr + else: + point = Point(state.state_vector[0, 0], state.state_vector[2, 0]) + return prob_detect if poly.contains(point) else Probability(0) + + return prob_detect_func + +# Parameters +# ========== +start_time = datetime.now() # Simulation start time +prob_detect = Probability(.9) # 90% chance of detection. 
+prob_death = Probability(0.01) # Probability of death +prob_birth = Probability(0.1) # Probability of birth +prob_survive = Probability(0.99) # Probability of survival +birth_rate = 0.02 # Birth-rate (Mean number of new targets per scan) +clutter_rate = 2 # Clutter-rate (Mean number of clutter measurements per scan) +surveillance_region = [[-10, 30], [0, 30]] # The surveillance region x=[-10, 30], y=[0, 30] +surveillance_area = (surveillance_region[0][1] - surveillance_region[0][0]) \ + * (surveillance_region[1][1] - surveillance_region[1][0]) +clutter_intensity = clutter_rate / surveillance_area # Clutter intensity per unit volume/area +birth_density = GaussianState(StateVector(np.array([10., 0.0, 10., 0.0, 0.0, 0.0])), + np.diag([10. ** 2, 1. ** 2, 10. ** 2, 1. ** 2, .0, .0])) # Birth density +birth_scheme = 'mixture' # Birth scheme. Possible values are 'expansion' and 'mixture' +num_particles = 2 ** 13 # Number of particles used by the PHD filter +num_iter = 100 # Number of simulation steps +total_no_sensors = 1 +PLOT = True # Set [True | False] to turn plotting [ON | OFF] + +# Models +# ====== +# Transition model +transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.01), + ConstantVelocity(0.01), + ConstantVelocity(0.01)]) + +# Simulate Groundtruth +# ==================== +gnd_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), + ConstantVelocity(0.), + ConstantVelocity(0.)]) +truths = set() +truth = GroundTruthPath([GroundTruthState([0, 0.2, 0, 0.2, 0, 0], timestamp=start_time)]) +for k in range(1, num_iter + 1): + truth.append(GroundTruthState( + gnd_transition_model.function(truth[k - 1], noise=True, time_interval=timedelta(seconds=1)), + timestamp=start_time + timedelta(seconds=k))) +truths.add(truth) + +truth = GroundTruthPath([GroundTruthState([0, 0.2, 20, -0.2, 0, 0], timestamp=start_time)]) +for k in range(1, num_iter + 1): + truth.append(GroundTruthState( + gnd_transition_model.function(truth[k - 1], 
noise=True, time_interval=timedelta(seconds=1)), + timestamp=start_time + timedelta(seconds=k))) +truths.add(truth) + +timestamps = [] +for k in range(1, num_iter + 1): + timestamps.append(start_time + timedelta(seconds=k)) + + +# Create sensors +# ============== +sensors = set() +for i in range(1, total_no_sensors+1): + rotation_offset = StateVector( + [Angle(0), Angle(-np.pi / 2), Angle(0)]) # Camera rotation offset + pan_tilt = StateVector([Angle(0), Angle(-np.pi / 32)]) # Camera pan and tilt + + position = StateVector([i * 10., i * 10., 100.]) + resolutions = {'location_x': 10., 'location_y': 10.} + sensor = MovableUAVCamera(ndim_state=6, mapping=[0, 2, 4], + noise_covar=np.diag([0.05, 0.05, 0.05]), + location_x=position[0], location_y=position[1], + resolutions=resolutions, + position=position, + radius=10, + minmax=StateVector([-100, 100])) + sensors.add(sensor) +for sensor in sensors: + sensor.timestamp = start_time + +# # Predictor & Updater +# # =================== +# predictor = KalmanPredictor(transition_model) +# updater = KalmanUpdater(None) +# +# # Hypothesiser & Data Associator +# # ============================== +# hypothesiser = IPDAHypothesiser(predictor, updater, clutter_intensity, prob_detect=prob_detect, +# prob_survive=prob_survive) +# # hypothesiser = PDAHypothesiser(predictor, updater, clutter_intensity, prob_detect=prob_detect) +# hypothesiser = DistanceGater(hypothesiser, Mahalanobis(), 10) +# associator = JIPDAWithEHM2(hypothesiser) +# +# # Track Deleter +# # ============= +# deleter = UpdateTimeDeleter(time_since_update=timedelta(minutes=5)) +# +# # Initiator +# # ========= +# # Initialise PHD Filter +# resampler = SystematicResampler() +# phd_filter = SMCPHDFilter(birth_density=birth_density, transition_model=transition_model, +# measurement_model=None, prob_detect=prob_detect, +# prob_death=prob_death, prob_birth=prob_birth, +# birth_rate=birth_rate, clutter_intensity=clutter_intensity, +# num_samples=num_particles, 
resampler=resampler, +# birth_scheme=birth_scheme) +# +# # Sample prior state from birth density +# state_vector = StateVectors(multivariate_normal.rvs(birth_density.state_vector.ravel(), +# birth_density.covar, +# size=num_particles).T) +# weight = np.ones((num_particles,)) * Probability(1 / num_particles) +# state = ParticleState(state_vector=state_vector, weight=weight, timestamp=start_time) +# +# +# initiator = SMCPHDInitiator(filter=phd_filter, prior=state) + +tracker = SMCPHD_JIPDA(birth_density=birth_density, transition_model=transition_model, + measurement_model=None, prob_detection=prob_detect, + prob_death=prob_death, prob_birth=prob_birth, + birth_rate=birth_rate, clutter_intensity=clutter_intensity, + num_samples=num_particles, birth_scheme=birth_scheme, + start_time=start_time) + +# Initialise sensor manager +# ========================= +reward_function = UncertaintyRewardFunction(tracker._predictor, tracker._updater) +sensor_manager = BruteForceSensorManager(sensors, reward_function) + +# Estimate +# ======== + +# Plot the prior +if PLOT: + fig1 = plt.figure(figsize=(10, 6)) + ax1 = plt.gca() + # ax1.plot(state.state_vector[0, :], state.state_vector[2, :], 'r.') + +# Main tracking loop +tracks = set() +for k, timestamp in enumerate(timestamps): + + tracks = list(tracks) + + # Generate chosen configuration + chosen_actions = sensor_manager.choose_actions(tracks, timestamp) + + # Create empty dictionary for measurements + detections = [] + + for chosen_action in chosen_actions: + for sensor, actions in chosen_action.items(): + sensor.add_actions(actions) + + fovs = [] + truth_states = OrderedSet(truth[timestamp] for truth in truths) + for sensor in sensors: + sensor.act(timestamp) + center = (sensor.position[0], sensor.position[1]) + radius = sensor.radius + p = Point(center).buffer(radius) + fovs.append(p) + + tracker.prob_detect = _prob_detect_func(fovs) + + for sensor in sensors: + + # Observe this ground truth + sensor_measurements = 
sensor.measure(truth_states, noise=True) + detections.extend(sensor_measurements) + + detections = list(detections) + num_tracks = len(tracks) + num_detections = len(detections) + + tracks = tracker.track(detections, timestamp) + + print('\n===========================================') + # print(f'Num targets: {np.sum(state.weight)} - Num new targets: {len(new_tracks)}') + for track in tracks: + print(f'Track {track.id} - Exist prob: {track.exist_prob}') + + # Plot resulting density + if PLOT: + ax1.cla() + + circle = plt.Circle((sensor.position[0], sensor.position[1]), radius=sensor.radius, + color='r', + fill=False) + ax1.add_artist(circle) + for i, truth in enumerate(truths): + data = np.array([s.state_vector for s in truth[:k + 1]]) + ax1.plot(data[:, 0], data[:, 2], '--', label=f'Groundtruth Track {i+1}') + if len(detections): + det_data = np.array([det.state_vector for det in detections]) + ax1.plot(det_data[:, 0], det_data[:, 1], '*g', label='Detections') + # ax1.plot(state.state_vector[0, :], state.state_vector[2, :], + # 'r.', label='Particles') + + for track in tracks: + data = np.array([s.state_vector for s in track]) + ax1.plot(data[:, 0], data[:, 2], label=f'Track {track.id}') + plot_cov_ellipse(track.covar[[0, 2], :][:, [0, 2]], track.state_vector[[0, 2], :], + edgecolor='r', facecolor='none', ax=ax1) + plt.axis([*surveillance_region[0], *surveillance_region[1]]) + plt.legend(loc='upper right') + plt.pause(0.01) diff --git a/stonesoup/custom/sensor/moveable.py b/stonesoup/custom/sensor/moveable.py index 622b4cb22..30ffee164 100644 --- a/stonesoup/custom/sensor/moveable.py +++ b/stonesoup/custom/sensor/moveable.py @@ -15,7 +15,7 @@ from stonesoup.types.groundtruth import GroundTruthState -class MoveableUAVCamera(Sensor): +class MovableUAVCamera(Sensor): """A camera that can pan and tilt.""" ndim_state: int = Property( doc="Number of state dimensions. 
This is utilised by (and follows in\ From 76feb86cdd4d0986238f122964afcdc5c22800e4 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 5 Dec 2022 21:52:34 +0000 Subject: [PATCH 27/87] Renamed minmax to limits and changed data type for MovableUAVCamera and LocationActionGenerator --- .../smcphd_init-sm-example-movable.py | 9 ++- stonesoup/custom/sensor/action/location.py | 6 +- stonesoup/custom/sensor/moveable.py | 75 +++++++++++++------ 3 files changed, 59 insertions(+), 31 deletions(-) diff --git a/examples/reactive-isr/smcphd_init-sm-example-movable.py b/examples/reactive-isr/smcphd_init-sm-example-movable.py index 2bff1b6c7..ba4b275da 100644 --- a/examples/reactive-isr/smcphd_init-sm-example-movable.py +++ b/examples/reactive-isr/smcphd_init-sm-example-movable.py @@ -149,20 +149,21 @@ def prob_detect_func(state): # Create sensors # ============== sensors = set() -for i in range(1, total_no_sensors+1): +for i in range(0, total_no_sensors): rotation_offset = StateVector( [Angle(0), Angle(-np.pi / 2), Angle(0)]) # Camera rotation offset pan_tilt = StateVector([Angle(0), Angle(-np.pi / 32)]) # Camera pan and tilt - position = StateVector([i * 10., i * 10., 100.]) - resolutions = {'location_x': 10., 'location_y': 10.} + position = StateVector([i * 10., 10., 100.]) + resolutions = {'location_x': 5., 'location_y': 5.} + limits = {'location_x': surveillance_region[0], 'location_y': surveillance_region[1]} sensor = MovableUAVCamera(ndim_state=6, mapping=[0, 2, 4], noise_covar=np.diag([0.05, 0.05, 0.05]), location_x=position[0], location_y=position[1], resolutions=resolutions, position=position, radius=10, - minmax=StateVector([-100, 100])) + limits=limits) sensors.add(sensor) for sensor in sensors: sensor.timestamp = start_time diff --git a/stonesoup/custom/sensor/action/location.py b/stonesoup/custom/sensor/action/location.py index 67ce34c27..8d61b0698 100644 --- a/stonesoup/custom/sensor/action/location.py +++ b/stonesoup/custom/sensor/action/location.py @@ -42,7 +42,7 
@@ class LocationActionGenerator(RealNumberActionGenerator): owner: object = Property(doc="Object with `timestamp`, `rpm` (revolutions per minute) and " "dwell-centre attributes") resolution: float = Property(default=10, doc="Resolution of action space") - minmax: StateVector = Property(doc="Min and max values of the action space", + limits: StateVector = Property(doc="Min and max values of the action space", default=StateVector([-100, 100])) _action_cls = ChangeLocationAction @@ -82,12 +82,12 @@ def initial_value(self): @property def min(self): # Pan can rotate freely, while tilt is limited to +/- 90 degrees - return self.minmax[0] + return self.limits[0] @property def max(self): # Pan can rotate freely, while tilt is limited to +/- 90 degrees - return self.minmax[1] + return self.limits[1] def __contains__(self, item): diff --git a/stonesoup/custom/sensor/moveable.py b/stonesoup/custom/sensor/moveable.py index 30ffee164..33d89a9a2 100644 --- a/stonesoup/custom/sensor/moveable.py +++ b/stonesoup/custom/sensor/moveable.py @@ -47,11 +47,12 @@ class MovableUAVCamera(Sensor): default=0, generator_cls=LocationActionGenerator ) - minmax: StateVector = Property( + limits: dict = Property( doc="The sensor min max location", - default=StateVector([-100, 100]) + default=None ) + @location_x.setter def location_x(self, value): self._property_location_x = value @@ -116,18 +117,31 @@ def _default_action(self, name, property_, timestamp): (assumes the property is an :class:`~.ActionableProperty`.""" if self.resolutions and name in self.resolutions.keys(): - generator = property_.generator_cls(owner=self, - attribute=name, - start_time=self.timestamp, - end_time=timestamp, - resolution=self.resolutions[name], - minmax=self.minmax) + if self.limits and name in self.limits.keys(): + generator = property_.generator_cls(owner=self, + attribute=name, + start_time=self.timestamp, + end_time=timestamp, + resolution=self.resolutions[name], + limits=self.limits[name]) + else: + generator 
= property_.generator_cls(owner=self, + attribute=name, + start_time=self.timestamp, + end_time=timestamp, + resolution=self.resolutions[name]) else: - generator = property_.generator_cls(owner=self, - attribute=name, - start_time=self.timestamp, - end_time=timestamp, - minmax=self.minmax) + if self.limits and name in self.limits.keys(): + generator = property_.generator_cls(owner=self, + attribute=name, + start_time=self.timestamp, + end_time=timestamp, + limits=self.limits) + else: + generator = property_.generator_cls(owner=self, + attribute=name, + start_time=self.timestamp, + end_time=timestamp) return generator.default_action def actions(self, timestamp: datetime.datetime, start_timestamp: datetime.datetime = None @@ -158,16 +172,29 @@ def actions(self, timestamp: datetime.datetime, start_timestamp: datetime.dateti generators = set() for name, property_ in self._actionable_properties.items(): if self.resolutions and name in self.resolutions.keys(): - generators.add(property_.generator_cls(owner=self, - attribute=name, - start_time=start_timestamp, - end_time=timestamp, - resolution=self.resolutions[name], - minmax=self.minmax)) + if self.limits and name in self.limits.keys(): + generators.add(property_.generator_cls(owner=self, + attribute=name, + start_time=start_timestamp, + end_time=timestamp, + resolution=self.resolutions[name], + limits=self.limits[name])) + else: + generators.add(property_.generator_cls(owner=self, + attribute=name, + start_time=start_timestamp, + end_time=timestamp, + resolution=self.resolutions[name])) else: - generators.add(property_.generator_cls(owner=self, - attribute=name, - start_time=start_timestamp, - end_time=timestamp, - minmax=self.minmax)) + if self.limits and name in self.limits.keys(): + generators.add(property_.generator_cls(owner=self, + attribute=name, + start_time=start_timestamp, + end_time=timestamp, + limits=self.limits[name])) + else: + generators.add(property_.generator_cls(owner=self, + attribute=name, + 
start_time=start_timestamp, + end_time=timestamp)) return generators \ No newline at end of file From cff691bd3c503bcab947af5d26c2529c6d390aa1 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 5 Dec 2022 22:17:56 +0000 Subject: [PATCH 28/87] Renamed radius property to fov_radius in MovableUAVCamera --- examples/reactive-isr/smcphd_init-sm-example-movable.py | 6 +++--- stonesoup/custom/sensor/moveable.py | 9 +++------ 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/examples/reactive-isr/smcphd_init-sm-example-movable.py b/examples/reactive-isr/smcphd_init-sm-example-movable.py index ba4b275da..6b4b0f5ca 100644 --- a/examples/reactive-isr/smcphd_init-sm-example-movable.py +++ b/examples/reactive-isr/smcphd_init-sm-example-movable.py @@ -162,7 +162,7 @@ def prob_detect_func(state): location_x=position[0], location_y=position[1], resolutions=resolutions, position=position, - radius=10, + fov_radius=10, limits=limits) sensors.add(sensor) for sensor in sensors: @@ -248,7 +248,7 @@ def prob_detect_func(state): for sensor in sensors: sensor.act(timestamp) center = (sensor.position[0], sensor.position[1]) - radius = sensor.radius + radius = sensor.fov_radius p = Point(center).buffer(radius) fovs.append(p) @@ -275,7 +275,7 @@ def prob_detect_func(state): if PLOT: ax1.cla() - circle = plt.Circle((sensor.position[0], sensor.position[1]), radius=sensor.radius, + circle = plt.Circle((sensor.position[0], sensor.position[1]), radius=sensor.fov_radius, color='r', fill=False) ax1.add_artist(circle) diff --git a/stonesoup/custom/sensor/moveable.py b/stonesoup/custom/sensor/moveable.py index 33d89a9a2..ce9e92bf6 100644 --- a/stonesoup/custom/sensor/moveable.py +++ b/stonesoup/custom/sensor/moveable.py @@ -28,10 +28,8 @@ class MovableUAVCamera(Sensor): doc="The sensor noise covariance matrix. 
This is utilised by\ (and follow in format) the underlying \ :class:`~.CartesianToElevationBearing` model") - radius: Union[float, List[float]] = Property( - doc="The field of view (FOV) angle (in radians). If provided in a list, the first element " - "is the pan FOV angle and the second element is the tilt FOV angle. Else, the same " - "FOV angle is used for both pan and tilt.") + fov_radius: Union[float, List[float]] = Property( + doc="The detection field of view radius of the sensor") clutter_model: ClutterModel = Property( default=None, doc="An optional clutter generator that adds a set of simulated " @@ -52,7 +50,6 @@ class MovableUAVCamera(Sensor): default=None ) - @location_x.setter def location_x(self, value): self._property_location_x = value @@ -95,7 +92,7 @@ def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, distance = np.linalg.norm(norm_measurement_vector[0:2]) # Do not measure if state not in FOV - if distance > self.radius: + if distance > self.fov_radius: continue detection = TrueDetection(measurement_vector, From 3fdc8538c05b352629e0d61b7567d5ec62a075dd Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 5 Dec 2022 22:31:53 +0000 Subject: [PATCH 29/87] Tidy up code for MovableUAVCamera --- .../smcphd_init-sm-example-movable.py | 2 +- .../custom/sensor/{moveable.py => movable.py} | 76 +++++-------------- 2 files changed, 19 insertions(+), 59 deletions(-) rename stonesoup/custom/sensor/{moveable.py => movable.py} (59%) diff --git a/examples/reactive-isr/smcphd_init-sm-example-movable.py b/examples/reactive-isr/smcphd_init-sm-example-movable.py index 6b4b0f5ca..5b8a61f2e 100644 --- a/examples/reactive-isr/smcphd_init-sm-example-movable.py +++ b/examples/reactive-isr/smcphd_init-sm-example-movable.py @@ -5,7 +5,7 @@ from shapely.geometry import Point from shapely.ops import unary_union -from stonesoup.custom.sensor.moveable import MovableUAVCamera +from stonesoup.custom.sensor.movable import MovableUAVCamera from 
stonesoup.sensormanager import BruteForceSensorManager from stonesoup.sensormanager.reward import UncertaintyRewardFunction from stonesoup.types.angle import Angle diff --git a/stonesoup/custom/sensor/moveable.py b/stonesoup/custom/sensor/movable.py similarity index 59% rename from stonesoup/custom/sensor/moveable.py rename to stonesoup/custom/sensor/movable.py index ce9e92bf6..063bec089 100644 --- a/stonesoup/custom/sensor/moveable.py +++ b/stonesoup/custom/sensor/movable.py @@ -10,7 +10,7 @@ from stonesoup.sensor.action import ActionGenerator from stonesoup.sensor.actionable import ActionableProperty from stonesoup.sensor.sensor import Sensor -from stonesoup.types.array import CovarianceMatrix, StateVector +from stonesoup.types.array import CovarianceMatrix from stonesoup.types.detection import TrueDetection from stonesoup.types.groundtruth import GroundTruthState @@ -111,34 +111,8 @@ def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, def _default_action(self, name, property_, timestamp): """Returns the default action of the action generator associated with the property - (assumes the property is an :class:`~.ActionableProperty`.""" - - if self.resolutions and name in self.resolutions.keys(): - if self.limits and name in self.limits.keys(): - generator = property_.generator_cls(owner=self, - attribute=name, - start_time=self.timestamp, - end_time=timestamp, - resolution=self.resolutions[name], - limits=self.limits[name]) - else: - generator = property_.generator_cls(owner=self, - attribute=name, - start_time=self.timestamp, - end_time=timestamp, - resolution=self.resolutions[name]) - else: - if self.limits and name in self.limits.keys(): - generator = property_.generator_cls(owner=self, - attribute=name, - start_time=self.timestamp, - end_time=timestamp, - limits=self.limits) - else: - generator = property_.generator_cls(owner=self, - attribute=name, - start_time=self.timestamp, - end_time=timestamp) + (assumes the property is an 
:class:`~.ActionableProperty`).""" + generator = self._get_generator(name, property_, timestamp, self.timestamp) return generator.default_action def actions(self, timestamp: datetime.datetime, start_timestamp: datetime.datetime = None @@ -166,32 +140,18 @@ def actions(self, timestamp: datetime.datetime, start_timestamp: datetime.dateti if start_timestamp is None: start_timestamp = self.timestamp - generators = set() - for name, property_ in self._actionable_properties.items(): - if self.resolutions and name in self.resolutions.keys(): - if self.limits and name in self.limits.keys(): - generators.add(property_.generator_cls(owner=self, - attribute=name, - start_time=start_timestamp, - end_time=timestamp, - resolution=self.resolutions[name], - limits=self.limits[name])) - else: - generators.add(property_.generator_cls(owner=self, - attribute=name, - start_time=start_timestamp, - end_time=timestamp, - resolution=self.resolutions[name])) - else: - if self.limits and name in self.limits.keys(): - generators.add(property_.generator_cls(owner=self, - attribute=name, - start_time=start_timestamp, - end_time=timestamp, - limits=self.limits[name])) - else: - generators.add(property_.generator_cls(owner=self, - attribute=name, - start_time=start_timestamp, - end_time=timestamp)) - return generators \ No newline at end of file + generators = {self._get_generator(name, property_, timestamp, start_timestamp) + for name, property_ in self._actionable_properties.items()} + + return generators + + def _get_generator(self, name, prop, timestamp, start_timestamp): + """Returns the action generator associated with the """ + kwargs = {'owner': self, 'attribute': name, 'start_time': start_timestamp, + 'end_time': timestamp} + if self.resolutions and name in self.resolutions.keys(): + kwargs['resolution'] = self.resolutions[name] + if self.limits and name in self.limits.keys(): + kwargs['limits'] = self.limits[name] + generator = prop.generator_cls(**kwargs) + return generator From 
a506c508e7551db9b7a7ab5cdccf3015ea3f620a Mon Sep 17 00:00:00 2001 From: sglvladi Date: Thu, 8 Dec 2022 15:24:49 +0000 Subject: [PATCH 30/87] Added RolloutUncertaintyRewardFunction --- .../smcphd_init-sm-example-movable.py | 10 +- stonesoup/sensormanager/reward.py | 158 +++++++++++++++++- 2 files changed, 164 insertions(+), 4 deletions(-) diff --git a/examples/reactive-isr/smcphd_init-sm-example-movable.py b/examples/reactive-isr/smcphd_init-sm-example-movable.py index 5b8a61f2e..af434d2a5 100644 --- a/examples/reactive-isr/smcphd_init-sm-example-movable.py +++ b/examples/reactive-isr/smcphd_init-sm-example-movable.py @@ -7,7 +7,8 @@ from stonesoup.custom.sensor.movable import MovableUAVCamera from stonesoup.sensormanager import BruteForceSensorManager -from stonesoup.sensormanager.reward import UncertaintyRewardFunction +from stonesoup.sensormanager.reward import UncertaintyRewardFunction, \ + RolloutUncertaintyRewardFunction from stonesoup.types.angle import Angle from stonesoup.types.array import StateVector from stonesoup.types.numeric import Probability @@ -93,6 +94,7 @@ def prob_detect_func(state): return prob_detect_func +# if __name__ == '__main__': # Parameters # ========== start_time = datetime.now() # Simulation start time @@ -215,7 +217,9 @@ def prob_detect_func(state): # Initialise sensor manager # ========================= -reward_function = UncertaintyRewardFunction(tracker._predictor, tracker._updater) +# reward_function = UncertaintyRewardFunction(tracker._predictor, tracker._updater) +reward_function = RolloutUncertaintyRewardFunction(tracker._predictor, tracker._updater, 2, + num_samples=10, interval=timedelta(seconds=5)) sensor_manager = BruteForceSensorManager(sensors, reward_function) # Estimate @@ -295,4 +299,4 @@ def prob_detect_func(state): edgecolor='r', facecolor='none', ax=ax1) plt.axis([*surveillance_region[0], *surveillance_region[1]]) plt.legend(loc='upper right') - plt.pause(0.01) + plt.pause(0.1) diff --git 
a/stonesoup/sensormanager/reward.py b/stonesoup/sensormanager/reward.py index 2bbfd1dd3..587bcd6fb 100644 --- a/stonesoup/sensormanager/reward.py +++ b/stonesoup/sensormanager/reward.py @@ -1,9 +1,12 @@ from abc import ABC import copy import datetime -from typing import Mapping, Sequence, Set +from random import random +from typing import Mapping, Sequence, Set, List +import itertools as it import numpy as np +from tqdm import tqdm from ..types.detection import TrueDetection from ..base import Base, Property @@ -15,6 +18,19 @@ from ..sensor.action import Action +import multiprocessing as mpp + + +def imap_tqdm(pool, f, inputs, chunksize=None, **tqdm_kwargs): + # Calculation of chunksize taken from pool._map_async + if not chunksize: + chunksize, extra = divmod(len(inputs), len(pool._pool) * 4) + if extra: + chunksize += 1 + results = list(tqdm(pool.imap_unordered(f, inputs, chunksize=chunksize), total=len(inputs), **tqdm_kwargs)) + return results + + class RewardFunction(Base, ABC): """ The reward function base class. @@ -126,3 +142,143 @@ def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track] # Return value of configuration metric return config_metric + + +class RolloutUncertaintyRewardFunction(RewardFunction): + """A reward function which calculates the potential reduction in the uncertainty of track estimates + if a particular action is taken by a sensor or group of sensors. + + Given a configuration of sensors and actions, a metric is calculated for the potential + reduction in the uncertainty of the tracks that would occur if the sensing configuration + were used to make an observation. A larger value indicates a greater reduction in + uncertainty. 
+ """ + + predictor: KalmanPredictor = Property(doc="Predictor used to predict the track to a new state") + updater: ExtendedKalmanUpdater = Property(doc="Updater used to update " + "the track to the new state.") + timesteps: int = Property(doc="Number of timesteps to rollout") + num_samples: int = Property(doc="Number of samples to take for each timestep", default=30) + interval: datetime.timedelta = Property(doc="Interval between timesteps", + default=datetime.timedelta(seconds=1)) + + def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track], + metric_time: datetime.datetime, *args, **kwargs): + """ + For a given configuration of sensors and actions this reward function calculates the + potential uncertainty reduction of each track by + computing the difference between the covariance matrix norms of the prediction + and the posterior assuming a predicted measurement corresponding to that prediction. + + This requires a mapping of sensors to action(s) + to be evaluated by reward function, a set of tracks at given time and the time at which + the actions would be carried out until. + + The metric returned is the total potential reduction in uncertainty across all tracks. + + Returns + ------- + : float + Metric of uncertainty for given configuration + + """ + + # Reward value + end_time = metric_time + datetime.timedelta(seconds=self.timesteps) + config_metric = self._rollout(config, tracks, metric_time, end_time) + + # Return value of configuration metric + return config_metric + + def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track], + timestamp: datetime.datetime, end_time: datetime.datetime): + """ + For a given configuration of sensors and actions this reward function calculates the + potential uncertainty reduction of each track by + computing the difference between the covariance matrix norms of the prediction + and the posterior assuming a predicted measurement corresponding to that prediction. 
+ + This requires a mapping of sensors to action(s) + to be evaluated by reward function, a set of tracks at given time and the time at which + the actions would be carried out until. + + The metric returned is the total potential reduction in uncertainty across all tracks. + + Returns + ------- + : float + Metric of uncertainty for given configuration + + """ + + # Reward value + config_metric = 0 + + predicted_sensors = list() + memo = {} + + # For each sensor in the configuration + for sensor, actions in config.items(): + predicted_sensor = copy.deepcopy(sensor, memo) + predicted_sensor.add_actions(actions) + predicted_sensor.act(timestamp) + if isinstance(sensor, Sensor): + predicted_sensors.append(predicted_sensor) # checks if its a sensor + + # Create dictionary of predictions for the tracks in the configuration + predicted_tracks = set() + for track in tracks: + predicted_track = copy.copy(track) + predicted_track.append(self.predictor.predict(predicted_track, timestamp=timestamp)) + predicted_tracks.add(predicted_track) + + for sensor in predicted_sensors: + + # Assumes one detection per track + detections = {detection.groundtruth_path: detection + for detection in sensor.measure(predicted_tracks, noise=False) + if isinstance(detection, TrueDetection)} + + for predicted_track, detection in detections.items(): + # Generate hypothesis based on prediction/previous update and detection + hypothesis = SingleHypothesis(predicted_track.state, detection) + + # Do the update based on this hypothesis and store covariance matrix + update = self.updater.update(hypothesis) + + previous_cov_norm = np.linalg.norm(predicted_track.covar) + update_cov_norm = np.linalg.norm(update.covar) + + # Replace prediction with update + predicted_track.append(update) + + # Calculate metric for the track observation and add to the metric + # for the configuration + metric = previous_cov_norm - update_cov_norm + config_metric += metric + + if timestamp == end_time: + return config_metric 
+ + timestamp = timestamp + datetime.timedelta(seconds=1) + + all_action_choices = dict() + for sensor in predicted_sensors: + # get action 'generator(s)' + action_generators = sensor.actions(timestamp) + # list possible action combinations for the sensor + action_choices = list(it.product(*action_generators)) + # dictionary of sensors: list(action combinations) + all_action_choices[sensor] = action_choices + + configs = list({sensor: action + for sensor, action in zip(all_action_choices.keys(), actionconfig)} + for actionconfig in it.product(*all_action_choices.values())) + + idx = np.random.choice(len(configs), self.num_samples) + configs = [configs[i] for i in idx] + + rewards = [self._rollout(config, tracks, timestamp, end_time) for config in configs] + config_metric += np.max(rewards) + + return config_metric \ No newline at end of file From aa6960edf7944c99a3694c4ab83f861a16cd9fa6 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Thu, 15 Dec 2022 09:21:03 +0000 Subject: [PATCH 31/87] Fix minor bug for SMCPHD_JIPDA tracker which prevented update when no detections are present --- stonesoup/custom/tracker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stonesoup/custom/tracker.py b/stonesoup/custom/tracker.py index f7c663d28..cf4f38c91 100644 --- a/stonesoup/custom/tracker.py +++ b/stonesoup/custom/tracker.py @@ -113,8 +113,8 @@ def track(self, detections, timestamp): num_tracks = len(tracks) num_detections = len(detections) - if not len(detections): - return self.tracks + # if not len(detections): + # return self.tracks # Perform data association associations = self._associator.associate(tracks, detections, timestamp) From 281d3765d5f43b87054fa5a4db41bc267eac70c1 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Thu, 15 Dec 2022 09:21:47 +0000 Subject: [PATCH 32/87] Add RolloutPriorityRewardFunction reward --- stonesoup/custom/functions/__init__.py | 51 +++++- stonesoup/sensormanager/reward.py | 214 ++++++++++++++++++++++++- 2 files changed, 
259 insertions(+), 6 deletions(-) diff --git a/stonesoup/custom/functions/__init__.py b/stonesoup/custom/functions/__init__.py index a1d772156..f4a9560a4 100644 --- a/stonesoup/custom/functions/__init__.py +++ b/stonesoup/custom/functions/__init__.py @@ -1,11 +1,16 @@ import math -import numpy as np +from typing import Set -# pip install vector3d +import numpy as np +from matplotlib.path import Path +from scipy.special import logsumexp +from scipy.stats import multivariate_normal +from shapely.geometry.base import BaseGeometry from vector3d.vector import Vector -from stonesoup.functions import cart2sphere, sphere2cart from stonesoup.types.angle import Angle +from stonesoup.types.state import ParticleState +from stonesoup.types.track import Track class CameraCalculator: @@ -383,4 +388,42 @@ def rigid_transform_3D(A, B): Vt[2,:] *= -1 R = Vt.T @ U.T t = -R @ centroid_A + centroid_B - return R, t \ No newline at end of file + return R, t + + +def calculate_num_targets_dist(tracks: Set[Track], geom: BaseGeometry, + phd_state: ParticleState = None): + num_samples = 100 + mu_overall = 0 + var_overall = np.inf if len(tracks) == 0 else 0 + path_p = Path(geom.boundary) + + # Calculate PHD density inside polygon + if phd_state is not None: + points = phd_state.state_vector[[0, 2], :].T + inside_points = path_p.contains_points(points) + if np.sum(inside_points) > 0: + # The mean of the PHD density inside the polygon is the sum of the weights of the + # particles inside the polygon + mu_overall = np.exp(logsumexp(np.log(phd_state.weight[inside_points].astype(float)))) + # The variance of a Poisson distribution is equal to the mean + var_overall = mu_overall + + # Calculate number of tracks inside polygon + for track in tracks: + # Sample points from the track state + points = multivariate_normal.rvs(mean=track.state_vector[[0, 2]].ravel(), + cov=track.covar[0:2, 0:2], + size=num_samples) + # Check which points are inside the polygon + inside_points = 
path_p.contains_points(points) + # Probability of existence inside the polygon is the fraction of points inside the polygon + # times the probability of existence + p_success = float(track.exist_prob) * (np.sum(inside_points) / num_samples) + # Mean of a Bernoulli distribution is equal to the probability of success + mu_overall += p_success + # Variance of a Bernoulli distribution is equal to the probability of success, + # times the probability of failure + var_overall += p_success * (1 - p_success) + + return mu_overall, var_overall \ No newline at end of file diff --git a/stonesoup/sensormanager/reward.py b/stonesoup/sensormanager/reward.py index 587bcd6fb..fd18c9be1 100644 --- a/stonesoup/sensormanager/reward.py +++ b/stonesoup/sensormanager/reward.py @@ -2,15 +2,25 @@ import copy import datetime from random import random -from typing import Mapping, Sequence, Set, List +from typing import Mapping, Sequence, Set, List, Any, Union import itertools as it import numpy as np +from matplotlib.path import Path +from shapely.geometry import Polygon, Point +from shapely.ops import unary_union from tqdm import tqdm +from ..custom.functions import calculate_num_targets_dist +from ..functions import gm_reduce_single +from ..tracker import Tracker +from ..types.array import StateVectors from ..types.detection import TrueDetection from ..base import Base, Property from ..predictor.kalman import KalmanPredictor +from ..types.numeric import Probability +from ..types.state import ParticleState +from ..types.update import GaussianStateUpdate from ..updater.kalman import ExtendedKalmanUpdater from ..types.track import Track from ..types.hypothesis import SingleHypothesis @@ -281,4 +291,204 @@ def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track] rewards = [self._rollout(config, tracks, timestamp, end_time) for config in configs] config_metric += np.max(rewards) - return config_metric \ No newline at end of file + return config_metric + + +class 
RolloutPriorityRewardFunction(RewardFunction): + """A reward function which calculates the potential reduction in the uncertainty of track estimates + if a particular action is taken by a sensor or group of sensors. + + Given a configuration of sensors and actions, a metric is calculated for the potential + reduction in the uncertainty of the tracks that would occur if the sensing configuration + were used to make an observation. A larger value indicates a greater reduction in + uncertainty. + """ + + tracker: Tracker = Property(doc="Tracker used to track the tracks") + timesteps: int = Property(doc="Number of timesteps to rollout") + num_samples: int = Property(doc="Number of samples to take for each timestep", default=30) + interval: datetime.timedelta = Property(doc="Interval between timesteps", + default=datetime.timedelta(seconds=1)) + rfis: List[Any] = Property(doc="List of reward functions to use for prioritisation", + default=None) + prob_survive: Probability = Property(doc="Probability of survival", default=Probability(0.99)) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.rfis is None: + self.rfis = [] + def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track], + metric_time: datetime.datetime, *args, **kwargs): + """ + For a given configuration of sensors and actions this reward function calculates the + potential uncertainty reduction of each track by + computing the difference between the covariance matrix norms of the prediction + and the posterior assuming a predicted measurement corresponding to that prediction. + + This requires a mapping of sensors to action(s) + to be evaluated by reward function, a set of tracks at given time and the time at which + the actions would be carried out until. + + The metric returned is the total potential reduction in uncertainty across all tracks. 
+ + Returns + ------- + : float + Metric of uncertainty for given configuration + + """ + + # Reward value + end_time = metric_time + datetime.timedelta(seconds=self.timesteps) + config_metric = self._rollout(config, tracks, metric_time, end_time) + + # Return value of configuration metric + return config_metric + + def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track], timestamp: datetime.datetime, end_time: datetime.datetime): + """ + For a given configuration of sensors and actions this reward function calculates the + potential uncertainty reduction of each track by + computing the difference between the covariance matrix norms of the prediction + and the posterior assuming a predicted measurement corresponding to that prediction. + + This requires a mapping of sensors to action(s) + to be evaluated by reward function, a set of tracks at given time and the time at which + the actions would be carried out until. + + The metric returned is the total potential reduction in uncertainty across all tracks. 
+ + Returns + ------- + : float + Metric of uncertainty for given configuration + + """ + + # Reward value + config_metric = 0 + + predicted_sensors = list() + memo = {} + + # For each sensor in the configuration + for sensor, actions in config.items(): + predicted_sensor = copy.deepcopy(sensor, memo) + predicted_sensor.add_actions(actions) + predicted_sensor.act(timestamp) + if isinstance(sensor, Sensor): + predicted_sensors.append(predicted_sensor) # checks if its a sensor + + # Create dictionary of predictions for the tracks in the configuration + predicted_tracks = set() + for track in tracks: + predicted_track = copy.copy(track) + predicted_track.append(self.tracker._predictor.predict(predicted_track, timestamp=timestamp)) + predicted_tracks.add(predicted_track) + + tracks_copy = [copy.copy(track) for track in tracks] + fovs = [] + for sensor in predicted_sensors: + center = (sensor.position[0], sensor.position[1]) + radius = sensor.fov_radius + p = Point(center).buffer(radius) + fovs.append(p) + self.tracker.prob_detect = _prob_detect_func(fovs) + + for sensor in predicted_sensors: + + # Assumes one detection per track + detections = {detection + for detection in sensor.measure(predicted_tracks, noise=False) + if isinstance(detection, TrueDetection)} + + associations = self.tracker._associator.associate(tracks_copy, detections, timestamp) + + for track, multihypothesis in associations.items(): + + # calculate each Track's state as a Gaussian Mixture of + # its possible associations with each detection, then + # reduce the Mixture to a single Gaussian State + posterior_states = [] + posterior_state_weights = [] + for hypothesis in multihypothesis: + posterior_state_weights.append(hypothesis.probability) + if hypothesis: + posterior_states.append(self.tracker._updater.update(hypothesis)) + else: + posterior_states.append(hypothesis.prediction) + + # Merge/Collapse to single Gaussian + means = StateVectors([state.state_vector for state in posterior_states]) + 
covars = np.stack([state.covar for state in posterior_states], axis=2) + weights = np.asarray(posterior_state_weights) + + post_mean, post_covar = gm_reduce_single(means, covars, weights) + + track.append(GaussianStateUpdate( + np.array(post_mean), np.array(post_covar), + multihypothesis, + multihypothesis[0].prediction.timestamp)) + + for rfi in self.rfis: + xmin, ymin = rfi.region_of_interest.corners[0].longitude, rfi.region_of_interest.corners[0].latitude + xmax, ymax = rfi.region_of_interest.corners[1].longitude, rfi.region_of_interest.corners[1].latitude + geom = Polygon([(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]) + _, var = calculate_num_targets_dist(tracks_copy, geom) + if var < rfi.threshold: + # TODO: Need to select the priority + config_metric += rfi.priority_over_time.priority[0] #1/var + + + if timestamp == end_time: + return config_metric + + timestamp = timestamp + datetime.timedelta(seconds=1) + + all_action_choices = dict() + for sensor in predicted_sensors: + # get action 'generator(s)' + action_generators = sensor.actions(timestamp) + # list possible action combinations for the sensor + action_choices = list(it.product(*action_generators)) + # dictionary of sensors: list(action combinations) + all_action_choices[sensor] = action_choices + + configs = list({sensor: action + for sensor, action in zip(all_action_choices.keys(), actionconfig)} + for actionconfig in it.product(*all_action_choices.values())) + + idx = np.random.choice(len(configs), self.num_samples) + configs = [configs[i] for i in idx] + + rewards = [self._rollout(config, tracks_copy, timestamp, end_time) for config in configs] + config_metric += np.max(rewards) + + return config_metric + + +def _prob_detect_func(fovs): + """Closure to return the probability of detection function for a given environment scan""" + prob_detect = Probability(0.9) + # Get the union of all field of views + fovs_union = unary_union(fovs) + if fovs_union.geom_type == 'MultiPolygon': + fovs = 
[poly for poly in fovs_union] + else: + fovs = [fovs_union] + + # Probability of detection nested function + def prob_detect_func(state): + for poly in fovs: + if isinstance(state, ParticleState): + prob_detect_arr = np.full((len(state),), Probability(0.1)) + path_p = Path(poly.boundary) + points = state.state_vector[[0, 2], :].T + inside_points = path_p.contains_points(points) + prob_detect_arr[inside_points] = prob_detect + return prob_detect_arr + else: + point = Point(state.state_vector[0, 0], state.state_vector[2, 0]) + return prob_detect if poly.contains(point) else Probability(0) + + return prob_detect_func \ No newline at end of file From 2f3f8efa0bff9e53e98a36b3c57b8fbc0aee3bc0 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Thu, 15 Dec 2022 19:08:00 +0000 Subject: [PATCH 33/87] Monir fix in calculate_num_targets_dist() function --- stonesoup/custom/functions/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stonesoup/custom/functions/__init__.py b/stonesoup/custom/functions/__init__.py index f4a9560a4..22cb00802 100644 --- a/stonesoup/custom/functions/__init__.py +++ b/stonesoup/custom/functions/__init__.py @@ -413,7 +413,7 @@ def calculate_num_targets_dist(tracks: Set[Track], geom: BaseGeometry, for track in tracks: # Sample points from the track state points = multivariate_normal.rvs(mean=track.state_vector[[0, 2]].ravel(), - cov=track.covar[0:2, 0:2], + cov=track.covar[[0, 2], :][:, [0, 2]], size=num_samples) # Check which points are inside the polygon inside_points = path_p.contains_points(points) From d829f322d7f9e3f88ae3d17076e88326a7b97cc9 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Thu, 15 Dec 2022 19:26:44 +0000 Subject: [PATCH 34/87] Patch for false initiation of closely spaced targets --- stonesoup/custom/tracker.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/stonesoup/custom/tracker.py b/stonesoup/custom/tracker.py index cf4f38c91..2bdcc1f44 100644 --- a/stonesoup/custom/tracker.py 
+++ b/stonesoup/custom/tracker.py @@ -58,7 +58,7 @@ def __init__(self, *args, **kwargs): self.clutter_intensity, prob_detect=self.prob_detect, prob_survive=1-self.prob_death) - self._hypothesiser = DistanceGater(self._hypothesiser, Mahalanobis(), 10) + self._hypothesiser = DistanceGater(self._hypothesiser, Mahalanobis(), 20) self._associator = JIPDAWithEHM2(self._hypothesiser) resampler = SystematicResampler() @@ -131,10 +131,11 @@ def track(self, detections, timestamp): rho = np.zeros((len(detections))) for j, detection in enumerate(detections): - rho_tmp = 1 - if len(assoc_prob_matrix): - for i, track in enumerate(tracks): - rho_tmp *= 1 - assoc_prob_matrix[i, j + 1] + rho_tmp = 0 if len(assoc_prob_matrix) and np.sum(assoc_prob_matrix[:, j + 1]) > 0 else 1 + # rho_tmp = 1 + # if len(assoc_prob_matrix): + # for i, track in enumerate(tracks): + # rho_tmp *= 1 - assoc_prob_matrix[i, j + 1] rho[j] = rho_tmp for track, multihypothesis in associations.items(): From 09caf1147c6b6ee7f57e3784b15156656b49b4d1 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Wed, 21 Dec 2022 02:47:28 +0000 Subject: [PATCH 35/87] Code refactoring --- stonesoup/custom/dataassociator/__init__.py | 0 .../custom/{ => dataassociator}/jipda.py | 0 stonesoup/custom/hypothesiser/probability.py | 29 +- stonesoup/custom/initiator/__init__.py | 0 stonesoup/custom/{ => initiator}/smcphd.py | 0 stonesoup/custom/sensor/action/location.py | 4 +- stonesoup/custom/sensor/movable.py | 3 +- stonesoup/custom/sensormanager/__init__.py | 0 stonesoup/custom/sensormanager/base.py | 193 ++++++ stonesoup/custom/sensormanager/reward.py | 602 ++++++++++++++++++ stonesoup/custom/tracker.py | 158 ++++- stonesoup/sensormanager/base.py | 46 +- stonesoup/sensormanager/reward.py | 368 +---------- 13 files changed, 1008 insertions(+), 395 deletions(-) create mode 100644 stonesoup/custom/dataassociator/__init__.py rename stonesoup/custom/{ => dataassociator}/jipda.py (100%) create mode 100644 
stonesoup/custom/initiator/__init__.py rename stonesoup/custom/{ => initiator}/smcphd.py (100%) create mode 100644 stonesoup/custom/sensormanager/__init__.py create mode 100644 stonesoup/custom/sensormanager/base.py create mode 100644 stonesoup/custom/sensormanager/reward.py diff --git a/stonesoup/custom/dataassociator/__init__.py b/stonesoup/custom/dataassociator/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/jipda.py b/stonesoup/custom/dataassociator/jipda.py similarity index 100% rename from stonesoup/custom/jipda.py rename to stonesoup/custom/dataassociator/jipda.py diff --git a/stonesoup/custom/hypothesiser/probability.py b/stonesoup/custom/hypothesiser/probability.py index 2f390388b..99d464b5c 100644 --- a/stonesoup/custom/hypothesiser/probability.py +++ b/stonesoup/custom/hypothesiser/probability.py @@ -14,15 +14,18 @@ class IPDAHypothesiser(PDAHypothesiser): + """ Integrated PDA Hypothesiser """ + prob_detect: Union[Probability, Callable[[State], Probability]] = Property( default=Probability(0.85), doc="Target Detection Probability") prob_survive: Probability = Property(doc="Probability of survival", default=Probability(0.99)) + predict: bool = Property(default=True, doc="Perform prediction step") + per_measurement: bool = Property(default=False, doc="Generate per measurement predictions") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - a = 2 def hypothesise(self, track, detections, timestamp, **kwargs): r"""Evaluate and return all track association hypotheses. 
@@ -30,12 +33,15 @@ def hypothesise(self, track, detections, timestamp, **kwargs): hypotheses = list() - # Common state & measurement prediction - prediction = self.predictor.predict(track, timestamp=timestamp, **kwargs) - # Compute predicted existence - time_interval = timestamp - track.timestamp - prob_survive = np.exp(-(1-self.prob_survive)*time_interval.total_seconds()) - track.exist_prob = prob_survive * track.exist_prob + if self.predict: + # Common state & measurement prediction + prediction = self.predictor.predict(track, timestamp=timestamp, **kwargs) + # Compute predicted existence + time_interval = timestamp - track.timestamp + prob_survive = np.exp(-(1-self.prob_survive)*time_interval.total_seconds()) + track.exist_prob = prob_survive * track.exist_prob + else: + prediction = track.state # Missed detection hypothesis prob_detect = self.prob_detect(prediction) probability = Probability(1 - prob_detect * self.prob_gate * track.exist_prob) @@ -50,13 +56,14 @@ def hypothesise(self, track, detections, timestamp, **kwargs): # True detection hypotheses for detection in detections: - # Re-evaluate prediction - prediction = self.predictor.predict( - track.state, timestamp=detection.timestamp) + if self.predict and self.per_measurement: + # Re-evaluate prediction + prediction = self.predictor.predict( + track.state, timestamp=detection.timestamp) + prob_detect = self.prob_detect(prediction) # Compute measurement prediction and probability measure measurement_prediction = self.updater.predict_measurement( prediction, detection.measurement_model, **kwargs) - prob_detect = self.prob_detect(prediction) # Calculate difference before to handle custom types (mean defaults to zero) # This is required as log pdf coverts arrays to floats log_pdf = mn.logpdf( diff --git a/stonesoup/custom/initiator/__init__.py b/stonesoup/custom/initiator/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/smcphd.py 
b/stonesoup/custom/initiator/smcphd.py similarity index 100% rename from stonesoup/custom/smcphd.py rename to stonesoup/custom/initiator/smcphd.py diff --git a/stonesoup/custom/sensor/action/location.py b/stonesoup/custom/sensor/action/location.py index 8d61b0698..a6f8b21c9 100644 --- a/stonesoup/custom/sensor/action/location.py +++ b/stonesoup/custom/sensor/action/location.py @@ -98,10 +98,12 @@ def __contains__(self, item): def __iter__(self) -> Iterator[ChangeLocationAction]: """Returns all possible ChangePanTiltAction types""" - possible_values = np.arange(self.min, self.max, self.resolution, dtype=float) + possible_values = np.arange(self.min, self.max + self.resolution, self.resolution, dtype=float) yield self.default_action for angle in possible_values: + if angle == self.current_value: + continue yield self._action_cls(generator=self, end_time=self.end_time, target_value=angle) diff --git a/stonesoup/custom/sensor/movable.py b/stonesoup/custom/sensor/movable.py index 063bec089..c072788d9 100644 --- a/stonesoup/custom/sensor/movable.py +++ b/stonesoup/custom/sensor/movable.py @@ -16,7 +16,8 @@ class MovableUAVCamera(Sensor): - """A camera that can pan and tilt.""" + """A movable UAV camera sensor.""" + ndim_state: int = Property( doc="Number of state dimensions. 
This is utilised by (and follows in\ format) the underlying :class:`~.CartesianToElevationBearing`\ diff --git a/stonesoup/custom/sensormanager/__init__.py b/stonesoup/custom/sensormanager/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/sensormanager/base.py b/stonesoup/custom/sensormanager/base.py new file mode 100644 index 000000000..a42ee5eb3 --- /dev/null +++ b/stonesoup/custom/sensormanager/base.py @@ -0,0 +1,193 @@ +import numpy as np +import itertools as it + +from ...base import Property +from ...sensormanager import SensorManager, BruteForceSensorManager + + +class UniqueBruteForceSensorManager(SensorManager): + """A sensor manager which returns a choice of action from those available. The sensor manager + iterates through every possible configuration of sensors and actions and + selects the configuration which returns the maximum reward as calculated by a reward function. + + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def choose_actions(self, tracks, timestamp, nchoose=1, **kwargs): + """Returns a chosen [list of] action(s) from the action set for each sensor. + Chosen action(s) is selected by finding the configuration of sensors: actions which returns + the maximum reward, as calculated by a reward function. + + Parameters + ---------- + tracks: set of :class:`~Track` + Set of tracks at given time. Used in reward function. 
+ timestamp: :class:`datetime.datetime` + Time at which the actions are carried out until + nchoose : int + Number of actions from the set to choose (default is 1) + + Returns + ------- + : dict + The pairs of :class:`~.Sensor`: [:class:`~.Action`] selected + """ + + all_action_choices = dict() + + for sensor in self.sensors: + # get action 'generator(s)' + action_generators = sensor.actions(timestamp) + # list possible action combinations for the sensor + action_choices = list(it.product(*action_generators)) + # dictionary of sensors: list(action combinations) + all_action_choices[sensor] = action_choices + + configs = [] + poss = [] + for actionconfig in it.product(*all_action_choices.values()): + cfg = dict() + pos = set() + for sensor, actions in zip(all_action_choices.keys(), actionconfig): + action_x = next( + action for action in actions if action.generator.attribute == 'location_x') + action_y = next( + action for action in actions if action.generator.attribute == 'location_y') + cfg[sensor] = actions + pos.add((action_x.target_value, action_y.target_value)) + if pos not in poss: + configs.append(cfg) + poss.append(pos) + + best_rewards = np.zeros(nchoose) - np.inf + selected_configs = [None] * nchoose + rewards = [] + + for i, config in enumerate(configs): + reward, var = self.reward_function(config, tracks, timestamp) + rewards.append(reward) + # vars.append(var) + if reward > min(best_rewards): + selected_configs[np.argmin(best_rewards)] = config + best_rewards[np.argmin(best_rewards)] = reward + + # Return mapping of sensors and chosen actions for sensors + return selected_configs + + +class SampleBruteForceSensorManager(BruteForceSensorManager): + """A sensor manager which returns a choice of action from those available. The sensor manager + iterates through every possible configuration of sensors and actions and + selects the configuration which returns the maximum reward as calculated by a reward function. 
+ + """ + + num_samples: int = Property(doc="Number of samples to take for each timestep", default=10) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def choose_actions(self, tracks, timestamp, nchoose=1, **kwargs): + """Returns a chosen [list of] action(s) from the action set for each sensor. + Chosen action(s) is selected by finding the configuration of sensors: actions which returns + the maximum reward, as calculated by a reward function. + + Parameters + ---------- + tracks: set of :class:`~Track` + Set of tracks at given time. Used in reward function. + timestamp: :class:`datetime.datetime` + Time at which the actions are carried out until + nchoose : int + Number of actions from the set to choose (default is 1) + + Returns + ------- + : dict + The pairs of :class:`~.Sensor`: [:class:`~.Action`] selected + """ + + all_action_choices = dict() + + for sensor in self.sensors: + # get action 'generator(s)' + action_generators = sensor.actions(timestamp) + # list possible action combinations for the sensor + action_choices = list(it.product(*action_generators)) + # dictionary of sensors: list(action combinations) + all_action_choices[sensor] = action_choices + + # get tuple of dictionaries of sensors: actions + configs = list({sensor: action + for sensor, action in zip(all_action_choices.keys(), actionconfig)} + for actionconfig in it.product(*all_action_choices.values())) + cfgs = [] + poss = [] + for actionconfig in it.product(*all_action_choices.values()): + cfg = dict() + pos = set() + for sensor, actions in zip(all_action_choices.keys(), actionconfig): + action_x = next( + action for action in actions if action.generator.attribute == 'location_x') + action_y = next( + action for action in actions if action.generator.attribute == 'location_y') + cfg[sensor] = actions + pos.add((action_x.target_value, action_y.target_value)) + if pos not in poss: + cfgs.append(cfg) + poss.append(pos) + + idx = np.random.choice(len(configs), 
self.num_samples) + + configs = np.array([configs[i] for i in idx]) + + best_rewards = np.zeros(nchoose) - np.inf + selected_configs = [None] * nchoose + rewards = [] + for config in configs: + # calculate reward for dictionary of sensors: actions + reward = self.reward_function(config, tracks, timestamp) + rewards.append(reward) + # if reward > min(best_rewards): + # selected_configs[np.argmin(best_rewards)] = config + # best_rewards[np.argmin(best_rewards)] = reward + max_idx = np.argwhere(rewards == np.amax(rewards)).flatten() + best_configs = configs[max_idx] + if best_configs.size == 1: + best_config = best_configs[0] + else: + best_config = None + min_dist = np.inf + for config in best_configs: + dist = 0 + for sensor, actions in config.items(): + action_x = next( + action for action in actions if action.generator.attribute == 'location_x') + action_y = next( + action for action in actions if action.generator.attribute == 'location_y') + sensor_loc = sensor.position[0:2].flatten() + action_loc = np.array([action_x.target_value, action_y.target_value]) + dist += np.linalg.norm(sensor_loc - action_loc) + if dist < min_dist: + min_dist = dist + best_config = config + # Return mapping of sensors and chosen actions for sensors + return [best_config] + + +def is_valid_config(config, **kwargs): + num_sensors = int(len(kwargs)/2) + actions_sets = list(config.values()) + for i in range(num_sensors): + x = kwargs[f'x{i+1}'] + y = kwargs[f'y{i+1}'] + actions = actions_sets[i] + action_x = next( + action for action in actions if action.generator.attribute == 'location_x') + action_y = next( + action for action in actions if action.generator.attribute == 'location_y') + if action_x.target_value != x or action_y.target_value != y: + return False + return True \ No newline at end of file diff --git a/stonesoup/custom/sensormanager/reward.py b/stonesoup/custom/sensormanager/reward.py new file mode 100644 index 000000000..e8f8335ee --- /dev/null +++ 
import copy
import datetime
import itertools as it
from typing import Any, List, Mapping, Sequence, Set

import numpy as np
from matplotlib.path import Path
from shapely.geometry import Point, Polygon
from shapely.ops import unary_union

from stonesoup.base import Property
from stonesoup.custom.functions import calculate_num_targets_dist
from stonesoup.custom.tracker import SMCPHD_JIPDA
from stonesoup.functions import gm_reduce_single
from stonesoup.predictor.kalman import KalmanPredictor
from stonesoup.sensor.action import Action
from stonesoup.sensor.sensor import Sensor
from stonesoup.sensormanager.reward import RewardFunction
from stonesoup.tracker import Tracker
from stonesoup.types.array import StateVectors
from stonesoup.types.detection import TrueDetection
from stonesoup.types.hypothesis import SingleHypothesis
from stonesoup.types.numeric import Probability
from stonesoup.types.state import ParticleState
from stonesoup.types.track import Track
from stonesoup.types.update import GaussianStateUpdate
from stonesoup.updater.kalman import ExtendedKalmanUpdater


class RolloutUncertaintyRewardFunction(RewardFunction):
    """Monte-Carlo rollout reward based on track uncertainty reduction.

    Given a configuration of sensors and actions, the tracks are simulated
    forward over ``timesteps`` future steps of length ``interval``. At each
    step the reward is the reduction in track covariance norm produced by the
    (noiseless) predicted measurements; at each subsequent step a random
    sample of ``num_samples`` follow-on configurations is rolled out and the
    best follow-on reward is added. A larger value indicates a greater
    expected reduction in uncertainty.
    """

    predictor: KalmanPredictor = Property(
        doc="Predictor used to predict the track to a new state")
    updater: ExtendedKalmanUpdater = Property(
        doc="Updater used to update the track to the new state.")
    timesteps: int = Property(doc="Number of timesteps to rollout")
    num_samples: int = Property(
        doc="Number of samples to take for each timestep", default=30)
    interval: datetime.timedelta = Property(
        doc="Interval between timesteps",
        default=datetime.timedelta(seconds=1))

    def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track],
                 metric_time: datetime.datetime, *args, **kwargs):
        """Evaluate the rollout reward for a sensor/action configuration.

        Parameters
        ----------
        config : Mapping[Sensor, Sequence[Action]]
            Mapping of sensors to the action(s) to be evaluated.
        tracks : Set[Track]
            Tracks at the given time.
        metric_time : datetime.datetime
            Time at which the actions would be carried out.

        Returns
        -------
        : float
            Metric of uncertainty reduction for the given configuration.
        """
        # Rollout horizon: `timesteps` steps of length `interval`.
        # (Previously hard-coded to 1-second steps, ignoring `interval`.)
        end_time = metric_time + self.timesteps * self.interval
        return self._rollout(config, tracks, metric_time, end_time)

    def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track],
                 timestamp: datetime.datetime, end_time: datetime.datetime):
        """Recursively roll the configuration out until ``end_time``.

        Returns
        -------
        : float
            Accumulated uncertainty-reduction metric for this branch.
        """
        config_metric = 0

        # Simulate each sensor performing its actions at `timestamp`.
        predicted_sensors = []
        memo = {}
        for sensor, actions in config.items():
            predicted_sensor = copy.deepcopy(sensor, memo)
            predicted_sensor.add_actions(actions)
            predicted_sensor.act(timestamp)
            if isinstance(sensor, Sensor):
                predicted_sensors.append(predicted_sensor)  # checks if its a sensor

        # Predict every track forward to `timestamp`.
        predicted_tracks = set()
        for track in tracks:
            predicted_track = copy.copy(track)
            predicted_track.append(
                self.predictor.predict(predicted_track, timestamp=timestamp))
            predicted_tracks.add(predicted_track)

        for sensor in predicted_sensors:
            # Assumes at most one detection per track.
            # NOTE(review): keys are the detections' groundtruth paths and are
            # used as tracks below — relies on `measure` returning the
            # predicted track as the groundtruth path; confirm with sensor API.
            detections = {detection.groundtruth_path: detection
                          for detection in sensor.measure(predicted_tracks, noise=False)
                          if isinstance(detection, TrueDetection)}

            for predicted_track, detection in detections.items():
                # Hypothesise the prediction against the noiseless detection.
                hypothesis = SingleHypothesis(predicted_track.state, detection)

                # Update and compare covariance norms before/after.
                update = self.updater.update(hypothesis)
                previous_cov_norm = np.linalg.norm(predicted_track.covar)
                update_cov_norm = np.linalg.norm(update.covar)

                # Replace prediction with update.
                predicted_track.append(update)

                # Reward is the reduction in covariance norm.
                config_metric += previous_cov_norm - update_cov_norm

        if timestamp == end_time:
            return config_metric

        # Advance by the configured interval (was hard-coded to 1 second).
        timestamp = timestamp + self.interval

        # Enumerate candidate follow-on configurations for every sensor.
        all_action_choices = dict()
        for sensor in predicted_sensors:
            # get action 'generator(s)' and list possible action combinations
            action_generators = sensor.actions(timestamp)
            all_action_choices[sensor] = list(it.product(*action_generators))

        configs = [{sensor: action
                    for sensor, action in zip(all_action_choices.keys(), actionconfig)}
                   for actionconfig in it.product(*all_action_choices.values())]

        # Monte-Carlo sample of the (potentially huge) configuration space,
        # sampled with replacement as in the original implementation.
        idx = np.random.choice(len(configs), self.num_samples)
        configs = [configs[i] for i in idx]

        rewards = [self._rollout(config, tracks, timestamp, end_time)
                   for config in configs]
        # Best-case follow-on reward; `default` guards an empty sample
        # (np.max raises on an empty sequence).
        config_metric += max(rewards, default=0)

        return config_metric
class RolloutPriorityRewardFunction(RewardFunction):
    """Rollout reward function that prioritises requests for information (RFIs).

    Runs the full tracker (association + update) forward over a rollout
    horizon of ``timesteps`` steps and scores a configuration by the priority
    of each RFI whose estimated number-of-targets variance falls below the
    RFI's threshold. Returns a ``(reward, variance)`` tuple, where the
    variance is that of the last evaluated RFI at the current step.
    """

    tracker: Tracker = Property(doc="Tracker used to track the tracks")
    timesteps: int = Property(doc="Number of timesteps to rollout")
    num_samples: int = Property(
        doc="Number of samples to take for each timestep", default=30)
    interval: datetime.timedelta = Property(
        doc="Interval between timesteps",
        default=datetime.timedelta(seconds=1))
    rfis: List[Any] = Property(
        doc="List of RFIs to use for prioritisation", default=None)
    prob_survive: Probability = Property(
        doc="Probability of survival", default=Probability(0.99))
    use_variance: bool = Property(
        doc="Use variance in prioritisation", default=False)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Materialise the mutable default here rather than sharing a list
        # across instances via the Property default.
        if self.rfis is None:
            self.rfis = []

    def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track],
                 metric_time: datetime.datetime, *args, **kwargs):
        """Evaluate the RFI-priority rollout reward for a configuration.

        Returns
        -------
        : tuple of (float, float)
            Reward and number-of-targets variance for the configuration.
        """
        # Rollout horizon: `timesteps` steps of length `interval`.
        # (Previously hard-coded to 1-second steps, ignoring `interval`.)
        end_time = metric_time + self.timesteps * self.interval
        return self._rollout(config, tracks, metric_time, end_time)

    def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track],
                 timestamp: datetime.datetime, end_time: datetime.datetime):
        """Recursively roll the configuration out until ``end_time``.

        Returns
        -------
        : tuple of (float, float)
            Accumulated reward and number-of-targets variance.
        """
        config_metric = 0

        # Without RFIs there is nothing to prioritise.
        if not len(self.rfis):
            return 0, np.inf

        # Simulate each sensor performing its actions at `timestamp`.
        predicted_sensors = []
        memo = {}
        for sensor, actions in config.items():
            predicted_sensor = copy.deepcopy(sensor, memo)
            predicted_sensor.add_actions(actions)
            predicted_sensor.act(timestamp)
            if isinstance(sensor, Sensor):
                predicted_sensors.append(predicted_sensor)  # checks if its a sensor

        # Predict tracks forward and decay their existence probability.
        predicted_tracks = set()
        for track in tracks:
            predicted_track = copy.copy(track)
            predicted_track.append(
                self.tracker._predictor.predict(predicted_track, timestamp=timestamp))
            # NOTE(review): computed AFTER appending the prediction, so
            # `predicted_track.timestamp` equals `timestamp` and the interval
            # is zero — confirm this is intended.
            time_interval = timestamp - predicted_track.timestamp
            prob_survive = np.exp(
                -self.tracker.prob_death * time_interval.total_seconds())
            track.exist_prob = prob_survive * track.exist_prob
            predicted_tracks.add(predicted_track)

        tracks_copy = [copy.copy(track) for track in tracks]

        for sensor in predicted_sensors:
            # Assumes one detection per track
            detections = {detection
                          for detection in sensor.measure(predicted_tracks, noise=False)
                          if isinstance(detection, TrueDetection)}

            # Restrict the tracker's detection probability to this sensor's
            # circular field of view.
            center = (sensor.position[0], sensor.position[1])
            fov = Point(center).buffer(sensor.fov_radius)
            self.tracker.prob_detect = _prob_detect_func([fov])

            associations = self.tracker._associator.associate(
                tracks_copy, detections, timestamp)

            for track, multihypothesis in associations.items():
                if isinstance(self.tracker, SMCPHD_JIPDA):
                    # JIPDA: each track's state is a Gaussian Mixture over its
                    # possible detection associations; reduce to one Gaussian.
                    posterior_states = []
                    posterior_state_weights = []
                    for hypothesis in multihypothesis:
                        posterior_state_weights.append(hypothesis.probability)
                        if hypothesis:
                            posterior_states.append(
                                self.tracker._updater.update(hypothesis))
                        else:
                            posterior_states.append(hypothesis.prediction)

                    means = StateVectors(
                        [state.state_vector for state in posterior_states])
                    covars = np.stack(
                        [state.covar for state in posterior_states], axis=2)
                    weights = np.asarray(posterior_state_weights)

                    post_mean, post_covar = gm_reduce_single(means, covars, weights)

                    track.append(GaussianStateUpdate(
                        np.array(post_mean), np.array(post_covar),
                        multihypothesis,
                        multihypothesis[0].prediction.timestamp))
                else:
                    if multihypothesis:
                        # Detected: update and reset existence probability.
                        state_post = self.tracker._updater.update(multihypothesis)
                        track.append(state_post)
                        track.exist_prob = Probability(1.)
                    else:
                        # Missed detection: keep prediction and decay the
                        # existence probability.
                        time_interval = timestamp - track.timestamp
                        track.append(multihypothesis.prediction)
                        non_exist_weight = 1 - track.exist_prob
                        prob_survive = np.exp(
                            -self.tracker.prob_death * time_interval.total_seconds())
                        non_det_weight = prob_survive * track.exist_prob
                        track.exist_prob = non_det_weight / (non_exist_weight + non_det_weight)

        # Score RFIs whose estimated number-of-targets variance is below
        # their threshold.
        # NOTE(review): placement relative to the sensor loop inferred from
        # the sibling RolloutPriorityRewardFunction2 — confirm.
        var = np.inf
        for rfi in self.rfis:
            corners = rfi.region_of_interest.corners
            xmin, ymin = corners[0].longitude, corners[0].latitude
            xmax, ymax = corners[1].longitude, corners[1].latitude
            geom = Polygon([(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)])
            _, var = calculate_num_targets_dist(tracks_copy, geom)
            if var < rfi.threshold:
                # TODO: Need to select the priority
                config_metric += rfi.priority_over_time.priority[0]
                if self.use_variance:
                    config_metric += 1 / var

        if timestamp == end_time:
            return config_metric, 0

        # Advance by the configured interval (was hard-coded to 1 second).
        timestamp = timestamp + self.interval

        # Enumerate candidate follow-on configurations for every sensor.
        all_action_choices = dict()
        for sensor in predicted_sensors:
            action_generators = sensor.actions(timestamp)
            all_action_choices[sensor] = list(it.product(*action_generators))

        # Deduplicate configurations that move the sensors to the same set of
        # target positions.
        configs = []
        seen_positions = []
        for actionconfig in it.product(*all_action_choices.values()):
            cfg = dict()
            positions = set()
            for sensor, actions in zip(all_action_choices.keys(), actionconfig):
                action_x = next(action for action in actions
                                if action.generator.attribute == 'location_x')
                action_y = next(action for action in actions
                                if action.generator.attribute == 'location_y')
                cfg[sensor] = actions
                positions.add((action_x.target_value, action_y.target_value))
            if positions not in seen_positions:
                configs.append(cfg)
                seen_positions.append(positions)

        if len(configs) > self.num_samples:
            idx = np.random.choice(len(configs), self.num_samples, replace=False)
            configs = [configs[i] for i in idx]

        rewards = [self._rollout(config, tracks_copy, timestamp, end_time)
                   for config in configs]
        # BUG FIX: each rollout returns a (reward, variance) tuple; the
        # original `np.max(rewards)` took the max over the flattened tuples,
        # so the (possibly infinite) variance could be selected as the reward.
        if rewards:
            config_metric += max(reward for reward, _ in rewards)

        return config_metric, var
class RolloutPriorityRewardFunction2(RewardFunction):
    """Rollout reward function prioritising RFIs, driven by a Kalman
    predictor/updater directly rather than by a full tracker.

    Tracks are predicted forward, updated with noiseless predicted
    detections, and the configuration is scored by the priority of each RFI
    whose estimated number-of-targets variance falls below the RFI's
    threshold. Returns a ``(reward, variance)`` tuple.
    """

    predictor: KalmanPredictor = Property(
        doc="Predictor used to predict the track to a new state")
    updater: ExtendedKalmanUpdater = Property(
        doc="Updater used to update the track to the new state.")
    timesteps: int = Property(doc="Number of timesteps to rollout")
    num_samples: int = Property(
        doc="Number of samples to take for each timestep", default=30)
    interval: datetime.timedelta = Property(
        doc="Interval between timesteps",
        default=datetime.timedelta(seconds=1))
    rfis: List[Any] = Property(
        doc="List of RFIs to use for prioritisation", default=None)
    prob_survive: Probability = Property(
        doc="Probability of survival", default=Probability(0.99))
    prob_death: Probability = Property(
        doc="Probability of death", default=Probability(0.01))
    use_variance: bool = Property(
        doc="Use variance in prioritisation", default=False)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Materialise the mutable default per instance.
        if self.rfis is None:
            self.rfis = []

    def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track],
                 metric_time: datetime.datetime, *args, **kwargs):
        """Evaluate the RFI-priority rollout reward for a configuration.

        Returns
        -------
        : tuple of (float, float)
            Reward and number-of-targets variance for the configuration.
        """
        # Rollout horizon: `timesteps` steps of length `interval`.
        # (Previously hard-coded to 1-second steps, ignoring `interval`.)
        end_time = metric_time + self.timesteps * self.interval
        return self._rollout(config, tracks, metric_time, end_time)

    def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track],
                 timestamp: datetime.datetime, end_time: datetime.datetime):
        """Recursively roll the configuration out until ``end_time``.

        Returns
        -------
        : tuple of (float, float)
            Accumulated reward and number-of-targets variance.
        """
        config_metric = 0

        # Without RFIs there is nothing to prioritise.
        if not len(self.rfis):
            return 0, np.inf

        # Simulate each sensor performing its actions at `timestamp`.
        predicted_sensors = []
        memo = {}
        for sensor, actions in config.items():
            predicted_sensor = copy.deepcopy(sensor, memo)
            predicted_sensor.add_actions(actions)
            predicted_sensor.act(timestamp)
            if isinstance(sensor, Sensor):
                predicted_sensors.append(predicted_sensor)  # checks if its a sensor

        # Predict tracks forward and decay their existence probability.
        predicted_tracks = set()
        for track in tracks:
            predicted_track = copy.copy(track)
            predicted_track.append(
                self.predictor.predict(predicted_track, timestamp=timestamp))
            # NOTE(review): computed AFTER appending the prediction, so the
            # interval is zero — confirm this is intended.
            time_interval = timestamp - predicted_track.timestamp
            prob_survive = np.exp(-self.prob_death * time_interval.total_seconds())
            track.exist_prob = prob_survive * track.exist_prob
            predicted_tracks.add(predicted_track)

        detected_tracks = set()
        for sensor in predicted_sensors:
            # Assumes one detection per track
            detections = {detection.groundtruth_path: detection
                          for detection in sensor.measure(predicted_tracks, noise=False)
                          if isinstance(detection, TrueDetection)}

            for predicted_track, detection in detections.items():
                # Hypothesise the prediction against the noiseless detection.
                hypothesis = SingleHypothesis(predicted_track.state, detection)

                # Update, replace the prediction, and mark as detected.
                update = self.updater.update(hypothesis)
                predicted_track.append(update)
                predicted_track.exist_prob = Probability(1.)
                detected_tracks.add(predicted_track)

        # Decay existence probability of tracks that were not detected.
        non_detected_tracks = predicted_tracks - detected_tracks
        for track in non_detected_tracks:
            time_interval = timestamp - track.timestamp
            non_exist_weight = 1 - track.exist_prob
            prob_survive = np.exp(-self.prob_death * time_interval.total_seconds())
            non_det_weight = prob_survive * track.exist_prob
            track.exist_prob = non_det_weight / (non_exist_weight + non_det_weight)

        # Score RFIs whose estimated number-of-targets variance is below
        # their threshold.
        var = np.inf
        for rfi in self.rfis:
            corners = rfi.region_of_interest.corners
            xmin, ymin = corners[0].longitude, corners[0].latitude
            xmax, ymax = corners[1].longitude, corners[1].latitude
            geom = Polygon([(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)])
            _, var = calculate_num_targets_dist(predicted_tracks, geom)
            if var < rfi.threshold:
                # TODO: Need to select the priority
                config_metric += rfi.priority_over_time.priority[0]
                if self.use_variance:
                    config_metric += 1 / var

        if timestamp == end_time:
            return config_metric, 0

        # Advance by the configured interval (was hard-coded to 1 second).
        timestamp = timestamp + self.interval

        # Enumerate candidate follow-on configurations for every sensor.
        all_action_choices = dict()
        for sensor in predicted_sensors:
            action_generators = sensor.actions(timestamp)
            all_action_choices[sensor] = list(it.product(*action_generators))

        # Deduplicate configurations that move the sensors to the same set of
        # target positions.
        configs = []
        seen_positions = []
        for actionconfig in it.product(*all_action_choices.values()):
            cfg = dict()
            positions = set()
            for sensor, actions in zip(all_action_choices.keys(), actionconfig):
                action_x = next(action for action in actions
                                if action.generator.attribute == 'location_x')
                action_y = next(action for action in actions
                                if action.generator.attribute == 'location_y')
                cfg[sensor] = actions
                positions.add((action_x.target_value, action_y.target_value))
            if positions not in seen_positions:
                configs.append(cfg)
                seen_positions.append(positions)

        if len(configs) > self.num_samples:
            idx = np.random.choice(len(configs), self.num_samples, replace=False)
            configs = [configs[i] for i in idx]

        rewards = [self._rollout(config, predicted_tracks, timestamp, end_time)
                   for config in configs]
        # BUG FIX: each rollout returns a (reward, variance) tuple; the
        # original `np.max(rewards)` took the max over the flattened tuples,
        # so the (possibly infinite) variance could be selected as the reward.
        if rewards:
            config_metric += max(reward for reward, _ in rewards)

        return config_metric, var


def _prob_detect_func(fovs):
    """Return a probability-of-detection function for the given fields of view.

    Inside the union of the FOV polygons the probability of detection is 0.9.
    Outside it, particle states get 0.1 per particle; other states get 0.

    Parameters
    ----------
    fovs : list of shapely geometries
        The sensors' fields of view.
    """
    prob_detect = Probability(0.9)
    # Get the union of all fields of view.
    fovs_union = unary_union(fovs)
    if fovs_union.geom_type == 'MultiPolygon':
        # BUG FIX: MultiPolygon is not directly iterable under shapely >= 2;
        # its components are exposed via `.geoms`.
        polygons = list(fovs_union.geoms)
    else:
        polygons = [fovs_union]

    paths = [Path(poly.boundary) for poly in polygons]

    def prob_detect_func(state):
        # Assumes the x/y position lives in state vector rows 0 and 2 —
        # TODO confirm against the measurement model mapping.
        points = state.state_vector[[0, 2], :].T
        if isinstance(state, ParticleState):
            prob_detect_arr = np.full((len(state),), Probability(0.1))
            # BUG FIX: consider every FOV polygon; the original returned
            # inside the first loop iteration, ignoring all other FOVs.
            for path_p in paths:
                inside_points = path_p.contains_points(points)
                prob_detect_arr[inside_points] = prob_detect
            return prob_detect_arr
        for path_p in paths:
            # `np.alltrue` is removed in numpy 2.0; `np.all` is equivalent.
            if np.all(path_p.contains_points(points)):
                return prob_detect
        return Probability(0)

    return prob_detect_func
stonesoup.dataassociator.neighbour import GNNWith2DAssignment from stonesoup.functions import gm_reduce_single from stonesoup.gater.distance import DistanceGater from stonesoup.custom.hypothesiser.probability import IPDAHypothesiser +from stonesoup.hypothesiser.distance import DistanceHypothesiser from stonesoup.measures import Mahalanobis from stonesoup.models.measurement import MeasurementModel from stonesoup.models.transition import TransitionModel from stonesoup.predictor.kalman import KalmanPredictor -from stonesoup.resampler import Resampler from stonesoup.resampler.particle import SystematicResampler -from stonesoup.tracker import Tracker from stonesoup.types.array import StateVectors from stonesoup.types.numeric import Probability from stonesoup.types.state import State, ParticleState @@ -25,6 +25,8 @@ class SMCPHD_JIPDA(Base): + """A JIPDA tracker using an SMC-PHD filter as the track initiator.""" + transition_model: TransitionModel = Property(doc='The transition model') measurement_model: MeasurementModel = Property(doc='The measurement model') prob_detection: Probability = Property(doc='The probability of detection') @@ -58,7 +60,7 @@ def __init__(self, *args, **kwargs): self.clutter_intensity, prob_detect=self.prob_detect, prob_survive=1-self.prob_death) - self._hypothesiser = DistanceGater(self._hypothesiser, Mahalanobis(), 20) + self._hypothesiser = DistanceGater(self._hypothesiser, Mahalanobis(), 10) self._associator = JIPDAWithEHM2(self._hypothesiser) resampler = SystematicResampler() @@ -107,19 +109,17 @@ def prob_detect(self, prob_detect): self._initiator.filter.prob_detect = self._prob_detect def track(self, detections, timestamp): - tracks = list(self.tracks) detections = list(detections) num_tracks = len(tracks) num_detections = len(detections) - # if not len(detections): - # return self.tracks - # Perform data association associations = self._associator.associate(tracks, detections, timestamp) + # Compute measurement weights assoc_prob_matrix 
class SMCPHD_IGNN(Base):
    """An IGNN tracker using an SMC-PHD filter as the track initiator.

    Detections are associated to existing tracks via a global nearest
    neighbour (GNN) 2D assignment; an SMC-PHD filter initiates new tracks
    from detections that were not associated to any existing track.
    """

    transition_model: TransitionModel = Property(doc='The transition model')
    measurement_model: MeasurementModel = Property(doc='The measurement model')
    prob_detection: Probability = Property(doc='The probability of detection')
    prob_death: Probability = Property(doc='The probability of death')
    prob_birth: Probability = Property(doc='The probability of birth')
    birth_rate: float = Property(
        doc='The birth rate (i.e. number of new/born targets at each iteration)')
    birth_density: State = Property(
        doc='The birth density (i.e. density from which we sample birth particles)')
    clutter_intensity: float = Property(doc='The clutter intensity per unit volume')
    num_samples: int = Property(doc='The number of samples. Default is 1024', default=1024)
    birth_scheme: str = Property(
        doc='The scheme for birth particles. Options are "expansion" | "mixture". '
            'Default is "expansion"',
        default='expansion'
    )
    start_time: datetime = Property(doc='Start time of the tracker', default=None)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.prob_detect = self.prob_detection

        if self.start_time is None:
            self.start_time = datetime.now()

        self._tracks = set()
        self._predictor = KalmanPredictor(self.transition_model)
        self._updater = KalmanUpdater(self.measurement_model)
        # NOTE(review): the original also constructed an IPDAHypothesiser here
        # and immediately discarded it by rebinding self._hypothesiser; the
        # dead construction has been removed. Only the distance hypothesiser
        # is used for GNN association.
        self._hypothesiser = DistanceHypothesiser(self._predictor, self._updater,
                                                  Mahalanobis(), 10)
        self._associator = GNNWith2DAssignment(self._hypothesiser)

        resampler = SystematicResampler()
        phd_filter = SMCPHDFilter(birth_density=self.birth_density,
                                  transition_model=self.transition_model,
                                  measurement_model=self.measurement_model,
                                  prob_detect=self.prob_detect,
                                  prob_death=self.prob_death,
                                  prob_birth=self.prob_birth,
                                  birth_rate=self.birth_rate,
                                  clutter_intensity=self.clutter_intensity,
                                  num_samples=self.num_samples,
                                  resampler=resampler,
                                  birth_scheme=self.birth_scheme)

        # Sample the PHD filter's prior particle state from the birth density.
        state_vector = StateVectors(
            multivariate_normal.rvs(self.birth_density.state_vector.ravel(),
                                    self.birth_density.covar,
                                    size=self.num_samples).T)
        weight = np.full((self.num_samples,), Probability(1 / self.num_samples))
        state = ParticleState(state_vector=state_vector, weight=weight,
                              timestamp=self.start_time)

        self._initiator = SMCPHDInitiator(filter=phd_filter, prior=state)

    @property
    def tracks(self):
        """Current set of confirmed tracks."""
        return self._tracks

    @property
    def prob_detect(self):
        """Probability of detection, exposed as a callable of the state."""
        return self._prob_detect

    @prob_detect.setter
    def prob_detect(self, prob_detect):
        # Normalise scalars to a callable of the state, then propagate the
        # new value to the hypothesiser(s) and PHD initiator if they exist.
        if not callable(prob_detect):
            prob_detect = copy(prob_detect)
            self._prob_detect = lambda state: prob_detect
        else:
            self._prob_detect = copy(prob_detect)
        if hasattr(self, '_hypothesiser'):
            if hasattr(self._hypothesiser, 'hypothesiser'):
                self._hypothesiser.hypothesiser.prob_detect = self._prob_detect
            else:
                self._hypothesiser.prob_detect = self._prob_detect
        if hasattr(self, '_initiator'):
            self._initiator.filter.prob_detect = self._prob_detect

    def track(self, detections, timestamp):
        """Perform a single tracking step.

        Parameters
        ----------
        detections : iterable of Detection
            The detections received at ``timestamp``.
        timestamp : datetime.datetime
            The time of the detections.

        Returns
        -------
        : set of Track
            The updated set of tracks.
        """
        tracks = list(self.tracks)
        detections = list(detections)

        # Perform data association
        associations = self._associator.associate(tracks, detections, timestamp)

        # rho[j] = 1 for detections not associated to any track — these are
        # candidates for track birth in the PHD initiator.
        rho = np.ones((len(detections)))
        for i, track in enumerate(tracks):
            for hyp in associations[track]:
                if hyp:
                    j = next(d_i for d_i, detection in enumerate(detections)
                             if hyp.measurement == detection)
                    rho[j] = 0

        # Update tracks
        for track, hypothesis in associations.items():
            if hypothesis:
                # Detected: update and reset existence probability.
                state_post = self._updater.update(hypothesis)
                track.append(state_post)
                track.exist_prob = Probability(1.)
            else:
                # Missed detection: keep the prediction and decay the
                # existence probability.
                time_interval = timestamp - track.timestamp
                track.append(hypothesis.prediction)
                non_exist_weight = 1 - track.exist_prob
                prob_survive = np.exp(-self.prob_death * time_interval.total_seconds())
                non_det_weight = prob_survive * track.exist_prob
                track.exist_prob = non_det_weight / (non_exist_weight + non_det_weight)

        # Initiate new tracks from unassociated detections
        tracks = set(tracks)
        new_tracks = self._initiator.initiate(detections, timestamp, weights=rho)
        tracks |= new_tracks

        # Delete tracks whose existence probability has dropped too low
        del_tracks = set()
        for track in tracks:
            if track.exist_prob < 0.01:
                del_tracks.add(track)
        tracks -= del_tracks

        self._tracks = set(tracks)
        return self._tracks
def is_valid_config(config, **kwargs):
    """Return True if every sensor's actions target the expected positions.

    Debugging helper: checks that the i-th sensor's action set in ``config``
    moves the sensor to the coordinates given by keyword arguments ``x<i>``
    and ``y<i>`` (1-based, in ``config`` iteration order).

    Parameters
    ----------
    config : Mapping[Sensor, Sequence[Action]]
        Mapping of sensors to their candidate action sets.
    **kwargs : float
        Expected target coordinates, keyed ``x1``/``y1``, ``x2``/``y2``, ...

    Returns
    -------
    : bool
        True if all sensors' x/y location actions match the expected values.
    """
    num_sensors = len(kwargs) // 2  # one x and one y kwarg per sensor
    actions_sets = list(config.values())
    for i in range(num_sensors):
        expected_x = kwargs[f'x{i + 1}']
        expected_y = kwargs[f'y{i + 1}']
        actions = actions_sets[i]
        # Each action set is expected to contain exactly one 'location_x'
        # and one 'location_y' action; `next` raises StopIteration otherwise.
        action_x = next(
            action for action in actions if action.generator.attribute == 'location_x')
        action_y = next(
            action for action in actions if action.generator.attribute == 'location_y')
        if action_x.target_value != expected_x or action_y.target_value != expected_y:
            return False
    return True
..functions import gm_reduce_single -from ..tracker import Tracker -from ..types.array import StateVectors from ..types.detection import TrueDetection from ..base import Base, Property from ..predictor.kalman import KalmanPredictor -from ..types.numeric import Probability -from ..types.state import ParticleState -from ..types.update import GaussianStateUpdate from ..updater.kalman import ExtendedKalmanUpdater from ..types.track import Track from ..types.hypothesis import SingleHypothesis @@ -28,19 +15,6 @@ from ..sensor.action import Action -import multiprocessing as mpp - - -def imap_tqdm(pool, f, inputs, chunksize=None, **tqdm_kwargs): - # Calculation of chunksize taken from pool._map_async - if not chunksize: - chunksize, extra = divmod(len(inputs), len(pool._pool) * 4) - if extra: - chunksize += 1 - results = list(tqdm(pool.imap_unordered(f, inputs, chunksize=chunksize), total=len(inputs), **tqdm_kwargs)) - return results - - class RewardFunction(Base, ABC): """ The reward function base class. @@ -152,343 +126,3 @@ def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track] # Return value of configuration metric return config_metric - - -class RolloutUncertaintyRewardFunction(RewardFunction): - """A reward function which calculates the potential reduction in the uncertainty of track estimates - if a particular action is taken by a sensor or group of sensors. - - Given a configuration of sensors and actions, a metric is calculated for the potential - reduction in the uncertainty of the tracks that would occur if the sensing configuration - were used to make an observation. A larger value indicates a greater reduction in - uncertainty. 
- """ - - predictor: KalmanPredictor = Property(doc="Predictor used to predict the track to a new state") - updater: ExtendedKalmanUpdater = Property(doc="Updater used to update " - "the track to the new state.") - timesteps: int = Property(doc="Number of timesteps to rollout") - num_samples: int = Property(doc="Number of samples to take for each timestep", default=30) - interval: datetime.timedelta = Property(doc="Interval between timesteps", - default=datetime.timedelta(seconds=1)) - - def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track], - metric_time: datetime.datetime, *args, **kwargs): - """ - For a given configuration of sensors and actions this reward function calculates the - potential uncertainty reduction of each track by - computing the difference between the covariance matrix norms of the prediction - and the posterior assuming a predicted measurement corresponding to that prediction. - - This requires a mapping of sensors to action(s) - to be evaluated by reward function, a set of tracks at given time and the time at which - the actions would be carried out until. - - The metric returned is the total potential reduction in uncertainty across all tracks. - - Returns - ------- - : float - Metric of uncertainty for given configuration - - """ - - # Reward value - end_time = metric_time + datetime.timedelta(seconds=self.timesteps) - config_metric = self._rollout(config, tracks, metric_time, end_time) - - # Return value of configuration metric - return config_metric - - def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track], - timestamp: datetime.datetime, end_time: datetime.datetime): - """ - For a given configuration of sensors and actions this reward function calculates the - potential uncertainty reduction of each track by - computing the difference between the covariance matrix norms of the prediction - and the posterior assuming a predicted measurement corresponding to that prediction. 
- - This requires a mapping of sensors to action(s) - to be evaluated by reward function, a set of tracks at given time and the time at which - the actions would be carried out until. - - The metric returned is the total potential reduction in uncertainty across all tracks. - - Returns - ------- - : float - Metric of uncertainty for given configuration - - """ - - # Reward value - config_metric = 0 - - predicted_sensors = list() - memo = {} - - # For each sensor in the configuration - for sensor, actions in config.items(): - predicted_sensor = copy.deepcopy(sensor, memo) - predicted_sensor.add_actions(actions) - predicted_sensor.act(timestamp) - if isinstance(sensor, Sensor): - predicted_sensors.append(predicted_sensor) # checks if its a sensor - - # Create dictionary of predictions for the tracks in the configuration - predicted_tracks = set() - for track in tracks: - predicted_track = copy.copy(track) - predicted_track.append(self.predictor.predict(predicted_track, timestamp=timestamp)) - predicted_tracks.add(predicted_track) - - for sensor in predicted_sensors: - - # Assumes one detection per track - detections = {detection.groundtruth_path: detection - for detection in sensor.measure(predicted_tracks, noise=False) - if isinstance(detection, TrueDetection)} - - for predicted_track, detection in detections.items(): - # Generate hypothesis based on prediction/previous update and detection - hypothesis = SingleHypothesis(predicted_track.state, detection) - - # Do the update based on this hypothesis and store covariance matrix - update = self.updater.update(hypothesis) - - previous_cov_norm = np.linalg.norm(predicted_track.covar) - update_cov_norm = np.linalg.norm(update.covar) - - # Replace prediction with update - predicted_track.append(update) - - # Calculate metric for the track observation and add to the metric - # for the configuration - metric = previous_cov_norm - update_cov_norm - config_metric += metric - - if timestamp == end_time: - return config_metric 
- - timestamp = timestamp + datetime.timedelta(seconds=1) - - all_action_choices = dict() - for sensor in predicted_sensors: - # get action 'generator(s)' - action_generators = sensor.actions(timestamp) - # list possible action combinations for the sensor - action_choices = list(it.product(*action_generators)) - # dictionary of sensors: list(action combinations) - all_action_choices[sensor] = action_choices - - configs = list({sensor: action - for sensor, action in zip(all_action_choices.keys(), actionconfig)} - for actionconfig in it.product(*all_action_choices.values())) - - idx = np.random.choice(len(configs), self.num_samples) - configs = [configs[i] for i in idx] - - rewards = [self._rollout(config, tracks, timestamp, end_time) for config in configs] - config_metric += np.max(rewards) - - return config_metric - - -class RolloutPriorityRewardFunction(RewardFunction): - """A reward function which calculates the potential reduction in the uncertainty of track estimates - if a particular action is taken by a sensor or group of sensors. - - Given a configuration of sensors and actions, a metric is calculated for the potential - reduction in the uncertainty of the tracks that would occur if the sensing configuration - were used to make an observation. A larger value indicates a greater reduction in - uncertainty. 
- """ - - tracker: Tracker = Property(doc="Tracker used to track the tracks") - timesteps: int = Property(doc="Number of timesteps to rollout") - num_samples: int = Property(doc="Number of samples to take for each timestep", default=30) - interval: datetime.timedelta = Property(doc="Interval between timesteps", - default=datetime.timedelta(seconds=1)) - rfis: List[Any] = Property(doc="List of reward functions to use for prioritisation", - default=None) - prob_survive: Probability = Property(doc="Probability of survival", default=Probability(0.99)) - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - if self.rfis is None: - self.rfis = [] - def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track], - metric_time: datetime.datetime, *args, **kwargs): - """ - For a given configuration of sensors and actions this reward function calculates the - potential uncertainty reduction of each track by - computing the difference between the covariance matrix norms of the prediction - and the posterior assuming a predicted measurement corresponding to that prediction. - - This requires a mapping of sensors to action(s) - to be evaluated by reward function, a set of tracks at given time and the time at which - the actions would be carried out until. - - The metric returned is the total potential reduction in uncertainty across all tracks. 
- - Returns - ------- - : float - Metric of uncertainty for given configuration - - """ - - # Reward value - end_time = metric_time + datetime.timedelta(seconds=self.timesteps) - config_metric = self._rollout(config, tracks, metric_time, end_time) - - # Return value of configuration metric - return config_metric - - def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track], timestamp: datetime.datetime, end_time: datetime.datetime): - """ - For a given configuration of sensors and actions this reward function calculates the - potential uncertainty reduction of each track by - computing the difference between the covariance matrix norms of the prediction - and the posterior assuming a predicted measurement corresponding to that prediction. - - This requires a mapping of sensors to action(s) - to be evaluated by reward function, a set of tracks at given time and the time at which - the actions would be carried out until. - - The metric returned is the total potential reduction in uncertainty across all tracks. 
- - Returns - ------- - : float - Metric of uncertainty for given configuration - - """ - - # Reward value - config_metric = 0 - - predicted_sensors = list() - memo = {} - - # For each sensor in the configuration - for sensor, actions in config.items(): - predicted_sensor = copy.deepcopy(sensor, memo) - predicted_sensor.add_actions(actions) - predicted_sensor.act(timestamp) - if isinstance(sensor, Sensor): - predicted_sensors.append(predicted_sensor) # checks if its a sensor - - # Create dictionary of predictions for the tracks in the configuration - predicted_tracks = set() - for track in tracks: - predicted_track = copy.copy(track) - predicted_track.append(self.tracker._predictor.predict(predicted_track, timestamp=timestamp)) - predicted_tracks.add(predicted_track) - - tracks_copy = [copy.copy(track) for track in tracks] - fovs = [] - for sensor in predicted_sensors: - center = (sensor.position[0], sensor.position[1]) - radius = sensor.fov_radius - p = Point(center).buffer(radius) - fovs.append(p) - self.tracker.prob_detect = _prob_detect_func(fovs) - - for sensor in predicted_sensors: - - # Assumes one detection per track - detections = {detection - for detection in sensor.measure(predicted_tracks, noise=False) - if isinstance(detection, TrueDetection)} - - associations = self.tracker._associator.associate(tracks_copy, detections, timestamp) - - for track, multihypothesis in associations.items(): - - # calculate each Track's state as a Gaussian Mixture of - # its possible associations with each detection, then - # reduce the Mixture to a single Gaussian State - posterior_states = [] - posterior_state_weights = [] - for hypothesis in multihypothesis: - posterior_state_weights.append(hypothesis.probability) - if hypothesis: - posterior_states.append(self.tracker._updater.update(hypothesis)) - else: - posterior_states.append(hypothesis.prediction) - - # Merge/Collapse to single Gaussian - means = StateVectors([state.state_vector for state in posterior_states]) - 
covars = np.stack([state.covar for state in posterior_states], axis=2) - weights = np.asarray(posterior_state_weights) - - post_mean, post_covar = gm_reduce_single(means, covars, weights) - - track.append(GaussianStateUpdate( - np.array(post_mean), np.array(post_covar), - multihypothesis, - multihypothesis[0].prediction.timestamp)) - - for rfi in self.rfis: - xmin, ymin = rfi.region_of_interest.corners[0].longitude, rfi.region_of_interest.corners[0].latitude - xmax, ymax = rfi.region_of_interest.corners[1].longitude, rfi.region_of_interest.corners[1].latitude - geom = Polygon([(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]) - _, var = calculate_num_targets_dist(tracks_copy, geom) - if var < rfi.threshold: - # TODO: Need to select the priority - config_metric += rfi.priority_over_time.priority[0] #1/var - - - if timestamp == end_time: - return config_metric - - timestamp = timestamp + datetime.timedelta(seconds=1) - - all_action_choices = dict() - for sensor in predicted_sensors: - # get action 'generator(s)' - action_generators = sensor.actions(timestamp) - # list possible action combinations for the sensor - action_choices = list(it.product(*action_generators)) - # dictionary of sensors: list(action combinations) - all_action_choices[sensor] = action_choices - - configs = list({sensor: action - for sensor, action in zip(all_action_choices.keys(), actionconfig)} - for actionconfig in it.product(*all_action_choices.values())) - - idx = np.random.choice(len(configs), self.num_samples) - configs = [configs[i] for i in idx] - - rewards = [self._rollout(config, tracks_copy, timestamp, end_time) for config in configs] - config_metric += np.max(rewards) - - return config_metric - - -def _prob_detect_func(fovs): - """Closure to return the probability of detection function for a given environment scan""" - prob_detect = Probability(0.9) - # Get the union of all field of views - fovs_union = unary_union(fovs) - if fovs_union.geom_type == 'MultiPolygon': - fovs = 
[poly for poly in fovs_union] - else: - fovs = [fovs_union] - - # Probability of detection nested function - def prob_detect_func(state): - for poly in fovs: - if isinstance(state, ParticleState): - prob_detect_arr = np.full((len(state),), Probability(0.1)) - path_p = Path(poly.boundary) - points = state.state_vector[[0, 2], :].T - inside_points = path_p.contains_points(points) - prob_detect_arr[inside_points] = prob_detect - return prob_detect_arr - else: - point = Point(state.state_vector[0, 0], state.state_vector[2, 0]) - return prob_detect if poly.contains(point) else Probability(0) - - return prob_detect_func \ No newline at end of file From 27ff1fab9a7b902b4211d8e3410388e78a07a2ee Mon Sep 17 00:00:00 2001 From: sglvladi Date: Tue, 3 Jan 2023 14:18:46 +0000 Subject: [PATCH 36/87] Fix issue with shapely 2.0 --- stonesoup/custom/functions/__init__.py | 2 +- stonesoup/custom/sensormanager/reward.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/stonesoup/custom/functions/__init__.py b/stonesoup/custom/functions/__init__.py index 22cb00802..3cf81254a 100644 --- a/stonesoup/custom/functions/__init__.py +++ b/stonesoup/custom/functions/__init__.py @@ -396,7 +396,7 @@ def calculate_num_targets_dist(tracks: Set[Track], geom: BaseGeometry, num_samples = 100 mu_overall = 0 var_overall = np.inf if len(tracks) == 0 else 0 - path_p = Path(geom.boundary) + path_p = Path(geom.boundary.coords) # Calculate PHD density inside polygon if phd_state is not None: diff --git a/stonesoup/custom/sensormanager/reward.py b/stonesoup/custom/sensormanager/reward.py index e8f8335ee..87b5bd1b8 100644 --- a/stonesoup/custom/sensormanager/reward.py +++ b/stonesoup/custom/sensormanager/reward.py @@ -583,7 +583,7 @@ def _prob_detect_func(fovs): else: fovs = [fovs_union] - paths = [Path(poly.boundary) for poly in fovs] + paths = [Path(poly.boundary.coords) for poly in fovs] # Probability of detection nested function def prob_detect_func(state): From 
a8413b335001e950cf298caa26643debcd3ed126 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Fri, 6 Jan 2023 15:41:22 +0000 Subject: [PATCH 37/87] Fix possible values bug in location action generators --- stonesoup/custom/sensor/action/location.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/stonesoup/custom/sensor/action/location.py b/stonesoup/custom/sensor/action/location.py index a6f8b21c9..fcee4cfa2 100644 --- a/stonesoup/custom/sensor/action/location.py +++ b/stonesoup/custom/sensor/action/location.py @@ -94,7 +94,10 @@ def __contains__(self, item): if isinstance(item, self._action_cls): item = item.target_value - return self.min <= item <= self.max + possible_values = np.arange(self.min, self.max + self.resolution, self.resolution, + dtype=float) + + return possible_values[0] <= item <= possible_values[-1] def __iter__(self) -> Iterator[ChangeLocationAction]: """Returns all possible ChangePanTiltAction types""" @@ -111,7 +114,7 @@ def __iter__(self) -> Iterator[ChangeLocationAction]: def action_from_value(self, value): if value not in self: return None - possible_values = np.arange(self.min, self.max, self.resolution, dtype=float) + possible_values = np.arange(self.min, self.max + self.resolution, self.resolution, dtype=float) angle = get_nearest(possible_values, value) return self._action_cls(generator=self, end_time=self.end_time, From 94cd65d1fb80aa8a57d11b8d51c75accccd4b605 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Fri, 6 Jan 2023 15:42:29 +0000 Subject: [PATCH 38/87] Reduce gate threshold for distance gater in SMCPHD_JIPDA tracker --- stonesoup/custom/tracker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stonesoup/custom/tracker.py b/stonesoup/custom/tracker.py index c1201d5be..302bd9707 100644 --- a/stonesoup/custom/tracker.py +++ b/stonesoup/custom/tracker.py @@ -60,7 +60,7 @@ def __init__(self, *args, **kwargs): self.clutter_intensity, prob_detect=self.prob_detect, prob_survive=1-self.prob_death) - 
self._hypothesiser = DistanceGater(self._hypothesiser, Mahalanobis(), 10) + self._hypothesiser = DistanceGater(self._hypothesiser, Mahalanobis(), 5) self._associator = JIPDAWithEHM2(self._hypothesiser) resampler = SystematicResampler() From 0bcb91104987b2267f5ece77f6014889cfa26e3a Mon Sep 17 00:00:00 2001 From: sglvladi Date: Wed, 11 Jan 2023 15:55:16 +0000 Subject: [PATCH 39/87] Add timestamp debugging --- stonesoup/custom/initiator/smcphd.py | 2 ++ stonesoup/predictor/kalman.py | 3 +++ 2 files changed, 5 insertions(+) diff --git a/stonesoup/custom/initiator/smcphd.py b/stonesoup/custom/initiator/smcphd.py index a745f81df..d88c2d4ef 100644 --- a/stonesoup/custom/initiator/smcphd.py +++ b/stonesoup/custom/initiator/smcphd.py @@ -77,6 +77,8 @@ def predict(self, state, timestamp): """ prior_weights = state.weight + print(f"Timestamp: {timestamp}") + print(f"State timestamp: {state.timestamp}") time_interval = timestamp - state.timestamp # Predict particles forward diff --git a/stonesoup/predictor/kalman.py b/stonesoup/predictor/kalman.py index 52df07430..2f6633779 100644 --- a/stonesoup/predictor/kalman.py +++ b/stonesoup/predictor/kalman.py @@ -179,6 +179,9 @@ def predict(self, prior, timestamp=None, **kwargs): """ + print(f"Timestamp: {timestamp}") + print(f"State timestamp: {prior.timestamp}") + # Get the prediction interval predict_over_interval = self._predict_over_interval(prior, timestamp) From 3e7efa9cf725f0dbf103a94b4a70729de9811af4 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Fri, 13 Jan 2023 09:39:27 +0000 Subject: [PATCH 40/87] Remove timestamp debugging --- stonesoup/custom/initiator/smcphd.py | 2 -- stonesoup/predictor/kalman.py | 3 --- 2 files changed, 5 deletions(-) diff --git a/stonesoup/custom/initiator/smcphd.py b/stonesoup/custom/initiator/smcphd.py index d88c2d4ef..a745f81df 100644 --- a/stonesoup/custom/initiator/smcphd.py +++ b/stonesoup/custom/initiator/smcphd.py @@ -77,8 +77,6 @@ def predict(self, state, timestamp): """ prior_weights = 
state.weight - print(f"Timestamp: {timestamp}") - print(f"State timestamp: {state.timestamp}") time_interval = timestamp - state.timestamp # Predict particles forward diff --git a/stonesoup/predictor/kalman.py b/stonesoup/predictor/kalman.py index 2f6633779..52df07430 100644 --- a/stonesoup/predictor/kalman.py +++ b/stonesoup/predictor/kalman.py @@ -179,9 +179,6 @@ def predict(self, prior, timestamp=None, **kwargs): """ - print(f"Timestamp: {timestamp}") - print(f"State timestamp: {prior.timestamp}") - # Get the prediction interval predict_over_interval = self._predict_over_interval(prior, timestamp) From 9f1333ff68c650540930d519ce726956c757fe40 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Wed, 11 Jan 2023 16:11:50 +0000 Subject: [PATCH 41/87] Fix: timezone aware timestmap in SMCPHD_JIPDA --- stonesoup/custom/initiator/smcphd.py | 2 ++ stonesoup/custom/tracker.py | 15 ++++++--------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/stonesoup/custom/initiator/smcphd.py b/stonesoup/custom/initiator/smcphd.py index a745f81df..639715668 100644 --- a/stonesoup/custom/initiator/smcphd.py +++ b/stonesoup/custom/initiator/smcphd.py @@ -263,6 +263,8 @@ def __init__(self, *args, **kwargs): def initiate(self, detections, timestamp, weights=None, **kwargs): tracks = set() + if self._state.timestamp is None: + self._state.timestamp = timestamp # Predict forward prediction = self.filter.predict(self._state, timestamp) diff --git a/stonesoup/custom/tracker.py b/stonesoup/custom/tracker.py index 302bd9707..b654fe5df 100644 --- a/stonesoup/custom/tracker.py +++ b/stonesoup/custom/tracker.py @@ -1,5 +1,5 @@ from copy import copy -from datetime import datetime +from datetime import datetime, timezone import numpy as np from scipy.stats import multivariate_normal @@ -50,9 +50,6 @@ def __init__(self, *args, **kwargs): self.prob_detect = self.prob_detection - if self.start_time is None: - self.start_time = datetime.now() - self._tracks = set() self._predictor = 
KalmanPredictor(self.transition_model) self._updater = KalmanUpdater(self.measurement_model) @@ -129,11 +126,11 @@ def track(self, detections, timestamp): if hyp.measurement == detection) assoc_prob_matrix[i, j + 1] = hyp.weight for j, detection in enumerate(detections): - rho_tmp = 0 if len(assoc_prob_matrix) and np.sum(assoc_prob_matrix[:, j + 1]) > 0 else 1 - # rho_tmp = 1 - # if len(assoc_prob_matrix): - # for i, track in enumerate(tracks): - # rho_tmp *= 1 - assoc_prob_matrix[i, j + 1] + # rho_tmp = 0 if len(assoc_prob_matrix) and np.sum(assoc_prob_matrix[:, j + 1]) > 0 else 1 + rho_tmp = 1 + if len(assoc_prob_matrix): + for i, track in enumerate(tracks): + rho_tmp *= 1 - assoc_prob_matrix[i, j + 1] rho[j] = rho_tmp # Update tracks From 0430200fb1e55a2f734b20a8ed6505868196a43b Mon Sep 17 00:00:00 2001 From: sglvladi Date: Tue, 17 Jan 2023 17:56:52 +0000 Subject: [PATCH 42/87] Fix wrong weighing of measurements in SMC_PHD --- stonesoup/custom/initiator/smcphd.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stonesoup/custom/initiator/smcphd.py b/stonesoup/custom/initiator/smcphd.py index 639715668..fc058ea23 100644 --- a/stonesoup/custom/initiator/smcphd.py +++ b/stonesoup/custom/initiator/smcphd.py @@ -232,10 +232,10 @@ def get_weights_per_hypothesis(self, prediction, detections, meas_weights): # Calculate w^{n,i} Eq. 
(20) of [2] try: - Ck = np.log(meas_weights) + np.log(prob_detect[:, np.newaxis]) + g \ + Ck = np.log(prob_detect[:, np.newaxis]) + g \ + np.log(prediction.weight[:, np.newaxis]) except IndexError: - Ck = np.log(meas_weights) + np.log(prob_detect) + g \ + Ck = np.log(prob_detect) + g \ + np.log(prediction.weight[:, np.newaxis]) C = logsumexp(np.asfarray(Ck), axis=0) k = np.log([detection.metadata['clutter_density'] @@ -245,7 +245,7 @@ def get_weights_per_hypothesis(self, prediction, detections, meas_weights): weights_per_hyp = np.full((num_samples, len(detections) + 1), -np.inf) weights_per_hyp[:, 0] = np.log(1 - prob_detect) + np.log(prediction.weight) if len(detections): - weights_per_hyp[:, 1:] = Ck - C_plus + weights_per_hyp[:, 1:] = np.log(meas_weights) + Ck - C_plus return Probability.from_log_ufunc(weights_per_hyp) From 41f8069c0f6e81cde5fc96301717eb21f448cf7f Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 23 Jan 2023 12:59:41 +0000 Subject: [PATCH 43/87] Increase gate threshold distance in SMCPHD_JIPDA track --- stonesoup/custom/tracker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stonesoup/custom/tracker.py b/stonesoup/custom/tracker.py index b654fe5df..9ec7714f8 100644 --- a/stonesoup/custom/tracker.py +++ b/stonesoup/custom/tracker.py @@ -57,7 +57,7 @@ def __init__(self, *args, **kwargs): self.clutter_intensity, prob_detect=self.prob_detect, prob_survive=1-self.prob_death) - self._hypothesiser = DistanceGater(self._hypothesiser, Mahalanobis(), 5) + self._hypothesiser = DistanceGater(self._hypothesiser, Mahalanobis(), 10) self._associator = JIPDAWithEHM2(self._hypothesiser) resampler = SystematicResampler() From f5463f3b2acbe81e6309c335d5d4dd0c423f92ad Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 23 Jan 2023 13:22:49 +0000 Subject: [PATCH 44/87] Added fov_in_km to MoveableUAVSensor --- stonesoup/custom/sensor/movable.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/stonesoup/custom/sensor/movable.py 
b/stonesoup/custom/sensor/movable.py index c072788d9..9aeeb3c7e 100644 --- a/stonesoup/custom/sensor/movable.py +++ b/stonesoup/custom/sensor/movable.py @@ -2,6 +2,8 @@ from typing import Union, List, Set import numpy as np +import geopandas as gpd +from shapely.geometry import Point from stonesoup.base import Property from stonesoup.custom.sensor.action.location import LocationActionGenerator @@ -50,6 +52,9 @@ class MovableUAVCamera(Sensor): doc="The sensor min max location", default=None ) + fov_in_km: bool = Property( + doc="Whether the FOV radius is in kilo-meters or degrees", + default=False) @location_x.setter def location_x(self, value): @@ -92,6 +97,11 @@ def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, distance = np.linalg.norm(norm_measurement_vector[0:2]) + if self.fov_in_km: + # Note: this is a very approximate conversion, and does not take into account the + # curvature of the earth. Should be replaced with a more accurate check. + distance *= 110.574 + # Do not measure if state not in FOV if distance > self.fov_radius: continue From f4e8b01da05d1ba10821f9cbc5d2435b1b2bd61c Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 23 Jan 2023 13:23:31 +0000 Subject: [PATCH 45/87] Rollout updates and FOLLOW RFI --- stonesoup/custom/functions/__init__.py | 13 +- stonesoup/custom/sensormanager/reward.py | 310 ++++++++++++++++++++++- 2 files changed, 308 insertions(+), 15 deletions(-) diff --git a/stonesoup/custom/functions/__init__.py b/stonesoup/custom/functions/__init__.py index 3cf81254a..1f6bcae23 100644 --- a/stonesoup/custom/functions/__init__.py +++ b/stonesoup/custom/functions/__init__.py @@ -1,5 +1,5 @@ import math -from typing import Set +from typing import Set, List import numpy as np from matplotlib.path import Path @@ -392,7 +392,7 @@ def rigid_transform_3D(A, B): def calculate_num_targets_dist(tracks: Set[Track], geom: BaseGeometry, - phd_state: ParticleState = None): + phd_state: ParticleState = None, target_types: 
List[str] = None): num_samples = 100 mu_overall = 0 var_overall = np.inf if len(tracks) == 0 else 0 @@ -411,6 +411,12 @@ def calculate_num_targets_dist(tracks: Set[Track], geom: BaseGeometry, # Calculate number of tracks inside polygon for track in tracks: + + if target_types \ + and not any(item in track.metadata['target_type_confidences'] + for item in target_types): + continue + # Sample points from the track state points = multivariate_normal.rvs(mean=track.state_vector[[0, 2]].ravel(), cov=track.covar[[0, 2], :][:, [0, 2]], @@ -426,4 +432,7 @@ def calculate_num_targets_dist(tracks: Set[Track], geom: BaseGeometry, # times the probability of failure var_overall += p_success * (1 - p_success) + if var_overall == 0: + var_overall = np.inf + return mu_overall, var_overall \ No newline at end of file diff --git a/stonesoup/custom/sensormanager/reward.py b/stonesoup/custom/sensormanager/reward.py index 87b5bd1b8..0ecab3e66 100644 --- a/stonesoup/custom/sensormanager/reward.py +++ b/stonesoup/custom/sensormanager/reward.py @@ -8,6 +8,7 @@ from shapely.geometry import Point, Polygon from shapely.ops import unary_union +from reactive_isr_core.data import TaskType from stonesoup.base import Property from stonesoup.custom.functions import calculate_num_targets_dist from stonesoup.custom.tracker import SMCPHD_JIPDA @@ -270,13 +271,6 @@ def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track] tracks_copy = [copy.copy(track) for track in tracks] - # fovs = [] - # for sensor in predicted_sensors: - # center = (sensor.position[0], sensor.position[1]) - # radius = sensor.fov_radius - # p = Point(center).buffer(radius) - # fovs.append(p) - # self.tracker.prob_detect = _prob_detect_func(fovs) for sensor in predicted_sensors: @@ -335,12 +329,23 @@ def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track] xmin, ymin = rfi.region_of_interest.corners[0].longitude, rfi.region_of_interest.corners[0].latitude xmax, ymax = 
rfi.region_of_interest.corners[1].longitude, rfi.region_of_interest.corners[1].latitude geom = Polygon([(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]) - _, var = calculate_num_targets_dist(tracks_copy, geom) - if var < rfi.threshold: - # TODO: Need to select the priority - config_metric += rfi.priority_over_time.priority[0] - if self.use_variance: - config_metric += 1/var + + if rfi.task_type == TaskType.COUNT: + target_types = [t.target_type.value for t in rfi.targets] + _, var = calculate_num_targets_dist(tracks_copy, geom, target_types=target_types) + if var < rfi.threshold_over_time.threshold[0]: + # TODO: Need to select the priority + config_metric += rfi.priority_over_time.priority[0] + if self.use_variance: + config_metric += 1/var + elif rfi.task_type == TaskType.FOLLOW: + for target in rfi.targets: + track = next((track for track in tracks_copy + if track.id == str(target.target_UUID)), None) + if track is not None: + var = track.covar[0, 0] + track.covar[2, 2] + if var < rfi.threshold_over_time.threshold[0]: + config_metric += rfi.priority_over_time.priority[0] if timestamp == end_time: @@ -396,6 +401,284 @@ class RolloutPriorityRewardFunction2(RewardFunction): uncertainty. 
""" + tracker: Tracker = Property(doc="Tracker used to track the tracks") + timesteps: int = Property(doc="Number of timesteps to rollout") + num_samples: int = Property(doc="Number of samples to take for each timestep", default=30) + interval: datetime.timedelta = Property(doc="Interval between timesteps", + default=datetime.timedelta(seconds=1)) + rfis: List[Any] = Property(doc="List of reward functions to use for prioritisation", + default=None) + prob_survive: Probability = Property(doc="Probability of survival", default=Probability(0.99)) + use_variance: bool = Property(doc="Use variance in prioritisation", default=False) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.rfis is None: + self.rfis = [] + + def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track], + metric_time: datetime.datetime, *args, **kwargs): + """ + For a given configuration of sensors and actions this reward function calculates the + potential uncertainty reduction of each track by + computing the difference between the covariance matrix norms of the prediction + and the posterior assuming a predicted measurement corresponding to that prediction. + + This requires a mapping of sensors to action(s) + to be evaluated by reward function, a set of tracks at given time and the time at which + the actions would be carried out until. + + The metric returned is the total potential reduction in uncertainty across all tracks. 
+ + Returns + ------- + : float + Metric of uncertainty for given configuration + + """ + + if not len(self.rfis): + return 0 + + # Reward value + end_time = metric_time + datetime.timedelta(seconds=self.timesteps) + + # Reward value + config_metric, updated_tracks, predicted_sensors = self._compute_metric(config, tracks, + metric_time) + + if metric_time == end_time: + return config_metric + + timestamp = metric_time + datetime.timedelta(seconds=1) + + all_action_choices = dict() + for sensor in predicted_sensors: + # get action 'generator(s)' + action_generators = sensor.actions(timestamp) + # list possible action combinations for the sensor + action_choices = list(it.product(*action_generators)) + # dictionary of sensors: list(action combinations) + all_action_choices[sensor] = action_choices + + # configs = list({sensor: action + # for sensor, action in zip(all_action_choices.keys(), actionconfig)} + # for actionconfig in it.product(*all_action_choices.values())) + configs = [] + poss = [] + for actionconfig in it.product(*all_action_choices.values()): + cfg = dict() + pos = set() + for sensor, actions in zip(all_action_choices.keys(), actionconfig): + action_x = next( + action for action in actions if action.generator.attribute == 'location_x') + action_y = next( + action for action in actions if action.generator.attribute == 'location_y') + cfg[sensor] = actions + pos.add((action_x.target_value, action_y.target_value)) + if pos not in poss: + configs.append(cfg) + poss.append(pos) + + if len(configs) > self.num_samples: + idx = np.random.choice(len(configs), self.num_samples, replace=False) + configs = [configs[i] for i in idx] + + rewards = [config_metric + self._rollout(config, updated_tracks, timestamp, end_time) + for config in configs] + + # Return value of configuration metric + return np.max(rewards) + + def _compute_metric(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track], + timestamp: datetime.datetime): + + # Reward value + 
config_metric = 0 + + predicted_sensors = list() + memo = {} + + # For each sensor in the configuration + for sensor, actions in config.items(): + predicted_sensor = copy.deepcopy(sensor, memo) + predicted_sensor.add_actions(actions) + predicted_sensor.act(timestamp) + if isinstance(sensor, Sensor): + predicted_sensors.append(predicted_sensor) # checks if its a sensor + + # Create dictionary of predictions for the tracks in the configuration + predicted_tracks = set() + for track in tracks: + predicted_track = copy.copy(track) + predicted_track.append( + self.tracker._predictor.predict(predicted_track, timestamp=timestamp)) + time_interval = timestamp - predicted_track.timestamp + prob_survive = np.exp(-self.tracker.prob_death * time_interval.total_seconds()) + track.exist_prob = prob_survive * track.exist_prob + predicted_tracks.add(predicted_track) + + tracks_copy = [copy.copy(track) for track in tracks] + + for sensor in predicted_sensors: + + # Assumes one detection per track + detections = {detection + for detection in sensor.measure(predicted_tracks, noise=False) + if isinstance(detection, TrueDetection)} + + center = (sensor.position[0], sensor.position[1]) + radius = sensor.fov_radius + p = Point(center).buffer(radius) + self.tracker.prob_detect = _prob_detect_func([p]) + + associations = self.tracker._associator.associate(tracks_copy, detections, timestamp) + + for track, multihypothesis in associations.items(): + if isinstance(self.tracker, SMCPHD_JIPDA): + # calculate each Track's state as a Gaussian Mixture of + # its possible associations with each detection, then + # reduce the Mixture to a single Gaussian State + posterior_states = [] + posterior_state_weights = [] + for hypothesis in multihypothesis: + posterior_state_weights.append(hypothesis.probability) + if hypothesis: + posterior_states.append(self.tracker._updater.update(hypothesis)) + else: + posterior_states.append(hypothesis.prediction) + + # Merge/Collapse to single Gaussian + means = 
StateVectors([state.state_vector for state in posterior_states]) + covars = np.stack([state.covar for state in posterior_states], axis=2) + weights = np.asarray(posterior_state_weights) + + post_mean, post_covar = gm_reduce_single(means, covars, weights) + + track.append(GaussianStateUpdate( + np.array(post_mean), np.array(post_covar), + multihypothesis, + multihypothesis[0].prediction.timestamp)) + else: + if multihypothesis: + # Update track + state_post = self.tracker._updater.update(multihypothesis) + track.append(state_post) + track.exist_prob = Probability(1.) + else: + time_interval = timestamp - track.timestamp + track.append(multihypothesis.prediction) + non_exist_weight = 1 - track.exist_prob + prob_survive = np.exp( + -self.tracker.prob_death * time_interval.total_seconds()) + non_det_weight = prob_survive * track.exist_prob + track.exist_prob = non_det_weight / (non_exist_weight + non_det_weight) + var = np.inf + for rfi in self.rfis: + xmin, ymin = rfi.region_of_interest.corners[0].longitude, \ + rfi.region_of_interest.corners[0].latitude + xmax, ymax = rfi.region_of_interest.corners[1].longitude, \ + rfi.region_of_interest.corners[1].latitude + geom = Polygon([(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]) + + if rfi.task_type == TaskType.COUNT: + target_types = [t.target_type.value for t in rfi.targets] + _, var = calculate_num_targets_dist(tracks_copy, geom, target_types=target_types) + if var < rfi.threshold_over_time.threshold[0]: + # TODO: Need to select the priority + config_metric += rfi.priority_over_time.priority[0] + if self.use_variance: + config_metric += 1 / var + elif rfi.task_type == TaskType.FOLLOW: + for target in rfi.targets: + track = next((track for track in tracks_copy + if track.id == str(target.target_UUID)), None) + if track is not None: + var = track.covar[0, 0] + track.covar[2, 2] + if var < rfi.threshold_over_time.threshold[0]: + config_metric += rfi.priority_over_time.priority[0] + + return config_metric, 
tracks_copy, predicted_sensors + + def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track], + timestamp: datetime.datetime, end_time: datetime.datetime): + """ + For a given configuration of sensors and actions this reward function calculates the + potential uncertainty reduction of each track by + computing the difference between the covariance matrix norms of the prediction + and the posterior assuming a predicted measurement corresponding to that prediction. + + This requires a mapping of sensors to action(s) + to be evaluated by reward function, a set of tracks at given time and the time at which + the actions would be carried out until. + + The metric returned is the total potential reduction in uncertainty across all tracks. + + Returns + ------- + : float + Metric of uncertainty for given configuration + + """ + + if not len(self.rfis): + return 0 + + # Reward value + config_metric, updated_tracks, predicted_sensors = self._compute_metric(config, tracks, + timestamp) + + if timestamp == end_time: + return config_metric + + timestamp = timestamp + datetime.timedelta(seconds=1) + + all_action_choices = dict() + for sensor in predicted_sensors: + # get action 'generator(s)' + action_generators = sensor.actions(timestamp) + # list possible action combinations for the sensor + action_choices = list(it.product(*action_generators)) + # dictionary of sensors: list(action combinations) + all_action_choices[sensor] = action_choices + + # configs = list({sensor: action + # for sensor, action in zip(all_action_choices.keys(), actionconfig)} + # for actionconfig in it.product(*all_action_choices.values())) + configs = [] + poss = [] + for actionconfig in it.product(*all_action_choices.values()): + cfg = dict() + pos = set() + for sensor, actions in zip(all_action_choices.keys(), actionconfig): + action_x = next( + action for action in actions if action.generator.attribute == 'location_x') + action_y = next( + action for action in actions if 
action.generator.attribute == 'location_y') + cfg[sensor] = actions + pos.add((action_x.target_value, action_y.target_value)) + if pos not in poss: + configs.append(cfg) + poss.append(pos) + + idx = np.random.choice(len(configs), 1, replace=False) + next_config = configs[idx[0]] + + config_metric += self._rollout(next_config, updated_tracks, timestamp, end_time) + + return config_metric + + +class RolloutPriorityRewardFunction3(RewardFunction): + """A reward function which calculates the potential reduction in the uncertainty of track estimates + if a particular action is taken by a sensor or group of sensors. + + Given a configuration of sensors and actions, a metric is calculated for the potential + reduction in the uncertainty of the tracks that would occur if the sensing configuration + were used to make an observation. A larger value indicates a greater reduction in + uncertainty. + """ + predictor: KalmanPredictor = Property(doc="Predictor used to predict the track to a new state") updater: ExtendedKalmanUpdater = Property(doc="Updater used to update " "the track to the new state.") @@ -572,6 +855,7 @@ def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track] config_metric += np.max(rewards) return config_metric, var + def _prob_detect_func(fovs): """Closure to return the probability of detection function for a given environment scan""" From f485961ae0507c1b5a2b48c4f7112a844208b5df Mon Sep 17 00:00:00 2001 From: sglvladi Date: Thu, 26 Jan 2023 13:36:48 +0000 Subject: [PATCH 46/87] Minor correction to calculate_num_targets_dist --- stonesoup/custom/functions/__init__.py | 33 ++++++++++---------------- 1 file changed, 13 insertions(+), 20 deletions(-) diff --git a/stonesoup/custom/functions/__init__.py b/stonesoup/custom/functions/__init__.py index 1f6bcae23..d1a14ad70 100644 --- a/stonesoup/custom/functions/__init__.py +++ b/stonesoup/custom/functions/__init__.py @@ -48,7 +48,6 @@ def getBoundingPolygon(FOVh, FOVv, altitude, roll, pitch, 
heading): @staticmethod def getFovRPH(intersections, altitude): - # Calculate unit vectors to the ground, assuming camera is at the origin rotVecs = [Vector(i.x, i.y, -altitude).normalize() for i in intersections] @@ -57,13 +56,13 @@ def getFovRPH(intersections, altitude): rot1 = rotation_matrix_from_vectors(Vector(z=-1), centroidVec).T # Get vectors after first rotation - rotVecs1 = [Vector(*(rot1@np.array([[r.x], [r.y], [r.z]])).flatten()) for r in rotVecs] + rotVecs1 = [Vector(*(rot1 @ np.array([[r.x], [r.y], [r.z]])).flatten()) for r in rotVecs] # Second rotation alligns the centroid of the polygon with the negative y axis - rot2 = rotation_matrix_from_vectors(Vector(y=1), (rotVecs1[0]-rotVecs1[1]).normalize()).T + rot2 = rotation_matrix_from_vectors(Vector(y=1), (rotVecs1[0] - rotVecs1[1]).normalize()).T # Get final rotation matrix - R = rot2@rot1 + R = rot2 @ rot1 # Calculate roll, pitch and heading roll, pitch, heading = roll_pitch_yaw_from_matrix(R.T) @@ -247,7 +246,6 @@ def findRayGroundIntersection(ray, origin): def get_camera_footprint(camera): - # altitude = camera.position[2] # try: # pan, tilt = camera.pan_tilt @@ -264,7 +262,7 @@ def get_camera_footprint(camera): # Once the camera is rotated, the z axis becomes the x axis, and the x axis becomes the z axis # TODO: More testing is needed to make sure this is correct roll, pitch, heading = (camera.orientation[2], - camera.orientation[1] + np.pi/2, + camera.orientation[1] + np.pi / 2, camera.orientation[0]) xmin, xmax, ymin, ymax = get_camera_footprint_low(camera.position, roll, pitch, heading, @@ -377,7 +375,7 @@ def rigid_transform_3D(A, B): Bm = B - centroid_B H = Am @ np.transpose(Bm) # sanity check - #if linalg.matrix_rank(H) < 3: + # if linalg.matrix_rank(H) < 3: # raise ValueError("rank of H = {}, expecting 3".format(linalg.matrix_rank(H))) # find rotation U, S, Vt = np.linalg.svd(H) @@ -385,7 +383,7 @@ def rigid_transform_3D(A, B): # special reflection case if np.linalg.det(R) < 0: print("det(R) 
< R, reflection detected!, correcting for it ...") - Vt[2,:] *= -1 + Vt[2, :] *= -1 R = Vt.T @ U.T t = -R @ centroid_A + centroid_B return R, t @@ -394,8 +392,12 @@ def rigid_transform_3D(A, B): def calculate_num_targets_dist(tracks: Set[Track], geom: BaseGeometry, phd_state: ParticleState = None, target_types: List[str] = None): num_samples = 100 + valid_tracks = [track for track in tracks + if not (target_types) + or (target_types and any(item in track.metadata['target_type_confidences'] + for item in target_types))] mu_overall = 0 - var_overall = np.inf if len(tracks) == 0 else 0 + var_overall = np.inf if len(valid_tracks) == 0 else 0 path_p = Path(geom.boundary.coords) # Calculate PHD density inside polygon @@ -410,13 +412,7 @@ def calculate_num_targets_dist(tracks: Set[Track], geom: BaseGeometry, var_overall = mu_overall # Calculate number of tracks inside polygon - for track in tracks: - - if target_types \ - and not any(item in track.metadata['target_type_confidences'] - for item in target_types): - continue - + for track in valid_tracks: # Sample points from the track state points = multivariate_normal.rvs(mean=track.state_vector[[0, 2]].ravel(), cov=track.covar[[0, 2], :][:, [0, 2]], @@ -432,7 +428,4 @@ def calculate_num_targets_dist(tracks: Set[Track], geom: BaseGeometry, # times the probability of failure var_overall += p_success * (1 - p_success) - if var_overall == 0: - var_overall = np.inf - - return mu_overall, var_overall \ No newline at end of file + return mu_overall, var_overall From 34156f32d8472420c63b39ef2368c6c153dc9d32 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Thu, 26 Jan 2023 15:20:10 +0000 Subject: [PATCH 47/87] Remove unnecessary imports --- stonesoup/custom/sensor/movable.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/stonesoup/custom/sensor/movable.py b/stonesoup/custom/sensor/movable.py index 9aeeb3c7e..987137825 100644 --- a/stonesoup/custom/sensor/movable.py +++ b/stonesoup/custom/sensor/movable.py @@ -2,8 +2,6 @@ from 
typing import Union, List, Set import numpy as np -import geopandas as gpd -from shapely.geometry import Point from stonesoup.base import Property from stonesoup.custom.sensor.action.location import LocationActionGenerator From 9fa177bcbd2f2db10dfb7b962ab0c9bd52e67931 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 30 Jan 2023 22:47:50 +0000 Subject: [PATCH 48/87] Implement Gaussian Mixture based birth intensity in SMCPHDFilter --- stonesoup/custom/initiator/smcphd.py | 59 ++++++++++++++++++++-------- 1 file changed, 43 insertions(+), 16 deletions(-) diff --git a/stonesoup/custom/initiator/smcphd.py b/stonesoup/custom/initiator/smcphd.py index fc058ea23..bcdc7e4bf 100644 --- a/stonesoup/custom/initiator/smcphd.py +++ b/stonesoup/custom/initiator/smcphd.py @@ -13,6 +13,7 @@ from stonesoup.types.array import StateVectors from stonesoup.types.detection import Detection, MissedDetection from stonesoup.types.hypothesis import SingleProbabilityHypothesis +from stonesoup.types.mixture import GaussianMixture from stonesoup.types.multihypothesis import MultipleHypothesis from stonesoup.types.numeric import Probability from stonesoup.types.prediction import Prediction @@ -90,13 +91,25 @@ def predict(self, state, timestamp): num_birth = round(float(self.prob_birth) * self.num_samples) # Sample birth particles - birth_particles = multivariate_normal.rvs(self.birth_density.mean.ravel(), - self.birth_density.covar, - num_birth) + birth_particles = np.zeros((pred_particles_sv.shape[0], 0)) + if isinstance(self.birth_density, GaussianMixture): + particles_per_component = num_birth // len(self.birth_density) + for i, component in enumerate(self.birth_density): + if i == len(self.birth_density) - 1: + particles_per_component += num_birth % len(self.birth_density) + birth_particles_component = multivariate_normal.rvs( + component.mean.ravel(), + component.covar, + particles_per_component).T + birth_particles = np.hstack((birth_particles, birth_particles_component)) + else: + 
birth_particles = multivariate_normal.rvs(self.birth_density.mean.ravel(), + self.birth_density.covar, + num_birth) birth_weights = np.full((num_birth,), Probability(self.birth_rate / num_birth)) # Surviving particle weights - prob_survive = np.exp(-float(self.prob_death)*time_interval.total_seconds()) + prob_survive = np.exp(-float(self.prob_death) * time_interval.total_seconds()) pred_weights = prob_survive * prior_weights # Append birth particles to predicted ones @@ -111,13 +124,27 @@ def predict(self, state, timestamp): birth_inds = np.flatnonzero(np.random.binomial(1, self.prob_birth, self.num_samples)) # Sample birth particles and replace in original state vector matrix - birth_particles = multivariate_normal.rvs(self.birth_density.mean.ravel(), - self.birth_density.covar, - len(birth_inds)) - pred_particles_sv[:, birth_inds] = birth_particles.T + num_birth = len(birth_inds) + birth_particles = np.zeros((pred_particles_sv.shape[0], 0)) + if isinstance(self.birth_density, GaussianMixture): + particles_per_component = num_birth // len(self.birth_density) + for i, component in enumerate(self.birth_density): + if i == len(self.birth_density) - 1: + particles_per_component += num_birth % len(self.birth_density) + birth_particles_component = multivariate_normal.rvs( + component.mean.ravel(), + component.covar, + particles_per_component).T + birth_particles = np.hstack((birth_particles, birth_particles_component)) + else: + birth_particles = multivariate_normal.rvs(self.birth_density.mean.ravel(), + self.birth_density.covar, + len(birth_inds)).T + pred_particles_sv[:, birth_inds] = birth_particles # Process weights - pred_weights = ((1 - self.prob_death) + Probability(self.birth_rate / total_samples)) * prior_weights + pred_weights = ((1 - self.prob_death) + Probability( + self.birth_rate / total_samples)) * prior_weights prediction = Prediction.from_state(state, state_vector=pred_particles_sv, weight=pred_weights, @@ -210,7 +237,7 @@ def iterate(self, state, 
detections: List[Detection], timestamp): def get_measurement_loglikelihoods(self, prediction, detections, meas_weights): num_samples = prediction.state_vector.shape[1] # Compute g(z|x) matrix as in [1] - g = np.zeros((num_samples, len(detections)), dtype=Probability) + g = np.zeros((num_samples, len(detections))) for i, detection in enumerate(detections): if not meas_weights[i]: g[:, i] = -np.inf @@ -219,7 +246,7 @@ def get_measurement_loglikelihoods(self, prediction, detections, meas_weights): noise=True) return g - def get_weights_per_hypothesis(self, prediction, detections, meas_weights): + def get_weights_per_hypothesis(self, prediction, detections, meas_weights, *args, **kwargs): num_samples = prediction.state_vector.shape[1] if meas_weights is None: meas_weights = np.array([Probability(1) for _ in range(len(detections))]) @@ -233,19 +260,19 @@ def get_weights_per_hypothesis(self, prediction, detections, meas_weights): # Calculate w^{n,i} Eq. (20) of [2] try: Ck = np.log(prob_detect[:, np.newaxis]) + g \ - + np.log(prediction.weight[:, np.newaxis]) + + np.log(prediction.weight[:, np.newaxis].astype(float)) except IndexError: Ck = np.log(prob_detect) + g \ - + np.log(prediction.weight[:, np.newaxis]) - C = logsumexp(np.asfarray(Ck), axis=0) + + np.log(prediction.weight[:, np.newaxis].astype(float)) + C = logsumexp(Ck, axis=0) k = np.log([detection.metadata['clutter_density'] if 'clutter_density' in detection.metadata else self.clutter_intensity for detection in detections]) C_plus = np.logaddexp(C, k) weights_per_hyp = np.full((num_samples, len(detections) + 1), -np.inf) - weights_per_hyp[:, 0] = np.log(1 - prob_detect) + np.log(prediction.weight) + weights_per_hyp[:, 0] = np.log(1 - prob_detect) + np.log(np.asfarray(prediction.weight)) if len(detections): - weights_per_hyp[:, 1:] = np.log(meas_weights) + Ck - C_plus + weights_per_hyp[:, 1:] = np.log(np.asfarray(meas_weights)) + Ck - C_plus return Probability.from_log_ufunc(weights_per_hyp) From 
4a34b81f5ac0b4b6ab40ec039b6bf02e66d553b7 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 30 Jan 2023 22:49:34 +0000 Subject: [PATCH 49/87] Add implementation of Improved SMCPHD Filter (ISMCPHDFilter) and initiator --- stonesoup/custom/initiator/smcphd.py | 247 +++++++++++++++++++++++++++ 1 file changed, 247 insertions(+) diff --git a/stonesoup/custom/initiator/smcphd.py b/stonesoup/custom/initiator/smcphd.py index bcdc7e4bf..ee9cde455 100644 --- a/stonesoup/custom/initiator/smcphd.py +++ b/stonesoup/custom/initiator/smcphd.py @@ -277,6 +277,195 @@ def get_weights_per_hypothesis(self, prediction, detections, meas_weights, *args return Probability.from_log_ufunc(weights_per_hyp) +class ISMCPHDFilter(SMCPHDFilter): + def predict(self, state, timestamp): + """ + Predict the next state of the target density + + Parameters + ---------- + state: :class:`~.State` + The current state of the target + timestamp: :class:`datetime.datetime` + The time at which the state is valid + + Returns + ------- + : :class:`~.State` + The predicted next state of the target + """ + + prior_weights = state.weight + time_interval = timestamp - state.timestamp + + # Predict particles forward + pred_particles_sv = self.transition_model.function(state, + time_interval=time_interval, + noise=True) + + # Surviving particle weights + prob_survive = np.exp(-float(self.prob_death) * time_interval.total_seconds()) + pred_weights = prob_survive * prior_weights + + prediction = Prediction.from_state(state, state_vector=pred_particles_sv, + weight=pred_weights, + timestamp=timestamp, particle_list=None, + transition_model=self.transition_model) + prediction.birth_idx = state.birth_idx if hasattr(state, 'birth_idx') else [] + return prediction + + def update(self, prediction, detections, timestamp, meas_weights=None): + """ + Update the predicted state of the target density with the given detections + + Parameters + ---------- + prediction: :class:`~.State` + The predicted state of the target + 
detections: :class:`~.Detection` + The detections at the current time step + timestamp: :class:`datetime.datetime` + The time at which the update is valid + meas_weights: :class:`np.ndarray` + The weights of the measurements + + Returns + ------- + : :class:`~.State` + The updated state of the target + """ + num_persistent = prediction.state_vector.shape[1] + birth_state = self.get_birth_state(prediction, detections, timestamp) + + weights_per_hyp = self.get_weights_per_hypothesis(prediction, detections, meas_weights, + birth_state) + + # Construct hypothesis objects (StoneSoup specific) + single_hypotheses = [ + SingleProbabilityHypothesis(prediction, + measurement=MissedDetection(timestamp=timestamp), + probability=weights_per_hyp[:num_persistent, 0])] + for i, detection in enumerate(detections): + single_hypotheses.append( + SingleProbabilityHypothesis(prediction, + measurement=detection, + probability=weights_per_hyp[:num_persistent, + i + 1]) + ) + hypothesis = MultipleHypothesis(single_hypotheses, normalise=False) + + # Update weights Eq. 
(8) of [1] + # w_k^i = \sum_{z \in Z_k}{w^{n,i}}, where i is the index of z in Z_k + log_post_weights = logsumexp(np.log(weights_per_hyp).astype(float), axis=1) + log_post_weights_pers = log_post_weights[:num_persistent] + log_post_weights_birth = log_post_weights[num_persistent:] + + # Resample persistent + log_num_targets_pers = logsumexp(log_post_weights_pers) # N_{k|k} + update = copy(prediction) + # Normalize weights + update.weight = Probability.from_log_ufunc(log_post_weights_pers - log_num_targets_pers) + if self.resampler is not None: + update = self.resampler.resample(update, self.num_samples) # Resample + # De-normalize + update.weight = Probability.from_log_ufunc(np.log(update.weight).astype(float) + + log_num_targets_pers) + + if len(detections): + # Resample birth + log_num_targets_birth = logsumexp(log_post_weights_birth) # N_{k|k} + update2 = copy(birth_state) + # Normalize weights + update2.weight = Probability.from_log_ufunc(log_post_weights_birth - log_num_targets_birth) + if self.resampler is not None: + update2 = self.resampler.resample(update2, update2.state_vector.shape[1]) # Resample + # De-normalize + update2.weight = Probability.from_log_ufunc(np.log(update2.weight).astype(float) + + log_num_targets_birth) + + full_update = Update.from_state( + state=prediction, + state_vector=StateVectors(np.hstack((update.state_vector, update2.state_vector))), + weight=np.hstack((update.weight, update2.weight)), + particle_list=None, + hypothesis=hypothesis, + timestamp=timestamp) + else: + full_update = Update.from_state( + state=prediction, + state_vector=update.state_vector, + weight=update.weight, + particle_list=None, + hypothesis=hypothesis, + timestamp=timestamp) + full_update.birth_idx = [i for i in range(len(update.weight), len(full_update.weight))] + return full_update + + def get_birth_state(self, prediction, detections, timestamp): + # Sample birth particles + num_birth = round(float(self.prob_birth) * self.num_samples) + birth_particles = 
np.zeros((prediction.state_vector.shape[0], 0)) + if len(detections): + num_birth_per_detection = num_birth // len(detections) + for i, detection in enumerate(detections): + if i == len(detections) - 1: + num_birth_per_detection += num_birth % len(detections) + mu = self.birth_density.mean + mu[0::2] = detection.state_vector + cov = self.birth_density.covar + cov[0::2, 0::2] = detection.measurement_model.covar() + birth_particles_i = multivariate_normal.rvs(mu.ravel(), + cov, + num_birth_per_detection).T + birth_particles = np.hstack((birth_particles, birth_particles_i)) + birth_weights = np.full((num_birth,), Probability(self.birth_rate / num_birth)) + birth_particles = StateVectors(birth_particles) + birth_state = Prediction.from_state(prediction, + state_vector=birth_particles, + weight=birth_weights, + timestamp=timestamp, particle_list=None, + transition_model=self.transition_model) + return birth_state + + def get_weights_per_hypothesis(self, prediction, detections, meas_weights, birth_state, + *args, **kwargs): + num_samples = prediction.state_vector.shape[1] + if meas_weights is None: + meas_weights = np.array([Probability(1) for _ in range(len(detections))]) + + # Compute g(z|x) matrix as in [1] + g = self.get_measurement_loglikelihoods(prediction, detections, meas_weights) + + # Get probability of detection + prob_detect = np.asfarray(self.prob_detect(prediction)) + + # Calculate w^{n,i} Eq. 
(20) of [2] + try: + Ck = np.log(prob_detect[:, np.newaxis]) + g \ + + np.log(prediction.weight[:, np.newaxis].astype(float)) + except IndexError: + Ck = np.log(prob_detect) + g \ + + np.log(prediction.weight[:, np.newaxis].astype(float)) + C = logsumexp(Ck, axis=0) + Ck_birth = np.tile(np.log(np.asfarray(birth_state.weight)[:, np.newaxis]), len(detections)) + C_birth = logsumexp(Ck_birth, axis=0) + + k = np.log([detection.metadata['clutter_density'] + if 'clutter_density' in detection.metadata else self.clutter_intensity + for detection in detections]) + C_plus = np.logaddexp(C, k) + L = np.logaddexp(C_plus, C_birth) + + weights_per_hyp = np.full((num_samples + birth_state.state_vector.shape[1], + len(detections) + 1), -np.inf) + weights_per_hyp[:num_samples, 0] = np.log(1 - prob_detect) + np.log( + np.asfarray(prediction.weight)) + if len(detections): + weights_per_hyp[:num_samples, 1:] = np.log(np.asfarray(meas_weights)) + Ck - L + weights_per_hyp[num_samples:, 1:] = np.log(np.asfarray(meas_weights)) + Ck_birth - L + return Probability.from_log_ufunc(weights_per_hyp) + + class SMCPHDInitiator(Initiator): filter: SMCPHDFilter = Property(doc='The phd filter') prior: Any = Property(doc='The prior state') @@ -335,3 +524,61 @@ def initiate(self, detections, timestamp, weights=None, **kwargs): self._state = self.filter.update(prediction, detections, timestamp, weights) return tracks + + +class ISMCPHDInitiator(SMCPHDInitiator): + filter: ISMCPHDFilter = Property(doc='The phd filter') + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._state = self.prior + + def initiate(self, detections, timestamp, weights=None, **kwargs): + tracks = set() + + if self._state.timestamp is None: + self._state.timestamp = timestamp + # Predict forward + prediction = self.filter.predict(self._state, timestamp) + + # Calculate weights per hypothesis + birth_state = self.filter.get_birth_state(prediction, detections, timestamp) + weights_per_hyp = 
self.filter.get_weights_per_hypothesis(prediction, detections, weights, birth_state) + log_weights_per_hyp = np.log(weights_per_hyp[:self.filter.num_samples, :]).astype(float) + + # Calculate intensity per hypothesis + log_intensity_per_hyp = logsumexp(log_weights_per_hyp, axis=0) + + # Find detections with intensity above threshold and initiate + valid_inds = np.flatnonzero(np.exp(log_intensity_per_hyp) > self.threshold) + for idx in valid_inds: + if not idx: + continue + + particles_sv = copy(prediction.state_vector[:, :len(prediction)-len(prediction.birth_idx)]) + weight = np.exp(log_weights_per_hyp[:self.filter.num_samples, idx] - log_intensity_per_hyp[idx]) + + mu = np.average(particles_sv, + axis=1, + weights=weight) + cov = np.cov(particles_sv, ddof=0, aweights=weight) + + hypothesis = SingleProbabilityHypothesis(prediction, + measurement=detections[idx - 1], + probability=weights_per_hyp[:self.filter.num_samples, idx]) + + track_state = GaussianStateUpdate(mu, cov, hypothesis=hypothesis, + timestamp=timestamp) + + # if np.trace(track_state.covar) < 10: + weights_per_hyp[:, idx] = Probability(0) + track = Track([track_state]) + track.exist_prob = Probability(log_intensity_per_hyp[idx], log_value=True) + tracks.add(track) + + weights[idx - 1] = 0 + + # Update filter + self._state = self.filter.update(prediction, detections, timestamp, weights) + + return tracks \ No newline at end of file From a56e196db81ed367445118bd9019b5bca1925347 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Tue, 31 Jan 2023 10:19:59 +0000 Subject: [PATCH 50/87] Replaced SMCPHD(Initiator) with ISMCPHD(Initiator) in SMCPHD_JIPDA tracker --- stonesoup/custom/tracker.py | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/stonesoup/custom/tracker.py b/stonesoup/custom/tracker.py index 9ec7714f8..f1e22970b 100644 --- a/stonesoup/custom/tracker.py +++ b/stonesoup/custom/tracker.py @@ -6,7 +6,8 @@ from stonesoup.base import Property, Base from 
stonesoup.custom.dataassociator.jipda import JIPDAWithEHM2 -from stonesoup.custom.initiator.smcphd import SMCPHDFilter, SMCPHDInitiator +from stonesoup.custom.initiator.smcphd import SMCPHDFilter, SMCPHDInitiator, ISMCPHDFilter, \ + ISMCPHDInitiator from stonesoup.dataassociator.neighbour import GNNWith2DAssignment from stonesoup.functions import gm_reduce_single from stonesoup.gater.distance import DistanceGater @@ -18,6 +19,7 @@ from stonesoup.predictor.kalman import KalmanPredictor from stonesoup.resampler.particle import SystematicResampler from stonesoup.types.array import StateVectors +from stonesoup.types.mixture import GaussianMixture from stonesoup.types.numeric import Probability from stonesoup.types.state import State, ParticleState from stonesoup.types.update import GaussianStateUpdate @@ -61,7 +63,7 @@ def __init__(self, *args, **kwargs): self._associator = JIPDAWithEHM2(self._hypothesiser) resampler = SystematicResampler() - phd_filter = SMCPHDFilter(birth_density=self.birth_density, + phd_filter = ISMCPHDFilter(birth_density=self.birth_density, transition_model=self.transition_model, measurement_model=self.measurement_model, prob_detect=self.prob_detect, @@ -73,13 +75,26 @@ def __init__(self, *args, **kwargs): resampler=resampler, birth_scheme=self.birth_scheme) # Sample prior state from birth density - state_vector = StateVectors(multivariate_normal.rvs(self.birth_density.state_vector.ravel(), - self.birth_density.covar, - size=self.num_samples).T) - weight = np.full((self.num_samples,), Probability(1 / self.num_samples)) + if isinstance(self.birth_density, GaussianMixture): + state_vector = np.zeros((self.transition_model.ndim_state, 0)) + particles_per_component = self.num_samples // len(self.birth_density) + for i, component in enumerate(self.birth_density): + if i == len(self.birth_density) - 1: + particles_per_component += self.num_samples % len(self.birth_density) + particles_component = multivariate_normal.rvs( + component.mean.ravel(), + 
component.covar, + particles_per_component).T + state_vector = np.hstack((state_vector, particles_component)) + state_vector = StateVectors(state_vector) + else: + state_vector = StateVectors(multivariate_normal.rvs(self.birth_density.state_vector.ravel(), + self.birth_density.covar, + size=self.num_samples).T) + weight = np.full((self.num_samples,), Probability(1 / self.num_samples))*self.birth_rate state = ParticleState(state_vector=state_vector, weight=weight, timestamp=self.start_time) - self._initiator = SMCPHDInitiator(filter=phd_filter, prior=state) + self._initiator = ISMCPHDInitiator(filter=phd_filter, prior=state) @property From 898b74460126cd5fc555dce6b595c51c27ee3a40 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Wed, 1 Feb 2023 22:01:47 +0000 Subject: [PATCH 51/87] Remove duplicated code --- stonesoup/hypothesiser/probability.py | 54 --------------------------- 1 file changed, 54 deletions(-) diff --git a/stonesoup/hypothesiser/probability.py b/stonesoup/hypothesiser/probability.py index f54b3da62..b95049542 100644 --- a/stonesoup/hypothesiser/probability.py +++ b/stonesoup/hypothesiser/probability.py @@ -192,57 +192,3 @@ def _validation_region_volume(cls, prob_gate, meas_pred): @lru_cache() def _gate_threshold(prob_gate, n): return chi2.ppf(float(prob_gate), n) - - -class IPDAHypothesiser(PDAHypothesiser): - """ Integrated PDA Hypothesiser """ - prob_survive: Probability = Property(doc="Probability of survival", default=Probability(0.99)) - - def hypothesise(self, track, detections, timestamp, **kwargs): - r"""Evaluate and return all track association hypotheses. 
- """ - - hypotheses = list() - - # Common state & measurement prediction - prediction = self.predictor.predict(track, timestamp=timestamp, **kwargs) - # Compute predicted existence - time_interval = timestamp - track.timestamp - prob_survive = np.exp(-(1-self.prob_survive)*time_interval.total_seconds()) - track.exist_prob = prob_survive * track.exist_prob - # Missed detection hypothesis - probability = Probability(1 - self.prob_detect * self.prob_gate * track.exist_prob) - w = (1 - track.exist_prob) / ((1 - self.prob_detect * self.prob_gate) * track.exist_prob) - hypotheses.append( - SingleProbabilityHypothesis( - prediction, - MissedDetection(timestamp=timestamp), - probability, - metadata={"w": w} - )) - - # True detection hypotheses - for detection in detections: - # Re-evaluate prediction - prediction = self.predictor.predict( - track.state, timestamp=detection.timestamp) - # Compute measurement prediction and probability measure - measurement_prediction = self.updater.predict_measurement( - prediction, detection.measurement_model, **kwargs) - # Calculate difference before to handle custom types (mean defaults to zero) - # This is required as log pdf coverts arrays to floats - log_pdf = multivariate_normal.logpdf( - (detection.state_vector - measurement_prediction.state_vector).ravel(), - cov=measurement_prediction.covar) - pdf = Probability(log_pdf, log_value=True) - probability = (pdf * self.prob_detect * track.exist_prob)/self.clutter_spatial_density - - # True detection hypothesis - hypotheses.append( - SingleProbabilityHypothesis( - prediction, - detection, - probability, - measurement_prediction)) - - return MultipleHypothesis(hypotheses, normalise=True, total_weight=1) From ec1abebf5523fcd9af7de6e87b1d607c62276055 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Wed, 1 Feb 2023 22:02:45 +0000 Subject: [PATCH 52/87] Create custom PDAHypothesiser and use it in defined tracker(s) --- stonesoup/custom/hypothesiser/probability.py | 169 ++++++++++++++++++- 
stonesoup/custom/tracker.py | 95 +++++------ 2 files changed, 207 insertions(+), 57 deletions(-) diff --git a/stonesoup/custom/hypothesiser/probability.py b/stonesoup/custom/hypothesiser/probability.py index 99d464b5c..801cd7a6e 100644 --- a/stonesoup/custom/hypothesiser/probability.py +++ b/stonesoup/custom/hypothesiser/probability.py @@ -5,25 +5,176 @@ from scipy.stats import multivariate_normal as mn from stonesoup.base import Property -from stonesoup.hypothesiser.probability import PDAHypothesiser +from stonesoup.hypothesiser import Hypothesiser +from stonesoup.measures import SquaredMahalanobis +from stonesoup.predictor import Predictor from stonesoup.types.detection import MissedDetection from stonesoup.types.hypothesis import SingleProbabilityHypothesis from stonesoup.types.multihypothesis import MultipleHypothesis from stonesoup.types.numeric import Probability from stonesoup.types.state import State +from stonesoup.updater import Updater -class IPDAHypothesiser(PDAHypothesiser): +class PDAHypothesiser(Hypothesiser): + """Hypothesiser based on Probabilistic Data Association (PDA) - """ Integrated PDA Hypothesiser """ + Generate track predictions at detection times and calculate probabilities + for all prediction-detection pairs for single prediction and multiple + detections. + """ + predictor: Predictor = Property(doc="Predict tracks to detection times") + updater: Updater = Property(doc="Updater used to get measurement prediction") + clutter_spatial_density: float = Property( + default=None, + doc="Spatial density of clutter - tied to probability of false detection. Default is None " + "where the clutter spatial density is calculated based on assumption that " + "all but one measurement within the validation region of the track are clutter.") + prob_gate: Probability = Property( + default=Probability(0.95), + doc="Gate Probability - prob. 
gate contains true measurement " + "if detected") prob_detect: Union[Probability, Callable[[State], Probability]] = Property( - default=Probability(0.85), + default=None, doc="Target Detection Probability") - prob_survive: Probability = Property(doc="Probability of survival", default=Probability(0.99)) predict: bool = Property(default=True, doc="Perform prediction step") per_measurement: bool = Property(default=False, doc="Generate per measurement predictions") + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.prob_detect is None: + self.prob_detect = lambda x: Probability(0.85) + + def hypothesise(self, track, detections, timestamp, **kwargs): + r"""Evaluate and return all track association hypotheses. + + For a given track and a set of N detections, return a + MultipleHypothesis with N+1 detections (first detection is + a 'MissedDetection'), each with an associated probability. + Probabilities are assumed to be exhaustive (sum to 1) and mutually + exclusive (two detections cannot be the correct association at the + same time). + + Detection 0: missed detection, none of the detections are associated + with the track. + Detection :math:`i, i \in {1...N}`: detection i is associated + with the track. + + The probabilities for these detections are calculated as follow: + + .. math:: + + \beta_i(k) = \begin{cases} + \frac{\mathcal{L}_{i}(k)}{1-P_{D}P_{G}+\sum_{j=1}^{m(k)} + \mathcal{L}_{j}(k)}, \quad i=1,...,m(k) \\ + \frac{1-P_{D}P_{G}}{1-P_{D}P_{G}+\sum_{j=1}^{m(k)} + \mathcal{L}_{j}(k)}, \quad i=0 + \end{cases} + + where + + .. math:: + + \mathcal{L}_{i}(k) = \frac{\mathcal{N}[z_{i}(k);\hat{z}(k|k-1), + S(k)]P_{D}}{\lambda} + + :math:`\lambda` is the clutter density + + :math:`P_{D}` is the detection probability + + :math:`P_{G}` is the gate probability + + :math:`\mathcal{N}[z_{i}(k);\hat{z}(k|k-1),S(k)]` is the likelihood + ratio of the measurement :math:`z_{i}(k)` originating from the track + target rather than the clutter. 
+ + NOTE: Since all probabilities have the same denominator and are + normalized later, the denominator can be discarded. + + References: + + [1] "The Probabilistic Data Association Filter: Estimation in the + Presence of Measurement Origin Uncertainty" - + https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=5338565 + + [2] "Robotics 2 Data Association" (Lecture notes) - + http://ais.informatik.uni-freiburg.de/teaching/ws10/robotics2/pdfs/rob2-15-dataassociation.pdf + + Parameters + ---------- + track : Track + The track object to hypothesise on + detections : set of :class:`~.Detection` + The available detections + timestamp : datetime.datetime + A timestamp used when evaluating the state and measurement + predictions. Note that if a given detection has a non empty + timestamp, then prediction will be performed according to + the timestamp of the detection. + + Returns + ------- + : :class:`~.MultipleHypothesis` + A container of :class:`~.SingleProbabilityHypothesis` objects + """ + + hypotheses = list() + + if self.predict: + # Common state & measurement prediction + prediction = self.predictor.predict(track, timestamp=timestamp, **kwargs) + else: + prediction = track.state + # Missed detection hypothesis + prob_detect = self.prob_detect(prediction) + # Missed detection hypothesis + probability = Probability(1 - prob_detect*self.prob_gate) + hypotheses.append( + SingleProbabilityHypothesis( + prediction, + MissedDetection(timestamp=timestamp), + probability + )) + + # True detection hypotheses + measurement_prediction = None + for detection in detections: + if self.predict and self.per_measurement: + # Re-evaluate prediction + prediction = self.predictor.predict( + track.state, timestamp=detection.timestamp) + prob_detect = self.prob_detect(prediction) + + if self.per_measurement or measurement_prediction is None: + # Compute measurement prediction and probability measure + measurement_prediction = self.updater.predict_measurement( + prediction, 
detection.measurement_model, **kwargs) + + # Calculate difference before to handle custom types (mean defaults to zero) + # This is required as log pdf coverts arrays to floats + log_pdf = mn.logpdf( + (detection.state_vector - measurement_prediction.state_vector).ravel(), + cov=measurement_prediction.covar) + pdf = Probability(log_pdf, log_value=True) + probability = (pdf * prob_detect) / self.clutter_spatial_density + + # True detection hypothesis + hypotheses.append( + SingleProbabilityHypothesis( + prediction, + detection, + probability, + measurement_prediction)) + + return MultipleHypothesis(hypotheses, normalise=True, total_weight=1) + + +class IPDAHypothesiser(PDAHypothesiser): + """ Integrated PDA Hypothesiser """ + + prob_survive: Probability = Property(doc="Probability of survival", default=Probability(0.99)) + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @@ -55,15 +206,17 @@ def hypothesise(self, track, detections, timestamp, **kwargs): )) # True detection hypotheses + measurement_prediction = None for detection in detections: if self.predict and self.per_measurement: # Re-evaluate prediction prediction = self.predictor.predict( track.state, timestamp=detection.timestamp) prob_detect = self.prob_detect(prediction) - # Compute measurement prediction and probability measure - measurement_prediction = self.updater.predict_measurement( - prediction, detection.measurement_model, **kwargs) + if self.per_measurement or measurement_prediction is None: + # Compute measurement prediction and probability measure + measurement_prediction = self.updater.predict_measurement( + prediction, detection.measurement_model, **kwargs) # Calculate difference before to handle custom types (mean defaults to zero) # This is required as log pdf coverts arrays to floats log_pdf = mn.logpdf( diff --git a/stonesoup/custom/tracker.py b/stonesoup/custom/tracker.py index f1e22970b..5c0e6c6d2 100644 --- a/stonesoup/custom/tracker.py +++ 
b/stonesoup/custom/tracker.py @@ -11,7 +11,7 @@ from stonesoup.dataassociator.neighbour import GNNWith2DAssignment from stonesoup.functions import gm_reduce_single from stonesoup.gater.distance import DistanceGater -from stonesoup.custom.hypothesiser.probability import IPDAHypothesiser +from stonesoup.custom.hypothesiser.probability import PDAHypothesiser, IPDAHypothesiser from stonesoup.hypothesiser.distance import DistanceHypothesiser from stonesoup.measures import Mahalanobis from stonesoup.models.measurement import MeasurementModel @@ -58,22 +58,22 @@ def __init__(self, *args, **kwargs): self._hypothesiser = IPDAHypothesiser(self._predictor, self._updater, self.clutter_intensity, prob_detect=self.prob_detect, - prob_survive=1-self.prob_death) + prob_survive=1 - self.prob_death) self._hypothesiser = DistanceGater(self._hypothesiser, Mahalanobis(), 10) self._associator = JIPDAWithEHM2(self._hypothesiser) resampler = SystematicResampler() phd_filter = ISMCPHDFilter(birth_density=self.birth_density, - transition_model=self.transition_model, - measurement_model=self.measurement_model, - prob_detect=self.prob_detect, - prob_death=self.prob_death, - prob_birth=self.prob_birth, - birth_rate=self.birth_rate, - clutter_intensity=self.clutter_intensity, - num_samples=self.num_samples, - resampler=resampler, - birth_scheme=self.birth_scheme) + transition_model=self.transition_model, + measurement_model=self.measurement_model, + prob_detect=self.prob_detect, + prob_death=self.prob_death, + prob_birth=self.prob_birth, + birth_rate=self.birth_rate, + clutter_intensity=self.clutter_intensity, + num_samples=self.num_samples, + resampler=resampler, + birth_scheme=self.birth_scheme) # Sample prior state from birth density if isinstance(self.birth_density, GaussianMixture): state_vector = np.zeros((self.transition_model.ndim_state, 0)) @@ -88,15 +88,15 @@ def __init__(self, *args, **kwargs): state_vector = np.hstack((state_vector, particles_component)) state_vector = 
StateVectors(state_vector) else: - state_vector = StateVectors(multivariate_normal.rvs(self.birth_density.state_vector.ravel(), - self.birth_density.covar, - size=self.num_samples).T) - weight = np.full((self.num_samples,), Probability(1 / self.num_samples))*self.birth_rate + state_vector = StateVectors( + multivariate_normal.rvs(self.birth_density.state_vector.ravel(), + self.birth_density.covar, + size=self.num_samples).T) + weight = np.full((self.num_samples,), Probability(1 / self.num_samples)) * self.birth_rate state = ParticleState(state_vector=state_vector, weight=weight, timestamp=self.start_time) self._initiator = ISMCPHDInitiator(filter=phd_filter, prior=state) - @property def tracks(self): return self._tracks @@ -217,42 +217,38 @@ def __init__(self, *args, **kwargs): self.prob_detect = self.prob_detection - if self.start_time is None: - self.start_time = datetime.now() - self._tracks = set() self._predictor = KalmanPredictor(self.transition_model) self._updater = KalmanUpdater(self.measurement_model) - self._hypothesiser = IPDAHypothesiser(self._predictor, self._updater, - self.clutter_intensity, - prob_detect=self.prob_detect, - prob_survive=1-self.prob_death, - predict=False) + self._hypothesiser = PDAHypothesiser(self._predictor, self._updater, + self.clutter_intensity, + prob_detect=self.prob_detect) self._hypothesiser = DistanceHypothesiser(self._predictor, self._updater, Mahalanobis(), 10) self._associator = GNNWith2DAssignment(self._hypothesiser) resampler = SystematicResampler() - phd_filter = SMCPHDFilter(birth_density=self.birth_density, - transition_model=self.transition_model, - measurement_model=self.measurement_model, - prob_detect=self.prob_detect, - prob_death=self.prob_death, - prob_birth=self.prob_birth, - birth_rate=self.birth_rate, - clutter_intensity=self.clutter_intensity, - num_samples=self.num_samples, - resampler=resampler, - birth_scheme=self.birth_scheme) + phd_filter = ISMCPHDFilter(birth_density=self.birth_density, + 
transition_model=self.transition_model, + measurement_model=self.measurement_model, + prob_detect=self.prob_detect, + prob_death=self.prob_death, + prob_birth=self.prob_birth, + birth_rate=self.birth_rate, + clutter_intensity=self.clutter_intensity, + num_samples=self.num_samples, + resampler=resampler, + birth_scheme=self.birth_scheme) + # Sample prior state from birth density - state_vector = StateVectors(multivariate_normal.rvs(self.birth_density.state_vector.ravel(), - self.birth_density.covar, - size=self.num_samples).T) + state_vector = StateVectors( + multivariate_normal.rvs(self.birth_density.state_vector.ravel(), + self.birth_density.covar, + size=self.num_samples).T) weight = np.full((self.num_samples,), Probability(1 / self.num_samples)) state = ParticleState(state_vector=state_vector, weight=weight, timestamp=self.start_time) - self._initiator = SMCPHDInitiator(filter=phd_filter, prior=state) - + self._initiator = ISMCPHDInitiator(filter=phd_filter, prior=state) @property def tracks(self): @@ -288,11 +284,11 @@ def track(self, detections, timestamp): # Compute measurement weights rho = np.ones((len(detections))) for i, track in enumerate(tracks): - for hyp in associations[track]: - if hyp: - j = next(d_i for d_i, detection in enumerate(detections) - if hyp.measurement == detection) - rho[j] = 0 + hyp = associations[track] + if hyp: + j = next(d_i for d_i, detection in enumerate(detections) + if hyp.measurement == detection) + rho[j] = 0 # Update tracks for track, hypothesis in associations.items(): @@ -304,9 +300,10 @@ def track(self, detections, timestamp): else: time_interval = timestamp - track.timestamp track.append(hypothesis.prediction) - non_exist_weight = 1 - track.exist_prob prob_survive = np.exp(-self.prob_death * time_interval.total_seconds()) - non_det_weight = prob_survive * track.exist_prob + track.exist_prob *= prob_survive + non_exist_weight = 1 - track.exist_prob + non_det_weight = 
(1-self.prob_detect(hypothesis.prediction.state_vector)) * track.exist_prob track.exist_prob = non_det_weight / (non_exist_weight + non_det_weight) # Initiate new tracks @@ -322,4 +319,4 @@ def track(self, detections, timestamp): tracks -= del_tracks self._tracks = set(tracks) - return self._tracks \ No newline at end of file + return self._tracks From 9b761bcd3d000e8ddae2c51f8d3a049a5bbff2af Mon Sep 17 00:00:00 2001 From: sglvladi Date: Wed, 1 Feb 2023 22:13:19 +0000 Subject: [PATCH 53/87] Tidy up custom/tracker.py --- stonesoup/custom/tracker.py | 115 ++++++++++++------------------------ 1 file changed, 39 insertions(+), 76 deletions(-) diff --git a/stonesoup/custom/tracker.py b/stonesoup/custom/tracker.py index 5c0e6c6d2..475c17967 100644 --- a/stonesoup/custom/tracker.py +++ b/stonesoup/custom/tracker.py @@ -1,3 +1,4 @@ +from abc import abstractmethod from copy import copy from datetime import datetime, timezone @@ -26,9 +27,7 @@ from stonesoup.updater.kalman import KalmanUpdater -class SMCPHD_JIPDA(Base): - """A JIPDA tracker using an SMC-PHD filter as the track initiator.""" - +class _BaseTracker(Base): transition_model: TransitionModel = Property(doc='The transition model') measurement_model: MeasurementModel = Property(doc='The measurement model') prob_detection: Probability = Property(doc='The probability of detection') @@ -49,10 +48,42 @@ class SMCPHD_JIPDA(Base): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.prob_detect = self.prob_detection - self._tracks = set() + + @property + def tracks(self): + return self._tracks + + @property + def prob_detect(self): + return self._prob_detect + + @prob_detect.setter + def prob_detect(self, prob_detect): + if not callable(prob_detect): + prob_detect = copy(prob_detect) + self._prob_detect = lambda state: prob_detect + else: + self._prob_detect = copy(prob_detect) + if hasattr(self, '_hypothesiser'): + if hasattr(self._hypothesiser, 'hypothesiser'): + 
self._hypothesiser.hypothesiser.prob_detect = self._prob_detect + else: + self._hypothesiser.prob_detect = self._prob_detect + if hasattr(self, '_initiator'): + self._initiator.filter.prob_detect = self._prob_detect + + @abstractmethod + def track(self, detections, timestamp, *args, **kwargs): + raise NotImplementedError + + +class SMCPHD_JIPDA(_BaseTracker): + """A JIPDA tracker using an SMC-PHD filter as the track initiator.""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) self._predictor = KalmanPredictor(self.transition_model) self._updater = KalmanUpdater(self.measurement_model) self._hypothesiser = IPDAHypothesiser(self._predictor, self._updater, @@ -97,30 +128,7 @@ def __init__(self, *args, **kwargs): self._initiator = ISMCPHDInitiator(filter=phd_filter, prior=state) - @property - def tracks(self): - return self._tracks - - @property - def prob_detect(self): - return self._prob_detect - - @prob_detect.setter - def prob_detect(self, prob_detect): - if not callable(prob_detect): - prob_detect = copy(prob_detect) - self._prob_detect = lambda state: prob_detect - else: - self._prob_detect = copy(prob_detect) - if hasattr(self, '_hypothesiser'): - if hasattr(self._hypothesiser, 'hypothesiser'): - self._hypothesiser.hypothesiser.prob_detect = self._prob_detect - else: - self._hypothesiser.prob_detect = self._prob_detect - if hasattr(self, '_initiator'): - self._initiator.filter.prob_detect = self._prob_detect - - def track(self, detections, timestamp): + def track(self, detections, timestamp, *args, **kwargs): tracks = list(self.tracks) detections = list(detections) num_tracks = len(tracks) @@ -191,33 +199,11 @@ def track(self, detections, timestamp): return self._tracks -class SMCPHD_IGNN(Base): +class SMCPHD_IGNN(_BaseTracker): """ A IGNN tracker using an SMC-PHD filter as the track initiator. 
""" - transition_model: TransitionModel = Property(doc='The transition model') - measurement_model: MeasurementModel = Property(doc='The measurement model') - prob_detection: Probability = Property(doc='The probability of detection') - prob_death: Probability = Property(doc='The probability of death') - prob_birth: Probability = Property(doc='The probability of birth') - birth_rate: float = Property( - doc='The birth rate (i.e. number of new/born targets at each iteration(') - birth_density: State = Property( - doc='The birth density (i.e. density from which we sample birth particles)') - clutter_intensity: float = Property(doc='The clutter intensity per unit volume') - num_samples: int = Property(doc='The number of samples. Default is 1024', default=1024) - birth_scheme: str = Property( - doc='The scheme for birth particles. Options are "expansion" | "mixture". ' - 'Default is "expansion"', - default='expansion' - ) - start_time: datetime = Property(doc='Start time of the tracker', default=None) - def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - - self.prob_detect = self.prob_detection - - self._tracks = set() self._predictor = KalmanPredictor(self.transition_model) self._updater = KalmanUpdater(self.measurement_model) self._hypothesiser = PDAHypothesiser(self._predictor, self._updater, @@ -250,30 +236,7 @@ def __init__(self, *args, **kwargs): self._initiator = ISMCPHDInitiator(filter=phd_filter, prior=state) - @property - def tracks(self): - return self._tracks - - @property - def prob_detect(self): - return self._prob_detect - - @prob_detect.setter - def prob_detect(self, prob_detect): - if not callable(prob_detect): - prob_detect = copy(prob_detect) - self._prob_detect = lambda state: prob_detect - else: - self._prob_detect = copy(prob_detect) - if hasattr(self, '_hypothesiser'): - if hasattr(self._hypothesiser, 'hypothesiser'): - self._hypothesiser.hypothesiser.prob_detect = self._prob_detect - else: - self._hypothesiser.prob_detect = 
self._prob_detect - if hasattr(self, '_initiator'): - self._initiator.filter.prob_detect = self._prob_detect - - def track(self, detections, timestamp): + def track(self, detections, timestamp, *args, **kwargs): tracks = list(self.tracks) detections = list(detections) From bd48d985e7f189f3e180905e3af688e57cadbfae Mon Sep 17 00:00:00 2001 From: sglvladi Date: Wed, 1 Feb 2023 23:11:36 +0000 Subject: [PATCH 54/87] Ensure trackers are picklable --- stonesoup/custom/tracker.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/stonesoup/custom/tracker.py b/stonesoup/custom/tracker.py index 475c17967..975b2e608 100644 --- a/stonesoup/custom/tracker.py +++ b/stonesoup/custom/tracker.py @@ -62,8 +62,8 @@ def prob_detect(self): @prob_detect.setter def prob_detect(self, prob_detect): if not callable(prob_detect): - prob_detect = copy(prob_detect) - self._prob_detect = lambda state: prob_detect + self.prob_detection = prob_detect + self._prob_detect = self._prob_detect_simple else: self._prob_detect = copy(prob_detect) if hasattr(self, '_hypothesiser'): @@ -78,6 +78,9 @@ def prob_detect(self, prob_detect): def track(self, detections, timestamp, *args, **kwargs): raise NotImplementedError + def _prob_detect_simple(self, state_vector): + """A simple probability of detection function.""" + return self.prob_detection class SMCPHD_JIPDA(_BaseTracker): """A JIPDA tracker using an SMC-PHD filter as the track initiator.""" From 1dc3600531d43d624ced288ecacf00c50c3a33a3 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Thu, 2 Feb 2023 10:51:02 +0000 Subject: [PATCH 55/87] Fix StateVector has no state_vector error in SMCPHD_IGNN tracker --- stonesoup/custom/tracker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stonesoup/custom/tracker.py b/stonesoup/custom/tracker.py index 975b2e608..f9bac0a5a 100644 --- a/stonesoup/custom/tracker.py +++ b/stonesoup/custom/tracker.py @@ -269,7 +269,7 @@ def track(self, detections, timestamp, *args, **kwargs): 
prob_survive = np.exp(-self.prob_death * time_interval.total_seconds()) track.exist_prob *= prob_survive non_exist_weight = 1 - track.exist_prob - non_det_weight = (1-self.prob_detect(hypothesis.prediction.state_vector)) * track.exist_prob + non_det_weight = (1-self.prob_detect(hypothesis.prediction)) * track.exist_prob track.exist_prob = non_det_weight / (non_exist_weight + non_det_weight) # Initiate new tracks From 2afa5f0a68e9df332032f00fd1dbf2f1df2848d8 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Fri, 3 Feb 2023 12:33:42 +0000 Subject: [PATCH 56/87] fov_in_km now defaults to True for MoveableUAVSensor --- stonesoup/custom/sensor/movable.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stonesoup/custom/sensor/movable.py b/stonesoup/custom/sensor/movable.py index 987137825..9e246ed45 100644 --- a/stonesoup/custom/sensor/movable.py +++ b/stonesoup/custom/sensor/movable.py @@ -52,7 +52,7 @@ class MovableUAVCamera(Sensor): ) fov_in_km: bool = Property( doc="Whether the FOV radius is in kilo-meters or degrees", - default=False) + default=True) @location_x.setter def location_x(self, value): From a00ffe2f1973bb6b003a90eaed4ec2d1a6ad45e1 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Fri, 3 Feb 2023 12:57:25 +0000 Subject: [PATCH 57/87] Added shapely and geopy to dependencies --- setup.cfg | 2 ++ 1 file changed, 2 insertions(+) diff --git a/setup.cfg b/setup.cfg index 92f9ec5fe..c54c0ed7c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -29,6 +29,8 @@ install_requires = utm pyehm vector3d + shapely + geopy [options.extras_require] dev = From 4e085fa512b43cf4925b1bcc5bbc19f40ec6ecc9 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Fri, 3 Feb 2023 12:58:17 +0000 Subject: [PATCH 58/87] Updated distance calculation when fov_in_km is True in MovableUAVSensor --- stonesoup/custom/sensor/movable.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/stonesoup/custom/sensor/movable.py b/stonesoup/custom/sensor/movable.py index 
9e246ed45..8c22f87d2 100644 --- a/stonesoup/custom/sensor/movable.py +++ b/stonesoup/custom/sensor/movable.py @@ -2,6 +2,7 @@ from typing import Union, List, Set import numpy as np +import geopy.distance from stonesoup.base import Property from stonesoup.custom.sensor.action.location import LocationActionGenerator @@ -89,16 +90,13 @@ def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, # Transform state to measurement space and generate random noise measurement_vector = measurement_model.function(truth, noise=noise, **kwargs) - # Normalise measurement vector relative to sensor position - norm_measurement_vector = measurement_vector.astype(float) - self.position.astype( - float) - - distance = np.linalg.norm(norm_measurement_vector[0:2]) - if self.fov_in_km: - # Note: this is a very approximate conversion, and does not take into account the - # curvature of the earth. Should be replaced with a more accurate check. - distance *= 110.574 + distance = geopy.distance.distance(self.position[0:2], measurement_vector[0:2]).km + else: + # Normalise measurement vector relative to sensor position + norm_measurement_vector = measurement_vector.astype(float) - self.position.astype( + float) + distance = np.linalg.norm(norm_measurement_vector[0:2]) # Do not measure if state not in FOV if distance > self.fov_radius: From bd7718129a691b3b9530c82484d4f43eb2358196 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Fri, 3 Feb 2023 17:47:03 +0000 Subject: [PATCH 59/87] Added pyproj to dependencies and pinned version of shapely --- setup.cfg | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index c54c0ed7c..2460ba5dc 100644 --- a/setup.cfg +++ b/setup.cfg @@ -29,8 +29,9 @@ install_requires = utm pyehm vector3d - shapely + shapely==2.0.1 geopy + pyproj==3.4.1 [options.extras_require] dev = From 2830dbf7cbe8f3e0e28ad3ca6965d2d621c25e29 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Fri, 3 Feb 2023 17:48:23 +0000 Subject: 
[PATCH 60/87] Correctly implemented prob detect functions for fov in km --- stonesoup/custom/functions/__init__.py | 16 ++++++++++++++++ stonesoup/custom/sensor/movable.py | 3 ++- stonesoup/custom/sensormanager/reward.py | 13 +++++++------ 3 files changed, 25 insertions(+), 7 deletions(-) diff --git a/stonesoup/custom/functions/__init__.py b/stonesoup/custom/functions/__init__.py index d1a14ad70..febd041cf 100644 --- a/stonesoup/custom/functions/__init__.py +++ b/stonesoup/custom/functions/__init__.py @@ -1,7 +1,11 @@ +from functools import partial import math from typing import Set, List import numpy as np +import pyproj +from shapely.geometry import Point +from shapely.ops import transform from matplotlib.path import Path from scipy.special import logsumexp from scipy.stats import multivariate_normal @@ -429,3 +433,15 @@ def calculate_num_targets_dist(tracks: Set[Track], geom: BaseGeometry, var_overall += p_success * (1 - p_success) return mu_overall, var_overall + + +def geodesic_point_buffer(lat, lon, km): + # Azimuthal equidistant projection + proj_wgs84 = pyproj.Proj('+proj=longlat +datum=WGS84') + aeqd_proj = '+proj=aeqd +lat_0={lat} +lon_0={lon} +x_0=0 +y_0=0' + project = partial( + pyproj.transform, + pyproj.Proj(aeqd_proj.format(lat=lat, lon=lon)), + proj_wgs84) + buf = Point(0, 0).buffer(km * 1000) # distance in metres + return transform(project, buf) \ No newline at end of file diff --git a/stonesoup/custom/sensor/movable.py b/stonesoup/custom/sensor/movable.py index 8c22f87d2..e62f78b47 100644 --- a/stonesoup/custom/sensor/movable.py +++ b/stonesoup/custom/sensor/movable.py @@ -91,7 +91,8 @@ def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, measurement_vector = measurement_model.function(truth, noise=noise, **kwargs) if self.fov_in_km: - distance = geopy.distance.distance(self.position[0:2], measurement_vector[0:2]).km + distance = geopy.distance.distance(np.flip(self.position[0:2]), + np.flip(measurement_vector[0:2])).km 
else: # Normalise measurement vector relative to sensor position norm_measurement_vector = measurement_vector.astype(float) - self.position.astype( diff --git a/stonesoup/custom/sensormanager/reward.py b/stonesoup/custom/sensormanager/reward.py index 0ecab3e66..9eb1acb7d 100644 --- a/stonesoup/custom/sensormanager/reward.py +++ b/stonesoup/custom/sensormanager/reward.py @@ -10,7 +10,7 @@ from reactive_isr_core.data import TaskType from stonesoup.base import Property -from stonesoup.custom.functions import calculate_num_targets_dist +from stonesoup.custom.functions import calculate_num_targets_dist, geodesic_point_buffer from stonesoup.custom.tracker import SMCPHD_JIPDA from stonesoup.functions import gm_reduce_single from stonesoup.predictor.kalman import KalmanPredictor @@ -279,9 +279,9 @@ def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track] for detection in sensor.measure(predicted_tracks, noise=False) if isinstance(detection, TrueDetection)} - center = (sensor.position[0], sensor.position[1]) + center = (sensor.position[1], sensor.position[0]) radius = sensor.fov_radius - p = Point(center).buffer(radius) + p = geodesic_point_buffer(*center, radius) self.tracker.prob_detect = _prob_detect_func([p]) associations = self.tracker._associator.associate(tracks_copy, detections, timestamp) @@ -527,9 +527,9 @@ def _compute_metric(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set for detection in sensor.measure(predicted_tracks, noise=False) if isinstance(detection, TrueDetection)} - center = (sensor.position[0], sensor.position[1]) + center = (sensor.position[1], sensor.position[0]) radius = sensor.fov_radius - p = Point(center).buffer(radius) + p = geodesic_point_buffer(*center, radius) self.tracker.prob_detect = _prob_detect_func([p]) associations = self.tracker._associator.associate(tracks_copy, detections, timestamp) @@ -584,6 +584,7 @@ def _compute_metric(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set if 
rfi.task_type == TaskType.COUNT: target_types = [t.target_type.value for t in rfi.targets] _, var = calculate_num_targets_dist(tracks_copy, geom, target_types=target_types) + if var < rfi.threshold_over_time.threshold[0]: # TODO: Need to select the priority config_metric += rfi.priority_over_time.priority[0] @@ -855,7 +856,7 @@ def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track] config_metric += np.max(rewards) return config_metric, var - + def _prob_detect_func(fovs): """Closure to return the probability of detection function for a given environment scan""" From f3ad30df2c6811c3bb8dc694a42e7deba7a2520d Mon Sep 17 00:00:00 2001 From: sglvladi Date: Sun, 5 Feb 2023 18:01:05 +0000 Subject: [PATCH 61/87] Fix minor bug in LocationActionGenerator --- stonesoup/custom/sensor/action/location.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stonesoup/custom/sensor/action/location.py b/stonesoup/custom/sensor/action/location.py index fcee4cfa2..d947af892 100644 --- a/stonesoup/custom/sensor/action/location.py +++ b/stonesoup/custom/sensor/action/location.py @@ -96,7 +96,8 @@ def __contains__(self, item): possible_values = np.arange(self.min, self.max + self.resolution, self.resolution, dtype=float) - + possible_values = np.append(possible_values, self.current_value) + possible_values.sort() return possible_values[0] <= item <= possible_values[-1] def __iter__(self) -> Iterator[ChangeLocationAction]: @@ -115,6 +116,7 @@ def action_from_value(self, value): if value not in self: return None possible_values = np.arange(self.min, self.max + self.resolution, self.resolution, dtype=float) + possible_values = np.append(possible_values, self.current_value) angle = get_nearest(possible_values, value) return self._action_cls(generator=self, end_time=self.end_time, From cbde933dca611eda775ff809ffbf639ac648b147 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Sun, 5 Feb 2023 18:01:58 +0000 Subject: [PATCH 62/87] Added eval_rfi function, 
that also attempts to reward actions that look in unobsered RFI areas --- stonesoup/custom/functions/__init__.py | 79 +++++++++++++++++++++++++- 1 file changed, 77 insertions(+), 2 deletions(-) diff --git a/stonesoup/custom/functions/__init__.py b/stonesoup/custom/functions/__init__.py index febd041cf..53ae00c3d 100644 --- a/stonesoup/custom/functions/__init__.py +++ b/stonesoup/custom/functions/__init__.py @@ -1,9 +1,10 @@ from functools import partial import math -from typing import Set, List +from typing import Set, List, Sequence import numpy as np import pyproj +from shapely import Polygon from shapely.geometry import Point from shapely.ops import transform from matplotlib.path import Path @@ -12,6 +13,8 @@ from shapely.geometry.base import BaseGeometry from vector3d.vector import Vector +from reactive_isr_core.data import RFI, TaskType +from stonesoup.sensor.sensor import Sensor from stonesoup.types.angle import Angle from stonesoup.types.state import ParticleState from stonesoup.types.track import Track @@ -401,7 +404,7 @@ def calculate_num_targets_dist(tracks: Set[Track], geom: BaseGeometry, or (target_types and any(item in track.metadata['target_type_confidences'] for item in target_types))] mu_overall = 0 - var_overall = np.inf if len(valid_tracks) == 0 else 0 + var_overall = 0 # np.inf if len(valid_tracks) == 0 else 0 path_p = Path(geom.boundary.coords) # Calculate PHD density inside polygon @@ -435,6 +438,78 @@ def calculate_num_targets_dist(tracks: Set[Track], geom: BaseGeometry, return mu_overall, var_overall +def eval_rfi(rfi: RFI, tracks: Sequence[Track], sensors: Sequence[Sensor], + phd_state: ParticleState = None, use_variance=True): + num_samples = 100 + mu_overall = 0 + var_overall = 0 # np.inf if len(valid_tracks) == 0 else 0 + config_metric = 0 + + xmin, ymin = rfi.region_of_interest.corners[0].longitude, \ + rfi.region_of_interest.corners[0].latitude + xmax, ymax = rfi.region_of_interest.corners[1].longitude, \ + 
rfi.region_of_interest.corners[1].latitude + geom = Polygon([(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]) + path_p = Path(geom.boundary.coords) + + target_types = [t.target_type.value for t in rfi.targets] + valid_tracks = [track for track in tracks + if not (target_types) + or (target_types and any(item in track.metadata['target_type_confidences'] + for item in target_types))] + + # Calculate PHD density inside polygon + if phd_state is not None: + points = phd_state.state_vector[[0, 2], :].T + inside_points = path_p.contains_points(points) + if np.sum(inside_points) > 0: + # The mean of the PHD density inside the polygon is the sum of the weights of the + # particles inside the polygon + mu_overall = np.exp(logsumexp(np.log(phd_state.weight[inside_points].astype(float)))) + # The variance of a Poisson distribution is equal to the mean + var_overall = mu_overall + + # Calculate number of tracks inside polygon + for track in valid_tracks: + # Sample points from the track state + points = multivariate_normal.rvs(mean=track.state_vector[[0, 2]].ravel(), + cov=track.covar[[0, 2], :][:, [0, 2]], + size=num_samples) + # Check which points are inside the polygon + inside_points = path_p.contains_points(points) + # Probability of existence inside the polygon is the fraction of points inside the polygon + # times the probability of existence + p_success = float(track.exist_prob) * (np.sum(inside_points) / num_samples) + # Mean of a Bernoulli distribution is equal to the probability of success + mu_overall += p_success + # Variance of a Bernoulli distribution is equal to the probability of success, + # times the probability of failure + var_overall += p_success * (1 - p_success) + + if rfi.task_type == TaskType.COUNT: + if mu_overall > 0 and var_overall < rfi.threshold_over_time.threshold[0]: + # TODO: Need to select the priority + config_metric += rfi.priority_over_time.priority[0] + if use_variance: + config_metric += 1 / var_overall + elif mu_overall == 0 and 
var_overall == 0: + aoi = 0 + for sensor in sensors: + center = (sensor.position[1], sensor.position[0]) + radius = sensor.fov_radius + p = geodesic_point_buffer(*center, radius) + aoi = max([geom.intersection(p).area / geom.area, aoi]) + config_metric += aoi + elif rfi.task_type == TaskType.FOLLOW: + for target in rfi.targets: + track = next((track for track in tracks if track.id == str(target.target_UUID)), None) + if track is not None: + var = track.covar[0, 0] + track.covar[2, 2] + if var < rfi.threshold_over_time.threshold[0]: + config_metric += rfi.priority_over_time.priority[0] + + return config_metric + def geodesic_point_buffer(lat, lon, km): # Azimuthal equidistant projection proj_wgs84 = pyproj.Proj('+proj=longlat +datum=WGS84') From a61239e24e18d1b97c8d3eb8221620b14cb7354f Mon Sep 17 00:00:00 2001 From: sglvladi Date: Sun, 5 Feb 2023 18:03:13 +0000 Subject: [PATCH 63/87] Bugfixes in RolloutPriorityRewardFunction2 --- stonesoup/custom/sensormanager/reward.py | 42 +++++++----------------- 1 file changed, 11 insertions(+), 31 deletions(-) diff --git a/stonesoup/custom/sensormanager/reward.py b/stonesoup/custom/sensormanager/reward.py index 9eb1acb7d..f09e663c4 100644 --- a/stonesoup/custom/sensormanager/reward.py +++ b/stonesoup/custom/sensormanager/reward.py @@ -10,7 +10,7 @@ from reactive_isr_core.data import TaskType from stonesoup.base import Property -from stonesoup.custom.functions import calculate_num_targets_dist, geodesic_point_buffer +from stonesoup.custom.functions import calculate_num_targets_dist, geodesic_point_buffer, eval_rfi from stonesoup.custom.tracker import SMCPHD_JIPDA from stonesoup.functions import gm_reduce_single from stonesoup.predictor.kalman import KalmanPredictor @@ -441,7 +441,7 @@ def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track] return 0 # Reward value - end_time = metric_time + datetime.timedelta(seconds=self.timesteps) + end_time = metric_time + self.timesteps * self.interval # Reward value 
config_metric, updated_tracks, predicted_sensors = self._compute_metric(config, tracks, @@ -450,7 +450,7 @@ def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track] if metric_time == end_time: return config_metric - timestamp = metric_time + datetime.timedelta(seconds=1) + timestamp = metric_time + self.interval all_action_choices = dict() for sensor in predicted_sensors: @@ -568,36 +568,16 @@ def _compute_metric(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set else: time_interval = timestamp - track.timestamp track.append(multihypothesis.prediction) + prob_survive = np.exp(-self.tracker.prob_death * time_interval.total_seconds()) + track.exist_prob *= prob_survive non_exist_weight = 1 - track.exist_prob - prob_survive = np.exp( - -self.tracker.prob_death * time_interval.total_seconds()) - non_det_weight = prob_survive * track.exist_prob + non_det_weight = (1 - self.tracker.prob_detect( + multihypothesis.prediction)) * track.exist_prob track.exist_prob = non_det_weight / (non_exist_weight + non_det_weight) - var = np.inf - for rfi in self.rfis: - xmin, ymin = rfi.region_of_interest.corners[0].longitude, \ - rfi.region_of_interest.corners[0].latitude - xmax, ymax = rfi.region_of_interest.corners[1].longitude, \ - rfi.region_of_interest.corners[1].latitude - geom = Polygon([(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]) - if rfi.task_type == TaskType.COUNT: - target_types = [t.target_type.value for t in rfi.targets] - _, var = calculate_num_targets_dist(tracks_copy, geom, target_types=target_types) - - if var < rfi.threshold_over_time.threshold[0]: - # TODO: Need to select the priority - config_metric += rfi.priority_over_time.priority[0] - if self.use_variance: - config_metric += 1 / var - elif rfi.task_type == TaskType.FOLLOW: - for target in rfi.targets: - track = next((track for track in tracks_copy - if track.id == str(target.target_UUID)), None) - if track is not None: - var = track.covar[0, 0] + track.covar[2, 2] - 
if var < rfi.threshold_over_time.threshold[0]: - config_metric += rfi.priority_over_time.priority[0] + for rfi in self.rfis: + config_metric += eval_rfi(rfi, tracks_copy, predicted_sensors, + use_variance=self.use_variance) return config_metric, tracks_copy, predicted_sensors @@ -632,7 +612,7 @@ def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track] if timestamp == end_time: return config_metric - timestamp = timestamp + datetime.timedelta(seconds=1) + timestamp += self.interval all_action_choices = dict() for sensor in predicted_sensors: From 0a893ecb5fd65a2eb430a6b32599d3529b30178e Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 6 Feb 2023 00:35:52 +0000 Subject: [PATCH 64/87] Remove unnecessary predicts in RolloutPriorityRewardFunction2 --- stonesoup/custom/sensormanager/reward.py | 8 +++----- stonesoup/custom/tracker.py | 5 +++-- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/stonesoup/custom/sensormanager/reward.py b/stonesoup/custom/sensormanager/reward.py index f09e663c4..1033546e9 100644 --- a/stonesoup/custom/sensormanager/reward.py +++ b/stonesoup/custom/sensormanager/reward.py @@ -513,12 +513,9 @@ def _compute_metric(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set predicted_track = copy.copy(track) predicted_track.append( self.tracker._predictor.predict(predicted_track, timestamp=timestamp)) - time_interval = timestamp - predicted_track.timestamp - prob_survive = np.exp(-self.tracker.prob_death * time_interval.total_seconds()) - track.exist_prob = prob_survive * track.exist_prob predicted_tracks.add(predicted_track) - tracks_copy = [copy.copy(track) for track in tracks] + tracks_copy = [copy.copy(track) for track in predicted_tracks] for sensor in predicted_sensors: @@ -566,7 +563,8 @@ def _compute_metric(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set track.append(state_post) track.exist_prob = Probability(1.) 
else: - time_interval = timestamp - track.timestamp + timestamp_m1 = track.timestamp if self.tracker.predict else track[-2].timestamp + time_interval = timestamp - timestamp_m1 track.append(multihypothesis.prediction) prob_survive = np.exp(-self.tracker.prob_death * time_interval.total_seconds()) track.exist_prob *= prob_survive diff --git a/stonesoup/custom/tracker.py b/stonesoup/custom/tracker.py index f9bac0a5a..417fe1afd 100644 --- a/stonesoup/custom/tracker.py +++ b/stonesoup/custom/tracker.py @@ -204,14 +204,15 @@ def track(self, detections, timestamp, *args, **kwargs): class SMCPHD_IGNN(_BaseTracker): """ A IGNN tracker using an SMC-PHD filter as the track initiator. """ - + predict = Property(bool, default=True, doc="Whether to predict tracks") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._predictor = KalmanPredictor(self.transition_model) self._updater = KalmanUpdater(self.measurement_model) self._hypothesiser = PDAHypothesiser(self._predictor, self._updater, self.clutter_intensity, - prob_detect=self.prob_detect) + prob_detect=self.prob_detect, + predict=self.predict) self._hypothesiser = DistanceHypothesiser(self._predictor, self._updater, Mahalanobis(), 10) self._associator = GNNWith2DAssignment(self._hypothesiser) From 18fcfa73498106d8c9e89746e30eea344631ac98 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 6 Feb 2023 00:38:26 +0000 Subject: [PATCH 65/87] Sensor footprint is now a property of MovableUAVSensor, that gets automatically re-evaluated upon action --- stonesoup/custom/functions/__init__.py | 14 +++++++----- stonesoup/custom/sensor/movable.py | 28 +++++++++++++++++++----- stonesoup/custom/sensormanager/reward.py | 7 +++--- 3 files changed, 36 insertions(+), 13 deletions(-) diff --git a/stonesoup/custom/functions/__init__.py b/stonesoup/custom/functions/__init__.py index 53ae00c3d..b404d396a 100644 --- a/stonesoup/custom/functions/__init__.py +++ b/stonesoup/custom/functions/__init__.py @@ -495,11 +495,12 @@ 
def eval_rfi(rfi: RFI, tracks: Sequence[Track], sensors: Sequence[Sensor], elif mu_overall == 0 and var_overall == 0: aoi = 0 for sensor in sensors: - center = (sensor.position[1], sensor.position[0]) - radius = sensor.fov_radius - p = geodesic_point_buffer(*center, radius) + # center = (sensor.position[1], sensor.position[0]) + # radius = sensor.fov_radius + # p = geodesic_point_buffer(*center, radius) + p = sensor.footprint aoi = max([geom.intersection(p).area / geom.area, aoi]) - config_metric += aoi + config_metric += aoi*rfi.priority_over_time.priority[0] elif rfi.task_type == TaskType.FOLLOW: for target in rfi.targets: track = next((track for track in tracks if track.id == str(target.target_UUID)), None) @@ -510,9 +511,12 @@ def eval_rfi(rfi: RFI, tracks: Sequence[Track], sensors: Sequence[Sensor], return config_metric + +proj_wgs84 = pyproj.Proj('+proj=longlat +datum=WGS84') + + def geodesic_point_buffer(lat, lon, km): # Azimuthal equidistant projection - proj_wgs84 = pyproj.Proj('+proj=longlat +datum=WGS84') aeqd_proj = '+proj=aeqd +lat_0={lat} +lon_0={lon} +x_0=0 +y_0=0' project = partial( pyproj.transform, diff --git a/stonesoup/custom/sensor/movable.py b/stonesoup/custom/sensor/movable.py index e62f78b47..6e398760f 100644 --- a/stonesoup/custom/sensor/movable.py +++ b/stonesoup/custom/sensor/movable.py @@ -3,8 +3,10 @@ import numpy as np import geopy.distance +from shapely import Point from stonesoup.base import Property +from stonesoup.custom.functions import geodesic_point_buffer from stonesoup.custom.sensor.action.location import LocationActionGenerator from stonesoup.models.clutter import ClutterModel from stonesoup.models.measurement.linear import LinearGaussian @@ -55,6 +57,10 @@ class MovableUAVCamera(Sensor): doc="Whether the FOV radius is in kilo-meters or degrees", default=True) + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._footprint = None + @location_x.setter def location_x(self, value): 
self._property_location_x = value @@ -80,6 +86,16 @@ def measurement_model(self): mapping=self.mapping, noise_covar=self.noise_covar) + @property + def footprint(self): + if self._footprint is None: + self._footprint = geodesic_point_buffer(*np.flip(self.position[0:2]), self.fov_radius) + return self._footprint + + def act(self, timestamp: datetime.datetime): + super().act(timestamp) + self._footprint = geodesic_point_buffer(*np.flip(self.position[0:2]), self.fov_radius) + def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True, **kwargs) -> Set[TrueDetection]: @@ -91,17 +107,19 @@ def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, measurement_vector = measurement_model.function(truth, noise=noise, **kwargs) if self.fov_in_km: - distance = geopy.distance.distance(np.flip(self.position[0:2]), - np.flip(measurement_vector[0:2])).km + # distance = geopy.distance.distance(np.flip(self.position[0:2]), + # np.flip(measurement_vector[0:2])).km + if not self._footprint.contains(Point(measurement_vector[0:2])): + continue else: # Normalise measurement vector relative to sensor position norm_measurement_vector = measurement_vector.astype(float) - self.position.astype( float) distance = np.linalg.norm(norm_measurement_vector[0:2]) - # Do not measure if state not in FOV - if distance > self.fov_radius: - continue + # Do not measure if state not in FOV + if distance > self.fov_radius: + continue detection = TrueDetection(measurement_vector, measurement_model=measurement_model, diff --git a/stonesoup/custom/sensormanager/reward.py b/stonesoup/custom/sensormanager/reward.py index 1033546e9..ee5dd8e9d 100644 --- a/stonesoup/custom/sensormanager/reward.py +++ b/stonesoup/custom/sensormanager/reward.py @@ -524,9 +524,10 @@ def _compute_metric(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set for detection in sensor.measure(predicted_tracks, noise=False) if isinstance(detection, TrueDetection)} - center = 
(sensor.position[1], sensor.position[0]) - radius = sensor.fov_radius - p = geodesic_point_buffer(*center, radius) + # center = (sensor.position[1], sensor.position[0]) + # radius = sensor.fov_radius + # p = geodesic_point_buffer(*center, radius) + p = sensor.footprint self.tracker.prob_detect = _prob_detect_func([p]) associations = self.tracker._associator.associate(tracks_copy, detections, timestamp) From 0423edb8fdee948c37360af9eec032a7484f4b6a Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 20 Mar 2023 15:19:24 +0000 Subject: [PATCH 66/87] Ensure MovableUAVSensor footprint is calculated correctly --- stonesoup/custom/sensor/movable.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/stonesoup/custom/sensor/movable.py b/stonesoup/custom/sensor/movable.py index 6e398760f..0477083ec 100644 --- a/stonesoup/custom/sensor/movable.py +++ b/stonesoup/custom/sensor/movable.py @@ -89,12 +89,19 @@ def measurement_model(self): @property def footprint(self): if self._footprint is None: - self._footprint = geodesic_point_buffer(*np.flip(self.position[0:2]), self.fov_radius) + if self.fov_in_km: + self._footprint = geodesic_point_buffer(*np.flip(self.position[0:2]), + self.fov_radius) + else: + self._footprint = Point(self.position[0:2]).buffer(self.fov_radius) return self._footprint def act(self, timestamp: datetime.datetime): super().act(timestamp) - self._footprint = geodesic_point_buffer(*np.flip(self.position[0:2]), self.fov_radius) + if self.fov_in_km: + self._footprint = geodesic_point_buffer(*np.flip(self.position[0:2]), self.fov_radius) + else: + self._footprint = Point(self.position[0:2]).buffer(self.fov_radius) def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True, **kwargs) -> Set[TrueDetection]: From 08c2db551334db1bc1671d1256fef32bee17a21a Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 20 Mar 2023 15:24:45 +0000 Subject: [PATCH 67/87] SensorManager bugfix --- 
stonesoup/custom/sensormanager/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stonesoup/custom/sensormanager/base.py b/stonesoup/custom/sensormanager/base.py index a42ee5eb3..be82cddc8 100644 --- a/stonesoup/custom/sensormanager/base.py +++ b/stonesoup/custom/sensormanager/base.py @@ -66,7 +66,7 @@ def choose_actions(self, tracks, timestamp, nchoose=1, **kwargs): rewards = [] for i, config in enumerate(configs): - reward, var = self.reward_function(config, tracks, timestamp) + reward = self.reward_function(config, tracks, timestamp) rewards.append(reward) # vars.append(var) if reward > min(best_rewards): From 478552bc2cc7b035a85187948a5a42e7459c31eb Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 20 Mar 2023 15:25:24 +0000 Subject: [PATCH 68/87] Added rective_ist/risr-demo.py example --- examples/reactive-isr/risr-demo.py | 318 +++++++++++++++++++++++++++++ 1 file changed, 318 insertions(+) create mode 100644 examples/reactive-isr/risr-demo.py diff --git a/examples/reactive-isr/risr-demo.py b/examples/reactive-isr/risr-demo.py new file mode 100644 index 000000000..334fa3fd6 --- /dev/null +++ b/examples/reactive-isr/risr-demo.py @@ -0,0 +1,318 @@ +import uuid +from datetime import datetime, timedelta +import warnings +warnings.simplefilter(action='ignore', category=FutureWarning) + +import numpy as np +from matplotlib import pyplot as plt +from matplotlib.widgets import Button +from ordered_set import OrderedSet +from shapely.geometry import Point, Polygon + +from stonesoup.custom.functions import calculate_num_targets_dist, geodesic_point_buffer +from stonesoup.custom.sensor.movable import MovableUAVCamera +from stonesoup.custom.sensormanager.base import UniqueBruteForceSensorManager +from stonesoup.custom.sensormanager.reward import RolloutPriorityRewardFunction, \ + RolloutPriorityRewardFunction2 +from stonesoup.types.angle import Angle +from stonesoup.types.array import StateVector +from stonesoup.types.numeric import Probability 
+from stonesoup.types.state import GaussianState, ParticleState +from stonesoup.custom.tracker import SMCPHD_JIPDA, SMCPHD_IGNN +from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \ + ConstantVelocity +from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState + +from reactive_isr_core.data import RFI, TaskType, GeoRegion, GeoLocation, PriorityOverTime, \ + ThresholdOverTime, TargetSpecification, TargetType + +from utils import plot_cov_ellipse, _prob_detect_func + +# np.random.seed(5547) +np.random.seed(95146) + +# Parameters +# ========== +start_time = datetime.now() # Simulation start time +prob_detect = Probability(.9) # 90% chance of detection. +prob_death = Probability(0.01) # Probability of death +prob_birth = Probability(0.1) # Probability of birth +prob_survive = Probability(0.99) # Probability of survival +birth_rate = 0.02 # Birth-rate (Mean number of new targets per scan) +clutter_rate = 2 # Clutter-rate (Mean number of clutter measurements per scan) +surveillance_region = [[-5, -2], # The surveillance region + [50.1, 53.2]] +surveillance_area = (surveillance_region[0][1] - surveillance_region[0][0]) \ + * (surveillance_region[1][1] - surveillance_region[1][0]) # Surveillance volume +clutter_intensity = clutter_rate / surveillance_area # Clutter intensity per unit volume/area +birth_density = GaussianState( + StateVector(np.array([-2.5, 0.0, 51, 0.0, 0.0, 0.0])), + np.diag([3. ** 2, .01 ** 2, 3. ** 2, .01 ** 2, 0., 0.])) # Birth density +birth_scheme = 'mixture' # Birth scheme. 
Possible values are 'expansion' and 'mixture' +num_particles = 2 ** 8 # Number of particles used by the PHD filter +num_iter = 200 # Number of simulation steps +total_no_sensors = 3 # Total number of sensors +PLOT = True # Set [True | False] to turn plotting [ON | OFF] +MANUAL_RFI = True # Set [True | False] to turn manual RFI [ON | OFF] +colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k'] # Colors for plotting + +# Simulate Groundtruth +# ==================== +gnd_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), + ConstantVelocity(0.), + ConstantVelocity(0.)]) +timestamps = [] +for k in range(0, num_iter + 1, 2): + timestamps.append(start_time + timedelta(seconds=k)) + +truths = set() +truth = GroundTruthPath([GroundTruthState([-3.7, 0.0, 52.0, 0.01, 0, 0], timestamp=start_time)]) +for timestamp in timestamps[1:]: + truth.append(GroundTruthState( + gnd_transition_model.function(truth[-1], noise=True, time_interval=timedelta(seconds=1)), + timestamp=timestamp)) +truths.add(truth) + +truth = GroundTruthPath([GroundTruthState([-4.6, 0.01, 52.1, -0.01, 0, 0], timestamp=start_time)]) +for timestamp in timestamps[1:]: + truth.append(GroundTruthState( + gnd_transition_model.function(truth[-1], noise=True, time_interval=timedelta(seconds=1)), + timestamp=timestamp)) +truths.add(truth) + +truth = GroundTruthPath([GroundTruthState([-3.5, 0, 51.3, -0.01, 0, 0], timestamp=start_time)]) +for timestamp in timestamps[1:]: + truth.append(GroundTruthState( + gnd_transition_model.function(truth[-1], noise=True, time_interval=timedelta(seconds=1)), + timestamp=timestamp)) +truths.add(truth) + +# Create sensors +# ============== +sensors = [] +for i in range(0, total_no_sensors): + rotation_offset = StateVector( + [Angle(0), Angle(-np.pi / 2), Angle(0)]) # Camera rotation offset + pan_tilt = StateVector([Angle(0), Angle(-np.pi / 32)]) # Camera pan and tilt + increment = 1.0*i + x = -4.5 + increment + y = 51.5 if i == 0 else 51.5 + increment + position = 
StateVector([-4.5+increment, 51.5, 100.]) + resolutions = {'location_x': 1, 'location_y': 1} + limits = {'location_x': [surveillance_region[0][0]+0.5, surveillance_region[0][1]-0.5], + 'location_y': [round(surveillance_region[1][0])+0.5, round(surveillance_region[1][1])-0.5]} + sensor = MovableUAVCamera(ndim_state=6, mapping=[0, 2, 4], + noise_covar=np.diag([0.0001, 0.0001, 0.0001]), + location_x=position[0], location_y=position[1], + resolutions=resolutions, + position=position, + fov_radius=70, + limits=limits) + sensors.append(sensor) +for sensor in sensors: + sensor.timestamp = start_time + +# Plot groundtruth and sensors +# ============================ +fig = plt.figure(figsize=(10, 6)) +ax = fig.add_subplot(111) +ax.set_xlim(surveillance_region[0][0]-1, surveillance_region[0][1]+1) +ax.set_ylim(surveillance_region[1][0], surveillance_region[1][1]) +ax.set_xlabel('Longitude') +ax.set_ylabel('Latitude') +ax.set_title('Groundtruth and initial sensor locations') +ax.grid(True) +ax.set_aspect('equal') +for i, track in enumerate(truths): + ax.plot([state.state_vector[0] for state in track], + [state.state_vector[2] for state in track], + color=colors[i], linestyle='--', linewidth=2, label=f'Truth {i+1}') +for j, sensor in enumerate(sensors): + coords = geodesic_point_buffer(sensor.position[1], sensor.position[0], sensor.fov_radius).exterior.coords[:] + ax.plot([coord[0] for coord in coords], [coord[1] for coord in coords], + color=colors[j], linewidth=2, label=f'Sensor {j+1} FOV') + # circle = plt.Circle((sensor.position[0], sensor.position[1]), radius=sensor.fov_radius, + # color=colors[j], + # fill=False, + # label=f'Sensor {j + 1}') + # ax.add_artist(circle, ) +ax.legend() +plt.show() + +# Tracking Components +# =================== +# Transition model +transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.000001), + ConstantVelocity(0.000001), + ConstantVelocity(0.000001)]) + +# Main tracker +tracker = 
SMCPHD_IGNN(birth_density=birth_density, transition_model=transition_model, + measurement_model=None, prob_detection=prob_detect, + prob_death=prob_death, prob_birth=prob_birth, + birth_rate=birth_rate, clutter_intensity=clutter_intensity, + num_samples=num_particles, birth_scheme=birth_scheme, + start_time=start_time) + +# Evaluator tracker +tracker2 = SMCPHD_IGNN(birth_density=birth_density, transition_model=transition_model, + measurement_model=None, prob_detection=prob_detect, + prob_death=prob_death, prob_birth=prob_birth, + birth_rate=birth_rate, clutter_intensity=clutter_intensity, + num_samples=num_particles, birth_scheme=birth_scheme, + start_time=start_time) +# tracker2 = copy.deepcopy(tracker) + + +# Sensor Management Components +# ============================ +# Reward function +roi = GeoRegion(corners=[ + GeoLocation( + longitude=surveillance_region[0][0], + latitude=surveillance_region[1][0], + altitude=0), + GeoLocation( + longitude=surveillance_region[0][1], + latitude=surveillance_region[1][1], + altitude=0)] +) +rfi = RFI(id=uuid.uuid4(), + task_type=TaskType.COUNT, + region_of_interest=roi, + start_time=datetime.now(), + end_time=datetime.now(), + priority_over_time=PriorityOverTime(timescale=[datetime.now()], priority=[5]), + targets=[], #TargetSpecification(target_type=TargetType.VEHICLE, existence_probability=0.9) + threshold_over_time=ThresholdOverTime(timescale=[datetime.now()], threshold=[.00001])) +rfis = [rfi] if not MANUAL_RFI else [] +reward_function = RolloutPriorityRewardFunction2(tracker2, 0, + num_samples=100, interval=timedelta(seconds=5), + rfis=rfis) +sensor_manager = UniqueBruteForceSensorManager(sensors, reward_function) + + +# Estimate +# ======== +# Plotting setup +if PLOT: + fig1 = plt.figure(figsize=(20, 7)) + ax1, ax2 = fig1.subplots(1, 2) + ax1.set_title('Simulation') + ax2.set_title('Variance') + fig1.subplots_adjust(bottom=0.2) + axbtn = fig1.add_axes([0.81, 0.05, 0.15, 0.075]) + btn = Button(axbtn, 'New RFI') + def 
set_rfis(*args, **kwargs): + print("Added RFI") + reward_function.rfis = [rfi] + btn.on_clicked(set_rfis) + plt.ion() + +# Main tracking loop +tracks = set() +vars = [] +for k, timestamp in enumerate(timestamps): + + if k == 20: + sensors.pop(1) + + sensor_detections = [] + tracks = list(tracks) + truth_states = OrderedSet(truth[timestamp] for truth in truths) + + # Compute variance of number of targets + region_corners = rfi.region_of_interest.corners + xmin, ymin = region_corners[0].longitude, region_corners[0].latitude + xmax, ymax = region_corners[1].longitude, region_corners[1].latitude + geom = Polygon([(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]) + _, var = calculate_num_targets_dist(tracks, geom) + vars.append(var) + + # Check if RFI is satisfied and remove it + if MANUAL_RFI and len(reward_function.rfis) > 0: + if var < rfi.threshold_over_time.threshold[0]: + reward_function.rfis.remove(rfi) + + # Generate chosen configuration + chosen_actions = sensor_manager.choose_actions(tracks, timestamp) + for chosen_action in chosen_actions: + for sensor, actions in chosen_action.items(): + sensor.add_actions(actions) + + # Cue sensors + for sensor in sensors: + sensor.act(timestamp) + + # For each sensor + for j, sensor in enumerate(sensors): + + # Compute probability of detection + # center = (sensor.position[0], sensor.position[1]) + # radius = sensor.fov_radius + # p = Point(center).buffer(radius) + p = geodesic_point_buffer(sensor.position[1], sensor.position[0], sensor.fov_radius) + tracker.prob_detect = _prob_detect_func(prob_detect, [p]) + + # Observe the ground truth + detections = sensor.measure(truth_states, noise=True) + for detection in detections: + detection.metadata['target_type_confidences'] = { + 'person': 1.0 + } + sensor_detections.append(detections) + + detections = list(detections) + num_tracks = len(tracks) + num_detections = len(detections) + + # Track using main tracker + tracks = tracker.track(detections, timestamp) + + # Print 
debug info + tracks = list(tracks) + print(f'\n Sensor {j+1} ===========================================') + for track in tracks: + print(f'Track {track.id} - Exist prob: {track.exist_prob}') + + # Plot output + if PLOT: + ax1.cla() + ax2.cla() + ax2.plot([i for i in range(k+1)], vars, 'r') + ax2.set_xlabel('Time') + ax2.set_ylabel('Variance') + ax1.set_title('Simulation') + ax2.set_title('Variance') + for j, sensor in enumerate(sensors): + coords = geodesic_point_buffer(sensor.position[1], sensor.position[0], + sensor.fov_radius).exterior.coords[:] + ax1.plot([coord[0] for coord in coords], [coord[1] for coord in coords], + color=colors[j], linewidth=2, label=f'Sensor {j + 1} FOV') + # circle = plt.Circle((sensor.position[0], sensor.position[1]), radius=sensor.fov_radius, + # color=colors[j], + # fill=False, + # label=f'Sensor {j+1}') + # ax1.add_artist(circle, ) + detections = sensor_detections[j] + if len(detections): + det_data = np.array([det.state_vector for det in detections]) + ax1.plot(det_data[:, 0], det_data[:, 1], f'*{colors[j]}', label='Detections') + + for i, truth in enumerate(truths): + data = np.array([s.state_vector for s in truth[:k + 1]]) + ax1.plot(data[:, 0], data[:, 2], '--', label=f'Groundtruth Track {i+1}') + + for i, track in enumerate(tracks): + data = np.array([s.state_vector for s in track]) + ax1.plot(data[:, 0], data[:, 2], label=f'Track {i}') + plot_cov_ellipse(track.covar[[0, 2], :][:, [0, 2]], track.state_vector[[0, 2], :], + edgecolor='r', facecolor='none', ax=ax1) + ax1.set_aspect('equal', adjustable='box', anchor='C') + ax1.set_xlim(np.array(surveillance_region[0]) + np.array([-1, 1])) + ax1.set_ylim(surveillance_region[1]) + ax1.set_xlabel('Longitude') + ax1.set_ylabel('Latitude') + ax1.legend(loc='upper right') + plt.pause(0.1) + From b8d83ca7ddd3a7cf4c7099d590a71d0b0d15b19a Mon Sep 17 00:00:00 2001 From: sglvladi Date: Wed, 17 May 2023 22:46:24 +0100 Subject: [PATCH 69/87] Add FuseTracker components --- 
stonesoup/custom/functions/__init__.py | 66 ++- stonesoup/custom/hypothesiser/probability.py | 10 +- stonesoup/custom/initiator/twostate.py | 112 ++++ stonesoup/custom/models/__init__.py | 0 .../custom/models/measurement/__init__.py | 0 stonesoup/custom/models/measurement/linear.py | 97 ++++ stonesoup/custom/predictor/__init__.py | 0 stonesoup/custom/predictor/twostate.py | 29 + stonesoup/custom/reader/__init__.py | 0 stonesoup/custom/reader/track.py | 61 +++ stonesoup/custom/reader/tracklet.py | 503 ++++++++++++++++++ stonesoup/custom/simulator/__init__.py | 0 stonesoup/custom/simulator/platform.py | 31 ++ .../{tracker.py => tracker/__init__.py} | 10 + stonesoup/custom/tracker/fuse.py | 271 ++++++++++ stonesoup/custom/types/__init__.py | 0 stonesoup/custom/types/hypothesis.py | 14 + stonesoup/custom/types/prediction.py | 7 + stonesoup/custom/types/state.py | 18 + stonesoup/custom/types/tracklet.py | 69 +++ stonesoup/custom/types/update.py | 7 + stonesoup/custom/updater/__init__.py | 0 stonesoup/custom/updater/twostate.py | 62 +++ stonesoup/sensormanager/base.py | 4 +- stonesoup/types/track.py | 5 + 25 files changed, 1371 insertions(+), 5 deletions(-) create mode 100644 stonesoup/custom/initiator/twostate.py create mode 100644 stonesoup/custom/models/__init__.py create mode 100644 stonesoup/custom/models/measurement/__init__.py create mode 100644 stonesoup/custom/models/measurement/linear.py create mode 100644 stonesoup/custom/predictor/__init__.py create mode 100644 stonesoup/custom/predictor/twostate.py create mode 100644 stonesoup/custom/reader/__init__.py create mode 100644 stonesoup/custom/reader/track.py create mode 100644 stonesoup/custom/reader/tracklet.py create mode 100644 stonesoup/custom/simulator/__init__.py create mode 100644 stonesoup/custom/simulator/platform.py rename stonesoup/custom/{tracker.py => tracker/__init__.py} (97%) create mode 100644 stonesoup/custom/tracker/fuse.py create mode 100644 stonesoup/custom/types/__init__.py create mode 100644 
stonesoup/custom/types/hypothesis.py create mode 100644 stonesoup/custom/types/prediction.py create mode 100644 stonesoup/custom/types/state.py create mode 100644 stonesoup/custom/types/tracklet.py create mode 100644 stonesoup/custom/types/update.py create mode 100644 stonesoup/custom/updater/__init__.py create mode 100644 stonesoup/custom/updater/twostate.py diff --git a/stonesoup/custom/functions/__init__.py b/stonesoup/custom/functions/__init__.py index b404d396a..5592bb372 100644 --- a/stonesoup/custom/functions/__init__.py +++ b/stonesoup/custom/functions/__init__.py @@ -3,6 +3,8 @@ from typing import Set, List, Sequence import numpy as np +from numpy import linalg as la +from scipy.linalg import block_diag import pyproj from shapely import Polygon from shapely.geometry import Point @@ -523,4 +525,66 @@ def geodesic_point_buffer(lat, lon, km): pyproj.Proj(aeqd_proj.format(lat=lat, lon=lon)), proj_wgs84) buf = Point(0, 0).buffer(km * 1000) # distance in metres - return transform(project, buf) \ No newline at end of file + return transform(project, buf) + + +def predict_state_to_two_state(old_mean, old_cov, tx_model, dt): + A = tx_model.matrix(time_interval=dt) + Q = tx_model.covar(time_interval=dt) + statedim = A.shape[0] + AA = np.concatenate((np.eye(statedim), A)) + QQ = block_diag(np.zeros((statedim, statedim)), Q) + return AA @ old_mean, AA @ old_cov @ AA.T + QQ + + +def nearestPD(A): + """Find the nearest positive-definite matrix to input + + A Python/Numpy port of John D'Errico's `nearestSPD` MATLAB code [1], which + credits [2]. + + [1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd + + [2] N.J. 
Higham, "Computing a nearest symmetric positive semidefinite + matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6 + """ + + B = (A + A.T) / 2 + _, s, V = la.svd(B) + + H = np.dot(V.T, np.dot(np.diag(s), V)) + + A2 = (B + H) / 2 + + A3 = (A2 + A2.T) / 2 + + if isPD(A3): + return A3 + + spacing = np.spacing(la.norm(A)) + # The above is different from [1]. It appears that MATLAB's `chol` Cholesky + # decomposition will accept matrixes with exactly 0-eigenvalue, whereas + # Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab + # for `np.spacing`), we use the above definition. CAVEAT: our `spacing` + # will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on + # the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas + # `spacing` will, for Gaussian random matrixes of small dimension, be on + # othe order of 1e-16. In practice, both ways converge, as the unit test + # below suggests. + I = np.eye(A.shape[0]) + k = 1 + while not isPD(A3): + mineig = np.min(np.real(la.eigvals(A3))) + A3 += I * (-mineig * k**2 + spacing) + k += 1 + + return A3 + + +def isPD(B): + """Returns true when input is positive-definite, via Cholesky""" + try: + _ = la.cholesky(B) + return True + except la.LinAlgError: + return False \ No newline at end of file diff --git a/stonesoup/custom/hypothesiser/probability.py b/stonesoup/custom/hypothesiser/probability.py index 801cd7a6e..7dad20760 100644 --- a/stonesoup/custom/hypothesiser/probability.py +++ b/stonesoup/custom/hypothesiser/probability.py @@ -127,7 +127,10 @@ def hypothesise(self, track, detections, timestamp, **kwargs): else: prediction = track.state # Missed detection hypothesis - prob_detect = self.prob_detect(prediction) + try: + prob_detect = self.prob_detect(prediction) + except TypeError: + prob_detect = self.prob_detect # Missed detection hypothesis probability = Probability(1 - prob_detect*self.prob_gate) hypotheses.append( @@ -144,7 +147,10 @@ def hypothesise(self, 
track, detections, timestamp, **kwargs): # Re-evaluate prediction prediction = self.predictor.predict( track.state, timestamp=detection.timestamp) - prob_detect = self.prob_detect(prediction) + try: + prob_detect = self.prob_detect(prediction) + except TypeError: + prob_detect = self.prob_detect if self.per_measurement or measurement_prediction is None: # Compute measurement prediction and probability measure diff --git a/stonesoup/custom/initiator/twostate.py b/stonesoup/custom/initiator/twostate.py new file mode 100644 index 000000000..b67146be5 --- /dev/null +++ b/stonesoup/custom/initiator/twostate.py @@ -0,0 +1,112 @@ +import numpy as np + +from ...base import Base, Property +from ...models.base import LinearModel, ReversibleModel +from ...models.transition import TransitionModel +from ...types.state import GaussianState, State +from ...updater import Updater +from ..functions import predict_state_to_two_state, nearestPD, isPD +from ...types.hypothesis import SingleProbabilityHypothesis +from ...types.track import Track +from ...types.numeric import Probability + +from ..types.state import TwoStateGaussianState + + +class TwoStateInitiator(Base): + + def __init__(self, *args, **kwargs): + super(TwoStateInitiator, self).__init__(*args, **kwargs) + self._max_track_id = 0 + + prior: GaussianState = Property(doc='The prior used to initiate fused tracks') + transition_model: TransitionModel = Property(doc='The transition model') + updater: Updater = Property(doc='Updater used to update fused tracks') + + def initiate(self, detections, start_time, end_time, **kwargs): + init_mean = self.prior.mean + init_cov = self.prior.covar + init_mean, init_cov = predict_state_to_two_state(init_mean, init_cov, + self.transition_model, + end_time - start_time) + + prior = TwoStateGaussianState(init_mean, init_cov, start_time=start_time, + end_time=end_time) + new_tracks = set() + for detection in detections: + hyp = SingleProbabilityHypothesis(prediction=prior, 
measurement=detection, + probability=Probability(1.0)) + state = self.updater.update(hyp) + track = Track([state], id=self._max_track_id) + track.exist_prob = Probability(1) + self._max_track_id += 1 + new_tracks.add(track) + + return new_tracks + + + +class TwoStateMeasurementInitiator(TwoStateInitiator): + + skip_non_reversible: bool = Property(default=False) + diag_load: float = Property(default=0.0, doc="Positive float value for diagonal loading") + + def initiate(self, detections, start_time, end_time, **kwargs): + + new_tracks = set() + for detection in detections: + measurement_model = detection.measurement_model + + if isinstance(measurement_model, LinearModel): + model_matrix = measurement_model.matrix() + inv_model_matrix = np.linalg.pinv(model_matrix) + state_vector = inv_model_matrix @ detection.state_vector + else: + if isinstance(measurement_model, ReversibleModel): + try: + state_vector = measurement_model.inverse_function(detection) + except NotImplementedError: + if not self.skip_non_reversible: + raise + else: + continue + model_matrix = measurement_model.jacobian(State(state_vector)) + inv_model_matrix = np.linalg.pinv(model_matrix) + elif self.skip_non_reversible: + continue + else: + raise Exception("Invalid measurement model used.\ + Must be instance of linear or reversible.") + + model_covar = measurement_model.covar() + + init_mean = self.prior.state_vector.copy() + init_cov = self.prior.covar.copy() + + + init_mean, init_cov = predict_state_to_two_state(init_mean, init_cov, + self.transition_model, + end_time - start_time) + mapped_dimensions = measurement_model.mapping + + init_mean[mapped_dimensions, :] = 0 + init_cov[mapped_dimensions, :] = 0 + C0 = inv_model_matrix @ model_covar @ inv_model_matrix.T + C0 = C0 + init_cov + np.diag(np.array([self.diag_load] * C0.shape[0])) + if not isPD(C0): + C0 = nearestPD(C0) + init_mean = init_mean + state_vector + prior = TwoStateGaussianState(init_mean, C0, start_time=start_time, + 
end_time=end_time) + hyp = SingleProbabilityHypothesis(prediction=prior, measurement=detection, + probability=Probability(1.0)) + state = self.updater.update(hyp) + track = Track([state], id=self._max_track_id) + track.exist_prob = Probability(1) + self._max_track_id += 1 + new_tracks.add(track) + + return new_tracks + + + diff --git a/stonesoup/custom/models/__init__.py b/stonesoup/custom/models/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/models/measurement/__init__.py b/stonesoup/custom/models/measurement/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/models/measurement/linear.py b/stonesoup/custom/models/measurement/linear.py new file mode 100644 index 000000000..75d9f12ea --- /dev/null +++ b/stonesoup/custom/models/measurement/linear.py @@ -0,0 +1,97 @@ +from stonesoup.base import Property +from stonesoup.models.base import LinearModel, GaussianModel +from stonesoup.models.measurement import MeasurementModel +from stonesoup.types.array import Matrix, CovarianceMatrix + + +class LinearGaussianPredefinedH(MeasurementModel, LinearModel, GaussianModel): + r"""This is a class implementation of a time-invariant 1D + Linear-Gaussian Measurement Model. + + The model is described by the following equations: + + .. math:: + + y_t = H_k*x_t + v_k,\ \ \ \ v(k)\sim \mathcal{N}(0,R) + + where ``H_k`` is a (:py:attr:`~ndim_meas`, :py:attr:`~ndim_state`) \ + matrix and ``v_k`` is Gaussian distributed. 
+ + """ + + h_matrix: Matrix = Property(doc="The model matrix") + noise_covar: CovarianceMatrix = Property(doc="Noise covariance") + + @property + def ndim_state(self): + """ndim_meas getter method + + Returns + ------- + :class:`int` + The number of measurement dimensions + """ + + return self.h_matrix.shape[1] + + @property + def ndim_meas(self): + """ndim_meas getter method + + Returns + ------- + :class:`int` + The number of measurement dimensions + """ + + return len(self.mapping) + + def matrix(self, **kwargs): + """Model matrix :math:`H(t)` + + Returns + ------- + :class:`numpy.ndarray` of shape \ + (:py:attr:`~ndim_meas`, :py:attr:`~ndim_state`) + The model matrix evaluated given the provided time interval. + """ + + return self.h_matrix + + def function(self, state, noise=False, **kwargs): + """Model function :math:`h(t,x(t),w(t))` + + Parameters + ---------- + state: :class:`~.State` + An input state + noise: :class:`numpy.ndarray` or bool + An externally generated random process noise sample (the default is + `False`, in which case no noise will be added + if 'True', the output of :meth:`~.Model.rvs` is added) + + Returns + ------- + :class:`numpy.ndarray` of shape (:py:attr:`~ndim_meas`, 1) + The model function evaluated given the provided time interval. + """ + + if isinstance(noise, bool) or noise is None: + if noise: + noise = self.rvs() + else: + noise = 0 + + return self.matrix(**kwargs)@state.state_vector + noise + + def covar(self, **kwargs): + """Returns the measurement model noise covariance matrix. + + Returns + ------- + :class:`~.CovarianceMatrix` of shape\ + (:py:attr:`~ndim_meas`, :py:attr:`~ndim_meas`) + The measurement noise covariance. 
+ """ + + return self.noise_covar \ No newline at end of file diff --git a/stonesoup/custom/predictor/__init__.py b/stonesoup/custom/predictor/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/predictor/twostate.py b/stonesoup/custom/predictor/twostate.py new file mode 100644 index 000000000..4e8d875ff --- /dev/null +++ b/stonesoup/custom/predictor/twostate.py @@ -0,0 +1,29 @@ +from stonesoup.predictor import Predictor +from stonesoup.predictor._utils import predict_lru_cache +from stonesoup.types.prediction import Prediction + +from stonesoup.custom.functions import predict_state_to_two_state + +class TwoStatePredictor(Predictor): + + @predict_lru_cache() + def predict(self, prior, current_end_time=None, new_start_time=None, new_end_time=None, + **kwargs): + statedim = self.transition_model.ndim_state + mu = prior.mean[-statedim:] + C = prior.covar[-statedim:, -statedim:] + if new_start_time > current_end_time: + dt = new_start_time - current_end_time + A = self.transition_model.matrix(time_interval=dt) + Q = self.transition_model.covar(time_interval=dt) + mu = A @ mu + C = A @ C @ A.T + Q + elif new_start_time < current_end_time: + raise ValueError('newStartTime < currentEndTime - scan times messed up!') + + two_state_mu, two_state_cov = predict_state_to_two_state(mu, C, self.transition_model, + new_end_time - new_start_time) + + return Prediction.from_state(prior, two_state_mu, two_state_cov, + start_time=new_start_time, + end_time=new_end_time) diff --git a/stonesoup/custom/reader/__init__.py b/stonesoup/custom/reader/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/reader/track.py b/stonesoup/custom/reader/track.py new file mode 100644 index 000000000..2cb7e13a6 --- /dev/null +++ b/stonesoup/custom/reader/track.py @@ -0,0 +1,61 @@ +import threading +from copy import copy + +from ...base import Property +from ...models.transition import TransitionModel +from ...reader.base import 
Reader +from ...tracker.base import Tracker +from ..types.tracklet import SensorTracks +from ...buffered_generator import BufferedGenerator + + +class TrackReader(Reader): + tracker: Tracker = Property(doc='Tracker from which to read tracks') + run_async: bool = Property( + doc="If set to ``True``, the reader will read tracks from the tracker asynchronously " + "and only yield the latest set of tracks when iterated. Defaults to ``False``", + default=False) + transition_model: TransitionModel = Property(doc='Transition model used by the tracker', + default=None) + sensor_id: str = Property(doc='The id of the sensor', default=None) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + # Variables used in async mode + if self.run_async: + self._buffer = None + # Initialise frame capture thread + self._capture_thread = threading.Thread(target=self._capture) + self._capture_thread.daemon = True + self._thread_lock = threading.Lock() + self._capture_thread.start() + + @property + def tracks(self): + return self.current[1] + + @BufferedGenerator.generator_method + def tracks_gen(self): + if self.run_async: + yield from self._tracks_gen_async() + else: + yield from self._tracks_gen() + + def _capture(self): + for timestamp, tracks in self.tracker: + self._thread_lock.acquire() + self._buffer = (timestamp, SensorTracks(tracks, self.sensor_id, self.transition_model)) + self._thread_lock.release() + + def _tracks_gen(self): + for timestamp, tracks in self.tracker: + yield timestamp, SensorTracks(tracks, self.sensor_id, self.transition_model) + + def _tracks_gen_async(self): + while self._capture_thread.is_alive(): + if self._buffer is not None: + self._thread_lock.acquire() + timestamp, tracks = copy(self._buffer) + self._buffer = None + self._thread_lock.release() + yield timestamp, tracks diff --git a/stonesoup/custom/reader/tracklet.py b/stonesoup/custom/reader/tracklet.py new file mode 100644 index 000000000..036af828e --- /dev/null +++ 
b/stonesoup/custom/reader/tracklet.py @@ -0,0 +1,503 @@ +from typing import List +import datetime +from copy import deepcopy + +import numpy as np +from scipy.linalg import block_diag, inv + +from .track import TrackReader +from ...base import Base, Property +from ...buffered_generator import BufferedGenerator +from ...detector import Detector +from ...models.transition.base import TransitionModel +from ..models.measurement.linear import LinearGaussianPredefinedH +from ...tracker.base import Tracker +from ...types.mixture import GaussianMixture +from ...types.numeric import Probability +from ..types.prediction import TwoStateGaussianStatePrediction, Prediction +from ..types.tracklet import Tracklet, SensorTracklets, Scan, SensorScan +from ...types.state import GaussianState +from ...types.array import StateVector +from ...types.detection import Detection +from ..functions import predict_state_to_two_state +from ...predictor.kalman import ExtendedKalmanPredictor +from ..types.update import TwoStateGaussianStateUpdate, Update + + +class TrackletExtractor(Base, BufferedGenerator): + trackers: List[Tracker] = Property(doc='List of trackers from which to extract tracks') + transition_model: TransitionModel = Property(doc='Transition model') + fuse_interval: datetime.timedelta = Property(doc='Fusion interval') + real_time: bool = Property(doc='Flag indicating whether the extractor should report ' + 'real time', default=False) + + def __init__(self, *args, **kwargs): + super(TrackletExtractor, self).__init__(*args, **kwargs) + self._tracklets = [] + self._fuse_times = [] + + @property + def tracklets(self): + return self.current[1] + + @BufferedGenerator.generator_method + def tracklets_gen(self): + """Returns a generator of detections for each time step. 
+ + Yields + ------ + : :class:`datetime.datetime` + Datetime of current time step + : set of :class:`~.Detection` + Detections generate in the time step + """ + for data in zip(*self.trackers): + timestamp = data[0][0] + alltracks = [d[1] for d in data] + if self.real_time: + timestamp = datetime.datetime.now() + if not len(self._fuse_times) or timestamp - self._fuse_times[-1] >= self.fuse_interval: + # Append current fuse time to fuse times + self._fuse_times.append(timestamp) + yield timestamp, self.get_tracklets_seq(alltracks, timestamp) + + def extract(self, alltracks, timestamp): + if not len(self._fuse_times) or timestamp - self._fuse_times[-1] >= self.fuse_interval: + # Append current fuse time to fuse times + self._fuse_times.append(timestamp) + self._tracklets = self.get_tracklets_seq(alltracks, timestamp) + self.current = (timestamp, self._tracklets) + return self._tracklets + + def get_tracklets_seq(self, alltracks, timestamp): + # Iterate over the local tracks of each sensor + for sensor_tracks in alltracks: + sensor_id = sensor_tracks.sensor_id + # Get tracklets for sensor + idx = next((i for i, t in enumerate(self._tracklets) + if t.sensor_id == sensor_id), None) + sensor_tracklets = self._tracklets[idx] if idx is not None else [] + # Temporary tracklet list + tracklets_tmp = [] + # Transition model + transition_model = self.transition_model + if sensor_tracks.transition_model is not None: + transition_model = sensor_tracks.transition_model + # For each local track + for track in sensor_tracks: + tracklet = next((t for t in sensor_tracklets if track.id == t.id), None) + # If the tracklet doesn't already exist + if tracklet is None and len(self._fuse_times) > 1: + # Create it + tracklet = self.init_tracklet(track, transition_model, + np.array(self._fuse_times), sensor_id) + elif tracklet is not None: + # Else simply augment + self.augment_tracklet(tracklet, track, transition_model, timestamp) + # Append tracklet to temporary tracklets + if tracklet: + 
tracklets_tmp.append(tracklet) + # If a tracklet set for the sensor doesn't already exist + if idx is None: + # Add it + self._tracklets.append(SensorTracklets(tracklets_tmp, sensor_id)) + else: + # Else replace the existing one + self._tracklets[idx] = SensorTracklets(tracklets_tmp, sensor_id) + # Return the stored tracklets + return self._tracklets + + def get_tracklets_batch(self, alltracks, fuse_times): + tracklets = [] + for tracks in alltracks: + tracklets_tmp = [] + # Transition model + transition_model = self.transition_model + if tracks.transition_model is not None: + transition_model = tracks.transition_model + for track in tracks: + tracklet = self.init_tracklet(track, transition_model, fuse_times) + if tracklet: + tracklets_tmp.append(tracklet) + tracklets.append(tracklets_tmp) + return tracklets + + def augment_tracklet(self, tracklet, track, transition_model, timestamp): + track_times = np.array([s.timestamp for s in track]) + + filtered_means = np.concatenate([s.mean for s in track], 1) + filtered_covs = np.stack([s.covar for s in track], 2) + filtered_times = np.array([s.timestamp for s in track]) + + start_time = tracklet.states[-1].timestamp + end_time = timestamp + nupd = np.sum(np.logical_and(track_times > start_time, track_times <= end_time)) + if nupd > 0: + # Indices of end-states that are just before the start and end times + ind0 = np.flatnonzero(filtered_times <= start_time)[-1] + ind1 = np.flatnonzero(filtered_times <= end_time)[-1] + # The end states + end_states = [track.states[ind0], track.states[ind1]] + # All means, covs and times that fall inbetween + means = filtered_means[:, ind0 + 1:ind1 + 1] + covs = filtered_covs[:, :, ind0 + 1: ind1 + 1] + times = filtered_times[ind0 + 1:ind1 + 1] + # Compute interval distribution + post_mean, post_cov, prior_mean, prior_cov = \ + self.get_interval_dist(means, covs, times, end_states, + transition_model, start_time, end_time) + prior = TwoStateGaussianStatePrediction(prior_mean, prior_cov, + 
start_time=start_time, + end_time=end_time) + posterior = TwoStateGaussianStateUpdate(post_mean, post_cov, + hypothesis=None, + start_time=start_time, + end_time=end_time) + tracklet.states.append(prior) + tracklet.states.append(posterior) + + @classmethod + def init_tracklet(cls, track, tx_model, fuse_times, sensor_id=None): + track_times = np.array([s.timestamp for s in track]) + idx0 = np.flatnonzero(fuse_times >= track_times[0]) + idx1 = np.flatnonzero(fuse_times <= track_times[-1]) + + if not len(idx0) or not len(idx1): + return None + else: + idx0 = idx0[0] + idx1 = idx1[-1] + + states = [] + + filtered_means = np.concatenate([s.mean for s in track], 1) + filtered_covs = np.stack([s.covar for s in track], 2) + filtered_times = np.array([s.timestamp for s in track]) + + cnt = 0 + for i in range(idx0, idx1): + start_time = fuse_times[i] + end_time = fuse_times[i + 1] + nupd = np.sum(np.logical_and(track_times > start_time, track_times <= end_time)) + if nupd > 0: + cnt += 1 + # Indices of end-states that are just before the start and end times + ind0 = np.flatnonzero(filtered_times <= start_time)[-1] + ind1 = np.flatnonzero(filtered_times <= end_time)[-1] + # The end states + end_states = [track.states[ind0], track.states[ind1]] + # All means, covs and times that fall inbetween + means = filtered_means[:, ind0 + 1:ind1 + 1] + covs = filtered_covs[:, :, ind0 + 1: ind1 + 1] + times = filtered_times[ind0 + 1:ind1 + 1] + # Compute interval distribution + post_mean, post_cov, prior_mean, prior_cov = \ + cls.get_interval_dist(means, covs, times, end_states, + tx_model, start_time, end_time) + + prior = TwoStateGaussianStatePrediction(prior_mean, prior_cov, + start_time=start_time, + end_time=end_time) + posterior = TwoStateGaussianStateUpdate(post_mean, post_cov, + hypothesis=None, + start_time=start_time, + end_time=end_time) + + states.append(prior) + states.append(posterior) + + if not cnt: + return None + + tracklet = Tracklet(id=track.id, states=states, 
init_metadata={'sensor_id': sensor_id}) + + return tracklet + + @classmethod + def get_interval_dist(cls, filtered_means, filtered_covs, filtered_times, states, tx_model, + start_time, end_time): + + # Get filtered distributions at start and end of interval + predictor = ExtendedKalmanPredictor(tx_model) + state0 = states[0] + state1 = states[1] + + pred0 = predictor.predict(state0, start_time) + pred1 = predictor.predict(state1, end_time) + + # Predict prior mean + prior_mean, prior_cov = predict_state_to_two_state(pred0.mean, pred0.covar, tx_model, + end_time - start_time) + + # Get posterior mean by running smoother + mn = np.concatenate([pred0.mean, filtered_means, pred1.mean], 1) + cv = np.stack([pred0.covar, *list(np.swapaxes(filtered_covs, 0, 2)), pred1.covar], 2) + t = np.array([start_time, *filtered_times, end_time]) + post_mean, post_cov = cls.rts_smoother_endpoints(mn, cv, t, tx_model) + + return post_mean, post_cov, prior_mean, prior_cov + + @classmethod + def rts_smoother_endpoints(cls, filtered_means, filtered_covs, times, tx_model): + statedim, ntimesteps = filtered_means.shape + + joint_smoothed_mean = np.tile(filtered_means[:, -1], (1, 2)).T + joint_smoothed_cov = np.tile(filtered_covs[:, :, -1], (2, 2)) + + for k in reversed(range(ntimesteps - 1)): + dt = times[k + 1] - times[k] + A = tx_model.matrix(time_interval=dt) + Q = tx_model.covar(time_interval=dt) + # Filtered distribution + m = filtered_means[:, k][:, np.newaxis] + P = filtered_covs[:, :, k] + # Get transition model x_{k+1} -> x_k + # p(x_k | x_{k+1}, y_{1:T}) = Norm(x_k; Fx_{k+1} + b, Omega) + F = P @ A.T @ inv(A @ P @ A.T + Q) + b = m - F @ A @ m + Omega = P - F @ (A @ P @ A.T + Q) @ F.T + # Two-state transition model (x_{k+1}, x_T) -> (x_k, x_T) + F2 = block_diag(F, np.eye(statedim)) + b2 = np.concatenate((b, np.zeros((statedim, 1)))) + Omega2 = block_diag(Omega, np.zeros((statedim, statedim))) + # Predict back + joint_smoothed_mean = F2 @ joint_smoothed_mean + b2 + joint_smoothed_cov 
= F2 @ joint_smoothed_cov @ F2.T + Omega2 + return joint_smoothed_mean, joint_smoothed_cov + + +class TrackletExtractorWithTracker(TrackletExtractor): + detectors: List[Detector] = Property(doc='List of detectors') + core_tracker: Tracker = Property(doc='Core tracker used for each detector') + run_async: bool = Property( + doc="If set to ``True``, the reader will read tracks from the tracker asynchronously " + "and only yield the latest set of tracks when iterated." + "Defaults to ``False``", + default=False) + + def __init__(self, *args, **kwargs): + super(TrackletExtractorWithTracker, self).__init__(*args, **kwargs) + sensor_id_offset = len(self.trackers) + for i, detector in enumerate(self.detectors): + tracker = deepcopy(self.core_tracker) + tracker.detector = detector + # Extract transition model + hypothesiser = tracker.data_associator.hypothesiser + while not hasattr(hypothesiser, 'predictor'): + hypothesiser = hypothesiser.hypothesiser + transition_model = hypothesiser.predictor.transition_model + self.trackers.append(TrackReader(tracker, + transition_model=transition_model, + sensor_id=sensor_id_offset+i, + run_async=self.run_async)) + + +class PseudoMeasExtractor(Base, BufferedGenerator): + tracklet_extractor: TrackletExtractor = Property(doc='The tracket extractor') + target_state_dim: int = Property(doc='The target state dim', default=None) + state_idx_to_use: List[int] = Property(doc='The indices of the state corresponding to pos/vel', + default=None) + use_prior: bool = Property(doc="", default=True) + + def __init__(self, *args, **kwargs): + super(PseudoMeasExtractor, self).__init__(*args, **kwargs) + self._last_scan = None + + @property + def scans(self): + return self.current[1] + + @BufferedGenerator.generator_method + def scans_gen(self): + """Returns a generator of detections for each time step. 
+ + Yields + ------ + : :class:`datetime.datetime` + Datetime of current time step + : set of :class:`~.Detection` + Detections generate in the time step + """ + for timestamp, tracklets in self.tracklet_extractor: + scans = self.get_scans_from_tracklets(tracklets, timestamp) + yield timestamp, scans + # for scan in scans: + # yield timestamp, scan + + def extract(self, tracklets, timestamp): + scans = self.get_scans_from_tracklets(tracklets, timestamp) + self.current = timestamp, scans + return scans + + def get_scans_from_tracklets(self, tracklets, timestamp): + measdata = self.get_pseudomeas(tracklets) + self._last_scan = timestamp + scans = self.get_scans_from_measdata(measdata) + # Sort the scans by start time + scans.sort(key=lambda x: x.start_time) + return scans + + def get_pseudomeas(self, all_tracklets): + measurements = [] + for i, tracklets in enumerate(all_tracklets): + for j, tracklet in enumerate(tracklets): + measdata = self.get_pseudomeas_from_tracklet(tracklet, i, self._last_scan) + measurements += measdata + # Sort the measurements by end time + measurements.sort(key=lambda x: x.end_time) + return measurements + + @classmethod + def get_scans_from_measdata(cls, measdata): + if not len(measdata): + return [] + + start = np.min([m.start_time for m in measdata]) + times = np.array([[(m.end_time - start).total_seconds(), + (m.start_time - start).total_seconds()] for m in measdata]) + true_times = np.array([[m.end_time, m.start_time] for m in measdata]) + end_start_times, idx = np.unique(times, return_index=True, axis=0) + idx2 = [] + for previous, current in zip(idx, idx[1:]): + idx2.append([i for i in range(previous, current)]) + else: + idx2.append([i for i in range(idx[-1], len(measdata))]) + nscans = len(idx) + + scans = [] + for i in range(nscans): + thesescans = [measdata[j] for j in idx2[i]] + if not len(thesescans): + continue + start_time = true_times[idx[i], 1] # end_start_times[i, 1] + end_time = true_times[idx[i], 0] + sens_ids = 
[m.metadata['sensor_id'] for m in thesescans] + sens_ids, sidx = np.unique(sens_ids, return_index=True) + sidx2 = [] + for previous, current in zip(sidx, sidx[1:]): + sidx2.append([i for i in range(previous, current)]) + else: + sidx2.append([i for i in range(sidx[-1], len(thesescans))]) + nsensscans = len(sidx) + scan = Scan(start_time, end_time, []) + for s in range(nsensscans): + sensor_id = sens_ids[s] + sscan = SensorScan(sensor_id, []) + sscan.detections = [thesescans[j] for j in sidx2[s]] + for detection in sscan.detections: + detection.metadata['scan_id'] = scan.id + detection.metadata['sensor_scan_id'] = sscan.id + detection.metadata['clutter_density'] = Probability(-70, log_value=True) + scan.sensor_scans.append(sscan) + scans.append(scan) + return scans + + def get_pseudomeas_from_tracklet(self, tracklet, sensor_id, last_scan=None): + + priors = [s for s in tracklet.states if isinstance(s, Prediction)] + posteriors = [s for s in tracklet.states if isinstance(s, Update)] + + if last_scan is None: + inds = [i for i in range(len(posteriors))] + else: + inds = [i for i, p in enumerate(posteriors) if p.timestamp > last_scan] + + measdata = [] + + state_dim = posteriors[-1].state_vector.shape[0] + if self.state_idx_to_use is not None: + state_idx = list(self.state_idx_to_use) + offset = state_dim//2 + for i in self.state_idx_to_use: + state_idx.append(offset+i) + else: + state_idx = [i for i in range(state_dim)] + + for k in inds: + post_mean = posteriors[k].mean[state_idx, :] + post_cov = posteriors[k].covar[state_idx, :][:, state_idx] + prior_mean = priors[k].mean[state_idx, :] + prior_cov = priors[k].covar[state_idx, :][:, state_idx] + + H, z, R, _ = self.get_pseudomeasurement(post_mean, post_cov, prior_mean, prior_cov) + + if len(H): + num_rows, num_cols = H.shape + if self.target_state_dim is not None: + # Add zero columns for bias state indices + col_diff = (self.target_state_dim-num_cols)//2 + H1 = H[:, :num_cols//2] + H2 = H[:, num_cols//2:] + H1 = 
np.append(H1, np.zeros((H.shape[0], col_diff)), axis=1) + H2 = np.append(H2, np.zeros((H.shape[0], col_diff)), axis=1) + H = np.append(H1, H2, axis=1) + meas_model = LinearGaussianPredefinedH(h_matrix=H, noise_covar=R, + mapping=[i for i in range(H.shape[0])]) + detection = Detection(state_vector=StateVector(z), measurement_model=meas_model, + timestamp=posteriors[k].timestamp, + metadata=tracklet.metadata) + detection.metadata['track_id'] = tracklet.id + detection.start_time = posteriors[k].start_time + detection.end_time = posteriors[k].end_time + measdata.append(detection) + + return measdata + + def get_pseudomeasurement(self, mu1, C1, mu2, C2): + eigthresh = 1e-6 + matthresh = 1e-6 + + invC1 = inv(C1) + invC2 = inv(C2) + # Ensure inverses are symmetric + invC1 = (invC1 + invC1.T) / 2 + invC2 = (invC2 + invC2.T) / 2 + invC = invC1 - invC2 + + D, v = np.linalg.eig(invC) + D = np.diag(D) + Htilde = v.T + evals = np.diag(D) + + idx = np.flatnonzero(np.abs(evals) > eigthresh) + + H = Htilde[idx, :] + + statedim = mu1.shape[0] + + if not self.use_prior: + H = np.eye(statedim) + z = mu1 + R = C1 + return H, z, R, evals + + + if np.max(np.abs(C1.flatten() - C2.flatten())) < matthresh: + # print('Discarded - matrices too similar') + H = np.zeros((0, statedim)) + z = np.zeros((0, 1)) + R = np.zeros((0, 0)) + return H, z, R, evals + + if np.all(np.abs(evals) <= eigthresh): + # print('Discarded - all eigenvalues zero') + H = np.zeros((0, statedim)) + z = np.zeros((0, 1)) + R = np.zeros((0, 0)) + return H, z, R, evals + + R = inv(D[idx, :][:, idx]) + z = R @ (H @ invC1 @ mu1 - H @ invC2 @ mu2) + + # Discard measurement if R is not positive definite + try: + np.linalg.cholesky(R) + except np.linalg.LinAlgError: + # if not np.all(np.linalg.eigvals(R) > 0): + # print('Discarded - singular R') + H = np.zeros((0, statedim)) + z = np.zeros((0, 1)) + R = np.zeros((0, 0)) + return H, z, R, evals + + return H, z, R, evals diff --git a/stonesoup/custom/simulator/__init__.py 
b/stonesoup/custom/simulator/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/simulator/platform.py b/stonesoup/custom/simulator/platform.py new file mode 100644 index 000000000..0be4398dc --- /dev/null +++ b/stonesoup/custom/simulator/platform.py @@ -0,0 +1,31 @@ +from typing import Sequence + +from stonesoup.base import Property +from stonesoup.buffered_generator import BufferedGenerator +from stonesoup.platform import Platform +from stonesoup.simulator.platform import PlatformDetectionSimulator + + +class PlatformTargetDetectionSimulator(PlatformDetectionSimulator): + """A simple platform detection simulator. + + Processes ground truth data and generates :class:`~.Detection` data + according to a list of platforms by calling each sensor in these platforms. + + """ + targets: Sequence[Platform] = Property( + doc='List of target platforms to be detected' + ) + + @BufferedGenerator.generator_method + def detections_gen(self): + for time, truths in self.groundtruth: + for platform in self.platforms: + platform.move(time) + for platform in self.targets: + platform.move(time) + for platform in self.platforms: + for sensor in platform.sensors: + truths_to_be_measured = truths.union(self.targets) + detections = sensor.measure(truths_to_be_measured, timestamp=time) + yield time, detections \ No newline at end of file diff --git a/stonesoup/custom/tracker.py b/stonesoup/custom/tracker/__init__.py similarity index 97% rename from stonesoup/custom/tracker.py rename to stonesoup/custom/tracker/__init__.py index 417fe1afd..e8b00aca5 100644 --- a/stonesoup/custom/tracker.py +++ b/stonesoup/custom/tracker/__init__.py @@ -82,9 +82,11 @@ def _prob_detect_simple(self, state_vector): """A simple probability of detection function.""" return self.prob_detection + class SMCPHD_JIPDA(_BaseTracker): """A JIPDA tracker using an SMC-PHD filter as the track initiator.""" + detector: Base = Property(doc='The detector used to generate detections', 
default=None) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._predictor = KalmanPredictor(self.transition_model) @@ -131,6 +133,14 @@ def __init__(self, *args, **kwargs): self._initiator = ISMCPHDInitiator(filter=phd_filter, prior=state) + def __iter__(self): + self.detector_iter = iter(self.detector) + return self + + def __next__(self): + timestamp, detections = next(self.detector_iter) + return timestamp, self.track(detections, timestamp) + def track(self, detections, timestamp, *args, **kwargs): tracks = list(self.tracks) detections = list(detections) diff --git a/stonesoup/custom/tracker/fuse.py b/stonesoup/custom/tracker/fuse.py new file mode 100644 index 000000000..089459c96 --- /dev/null +++ b/stonesoup/custom/tracker/fuse.py @@ -0,0 +1,271 @@ +import numpy as np + +from ..types.hypothesis import MultiHypothesis +from ...base import Base, Property +from ...dataassociator.probability import JPDA +from ...tracker import Tracker +from ...predictor import Predictor +from ...types.mixture import GaussianMixture +from ...types.multihypothesis import MultipleHypothesis +from ...updater import Updater +from ...dataassociator import DataAssociator +from ...types.numeric import Probability +from ...types.prediction import Prediction +from ...types.array import StateVectors +from ...types.update import Update +from ...initiator import Initiator +from ...functions import gm_reduce_single + + +from ..reader.tracklet import PseudoMeasExtractor, TrackletExtractor +from ..types.update import TwoStateGaussianStateUpdate + + +class _BaseFuseTracker(Base): + initiator: Initiator = Property(doc='The initiator used to initiate fused tracks') + predictor: Predictor = Property(doc='Predictor used to predict fused tracks') + updater: Updater = Property(doc='Updater used to update fused tracks') + associator: DataAssociator = Property(doc='Associator used to associate fused tracks with' + 'pseudomeasurements') + death_rate: float = Property(doc='The 
exponential death rate of tracks. Default is 1e-4', + default=1e-4) + prob_detect: Probability = Property(doc='The probability of detection', default=0.9) + delete_thresh: Probability = Property(doc='The existence probability deletion threshold', + default=0.1) + + def __init__(self, *args, **kwargs): + super(_BaseFuseTracker, self).__init__(*args, **kwargs) + self._max_track_id = 0 + + def process_scan(self, scan, tracks, current_end_time): + new_start_time = scan.start_time + new_end_time = scan.end_time + if current_end_time and new_start_time < current_end_time: + print('Scans out of order! Skipping a scan...') + return tracks, current_end_time + + if hasattr(self.initiator, 'predict'): + self.initiator.predict(new_start_time, new_end_time) + self.initiator.current_end_time = new_end_time + + # Predict two-state tracks forward + for track in tracks: + self.predict_track(track, current_end_time, new_start_time, new_end_time, + self.death_rate) + + current_start_time = new_start_time + current_end_time = new_end_time + + for sensor_scan in scan.sensor_scans: + tracks = list(tracks) + detections = set(sensor_scan.detections) + + # Perform data association + associations = self.associator.associate(tracks, detections, + timestamp=current_end_time) + # Update tracks + for track in tracks: + self.update_track(track, associations[track], scan.id) + + # Initiate new tracks on unassociated detections + if isinstance(self.associator, JPDA): + assoc_detections = set( + [h.measurement for hyp in associations.values() for h in hyp if h]) + else: + assoc_detections = set( + [hyp.measurement for hyp in associations.values() if hyp]) + + + tracks = set(tracks) + unassoc_detections = set(detections) - assoc_detections + if isinstance(sensor_scan.sensor_id, str): + tracks |= self.initiator.initiate(unassoc_detections, sensor_scan.timestamp, + sensor_scan.timestamp, + sensor_id=sensor_scan.sensor_id) + else: + tracks |= self.initiator.initiate(unassoc_detections, 
current_start_time, + current_end_time, sensor_id=sensor_scan.sensor_id) + try: + self.initiator.current_end_time = current_end_time + except AttributeError: + pass + + tracks -= self.delete_tracks(tracks) + return tracks, current_end_time + + def predict_track(self, track, current_end_time, new_start_time, new_end_time, + death_rate=0.): + + # Predict existence + survive_prob = np.exp(-death_rate * (new_end_time - current_end_time).total_seconds()) + track.exist_prob = track.exist_prob * survive_prob + + # Predict forward + # p(x_k, x_{k+\Delta} | y^{1:S}_{1:k}) + if not isinstance(track.state, GaussianMixture): + prediction = self.predictor.predict(track.state, current_end_time=current_end_time, + new_start_time=new_start_time, + new_end_time=new_end_time) + else: + pred_components = [] + for component in track.state: + pred_components.append(self.predictor.predict(component, + current_end_time=current_end_time, + new_start_time=new_start_time, + new_end_time=new_end_time)) + prediction = GaussianMixture(pred_components) + # Append prediction to track history + track.append(prediction) + + def update_track(self, track, hypothesis, scan_id): + last_state = track.states[-1] + + if isinstance(self.associator, JPDA): + # calculate each Track's state as a Gaussian Mixture of + # its possible associations with each detection, then + # reduce the Mixture to a single Gaussian State + posterior_states = [] + posterior_state_weights = [] + for hyp in hypothesis: + if not hyp: + posterior_states.append(hyp.prediction) + # Ensure null hyp weight is at index 0 + posterior_state_weights.insert(0, hyp.probability) + else: + posterior_states.append( + self.updater.update(hyp)) + posterior_state_weights.append( + hyp.probability) + if 'track_id' in hyp.measurement.metadata: + try: + track.track_ids.add(hyp.measurement.metadata['track_id']) + except AttributeError: + track.track_ids = {hyp.measurement.metadata['track_id']} + + means = StateVectors([state.state_vector for state in 
posterior_states]) + covars = np.stack([state.covar for state in posterior_states], axis=2) + weights = np.asarray(posterior_state_weights) + + post_mean, post_covar = gm_reduce_single(means, covars, weights) + + update = TwoStateGaussianStateUpdate(post_mean, post_covar, + start_time=posterior_states[0].start_time, + end_time=posterior_states[0].end_time, + hypothesis=hypothesis) + track[-1] = update + # Compute existence probability + non_exist_weight = 1 - track.exist_prob + non_det_weight = (1 - self.prob_detect) * track.exist_prob + null_exist_weight = non_det_weight / (non_exist_weight + non_det_weight) + exist_probs = np.array([null_exist_weight, *[1. for i in range(len(weights) - 1)]]) + track.exist_prob = Probability.sum(exist_probs * weights) + else: + if hypothesis: + # Perform update using the hypothesis + update = self.updater.update(hypothesis) + # Modify track states depending on type of last state + if isinstance(last_state, Update) and last_state.timestamp == update.timestamp: + # If the last scan was an update with the same timestamp, we need to modify this + # state to reflect the computed mean and covariance, as well as the hypotheses that + # resulted to this + hyp = last_state.hypothesis + try: + hyp.measurements.append(hypothesis.measurement) + except AttributeError: + hyp = MultiHypothesis(prediction=hypothesis.prediction, + measurements=[hyp.measurement, + hypothesis.measurement]) + update.hypothesis = hyp # Update the hypothesis + track[-1] = update # Replace the last state + elif isinstance(last_state, + Prediction) and last_state.timestamp == update.timestamp: + # If the last state was a prediction with the same timestamp, it means that the + # state was created by a sensor scan in the same overall scan, due to the track not + # having been associated to any measurement. 
Therefore, we replace the prediction + # with the update + update.hypothesis = MultiHypothesis(prediction=hypothesis.prediction, + measurements=[hypothesis.measurement]) + track[-1] = update + else: + # Else simply append the update to the track history + update.hypothesis = MultiHypothesis(prediction=hypothesis.prediction, + measurements=[hypothesis.measurement]) + track.append(update) + # Set existence probability to 1 + track.exist_prob = 1 + if 'track_id' in hypothesis.measurement.metadata: + try: + track.track_ids.add(hypothesis.measurement.metadata['track_id']) + except AttributeError: + track.track_ids = {hypothesis.measurement.metadata['track_id']} + else: + # If the track was not associated to any measurement, simply update the existence + # probability + non_exist_weight = 1 - track.exist_prob + non_det_weight = (1 - self.prob_detect) * track.exist_prob + track.exist_prob = non_det_weight / (non_exist_weight + non_det_weight) + + def delete_tracks(self, tracks): + del_tracks = set([track for track in tracks if track.exist_prob < self.delete_thresh]) + return del_tracks + + +class FuseTracker(Tracker, _BaseFuseTracker): + """ + + """ + + detector: PseudoMeasExtractor = Property(doc='The pseudo-measurement extractor') + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._tracks = set() + self._current_end_time = None + + @property + def tracks(self): + return self._tracks + + def __iter__(self): + self.detector_iter = iter(self.detector) + return super().__iter__() + + def __next__(self): + timestamp, scans = next(self.detector_iter) + for scan in scans: + self._tracks, self._current_end_time = self.process_scan(scan, self.tracks, self._current_end_time) + return timestamp, self.tracks + + +class FuseTracker2(_BaseFuseTracker): + + tracklet_extractor: TrackletExtractor = Property(doc='The tracklet extractor') + pseudomeas_extractor: PseudoMeasExtractor = Property(doc='The pseudo-measurement extractor') + + def __init__(self, 
*args, **kwargs): + super().__init__(*args, **kwargs) + self._tracks = set() + self._current_end_time = None + + @property + def tracks(self): + return self._tracks + + def process_tracks(self, alltracks, timestamp): + # Extract tracklets + tracklets = self.tracklet_extractor.extract(alltracks, timestamp) + # Extract pseudo-measurements + scans = self.pseudomeas_extractor.extract(tracklets, timestamp) + + for scan in scans: + self._tracks, self._current_end_time = self.process_scan(scan, self.tracks, + self._current_end_time) + return timestamp, self.tracks + + def process_scans(self, scans): + for scan in scans: + self._tracks, self._current_end_time = self.process_scan(scan, self.tracks, + self._current_end_time) + return self.tracks + + diff --git a/stonesoup/custom/types/__init__.py b/stonesoup/custom/types/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/types/hypothesis.py b/stonesoup/custom/types/hypothesis.py new file mode 100644 index 000000000..9e52e33a3 --- /dev/null +++ b/stonesoup/custom/types/hypothesis.py @@ -0,0 +1,14 @@ +from typing import List + +from stonesoup.base import Property +from stonesoup.types.detection import Detection +from stonesoup.types.hypothesis import Hypothesis +from stonesoup.types.prediction import Prediction, MeasurementPrediction + + +class MultiHypothesis(Hypothesis): + """A hypothesis based on multiple measurements. 
""" + prediction: Prediction = Property(doc="Predicted track state") + measurements: List[Detection] = Property(doc="Detection used for hypothesis and updating") + measurement_prediction: MeasurementPrediction = Property( + default=None, doc="Optional track prediction in measurement space") \ No newline at end of file diff --git a/stonesoup/custom/types/prediction.py b/stonesoup/custom/types/prediction.py new file mode 100644 index 000000000..024aca9d2 --- /dev/null +++ b/stonesoup/custom/types/prediction.py @@ -0,0 +1,7 @@ +from stonesoup.custom.types.state import TwoStateGaussianState +from stonesoup.types.prediction import Prediction + + +class TwoStateGaussianStatePrediction(Prediction, TwoStateGaussianState): + """ A Gaussian state object representing the predicted distribution + :math:`p(x_{k+T}, x_{k} | Y)` """ \ No newline at end of file diff --git a/stonesoup/custom/types/state.py b/stonesoup/custom/types/state.py new file mode 100644 index 000000000..6eff39140 --- /dev/null +++ b/stonesoup/custom/types/state.py @@ -0,0 +1,18 @@ +import datetime + +from stonesoup.base import Property +from stonesoup.types.numeric import Probability +from stonesoup.types.state import GaussianState + + +class TwoStateGaussianState(GaussianState): + """ A Gaussian state object representing the distribution :math:`p(x_{k+T}, x_{k} | Y)` """ + start_time: datetime.datetime = Property(doc='Timestamp at t_k') + end_time: datetime.datetime = Property(doc='Timestamp at t_{k+T}') + weight: Probability = Property(default=0, doc="Weight of the Gaussian State.") + tag: str = Property(default=None, doc="Unique tag of the Gaussian State.") + # scan_id: int = Property(doc='The scan id') + + @property + def timestamp(self): + return self.end_time \ No newline at end of file diff --git a/stonesoup/custom/types/tracklet.py b/stonesoup/custom/types/tracklet.py new file mode 100644 index 000000000..6038a1cf1 --- /dev/null +++ b/stonesoup/custom/types/tracklet.py @@ -0,0 +1,69 @@ +import 
datetime +import uuid +from typing import Set, List +import collections.abc + +from ...base import Property +from ...types.base import Type +from ...types.track import Track +from ...types.detection import Detection +from ...models.transition import TransitionModel + + +class Tracklet(Track): + pass + + +class SensorTracks(Type, collections.abc.Set): + """ A container object for tracks relating to a particular sensor """ + tracks: Set[Track] = Property(doc='A list of tracks', default=None) + sensor_id: str = Property(doc='The id of the sensor', default=None) + transition_model: TransitionModel = Property(doc='Transition model used by the tracker', + default=None) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.tracks is None: + self.tracks = set() + + def __iter__(self): + return (t for t in self.tracks) + + def __len__(self): + return self.tracks.__len__() + + def __contains__(self, item): + return self.tracks.__contains__(item) + + +class SensorTracklets(SensorTracks): + """ A container object for tracklets relating to a particular sensor """ + pass + + +class SensorScan(Type): + """ A wrapper around a set of detections produced by a particular sensor """ + sensor_id: str = Property(doc='The id of the sensor') + detections: Set[Detection] = Property(doc='The detections contained in the scan') + id: str = Property(default=None, doc="The unique scan ID") + timestamp: datetime.datetime = Property(default=None, doc='The scan timestamp') + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.id is None: + self.id = str(uuid.uuid4()) + + +class Scan(Type): + """ A wrapper around a set of sensor scans within a given time interval """ + start_time: datetime.datetime = Property(doc='The scan start time') + end_time: datetime.datetime = Property(doc='The scan end time') + sensor_scans: List[SensorScan] = Property(doc='The sensor scans') + id: str = Property(default=None, doc="The unique scan ID") + + 
def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.id is None: + self.id = str(uuid.uuid4()) + + diff --git a/stonesoup/custom/types/update.py b/stonesoup/custom/types/update.py new file mode 100644 index 000000000..23124b949 --- /dev/null +++ b/stonesoup/custom/types/update.py @@ -0,0 +1,7 @@ +from stonesoup.custom.types.state import TwoStateGaussianState +from stonesoup.types.update import Update + + +class TwoStateGaussianStateUpdate(Update, TwoStateGaussianState): + """ A Gaussian state object representing the predicted distribution + :math:`p(x_{k+T}, x_{k} | Y)` """ \ No newline at end of file diff --git a/stonesoup/custom/updater/__init__.py b/stonesoup/custom/updater/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/updater/twostate.py b/stonesoup/custom/updater/twostate.py new file mode 100644 index 000000000..4a23ddfdd --- /dev/null +++ b/stonesoup/custom/updater/twostate.py @@ -0,0 +1,62 @@ +from ...types.update import Update +from stonesoup.updater.kalman import ExtendedKalmanUpdater + + +class TwoStateKalmanUpdater(ExtendedKalmanUpdater): + + def update(self, hypothesis, **kwargs): + r"""The Kalman update method. Given a hypothesised association between + a predicted state or predicted measurement and an actual measurement, + calculate the posterior state. + + Parameters + ---------- + hypothesis : :class:`~.SingleHypothesis` + the prediction-measurement association hypothesis. This hypothesis + may carry a predicted measurement, or a predicted state. In the + latter case a predicted measurement will be calculated. 
+ **kwargs : various + These are passed to :meth:`predict_measurement` + + Returns + ------- + : :class:`~.GaussianStateUpdate` + The posterior state Gaussian with mean :math:`\mathbf{x}_{k|k}` and + covariance :math:`P_{x|x}` + + """ + # Get the predicted state out of the hypothesis + predicted_state = hypothesis.prediction + + # If there is no measurement prediction in the hypothesis then do the + # measurement prediction (and attach it back to the hypothesis). + if hypothesis.measurement_prediction is None: + # Get the measurement model out of the measurement if it's there. + # If not, use the one native to the updater (which might still be + # none) + measurement_model = hypothesis.measurement.measurement_model + measurement_model = self._check_measurement_model( + measurement_model) + + # Attach the measurement prediction to the hypothesis + hypothesis.measurement_prediction = self.predict_measurement( + predicted_state, measurement_model=measurement_model, **kwargs) + + # Kalman gain and posterior covariance + posterior_covariance, kalman_gain = self._posterior_covariance(hypothesis) + + # Posterior mean + posterior_mean = predicted_state.state_vector + \ + kalman_gain@(hypothesis.measurement.state_vector - + hypothesis.measurement_prediction.state_vector) + + if self.force_symmetric_covariance: + posterior_covariance = \ + (posterior_covariance + posterior_covariance.T)/2 + + return Update.from_state( + hypothesis.prediction, + posterior_mean, posterior_covariance, + start_time=hypothesis.prediction.start_time, + end_time=hypothesis.measurement.timestamp, + hypothesis=hypothesis) diff --git a/stonesoup/sensormanager/base.py b/stonesoup/sensormanager/base.py index cffd5b961..c3c8dbe60 100644 --- a/stonesoup/sensormanager/base.py +++ b/stonesoup/sensormanager/base.py @@ -152,9 +152,9 @@ def choose_actions(self, tracks, timestamp, nchoose=1, **kwargs): # break # if flag: # a = 2 - reward, var = self.reward_function(config, tracks, timestamp) + reward= 
self.reward_function(config, tracks, timestamp) rewards.append(reward) - vars.append(var) + # vars.append(var) if reward > min(best_rewards): selected_configs[np.argmin(best_rewards)] = config best_rewards[np.argmin(best_rewards)] = reward diff --git a/stonesoup/types/track.py b/stonesoup/types/track.py index 99baa22ee..4895b1978 100644 --- a/stonesoup/types/track.py +++ b/stonesoup/types/track.py @@ -6,6 +6,7 @@ from .state import State, StateMutableSequence from .update import Update from ..base import Property +from ..custom.types.hypothesis import MultiHypothesis class Track(StateMutableSequence): @@ -133,6 +134,10 @@ def _update_metadata_from_state(self, state): if hypothesis \ and hypothesis.measurement.metadata is not None: self.metadata.update(hypothesis.measurement.metadata) + elif isinstance(state.hypothesis, MultiHypothesis): + hypothesis = state.hypothesis + for measurement in hypothesis.measurements: + self.metadata.update(measurement.metadata) else: hypothesis = state.hypothesis if hypothesis and hypothesis.measurement.metadata is not None: From 88d1486b04c04a00e6f218ef47a14cb05644b90b Mon Sep 17 00:00:00 2001 From: sglvladi Date: Thu, 18 May 2023 10:50:04 +0100 Subject: [PATCH 70/87] ISMCPHD fix --- stonesoup/custom/initiator/smcphd.py | 10 +++++-- stonesoup/custom/tracker/__init__.py | 41 ++++++++++++++++++++-------- 2 files changed, 37 insertions(+), 14 deletions(-) diff --git a/stonesoup/custom/initiator/smcphd.py b/stonesoup/custom/initiator/smcphd.py index ee9cde455..d1ed8398a 100644 --- a/stonesoup/custom/initiator/smcphd.py +++ b/stonesoup/custom/initiator/smcphd.py @@ -405,6 +405,7 @@ def get_birth_state(self, prediction, detections, timestamp): # Sample birth particles num_birth = round(float(self.prob_birth) * self.num_samples) birth_particles = np.zeros((prediction.state_vector.shape[0], 0)) + birth_weights= np.zeros((0, )) if len(detections): num_birth_per_detection = num_birth // len(detections) for i, detection in enumerate(detections): 
@@ -417,8 +418,13 @@ def get_birth_state(self, prediction, detections, timestamp): birth_particles_i = multivariate_normal.rvs(mu.ravel(), cov, num_birth_per_detection).T + birth_weights_i = multivariate_normal.pdf(birth_particles_i.T, + mu.ravel(), + cov, + allow_singular=True) * Probability(self.birth_rate / num_birth) birth_particles = np.hstack((birth_particles, birth_particles_i)) - birth_weights = np.full((num_birth,), Probability(self.birth_rate / num_birth)) + birth_weights = np.hstack((birth_weights, birth_weights_i)) + # birth_weights = np.full((num_birth,), Probability(self.birth_rate / num_birth)) birth_particles = StateVectors(birth_particles) birth_state = Prediction.from_state(prediction, state_vector=birth_particles, @@ -548,7 +554,7 @@ def initiate(self, detections, timestamp, weights=None, **kwargs): # Calculate intensity per hypothesis log_intensity_per_hyp = logsumexp(log_weights_per_hyp, axis=0) - + # print(np.exp(log_intensity_per_hyp)) # Find detections with intensity above threshold and initiate valid_inds = np.flatnonzero(np.exp(log_intensity_per_hyp) > self.threshold) for idx in valid_inds: diff --git a/stonesoup/custom/tracker/__init__.py b/stonesoup/custom/tracker/__init__.py index e8b00aca5..ee72b4ce3 100644 --- a/stonesoup/custom/tracker/__init__.py +++ b/stonesoup/custom/tracker/__init__.py @@ -87,6 +87,7 @@ class SMCPHD_JIPDA(_BaseTracker): """A JIPDA tracker using an SMC-PHD filter as the track initiator.""" detector: Base = Property(doc='The detector used to generate detections', default=None) + use_ismcphd: bool = Property(doc='Use ISMC-PHD filter for track initiation', default=True) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._predictor = KalmanPredictor(self.transition_model) @@ -99,17 +100,30 @@ def __init__(self, *args, **kwargs): self._associator = JIPDAWithEHM2(self._hypothesiser) resampler = SystematicResampler() - phd_filter = ISMCPHDFilter(birth_density=self.birth_density, - 
transition_model=self.transition_model, - measurement_model=self.measurement_model, - prob_detect=self.prob_detect, - prob_death=self.prob_death, - prob_birth=self.prob_birth, - birth_rate=self.birth_rate, - clutter_intensity=self.clutter_intensity, - num_samples=self.num_samples, - resampler=resampler, - birth_scheme=self.birth_scheme) + if self.use_ismcphd: + phd_filter = ISMCPHDFilter(birth_density=self.birth_density, + transition_model=self.transition_model, + measurement_model=self.measurement_model, + prob_detect=self.prob_detect, + prob_death=self.prob_death, + prob_birth=self.prob_birth, + birth_rate=self.birth_rate, + clutter_intensity=self.clutter_intensity, + num_samples=self.num_samples, + resampler=resampler, + birth_scheme=self.birth_scheme) + else: + phd_filter = SMCPHDFilter(birth_density=self.birth_density, + transition_model=self.transition_model, + measurement_model=self.measurement_model, + prob_detect=self.prob_detect, + prob_death=self.prob_death, + prob_birth=self.prob_birth, + birth_rate=self.birth_rate, + clutter_intensity=self.clutter_intensity, + num_samples=self.num_samples, + resampler=resampler, + birth_scheme=self.birth_scheme) # Sample prior state from birth density if isinstance(self.birth_density, GaussianMixture): state_vector = np.zeros((self.transition_model.ndim_state, 0)) @@ -131,7 +145,10 @@ def __init__(self, *args, **kwargs): weight = np.full((self.num_samples,), Probability(1 / self.num_samples)) * self.birth_rate state = ParticleState(state_vector=state_vector, weight=weight, timestamp=self.start_time) - self._initiator = ISMCPHDInitiator(filter=phd_filter, prior=state) + if self.use_ismcphd: + self._initiator = ISMCPHDInitiator(filter=phd_filter, prior=state) + else: + self._initiator = SMCPHDInitiator(filter=phd_filter, prior=state) def __iter__(self): self.detector_iter = iter(self.detector) From 2d06355c3b3ba58c0852a6f8fc8bd2602739d7b1 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 22 May 2023 11:07:26 +0100 
Subject: [PATCH 71/87] Added example fusion scenario --- .../reactive-isr/multi-sonar-ehm-fuse-3.py | 254 ++++++++++++++++++ examples/reactive-isr/utils.py | 184 +++++++++++++ 2 files changed, 438 insertions(+) create mode 100644 examples/reactive-isr/multi-sonar-ehm-fuse-3.py create mode 100644 examples/reactive-isr/utils.py diff --git a/examples/reactive-isr/multi-sonar-ehm-fuse-3.py b/examples/reactive-isr/multi-sonar-ehm-fuse-3.py new file mode 100644 index 000000000..f8ccbea61 --- /dev/null +++ b/examples/reactive-isr/multi-sonar-ehm-fuse-3.py @@ -0,0 +1,254 @@ +""" +multi-sonar-ehm-fuse.py + +This example script simulates 3 moving platforms, each equipped with a single active sonar sensor +(StoneSoup does not have an implementation of an active sonar so a radar is used instead), and 1 +target. Each sensor generates detections of all other objects (excluding itself). + +The tracking configuration is as follows: +- For each sensor whose index in the 'all_detectors' list is not in 'bias_tracker_idx', a + local tracker is configured that acts like a contact follower and generates Track objects. The + outputs of these trackers are the fed into the Fusion engine. +- For all other sensors, their data is fed directly into the Fusion engine. Note that the + TrackletExtractorWithTracker is used here, meaning that a (local) bias estimation tracker is run + on the data read from each sensor, before it is fed into the main Fuse Tracker (i.e. the + component of the Fusion Engine that produces the fused tracks). +- The data association algorithm used for both the local and fuse trackers is JPDA with EHM. 
+ +""" +import numpy as np +from datetime import datetime, timedelta +from copy import deepcopy, copy +import matplotlib.pyplot as plt +from matplotlib.patches import Ellipse + +from stonesoup.custom.sensor.movable import MovableUAVCamera +from stonesoup.custom.tracker import SMCPHD_JIPDA +from stonesoup.custom.types.tracklet import SensorTracks +from stonesoup.custom.initiator.twostate import TwoStateInitiator +from stonesoup.types.numeric import Probability +from stonesoup.types.state import State, GaussianState +from stonesoup.types.array import StateVector, CovarianceMatrix +from stonesoup.platform.base import MovingPlatform +from stonesoup.models.transition.linear import (CombinedLinearGaussianTransitionModel, + ConstantVelocity, KnownTurnRate, NthDerivativeDecay, + OrnsteinUhlenbeck) +from stonesoup.platform.base import MultiTransitionMovingPlatform +from stonesoup.simulator.simple import DummyGroundTruthSimulator +from stonesoup.types.update import Update +from stonesoup.gater.distance import DistanceGater +from stonesoup.plugins.pyehm import JPDAWithEHM2 +from stonesoup.measures import Mahalanobis + +from utils import plot_cov_ellipse + +from stonesoup.custom.hypothesiser.probability import PDAHypothesiser +from stonesoup.custom.simulator.platform import PlatformTargetDetectionSimulator +from stonesoup.custom.predictor.twostate import TwoStatePredictor +from stonesoup.custom.updater.twostate import TwoStateKalmanUpdater +from stonesoup.custom.reader.tracklet import TrackletExtractor, PseudoMeasExtractor +from stonesoup.custom.tracker.fuse import FuseTracker2 + +# Parameters +np.random.seed(1000) +clutter_rate = 1 # Mean number of clutter points per scan +max_range = 130 # Max range of sensor (meters) +surveillance_area = np.pi*max_range**2 # Surveillance region area +clutter_density = clutter_rate/surveillance_area # Mean number of clutter points per unit area +prob_detect = 0.9 # Probability of Detection +num_timesteps = 101 # Number of simulation 
timesteps +PLOT = True + +# Simulation start time +start_time = datetime.now() + +# Define transition model and position for 3D platform +platform_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), + ConstantVelocity(0.), + ConstantVelocity(0.)]) + +# Create platforms +init_states = [State(StateVector([-50, 0, -25, 1, 0, 0]), start_time), + State(StateVector([50, 0, -25, 1, 0, 0]), start_time), + State(StateVector([-25, 1, 50, 0, 0, 0]), start_time)] +platforms = [] +for i, init_state in enumerate(init_states): + # Platform + platform = MovingPlatform(states=init_state, + position_mapping=(0, 2, 4), + velocity_mapping=(1, 3, 5), + transition_model=platform_transition_model) + + # Sensor + sensor = MovableUAVCamera(ndim_state=6, mapping=[0, 2, 4], + noise_covar=np.diag([3, 3, 3]), + mounting_offset=StateVector([0, 0, 0]), + rotation_offset=StateVector([0, 0, 0]), + fov_radius=max_range, + limits=None, + fov_in_km=False) + platform.add_sensor(sensor) + platforms.append(platform) + + +# Simulation components + +# The target +cv_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)]) +ct_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)]) +manoeuvres = [cv_model, ct_model] +manoeuvre_times = [timedelta(seconds=4), timedelta(seconds=4)] +init_state_gnd = State(StateVector([25, -1, 25, -1, 0, 0]), start_time) +target = MultiTransitionMovingPlatform(transition_models=manoeuvres, + transition_times=manoeuvre_times, + states=init_state_gnd, + position_mapping=(0, 2, 4), + velocity_mapping=(1, 3, 5), + sensors=None) + +times = np.arange(0, num_timesteps, 1) +timestamps = [start_time + timedelta(seconds=float(elapsed_time)) for elapsed_time in times] + +gnd_simulator = DummyGroundTruthSimulator(times=timestamps) + +# Detection simulators (1 for each platform) +detector1 = 
PlatformTargetDetectionSimulator(groundtruth=gnd_simulator, platforms=[platforms[0]], + targets=[platforms[1], platforms[2], target]) +detector2 = PlatformTargetDetectionSimulator(groundtruth=gnd_simulator, platforms=[platforms[1]], + targets=[platforms[0], platforms[2], target]) +detector3 = PlatformTargetDetectionSimulator(groundtruth=gnd_simulator, platforms=[platforms[2]], + targets=[platforms[0], platforms[1], target]) + +all_detectors = [detector1, detector2, detector3] + +# Multi-Target Trackers (1 per platform) +base_trackers = [] +for i, detector in enumerate(all_detectors): + transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(1)]*3) + birth_density = GaussianState(StateVector([0, 0, 0, 0, 0, 0]), + CovarianceMatrix(np.diag([50, 2, 50, 2, 0, 0]))) + prob_death = Probability(0.01) # Probability of death + prob_birth = Probability(0.1) # Probability of birth + prob_survive = Probability(0.99) # Probability of survival + birth_rate = 0.02 + num_particles = 2 ** 11 + birth_scheme = 'mixture' + tracker = SMCPHD_JIPDA(birth_density=birth_density, transition_model=transition_model, + measurement_model=None, prob_detection=prob_detect, + prob_death=prob_death, prob_birth=prob_birth, + birth_rate=birth_rate, clutter_intensity=clutter_density, + num_samples=num_particles, birth_scheme=birth_scheme, + start_time=start_time, detector=detector, use_ismcphd=True) + base_trackers.append(tracker) + +# Fusion Tracker +transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(1)]*3) +prior = GaussianState(StateVector([0, 0, 0, 0, 0, 0]), + CovarianceMatrix(np.diag([50, 5, 50, 5, 0, 0]))) +tracklet_extractor = TrackletExtractor(trackers=None, + transition_model=transition_model, + fuse_interval=timedelta(seconds=3)) +pseudomeas_extractor = PseudoMeasExtractor(None, state_idx_to_use=None, use_prior=False) + +two_state_predictor = TwoStatePredictor(transition_model) +two_state_updater = TwoStateKalmanUpdater(None, True) +hypothesiser1 = 
PDAHypothesiser(predictor=None, + updater=two_state_updater, + clutter_spatial_density=Probability(-80, log_value=True), + prob_detect=Probability(prob_detect), + prob_gate=Probability(0.99), + predict=False, + per_measurement=True) +hypothesiser1 = DistanceGater(hypothesiser1, Mahalanobis(), 10) # Uncomment to use JPDA+EHM2 +fuse_associator = JPDAWithEHM2(hypothesiser1) # in Fuse tracker +# fuse_associator = GNNWith2DAssignment(hypothesiser1) # Uncomment for GNN in Fuse Tracker +initiator1 = TwoStateInitiator(prior, transition_model, two_state_updater) +fuse_tracker = FuseTracker2(initiator=initiator1, predictor=two_state_predictor, + updater=two_state_updater, associator=fuse_associator, + tracklet_extractor=tracklet_extractor, + pseudomeas_extractor=detector, death_rate=1e-4, + prob_detect=Probability(prob_detect), + delete_thresh=Probability(0.1)) + +sim_start_time = datetime.now() +tracks = set() + +if PLOT: + plt.figure(figsize=(10, 10)) + plt.ion() +for (timestamp, tracks1), (_, tracks2), (_, tracks3) in zip(*base_trackers): + + alltracks = [SensorTracks(tracks, i, transition_model) for i, tracks + in enumerate([tracks1, tracks2, tracks3])] + + # Perform fusion + + # _, ctracks = fuse_tracker.process_tracks(alltracks, timestamp) + + # Extract tracklets + tracklets = tracklet_extractor.extract(alltracks, timestamp) + + # Extract pseudo-measurements + scans = pseudomeas_extractor.extract(tracklets, timestamp) + + # Process pseudo-measurements + ctracks = fuse_tracker.process_scans(scans) + + # Update tracks + tracks.update(ctracks) + + print(f'{timestamp-start_time} - No. 
Tracks: {len(ctracks)}') + tracks.update(ctracks) + # Plot + if PLOT: + plt.clf() + all_detections = [detector.detections for detector in all_detectors] + colors = ['r', 'g', 'b'] + data = np.array([state.state_vector for state in target]) + plt.plot(data[:, 0], data[:, 2], '--k', label='Groundtruth (Target)') + for i, (platform, color) in enumerate(zip(platforms, colors)): + data = np.array([state.state_vector for state in platform]) + plt.plot(data[:, 0], data[:, 2], f'--{color}') + + ax1 = plt.gca() + for j, platform in enumerate(platforms): + sensor = platform.sensors[0] + circle = plt.Circle((sensor.position[0], sensor.position[1]), radius=sensor.fov_radius, + color=colors[j], + fill=False, + label=f'Sensor {j+1}') + ax1.add_artist(circle, ) + + for i, (detections, color) in enumerate(zip(all_detections, colors)): + for detection in detections: + model = detection.measurement_model + x, y = detection.state_vector[0], detection.state_vector[1] + plt.plot(x, y, f'{color}x') + + for i, (tracklets, color) in enumerate(zip(tracklet_extractor.current[1], colors)): + for tracklet in tracklets: + data = np.array([s.mean for s in tracklet.states if isinstance(s, Update)]) + plt.plot(data[:, 6], data[:, 8], f':{color}') + + for track in tracks: + data = np.array([state.state_vector for state in track]) + plot_cov_ellipse(track.covar[[6, 8], :][:, [6, 8]], track.state_vector[[6, 8], :], + edgecolor='r', facecolor='none', ax=ax1) + plt.plot(data[:, 6], data[:, 8], '-*m') + + # Add legend info + for i, color in enumerate(colors): + plt.plot([], [], f'--{color}', label=f'Groundtruth (Sensor {i + 1})') + plt.plot([], [], f':{color}', label=f'Tracklets (Sensor {i + 1})') + plt.plot([], [], f'x{color}', label=f'Detections (Sensor {i + 1})') + plt.plot([], [], f'-*m', label=f'Fused Tracks') + + # state_smc = non_bias_trackers[0]._initiator._state + # plt.plot(state_smc.state_vector[0, :], state_smc.state_vector[2, :], 'r.') + + plt.legend(loc='upper right') + plt.xlim((-200, 
200)) + plt.ylim((-200, 200)) + plt.pause(0.01) + +print(datetime.now() - sim_start_time) \ No newline at end of file diff --git a/examples/reactive-isr/utils.py b/examples/reactive-isr/utils.py new file mode 100644 index 000000000..966586319 --- /dev/null +++ b/examples/reactive-isr/utils.py @@ -0,0 +1,184 @@ +from datetime import datetime +from typing import Sequence, List, Dict, Mapping + +import numpy as np +from matplotlib import pyplot as plt +from matplotlib.patches import Ellipse +from matplotlib.path import Path +from shapely.geometry import Point +from shapely.ops import unary_union + +from reactive_isr_core.data import BeliefState, AssetList, GeoLocation, SensorType, ActionList + +from stonesoup.types.track import Track +from stonesoup.custom.sensor.movable import MovableUAVCamera +from stonesoup.sensor.sensor import Sensor +from stonesoup.types.array import StateVector +from stonesoup.types.numeric import Probability +from stonesoup.types.state import ParticleState, GaussianState +from stonesoup.sensor.action import Action as ssAction + + +def eigsorted(cov): + vals, vecs = np.linalg.eigh(cov) + order = vals.argsort()[::-1] + return vals[order], vecs[:, order] + + +def compute_ellipse(cov, pos, nstd=1, **kwargs): + + def eigsorted(cov): + vals, vecs = np.linalg.eigh(cov) + order = vals.argsort()[::-1] + return vals[order], vecs[:, order] + + vals, vecs = eigsorted(cov) + theta = np.degrees(np.arctan2(*vecs[:, 0][::-1])) + + # Width and height are "full" widths, not radius + width, height = 2 * nstd * np.sqrt(vals) + ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, + alpha=0.4, **kwargs) + return ellip.get_path() + + +def plot_cov_ellipse(cov, pos, nstd=1, ax=None, **kwargs): + """ + Plots an `nstd` sigma error ellipse based on the specified covariance + matrix (`cov`). Additional keyword arguments are passed on to the + ellipse patch artist. 
+ Parameters + ---------- + cov : The 2x2 covariance matrix to base the ellipse on + pos : The location of the center of the ellipse. Expects a 2-element + sequence of [x0, y0]. + nstd : The radius of the ellipse in numbers of standard deviations. + Defaults to 2 standard deviations. + ax : The axis that the ellipse will be plotted on. Defaults to the + current axis. + Additional keyword arguments are pass on to the ellipse patch. + Returns + ------- + A matplotlib ellipse artist + """ + + def eigsorted(cov): + vals, vecs = np.linalg.eigh(cov) + order = vals.argsort()[::-1] + return vals[order], vecs[:, order] + + if ax is None: + ax = plt.gca() + + vals, vecs = eigsorted(cov) + theta = np.degrees(np.arctan2(*vecs[:, 0][::-1])) + + # Width and height are "full" widths, not radius + width, height = 2 * nstd * np.sqrt(vals) + ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, + alpha=0.4, **kwargs) + + ax.add_artist(ellip) + return ellip + + +def _prob_detect_func(prob_detect, fovs): + """Closure to return the probability of detection function for a given environment scan""" + + # Get the union of all field of views + fovs_union = unary_union(fovs) + if fovs_union.geom_type == 'MultiPolygon': + fovs = [poly for poly in fovs_union] + else: + fovs = [fovs_union] + + # Probability of detection nested function + def prob_detect_func(state): + for poly in fovs: + if isinstance(state, ParticleState): + prob_detect_arr = np.full((len(state),), Probability(0.1)) + path_p = Path(poly.boundary.coords) + points = state.state_vector[[0, 2], :].T + inside_points = path_p.contains_points(points) + prob_detect_arr[inside_points] = prob_detect + return prob_detect_arr + else: + point = Point(state.state_vector[0, 0], state.state_vector[2, 0]) + return prob_detect if poly.contains(point) else Probability(0.1) + + return prob_detect_func + + +def belief_state_to_tracks(belief: BeliefState) -> Sequence[Track]: + """Converts a belief state to a set of stonesoup tracks""" + 
targets = belief.targets + tracks = [] + for target_id, target_detection in targets.items(): + state_vector = StateVector([target_detection.location.longitude, + target_detection.velocity.longitude, + target_detection.location.latitude, + target_detection.velocity.latitude, + target_detection.location.altitude, + target_detection.velocity.altitude]) + covariance_matrix = np.zeros((6, 6), dtype=float) + covariance_matrix[0::2, 0::2] = target_detection.location_error + covariance_matrix[1::2, 1::2] = target_detection.velocity_error + metadata = { + 'target_type_confidences': target_detection.target_type_confidences, + } + state = GaussianState(state_vector, covariance_matrix, + timestamp=target_detection.time) + track = Track(id=target_id, states=[state], init_metadata=metadata) + track.exist_prob = Probability(target_detection.confidence) + tracks.append(track) + return tracks + + +def assets_to_sensors(assets: AssetList, region_corners: List[GeoLocation], + action_resolutions: Dict[str, float]) -> Sequence[Sensor]: + """Converts a set of assets to a list of stonesoup sensors""" + sensors = [] + for asset in assets.assets: + if SensorType.AERIAL_V_CAMERA in asset.asset_description.sensor_types: + sensor_position = StateVector([asset.asset_status.location.longitude, + asset.asset_status.location.latitude, + asset.asset_status.location.altitude]) + lim_x = np.sort([loc.longitude for loc in region_corners]) + lim_y = np.sort([loc.latitude for loc in region_corners]) + limits = {'location_x': lim_x, 'location_y': lim_y} + sensor = MovableUAVCamera(ndim_state=6, mapping=[0, 2, 4], + noise_covar=np.diag([0.05, 0.05, 0.05]), + fov_radius=asset.asset_description.fov_radius, + location_x=sensor_position[0], + location_y=sensor_position[1], + resolutions=action_resolutions, + position=sensor_position, + limits=limits) + sensor.id = asset.asset_description.id + sensors.append(sensor) + else: + raise NotImplementedError("Only aerial cameras are supported") + return sensors + + 
+def action_list_to_config(assets, action_list: ActionList, + region_corners: List[GeoLocation], + action_resolutions: Dict[str, float], + time: datetime) -> Mapping[Sensor, Sequence[ssAction]]: + """Converts a reactive_isr_core action list to a stonesoup config""" + sensors = assets_to_sensors(assets, region_corners, action_resolutions) + config = {} + + for action in action_list.actions: + try: + sensor = next(s for s in sensors if s.id == action.asset_id) + except StopIteration as exc: + raise ValueError(f"Asset {action.asset_id} not found") from exc + location_x, location_y = action.location.longitude, action.location.latitude + action_generators = sensor.actions(time) + x_action_gen = next(a for a in action_generators if a.attribute == 'location_x') + y_action_gen = next(a for a in action_generators if a.attribute == 'location_y') + x_action = x_action_gen.action_from_value(location_x) + y_action = y_action_gen.action_from_value(location_y) + config[sensor] = (x_action, y_action) + return config \ No newline at end of file From e3280b2bf49fc37a05dcef4403d1eb9dbdd5607e Mon Sep 17 00:00:00 2001 From: sglvladi Date: Fri, 26 May 2023 22:49:58 +0100 Subject: [PATCH 72/87] Added MovableUAVCamera actions demo script --- .../movable_uav_camera_actions.py | 87 +++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 examples/reactive-isr/movable_uav_camera_actions.py diff --git a/examples/reactive-isr/movable_uav_camera_actions.py b/examples/reactive-isr/movable_uav_camera_actions.py new file mode 100644 index 000000000..cddc3e1b7 --- /dev/null +++ b/examples/reactive-isr/movable_uav_camera_actions.py @@ -0,0 +1,87 @@ +import itertools +from datetime import datetime + +import numpy as np + +from stonesoup.custom.sensor.movable import MovableUAVCamera +from stonesoup.types.angle import Angle +from stonesoup.types.array import StateVector + +# The camera is initially positioned at x=10, y=10, z=100 +position = StateVector([10., 10., 100.]) + +# We can also 
set the resolution of each actionable property. The resolution is used when +# discretising the action space. In this case, we set the resolution of both the X and Y locations +# to 10 units. +resolutions = {'location_x': 10., 'location_y': 10.} + +# Furthermore, we can specify the limits of the action space. In this case, we set the limits of +# both the X and Y locations to [-100, 100]. This means that the action space will contain values +# in the range [-100, 100] with a step size of 10 units for each property (based on the resolution +# specified above). +limits = {'location_x': [-100, 100], 'location_y': [-100, 100]} + +# Create a camera object +sensor = MovableUAVCamera(ndim_state=6, mapping=[0, 2, 4], + noise_covar=np.diag([0.05, 0.05, 0.05]), + location_x=position[0], location_y=position[1], + resolutions=resolutions, + position=position, + fov_radius=100, + limits=limits) + +# Set a query time +timestamp = datetime.now() + +# Calling sensor.actions() will return a set of action generators. Each action generator is an +# object that contains all the actions that can be performed by the sensor at a given time. In this +# case, the sensor has two actionable properties: X and Y location. Hence, the result of +# sensor.actions() is a set of two action generators: one for moving on the X-axis and one for +# moving on the Y-axis. +action_generators = sensor.actions(timestamp) + +# Let's look at the action generators +# The first action generator is for the X location. We can extract the action generator by +# searching for the action generator that controls the 'location_x' property. So, the following +# line of code simply filters the action generators that control 'location_x' (the for-if +# statement) and then selects the first action generator (since there is only one), via the next() +# statement. +x_action_generator = next(ag for ag in action_generators if ag.attribute == 'location_x') +# The second action generator is for the Y location. 
We can extract the action generator by +# searching for the action generator that controls 'location_y'. +y_action_generator = next(ag for ag in action_generators if ag.attribute == 'location_y') + +# We can now look at the actions that can be performed by the action generators. The action +# generators provide a Python "iterator" interface. This means that we can iterate over the action +# generators to get the actions that can be performed (e.g. with a "for" loop). Instead, we can +# also use the list() function to get a list of all the actions that can be performed. +possible_x_actions = list(x_action_generator) +possible_y_actions = list(y_action_generator) + +# Each action has a "target_value" property that specifies the value that the property will be +# set to if the action is performed. The following line of code prints the target values of the +# 10th action for pan and tilt. +print(possible_x_actions[9].target_value) +print(possible_y_actions[9].target_value) + +# To get all the possible combinations of actions, we can use the itertools.product() function. +possible_action_combinations = list(itertools.product(possible_x_actions, possible_y_actions)) + +# Let us now select the 10th action combination and task the sensor to perform the action. +chosen_action_combination = possible_action_combinations[9] +sensor.add_actions(chosen_action_combination) +sensor.act(timestamp) + +# We can also create a custom action combination. For example, we can move the camera to the +# location (0, 10, 100) by generating an action that sets the X location to 0 and an action that +# sets the Y location to 10. We can then combine these two actions into a single action combination +# and task the sensor to perform the action. 
+custom_action_x = x_action_generator.action_from_value(0) # Action that sets the X location to 0 +custom_action_y = y_action_generator.action_from_value(10) # Action that sets the Y location to 10 +custom_action_combination = (custom_action_x, custom_action_y) +sensor.add_actions(custom_action_combination) +sensor.act(timestamp) + +# The statement below is just an extra statement to allow us to breakpoint the code and inspect +# the possible actions. +end = True From 8584b0c8f26a299ad87f5f18a23dd47bc07e95f0 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 5 Jun 2023 14:45:31 +0100 Subject: [PATCH 73/87] Added MovableUAVCamera actions demo script --- examples/reactive-isr/movable_uav_camera_actions.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/reactive-isr/movable_uav_camera_actions.py b/examples/reactive-isr/movable_uav_camera_actions.py index cddc3e1b7..41a06aec8 100644 --- a/examples/reactive-isr/movable_uav_camera_actions.py +++ b/examples/reactive-isr/movable_uav_camera_actions.py @@ -19,7 +19,7 @@ # both the X and Y locations to [-100, 100]. This means that the action space will contain values # in the range [-100, 100] with a step size of 10 units for each property (based on the resolution # specified above). 
-limits = {'location_x': [-100, 100], 'location_y': [-100, 100]} +limits = {'location_x': [-100, 100], 'location_y': [-90, 90]} # Create a camera object sensor = MovableUAVCamera(ndim_state=6, mapping=[0, 2, 4], @@ -28,6 +28,7 @@ resolutions=resolutions, position=position, fov_radius=100, + fov_in_km=False, limits=limits) # Set a query time From 6436aa52d4c1705d14f1cdc4ca2424bf0f1965c2 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Wed, 7 Jun 2023 15:51:03 +0100 Subject: [PATCH 74/87] ISMCPHDFilter fix for no detections --- stonesoup/custom/initiator/smcphd.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stonesoup/custom/initiator/smcphd.py b/stonesoup/custom/initiator/smcphd.py index d1ed8398a..e6aff468b 100644 --- a/stonesoup/custom/initiator/smcphd.py +++ b/stonesoup/custom/initiator/smcphd.py @@ -424,6 +424,14 @@ def get_birth_state(self, prediction, detections, timestamp): allow_singular=True) * Probability(self.birth_rate / num_birth) birth_particles = np.hstack((birth_particles, birth_particles_i)) birth_weights = np.hstack((birth_weights, birth_weights_i)) + else: + birth_particles = multivariate_normal.rvs(self.birth_density.mean.ravel(), + self.birth_density.covar, + num_birth).T + birth_weights = multivariate_normal.pdf(birth_particles.T, + self.birth_density.mean.ravel(), + self.birth_density.covar, + allow_singular=True) * Probability(self.birth_rate / num_birth) # birth_weights = np.full((num_birth,), Probability(self.birth_rate / num_birth)) birth_particles = StateVectors(birth_particles) birth_state = Prediction.from_state(prediction, From 0ace0b1d56e309326096b53cc1c5421cebf9ae0d Mon Sep 17 00:00:00 2001 From: sglvladi Date: Sat, 10 Jun 2023 00:56:21 +0100 Subject: [PATCH 75/87] Add hierarchical tracking example --- .../hierarchical-tracking-example.py | 377 ++++++++++++++++++ stonesoup/custom/reader/tracklet.py | 35 +- 2 files changed, 383 insertions(+), 29 deletions(-) create mode 100644 
examples/reactive-isr/hierarchical-tracking-example.py diff --git a/examples/reactive-isr/hierarchical-tracking-example.py b/examples/reactive-isr/hierarchical-tracking-example.py new file mode 100644 index 000000000..9abcc1a22 --- /dev/null +++ b/examples/reactive-isr/hierarchical-tracking-example.py @@ -0,0 +1,377 @@ +""" +This example demonstrates a simple hierarchical tracker: + + |----------------| + | Top Tracker | + |----------------| + | + -------------------- + | | + |----------------| |----------------| + | Fuse Tracker 1 | | Fuse Tracker 2 | + |----------------| |----------------| + | | + ------------------- ------------ + | | | +|----------------| |----------------| |----------------| +| Leaf Tracker 1 | | Leaf Tracker 2 | | Leaf Tracker 3 | +|----------------| |----------------| |----------------| + | | | +|----------------| |----------------| |----------------| +| Sensor 1 | | Sensor 2 | | Sensor 3 | +|----------------| |----------------| |----------------| + +""" +import numpy as np +from datetime import datetime, timedelta +from copy import deepcopy, copy +import matplotlib.pyplot as plt +from matplotlib.patches import Ellipse + +from stonesoup.custom.sensor.movable import MovableUAVCamera +from stonesoup.custom.tracker import SMCPHD_JIPDA +from stonesoup.custom.types.tracklet import SensorTracks +from stonesoup.custom.initiator.twostate import TwoStateInitiator +from stonesoup.types.numeric import Probability +from stonesoup.types.state import State, GaussianState +from stonesoup.types.array import StateVector, CovarianceMatrix +from stonesoup.platform.base import MovingPlatform +from stonesoup.models.transition.linear import (CombinedLinearGaussianTransitionModel, + ConstantVelocity, KnownTurnRate, + NthDerivativeDecay, + OrnsteinUhlenbeck) +from stonesoup.platform.base import MultiTransitionMovingPlatform +from stonesoup.simulator.simple import DummyGroundTruthSimulator +from stonesoup.types.track import Track +from stonesoup.types.update import 
Update, GaussianStateUpdate +from stonesoup.gater.distance import DistanceGater +from stonesoup.plugins.pyehm import JPDAWithEHM2 +from stonesoup.measures import Mahalanobis + +from utils import plot_cov_ellipse + +from stonesoup.custom.hypothesiser.probability import PDAHypothesiser +from stonesoup.custom.simulator.platform import PlatformTargetDetectionSimulator +from stonesoup.custom.predictor.twostate import TwoStatePredictor +from stonesoup.custom.updater.twostate import TwoStateKalmanUpdater +from stonesoup.custom.reader.tracklet import TrackletExtractor, PseudoMeasExtractor +from stonesoup.custom.tracker.fuse import FuseTracker2 + + +def to_single_state(tracks): + """ Converts a set of tracks with two-state vectors to a set of tracks with one-state vectors""" + new_tracks = set() + for track in tracks: + states = [] + for state in track.states: + if isinstance(state, Update): + new_state = GaussianStateUpdate(state.state_vector[6:], state.covar[6:, 6:], + hypothesis=state.hypothesis, + timestamp=state.timestamp) + else: + new_state = GaussianState(state.state_vector[6:], state.covar[6:, 6:], + timestamp=state.timestamp) + states.append(new_state) + new_tracks.add(Track(id=track.id, states=states)) + return new_tracks + + +# Parameters +np.random.seed(1000) +clutter_rate = 1 # Mean number of clutter points per scan +max_range = 50 # Max range of sensor (meters) +surveillance_area = np.pi * max_range ** 2 # Surveillance region area +clutter_density = clutter_rate / surveillance_area # Mean number of clutter points per unit area +prob_detect = 0.9 # Probability of Detection +num_timesteps = 151 # Number of simulation timesteps +PLOT = True # Plot the results or not + +# Simulation components +# --------------------- +# In this simulation, we have 3 platforms, each with a sensor. The sensors are mounted on the +# platforms and can move with them. The platforms are moving in a straight line at constant +# velocity. 
There also exists a (non-cooperative) target that is moving
PlatformTargetDetectionSimulator(groundtruth=gnd_simulator, platforms=[platforms[0]], + targets=[platforms[1], platforms[2], target]) +detector2 = PlatformTargetDetectionSimulator(groundtruth=gnd_simulator, platforms=[platforms[1]], + targets=[platforms[0], platforms[2], target]) +detector3 = PlatformTargetDetectionSimulator(groundtruth=gnd_simulator, platforms=[platforms[2]], + targets=[platforms[0], platforms[1], target]) +all_detectors = [detector1, detector2, detector3] + +# Hierarchical tracking components +# -------------------------------- +# In this section, we define the components of the hierarchical trackers. Recall that the +# hierarchy is as follows: +# 1. 3 Leaf trackers (one for each sensor) +# 2. 2 Branch fuse trackers: +# a. One that fuses the tracks from leaf trackers 1 and 2 +# b. One that fuses the tracks from leaf tracker 3 +# 3. The root (top) fuse tracker that fuses the tracks from the branch trackers + +# Leaf trackers (one for each sensor) +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# Each leaf tracker is a JIPDA tracker that uses a SMC-PHD filter to initialise tracks. 
+leaf_trackers = [] +for i, detector in enumerate(all_detectors): + transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(1)] * 3) + birth_density = GaussianState(StateVector([0, 0, 0, 0, 0, 0]), + CovarianceMatrix(np.diag([50, 2, 50, 2, 0, 0]))) + prob_death = Probability(0.01) # Probability of death + prob_birth = Probability(0.1) # Probability of birth + prob_survive = Probability(0.99) # Probability of survival + birth_rate = 0.02 + num_particles = 2 ** 11 + birth_scheme = 'mixture' + tracker = SMCPHD_JIPDA(birth_density=birth_density, transition_model=transition_model, + measurement_model=None, prob_detection=prob_detect, + prob_death=prob_death, prob_birth=prob_birth, + birth_rate=birth_rate, clutter_intensity=clutter_density, + num_samples=num_particles, birth_scheme=birth_scheme, + start_time=start_time, detector=detector, use_ismcphd=True) + leaf_trackers.append(tracker) + +# Fusion Tracker components +# ~~~~~~~~~~~~~~~~~~~~~~~~~ +# The fusion trackers are JPDA trackers, that use the tracks from the leaf trackers as inputs. +# The transition model, predictor, updater, hypothesiser, data associator, and initiator can +# be shared between the fusion trackers. +# +# On the contrary, the tracklet and pseudo-measurement extractors must be defined separately for +# each fusion tracker. This is because, these components perform caching of the tracks and +# pseudo-measurements, generated at each time step. 
+ +# Transition model +transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(1)] * 3) + +# Predictors and updaters +two_state_predictor = TwoStatePredictor(transition_model) +two_state_updater = TwoStateKalmanUpdater(None, True) + +# Hypothesiser +hypothesiser1 = PDAHypothesiser(predictor=None, + updater=two_state_updater, + clutter_spatial_density=Probability(-80, log_value=True), + prob_detect=Probability(prob_detect), + prob_gate=Probability(0.99), + predict=False, + per_measurement=True) +hypothesiser1 = DistanceGater(hypothesiser1, Mahalanobis(), 10) # Uncomment to use JPDA+EHM2 + +# Data associator +fuse_associator = JPDAWithEHM2(hypothesiser1) # in Fuse tracker + +# Initiator +prior = GaussianState(StateVector([0, 0, 0, 0, 0, 0]), + CovarianceMatrix(np.diag([50, 5, 50, 5, 0, 0]))) # Uncomment for GNN in Fuse Tracker +initiator1 = TwoStateInitiator(prior, transition_model, two_state_updater) + +# Fuse tracker 1 (fuses tracks from leaf trackers 1 and 2) +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +tracklet_extractor = TrackletExtractor(transition_model=transition_model, + fuse_interval=timedelta(seconds=2)) +pseudomeas_extractor = PseudoMeasExtractor() +fuse_tracker1 = FuseTracker2(initiator=initiator1, predictor=two_state_predictor, + updater=two_state_updater, associator=fuse_associator, + tracklet_extractor=tracklet_extractor, + pseudomeas_extractor=detector, death_rate=1e-4, + prob_detect=Probability(prob_detect), + delete_thresh=Probability(0.1)) + +# Fuse tracker 2 (fuses tracks from leaf tracker 3) +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +tracklet_extractor2 = TrackletExtractor(transition_model=transition_model, + fuse_interval=timedelta(seconds=2)) +pseudomeas_extractor2 = PseudoMeasExtractor() +fuse_tracker2 = FuseTracker2(initiator=initiator1, predictor=two_state_predictor, + updater=two_state_updater, associator=fuse_associator, + tracklet_extractor=tracklet_extractor, + pseudomeas_extractor=detector, 
death_rate=1e-4, + prob_detect=Probability(prob_detect), + delete_thresh=Probability(0.1)) + +# Root tracker (fuses tracks from the branch trackers) +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +tracklet_extractor3 = TrackletExtractor(transition_model=transition_model, + fuse_interval=timedelta(seconds=4)) +pseudomeas_extractor3 = PseudoMeasExtractor() +fuse_tracker3 = FuseTracker2(initiator=initiator1, predictor=two_state_predictor, + updater=two_state_updater, associator=fuse_associator, + tracklet_extractor=tracklet_extractor2, + pseudomeas_extractor=detector, death_rate=1e-4, + prob_detect=Probability(prob_detect), + delete_thresh=Probability(0.1)) + +# Run the simulation +# ------------------ +sim_start_time = datetime.now() +tracks = set() +if PLOT: + plt.figure(figsize=(10, 10)) + plt.ion() + +# We use the leaf trackers as our clock for the simulation. Each leaf tracker provides an +# iterator over the tracks it generates at each time step. We use the `zip` function to +# iterate over the tracks from all the leaf trackers simultaneously. 
+for (timestamp, tracks1), (_, tracks2), (_, tracks3) in zip(*leaf_trackers): + + # Run Fuse tracker 1 + # ~~~~~~~~~~~~~~~~~~~~ + # Group tracks from leaf trackers 1 and 2 + alltracks1 = [SensorTracks(tracks, i, transition_model) for i, tracks + in enumerate([tracks1, tracks2])] + # Extract tracklets + tracklets1 = tracklet_extractor.extract(alltracks1, timestamp) + # Extract pseudo-measurements + scans1 = pseudomeas_extractor.extract(tracklets1, timestamp) + # Generate fused tracks + ctracks1 = fuse_tracker1.process_scans(scans1) + + # Run Fuse tracker 2 + # ~~~~~~~~~~~~~~~~~~~~ + # Group tracks from leaf tracker 3 + alltracks2 = [SensorTracks(tracks3, 2, transition_model)] + # Extract tracklets + tracklets2 = tracklet_extractor2.extract(alltracks2, timestamp) + # Extract pseudo-measurements + scans2 = pseudomeas_extractor2.extract(tracklets2, timestamp) + # Generate fused tracks + ctracks2 = fuse_tracker2.process_scans(scans2) + + # Run Root tracker + # ~~~~~~~~~~~~~~~~ + # Convert two-state tracks to single-state tracks + ctracks11 = to_single_state(ctracks1) + ctracks22 = to_single_state(ctracks2) + # Group tracks from Fuse trackers 1 and 2 + alltracks3 = [SensorTracks(tracks, i, transition_model) for i, tracks + in enumerate([ctracks11, ctracks22])] + # Extract tracklets + tracklets3 = tracklet_extractor3.extract(alltracks3, timestamp) + # Extract pseudo-measurements + scans3 = pseudomeas_extractor3.extract(tracklets3, timestamp) + # Generate fused tracks + ctracks3 = fuse_tracker3.process_scans(scans3) + + # Store tracks + tracks.update(ctracks3) + + # Print progress + print(f'{timestamp - start_time} - No. 
Tracks: {len(ctracks3)}') + + # Plot + if PLOT: + plt.clf() + colors = ['r', 'g', 'b'] + + # Plot groundtruth + data = np.array([state.state_vector for state in target]) + plt.plot(data[:, 0], data[:, 2], '--k', label='Groundtruth (Target)') + for i, (platform, color) in enumerate(zip(platforms, colors)): + data = np.array([state.state_vector for state in platform]) + plt.plot(data[:, 0], data[:, 2], f'--{color}') + + # Plot sensor FOVs + ax1 = plt.gca() + for j, platform in enumerate(platforms): + sensor = platform.sensors[0] + circle = plt.Circle((sensor.position[0], sensor.position[1]), radius=sensor.fov_radius, + color=colors[j], + fill=False, + label=f'Sensor {j + 1}') + ax1.add_artist(circle, ) + + # Plot detections + all_detections = [detector.detections for detector in all_detectors] + for i, (detections, color) in enumerate(zip(all_detections, colors)): + for detection in detections: + model = detection.measurement_model + x, y = detection.state_vector[0], detection.state_vector[1] + plt.plot(x, y, f'{color}x') + + # Plot tracks from Fuse tracker 1 + for track in ctracks1: + data = np.array([state.state_vector for state in track]) + plot_cov_ellipse(track.covar[[6, 8], :][:, [6, 8]], track.state_vector[[6, 8], :], + edgecolor='r', facecolor='none', ax=ax1) + plt.plot(data[:, 6], data[:, 8], '-*c') + + # Plot tracks from Fuse tracker 2 + for track in ctracks2: + data = np.array([state.state_vector for state in track]) + plot_cov_ellipse(track.covar[[6, 8], :][:, [6, 8]], track.state_vector[[6, 8], :], + edgecolor='r', facecolor='none', ax=ax1) + plt.plot(data[:, 6], data[:, 8], '-*r') + + # Plot tracks from Root tracker + for track in tracks: + data = np.array([state.state_vector for state in track]) + plot_cov_ellipse(track.covar[[6, 8], :][:, [6, 8]], track.state_vector[[6, 8], :], + edgecolor='r', facecolor='none', ax=ax1) + plt.plot(data[:, 6], data[:, 8], '-*m') + + # Add legend info + for i, color in enumerate(colors): + plt.plot([], [], f'--{color}', 
label=f'Groundtruth (Sensor {i + 1})') + plt.plot([], [], f':{color}', label=f'Tracklets (Sensor {i + 1})') + plt.plot([], [], f'x{color}', label=f'Detections (Sensor {i + 1})') + plt.plot([], [], f'-*m', label=f'Fused Tracks') + + plt.legend(loc='upper right') + plt.xlim((-200, 200)) + plt.ylim((-200, 200)) + plt.pause(0.01) + +print(datetime.now() - sim_start_time) diff --git a/stonesoup/custom/reader/tracklet.py b/stonesoup/custom/reader/tracklet.py index 036af828e..8cb086081 100644 --- a/stonesoup/custom/reader/tracklet.py +++ b/stonesoup/custom/reader/tracklet.py @@ -25,9 +25,12 @@ class TrackletExtractor(Base, BufferedGenerator): - trackers: List[Tracker] = Property(doc='List of trackers from which to extract tracks') transition_model: TransitionModel = Property(doc='Transition model') fuse_interval: datetime.timedelta = Property(doc='Fusion interval') + trackers: List[Tracker] = Property( + doc='List of trackers from which to extract tracks.', + default=None + ) real_time: bool = Property(doc='Flag indicating whether the extractor should report ' 'real time', default=False) @@ -266,38 +269,12 @@ def rts_smoother_endpoints(cls, filtered_means, filtered_covs, times, tx_model): return joint_smoothed_mean, joint_smoothed_cov -class TrackletExtractorWithTracker(TrackletExtractor): - detectors: List[Detector] = Property(doc='List of detectors') - core_tracker: Tracker = Property(doc='Core tracker used for each detector') - run_async: bool = Property( - doc="If set to ``True``, the reader will read tracks from the tracker asynchronously " - "and only yield the latest set of tracks when iterated." 
- "Defaults to ``False``", - default=False) - - def __init__(self, *args, **kwargs): - super(TrackletExtractorWithTracker, self).__init__(*args, **kwargs) - sensor_id_offset = len(self.trackers) - for i, detector in enumerate(self.detectors): - tracker = deepcopy(self.core_tracker) - tracker.detector = detector - # Extract transition model - hypothesiser = tracker.data_associator.hypothesiser - while not hasattr(hypothesiser, 'predictor'): - hypothesiser = hypothesiser.hypothesiser - transition_model = hypothesiser.predictor.transition_model - self.trackers.append(TrackReader(tracker, - transition_model=transition_model, - sensor_id=sensor_id_offset+i, - run_async=self.run_async)) - - class PseudoMeasExtractor(Base, BufferedGenerator): - tracklet_extractor: TrackletExtractor = Property(doc='The tracket extractor') + tracklet_extractor: TrackletExtractor = Property(doc='The tracket extractor', default=None) target_state_dim: int = Property(doc='The target state dim', default=None) state_idx_to_use: List[int] = Property(doc='The indices of the state corresponding to pos/vel', default=None) - use_prior: bool = Property(doc="", default=True) + use_prior: bool = Property(doc="", default=False) def __init__(self, *args, **kwargs): super(PseudoMeasExtractor, self).__init__(*args, **kwargs) From 019fb089366dbda60b9a8a939479ab80f3181fe5 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Thu, 15 Jun 2023 15:59:26 +0100 Subject: [PATCH 76/87] Hierarchical tracking example update --- .../hierarchical-tracking-example.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/examples/reactive-isr/hierarchical-tracking-example.py b/examples/reactive-isr/hierarchical-tracking-example.py index 9abcc1a22..f1c144e44 100644 --- a/examples/reactive-isr/hierarchical-tracking-example.py +++ b/examples/reactive-isr/hierarchical-tracking-example.py @@ -226,7 +226,7 @@ def to_single_state(tracks): fuse_tracker1 = FuseTracker2(initiator=initiator1, 
predictor=two_state_predictor, updater=two_state_updater, associator=fuse_associator, tracklet_extractor=tracklet_extractor, - pseudomeas_extractor=detector, death_rate=1e-4, + pseudomeas_extractor=pseudomeas_extractor, death_rate=1e-4, prob_detect=Probability(prob_detect), delete_thresh=Probability(0.1)) @@ -237,8 +237,8 @@ def to_single_state(tracks): pseudomeas_extractor2 = PseudoMeasExtractor() fuse_tracker2 = FuseTracker2(initiator=initiator1, predictor=two_state_predictor, updater=two_state_updater, associator=fuse_associator, - tracklet_extractor=tracklet_extractor, - pseudomeas_extractor=detector, death_rate=1e-4, + tracklet_extractor=tracklet_extractor2, + pseudomeas_extractor=pseudomeas_extractor2, death_rate=1e-4, prob_detect=Probability(prob_detect), delete_thresh=Probability(0.1)) @@ -249,8 +249,8 @@ def to_single_state(tracks): pseudomeas_extractor3 = PseudoMeasExtractor() fuse_tracker3 = FuseTracker2(initiator=initiator1, predictor=two_state_predictor, updater=two_state_updater, associator=fuse_associator, - tracklet_extractor=tracklet_extractor2, - pseudomeas_extractor=detector, death_rate=1e-4, + tracklet_extractor=tracklet_extractor3, + pseudomeas_extractor=pseudomeas_extractor3, death_rate=1e-4, prob_detect=Probability(prob_detect), delete_thresh=Probability(0.1)) @@ -278,6 +278,8 @@ def to_single_state(tracks): scans1 = pseudomeas_extractor.extract(tracklets1, timestamp) # Generate fused tracks ctracks1 = fuse_tracker1.process_scans(scans1) + # The above steps can be combined into a single function call + # ctracks1 = fuse_tracker1.process_tracks(alltracks1, timestamp) # Run Fuse tracker 2 # ~~~~~~~~~~~~~~~~~~~~ @@ -289,6 +291,8 @@ def to_single_state(tracks): scans2 = pseudomeas_extractor2.extract(tracklets2, timestamp) # Generate fused tracks ctracks2 = fuse_tracker2.process_scans(scans2) + # The above steps can be combined into a single function call + # ctracks2 = fuse_tracker2.process_tracks(alltracks2, timestamp) # Run Root tracker # 
~~~~~~~~~~~~~~~~ @@ -304,6 +308,8 @@ def to_single_state(tracks): scans3 = pseudomeas_extractor3.extract(tracklets3, timestamp) # Generate fused tracks ctracks3 = fuse_tracker3.process_scans(scans3) + # The above steps can be combined into a single function call + # ctracks3 = fuse_tracker3.process_tracks(alltracks3, timestamp) # Store tracks tracks.update(ctracks3) From 787ccac1539a28232602db435dc09849f790cf9e Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 17 Jul 2023 01:28:38 +0100 Subject: [PATCH 77/87] Add fix for non-positive/non-semi-definite measurement prediction covariance --- stonesoup/custom/hypothesiser/probability.py | 3 ++ stonesoup/functions/__init__.py | 57 ++++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/stonesoup/custom/hypothesiser/probability.py b/stonesoup/custom/hypothesiser/probability.py index 7dad20760..754cc2718 100644 --- a/stonesoup/custom/hypothesiser/probability.py +++ b/stonesoup/custom/hypothesiser/probability.py @@ -5,6 +5,7 @@ from scipy.stats import multivariate_normal as mn from stonesoup.base import Property +from stonesoup.functions import nearestSPD from stonesoup.hypothesiser import Hypothesiser from stonesoup.measures import SquaredMahalanobis from stonesoup.predictor import Predictor @@ -156,6 +157,8 @@ def hypothesise(self, track, detections, timestamp, **kwargs): # Compute measurement prediction and probability measure measurement_prediction = self.updater.predict_measurement( prediction, detection.measurement_model, **kwargs) + # Ensure covariance is positive definite + measurement_prediction.covar = nearestSPD(measurement_prediction.covar) # Calculate difference before to handle custom types (mean defaults to zero) # This is required as log pdf coverts arrays to floats diff --git a/stonesoup/functions/__init__.py b/stonesoup/functions/__init__.py index f18e14ab9..9222682a5 100644 --- a/stonesoup/functions/__init__.py +++ b/stonesoup/functions/__init__.py @@ -2,6 +2,7 @@ import copy import numpy as 
np +from numpy import linalg as la from ..types.numeric import Probability from ..types.array import StateVector, StateVectors, CovarianceMatrix @@ -670,3 +671,59 @@ def sde_euler_maruyama_integration(fun, t_values, state_x0): a, b = fun(state_x, t) state_x.state_vector = state_x.state_vector + a*delta_t + b@delta_w return state_x.state_vector + + +def nearestSPD(A): + """Find the nearest positive-semidefinite matrix to input + + A Python/Numpy port of John D'Errico's `nearestSPD` MATLAB code [1], which + credits [2]. + + [1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd + + [2] N.J. Higham, "Computing a nearest symmetric positive semidefinite + matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6 + """ + + if isPD(A): + return A + + B = (A + A.T) / 2 + _, s, V = la.svd(B) + + H = np.dot(V.T, np.dot(np.diag(s), V)) + + A2 = (B + H) / 2 + + A3 = (A2 + A2.T) / 2 + + if isPD(A3): + return A3 + + spacing = np.spacing(la.norm(A)) + # The above is different from [1]. It appears that MATLAB's `chol` Cholesky + # decomposition will accept matrixes with exactly 0-eigenvalue, whereas + # Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab + # for `np.spacing`), we use the above definition. CAVEAT: our `spacing` + # will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on + # the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas + # `spacing` will, for Gaussian random matrixes of small dimension, be on + # othe order of 1e-16. In practice, both ways converge, as the unit test + # below suggests. 
+ I = np.eye(A.shape[0]) + k = 1 + while not isPD(A3): + mineig = np.min(np.real(la.eigvals(A3))) + A3 += I * (-mineig * k**2 + spacing) + k += 1 + + return CovarianceMatrix(A3) + + +def isPD(B): + """Returns true when input is positive-definite, via Cholesky""" + try: + _ = la.cholesky(B) + return True + except la.LinAlgError: + return False \ No newline at end of file From 30e5396f277936fdd49cbfee5f1238926ec664cf Mon Sep 17 00:00:00 2001 From: sglvladi Date: Wed, 5 Jul 2023 00:08:26 +0100 Subject: [PATCH 78/87] WIP: Comms & Processing --- stonesoup/custom/sensor/action/processing.py | 108 +++++++++++++++++++ stonesoup/custom/sensor/processing.py | 103 ++++++++++++++++++ 2 files changed, 211 insertions(+) create mode 100644 stonesoup/custom/sensor/action/processing.py create mode 100644 stonesoup/custom/sensor/processing.py diff --git a/stonesoup/custom/sensor/action/processing.py b/stonesoup/custom/sensor/action/processing.py new file mode 100644 index 000000000..17c7ba2cb --- /dev/null +++ b/stonesoup/custom/sensor/action/processing.py @@ -0,0 +1,108 @@ +from enum import Enum +from uuid import UUID + +import numpy as np +import scipy +from scipy.stats import mvn, norm + +from stonesoup.base import Base, Property +from stonesoup.sensor.action import Action, ActionGenerator +from stonesoup.types.state import GaussianState + + +class ProcessOutput(Base): + run_time: float = Property(doc="Processing time in seconds") + + +class ProcessingJobState(Enum): + PENDING = 0 + RUNNING = 1 + COMPLETED = 2 + + +class ProcessingJob(Base): + id: UUID = Property(doc="Unique identifier for the job") + algorithm: str = Property(doc="Algorithm to be used") + probability_of_detection: float = Property(doc="Probability of detection") + clutter_density: float = Property(doc="Clutter density") + processing_time: GaussianState = Property(doc="Processing time in seconds") + state: ProcessingJobState = Property(doc="State of the job", + default=ProcessingJobState.PENDING) + + def 
__init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._start_time = None + self._end_time = None + + @property + def start_time(self): + return self._start_time + + @property + def end_time(self): + return self._end_time + + def start(self, timestamp): + self.state = ProcessingJobState.RUNNING + self._start_time = timestamp + noise = np.max([0, norm.rvs(self.processing_time.mean, np.sqrt(self.processing_time.covar))]) + self._end_time = timestamp + noise + + +class ProcessingAction(Action): + target_value: UUID = Property(doc="Target value.") + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._end_time = mvn.rvs(self.processing_time.mean, self.processing_time.covar) + + @property + def end_time(self): + return self._end_time + + def act(self, current_time, timestamp, init_value): + if current_time >= self.end_time: + return True + else: + return False + + +class ProcessingActionGenerator(ActionGenerator): + """Generates possible actions for processing data in a given time period.""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def default_action(self): + return ProcessingAction(generator=self, + end_time=self.end_time, + target_value=True) + + def __call__(self, resolution=None, epsilon=None): + """ + Parameters + ---------- + resolution: float + Resolution of the action space + epsilon: float + Probability of taking a random action + + Returns + ------- + Iterator[Action] + Iterator of actions + """ + if resolution is None: + resolution = self.resolution + if epsilon is None: + epsilon = self.epsilon + + if np.random.random() < epsilon: + yield self.default_action + else: + for value in np.arange(self.limits[0], self.limits[1], resolution): + yield self._action_cls(generator=self, + end_time=self.end_time, + target_value=value) + diff --git a/stonesoup/custom/sensor/processing.py b/stonesoup/custom/sensor/processing.py new file mode 100644 index 
000000000..0b613c52a --- /dev/null +++ b/stonesoup/custom/sensor/processing.py @@ -0,0 +1,103 @@ +import datetime +import random +from queue import Queue +from typing import Set, Union +from uuid import UUID + +import numpy as np + +from stonesoup.base import Property +from stonesoup.custom.sensor.action.processing import ProcessingActionInput, \ + ProcessingActionGenerator, ProcessingJobState +from stonesoup.models.clutter import ClutterModel +from stonesoup.models.measurement.linear import LinearGaussian +from stonesoup.sensor.actionable import ActionableProperty +from stonesoup.sensor.sensor import Sensor +from stonesoup.types.array import CovarianceMatrix +from stonesoup.types.detection import TrueDetection +from stonesoup.types.groundtruth import GroundTruthState + + +class ProcessingNode(Sensor): + ndim_state: int = Property( + doc="Number of state dimensions. This is utilised by (and follows in\ + format) the underlying :class:`~.CartesianToElevationBearing`\ + model") + mapping: np.ndarray = Property( + doc="Mapping between the targets state space and the sensors\ + measurement capability") + noise_covar: CovarianceMatrix = Property( + doc="The sensor noise covariance matrix. This is utilised by\ + (and follow in format) the underlying \ + :class:`~.LinearGaussian` model") + current_job_id: UUID = ActionableProperty( + doc="The current job id", + default=None, + generator_cls=ProcessingActionGenerator + ) + clutter_model: ClutterModel = Property( + default=None, + doc="An optional clutter generator that adds a set of simulated " + ":class:`Clutter` objects to the measurements at each time step. 
" + "The clutter is simulated according to the provided distribution.") + + def __init__(self, *args, **kwargs): + self._current_job = None + super().__init__(*args, **kwargs) + self._job_queue = [] + + @current_job_id.setter + def current_job_id(self, value): + self._property_current_job_id = value + if self.current_job is not None: + self._current_job = next((job for job in self.job_queue if job.id == value), None) + else: + self._current_job = None + + @property + def job_queue(self): + return self._job_queue + + @property + def current_job(self): + return self._current_job + + @property + def measurement_model(self): + return LinearGaussian( + ndim_state=self.ndim_state, + mapping=self.mapping, + noise_covar=self.noise_covar) + + def act(self, timestamp: datetime.datetime): + if not self.validate_timestamp(): + self.timestamp = timestamp + return + + if self.current_job is None or self.current_job.state == ProcessingJobState.COMPLETED: + self._job_queue.remove(self.current_job) + self.current_job_id = None + elif self.current_job is not None: + if self.current_job.state == ProcessingJobState.RUNNING \ + and timestamp >= self.current_job.end_time: + self.current_job.state = ProcessingJobState.COMPLETED + self.timestamp = timestamp + + + def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True, + **kwargs) -> Set[TrueDetection]: + detections = set() + measurement_model = self.measurement_model + + if self.current_job is not None and self.current_job.state == ProcessingJobState.COMPLETED: + for ground_truth in ground_truths: + if random.random() < self.current_job.probability_of_detection: + measurement = measurement_model.function(ground_truth.state_vector, noise, **kwargs) + detection = TrueDetection(state_vector=measurement, + groundtruth_path=ground_truth, + sensor_state=self) + detections.add(detection) + if self.clutter_model is not None: + + + From 9576e703640e6812dcf8b7eb51786f8cc05f884e Mon Sep 17 00:00:00 2001 From: 
sglvladi Date: Thu, 27 Jul 2023 15:28:12 +0100 Subject: [PATCH 79/87] Fix issue with fuse tracker sometimes not updating when no scans exist --- stonesoup/custom/reader/tracklet.py | 6 +++++- stonesoup/custom/sensor/movable.py | 2 +- stonesoup/custom/tracker/fuse.py | 26 +++++++++++++++++++++++++- 3 files changed, 31 insertions(+), 3 deletions(-) diff --git a/stonesoup/custom/reader/tracklet.py b/stonesoup/custom/reader/tracklet.py index 8cb086081..09392cb79 100644 --- a/stonesoup/custom/reader/tracklet.py +++ b/stonesoup/custom/reader/tracklet.py @@ -308,8 +308,12 @@ def extract(self, tracklets, timestamp): def get_scans_from_tracklets(self, tracklets, timestamp): measdata = self.get_pseudomeas(tracklets) - self._last_scan = timestamp scans = self.get_scans_from_measdata(measdata) + # if not len(scans): + # scans = [Scan(start_time=self._last_scan, + # end_time=timestamp, + # sensor_scans=[])] + self._last_scan = timestamp # Sort the scans by start time scans.sort(key=lambda x: x.start_time) return scans diff --git a/stonesoup/custom/sensor/movable.py b/stonesoup/custom/sensor/movable.py index 0477083ec..f7c0d355b 100644 --- a/stonesoup/custom/sensor/movable.py +++ b/stonesoup/custom/sensor/movable.py @@ -116,7 +116,7 @@ def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, if self.fov_in_km: # distance = geopy.distance.distance(np.flip(self.position[0:2]), # np.flip(measurement_vector[0:2])).km - if not self._footprint.contains(Point(measurement_vector[0:2])): + if not self.footprint.contains(Point(measurement_vector[0:2])): continue else: # Normalise measurement vector relative to sensor position diff --git a/stonesoup/custom/tracker/fuse.py b/stonesoup/custom/tracker/fuse.py index 089459c96..b978911a3 100644 --- a/stonesoup/custom/tracker/fuse.py +++ b/stonesoup/custom/tracker/fuse.py @@ -1,6 +1,7 @@ import numpy as np from ..types.hypothesis import MultiHypothesis +from ..types.tracklet import Scan from ...base import Base, Property 
from ...dataassociator.probability import JPDA from ...tracker import Tracker @@ -56,6 +57,27 @@ def process_scan(self, scan, tracks, current_end_time): current_start_time = new_start_time current_end_time = new_end_time + if not len(scan.sensor_scans): + tracks = list(tracks) + detections = set() + + # Perform data association + associations = self.associator.associate(tracks, detections, + timestamp=current_end_time) + # Update tracks + for track in tracks: + self.update_track(track, associations[track], scan.id) + + # Initiate new tracks on unassociated detections + if isinstance(self.associator, JPDA): + assoc_detections = set( + [h.measurement for hyp in associations.values() for h in hyp if h]) + else: + assoc_detections = set( + [hyp.measurement for hyp in associations.values() if hyp]) + + tracks = set(tracks) + for sensor_scan in scan.sensor_scans: tracks = list(tracks) detections = set(sensor_scan.detections) @@ -256,11 +278,13 @@ def process_tracks(self, alltracks, timestamp): tracklets = self.tracklet_extractor.extract(alltracks, timestamp) # Extract pseudo-measurements scans = self.pseudomeas_extractor.extract(tracklets, timestamp) + if not len(scans) and self._current_end_time and timestamp - self._current_end_time >= self.tracklet_extractor.fuse_interval: + scans = [Scan(self._current_end_time, timestamp, [])] for scan in scans: self._tracks, self._current_end_time = self.process_scan(scan, self.tracks, self._current_end_time) - return timestamp, self.tracks + return self.tracks def process_scans(self, scans): for scan in scans: From 5b277ba11c5291608109b241e37b6970b0ede423 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Mon, 31 Jul 2023 16:20:46 +0100 Subject: [PATCH 80/87] Fix/Patch LinAlgError in PDAHypothesiser --- stonesoup/custom/hypothesiser/probability.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/stonesoup/custom/hypothesiser/probability.py b/stonesoup/custom/hypothesiser/probability.py index 
754cc2718..f4ff605ac 100644 --- a/stonesoup/custom/hypothesiser/probability.py +++ b/stonesoup/custom/hypothesiser/probability.py @@ -162,9 +162,15 @@ def hypothesise(self, track, detections, timestamp, **kwargs): # Calculate difference before to handle custom types (mean defaults to zero) # This is required as log pdf coverts arrays to floats - log_pdf = mn.logpdf( - (detection.state_vector - measurement_prediction.state_vector).ravel(), - cov=measurement_prediction.covar) + try: + log_pdf = mn.logpdf( + (detection.state_vector - measurement_prediction.state_vector).ravel(), + cov=measurement_prediction.covar) + except np.linalg.LinAlgError: + print('Had to allow singular when evaluating likelihood!!!') + log_pdf = mn.logpdf( + (detection.state_vector - measurement_prediction.state_vector).ravel(), + cov=measurement_prediction.covar, allow_singular=True) pdf = Probability(log_pdf, log_value=True) probability = (pdf * prob_detect) / self.clutter_spatial_density From 53167080a67f068cfec13cc8102bb3980d4a9869 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Sun, 6 Aug 2023 16:19:13 +0100 Subject: [PATCH 81/87] Add ability to enable/disable birth weights weighting by birth density likelihood in (I)SMCPHD --- stonesoup/custom/initiator/smcphd.py | 71 +++++++++++++++++++++------- 1 file changed, 54 insertions(+), 17 deletions(-) diff --git a/stonesoup/custom/initiator/smcphd.py b/stonesoup/custom/initiator/smcphd.py index e6aff468b..ed4342e2c 100644 --- a/stonesoup/custom/initiator/smcphd.py +++ b/stonesoup/custom/initiator/smcphd.py @@ -53,6 +53,13 @@ class SMCPHDFilter(Base): 'Default is "expansion"', default='expansion' ) + scale_birth_weights: bool = Property( + doc="Whether to scale the birth weights by their likelihood, given the birth density. " + "Setting this to True can cause issues if the defined birth density is not a good " + "approximation to the true birth density. 
On the other hand, setting this to False " + "can lead to premature initialization of targets.", + default=False + ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @@ -92,6 +99,7 @@ def predict(self, state, timestamp): # Sample birth particles birth_particles = np.zeros((pred_particles_sv.shape[0], 0)) + birth_weights = np.zeros((0,)) if isinstance(self.birth_density, GaussianMixture): particles_per_component = num_birth // len(self.birth_density) for i, component in enumerate(self.birth_density): @@ -101,12 +109,30 @@ def predict(self, state, timestamp): component.mean.ravel(), component.covar, particles_per_component).T + birth_weights_component = np.full((particles_per_component,), + Probability(self.birth_rate / num_birth)) + if self.scale_birth_weights: + # Scale birth weights by their likelihood, given the birth density + birth_weights_component *= multivariate_normal.pdf( + birth_particles_component.T, + component.mean.ravel(), + component.covar, + allow_singular=True) birth_particles = np.hstack((birth_particles, birth_particles_component)) + birth_weights = np.hstack((birth_weights, birth_weights_component)) else: birth_particles = multivariate_normal.rvs(self.birth_density.mean.ravel(), self.birth_density.covar, num_birth) - birth_weights = np.full((num_birth,), Probability(self.birth_rate / num_birth)) + birth_weights = np.full((num_birth,), + Probability(self.birth_rate / num_birth)) + if self.scale_birth_weights: + # Scale birth weights by their likelihood, given the birth density + birth_weights *= multivariate_normal.pdf( + birth_particles, + self.birth_density.mean.ravel(), + self.birth_density.covar, + allow_singular=True) # Surviving particle weights prob_survive = np.exp(-float(self.prob_death) * time_interval.total_seconds()) @@ -278,6 +304,7 @@ def get_weights_per_hypothesis(self, prediction, detections, meas_weights, *args class ISMCPHDFilter(SMCPHDFilter): + def predict(self, state, timestamp): """ Predict the next 
state of the target density @@ -376,9 +403,11 @@ def update(self, prediction, detections, timestamp, meas_weights=None): log_num_targets_birth = logsumexp(log_post_weights_birth) # N_{k|k} update2 = copy(birth_state) # Normalize weights - update2.weight = Probability.from_log_ufunc(log_post_weights_birth - log_num_targets_birth) + update2.weight = Probability.from_log_ufunc( + log_post_weights_birth - log_num_targets_birth) if self.resampler is not None: - update2 = self.resampler.resample(update2, update2.state_vector.shape[1]) # Resample + update2 = self.resampler.resample(update2, + update2.state_vector.shape[1]) # Resample # De-normalize update2.weight = Probability.from_log_ufunc(np.log(update2.weight).astype(float) + log_num_targets_birth) @@ -405,7 +434,7 @@ def get_birth_state(self, prediction, detections, timestamp): # Sample birth particles num_birth = round(float(self.prob_birth) * self.num_samples) birth_particles = np.zeros((prediction.state_vector.shape[0], 0)) - birth_weights= np.zeros((0, )) + birth_weights = np.zeros((0,)) if len(detections): num_birth_per_detection = num_birth // len(detections) for i, detection in enumerate(detections): @@ -418,10 +447,13 @@ def get_birth_state(self, prediction, detections, timestamp): birth_particles_i = multivariate_normal.rvs(mu.ravel(), cov, num_birth_per_detection).T - birth_weights_i = multivariate_normal.pdf(birth_particles_i.T, - mu.ravel(), - cov, - allow_singular=True) * Probability(self.birth_rate / num_birth) + birth_weights_i = np.full((num_birth_per_detection,), + Probability(self.birth_rate / num_birth)) + if self.scale_birth_weights: + birth_weights_i *= multivariate_normal.pdf(birth_particles_i.T, + mu.ravel(), + cov, + allow_singular=True) birth_particles = np.hstack((birth_particles, birth_particles_i)) birth_weights = np.hstack((birth_weights, birth_weights_i)) else: @@ -431,7 +463,8 @@ def get_birth_state(self, prediction, detections, timestamp): birth_weights = 
multivariate_normal.pdf(birth_particles.T, self.birth_density.mean.ravel(), self.birth_density.covar, - allow_singular=True) * Probability(self.birth_rate / num_birth) + allow_singular=True) * Probability( + self.birth_rate / num_birth) # birth_weights = np.full((num_birth,), Probability(self.birth_rate / num_birth)) birth_particles = StateVectors(birth_particles) birth_state = Prediction.from_state(prediction, @@ -520,7 +553,7 @@ def initiate(self, detections, timestamp, weights=None, **kwargs): cov = np.cov(particles_sv, ddof=0, aweights=weight) hypothesis = SingleProbabilityHypothesis(prediction, - measurement=detections[idx-1], + measurement=detections[idx - 1], probability=weights_per_hyp[:, idx]) track_state = GaussianStateUpdate(mu, cov, hypothesis=hypothesis, @@ -532,7 +565,7 @@ def initiate(self, detections, timestamp, weights=None, **kwargs): track.exist_prob = Probability(log_intensity_per_hyp[idx], log_value=True) tracks.add(track) - weights[idx-1] = 0 + weights[idx - 1] = 0 # Update filter self._state = self.filter.update(prediction, detections, timestamp, weights) @@ -557,20 +590,23 @@ def initiate(self, detections, timestamp, weights=None, **kwargs): # Calculate weights per hypothesis birth_state = self.filter.get_birth_state(prediction, detections, timestamp) - weights_per_hyp = self.filter.get_weights_per_hypothesis(prediction, detections, weights, birth_state) + weights_per_hyp = self.filter.get_weights_per_hypothesis(prediction, detections, weights, + birth_state) log_weights_per_hyp = np.log(weights_per_hyp[:self.filter.num_samples, :]).astype(float) # Calculate intensity per hypothesis log_intensity_per_hyp = logsumexp(log_weights_per_hyp, axis=0) - # print(np.exp(log_intensity_per_hyp)) + print(np.exp(log_intensity_per_hyp)) # Find detections with intensity above threshold and initiate valid_inds = np.flatnonzero(np.exp(log_intensity_per_hyp) > self.threshold) for idx in valid_inds: if not idx: continue - particles_sv = 
copy(prediction.state_vector[:, :len(prediction)-len(prediction.birth_idx)]) - weight = np.exp(log_weights_per_hyp[:self.filter.num_samples, idx] - log_intensity_per_hyp[idx]) + particles_sv = copy( + prediction.state_vector[:, :len(prediction) - len(prediction.birth_idx)]) + weight = np.exp( + log_weights_per_hyp[:self.filter.num_samples, idx] - log_intensity_per_hyp[idx]) mu = np.average(particles_sv, axis=1, @@ -579,7 +615,8 @@ def initiate(self, detections, timestamp, weights=None, **kwargs): hypothesis = SingleProbabilityHypothesis(prediction, measurement=detections[idx - 1], - probability=weights_per_hyp[:self.filter.num_samples, idx]) + probability=weights_per_hyp[ + :self.filter.num_samples, idx]) track_state = GaussianStateUpdate(mu, cov, hypothesis=hypothesis, timestamp=timestamp) @@ -595,4 +632,4 @@ def initiate(self, detections, timestamp, weights=None, **kwargs): # Update filter self._state = self.filter.update(prediction, detections, timestamp, weights) - return tracks \ No newline at end of file + return tracks From 3afa14033f79f28240d56502d3009cad1654e301 Mon Sep 17 00:00:00 2001 From: sglvladi Date: Tue, 22 Aug 2023 13:25:01 +0100 Subject: [PATCH 82/87] Added comms & processing example --- .../comms_proc/comms_proc_example.py | 322 +++++++++ examples/reactive-isr/comms_proc/evaluator.py | 190 +++++ examples/reactive-isr/comms_proc/utils.py | 262 +++++++ stonesoup/custom/functions/__init__.py | 92 +++ stonesoup/custom/functions/rollout.py | 653 ++++++++++++++++++ stonesoup/custom/hypothesiser/probability.py | 3 +- stonesoup/custom/sensor/movable.py | 13 +- stonesoup/custom/tracker/__init__.py | 20 +- stonesoup/dataassociator/neighbour.py | 7 +- stonesoup/models/clutter/clutter.py | 11 +- 10 files changed, 1557 insertions(+), 16 deletions(-) create mode 100644 examples/reactive-isr/comms_proc/comms_proc_example.py create mode 100644 examples/reactive-isr/comms_proc/evaluator.py create mode 100644 examples/reactive-isr/comms_proc/utils.py create mode 
100644 stonesoup/custom/functions/rollout.py diff --git a/examples/reactive-isr/comms_proc/comms_proc_example.py b/examples/reactive-isr/comms_proc/comms_proc_example.py new file mode 100644 index 000000000..279054e7a --- /dev/null +++ b/examples/reactive-isr/comms_proc/comms_proc_example.py @@ -0,0 +1,322 @@ +from copy import copy, deepcopy +from datetime import datetime, timedelta +from uuid import uuid4 + +import warnings + +from matplotlib import pyplot as plt +from ordered_set import OrderedSet + +from stonesoup.custom.functions.rollout import enumerate_action_configs, extract_rois, get_sensor, \ + queue_actions, ActionTupleType +from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState + +warnings.simplefilter(action='ignore', category=FutureWarning) +warnings.simplefilter(action='ignore', category=RuntimeWarning) + +import numpy as np +from matplotlib.path import Path +from shapely import unary_union + +from reactive_isr_core.data import Node, AvailableAlgorithms, Algorithm, ProcessingStatistics, \ + TraversalTime, Edge, NetworkTopology, Availability, Storage, ImageStore, ProcessingAction, \ + ActionList, CommunicateAction, CollectAction, ActionStatus, Image, GeoLocation +from stonesoup.custom.sensor.movable import MovableUAVCamera +from stonesoup.custom.tracker import SMCPHD_JIPDA, SMCPHD_IGNN +from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \ + ConstantVelocity +from stonesoup.types.array import StateVector +from stonesoup.types.detection import TrueDetection +from stonesoup.types.numeric import Probability +from stonesoup.types.state import GaussianState, ParticleState + +from evaluator import CommsAndProcEvaluator +from utils import setup_network, setup_rfis, plot_cov_ellipse + + +def _prob_detect_func(fovs, prob_detect): + """Closure to return the probability of detection function for a given environment scan""" + # Get the union of all field of views + fovs_union = unary_union(fovs) + if 
fovs_union.geom_type == 'MultiPolygon': + fovs = [poly for poly in fovs_union] + else: + fovs = [fovs_union] + + paths = [Path(poly.boundary.coords) for poly in fovs] + + # Probability of detection nested function + def prob_detect_func(state): + for path_p in paths: + if isinstance(state, ParticleState): + prob_detect_arr = np.full((len(state),), Probability(0.1)) + points = state.state_vector[[0, 2], :].T + inside_points = path_p.contains_points(points) + prob_detect_arr[inside_points] = prob_detect + return prob_detect_arr + else: + points = state.state_vector[[0, 2], :].T + return prob_detect if np.alltrue(path_p.contains_points(points)) \ + else Probability(0) + + return prob_detect_func + +seed = 2001 +np.random.seed(seed) + +# Parameters +# ========== +start_time = datetime.now() # Simulation start time +prob_detect = Probability(.9) # 90% chance of detection. +prob_death = Probability(0.01) # Probability of death +prob_birth = Probability(0.1) # Probability of birth +prob_survive = Probability(0.99) # Probability of survival +birth_rate = 0.02 # Birth-rate (Mean number of new targets per scan) +clutter_rate = 10 # Clutter-rate (Mean number of clutter measurements per scan) +surveillance_region = [[-5, -2], # The surveillance region + [50.1, 53.2]] +surveillance_area = (surveillance_region[0][1] - surveillance_region[0][0]) \ + * (surveillance_region[1][1] - surveillance_region[1][0]) # Surveillance volume +clutter_intensity = clutter_rate / surveillance_area # Clutter intensity per unit volume/area +birth_density = GaussianState( + StateVector(np.array([-2.5, 0.0, 51, 0.0, 0.0, 0.0])), + np.diag([3. ** 2, .01 ** 2, 3. ** 2, .01 ** 2, 0., 0.])) # Birth density +birth_scheme = 'mixture' # Birth scheme. 
Possible values are 'expansion' and 'mixture' +num_particles = 2 ** 8 # Number of particles used by the PHD filter +num_iter = 400 # Number of simulation steps +PLOT = True # Set [True | False] to turn plotting [ON | OFF] +MANUAL_RFI = True # Set [True | False] to turn manual RFI [ON | OFF] +colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k'] # Colors for plotting + +sensor_position = StateVector([-4.5, 51.5, 100.]) +network_topology, assets = setup_network(sensor_position, start_time) +image_store = ImageStore( + images=[] +) + +# Ongoing actions is a dictionary of lists of actions. The keys are the action types and the values +# are the lists of actions of that type. The action types are 'collect', 'comms' and 'proc'. +# This dictionary is used to keep track of ongoing actions. +ongoing_actions = { + 'collect': [], + 'comms': [], + 'proc': [], +} + +rfis = setup_rfis(start_time, num_rois=2, time_varying=True) + +# Simulate Groundtruth +# ==================== +gnd_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), + ConstantVelocity(0.), + ConstantVelocity(0.)]) + +timestamps = [] +for k in range(0, num_iter + 1, 2): + timestamps.append(start_time + timedelta(seconds=k)) +truths = set() +rois = extract_rois(rfis) +for roi in rois: + lon_min, lat_min = roi.corners[0].longitude, roi.corners[0].latitude + lon_max, lat_max = roi.corners[1].longitude, roi.corners[1].latitude + for i in range(2): + lat = np.random.uniform(lat_min, lat_max) + lon = np.random.uniform(lon_min, lon_max) + truth = GroundTruthPath([GroundTruthState([lon, 0.00, lat, 0.00, 0, 0], + timestamp=start_time)]) + for timestamp in timestamps[1:]: + truth.append(GroundTruthState( + gnd_transition_model.function(truth[-1], noise=False, + time_interval=timedelta(seconds=1)), + timestamp=timestamp)) + truths.add(truth) + +# Plot groundtruth, sensors and rois +# ============================ +# fig = plt.figure(figsize=(10, 6)) +# ax = fig.add_subplot(111) +# 
ax.set_xlim(surveillance_region[0][0]-1, surveillance_region[0][1]+1) +# ax.set_ylim(surveillance_region[1][0], surveillance_region[1][1]) +# ax.set_xlabel('Longitude') +# ax.set_ylabel('Latitude') +# ax.set_title('Groundtruth and initial sensor locations') +# ax.set_aspect('equal') +# for i, track in enumerate(truths): +# ax.plot([state.state_vector[0] for state in track], +# [state.state_vector[2] for state in track], +# color=colors[i], linestyle='--', linewidth=2, label=f'Truth {i+1}') +# ax.plot(track[-1].state_vector[0], track[-1].state_vector[2], +# color=colors[i], marker='o', markersize=5) +# asset = assets.assets[0] +# sensor = get_sensor(asset.asset_status.location, asset.asset_description.fov_radius) +# footprint = sensor.footprint +# x, y = footprint.exterior.xy +# ax.plot(x, y, color='r', label=f'Sensor') +# for i, roi in enumerate(rois): +# lon_min, lat_min = roi.corners[0].longitude, roi.corners[0].latitude +# lon_max, lat_max = roi.corners[1].longitude, roi.corners[1].latitude +# ax.plot([lon_min, lon_max, lon_max, lon_min, lon_min], +# [lat_min, lat_min, lat_max, lat_max, lat_min], +# color='k', linestyle='--', linewidth=0.1, label=f'ROI {i+1}') +# ax.legend() +# plt.show() + +# Tracking Components +# =================== +# Transition model +transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.000001), + ConstantVelocity(0.000001), + ConstantVelocity(0.000001)]) + +# Main tracker +tracker = SMCPHD_JIPDA(birth_density=birth_density, transition_model=transition_model, + measurement_model=None, prob_detection=prob_detect, + prob_death=prob_death, prob_birth=prob_birth, + birth_rate=birth_rate, clutter_intensity=clutter_intensity, + num_samples=num_particles, birth_scheme=birth_scheme, + start_time=start_time) + +# Evaluator tracker +eval_tracker = SMCPHD_IGNN(birth_density=birth_density, transition_model=transition_model, + measurement_model=None, prob_detection=prob_detect, + prob_death=prob_death, prob_birth=prob_birth, + 
birth_rate=birth_rate, clutter_intensity=clutter_intensity, + num_samples=num_particles, birth_scheme=birth_scheme, + start_time=start_time) + + +# Evaluator +num_samples = 40 # Number of monte-carlo samples for Monte-Carlo Rollout +num_timesteps = 5 # Number of timesteps for Monte-Carlo Rollout +interval = timedelta(seconds=1) # Interval between timesteps for Monte-Carlo Rollout +evaluator = CommsAndProcEvaluator( + tracker=eval_tracker, + num_timesteps=num_timesteps, + interval=interval, + num_samples=num_samples, +) + +def optimise(tracks, image_store, network_topology, assets, rfis, ongoing_actions, timestamp): + # Get all possible action configurations + configs = enumerate_action_configs(image_store, network_topology, assets, rfis, + ongoing_actions, timestamp) + + # For each action configuration + rewards = [] + for config in configs: + # Evaluate the action configuration + reward = evaluator(config, tracks, image_store, network_topology, assets, rfis, + ongoing_actions, timestamp) + + rewards.append(reward) + + print(f'\nRewards: \n--------------------------------------------------------') + for i, config in enumerate(configs): + print(f'{rewards[i]:.2f} - {config}') + # Find the best action configuration + max_reward = np.max(rewards) + best_inds = np.argwhere(rewards == max_reward).flatten() + best_ind = np.random.choice(best_inds) + best_config = configs[best_ind] + return best_config + + +if PLOT: + fig = plt.figure(figsize=(10, 6)) + ax = fig.add_subplot(111) + +tracks = set() +processed_images = list() +for k, timestamp in enumerate(timestamps): + print(f'\n\nIter: {k+1} - Timestamp: {timestamp}\n ===========================================') + truth_states = OrderedSet(truth[timestamp] for truth in truths) + + # Update ongoing actions + completed_comms_actions = [] + completed_proc_actions = [] + for comms_action in ongoing_actions['comms']: + if timestamp >= comms_action.end_time: + completed_comms_actions.append(comms_action) + 
comms_action.image.node_id = comms_action.target_node_id + for proc_action in ongoing_actions['proc']: + if timestamp >= proc_action.end_time: + completed_proc_actions.append(proc_action) + for comms_action in completed_comms_actions: + ongoing_actions['comms'].remove(comms_action) + for proc_action in completed_proc_actions: + ongoing_actions['proc'].remove(proc_action) + try: + image_store.images.remove(proc_action.image) + except ValueError: + pass + + # Optimise actions + chosen_actions = optimise(tracks, image_store, network_topology, assets, rfis, + ongoing_actions, timestamp) + print(f'Chosen actions: \n--------------------------------------------------------') + print(chosen_actions) + + + # Perform chosen actions + queue_actions(chosen_actions, image_store, ongoing_actions) + sensor_action = chosen_actions[0] + if sensor_action: + coll_action = sensor_action[0] + sensor = get_sensor(coll_action.image.location, coll_action.image.fov_radius) + proc_actions = [action.proc_action for action in chosen_actions if action] + proc_actions.sort(key=lambda x: x.image.collection_time) + for i, proc_action in enumerate(proc_actions): + if proc_action.image in processed_images: + continue + else: + processed_images.append(proc_action.image) + sub_sensor = get_sensor(proc_action.image.location, proc_action.image.fov_radius, + proc_action.algorithm.prob_detection, + proc_action.algorithm.false_alarm_density) + p = sub_sensor.footprint + tracker.prob_detect = _prob_detect_func([p], proc_action.algorithm.prob_detection) + tracker.clutter_intensity = proc_action.algorithm.false_alarm_density/p.area + + # Observe the ground truth + detections = sensor.measure(truth_states, noise=True) + detections = list(detections) + # Track using main tracker + tracks = tracker.track(detections, proc_action.image.collection_time) + + # Print debug info + tracks = list(tracks) + print(f'\n Action {i + 1} --------------------------------------------------------') + for track in tracks: + 
print(f'Track {track.id} - Exist prob: {track.exist_prob}') + + if PLOT: + ax.cla() + ax.set_xlim(surveillance_region[0][0] - 1, surveillance_region[0][1] + 1) + ax.set_ylim(surveillance_region[1][0], surveillance_region[1][1]) + ax.set_xlabel('Longitude') + ax.set_ylabel('Latitude') + ax.set_title('Groundtruth and initial sensor locations') + ax.set_aspect('equal') + for i, track in enumerate(truths): + ax.plot([state.state_vector[0] for state in track], + [state.state_vector[2] for state in track], + color=colors[i], linestyle='--', linewidth=2, label=f'Truth {i + 1}') + ax.plot(track[-1].state_vector[0], track[-1].state_vector[2], + color=colors[i], marker='o', markersize=5) + + for track in tracks: + plot_cov_ellipse(track.covar[[0, 2], :][:, [0, 2]], track.state_vector[[0, 2], :], + edgecolor='r', facecolor='none', ax=ax) + ax.plot(track.state_vector[0, 0], track.state_vector[2, 0], 'rx', markersize=5) + + footprint = sensor.footprint + x, y = footprint.exterior.xy + ax.plot(x, y, color='r', label=f'Sensor') + for i, roi in enumerate(rois): + lon_min, lat_min = roi.corners[0].longitude, roi.corners[0].latitude + lon_max, lat_max = roi.corners[1].longitude, roi.corners[1].latitude + ax.plot([lon_min, lon_max, lon_max, lon_min, lon_min], + [lat_min, lat_min, lat_max, lat_max, lat_min], + color='k', linestyle='--', linewidth=0.1, label=f'ROI {i + 1}') + ax.legend() + plt.pause(0.1) \ No newline at end of file diff --git a/examples/reactive-isr/comms_proc/evaluator.py b/examples/reactive-isr/comms_proc/evaluator.py new file mode 100644 index 000000000..27a78c9e5 --- /dev/null +++ b/examples/reactive-isr/comms_proc/evaluator.py @@ -0,0 +1,190 @@ +import copy +import datetime +from typing import List, Any, Tuple, Set +import itertools as it +from uuid import uuid4 + +import numpy as np +from matplotlib.path import Path +from scipy.stats import poisson +from shapely import unary_union + +from reactive_isr_core.data import ImageStore, NetworkTopology, AssetList, RFI 
+from stonesoup.base import Base, Property +from stonesoup.custom.functions import eval_rfi_new +from stonesoup.custom.functions.rollout import CollectionAction, CommsAction, ProcAction, \ + rollout_actions, proc_actions_from_config_sequence, get_sensor, simulate_new_tracks +from stonesoup.custom.tracker import SMCPHD_JIPDA +from stonesoup.functions import gm_reduce_single +from stonesoup.tracker import Tracker +from stonesoup.types.array import StateVectors +from stonesoup.types.numeric import Probability +from stonesoup.types.state import ParticleState +from stonesoup.types.track import Track +from stonesoup.types.update import GaussianStateUpdate + + +class CommsAndProcEvaluator(Base): + """A reward function which calculates the potential reduction in the uncertainty of track estimates + if a particular action is taken by a sensor or group of sensors. + + Given a configuration of sensors and actions, a metric is calculated for the potential + reduction in the uncertainty of the tracks that would occur if the sensing configuration + were used to make an observation. A larger value indicates a greater reduction in + uncertainty. 
+ """ + + tracker: Tracker = Property(doc="Tracker used to track the tracks") + num_timesteps: int = Property(doc="Number of timesteps to rollout") + interval: datetime.timedelta = Property(doc="Interval between timesteps", + default=datetime.timedelta(seconds=1)) + num_samples: int = Property(doc="Number of samples to take for each timestep", default=30) + prob_survive: Probability = Property(doc="Probability of survival", default=Probability(0.99)) + use_variance: bool = Property(doc="Use variance in prioritisation", default=False) + + def __call__(self, config: Tuple[CollectionAction, CommsAction, ProcAction], tracks: Set[Track], + image_store: ImageStore, network_topology: NetworkTopology, assets: AssetList, + rfis: List[RFI], ongoing_actions, timestamp, *args, **kwargs): + + if not len(rfis): + return 0 + + # Rollout actions + config_seq_list = rollout_actions(config, image_store, network_topology, assets, rfis, + ongoing_actions, self.num_samples, self.num_timesteps, + self.interval, timestamp) + rewards = [] + # Evaluate each rollout + for config_seq in config_seq_list: + reward = 0 + # Get all processing actions + proc_actions = proc_actions_from_config_sequence(config_seq) + # Sort processing actions by image collection time + sorted_proc_actions = sorted(proc_actions, key=lambda x: x.image.collection_time) + tracks_copy = set(copy.copy(track) for track in tracks) + + # For each processing action + for i, proc_action in enumerate(sorted_proc_actions): + # Get the image and algorithm + image = proc_action.image + algorithm = proc_action.algorithm + + # The current time is the image collection time + current_time = image.collection_time + + # Create a sensor + sensor = get_sensor(image.location, image.fov_radius, algorithm.prob_detection, + algorithm.false_alarm_density) + + # Predict tracks to current time + predicted_tracks = set() + for track in tracks_copy: + predicted_track = copy.copy(track) + 
predicted_track.append(self.tracker._predictor.predict(track, timestamp=current_time)) + predicted_tracks.add(predicted_track) + + # Simulate new tracks + new_tracks = simulate_new_tracks(sensor, current_time, self.tracker.birth_density) + tracks_copy = set(tracks_copy) + tracks_copy |= new_tracks + predicted_tracks |= new_tracks + + # Use the sensor to generate detections + detections = {detection + for detection in sensor.measure(predicted_tracks, noise=False, + timestamp=current_time)} + # Configure the tracker's probability of detection based on the image footprint + p = sensor.footprint + self.tracker.prob_detect = _prob_detect_func([p], + proc_action.algorithm.prob_detection) + self.tracker.clutter_intensity = proc_action.algorithm.false_alarm_density/p.area + + # Update tracks with detections + tracks_copy = self._update_tracks(tracks_copy, detections, current_time) + + for rfi in rfis: + reward += eval_rfi_new(rfi, tracks_copy, use_variance=self.use_variance, + timestamp=proc_action.end_time) + rewards.append(reward) + return np.max(rewards) + + def _update_tracks(self, tracks, detections, timestamp): + tracks = list(tracks) + hypotheses = self.tracker._associator.generate_hypotheses(tracks, detections, timestamp) + associations = self.tracker._associator.associate(tracks, detections, + timestamp, hypotheses=hypotheses) + for track, multihypothesis in associations.items(): + if isinstance(self.tracker, SMCPHD_JIPDA): + # calculate each Track's state as a Gaussian Mixture of + # its possible associations with each detection, then + # reduce the Mixture to a single Gaussian State + posterior_states = [] + posterior_state_weights = [] + for hypothesis in multihypothesis: + posterior_state_weights.append(hypothesis.probability) + if hypothesis: + posterior_states.append(self.tracker._updater.update(hypothesis)) + else: + posterior_states.append(hypothesis.prediction) + + # Merge/Collapse to single Gaussian + means = StateVectors([state.state_vector for state in 
posterior_states]) + covars = np.stack([state.covar for state in posterior_states], axis=2) + weights = np.asarray(posterior_state_weights) + + post_mean, post_covar = gm_reduce_single(means, covars, weights) + + track.append(GaussianStateUpdate( + np.array(post_mean), np.array(post_covar), + multihypothesis, + multihypothesis[0].prediction.timestamp)) + else: + timestamp_m1 = track.timestamp \ + if self.tracker.predict else track[-2].timestamp + time_interval = timestamp - timestamp_m1 + track.append(multihypothesis.prediction) + prob_survive = np.exp(-self.tracker.prob_death * time_interval.total_seconds()) + pred_prob_exist = prob_survive * track.exist_prob + non_exist_weight = 1 - pred_prob_exist + target_hyps = hypotheses[track] + if multihypothesis: + # Update track + state_post = self.tracker._updater.update(multihypothesis) + track.append(state_post) + weights = np.array([hyp.probability for hyp in target_hyps])*pred_prob_exist + new_exist_prob = np.sum(weights) / (non_exist_weight + np.sum(weights)) + track.exist_prob = new_exist_prob + else: + non_det_weight = target_hyps.get_missed_detection_probability() + new_exist_prob = non_det_weight / (non_exist_weight + non_det_weight) + track.exist_prob = new_exist_prob + return tracks + + + +def _prob_detect_func(fovs, prob_detect): + """Closure to return the probability of detection function for a given environment scan""" + # Get the union of all field of views + fovs_union = unary_union(fovs) + if fovs_union.geom_type == 'MultiPolygon': + fovs = [poly for poly in fovs_union] + else: + fovs = [fovs_union] + + paths = [Path(poly.boundary.coords) for poly in fovs] + + # Probability of detection nested function + def prob_detect_func(state): + for path_p in paths: + if isinstance(state, ParticleState): + prob_detect_arr = np.full((len(state),), Probability(0.01)) + points = state.state_vector[[0, 2], :].T + inside_points = path_p.contains_points(points) + prob_detect_arr[inside_points] = prob_detect + return 
prob_detect_arr + else: + points = state.state_vector[[0, 2], :].T + return prob_detect if np.alltrue(path_p.contains_points(points)) \ + else Probability(0.01) + + return prob_detect_func \ No newline at end of file diff --git a/examples/reactive-isr/comms_proc/utils.py b/examples/reactive-isr/comms_proc/utils.py new file mode 100644 index 000000000..7c9d061ac --- /dev/null +++ b/examples/reactive-isr/comms_proc/utils.py @@ -0,0 +1,262 @@ +from datetime import datetime, timedelta +from uuid import uuid4 + +import numpy as np +from matplotlib import pyplot as plt +from matplotlib.patches import Ellipse + +from reactive_isr_core.data import Algorithm, ProcessingStatistics, Storage, Node, \ + AvailableAlgorithms, Availability, Edge, TraversalTime, NetworkTopology, AssetList, Asset, \ + AssetDescription, SensorType, AssetStatus, GeoLocation, RFI, \ + TaskType, GeoRegion, PriorityOverTime, ThresholdOverTime + + +def setup_network(sensor_location, start_time): + algorithms = [ + Algorithm( + cost=1, + prob_detection=0.6, + false_alarm_density=0.1, + name="Algorithm1", + processing_statistics=ProcessingStatistics( + mu=1, + sigma=0.1, + lower_truncation=0 + ) + ), + Algorithm( + cost=2, + prob_detection=0.75, + false_alarm_density=0.05, + name="Algorithm2", + processing_statistics=ProcessingStatistics( + mu=3, + sigma=0.1, + lower_truncation=0 + ) + ), + Algorithm( + cost=3, + prob_detection=0.9, + false_alarm_density=0.01, + name="Algorithm3", + processing_statistics=ProcessingStatistics( + mu=5, + sigma=0.1, + lower_truncation=0 + ) + ) + ] + + dummy_storage = Storage( + capacity=1, + contents=[] + ) + + sensor_node = Node( + id=uuid4(), + processing_capability=AvailableAlgorithms( + algorithms=[algorithms[0]] + ), + total_task_capacity=dict(), + availability=Availability.AVAILABLE, + storage=dummy_storage, + peers=[] + ) + + fob_node = Node( + id=uuid4(), + processing_capability=AvailableAlgorithms( + algorithms=[algorithms[1]] + ), + total_task_capacity=dict(), + 
availability=Availability.AVAILABLE, + storage=dummy_storage, + peers=[] + ) + + cic_node = Node( + id=uuid4(), + processing_capability=AvailableAlgorithms( + algorithms=[algorithms[2]] + ), + total_task_capacity=dict(), + availability=Availability.AVAILABLE, + storage=dummy_storage, + peers=[] + ) + + nodes = [sensor_node, fob_node, cic_node] + + edges = [ + Edge( + id=uuid4(), + source_node=sensor_node.id, + target_node=fob_node.id, + traversal_time=TraversalTime( + mu=1, + sigma=0.1, + lower_truncation=0 + ) + ), + Edge( + id=uuid4(), + source_node=fob_node.id, + target_node=cic_node.id, + traversal_time=TraversalTime( + mu=3, + sigma=0.1, + lower_truncation=0 + ) + ), + Edge( + id=uuid4(), + source_node=sensor_node.id, + target_node=cic_node.id, + traversal_time=TraversalTime( + mu=4, + sigma=0.2, + lower_truncation=0 + ) + ), + ] + + sensor_node.peers = [fob_node.id] + fob_node.peers = [sensor_node.id, cic_node.id] + cic_node.peers = [fob_node.id] + + network_topology = NetworkTopology( + nodes=nodes, + edges=edges + ) + + assets = AssetList( + assets=[ + Asset( + asset_description=AssetDescription( + id=sensor_node.id, + name="Sensor", + sensor_types=[SensorType.AERIAL_V_CAMERA], + response_timeout=1, + fov_radius=30, + ), + asset_status=AssetStatus( + time=start_time, + id=sensor_node.id, + location=GeoLocation( + latitude=sensor_location[1], + longitude=sensor_location[0], + altitude=sensor_location[2] + ), + availability=Availability.AVAILABLE + ), + target_detections=[] + ), + ] + ) + + return network_topology, assets + + +def setup_rfis(start_time, num_rois, time_varying): + roi1 = GeoRegion(corners=[ + GeoLocation( + longitude=-3.3, + latitude=51.1, + altitude=0), + GeoLocation( + longitude=-2.9, + latitude=51.5, + altitude=0)] + ) + roi2 = GeoRegion(corners=[ + GeoLocation( + longitude=-2.4, + latitude=52.1, + altitude=0), + GeoLocation( + longitude=-2, + latitude=52.5, + altitude=0)] + ) + rois=[roi1] + if num_rois > 1: + rois.append(roi2) + priority 
= [5, 5] + if time_varying: + priority = [5, 0] + rfi = RFI(id=uuid4(), + task_type=TaskType.COUNT, + region_of_interest=rois, + start_time=datetime.now(), + end_time=datetime.now(), + priority_over_time=PriorityOverTime( + timescale=[start_time, start_time+timedelta(seconds=400)], + priority=priority), + targets=[], + threshold_over_time=ThresholdOverTime(timescale=[start_time], + threshold=[.01])) + return [rfi] + + +def eigsorted(cov): + vals, vecs = np.linalg.eigh(cov) + order = vals.argsort()[::-1] + return vals[order], vecs[:, order] + + +def compute_ellipse(cov, pos, nstd=1, **kwargs): + + def eigsorted(cov): + vals, vecs = np.linalg.eigh(cov) + order = vals.argsort()[::-1] + return vals[order], vecs[:, order] + + vals, vecs = eigsorted(cov) + theta = np.degrees(np.arctan2(*vecs[:, 0][::-1])) + + # Width and height are "full" widths, not radius + width, height = 2 * nstd * np.sqrt(vals) + ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, + alpha=0.4, **kwargs) + return ellip.get_path() + + +def plot_cov_ellipse(cov, pos, nstd=1, ax=None, **kwargs): + """ + Plots an `nstd` sigma error ellipse based on the specified covariance + matrix (`cov`). Additional keyword arguments are passed on to the + ellipse patch artist. + Parameters + ---------- + cov : The 2x2 covariance matrix to base the ellipse on + pos : The location of the center of the ellipse. Expects a 2-element + sequence of [x0, y0]. + nstd : The radius of the ellipse in numbers of standard deviations. + Defaults to 1 standard deviation. + ax : The axis that the ellipse will be plotted on. Defaults to the + current axis. + Additional keyword arguments are passed on to the ellipse patch. 
+ Returns + ------- + A matplotlib ellipse artist + """ + + def eigsorted(cov): + vals, vecs = np.linalg.eigh(cov) + order = vals.argsort()[::-1] + return vals[order], vecs[:, order] + + if ax is None: + ax = plt.gca() + + vals, vecs = eigsorted(cov) + theta = np.degrees(np.arctan2(*vecs[:, 0][::-1])) + + # Width and height are "full" widths, not radius + width, height = 2 * nstd * np.sqrt(vals) + ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, + alpha=0.4, **kwargs) + + ax.add_artist(ellip) + return ellip diff --git a/stonesoup/custom/functions/__init__.py b/stonesoup/custom/functions/__init__.py index 5592bb372..0d5ff0cdb 100644 --- a/stonesoup/custom/functions/__init__.py +++ b/stonesoup/custom/functions/__init__.py @@ -1,3 +1,4 @@ +import copy from functools import partial import math from typing import Set, List, Sequence @@ -514,6 +515,97 @@ def eval_rfi(rfi: RFI, tracks: Sequence[Track], sensors: Sequence[Sensor], return config_metric +def eval_rfi_new(rfi: RFI, tracks: Sequence[Track], timestamp, + phd_state: ParticleState = None, use_variance=True): + num_samples = 100 + mu_overall = 0 + var_overall = 0 # np.inf if len(valid_tracks) == 0 else 0 + config_metric = 0 + + target_types = [t.target_type.value for t in rfi.targets] + valid_tracks = [track for track in tracks + if not (target_types) + or (target_types and any(item in track.metadata['target_type_confidences'] + for item in target_types))] + for roi in rfi.region_of_interest: + + xmin, ymin = roi.corners[0].longitude, roi.corners[0].latitude + xmax, ymax = roi.corners[1].longitude, roi.corners[1].latitude + geom = Polygon([(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]) + path_p = Path(geom.boundary.coords) + + # Calculate PHD density inside polygon + if phd_state is not None: + points = phd_state.state_vector[[0, 2], :].T + inside_points = path_p.contains_points(points) + if np.sum(inside_points) > 0: + # The mean of the PHD density inside the polygon is the sum of the 
weights of the + # particles inside the polygon + mu_overall = np.exp(logsumexp(np.log(phd_state.weight[inside_points].astype(float)))) + # The variance of a Poisson distribution is equal to the mean + var_overall = mu_overall + + # Calculate number of tracks inside polygon + for track in valid_tracks: + # Sample points from the track state + points = multivariate_normal.rvs(mean=track.state_vector[[0, 2]].ravel(), + cov=track.covar[[0, 2], :][:, [0, 2]], + size=num_samples) + + # Check which points are inside the polygon + inside_points = path_p.contains_points(points) + # Probability of existence inside the polygon is the fraction of points inside the polygon + # times the probability of existence + p_success = float(track.exist_prob) * (np.sum(inside_points) / num_samples) + # Mean of a Bernoulli distribution is equal to the probability of success + mu_overall += p_success + # Variance of a Bernoulli distribution is equal to the probability of success, + # times the probability of failure + var_overall += p_success * (1 - p_success) + + # Compute time varying reward + # NOTE: We assume that the priority over time is monotonically decreasing + potential_reward = 0 # Default reward + # Find index of the closest earlier timestamp + inds_lt = np.flatnonzero(np.array(rfi.priority_over_time.timescale) <= timestamp) + # If there are no earlier timestamps, it means that the RFI has not been active yet + if len(inds_lt) > 0: + max_reward_idx = inds_lt[-1] + max_reward_time = rfi.priority_over_time.timescale[max_reward_idx] + max_potential_reward = rfi.priority_over_time.priority[max_reward_idx] + # Find index of the closest later timestamp + inds_gt = np.flatnonzero(np.array(rfi.priority_over_time.timescale) > timestamp) + # If there are later timestamps, we interpolate between the two points + if len(inds_gt) > 0: + min_reward_idx = inds_gt[0] + min_reward_time = rfi.priority_over_time.timescale[min_reward_idx] + min_potential_reward = 
rfi.priority_over_time.priority[min_reward_idx] + dt_total = min_reward_time - max_reward_time + dt = timestamp - max_reward_time + dy = max_potential_reward - min_potential_reward + # Linearly interpolate between the two points + potential_reward = max_potential_reward - dy * (dt / dt_total) + else: + # If there are no later timestamps, we reward the max potential reward + potential_reward = max_potential_reward + + if rfi.task_type == TaskType.COUNT: + if mu_overall > 0 and var_overall < rfi.threshold_over_time.threshold[0]: + config_metric += potential_reward + if use_variance: + config_metric += 1 / var_overall + else: + a=2 + elif rfi.task_type == TaskType.FOLLOW: + for target in rfi.targets: + track = next((track for track in tracks if track.id == str(target.target_UUID)), None) + if track is not None: + var = track.covar[0, 0] + track.covar[2, 2] + if var < rfi.threshold_over_time.threshold[0]: + config_metric += potential_reward + + return config_metric + proj_wgs84 = pyproj.Proj('+proj=longlat +datum=WGS84') diff --git a/stonesoup/custom/functions/rollout.py b/stonesoup/custom/functions/rollout.py new file mode 100644 index 000000000..556c8104c --- /dev/null +++ b/stonesoup/custom/functions/rollout.py @@ -0,0 +1,653 @@ +import copy +import itertools +import math +from datetime import timedelta, datetime +from enum import Enum +from typing import Union +from uuid import uuid4, UUID + +import numpy as np +from pydantic import BaseModel +from scipy.stats import poisson +from shapely import Point, Polygon + +from reactive_isr_core.data import TargetType, Availability, ActionStatus, Image, GeoLocation, \ + Algorithm + +from stonesoup.custom.sensor.movable import MovableUAVCamera +from stonesoup.models.clutter import ClutterModel +from stonesoup.types.array import StateVector +from stonesoup.types.numeric import Probability +from stonesoup.types.state import GaussianState +from stonesoup.types.track import Track + + +class ActionTupleType(Enum): + """Enum for 
class ActionTupleType(Enum):
    # NOTE(review): class statement reconstructed — the original header starts
    # above this chunk; confirm the base class (Enum vs IntEnum) upstream.
    """Enumeration of the different types of action tuples."""
    NO_ACTION = 0
    ONBOARD = 1
    REMOTE = 2
    COMMS_AND_PROC = 3
    PROC_ONLY = 4


class ActionTuple(tuple):
    """Immutable (collection, comms, processing) action triple.

    Any element may be ``None``. On construction the triple classifies
    itself into an :class:`ActionTupleType` based on which elements are
    present.
    """

    def __new__(cls, tup=None):
        # Accept None (or an empty/falsy value) as the "no action" triple.
        if not tup:
            coll_action, comms_action, proc_action = None, None, None
        else:
            coll_action, comms_action, proc_action = tup
        # Fix: use ``cls`` rather than a hard-coded class reference (the
        # original passed ``self``/``ActionTuple``, which breaks subclassing).
        return tuple.__new__(cls, (coll_action, comms_action, proc_action))

    def __init__(self, *args, **kwargs):
        # Classify the triple once, at construction time.
        if self.coll_action is None and self.comms_action is None \
                and self.proc_action is None:
            self._action_tuple_type = ActionTupleType.NO_ACTION
        elif self.coll_action is None:
            if self.comms_action is None:
                self._action_tuple_type = ActionTupleType.PROC_ONLY
            else:
                self._action_tuple_type = ActionTupleType.COMMS_AND_PROC
        else:
            # Collection present: onboard if the image is processed on the
            # collecting node itself, remote otherwise.
            if self.coll_action.node_id == self.proc_action.node_id:
                self._action_tuple_type = ActionTupleType.ONBOARD
            else:
                self._action_tuple_type = ActionTupleType.REMOTE

    def __bool__(self):
        # Truthy iff at least one of the three actions is set.
        return any(action is not None for action in self)

    def __str__(self):
        if self._action_tuple_type == ActionTupleType.NO_ACTION:
            return "No action"
        elif self._action_tuple_type == ActionTupleType.PROC_ONLY:
            return (f"PROC_ONLY(node={self.proc_action.node_id}, "
                    f"image={self.proc_action.image.id}, "
                    f"algorithm={self.proc_action.algorithm.name})")
        elif self._action_tuple_type == ActionTupleType.COMMS_AND_PROC:
            return (f"COMMS_PROC(from={self.comms_action.source_node_id}, "
                    f"to={self.comms_action.target_node_id}, "
                    f"image={self.proc_action.image.id}, "
                    f"algorithm={self.proc_action.algorithm.name})")
        elif self._action_tuple_type == ActionTupleType.ONBOARD:
            return (f"ONBOARD(image={self.coll_action.image.id}, "
                    f"algorithm={self.proc_action.algorithm.name})")
        else:
            return (f"REMOTE(from={self.coll_action.node_id}, "
                    f"to={self.proc_action.node_id}, "
                    f"image={self.coll_action.image.id}, "
                    f"algorithm={self.proc_action.algorithm.name})")

    def __repr__(self):
        return str(self)

    @property
    def coll_action(self):
        # First element: the collection action (or None).
        return self[0]

    @property
    def comms_action(self):
        # Second element: the comms action (or None).
        return self[1]

    @property
    def proc_action(self):
        # Third element: the processing action (or None).
        return self[2]

    @property
    def is_onboard(self):
        """Check if the processing is performed onboard"""
        return self._action_tuple_type == ActionTupleType.ONBOARD

    @property
    def type(self):
        """The :class:`ActionTupleType` classification of this triple."""
        return self._action_tuple_type


class CollectionAction(BaseModel):
    """An image-collection action performed by a sensor node."""
    id: UUID
    node_id: UUID
    location: GeoLocation
    status: ActionStatus
    image: Image


class CommsAction(BaseModel):
    """Transfer of an image from a source node to a target node."""
    id: UUID
    source_node_id: UUID
    target_node_id: UUID
    image: Image
    status: ActionStatus
    start_time: Union[None, datetime]
    dt: timedelta

    @property
    def end_time(self):
        # Completion time of the transfer.
        return self.start_time + self.dt


class ProcAction(BaseModel):
    """Processing of an image with a given algorithm on a node."""
    id: UUID
    node_id: UUID
    image: Image
    algorithm: Algorithm
    status: ActionStatus
    start_time: Union[None, datetime]
    dt: timedelta

    @property
    def end_time(self):
        # Completion time of the processing.
        return self.start_time + self.dt


def cover_rectangle_with_minimum_overlapping_circles(x1, y1, x2, y2, radius):
    """Compute circle centres (of the given radius) that cover the rectangle
    (x1, y1)-(x2, y2), using the hexagonal-lattice construction of
    https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1343643

    Returns a list of (x, y) centre tuples.
    """
    width = x2 - x1
    height = y2 - y1

    # Fast path: if a single circle centred on the rectangle covers at least
    # 90% of its area, that one centre is considered sufficient.
    p = Point(x1 + width/2, y1 + height/2).buffer(radius)
    pol = Polygon([(x1, y1), (x2, y1), (x2, y2), (x1, y2)])
    intersection = p.intersection(pol)
    if intersection.area >= 0.9*pol.area:
        return [(x1 + width/2, y1 + height/2)]

    # Number of lattice rows (n) needed to span the height
    z1 = height / (np.sqrt(3) * radius)
    re1 = z1 - math.floor(z1)
    n = math.floor(z1)
    if re1 <= 1/2:
        n += 1
    else:
        n += 2

    # Number of lattice columns (m) needed to span the width
    z2 = width / (3/2 * radius)
    re2 = z2 - math.floor(z2)
    m = math.floor(z2)
    if re2 <= 2/3:
        m += 1
    else:
        m += 2

    centers = []
    for k in range(1, n+1):
        for l in range(1, m+1):
            # Odd columns sit on the base row; even columns are offset by
            # half a row height (hexagonal packing).
            if l % 2 == 1:
                center = ((0.5 + (l-1) * 3/2) * radius, (k-1)*np.sqrt(3)*radius)
            else:
                center = ((0.5 + (l-1) * 3/2) * radius,
                          (k-1)*np.sqrt(3)*radius + np.sqrt(3)/2*radius)
            offset_center = (center[0] + x1, center[1] + y1)
            cp = Point(offset_center)
            # Keep only centres whose circle can still intersect the rectangle.
            if cp.distance(pol) <= np.sqrt(3)/2*radius:
                centers.append(offset_center)
    return centers
def sample_dt(stats):
    """Sample a duration from N(stats.mu, stats.sigma), truncated below at
    ``stats.lower_truncation``, and return it as a :class:`timedelta`."""
    seconds = np.random.normal(stats.mu, stats.sigma)
    # Clamp from below rather than re-sampling.
    if seconds < stats.lower_truncation:
        seconds = stats.lower_truncation
    return timedelta(seconds=seconds)


def extract_rois(rfis):
    """Extract all regions of interest from a list of rfis."""
    return [roi for rfi in rfis for roi in rfi.region_of_interest]


def get_processing_nodes(network_topology):
    """Return the nodes that can run at least one processing algorithm."""
    return [node for node in network_topology.nodes
            if len(node.processing_capability.algorithms) > 0]


def get_edge(network_topology, source_node_id, target_node_id):
    """Return the first edge connecting the source node to the target node.

    Raises StopIteration if no such edge exists.
    """
    return next(e for e in network_topology.edges
                if e.source_node == source_node_id
                and e.target_node == target_node_id)


def get_earliest_proc_start_time(ongoing_actions_per_node, node_id, default_time):
    """Get the earliest start time for processing actions for a given node.

    If there are no ongoing processing actions for the node, then the earliest
    start time is the default time. Otherwise, it is the end time of the latest
    ongoing processing action (the lists are assumed sorted by end time).
    """
    ongoing = ongoing_actions_per_node[node_id]
    return ongoing[-1].end_time if ongoing else default_time


def exists_ongoing_comms_proc_action_for_image(ongoing_actions, image_id):
    """Check if there is an ongoing comms or processing action for a given image."""
    for kind in ('comms', 'proc'):
        if any(a for a in ongoing_actions[kind] if a.image.id == image_id):
            return True
    return False


def enumerate_actions_for_processing_node(node, image_store, network, ongoing_actions,
                                          earliest_proc_start_time, timestamp):
    """Enumerate all possible actions for a processing node that is not a sensor.

    If the node is not an asset (sensor), then we don't need to consider
    collection actions, but we do need to consider comms and processing
    actions for existing images.

    Arguments
    ---------
    node : Node
        The processing node
    image_store : ImageStore
        The image store, containing all images that have been collected
    network : NetworkTopology
        The network topology, containing all nodes and edges
    ongoing_actions : Dict[str, List]
        Ongoing 'comms' and 'proc' actions
    earliest_proc_start_time : datetime
        The earliest start time for processing actions
    timestamp : datetime
        The current timestamp

    Returns
    -------
    possible_actions : List[ActionTuple]
        A list of possible actions for the node
    """
    possible_actions = [ActionTuple()]  # The "do nothing" option
    node_id = node.id
    for image in image_store.images:

        # Skip images that are already being transferred or processed.
        if exists_ongoing_comms_proc_action_for_image(ongoing_actions, image.id):
            continue

        # Images held on another node must first be transferred here; local
        # images get a zero-duration (instantaneous) comms action.
        if image.node_id != node_id:
            edge = get_edge(network, image.node_id, node_id)
            dt = sample_dt(edge.traversal_time)
        else:
            dt = timedelta(seconds=0)
        comms_action = CommsAction(
            id=uuid4(),
            source_node_id=image.node_id,
            target_node_id=node_id,
            status=ActionStatus.CREATED,
            start_time=timestamp,
            image=image,
            dt=dt
        )
        # Processing cannot start before both the transfer finishes and the
        # node becomes free.
        proc_start = max(comms_action.end_time, earliest_proc_start_time)
        # One candidate processing action per available algorithm.
        for algorithm in node.processing_capability.algorithms:
            proc_action = ProcAction(
                id=uuid4(),
                node_id=node.id,
                algorithm=algorithm,
                status=ActionStatus.CREATED,
                start_time=proc_start,
                image=image,
                dt=sample_dt(algorithm.processing_statistics)
            )
            possible_actions.append(ActionTuple((None, comms_action, proc_action)))
    return possible_actions
def enumerate_actions_for_asset(node, fov_radius, network, rois, ongoing_proc_actions_per_node,
                                earliest_proc_start_time, timestamp):
    """Enumerate all possible actions for a processing node that is an asset (sensor).

    If the node is an asset (sensor), then we need to consider collection
    actions, comms actions and processing actions.

    NOTE: We assume that a sensor will not consider processing existing images.

    Arguments
    ---------
    node : Node
        The processing node
    fov_radius : float
        The field of view radius of the sensor (in km)
    network : NetworkTopology
        The network topology, containing all nodes and edges
    rois : List[GeoRegion]
        A list of regions of interest
    ongoing_proc_actions_per_node : Dict[UUID, List[ProcAction]]
        A dictionary of ongoing processing actions per node
    earliest_proc_start_time : datetime
        The earliest start time for processing actions for the node
    timestamp : datetime
        The current timestamp

    Returns
    -------
    possible_actions : List[ActionTuple]
        A list of possible actions for the node
    """
    possible_actions = [ActionTuple()]  # The "do nothing" option
    node_id = node.id
    # NOTE: approximation of the asset fov in lat/long degrees (1 degree = 111 km)
    asset_fov_ll = fov_radius / 111
    # Candidate collection locations: for each RoI, the minimum set of circle
    # centres of radius ``asset_fov_ll`` covering its bounding rectangle.
    possible_collect_locations = []
    for roi in rois:
        corner_a, corner_b = roi.corners[0], roi.corners[1]
        possible_collect_locations += cover_rectangle_with_minimum_overlapping_circles(
            corner_a.longitude, corner_a.latitude,
            corner_b.longitude, corner_b.latitude,
            asset_fov_ll
        )
    processing_nodes = get_processing_nodes(network)
    for location in possible_collect_locations:
        geo_location = GeoLocation(latitude=location[1], longitude=location[0], altitude=0)
        # Dummy image standing in for the one this collection would produce.
        image = Image(
            id=uuid4(),
            collection_time=timestamp,
            size=1,
            location=geo_location,
            fov_radius=fov_radius,
            node_id=node_id
        )
        # One collection action per candidate location.
        coll_action = CollectionAction(
            id=uuid4(),
            node_id=node_id,
            location=geo_location,
            status=ActionStatus.CREATED,
            image=image
        )
        # Pair the collection with comms + processing on every capable node.
        for proc_node in processing_nodes:
            dt = timedelta(seconds=0)
            earliest_proc_start_time_tmp = earliest_proc_start_time
            if proc_node.id != node_id:
                # Remote processing requires an actual transfer with the
                # appropriate traversal time, and the remote node's own
                # earliest-availability time applies.
                edge = get_edge(network, node_id, proc_node.id)
                dt = sample_dt(edge.traversal_time)
                earliest_proc_start_time_tmp = get_earliest_proc_start_time(
                    ongoing_proc_actions_per_node, proc_node.id, timestamp
                )
            comms_action = CommsAction(
                id=uuid4(),
                source_node_id=node_id,
                target_node_id=proc_node.id,
                image=image,
                status=ActionStatus.CREATED,
                start_time=timestamp,
                dt=dt
            )
            for algorithm in proc_node.processing_capability.algorithms:
                # One candidate processing action per algorithm.
                proc_action = ProcAction(
                    id=uuid4(),
                    node_id=proc_node.id,
                    algorithm=algorithm,
                    image=image,
                    status=ActionStatus.CREATED,
                    start_time=max(comms_action.end_time, earliest_proc_start_time_tmp),
                    dt=sample_dt(algorithm.processing_statistics)
                )
                # Add triple of actions to the list of possible actions.
                possible_actions.append(ActionTuple((coll_action, comms_action, proc_action)))

    return possible_actions
def enumerate_action_configs(image_store, network, assets, rfis, ongoing_actions, timestamp):
    """Enumerate candidate joint action configurations across all processing nodes.

    Builds the per-node action sets (collection/comms/processing), takes their
    Cartesian product, prunes configurations that are clearly sub-optimal
    (idle assets, idle processors while images wait) or inconsistent (an image
    processed twice), and de-conflicts processing start times per node.

    Returns a list of tuples of :class:`ActionTuple`, one entry per node.
    """
    # Extract all rois from rfis
    rois = extract_rois(rfis)
    processing_nodes = get_processing_nodes(network)
    available_assets = [asset for asset in assets.assets
                        if asset.asset_status.availability == Availability.AVAILABLE]

    # Ongoing processing actions per node, sorted by completion time.
    ongoing_proc_actions_per_node = {
        node.id: sorted([action for action in ongoing_actions['proc']
                         if action.node_id == node.id], key=lambda a: a.end_time)
        for node in processing_nodes
    }

    # Enumerate all possible actions for each processing node.
    possible_actions_per_node = dict()
    for node in processing_nodes:
        node_id = node.id
        # Earliest start: end of the latest ongoing action, else "now".
        earliest_start_time = get_earliest_proc_start_time(
            ongoing_proc_actions_per_node, node_id, timestamp
        )
        # Is this node also an available asset (sensor)?
        asset = next((asset for asset in available_assets
                      if asset.asset_description.id == node_id), None)
        if asset is None:
            # Pure processing node: no collection actions to consider.
            possible_actions_per_node[node.id] = enumerate_actions_for_processing_node(
                node, image_store, network, ongoing_actions, earliest_start_time,
                timestamp
            )
        else:
            # Sensor node: collection actions must be considered.
            possible_actions_per_node[node.id] = enumerate_actions_for_asset(
                node, asset.asset_description.fov_radius, network, rois,
                ongoing_proc_actions_per_node, earliest_start_time, timestamp
            )

    # Enumerate all possible action configurations.
    possible_action_configs = []
    # Images with no ongoing comms/proc action still need processing.
    num_images_to_be_processed = len(image_store.images) - np.sum([
        exists_ongoing_comms_proc_action_for_image(ongoing_actions, image.id)
        for image in image_store.images
    ])
    for config in itertools.product(*possible_actions_per_node.values()):
        # Prune configs where some available asset collects nothing — these
        # are certainly not optimal.
        num_collection_actions = sum(1 for action in config if action and action.coll_action)
        if num_collection_actions < len(available_assets):
            continue

        # Prune configs where remote processors idle while images wait.
        num_remote_proc_actions = sum(
            1 for action in config
            if action and action.type in (ActionTupleType.COMMS_AND_PROC,
                                          ActionTupleType.PROC_ONLY))
        if num_remote_proc_actions < num_images_to_be_processed:
            continue

        # Deep-copy so per-config adjustments below don't leak into the
        # shared per-node action objects.
        config = copy.deepcopy(config)

        proc_actions = [action.proc_action for action in config if action]

        # Discard configs where an image would be processed more than once.
        image_ids_to_be_processed = [action.image.id for action in proc_actions]
        if len(set(image_ids_to_be_processed)) != len(image_ids_to_be_processed):
            continue

        # De-conflict processing on each node: sort by start time and push
        # each action's start to the previous one's end when they overlap.
        proc_node_ids = [action.node_id for action in proc_actions]
        proc_actions_per_node = {
            node_id: [action for action in proc_actions if action.node_id == node_id]
            for node_id in proc_node_ids
        }
        for node_id, p_actions in proc_actions_per_node.items():
            p_actions.sort(key=lambda a: a.start_time)
            if len(p_actions) == 1:
                continue
            for previous_action, current_action in zip(p_actions, p_actions[1:]):
                if current_action.start_time < previous_action.end_time:
                    current_action.start_time = previous_action.end_time
        possible_action_configs.append(config)

    return possible_action_configs
def proc_actions_from_config_sequence(config_seq):
    """Collect every processing action appearing in a sequence of configs.

    Each config is an iterable of (coll, comms, proc) triples; ``None``
    entries (either a whole triple or its proc element) are skipped.
    """
    proc_actions = []
    for config in config_seq:
        for action in config:
            if action is not None and action[2] is not None:
                proc_actions.append(action[2])
    return proc_actions


def queue_actions(config, image_store, ongoing_actions):
    """Queue actions for execution.

    Applies one action configuration: collected images are added to the image
    store, non-instantaneous transfers are queued as ongoing comms actions
    (instantaneous transfers move the image to the processing node right
    away), and processing actions are always queued as ongoing.
    """
    for node_actions in config:
        if not node_actions:
            continue  # "no action" triple
        coll_action, comms_action, proc_action = node_actions
        # If collection action is None or collection action is not for the
        # same node as the processing action, processing is not done on the
        # sensor, so collection/comms book-keeping is needed.
        if coll_action is None or coll_action.node_id != proc_action.node_id:
            # Perform collection action
            if coll_action is not None:
                image_store.images.append(proc_action.image)
            # Perform comms action
            if comms_action is not None:
                # BUG FIX: the original compared ``comms_action.dt != 0`` — a
                # timedelta is never equal to an int in Python 3, so the
                # instantaneous branch was unreachable (and it referenced a
                # non-existent ``image_id`` attribute; the field is ``image``).
                if comms_action.dt != timedelta(0):
                    # Non-instantaneous transfer: queue as ongoing.
                    ongoing_actions['comms'].append(comms_action)
                else:
                    # Instantaneous transfer: move the image to the
                    # processing node immediately.
                    image = next(image for image in image_store.images
                                 if image.id == comms_action.image.id)
                    image.node_id = proc_action.node_id
        # Perform processing action
        ongoing_actions['proc'].append(proc_action)


def rollout_actions(config, image_store, network_topology, assets, rfis,
                    ongoing_actions, num_samples, num_timesteps, interval, timestamp):
    """Rollout actions for a given configuration.

    Returns a list of lists of action configs, where each inner list is one
    rollout: the given ``config`` followed by ``num_timesteps`` randomly
    chosen follow-up configurations, simulated on copies of the image store
    and ongoing actions so rollouts do not interfere with each other.
    """
    # The first action config is the same for all rollouts.
    all_configs = [[config] for _ in range(num_samples)]

    for config_list in all_configs:
        current_time = timestamp
        # Copy mutable state so each rollout evolves independently.
        image_store_tmp = copy.deepcopy(image_store)
        ongoing_actions_tmp = copy.deepcopy(ongoing_actions)
        queue_actions(config, image_store_tmp, ongoing_actions_tmp)
        for _ in range(num_timesteps):
            current_time += interval
            # All feasible configs at the advanced time...
            configs = enumerate_action_configs(image_store_tmp, network_topology, assets,
                                               rfis, ongoing_actions_tmp, current_time)
            # ...of which one is picked uniformly at random.
            current_config = configs[np.random.randint(len(configs))]
            config_list.append(current_config)
            queue_actions(current_config, image_store_tmp, ongoing_actions_tmp)
    return all_configs


def get_sensor(location, fov_radius, prob_detection=None, false_alarm_density=None):
    """Create a :class:`MovableUAVCamera` at ``location``.

    Optionally attaches a detection probability and a uniform clutter model
    whose spatial extent is the sensor footprint at a fixed 100 m altitude.
    """
    sensor_position = StateVector([location.longitude,
                                   location.latitude,
                                   location.altitude])
    if prob_detection is not None:
        prob_detection = Probability(prob_detection)
    sensor = MovableUAVCamera(ndim_state=6,
                              mapping=[0, 2, 4],
                              noise_covar=np.diag([0.0001, 0.0001, 0.0001]),
                              location_x=sensor_position[0],
                              location_y=sensor_position[1],
                              position=sensor_position,
                              prob_detect=prob_detection,
                              fov_radius=fov_radius,
                              fov_in_km=True)
    # Configure the sensor clutter model based on the algorithm false alarm
    # density and the image footprint.
    x, y = sensor.footprint.exterior.coords.xy
    min_x, max_x = np.min(x), np.max(x)
    min_y, max_y = np.min(y), np.max(y)
    if false_alarm_density:
        clutter_model = ClutterModel(
            clutter_rate=false_alarm_density,
            distribution=np.random.default_rng().uniform,
            dist_params=((min_x, max_x), (min_y, max_y), (100., 100.))
        )
        sensor.clutter_model = clutter_model
    return sensor
def simulate_new_tracks(sensor, timestamp, birth_density):
    """Simulate new tracks arising from a sensor collection.

    The number of new tracks follows a Poisson(1) distribution. Each track is
    given a position uniform over the sensor footprint bounding box at a fixed
    100 m altitude with zero velocity, a randomised position covariance, a
    random existence probability, and a random non-empty subset of target
    types with random confidences.
    """
    fx, fy = sensor.footprint.exterior.coords.xy
    min_x, max_x = np.min(fx), np.max(fx)
    min_y, max_y = np.min(fy), np.max(fy)

    new_tracks = set()
    for _ in range(poisson.rvs(1)):
        # Uniform position inside the footprint bounding box, fixed altitude.
        pos_x = np.random.default_rng().uniform(min_x, max_x)
        pos_y = np.random.default_rng().uniform(min_y, max_y)
        pos_z = 100.
        state_vector = StateVector([pos_x, 0, pos_y, 0, pos_z, 0])

        # Birth covariance with a randomised position uncertainty.
        covariance = np.copy(birth_density.covar)
        covariance[[0, 2], [0, 2]] = np.random.default_rng().uniform(0.01, 0.1)

        # Random probability of existence.
        exist_prob = Probability(np.random.default_rng().uniform(0.5, 1))

        # Random non-empty subset of target types, each with a random confidence.
        all_target_types = list(TargetType)
        num_types = np.random.randint(1, len(all_target_types) + 1)
        chosen_inds = np.random.choice(len(all_target_types), size=num_types,
                                       replace=False)
        target_type_confidences = {
            all_target_types[idx]: Probability(np.random.default_rng().uniform(0.1, 1))
            for idx in chosen_inds
        }

        # Create the track with its initial Gaussian state and metadata.
        state = GaussianState(state_vector, covariance, timestamp=timestamp)
        track = Track(id=uuid4(), states=[state],
                      init_metadata={'target_type_confidences': target_type_confidences})
        track.exist_prob = exist_prob
        new_tracks.add(track)
    return new_tracks
+ state_vector = StateVector([x, 0, y, 0, z, 0]) + # Extract covariance from birth density and adjust position covariance + covariance = np.copy(birth_density.covar) + covariance[[0, 2], [0, 2]] = np.random.default_rng().uniform(0.01, 0.1) # 0.01 + + # Sample existence probability + exist_prob = Probability(np.random.default_rng().uniform(0.5, 1)) #0.99 + + # Sample target type + all_target_types = list(TargetType) + num_types = np.random.randint(1, len(all_target_types) + 1) + target_type_inds = np.random.choice([i for i in range(len(TargetType))], + size=num_types, replace=False) + target_types = [all_target_types[i] for i in target_type_inds] + target_type_confidences = { + target_type: Probability(np.random.default_rng().uniform(0.1, 1)) + for target_type in target_types + } + + # Create track + init_metadata = { + 'target_type_confidences': target_type_confidences, + } + state = GaussianState(state_vector, covariance, timestamp=timestamp) + track = Track(id=uuid4(), states=[state], init_metadata=init_metadata) + track.exist_prob = exist_prob + new_tracks.add(track) + return new_tracks + diff --git a/stonesoup/custom/hypothesiser/probability.py b/stonesoup/custom/hypothesiser/probability.py index f4ff605ac..b366f3dd8 100644 --- a/stonesoup/custom/hypothesiser/probability.py +++ b/stonesoup/custom/hypothesiser/probability.py @@ -41,6 +41,7 @@ class PDAHypothesiser(Hypothesiser): doc="Target Detection Probability") predict: bool = Property(default=True, doc="Perform prediction step") per_measurement: bool = Property(default=False, doc="Generate per measurement predictions") + normalise: bool = Property(default=True, doc="Normalise probabilities") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @@ -182,7 +183,7 @@ def hypothesise(self, track, detections, timestamp, **kwargs): probability, measurement_prediction)) - return MultipleHypothesis(hypotheses, normalise=True, total_weight=1) + return MultipleHypothesis(hypotheses, 
normalise=self.normalise, total_weight=1) class IPDAHypothesiser(PDAHypothesiser): diff --git a/stonesoup/custom/sensor/movable.py b/stonesoup/custom/sensor/movable.py index f7c0d355b..bf1021bde 100644 --- a/stonesoup/custom/sensor/movable.py +++ b/stonesoup/custom/sensor/movable.py @@ -16,6 +16,7 @@ from stonesoup.types.array import CovarianceMatrix from stonesoup.types.detection import TrueDetection from stonesoup.types.groundtruth import GroundTruthState +from stonesoup.types.numeric import Probability class MovableUAVCamera(Sensor): @@ -34,6 +35,9 @@ class MovableUAVCamera(Sensor): :class:`~.CartesianToElevationBearing` model") fov_radius: Union[float, List[float]] = Property( doc="The detection field of view radius of the sensor") + prob_detect: Probability = Property( + default=None, + doc="The probability of detection of the sensor. Defaults to 1.0") clutter_model: ClutterModel = Property( default=None, doc="An optional clutter generator that adds a set of simulated " @@ -59,6 +63,8 @@ class MovableUAVCamera(Sensor): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) + if self.prob_detect is None: + self.prob_detect = Probability(1) self._footprint = None @location_x.setter @@ -132,12 +138,15 @@ def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, measurement_model=measurement_model, timestamp=truth.timestamp, groundtruth_path=truth) - detections.add(detection) + + # Generate detection with probability of detection + if np.random.rand() <= self.prob_detect: + detections.add(detection) # Generate clutter at this time step if self.clutter_model is not None: self.clutter_model.measurement_model = measurement_model - clutter = self.clutter_model.function(ground_truths) + clutter = self.clutter_model.function(ground_truths, **kwargs) detections |= clutter return detections diff --git a/stonesoup/custom/tracker/__init__.py b/stonesoup/custom/tracker/__init__.py index ee72b4ce3..f5ad1fa39 100644 --- 
a/stonesoup/custom/tracker/__init__.py +++ b/stonesoup/custom/tracker/__init__.py @@ -47,6 +47,7 @@ class _BaseTracker(Base): start_time: datetime = Property(doc='Start time of the tracker', default=None) def __init__(self, *args, **kwargs): + self._clutter_intensity = kwargs.pop('clutter_intensity', None) super().__init__(*args, **kwargs) self.prob_detect = self.prob_detection self._tracks = set() @@ -74,6 +75,18 @@ def prob_detect(self, prob_detect): if hasattr(self, '_initiator'): self._initiator.filter.prob_detect = self._prob_detect + @property + def clutter_intensity(self): + return self._clutter_intensity + + @clutter_intensity.setter + def clutter_intensity(self, clutter_intensity): + self._clutter_intensity = clutter_intensity + if hasattr(self, '_hypothesiser'): + self._hypothesiser.clutter_spatial_density = self._clutter_intensity + if hasattr(self, '_initiator'): + self._initiator.filter.clutter_intensity = self._clutter_intensity + @abstractmethod def track(self, detections, timestamp, *args, **kwargs): raise NotImplementedError @@ -239,9 +252,10 @@ def __init__(self, *args, **kwargs): self._hypothesiser = PDAHypothesiser(self._predictor, self._updater, self.clutter_intensity, prob_detect=self.prob_detect, - predict=self.predict) - self._hypothesiser = DistanceHypothesiser(self._predictor, self._updater, - Mahalanobis(), 10) + predict=self.predict, + normalise=False) + # self._hypothesiser = DistanceHypothesiser(self._predictor, self._updater, + # Mahalanobis(), 10) self._associator = GNNWith2DAssignment(self._hypothesiser) resampler = SystematicResampler() diff --git a/stonesoup/dataassociator/neighbour.py b/stonesoup/dataassociator/neighbour.py index 2ea26f6da..f332e4f3a 100644 --- a/stonesoup/dataassociator/neighbour.py +++ b/stonesoup/dataassociator/neighbour.py @@ -145,7 +145,7 @@ class GNNWith2DAssignment(DataAssociator): hypothesiser: Hypothesiser = Property( doc="Generate a set of hypotheses for each prediction-detection pair") - def 
associate(self, tracks, detections, timestamp, **kwargs): + def associate(self, tracks, detections, timestamp, hypotheses=None, **kwargs): """Associate a set of detections with predicted states. Parameters @@ -163,8 +163,9 @@ def associate(self, tracks, detections, timestamp, **kwargs): Key value pair of tracks with associated detection """ - # Generate a set of hypotheses for each track on each detection - hypotheses = self.generate_hypotheses(tracks, detections, timestamp, **kwargs) + if hypotheses is None: + # Generate a set of hypotheses for each track on each detection + hypotheses = self.generate_hypotheses(tracks, detections, timestamp, **kwargs) # Create dictionary for associations associations = {} diff --git a/stonesoup/models/clutter/clutter.py b/stonesoup/models/clutter/clutter.py index ec6aa7efa..3a820c083 100644 --- a/stonesoup/models/clutter/clutter.py +++ b/stonesoup/models/clutter/clutter.py @@ -1,3 +1,5 @@ +import datetime + import numpy as np from scipy.stats import poisson from typing import Set, Union, Callable, Tuple, Optional @@ -57,7 +59,8 @@ def __init__(self, *args, **kwargs): else: self.random_state = None - def function(self, ground_truths: Set[GroundTruthState], **kwargs) -> Set[Clutter]: + def function(self, ground_truths: Set[GroundTruthState], timestamp: datetime.datetime, + **kwargs) -> Set[Clutter]: """ Use the defined distribution and parameters to create simulated clutter for the current time step. Return this clutter to the calling sensor so @@ -73,12 +76,6 @@ def function(self, ground_truths: Set[GroundTruthState], **kwargs) -> Set[Clutte : set of :class:`~.Clutter` The simulated clutter. """ - # Extract the timestamp from the ground_truths. Groundtruth is - # necessary to get the proper timestamp. If there is no - # groundtruth return a set of no Clutter. 
- if not ground_truths: - return set() - timestamp = next(iter(ground_truths)).timestamp # Generate the clutter for this time step clutter = set() From ebbb74c00b945d096d3d126efc223ac0d97460cf Mon Sep 17 00:00:00 2001 From: sglvladi Date: Fri, 8 Mar 2024 13:58:38 +0000 Subject: [PATCH 83/87] Minor fixes to port from RISR --- .../hierarchical-tracking-example.py | 8 +- examples/reactive-isr/utils.py | 79 +------- stonesoup/custom/functions/__init__.py | 168 ------------------ stonesoup/custom/tracker/__init__.py | 6 +- 4 files changed, 10 insertions(+), 251 deletions(-) diff --git a/examples/reactive-isr/hierarchical-tracking-example.py b/examples/reactive-isr/hierarchical-tracking-example.py index f1c144e44..ec5ce4916 100644 --- a/examples/reactive-isr/hierarchical-tracking-example.py +++ b/examples/reactive-isr/hierarchical-tracking-example.py @@ -77,7 +77,7 @@ def to_single_state(tracks): # Parameters -np.random.seed(1000) +np.random.seed(1005) clutter_rate = 1 # Mean number of clutter points per scan max_range = 50 # Max range of sensor (meters) surveillance_area = np.pi * max_range ** 2 # Surveillance region area @@ -371,13 +371,15 @@ def to_single_state(tracks): # Add legend info for i, color in enumerate(colors): plt.plot([], [], f'--{color}', label=f'Groundtruth (Sensor {i + 1})') - plt.plot([], [], f':{color}', label=f'Tracklets (Sensor {i + 1})') plt.plot([], [], f'x{color}', label=f'Detections (Sensor {i + 1})') - plt.plot([], [], f'-*m', label=f'Fused Tracks') + plt.plot([], [], '-*c', label=f'Fused Tracks (Fuse Tracker 1)') + plt.plot([], [], '-*r', label=f'Fused Tracks (Fuse Tracker 2)') + plt.plot([], [], f'-*m', label=f'Fused Tracks (Top Tracker)') plt.legend(loc='upper right') plt.xlim((-200, 200)) plt.ylim((-200, 200)) plt.pause(0.01) + a=2 print(datetime.now() - sim_start_time) diff --git a/examples/reactive-isr/utils.py b/examples/reactive-isr/utils.py index 966586319..886a8b942 100644 --- a/examples/reactive-isr/utils.py +++ 
b/examples/reactive-isr/utils.py @@ -8,8 +8,6 @@ from shapely.geometry import Point from shapely.ops import unary_union -from reactive_isr_core.data import BeliefState, AssetList, GeoLocation, SensorType, ActionList - from stonesoup.types.track import Track from stonesoup.custom.sensor.movable import MovableUAVCamera from stonesoup.sensor.sensor import Sensor @@ -106,79 +104,4 @@ def prob_detect_func(state): point = Point(state.state_vector[0, 0], state.state_vector[2, 0]) return prob_detect if poly.contains(point) else Probability(0.1) - return prob_detect_func - - -def belief_state_to_tracks(belief: BeliefState) -> Sequence[Track]: - """Converts a belief state to a set of stonesoup tracks""" - targets = belief.targets - tracks = [] - for target_id, target_detection in targets.items(): - state_vector = StateVector([target_detection.location.longitude, - target_detection.velocity.longitude, - target_detection.location.latitude, - target_detection.velocity.latitude, - target_detection.location.altitude, - target_detection.velocity.altitude]) - covariance_matrix = np.zeros((6, 6), dtype=float) - covariance_matrix[0::2, 0::2] = target_detection.location_error - covariance_matrix[1::2, 1::2] = target_detection.velocity_error - metadata = { - 'target_type_confidences': target_detection.target_type_confidences, - } - state = GaussianState(state_vector, covariance_matrix, - timestamp=target_detection.time) - track = Track(id=target_id, states=[state], init_metadata=metadata) - track.exist_prob = Probability(target_detection.confidence) - tracks.append(track) - return tracks - - -def assets_to_sensors(assets: AssetList, region_corners: List[GeoLocation], - action_resolutions: Dict[str, float]) -> Sequence[Sensor]: - """Converts a set of assets to a list of stonesoup sensors""" - sensors = [] - for asset in assets.assets: - if SensorType.AERIAL_V_CAMERA in asset.asset_description.sensor_types: - sensor_position = StateVector([asset.asset_status.location.longitude, - 
asset.asset_status.location.latitude, - asset.asset_status.location.altitude]) - lim_x = np.sort([loc.longitude for loc in region_corners]) - lim_y = np.sort([loc.latitude for loc in region_corners]) - limits = {'location_x': lim_x, 'location_y': lim_y} - sensor = MovableUAVCamera(ndim_state=6, mapping=[0, 2, 4], - noise_covar=np.diag([0.05, 0.05, 0.05]), - fov_radius=asset.asset_description.fov_radius, - location_x=sensor_position[0], - location_y=sensor_position[1], - resolutions=action_resolutions, - position=sensor_position, - limits=limits) - sensor.id = asset.asset_description.id - sensors.append(sensor) - else: - raise NotImplementedError("Only aerial cameras are supported") - return sensors - - -def action_list_to_config(assets, action_list: ActionList, - region_corners: List[GeoLocation], - action_resolutions: Dict[str, float], - time: datetime) -> Mapping[Sensor, Sequence[ssAction]]: - """Converts a reactive_isr_core action list to a stonesoup config""" - sensors = assets_to_sensors(assets, region_corners, action_resolutions) - config = {} - - for action in action_list.actions: - try: - sensor = next(s for s in sensors if s.id == action.asset_id) - except StopIteration as exc: - raise ValueError(f"Asset {action.asset_id} not found") from exc - location_x, location_y = action.location.longitude, action.location.latitude - action_generators = sensor.actions(time) - x_action_gen = next(a for a in action_generators if a.attribute == 'location_x') - y_action_gen = next(a for a in action_generators if a.attribute == 'location_y') - x_action = x_action_gen.action_from_value(location_x) - y_action = y_action_gen.action_from_value(location_y) - config[sensor] = (x_action, y_action) - return config \ No newline at end of file + return prob_detect_func \ No newline at end of file diff --git a/stonesoup/custom/functions/__init__.py b/stonesoup/custom/functions/__init__.py index 0d5ff0cdb..b39b178ab 100644 --- a/stonesoup/custom/functions/__init__.py +++ 
b/stonesoup/custom/functions/__init__.py @@ -1,4 +1,3 @@ -import copy from functools import partial import math from typing import Set, List, Sequence @@ -16,7 +15,6 @@ from shapely.geometry.base import BaseGeometry from vector3d.vector import Vector -from reactive_isr_core.data import RFI, TaskType from stonesoup.sensor.sensor import Sensor from stonesoup.types.angle import Angle from stonesoup.types.state import ParticleState @@ -440,172 +438,6 @@ def calculate_num_targets_dist(tracks: Set[Track], geom: BaseGeometry, return mu_overall, var_overall - -def eval_rfi(rfi: RFI, tracks: Sequence[Track], sensors: Sequence[Sensor], - phd_state: ParticleState = None, use_variance=True): - num_samples = 100 - mu_overall = 0 - var_overall = 0 # np.inf if len(valid_tracks) == 0 else 0 - config_metric = 0 - - xmin, ymin = rfi.region_of_interest.corners[0].longitude, \ - rfi.region_of_interest.corners[0].latitude - xmax, ymax = rfi.region_of_interest.corners[1].longitude, \ - rfi.region_of_interest.corners[1].latitude - geom = Polygon([(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]) - path_p = Path(geom.boundary.coords) - - target_types = [t.target_type.value for t in rfi.targets] - valid_tracks = [track for track in tracks - if not (target_types) - or (target_types and any(item in track.metadata['target_type_confidences'] - for item in target_types))] - - # Calculate PHD density inside polygon - if phd_state is not None: - points = phd_state.state_vector[[0, 2], :].T - inside_points = path_p.contains_points(points) - if np.sum(inside_points) > 0: - # The mean of the PHD density inside the polygon is the sum of the weights of the - # particles inside the polygon - mu_overall = np.exp(logsumexp(np.log(phd_state.weight[inside_points].astype(float)))) - # The variance of a Poisson distribution is equal to the mean - var_overall = mu_overall - - # Calculate number of tracks inside polygon - for track in valid_tracks: - # Sample points from the track state - points = 
multivariate_normal.rvs(mean=track.state_vector[[0, 2]].ravel(), - cov=track.covar[[0, 2], :][:, [0, 2]], - size=num_samples) - # Check which points are inside the polygon - inside_points = path_p.contains_points(points) - # Probability of existence inside the polygon is the fraction of points inside the polygon - # times the probability of existence - p_success = float(track.exist_prob) * (np.sum(inside_points) / num_samples) - # Mean of a Bernoulli distribution is equal to the probability of success - mu_overall += p_success - # Variance of a Bernoulli distribution is equal to the probability of success, - # times the probability of failure - var_overall += p_success * (1 - p_success) - - if rfi.task_type == TaskType.COUNT: - if mu_overall > 0 and var_overall < rfi.threshold_over_time.threshold[0]: - # TODO: Need to select the priority - config_metric += rfi.priority_over_time.priority[0] - if use_variance: - config_metric += 1 / var_overall - elif mu_overall == 0 and var_overall == 0: - aoi = 0 - for sensor in sensors: - # center = (sensor.position[1], sensor.position[0]) - # radius = sensor.fov_radius - # p = geodesic_point_buffer(*center, radius) - p = sensor.footprint - aoi = max([geom.intersection(p).area / geom.area, aoi]) - config_metric += aoi*rfi.priority_over_time.priority[0] - elif rfi.task_type == TaskType.FOLLOW: - for target in rfi.targets: - track = next((track for track in tracks if track.id == str(target.target_UUID)), None) - if track is not None: - var = track.covar[0, 0] + track.covar[2, 2] - if var < rfi.threshold_over_time.threshold[0]: - config_metric += rfi.priority_over_time.priority[0] - - return config_metric - - -def eval_rfi_new(rfi: RFI, tracks: Sequence[Track], timestamp, - phd_state: ParticleState = None, use_variance=True): - num_samples = 100 - mu_overall = 0 - var_overall = 0 # np.inf if len(valid_tracks) == 0 else 0 - config_metric = 0 - - target_types = [t.target_type.value for t in rfi.targets] - valid_tracks = [track for 
track in tracks - if not (target_types) - or (target_types and any(item in track.metadata['target_type_confidences'] - for item in target_types))] - for roi in rfi.region_of_interest: - - xmin, ymin = roi.corners[0].longitude, roi.corners[0].latitude - xmax, ymax = roi.corners[1].longitude, roi.corners[1].latitude - geom = Polygon([(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]) - path_p = Path(geom.boundary.coords) - - # Calculate PHD density inside polygon - if phd_state is not None: - points = phd_state.state_vector[[0, 2], :].T - inside_points = path_p.contains_points(points) - if np.sum(inside_points) > 0: - # The mean of the PHD density inside the polygon is the sum of the weights of the - # particles inside the polygon - mu_overall = np.exp(logsumexp(np.log(phd_state.weight[inside_points].astype(float)))) - # The variance of a Poisson distribution is equal to the mean - var_overall = mu_overall - - # Calculate number of tracks inside polygon - for track in valid_tracks: - # Sample points from the track state - points = multivariate_normal.rvs(mean=track.state_vector[[0, 2]].ravel(), - cov=track.covar[[0, 2], :][:, [0, 2]], - size=num_samples) - - # Check which points are inside the polygon - inside_points = path_p.contains_points(points) - # Probability of existence inside the polygon is the fraction of points inside the polygon - # times the probability of existence - p_success = float(track.exist_prob) * (np.sum(inside_points) / num_samples) - # Mean of a Bernoulli distribution is equal to the probability of success - mu_overall += p_success - # Variance of a Bernoulli distribution is equal to the probability of success, - # times the probability of failure - var_overall += p_success * (1 - p_success) - - # Compute time varying reward - # NOTE: We assume that the priority over time is monotonically decreasing - potential_reward = 0 # Default reward - # Find index of the closest earlier timestamp - inds_lt = 
np.flatnonzero(np.array(rfi.priority_over_time.timescale) <= timestamp) - # If there are no earlier timestamps, it means that the RFI has not been active yet - if len(inds_lt) > 0: - max_reward_idx = inds_lt[-1] - max_reward_time = rfi.priority_over_time.timescale[max_reward_idx] - max_potential_reward = rfi.priority_over_time.priority[max_reward_idx] - # Find index of the closest later timestamp - inds_gt = np.flatnonzero(np.array(rfi.priority_over_time.timescale) > timestamp) - # If there are later timestamps, we interpolate between the two points - if len(inds_gt) > 0: - min_reward_idx = inds_gt[0] - min_reward_time = rfi.priority_over_time.timescale[min_reward_idx] - min_potential_reward = rfi.priority_over_time.priority[min_reward_idx] - dt_total = min_reward_time - max_reward_time - dt = timestamp - max_reward_time - dy = max_potential_reward - min_potential_reward - # Linearly interpolate between the two points - potential_reward = max_potential_reward - dy * (dt / dt_total) - else: - # If there are no later timestamps, we reward the max potential reward - potential_reward = max_potential_reward - - if rfi.task_type == TaskType.COUNT: - if mu_overall > 0 and var_overall < rfi.threshold_over_time.threshold[0]: - config_metric += potential_reward - if use_variance: - config_metric += 1 / var_overall - else: - a=2 - elif rfi.task_type == TaskType.FOLLOW: - for target in rfi.targets: - track = next((track for track in tracks if track.id == str(target.target_UUID)), None) - if track is not None: - var = track.covar[0, 0] + track.covar[2, 2] - if var < rfi.threshold_over_time.threshold[0]: - config_metric += potential_reward - - return config_metric - proj_wgs84 = pyproj.Proj('+proj=longlat +datum=WGS84') diff --git a/stonesoup/custom/tracker/__init__.py b/stonesoup/custom/tracker/__init__.py index f5ad1fa39..275359133 100644 --- a/stonesoup/custom/tracker/__init__.py +++ b/stonesoup/custom/tracker/__init__.py @@ -124,7 +124,8 @@ def __init__(self, *args, 
**kwargs): clutter_intensity=self.clutter_intensity, num_samples=self.num_samples, resampler=resampler, - birth_scheme=self.birth_scheme) + birth_scheme=self.birth_scheme, + scale_birth_weights=True) else: phd_filter = SMCPHDFilter(birth_density=self.birth_density, transition_model=self.transition_model, @@ -136,7 +137,8 @@ def __init__(self, *args, **kwargs): clutter_intensity=self.clutter_intensity, num_samples=self.num_samples, resampler=resampler, - birth_scheme=self.birth_scheme) + birth_scheme=self.birth_scheme, + scale_birth_weights=True) # Sample prior state from birth density if isinstance(self.birth_density, GaussianMixture): state_vector = np.zeros((self.transition_model.ndim_state, 0)) From 5f7dcefa641b193fd495420b4c86219d67018971 Mon Sep 17 00:00:00 2001 From: prh Date: Tue, 5 Nov 2024 11:03:46 +0000 Subject: [PATCH 84/87] PRH backup --- examples/prh_example/fusion.py | 71 ++++ .../prh_example/scratch_hierarchical_large.py | 349 ++++++++++++++++++ stonesoup/custom/reader/tracklet.py | 184 ++++++++- stonesoup/custom/tracker/fuse.py | 4 + 4 files changed, 607 insertions(+), 1 deletion(-) create mode 100644 examples/prh_example/fusion.py create mode 100644 examples/prh_example/scratch_hierarchical_large.py diff --git a/examples/prh_example/fusion.py b/examples/prh_example/fusion.py new file mode 100644 index 000000000..8be36573d --- /dev/null +++ b/examples/prh_example/fusion.py @@ -0,0 +1,71 @@ +import numpy as np + +from stonesoup.custom.types.tracklet import SensorTracks + + +# Create a class to represent a node in a fusion hierarchy, +class FusionNode: + """ + Class to represent a node in a fusion hierarchy + """ + def __init__(self, tracker, children, statedim, use_two_state_tracks=True): + # Tracker used at this node + self.tracker = tracker + # Child nodes used to supply tracks for fusion + self.children = children + # The dimension of a (single) target state + self.statedim = statedim + # Whether to propagate two-state tracks + 
self.use_two_state_tracks = use_two_state_tracks + + @property + def tracks(self): + return self.tracker.tracks + + @property + def detections(self): + if self.is_leaf(): + return self.tracker.detector.detections + else: + sensor_scans = [d for scan in self.tracker._scans for d in scan.sensor_scans] + detections = set([d for sscan in sensor_scans for d in sscan.detections]) + return detections + + # Process the tracks at this node by reading in the child tracks, creating pseudomeasurements and performing + # tracking + def process_tracks(self, timestamp): + child_tracks = [child.tracker.tracks for child in self.children] + input_tracks = [SensorTracks(tracks, i) for i, tracks in enumerate(child_tracks)] + if not self.use_two_state_tracks: + input_tracks = [SensorTracks(to_single_state(track, self.statedim), i) for i, track in enumerate(input_tracks)] + two_state_tracks = self.tracker.process_tracks(input_tracks, timestamp) + if not self.use_two_state_tracks: + return to_single_state(two_state_tracks, self.statedim) + else: + return two_state_tracks + + def is_leaf(self): + return len(self.children) == 0 + + def get_leaf_trackers(self): + if self.is_leaf(): + return [self.tracker] + else: + leaf_trackers = [] + for child in self.children: + leaf_trackers += child.get_leaf_trackers() + return leaf_trackers + + """ + # TODO: Try to make the fusion engine more pythonic with some class structure (under construction) + def runTracker(self, timestamp): + if not self.is_leaf: + child_tracks = [] + for child in self.child_trackers: + child_tracks.append(child.runTracker(timestamp)) + else: + for child in self.child_trackers: + child_tracks.append() + child_ + two_state_tracks + """ diff --git a/examples/prh_example/scratch_hierarchical_large.py b/examples/prh_example/scratch_hierarchical_large.py new file mode 100644 index 000000000..d0a9ae57f --- /dev/null +++ b/examples/prh_example/scratch_hierarchical_large.py @@ -0,0 +1,349 @@ +# Generate a multi-sensor / multi-target 
simulation and tracking scenario + +import matplotlib +from itertools import tee + +matplotlib.use('TkAgg') +from matplotlib import pyplot as plt + +import numpy as np +from datetime import datetime, timedelta + +from stonesoup.types.array import StateVector, CovarianceMatrix +from stonesoup.types.state import GaussianState, State +from stonesoup.types.numeric import Probability +from stonesoup.types.track import Track +from stonesoup.types.update import Update, GaussianStateUpdate +from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \ + ConstantVelocity +from stonesoup.simulator.simple import MultiTargetGroundTruthSimulator + +from stonesoup.platform import FixedPlatform, MovingPlatform +from stonesoup.sensor.radar import RadarBearingRange +from stonesoup.models.measurement.nonlinear import CartesianToBearingRange +from stonesoup.simulator.simple import DummyGroundTruthSimulator +from stonesoup.simulator.platform import PlatformDetectionSimulator +#from stonesoup.custom.simulator.platform import PlatformTargetDetectionSimulator + +from stonesoup.predictor.kalman import KalmanPredictor +from stonesoup.updater.kalman import UnscentedKalmanUpdater +from stonesoup.hypothesiser.distance import DistanceHypothesiser +from stonesoup.measures import Mahalanobis +from stonesoup.dataassociator.neighbour import GNNWith2DAssignment +from stonesoup.initiator.simple import MultiMeasurementInitiator +from stonesoup.deleter.error import CovarianceBasedDeleter +from stonesoup.tracker.simple import MultiTargetTracker +from stonesoup.plugins.pyehm import JPDAWithEHM2 +from stonesoup.gater.distance import DistanceGater + +from stonesoup.custom.predictor.twostate import TwoStatePredictor +from stonesoup.custom.updater.twostate import TwoStateKalmanUpdater +from stonesoup.custom.initiator.twostate import TwoStateInitiator +from stonesoup.custom.types.tracklet import SensorTracks +from stonesoup.custom.reader.tracklet import TrackletExtractor, 
PseudoMeasExtractor, TrackletExtractorTwoState +from stonesoup.custom.tracker.fuse import FuseTracker2 + +from stonesoup.custom.hypothesiser.probability import \ + PDAHypothesiser as PDAHypothesiserLyud # Lyudmil's custom PDA which doesn't have to predict? +from stonesoup.hypothesiser.probability import PDAHypothesiser # replaced with Lyudmil's custom code + +from utils import plot_cov_ellipse#compute_ellipse + +from prh_funcs import tile_with_circles, merge_position_and_velocity, to_single_state, fit_normal_to_uniform,\ + merge_position_and_velocity_covariance +from fusion import FusionNode + +from output_files import output_tracks, output_meas, output_truth + + +np.random.seed(1991) + +# Whether to use one or two state trackers at the upper hierarchy levels +use_two_state_tracks = True +# Whether to divide out the prior distribution when calculating pseudomeasurements +use_prior = True + + +# A function to plot the output tracks, including the lower level tracks fed into the higher level trackers +def plot_tracks(all_tracks, all_detections, truth, numxy, fusion_level): + """ + Plot results for a level of trackers + """ + + numx, numy = numxy[0], numxy[1] + fig, axs = plt.subplots(numy, numx, squeeze = False) + + for row in range(numy): + for col in range(numx): + + i = col * numy + row + ax = axs[numy - row - 1, col] + fusion_node = fusion_level[i] + + leaf_platforms = [p for x in fusion_node.get_leaf_trackers() for p in x.detector.platforms] + platform_states = [np.array(p.state_vector.flatten()) for p in leaf_platforms] + platform_ranges = [p.sensors[0].max_range for p in leaf_platforms] + + # Tracks + for track in all_tracks[i]: # leaf: # [i]: + track_x = np.array([x.state_vector.flatten() for x in track.states]) + ax.plot(track_x[:, -4], track_x[:, -2], color='r', marker='+') + for state in track: + mn, cv = state.mean[np.ix_([-4, -2])], state.covar[np.ix_([-4, -2], [-4, -2])] + # path = compute_ellipse(mn, cv) + plot_cov_ellipse(cv, mn, + ax=ax) + # Ground 
truth + for target in truth: + truth_x = np.array([x.state_vector.flatten() for x in target.states]) + ax.plot(truth_x[:, 0], truth_x[:, 2], color='g', marker='') + + # Platform range + for (platform_state, platform_range) in zip(platform_states, platform_ranges):#platform_positions[i]: + circle = plt.Circle(platform_state[np.ix_([0, 2])], platform_range, color='k', fill=False) + ax.add_patch(circle) + + # Detections (will have problems if the pseudomeasurements are inhomogeneous due to having different + # rank) + if all_detections and all_detections[i]: + if fusion_node.is_leaf(): + d = np.array([z.measurement_model.inverse_function(z).flatten() for z in all_detections[i]]) + meas_x = d[:,0] + meas_y = d[:,2] + else: + d = np.array([z.state_vector.flatten() for z in all_detections[i]]) + meas_x = d[:,-4] + meas_y = d[:,-2] + ax.plot(meas_x, meas_y, color='b', marker='.', linestyle='') + + # Axes + ax.set_xlim([minpos[0], maxpos[0]]) + ax.set_ylim([minpos[1], maxpos[1]]) + + +# Create a leaf-level tracker (i.e. one which processes raw measurements and provides tracks for higher level +# trackers to process +def create_leaf_tracker(platforms, gnd_sim, transition_model, prior_state): + + # Create detection simulator + detection_sim = PlatformDetectionSimulator( + groundtruth=gnd_sim, + platforms=platforms) + + this_meas_model = platforms[0].sensors[0].measurement_model # will this work with multiple platforms/sensors per tracker? 
+ predictor = KalmanPredictor(transition_model) + updater = UnscentedKalmanUpdater(this_meas_model) + + # Covariance-based deleted + deleter = CovarianceBasedDeleter(covar_trace_thresh=10000.0) + + # Create GNN data association + hypothesiser = PDAHypothesiser(predictor, updater, prob_detect=0.9, prob_gate=0.999, + clutter_spatial_density=1.0e-6) + data_associator = GNNWith2DAssignment(hypothesiser) + + # Create initiator for this sensor + min_detections = 1 + initiator = MultiMeasurementInitiator( + prior_state=prior_state, + measurement_model=this_meas_model, + deleter=deleter, + data_associator=data_associator, + updater=updater, + min_points=min_detections) + + # Set up the tracker + return MultiTargetTracker( + initiator=initiator, + deleter=deleter, + detector=detection_sim, + data_associator=data_associator, + updater=updater) + +# Create a fusion tracker, i.e. one which takes in tracks, produces pseudomeasurements from them and carries out +# tracking on the pseudomeasurements +# use_two_state specifies if the tracks to be fused consist of single target state distributions (as is the case for +# tracks produced by leaf trackers, or fusion tracks which have been converted to single state), or distributions on +# pairs of states for a track over an intervaL) +def create_fuse_tracker(fusion_time_interval, transition_model, prior_state, use_two_state): + + # Fusion tracker components + two_state_predictor = TwoStatePredictor(transition_model) + two_state_updater = TwoStateKalmanUpdater(None, True) + fuse_initiator = TwoStateInitiator(prior_state, transition_model, two_state_updater) + # Lyudmil's code uses PDAHypothesiser from stonesoup.custom.hypothesiser.probability + fuse_hypothesiser = PDAHypothesiserLyud(predictor=None, + updater=two_state_updater, + clutter_spatial_density=Probability(-80, log_value=True),#Probability(-80, log_value=True), + prob_detect=Probability(prob_detect), + prob_gate=Probability(0.99), + predict=False, + per_measurement=True) + 
#fuse_associator = JPDAWithEHM2(fuse_hypothesiser) # in Fuse tracker + fuse_associator = GNNWith2DAssignment(fuse_hypothesiser) # in Fuse tracker + if use_two_state: + tracklet_extractor = TrackletExtractorTwoState(transition_model=transition_model, + fuse_interval=fusion_time_interval) + else: + tracklet_extractor = TrackletExtractor(transition_model=transition_model, + fuse_interval=fusion_time_interval) + + pseudomeas_extractor = PseudoMeasExtractor(use_prior=True) + return FuseTracker2(initiator=fuse_initiator, predictor=two_state_predictor, + updater=two_state_updater, associator=fuse_associator, + tracklet_extractor=tracklet_extractor, + pseudomeas_extractor=pseudomeas_extractor, death_rate=1e-4, + prob_detect=Probability(prob_detect), + delete_thresh=Probability(0.5)) # delete_thresh=Probability(0.0)) + +# Create a hierarchy of trackers with leaf trackers at the bottom, passing up to multiple layers of fusion trackers +def create_fusion_hierarchy(platforms, gnd_sims, tracker_hierarchy_indices, fusion_times, + transition_model, prior_state): + """ + Create hierarchy of fusion trackers (all the same for now, with different fusion times) + """ + fusion_hierarchy = [] + + # Generate leaf nodes + fusion_hierarchy.append([]) + for i_node, idx in enumerate(tracker_hierarchy_indices[0]): + tracker = create_leaf_tracker([platforms[i] for i in idx], gnd_sims[i_node], + transition_model, prior_state) + fusion_hierarchy[-1].append(FusionNode(tracker, [], statedim, use_two_state_tracks)) + + # Generate fusion nodes + for i_level, (fusion_time, level) in enumerate(zip(fusion_times[1:], tracker_hierarchy_indices[1:])): + fusion_hierarchy.append([]) + for child_idx in level: + is_two_state = (i_level > 0 and use_two_state_tracks) + tracker = create_fuse_tracker(fusion_time, transition_model, prior_state, is_two_state) + children = [fusion_hierarchy[-2][i] for i in child_idx] + fusion_hierarchy[-1].append(FusionNode(tracker, children, statedim, use_two_state_tracks)) + + 
return fusion_hierarchy + + +# Specify maximum and minimum extect of surveillance region +minpos = np.array([-1000, -1000]) +maxpos = np.array([3000, 1000]) +posdim = len(minpos) +statedim = 2 * posdim +position_mapping = (0, 2) +velocity_mapping = (1, 3) + +# Specify sensor noise covariance and measurement times +noise_covar = CovarianceMatrix(np.diag([0.01 ** 2, 10 ** 2])) +start_time = datetime(year=1970, month=1, day=1)#datetime.now().replace(microsecond=0)# +timestep_size = timedelta(seconds=1.0) +simulation_length = timedelta(seconds=300) +number_of_steps = int(simulation_length / timestep_size) + 1 +prob_detect = 0.9 # Probability of Detection + +# Specify initial velocity mean and covariance +init_velocity_mean = StateVector([[0.0], [0.0]]) +init_velocity_covariance = CovarianceMatrix(np.diag([10.0 ** 2, 10.0 ** 2])) + +# Get initial state distribution by fitting a Gaussian to the uniform position distribution and merging with +# the velocity distribution +init_position_mean, init_position_covariance = fit_normal_to_uniform(minpos, maxpos) +s_prior_state_mean = merge_position_and_velocity(init_position_mean, init_velocity_mean, statedim, + position_mapping, velocity_mapping) +s_prior_state_covariance = merge_position_and_velocity_covariance(init_position_covariance, init_velocity_covariance, + statedim, position_mapping, velocity_mapping) +# Specify prior Gaussian distribution of a new track +s_prior_state = GaussianState(s_prior_state_mean, s_prior_state_covariance, start_time) + +# 2-d Constant Velocity model for target +transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.1)] * 2) + +# Generate the target initial states +number_of_targets = 20 +init_target_states = [] +for _ in range(number_of_targets): + init_pos = np.random.uniform(minpos, maxpos) + init_vel = StateVector(np.random.multivariate_normal(init_velocity_mean.flatten(), init_velocity_covariance)) + init_pos_vel = merge_position_and_velocity(init_pos, init_vel, 
statedim, position_mapping, velocity_mapping) + init_target_states.append(State(init_pos_vel, start_time)) + +# Ground truth simulator +ground_truth_simulator = MultiTargetGroundTruthSimulator( + transition_model=transition_model, + initial_state=s_prior_state, + birth_rate=0.0, + death_probability=0.0, + preexisting_states = [x.state_vector for x in init_target_states], + timestep=timestep_size, + number_steps=number_of_steps) + +# Generate grid of sensors which tile a box +numx, numy = 4, 2 +[platform_positions, max_range] = tile_with_circles(minpos, maxpos, numx, numy) +# Size of tracker grid for each level in the hierarchy +numxy_hierarchy = [(numx, numy), (numx, numy//2), (numx//2, numy//2), (1,1)] + +# Create sensor platforms +# (how do I add clutter?) +platforms = [] +for i_platform, x in enumerate(platform_positions): + + sensor = RadarBearingRange(ndim_state=statedim, noise_covar=noise_covar, + position_mapping=position_mapping, max_range=max_range) + + # Create fixed sensor platform (we need to have 4-dimensional states for the sensors even if they are stationary + # because the other sensors try to measure them as if they were targets) + platform_state = merge_position_and_velocity(x, [0, 0], statedim, + position_mapping, velocity_mapping) + platform = FixedPlatform(State(platform_state, timestamp=start_time), position_mapping=position_mapping) + platform.add_sensor(sensor) + platforms.append(platform) + +# Create hierarchy of fusion trackers (all the same for now, with different fusion times) +fusion_times = [timestep_size, 4*timestep_size, 24*timestep_size, 48*timestep_size] +tracker_hierarchy_indices = [[(i,) for i, _ in enumerate(platforms)], [(0,1),(2,3),(4,5),(6,7)], [(0,1),(2,3)], [(0,1)]] +gnd_sims = tee(ground_truth_simulator, len(tracker_hierarchy_indices[0])) +fusion_hierarchy = create_fusion_hierarchy(platforms, gnd_sims, tracker_hierarchy_indices, fusion_times, + transition_model, s_prior_state) + +# Run the trackers: + +all_tracks = [[set() 
for _ in level] for level in fusion_hierarchy] +all_detections = [[set() for _ in level] for level in fusion_hierarchy] +root_node = fusion_hierarchy[-1][0] +leaf_trackers = root_node.get_leaf_trackers() + +for leaf_time_and_tracks in zip(*leaf_trackers): + + timestamp = leaf_time_and_tracks[0][0] + leaf_tracks = [x[1] for x in leaf_time_and_tracks] + + print("Time: " + str(timestamp)) + + # Run fusion level trackers + for i_level, level in enumerate(fusion_hierarchy[1:]): + for i_node, node in enumerate(level): + node.process_tracks(timestamp) + + # Get tracks and detections from fusion hierarchy + for i_level, level in enumerate(fusion_hierarchy): + for i_node, node in enumerate(level): + all_tracks[i_level][i_node].update(node.tracker.tracks) + all_detections[i_level][i_node].update(node.detections) + + ntracks_hierarchy = [[len(node.tracks) for node in level] for level in fusion_hierarchy] + print(ntracks_hierarchy) + +truth = ground_truth_simulator.groundtruth_paths + +outdir = "TestData/Large/" + +output_meas(outdir + "outputfile.txt", start_time, platform_positions, all_detections) +output_tracks(outdir + "outputtracks.txt", start_time, all_tracks) +output_truth(outdir + "outputtruth.txt", start_time, truth) + +# Plot results: +for i in range(len(all_tracks)): + # (don't plot the detections because they will have different dimensions) + plot_tracks(all_tracks[i], None, truth, numxy_hierarchy[i], fusion_hierarchy[i]) + #plot_tracks(all_tracks[i], all_detections[i], truth, numxy_hierarchy[i], fusion_hierarchy[i]) + +plt.show() diff --git a/stonesoup/custom/reader/tracklet.py b/stonesoup/custom/reader/tracklet.py index 09392cb79..2e4396a5e 100644 --- a/stonesoup/custom/reader/tracklet.py +++ b/stonesoup/custom/reader/tracklet.py @@ -125,6 +125,8 @@ def get_tracklets_batch(self, alltracks, fuse_times): tracklets.append(tracklets_tmp) return tracklets + # PRH: Replaced the below with states so it more easily generalises + """ def augment_tracklet(self, tracklet, 
track, transition_model, timestamp): track_times = np.array([s.timestamp for s in track]) @@ -215,7 +217,83 @@ def init_tracklet(cls, track, tx_model, fuse_times, sensor_id=None): tracklet = Tracklet(id=track.id, states=states, init_metadata={'sensor_id': sensor_id}) return tracklet + """ + # PRH: new init_tracklet and augment_tracklet functions - should be the same for single and two-state trackers + @classmethod + def init_tracklet(cls, track, tx_model, fuse_times, sensor_id=None): + track_times = np.array([s.timestamp for s in track]) + idx0 = np.flatnonzero(fuse_times >= track_times[0]) + idx1 = np.flatnonzero(fuse_times <= track_times[-1]) + + if not len(idx0) or not len(idx1): + return None + else: + idx0 = idx0[0] + idx1 = idx1[-1] + + states = [] + filtered_times = np.array([s.timestamp for s in track]) + + cnt = 0 + for i in range(idx0, idx1): + start_time = fuse_times[i] + end_time = fuse_times[i + 1] + nupd = np.sum(np.logical_and(track_times > start_time, track_times <= end_time)) + if nupd > 0: + cnt += 1 + # Indices of end-states that are just before the start and end times + ind0 = np.flatnonzero(filtered_times <= start_time)[-1] + ind1 = np.flatnonzero(filtered_times <= end_time)[-1] + + post_mean, post_cov, prior_mean, prior_cov = \ + cls.get_interval_dist(track[ind0:ind1 + 1], tx_model, start_time, end_time) + + prior = TwoStateGaussianStatePrediction(prior_mean, prior_cov, + start_time=start_time, + end_time=end_time) + posterior = TwoStateGaussianStateUpdate(post_mean, post_cov, + hypothesis=None, + start_time=start_time, + end_time=end_time) + + states.append(prior) + states.append(posterior) + + if not cnt: + return None + + tracklet = Tracklet(id=track.id, states=states, init_metadata={'sensor_id': sensor_id}) + + return tracklet + + def augment_tracklet(self, tracklet, track, transition_model, timestamp): + track_times = np.array([s.timestamp for s in track]) + + filtered_times = np.array([s.timestamp for s in track]) + + start_time = 
tracklet.states[-1].timestamp + end_time = timestamp + nupd = np.sum(np.logical_and(track_times > start_time, track_times <= end_time)) + if nupd > 0: + # Indices of end-states that are just before the start and end times + ind0 = np.flatnonzero(filtered_times <= start_time)[-1] + ind1 = np.flatnonzero(filtered_times <= end_time)[-1] + + post_mean, post_cov, prior_mean, prior_cov = \ + self.get_interval_dist(track[ind0:ind1 + 1], transition_model, start_time, end_time) + + prior = TwoStateGaussianStatePrediction(prior_mean, prior_cov, + start_time=start_time, + end_time=end_time) + posterior = TwoStateGaussianStateUpdate(post_mean, post_cov, + hypothesis=None, + start_time=start_time, + end_time=end_time) + tracklet.states.append(prior) + tracklet.states.append(posterior) + + """ @classmethod def get_interval_dist(cls, filtered_means, filtered_covs, filtered_times, states, tx_model, start_time, end_time): @@ -238,6 +316,27 @@ def get_interval_dist(cls, filtered_means, filtered_covs, filtered_times, states t = np.array([start_time, *filtered_times, end_time]) post_mean, post_cov = cls.rts_smoother_endpoints(mn, cv, t, tx_model) + return post_mean, post_cov, prior_mean, prior_cov + """ + + @classmethod + def get_interval_dist(cls, filtered_states, transition_model, start_time, end_time): + + # Get filtered distributions at start and end of interval + predictor = ExtendedKalmanPredictor(transition_model) + + start_state = predictor.predict(filtered_states[0], start_time) + end_state = predictor.predict(filtered_states[-1], end_time) + + # Predict prior mean + prior_mean, prior_cov = predict_state_to_two_state(start_state.mean, start_state.covar, transition_model, + end_time - start_time) + + mn = np.concatenate([start_state.mean] + [x.mean for x in filtered_states[1:-1]] + [end_state.mean], 1) + cv = np.stack([start_state.covar] + [x.covar for x in filtered_states[1:-1]] + [end_state.covar], 2) + t = [start_time] + [x.timestamp for x in filtered_states[1:-1]] + 
[end_time] + post_mean, post_cov = cls.rts_smoother_endpoints(mn, cv, t, transition_model) + return post_mean, post_cov, prior_mean, prior_cov @classmethod @@ -268,7 +367,6 @@ def rts_smoother_endpoints(cls, filtered_means, filtered_covs, times, tx_model): joint_smoothed_cov = F2 @ joint_smoothed_cov @ F2.T + Omega2 return joint_smoothed_mean, joint_smoothed_cov - class PseudoMeasExtractor(Base, BufferedGenerator): tracklet_extractor: TrackletExtractor = Property(doc='The tracket extractor', default=None) target_state_dim: int = Property(doc='The target state dim', default=None) @@ -482,3 +580,87 @@ def get_pseudomeasurement(self, mu1, C1, mu2, C2): return H, z, R, evals return H, z, R, evals + +#======================================================================================================================= + +# PRH: Added code + +class TrackletExtractorTwoState(TrackletExtractor): + def __init__(self, *args, **kwargs): + super(TrackletExtractor, self).__init__(*args, **kwargs) + self._tracklets = [] + self._fuse_times = [] + + @classmethod + def get_interval_dist(cls, filtered_states, transition_model, start_time, end_time): + """ + Get the prior (using measurements up to start_time) and posterior (using measurements up to end_time) + from filtered_states. 
+        Currently, we assume that the end time of the first state is the start time and the end time of the last state
+        is the end time
+        """
+
+        # Get filtered distributions at start and end of interval
+        predictor = ExtendedKalmanPredictor(transition_model)
+
+        # PRH: Assume first and last state times correspond with fuse times for now
+        # TODO: Generalise so start_time could be between start_time and end_time of filtered_states[0] and \
+        #  end_time could be between start_time and end_time of filtered_states[-1]
+        assert (filtered_states[0].end_time == start_time)
+        assert (filtered_states[-1].end_time == end_time)
+
+        # Dimension of a single state (as opposed to a two_state)
+        one_statedim = transition_model.ndim
+
+        # Get single state mean and covariance at start of interval
+        # (to generalise, we could get the mean and covariance at a point in the interval)
+        start_single_state_mean = filtered_states[0].mean[one_statedim:]
+        start_single_state_covar = filtered_states[0].covar[one_statedim:, one_statedim:]
+
+        # Predict prior mean over the time interval
+        prior_mean, prior_cov = predict_state_to_two_state(start_single_state_mean,
+                                                           start_single_state_covar,
+                                                           transition_model, end_time - start_time)
+
+        # Get mean and covariance of the last period up to end_time
+        # (to generalise, could get the two-state distribution between filtered_states[-1].start_time and end_time)
+        end_two_state_mean = filtered_states[-1].mean
+        end_two_state_covar = filtered_states[-1].covar
+
+        # Get posterior mean by running smoother (Note that we ignore filtered_states[0] since this covers an interval
+        # before start_time)
+        # TODO: Generalise to have a two-state distribution at the start if start_time ~= filtered_states[0].end_time
+        mn = np.concatenate([x.mean for x in filtered_states[1:-1]] + [end_two_state_mean], 1)
+        cv = np.stack([x.covar for x in filtered_states[1:-1]] + [end_two_state_covar], 2)
+        post_mean, post_cov = cls._rts_smoother_endpoints_two_state(mn, cv)
+
+        return
post_mean, post_cov, prior_mean, prior_cov + + @classmethod + def _rts_smoother_endpoints_two_state(cls, filtered_means, filtered_covs): + """ + Given joint distributions (x[k], x[k+1]) for k=0,...,T-1, compute the joint distribution (x[0], x[T]) + """ + two_statedim, ntimesteps = filtered_means.shape + statedim = two_statedim // 2 + smoothed_mean, smoothed_cov = filtered_means[:,-1], filtered_covs[:,:,-1] + + for k in reversed(range(ntimesteps - 1)): + + mu_x = filtered_means[:statedim, k] + mu_y = filtered_means[statedim:, k] + Pxx = filtered_covs[:statedim, :statedim, k] + Pyy = filtered_covs[statedim:, statedim:, k] + Pxy = filtered_covs[:statedim, statedim:, k] + + F = Pxy @ inv(Pyy) + b = mu_x - F @ mu_y + Omega = Pxx - Pxy @ inv(Pyy) @ Pxy.T + + F2 = block_diag(F, np.eye(statedim)) + b2 = np.concatenate((b, np.zeros((statedim,)))) + Omega2 = block_diag(Omega, np.zeros((statedim, statedim))) + smoothed_mean = F2 @ smoothed_mean + b2 + smoothed_cov = F2 @ smoothed_cov @ F2.T + Omega2 + + return smoothed_mean, smoothed_cov diff --git a/stonesoup/custom/tracker/fuse.py b/stonesoup/custom/tracker/fuse.py index b978911a3..6257ff5e4 100644 --- a/stonesoup/custom/tracker/fuse.py +++ b/stonesoup/custom/tracker/fuse.py @@ -268,6 +268,7 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._tracks = set() self._current_end_time = None + self._scans = [] # PRH: Added to get scans @property def tracks(self): @@ -284,6 +285,9 @@ def process_tracks(self, alltracks, timestamp): for scan in scans: self._tracks, self._current_end_time = self.process_scan(scan, self.tracks, self._current_end_time) + + self._scans = scans#.update(scans) # PRH: Added to get scans + return self.tracks def process_scans(self, scans): From 6d71783c20e9765bd55b1bbbad634d9d1cdf1ce1 Mon Sep 17 00:00:00 2001 From: prh Date: Tue, 5 Nov 2024 13:02:06 +0000 Subject: [PATCH 85/87] Updated example comments --- .../prh_example/scratch_hierarchical_large.py | 32 ++++++++++++++++++- 1 
file changed, 31 insertions(+), 1 deletion(-) diff --git a/examples/prh_example/scratch_hierarchical_large.py b/examples/prh_example/scratch_hierarchical_large.py index d0a9ae57f..d6b19ac8f 100644 --- a/examples/prh_example/scratch_hierarchical_large.py +++ b/examples/prh_example/scratch_hierarchical_large.py @@ -1,4 +1,32 @@ -# Generate a multi-sensor / multi-target simulation and tracking scenario +""" + Generate a multi-sensor / multi-target simulation and tracking scenario + The scenario simulates 8 radar sensors positioned on a 4x2 grid of overlapping FOVs, organised + in 4 hierarchical levels of 8 trackers, 4 of 2 each, 2 of 4 each and 1 of all 8 sensors passed + up the fusion tree. + + |----------------| + | Top Tracker | + |----------------| + | + -------------------------------------------- + | | + |-----------------| |-----------------| + | Fuse Tracker 2a | | Fuse Tracker 2b | + |-----------------| |-----------------| + | | + ------------------- ------------------- + | | | | +|------------------| |------------------| |------------------| |------------------| +| Fuse Tracker 1a | | Fuse Tracker 1b | | Fuse Tracker 1c | | Fuse Tracker 1d | +|------------------| |------------------| |------------------| |------------------| + | | | | | | | | +|-------| |-------| |-------| |-------| |-------| |-------| |-------| |-------| +| Leaf | | Leaf | | Leaf | | Leaf | | Leaf | | Leaf | | Leaf | | Leaf | +|Sens. 1| |Sens. 2| |Sens. 3| |Sens. 4| |Sens. 5| |Sens. 6| |Sens. 7| |Sens. 
8| +|-------| |-------| |-------| |-------| |-------| |-------| |-------| |-------| +""" + + import matplotlib from itertools import tee @@ -344,6 +372,8 @@ def create_fusion_hierarchy(platforms, gnd_sims, tracker_hierarchy_indices, fusi for i in range(len(all_tracks)): # (don't plot the detections because they will have different dimensions) plot_tracks(all_tracks[i], None, truth, numxy_hierarchy[i], fusion_hierarchy[i]) + fig = plt.gcf() + fig.suptitle(f'Level {i+1}') #plot_tracks(all_tracks[i], all_detections[i], truth, numxy_hierarchy[i], fusion_hierarchy[i]) plt.show() From 12faf331f904625bd59efa95a5f9e57edf21ead5 Mon Sep 17 00:00:00 2001 From: prh Date: Tue, 5 Nov 2024 13:06:08 +0000 Subject: [PATCH 86/87] Removed unused files --- examples/reactive-isr/camera_actions.py | 85 ---- examples/reactive-isr/camera_lat_lon_uav.py | 112 ----- .../comms_proc/comms_proc_example.py | 322 --------------- examples/reactive-isr/comms_proc/evaluator.py | 190 --------- examples/reactive-isr/comms_proc/utils.py | 262 ------------ .../hierarchical-tracking-example.py | 385 ------------------ .../movable_uav_camera_actions.py | 88 ---- .../reactive-isr/multi-sonar-ehm-fuse-3.py | 254 ------------ examples/reactive-isr/risr-demo.py | 318 --------------- examples/reactive-isr/sensor_management.py | 240 ----------- examples/reactive-isr/smcphd_init-example.py | 297 -------------- .../smcphd_init-sm-example-movable.py | 302 -------------- .../reactive-isr/smcphd_init-sm-example.py | 331 --------------- examples/smcphd-example.py | 160 -------- 14 files changed, 3346 deletions(-) delete mode 100644 examples/reactive-isr/camera_actions.py delete mode 100644 examples/reactive-isr/camera_lat_lon_uav.py delete mode 100644 examples/reactive-isr/comms_proc/comms_proc_example.py delete mode 100644 examples/reactive-isr/comms_proc/evaluator.py delete mode 100644 examples/reactive-isr/comms_proc/utils.py delete mode 100644 examples/reactive-isr/hierarchical-tracking-example.py delete mode 
100644 examples/reactive-isr/movable_uav_camera_actions.py delete mode 100644 examples/reactive-isr/multi-sonar-ehm-fuse-3.py delete mode 100644 examples/reactive-isr/risr-demo.py delete mode 100644 examples/reactive-isr/sensor_management.py delete mode 100644 examples/reactive-isr/smcphd_init-example.py delete mode 100644 examples/reactive-isr/smcphd_init-sm-example-movable.py delete mode 100644 examples/reactive-isr/smcphd_init-sm-example.py delete mode 100644 examples/smcphd-example.py diff --git a/examples/reactive-isr/camera_actions.py b/examples/reactive-isr/camera_actions.py deleted file mode 100644 index cbffb0d39..000000000 --- a/examples/reactive-isr/camera_actions.py +++ /dev/null @@ -1,85 +0,0 @@ -import itertools -from datetime import datetime - -import numpy as np - -from stonesoup.custom.sensor.pan_tilt import PanTiltUAVCamera -from stonesoup.types.angle import Angle -from stonesoup.types.array import StateVector - -# Specify the rotation offset of the camera -# In this case we rotate the camera around the Y axis by 90 degrees, meaning that the camera is -# pointing downwards -# NOTE: Panning moves the footprint of the camera along the Y axis, and tilting moves the -# footprint along the X axis -rotation_offset = StateVector([Angle(0), Angle(-np.pi / 2), Angle(0)]) # Camera rotation offset - -# Specify the initial pan and tilt of the camera -pan = Angle(0) -tilt = Angle(0) - -# The camera is positioned at x=10, y=10, z=100 -position = StateVector([10., 10., 100.]) - -# We can also set the resolution of each actionable property. The resolution is used when -# discretising the action space. In this case, we set the resolution of both the pan and tilt to -# 10 degrees, meaning that the action space will contain values in the range [-pi/2, pi/2] with -# a step size of 10 degrees for each property. 
-# NOTE: Currently, the current state of each property is appended to the action space, meaning -# that the action space will contain 19 values for each property (not 18). In the current example, -# this means that the action for 0 degrees will be duplicated. This is a known "feature" and will -# be fixed in a future release. -resolutions = {'pan': np.radians(10), 'tilt': np.radians(10)} - -# Create a camera object -sensor = PanTiltUAVCamera(ndim_state=6, mapping=[0, 2, 4], - noise_covar=np.diag([0.05, 0.05, 0.05]), - fov_angle=[np.radians(15), np.radians(10)], - rotation_offset=rotation_offset, - pan=pan, tilt=pan, - resolutions=resolutions, - position=position) - -# Set a query time -timestamp = datetime.now() - -# Calling sensor.actions() will return a set of action generators. Each action generator is an -# object that contains all the actions that can be performed by the sensor at a given time. In this -# case, the sensor can perform two actions: pan and tilt. Hence, the result of sensor.actions() is -# a set of two action generators: one for panning and one for tilting. -action_generators = sensor.actions(timestamp) - -# Let's look at the action generators -# The first action generator is for panning. We can extract the action generator by searching for -# the action generator that controls the 'pan'. So, the following line of code simply filters the -# action generators that control the 'pan' of the camera (the for-if statement) and then selects -# the first action generator (since there is only one), via the next() statement. -pan_action_generator = next(ag for ag in action_generators if ag.attribute == 'pan') -# The second action generator is for tilting. We can extract the action generator by searching for -# the action generator that controls the 'tilt'. -tilt_action_generator = next(ag for ag in action_generators if ag.attribute == 'tilt') - -# We can now look at the actions that can be performed by the action generators. 
The action -# generators provide a Python "iterator" interface. This means that we can iterate over the action -# generators to get the actions that can be performed (e.g. with a "for" loop). Instead, we can -# also use the list() function to get a list of all the actions that can be performed. -possible_pan_actions = list(pan_action_generator) -possible_tilt_actions = list(tilt_action_generator) - -# Each action has a "target_value" property that specifies the value that the property will be -# set to if the action is performed. The following line of code prints the target values of the -# 10th action for pan and tilt. -print(possible_pan_actions[9].target_value) -print(possible_tilt_actions[9].target_value) - -# To get all the possible combinations of actions, we can use the itertools.product() function. -possible_action_combinations = list(itertools.product(possible_pan_actions, possible_tilt_actions)) - -# Let us now select the 10th action combination and task the sensor to perform the action. -chosen_action_combination = possible_action_combinations[9] -sensor.add_actions(chosen_action_combination) -sensor.act(timestamp) - -# The statement below is just an extra statement to allow us to breakpoint the code and inspect -# the possible actions. 
-end = True \ No newline at end of file diff --git a/examples/reactive-isr/camera_lat_lon_uav.py b/examples/reactive-isr/camera_lat_lon_uav.py deleted file mode 100644 index 4ba12e4ac..000000000 --- a/examples/reactive-isr/camera_lat_lon_uav.py +++ /dev/null @@ -1,112 +0,0 @@ -from datetime import datetime, timedelta - -import numpy as np -from matplotlib import pyplot as plt -from matplotlib.patches import Rectangle - -from stonesoup.custom.functions import get_camera_footprint -from stonesoup.custom.sensor.action.pan_tilt import ChangePanTiltAction -from stonesoup.custom.sensor.pan_tilt import PanTiltUAVCamera -from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \ - ConstantVelocity -from stonesoup.platform import FixedPlatform -from stonesoup.types.angle import Bearing, Elevation, Angle -from stonesoup.types.array import StateVector -from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState -from stonesoup.types.state import State - - - -# Parameters -# ========== -start_time = datetime.now() # Simulation start time -num_iter = 100 # Number of simulation steps -rotation_offset = StateVector([Angle(0), Angle(-np.pi/2), Angle(0)]) # Camera rotation offset -pan_tilt = StateVector([Angle(0), Angle(-np.pi/32)]) # Camera pan and tilt -camera = PanTiltUAVCamera(ndim_state=6, mapping=[0, 2, 4], noise_covar=np.diag([0.001, 0.001, 0.001]), - fov_angle=np.radians(10), rotation_offset=rotation_offset, pan_tilt=pan_tilt) -platform = FixedPlatform(position_mapping=(0, 2, 4), orientation=StateVector([0, 0, 0]), - states=[State([10., 0., 10., 0., 100., 0], timestamp=start_time)], - sensors=[camera]) - - - -# Models -# ====== -# Transition model -transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.01), - ConstantVelocity(0.01), - ConstantVelocity(0.0)]) - -# Simulate Groundtruth -# ==================== -gnd_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), - 
ConstantVelocity(0.), - ConstantVelocity(0.)]) -truths = set() -truth = GroundTruthPath([GroundTruthState([0, 0.2, 0, 0.2, 0, 0], timestamp=start_time)]) -for k in range(1, num_iter + 1): - truth.append(GroundTruthState( - gnd_transition_model.function(truth[k - 1], noise=True, time_interval=timedelta(seconds=1)), - timestamp=start_time + timedelta(seconds=k))) -truths.add(truth) - -truth = GroundTruthPath([GroundTruthState([0, 0.2, 20, -0.2, 0, 0], timestamp=start_time)]) -for k in range(1, num_iter + 1): - truth.append(GroundTruthState( - gnd_transition_model.function(truth[k - 1], noise=True, time_interval=timedelta(seconds=1)), - timestamp=start_time + timedelta(seconds=k))) -truths.add(truth) - -timestamps = [] -for k in range(1, num_iter + 1): - timestamps.append(start_time + timedelta(seconds=k)) - -# Simulate measurements -# ===================== -scans = [] - -# Schedule an action to change the pan and tilt of the camera after 30 seconds -generator = next(g for g in camera.actions(start_time + timedelta(seconds=30))) -action = generator.action_from_value(StateVector([Angle(0), Angle(0)])) - -camera.add_actions([action]) -fig = plt.figure(figsize=(10, 6)) -ax = fig.add_subplot(1, 1, 1) -for k in range(num_iter): - timestamp = timestamps[k] - camera.act(timestamp) - truth_states = [truth[k] for truth in truths] - measurement_set = camera.measure(truth_states, timestamp=timestamp) - scan = (timestamp, measurement_set) - scans.append(scan) - ax.cla() - ax.set_xlabel("$x$") - ax.set_ylabel("$y$") - ax.set_xlim(-10, 30) - ax.set_ylim(-10, 30) - ax.set_aspect('equal') - - # Fov ranges (min, center, max) - xmin, xmax, ymin, ymax = get_camera_footprint(camera) - - ax.add_patch(Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, facecolor='none', edgecolor='r')) - # x, y = pol2cart(100, camera.orientation[2] - camera.fov_angle / 2) - # ax.plot([0, x], [0, y], 'r-', label="Camera FOV") - # x, y = pol2cart(100, camera.orientation[2] + camera.fov_angle / 2) - # ax.plot([0, 
x], [0, y], 'r-') - for truth in truths: - data = np.array([state.state_vector for state in truth[:k + 1]]) - ax.plot(data[:, 0], data[:, 2], '--', label="Ground truth") - detections = scan[1] - for detection in detections: - # x, y = pol2cart(100, detection.state_vector[1] + camera.orientation[2]) - # ax.plot([0, x], [0, y], 'b-') - ax.plot(detection.state_vector[0], detection.state_vector[1], 'bx') - plt.pause(0.1) - -# # Plot results -# # ============ -# for k, scan in enumerate(scans): -# -# a = 2 \ No newline at end of file diff --git a/examples/reactive-isr/comms_proc/comms_proc_example.py b/examples/reactive-isr/comms_proc/comms_proc_example.py deleted file mode 100644 index 279054e7a..000000000 --- a/examples/reactive-isr/comms_proc/comms_proc_example.py +++ /dev/null @@ -1,322 +0,0 @@ -from copy import copy, deepcopy -from datetime import datetime, timedelta -from uuid import uuid4 - -import warnings - -from matplotlib import pyplot as plt -from ordered_set import OrderedSet - -from stonesoup.custom.functions.rollout import enumerate_action_configs, extract_rois, get_sensor, \ - queue_actions, ActionTupleType -from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState - -warnings.simplefilter(action='ignore', category=FutureWarning) -warnings.simplefilter(action='ignore', category=RuntimeWarning) - -import numpy as np -from matplotlib.path import Path -from shapely import unary_union - -from reactive_isr_core.data import Node, AvailableAlgorithms, Algorithm, ProcessingStatistics, \ - TraversalTime, Edge, NetworkTopology, Availability, Storage, ImageStore, ProcessingAction, \ - ActionList, CommunicateAction, CollectAction, ActionStatus, Image, GeoLocation -from stonesoup.custom.sensor.movable import MovableUAVCamera -from stonesoup.custom.tracker import SMCPHD_JIPDA, SMCPHD_IGNN -from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \ - ConstantVelocity -from stonesoup.types.array import StateVector -from 
stonesoup.types.detection import TrueDetection -from stonesoup.types.numeric import Probability -from stonesoup.types.state import GaussianState, ParticleState - -from evaluator import CommsAndProcEvaluator -from utils import setup_network, setup_rfis, plot_cov_ellipse - - -def _prob_detect_func(fovs, prob_detect): - """Closure to return the probability of detection function for a given environment scan""" - # Get the union of all field of views - fovs_union = unary_union(fovs) - if fovs_union.geom_type == 'MultiPolygon': - fovs = [poly for poly in fovs_union] - else: - fovs = [fovs_union] - - paths = [Path(poly.boundary.coords) for poly in fovs] - - # Probability of detection nested function - def prob_detect_func(state): - for path_p in paths: - if isinstance(state, ParticleState): - prob_detect_arr = np.full((len(state),), Probability(0.1)) - points = state.state_vector[[0, 2], :].T - inside_points = path_p.contains_points(points) - prob_detect_arr[inside_points] = prob_detect - return prob_detect_arr - else: - points = state.state_vector[[0, 2], :].T - return prob_detect if np.alltrue(path_p.contains_points(points)) \ - else Probability(0) - - return prob_detect_func - -seed = 2001 -np.random.seed(seed) - -# Parameters -# ========== -start_time = datetime.now() # Simulation start time -prob_detect = Probability(.9) # 90% chance of detection. 
-prob_death = Probability(0.01) # Probability of death -prob_birth = Probability(0.1) # Probability of birth -prob_survive = Probability(0.99) # Probability of survival -birth_rate = 0.02 # Birth-rate (Mean number of new targets per scan) -clutter_rate = 10 # Clutter-rate (Mean number of clutter measurements per scan) -surveillance_region = [[-5, -2], # The surveillance region - [50.1, 53.2]] -surveillance_area = (surveillance_region[0][1] - surveillance_region[0][0]) \ - * (surveillance_region[1][1] - surveillance_region[1][0]) # Surveillance volume -clutter_intensity = clutter_rate / surveillance_area # Clutter intensity per unit volume/area -birth_density = GaussianState( - StateVector(np.array([-2.5, 0.0, 51, 0.0, 0.0, 0.0])), - np.diag([3. ** 2, .01 ** 2, 3. ** 2, .01 ** 2, 0., 0.])) # Birth density -birth_scheme = 'mixture' # Birth scheme. Possible values are 'expansion' and 'mixture' -num_particles = 2 ** 8 # Number of particles used by the PHD filter -num_iter = 400 # Number of simulation steps -PLOT = True # Set [True | False] to turn plotting [ON | OFF] -MANUAL_RFI = True # Set [True | False] to turn manual RFI [ON | OFF] -colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k'] # Colors for plotting - -sensor_position = StateVector([-4.5, 51.5, 100.]) -network_topology, assets = setup_network(sensor_position, start_time) -image_store = ImageStore( - images=[] -) - -# Ongoing actions is a dictionary of lists of actions. The keys are the action types and the values -# are the lists of actions of that type. The action types are 'collect', 'comms' and 'proc'. -# This dictionary is used to keep track of ongoing actions. 
-ongoing_actions = { - 'collect': [], - 'comms': [], - 'proc': [], -} - -rfis = setup_rfis(start_time, num_rois=2, time_varying=True) - -# Simulate Groundtruth -# ==================== -gnd_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), - ConstantVelocity(0.), - ConstantVelocity(0.)]) - -timestamps = [] -for k in range(0, num_iter + 1, 2): - timestamps.append(start_time + timedelta(seconds=k)) -truths = set() -rois = extract_rois(rfis) -for roi in rois: - lon_min, lat_min = roi.corners[0].longitude, roi.corners[0].latitude - lon_max, lat_max = roi.corners[1].longitude, roi.corners[1].latitude - for i in range(2): - lat = np.random.uniform(lat_min, lat_max) - lon = np.random.uniform(lon_min, lon_max) - truth = GroundTruthPath([GroundTruthState([lon, 0.00, lat, 0.00, 0, 0], - timestamp=start_time)]) - for timestamp in timestamps[1:]: - truth.append(GroundTruthState( - gnd_transition_model.function(truth[-1], noise=False, - time_interval=timedelta(seconds=1)), - timestamp=timestamp)) - truths.add(truth) - -# Plot groundtruth, sensors and rois -# ============================ -# fig = plt.figure(figsize=(10, 6)) -# ax = fig.add_subplot(111) -# ax.set_xlim(surveillance_region[0][0]-1, surveillance_region[0][1]+1) -# ax.set_ylim(surveillance_region[1][0], surveillance_region[1][1]) -# ax.set_xlabel('Longitude') -# ax.set_ylabel('Latitude') -# ax.set_title('Groundtruth and initial sensor locations') -# ax.set_aspect('equal') -# for i, track in enumerate(truths): -# ax.plot([state.state_vector[0] for state in track], -# [state.state_vector[2] for state in track], -# color=colors[i], linestyle='--', linewidth=2, label=f'Truth {i+1}') -# ax.plot(track[-1].state_vector[0], track[-1].state_vector[2], -# color=colors[i], marker='o', markersize=5) -# asset = assets.assets[0] -# sensor = get_sensor(asset.asset_status.location, asset.asset_description.fov_radius) -# footprint = sensor.footprint -# x, y = footprint.exterior.xy -# ax.plot(x, y, 
color='r', label=f'Sensor') -# for i, roi in enumerate(rois): -# lon_min, lat_min = roi.corners[0].longitude, roi.corners[0].latitude -# lon_max, lat_max = roi.corners[1].longitude, roi.corners[1].latitude -# ax.plot([lon_min, lon_max, lon_max, lon_min, lon_min], -# [lat_min, lat_min, lat_max, lat_max, lat_min], -# color='k', linestyle='--', linewidth=0.1, label=f'ROI {i+1}') -# ax.legend() -# plt.show() - -# Tracking Components -# =================== -# Transition model -transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.000001), - ConstantVelocity(0.000001), - ConstantVelocity(0.000001)]) - -# Main tracker -tracker = SMCPHD_JIPDA(birth_density=birth_density, transition_model=transition_model, - measurement_model=None, prob_detection=prob_detect, - prob_death=prob_death, prob_birth=prob_birth, - birth_rate=birth_rate, clutter_intensity=clutter_intensity, - num_samples=num_particles, birth_scheme=birth_scheme, - start_time=start_time) - -# Evaluator tracker -eval_tracker = SMCPHD_IGNN(birth_density=birth_density, transition_model=transition_model, - measurement_model=None, prob_detection=prob_detect, - prob_death=prob_death, prob_birth=prob_birth, - birth_rate=birth_rate, clutter_intensity=clutter_intensity, - num_samples=num_particles, birth_scheme=birth_scheme, - start_time=start_time) - - -# Evaluator -num_samples = 40 # Number of monte-carlo samples for Monte-Carlo Rollout -num_timesteps = 5 # Number of timesteps for Monte-Carlo Rollout -interval = timedelta(seconds=1) # Interval between timesteps for Monte-Carlo Rollout -evaluator = CommsAndProcEvaluator( - tracker=eval_tracker, - num_timesteps=num_timesteps, - interval=interval, - num_samples=num_samples, -) - -def optimise(tracks, image_store, network_topology, assets, rfis, ongoing_actions, timestamp): - # Get all possible action configurations - configs = enumerate_action_configs(image_store, network_topology, assets, rfis, - ongoing_actions, timestamp) - - # For each action 
configuration - rewards = [] - for config in configs: - # Evaluate the action configuration - reward = evaluator(config, tracks, image_store, network_topology, assets, rfis, - ongoing_actions, timestamp) - - rewards.append(reward) - - print(f'\nRewards: \n--------------------------------------------------------') - for i, config in enumerate(configs): - print(f'{rewards[i]:.2f} - {config}') - # Find the best action configuration - max_reward = np.max(rewards) - best_inds = np.argwhere(rewards == max_reward).flatten() - best_ind = np.random.choice(best_inds) - best_config = configs[best_ind] - return best_config - - -if PLOT: - fig = plt.figure(figsize=(10, 6)) - ax = fig.add_subplot(111) - -tracks = set() -processed_images = list() -for k, timestamp in enumerate(timestamps): - print(f'\n\nIter: {k+1} - Timestamp: {timestamp}\n ===========================================') - truth_states = OrderedSet(truth[timestamp] for truth in truths) - - # Update ongoing actions - completed_comms_actions = [] - completed_proc_actions = [] - for comms_action in ongoing_actions['comms']: - if timestamp >= comms_action.end_time: - completed_comms_actions.append(comms_action) - comms_action.image.node_id = comms_action.target_node_id - for proc_action in ongoing_actions['proc']: - if timestamp >= proc_action.end_time: - completed_proc_actions.append(proc_action) - for comms_action in completed_comms_actions: - ongoing_actions['comms'].remove(comms_action) - for proc_action in completed_proc_actions: - ongoing_actions['proc'].remove(proc_action) - try: - image_store.images.remove(proc_action.image) - except ValueError: - pass - - # Optimise actions - chosen_actions = optimise(tracks, image_store, network_topology, assets, rfis, - ongoing_actions, timestamp) - print(f'Chosen actions: \n--------------------------------------------------------') - print(chosen_actions) - - - # Perform chosen actions - queue_actions(chosen_actions, image_store, ongoing_actions) - sensor_action = 
chosen_actions[0] - if sensor_action: - coll_action = sensor_action[0] - sensor = get_sensor(coll_action.image.location, coll_action.image.fov_radius) - proc_actions = [action.proc_action for action in chosen_actions if action] - proc_actions.sort(key=lambda x: x.image.collection_time) - for i, proc_action in enumerate(proc_actions): - if proc_action.image in processed_images: - continue - else: - processed_images.append(proc_action.image) - sub_sensor = get_sensor(proc_action.image.location, proc_action.image.fov_radius, - proc_action.algorithm.prob_detection, - proc_action.algorithm.false_alarm_density) - p = sub_sensor.footprint - tracker.prob_detect = _prob_detect_func([p], proc_action.algorithm.prob_detection) - tracker.clutter_intensity = proc_action.algorithm.false_alarm_density/p.area - - # Observe the ground truth - detections = sensor.measure(truth_states, noise=True) - detections = list(detections) - # Track using main tracker - tracks = tracker.track(detections, proc_action.image.collection_time) - - # Print debug info - tracks = list(tracks) - print(f'\n Action {i + 1} --------------------------------------------------------') - for track in tracks: - print(f'Track {track.id} - Exist prob: {track.exist_prob}') - - if PLOT: - ax.cla() - ax.set_xlim(surveillance_region[0][0] - 1, surveillance_region[0][1] + 1) - ax.set_ylim(surveillance_region[1][0], surveillance_region[1][1]) - ax.set_xlabel('Longitude') - ax.set_ylabel('Latitude') - ax.set_title('Groundtruth and initial sensor locations') - ax.set_aspect('equal') - for i, track in enumerate(truths): - ax.plot([state.state_vector[0] for state in track], - [state.state_vector[2] for state in track], - color=colors[i], linestyle='--', linewidth=2, label=f'Truth {i + 1}') - ax.plot(track[-1].state_vector[0], track[-1].state_vector[2], - color=colors[i], marker='o', markersize=5) - - for track in tracks: - plot_cov_ellipse(track.covar[[0, 2], :][:, [0, 2]], track.state_vector[[0, 2], :], - edgecolor='r', 
facecolor='none', ax=ax) - ax.plot(track.state_vector[0, 0], track.state_vector[2, 0], 'rx', markersize=5) - - footprint = sensor.footprint - x, y = footprint.exterior.xy - ax.plot(x, y, color='r', label=f'Sensor') - for i, roi in enumerate(rois): - lon_min, lat_min = roi.corners[0].longitude, roi.corners[0].latitude - lon_max, lat_max = roi.corners[1].longitude, roi.corners[1].latitude - ax.plot([lon_min, lon_max, lon_max, lon_min, lon_min], - [lat_min, lat_min, lat_max, lat_max, lat_min], - color='k', linestyle='--', linewidth=0.1, label=f'ROI {i + 1}') - ax.legend() - plt.pause(0.1) \ No newline at end of file diff --git a/examples/reactive-isr/comms_proc/evaluator.py b/examples/reactive-isr/comms_proc/evaluator.py deleted file mode 100644 index 27a78c9e5..000000000 --- a/examples/reactive-isr/comms_proc/evaluator.py +++ /dev/null @@ -1,190 +0,0 @@ -import copy -import datetime -from typing import List, Any, Tuple, Set -import itertools as it -from uuid import uuid4 - -import numpy as np -from matplotlib.path import Path -from scipy.stats import poisson -from shapely import unary_union - -from reactive_isr_core.data import ImageStore, NetworkTopology, AssetList, RFI -from stonesoup.base import Base, Property -from stonesoup.custom.functions import eval_rfi_new -from stonesoup.custom.functions.rollout import CollectionAction, CommsAction, ProcAction, \ - rollout_actions, proc_actions_from_config_sequence, get_sensor, simulate_new_tracks -from stonesoup.custom.tracker import SMCPHD_JIPDA -from stonesoup.functions import gm_reduce_single -from stonesoup.tracker import Tracker -from stonesoup.types.array import StateVectors -from stonesoup.types.numeric import Probability -from stonesoup.types.state import ParticleState -from stonesoup.types.track import Track -from stonesoup.types.update import GaussianStateUpdate - - -class CommsAndProcEvaluator(Base): - """A reward function which calculates the potential reduction in the uncertainty of track estimates - if a 
particular action is taken by a sensor or group of sensors. - - Given a configuration of sensors and actions, a metric is calculated for the potential - reduction in the uncertainty of the tracks that would occur if the sensing configuration - were used to make an observation. A larger value indicates a greater reduction in - uncertainty. - """ - - tracker: Tracker = Property(doc="Tracker used to track the tracks") - num_timesteps: int = Property(doc="Number of timesteps to rollout") - interval: datetime.timedelta = Property(doc="Interval between timesteps", - default=datetime.timedelta(seconds=1)) - num_samples: int = Property(doc="Number of samples to take for each timestep", default=30) - prob_survive: Probability = Property(doc="Probability of survival", default=Probability(0.99)) - use_variance: bool = Property(doc="Use variance in prioritisation", default=False) - - def __call__(self, config: Tuple[CollectionAction, CommsAction, ProcAction], tracks: Set[Track], - image_store: ImageStore, network_topology: NetworkTopology, assets: AssetList, - rfis: List[RFI], ongoing_actions, timestamp, *args, **kwargs): - - if not len(rfis): - return 0 - - # Rollout actions - config_seq_list = rollout_actions(config, image_store, network_topology, assets, rfis, - ongoing_actions, self.num_samples, self.num_timesteps, - self.interval, timestamp) - rewards = [] - # Evaluate each rollout - for config_seq in config_seq_list: - reward = 0 - # Get all processing actions - proc_actions = proc_actions_from_config_sequence(config_seq) - # Sort processing actions by image collection time - sorted_proc_actions = sorted(proc_actions, key=lambda x: x.image.collection_time) - tracks_copy = set(copy.copy(track) for track in tracks) - - # For each processing action - for i, proc_action in enumerate(sorted_proc_actions): - # Get the image and algorithm - image = proc_action.image - algorithm = proc_action.algorithm - - # The current time is the image collection time - current_time = 
image.collection_time - - # Create a sensor - sensor = get_sensor(image.location, image.fov_radius, algorithm.prob_detection, - algorithm.false_alarm_density) - - # Predict tracks to current time - predicted_tracks = set() - for track in tracks_copy: - predicted_track = copy.copy(track) - predicted_track.append(self.tracker._predictor.predict(track, timestamp=current_time)) - predicted_tracks.add(predicted_track) - - # Simulate new tracks - new_tracks = simulate_new_tracks(sensor, current_time, self.tracker.birth_density) - tracks_copy = set(tracks_copy) - tracks_copy |= new_tracks - predicted_tracks |= new_tracks - - # Use the sensor to generate detections - detections = {detection - for detection in sensor.measure(predicted_tracks, noise=False, - timestamp=current_time)} - # Configure the tracker's probability of detection based on the image footprint - p = sensor.footprint - self.tracker.prob_detect = _prob_detect_func([p], - proc_action.algorithm.prob_detection) - self.tracker.clutter_intensity = proc_action.algorithm.false_alarm_density/p.area - - # Update tracks with detections - tracks_copy = self._update_tracks(tracks_copy, detections, current_time) - - for rfi in rfis: - reward += eval_rfi_new(rfi, tracks_copy, use_variance=self.use_variance, - timestamp=proc_action.end_time) - rewards.append(reward) - return np.max(rewards) - - def _update_tracks(self, tracks, detections, timestamp): - tracks = list(tracks) - hypotheses = self.tracker._associator.generate_hypotheses(tracks, detections, timestamp) - associations = self.tracker._associator.associate(tracks, detections, - timestamp, hypotheses=hypotheses) - for track, multihypothesis in associations.items(): - if isinstance(self.tracker, SMCPHD_JIPDA): - # calculate each Track's state as a Gaussian Mixture of - # its possible associations with each detection, then - # reduce the Mixture to a single Gaussian State - posterior_states = [] - posterior_state_weights = [] - for hypothesis in multihypothesis: - 
posterior_state_weights.append(hypothesis.probability) - if hypothesis: - posterior_states.append(self.tracker._updater.update(hypothesis)) - else: - posterior_states.append(hypothesis.prediction) - - # Merge/Collapse to single Gaussian - means = StateVectors([state.state_vector for state in posterior_states]) - covars = np.stack([state.covar for state in posterior_states], axis=2) - weights = np.asarray(posterior_state_weights) - - post_mean, post_covar = gm_reduce_single(means, covars, weights) - - track.append(GaussianStateUpdate( - np.array(post_mean), np.array(post_covar), - multihypothesis, - multihypothesis[0].prediction.timestamp)) - else: - timestamp_m1 = track.timestamp \ - if self.tracker.predict else track[-2].timestamp - time_interval = timestamp - timestamp_m1 - track.append(multihypothesis.prediction) - prob_survive = np.exp(-self.tracker.prob_death * time_interval.total_seconds()) - pred_prob_exist = prob_survive * track.exist_prob - non_exist_weight = 1 - pred_prob_exist - target_hyps = hypotheses[track] - if multihypothesis: - # Update track - state_post = self.tracker._updater.update(multihypothesis) - track.append(state_post) - weights = np.array([hyp.probability for hyp in target_hyps])*pred_prob_exist - new_exist_prob = np.sum(weights) / (non_exist_weight + np.sum(weights)) - track.exist_prob = new_exist_prob - else: - non_det_weight = target_hyps.get_missed_detection_probability() - new_exist_prob = non_det_weight / (non_exist_weight + non_det_weight) - track.exist_prob = new_exist_prob - return tracks - - - -def _prob_detect_func(fovs, prob_detect): - """Closure to return the probability of detection function for a given environment scan""" - # Get the union of all field of views - fovs_union = unary_union(fovs) - if fovs_union.geom_type == 'MultiPolygon': - fovs = [poly for poly in fovs_union] - else: - fovs = [fovs_union] - - paths = [Path(poly.boundary.coords) for poly in fovs] - - # Probability of detection nested function - def 
prob_detect_func(state): - for path_p in paths: - if isinstance(state, ParticleState): - prob_detect_arr = np.full((len(state),), Probability(0.01)) - points = state.state_vector[[0, 2], :].T - inside_points = path_p.contains_points(points) - prob_detect_arr[inside_points] = prob_detect - return prob_detect_arr - else: - points = state.state_vector[[0, 2], :].T - return prob_detect if np.alltrue(path_p.contains_points(points)) \ - else Probability(0.01) - - return prob_detect_func \ No newline at end of file diff --git a/examples/reactive-isr/comms_proc/utils.py b/examples/reactive-isr/comms_proc/utils.py deleted file mode 100644 index 7c9d061ac..000000000 --- a/examples/reactive-isr/comms_proc/utils.py +++ /dev/null @@ -1,262 +0,0 @@ -from datetime import datetime, timedelta -from uuid import uuid4 - -import numpy as np -from matplotlib import pyplot as plt -from matplotlib.patches import Ellipse - -from reactive_isr_core.data import Algorithm, ProcessingStatistics, Storage, Node, \ - AvailableAlgorithms, Availability, Edge, TraversalTime, NetworkTopology, AssetList, Asset, \ - AssetDescription, SensorType, AssetStatus, GeoLocation, RFI, \ - TaskType, GeoRegion, PriorityOverTime, ThresholdOverTime - - -def setup_network(sensor_location, start_time): - algorithms = [ - Algorithm( - cost=1, - prob_detection=0.6, - false_alarm_density=0.1, - name="Algorithm1", - processing_statistics=ProcessingStatistics( - mu=1, - sigma=0.1, - lower_truncation=0 - ) - ), - Algorithm( - cost=2, - prob_detection=0.75, - false_alarm_density=0.05, - name="Algorithm2", - processing_statistics=ProcessingStatistics( - mu=3, - sigma=0.1, - lower_truncation=0 - ) - ), - Algorithm( - cost=3, - prob_detection=0.9, - false_alarm_density=0.01, - name="Algorithm3", - processing_statistics=ProcessingStatistics( - mu=5, - sigma=0.1, - lower_truncation=0 - ) - ) - ] - - dummy_storage = Storage( - capacity=1, - contents=[] - ) - - sensor_node = Node( - id=uuid4(), - 
processing_capability=AvailableAlgorithms( - algorithms=[algorithms[0]] - ), - total_task_capacity=dict(), - availability=Availability.AVAILABLE, - storage=dummy_storage, - peers=[] - ) - - fob_node = Node( - id=uuid4(), - processing_capability=AvailableAlgorithms( - algorithms=[algorithms[1]] - ), - total_task_capacity=dict(), - availability=Availability.AVAILABLE, - storage=dummy_storage, - peers=[] - ) - - cic_node = Node( - id=uuid4(), - processing_capability=AvailableAlgorithms( - algorithms=[algorithms[2]] - ), - total_task_capacity=dict(), - availability=Availability.AVAILABLE, - storage=dummy_storage, - peers=[] - ) - - nodes = [sensor_node, fob_node, cic_node] - - edges = [ - Edge( - id=uuid4(), - source_node=sensor_node.id, - target_node=fob_node.id, - traversal_time=TraversalTime( - mu=1, - sigma=0.1, - lower_truncation=0 - ) - ), - Edge( - id=uuid4(), - source_node=fob_node.id, - target_node=cic_node.id, - traversal_time=TraversalTime( - mu=3, - sigma=0.1, - lower_truncation=0 - ) - ), - Edge( - id=uuid4(), - source_node=sensor_node.id, - target_node=cic_node.id, - traversal_time=TraversalTime( - mu=4, - sigma=0.2, - lower_truncation=0 - ) - ), - ] - - sensor_node.peers = [fob_node.id] - fob_node.peers = [sensor_node.id, cic_node.id] - cic_node.peers = [fob_node.id] - - network_topology = NetworkTopology( - nodes=nodes, - edges=edges - ) - - assets = AssetList( - assets=[ - Asset( - asset_description=AssetDescription( - id=sensor_node.id, - name="Sensor", - sensor_types=[SensorType.AERIAL_V_CAMERA], - response_timeout=1, - fov_radius=30, - ), - asset_status=AssetStatus( - time=start_time, - id=sensor_node.id, - location=GeoLocation( - latitude=sensor_location[1], - longitude=sensor_location[0], - altitude=sensor_location[2] - ), - availability=Availability.AVAILABLE - ), - target_detections=[] - ), - ] - ) - - return network_topology, assets - - -def setup_rfis(start_time, num_rois, time_varying): - roi1 = GeoRegion(corners=[ - GeoLocation( - 
longitude=-3.3, - latitude=51.1, - altitude=0), - GeoLocation( - longitude=-2.9, - latitude=51.5, - altitude=0)] - ) - roi2 = GeoRegion(corners=[ - GeoLocation( - longitude=-2.4, - latitude=52.1, - altitude=0), - GeoLocation( - longitude=-2, - latitude=52.5, - altitude=0)] - ) - rois=[roi1] - if num_rois > 1: - rois.append(roi2) - priority = [5, 5] - if time_varying: - priority = [5, 0] - rfi = RFI(id=uuid4(), - task_type=TaskType.COUNT, - region_of_interest=rois, - start_time=datetime.now(), - end_time=datetime.now(), - priority_over_time=PriorityOverTime( - timescale=[start_time, start_time+timedelta(seconds=400)], - priority=priority), - targets=[], - threshold_over_time=ThresholdOverTime(timescale=[start_time], - threshold=[.01])) - return [rfi] - - -def eigsorted(cov): - vals, vecs = np.linalg.eigh(cov) - order = vals.argsort()[::-1] - return vals[order], vecs[:, order] - - -def compute_ellipse(cov, pos, nstd=1, **kwargs): - - def eigsorted(cov): - vals, vecs = np.linalg.eigh(cov) - order = vals.argsort()[::-1] - return vals[order], vecs[:, order] - - vals, vecs = eigsorted(cov) - theta = np.degrees(np.arctan2(*vecs[:, 0][::-1])) - - # Width and height are "full" widths, not radius - width, height = 2 * nstd * np.sqrt(vals) - ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, - alpha=0.4, **kwargs) - return ellip.get_path() - - -def plot_cov_ellipse(cov, pos, nstd=1, ax=None, **kwargs): - """ - Plots an `nstd` sigma error ellipse based on the specified covariance - matrix (`cov`). Additional keyword arguments are passed on to the - ellipse patch artist. - Parameters - ---------- - cov : The 2x2 covariance matrix to base the ellipse on - pos : The location of the center of the ellipse. Expects a 2-element - sequence of [x0, y0]. - nstd : The radius of the ellipse in numbers of standard deviations. - Defaults to 2 standard deviations. - ax : The axis that the ellipse will be plotted on. Defaults to the - current axis. 
- Additional keyword arguments are pass on to the ellipse patch. - Returns - ------- - A matplotlib ellipse artist - """ - - def eigsorted(cov): - vals, vecs = np.linalg.eigh(cov) - order = vals.argsort()[::-1] - return vals[order], vecs[:, order] - - if ax is None: - ax = plt.gca() - - vals, vecs = eigsorted(cov) - theta = np.degrees(np.arctan2(*vecs[:, 0][::-1])) - - # Width and height are "full" widths, not radius - width, height = 2 * nstd * np.sqrt(vals) - ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, - alpha=0.4, **kwargs) - - ax.add_artist(ellip) - return ellip diff --git a/examples/reactive-isr/hierarchical-tracking-example.py b/examples/reactive-isr/hierarchical-tracking-example.py deleted file mode 100644 index ec5ce4916..000000000 --- a/examples/reactive-isr/hierarchical-tracking-example.py +++ /dev/null @@ -1,385 +0,0 @@ -""" -This example demonstrates a simple hierarchical tracker: - - |----------------| - | Top Tracker | - |----------------| - | - -------------------- - | | - |----------------| |----------------| - | Fuse Tracker 1 | | Fuse Tracker 2 | - |----------------| |----------------| - | | - ------------------- ------------ - | | | -|----------------| |----------------| |----------------| -| Leaf Tracker 1 | | Leaf Tracker 2 | | Leaf Tracker 3 | -|----------------| |----------------| |----------------| - | | | -|----------------| |----------------| |----------------| -| Sensor 1 | | Sensor 2 | | Sensor 3 | -|----------------| |----------------| |----------------| - -""" -import numpy as np -from datetime import datetime, timedelta -from copy import deepcopy, copy -import matplotlib.pyplot as plt -from matplotlib.patches import Ellipse - -from stonesoup.custom.sensor.movable import MovableUAVCamera -from stonesoup.custom.tracker import SMCPHD_JIPDA -from stonesoup.custom.types.tracklet import SensorTracks -from stonesoup.custom.initiator.twostate import TwoStateInitiator -from stonesoup.types.numeric import Probability -from 
stonesoup.types.state import State, GaussianState -from stonesoup.types.array import StateVector, CovarianceMatrix -from stonesoup.platform.base import MovingPlatform -from stonesoup.models.transition.linear import (CombinedLinearGaussianTransitionModel, - ConstantVelocity, KnownTurnRate, - NthDerivativeDecay, - OrnsteinUhlenbeck) -from stonesoup.platform.base import MultiTransitionMovingPlatform -from stonesoup.simulator.simple import DummyGroundTruthSimulator -from stonesoup.types.track import Track -from stonesoup.types.update import Update, GaussianStateUpdate -from stonesoup.gater.distance import DistanceGater -from stonesoup.plugins.pyehm import JPDAWithEHM2 -from stonesoup.measures import Mahalanobis - -from utils import plot_cov_ellipse - -from stonesoup.custom.hypothesiser.probability import PDAHypothesiser -from stonesoup.custom.simulator.platform import PlatformTargetDetectionSimulator -from stonesoup.custom.predictor.twostate import TwoStatePredictor -from stonesoup.custom.updater.twostate import TwoStateKalmanUpdater -from stonesoup.custom.reader.tracklet import TrackletExtractor, PseudoMeasExtractor -from stonesoup.custom.tracker.fuse import FuseTracker2 - - -def to_single_state(tracks): - """ Converts a set of tracks with two-state vectors to a set of tracks with one-state vectors""" - new_tracks = set() - for track in tracks: - states = [] - for state in track.states: - if isinstance(state, Update): - new_state = GaussianStateUpdate(state.state_vector[6:], state.covar[6:, 6:], - hypothesis=state.hypothesis, - timestamp=state.timestamp) - else: - new_state = GaussianState(state.state_vector[6:], state.covar[6:, 6:], - timestamp=state.timestamp) - states.append(new_state) - new_tracks.add(Track(id=track.id, states=states)) - return new_tracks - - -# Parameters -np.random.seed(1005) -clutter_rate = 1 # Mean number of clutter points per scan -max_range = 50 # Max range of sensor (meters) -surveillance_area = np.pi * max_range ** 2 # Surveillance region 
area -clutter_density = clutter_rate / surveillance_area # Mean number of clutter points per unit area -prob_detect = 0.9 # Probability of Detection -num_timesteps = 151 # Number of simulation timesteps -PLOT = True # Plot the results or not - -# Simulation components -# --------------------- -# In this simulation, we have 3 platforms, each with a sensor. The sensors are mounted on the -# platforms and can move with them. The platforms are moving in a straight line at constant -# velocity. There also exists a (non-cooperative) target that is also moving in a straight line at -# constant velocity. - -# Simulation start time -start_time = datetime.now() - -# Define transition model and position for 3D platform -platform_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), - ConstantVelocity(0.), - ConstantVelocity(0.)]) - -# Create platforms. Each platform has a sensor and a transition model. The platform's sensor can -# only detect targets within its field of view (FOV), but not itself. 
-init_states = [State(StateVector([-50, 0, -25, 1, 0, 0]), start_time), - State(StateVector([50, 0, -25, 1, 0, 0]), start_time), - State(StateVector([-25, 1, 50, 0, 0, 0]), start_time)] -platforms = [] -for i, init_state in enumerate(init_states): - # Platform - platform = MovingPlatform(states=init_state, - position_mapping=(0, 2, 4), - velocity_mapping=(1, 3, 5), - transition_model=platform_transition_model) - - # Sensor - sensor = MovableUAVCamera(ndim_state=6, mapping=[0, 2, 4], - noise_covar=np.diag([.1, .1, .1]), - mounting_offset=StateVector([0, 0, 0]), - rotation_offset=StateVector([0, 0, 0]), - fov_radius=max_range, - limits=None, - fov_in_km=False) - platform.add_sensor(sensor) - platforms.append(platform) - -# The (non-cooperative) target -cv_model = CombinedLinearGaussianTransitionModel( - [ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)]) -init_state_gnd = State(StateVector([25, -1, 25, -1, 0, 0]), start_time) -target = MovingPlatform(transition_model=cv_model, - states=init_state_gnd, - position_mapping=(0, 2, 4), - velocity_mapping=(1, 3, 5), - sensors=None) - -# Simulation timestamps -times = np.arange(0, num_timesteps, 1) -timestamps = [start_time + timedelta(seconds=float(elapsed_time)) for elapsed_time in times] - -# A dummy ground truth simulator, which simply acts as a clock -gnd_simulator = DummyGroundTruthSimulator(times=timestamps) - -# Detection simulators (1 for each platform) -detector1 = PlatformTargetDetectionSimulator(groundtruth=gnd_simulator, platforms=[platforms[0]], - targets=[platforms[1], platforms[2], target]) -detector2 = PlatformTargetDetectionSimulator(groundtruth=gnd_simulator, platforms=[platforms[1]], - targets=[platforms[0], platforms[2], target]) -detector3 = PlatformTargetDetectionSimulator(groundtruth=gnd_simulator, platforms=[platforms[2]], - targets=[platforms[0], platforms[1], target]) -all_detectors = [detector1, detector2, detector3] - -# Hierarchical tracking components -# 
-------------------------------- -# In this section, we define the components of the hierarchical trackers. Recall that the -# hierarchy is as follows: -# 1. 3 Leaf trackers (one for each sensor) -# 2. 2 Branch fuse trackers: -# a. One that fuses the tracks from leaf trackers 1 and 2 -# b. One that fuses the tracks from leaf tracker 3 -# 3. The root (top) fuse tracker that fuses the tracks from the branch trackers - -# Leaf trackers (one for each sensor) -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# Each leaf tracker is a JIPDA tracker that uses a SMC-PHD filter to initialise tracks. -leaf_trackers = [] -for i, detector in enumerate(all_detectors): - transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(1)] * 3) - birth_density = GaussianState(StateVector([0, 0, 0, 0, 0, 0]), - CovarianceMatrix(np.diag([50, 2, 50, 2, 0, 0]))) - prob_death = Probability(0.01) # Probability of death - prob_birth = Probability(0.1) # Probability of birth - prob_survive = Probability(0.99) # Probability of survival - birth_rate = 0.02 - num_particles = 2 ** 11 - birth_scheme = 'mixture' - tracker = SMCPHD_JIPDA(birth_density=birth_density, transition_model=transition_model, - measurement_model=None, prob_detection=prob_detect, - prob_death=prob_death, prob_birth=prob_birth, - birth_rate=birth_rate, clutter_intensity=clutter_density, - num_samples=num_particles, birth_scheme=birth_scheme, - start_time=start_time, detector=detector, use_ismcphd=True) - leaf_trackers.append(tracker) - -# Fusion Tracker components -# ~~~~~~~~~~~~~~~~~~~~~~~~~ -# The fusion trackers are JPDA trackers, that use the tracks from the leaf trackers as inputs. -# The transition model, predictor, updater, hypothesiser, data associator, and initiator can -# be shared between the fusion trackers. -# -# On the contrary, the tracklet and pseudo-measurement extractors must be defined separately for -# each fusion tracker. 
This is because, these components perform caching of the tracks and -# pseudo-measurements, generated at each time step. - -# Transition model -transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(1)] * 3) - -# Predictors and updaters -two_state_predictor = TwoStatePredictor(transition_model) -two_state_updater = TwoStateKalmanUpdater(None, True) - -# Hypothesiser -hypothesiser1 = PDAHypothesiser(predictor=None, - updater=two_state_updater, - clutter_spatial_density=Probability(-80, log_value=True), - prob_detect=Probability(prob_detect), - prob_gate=Probability(0.99), - predict=False, - per_measurement=True) -hypothesiser1 = DistanceGater(hypothesiser1, Mahalanobis(), 10) # Uncomment to use JPDA+EHM2 - -# Data associator -fuse_associator = JPDAWithEHM2(hypothesiser1) # in Fuse tracker - -# Initiator -prior = GaussianState(StateVector([0, 0, 0, 0, 0, 0]), - CovarianceMatrix(np.diag([50, 5, 50, 5, 0, 0]))) # Uncomment for GNN in Fuse Tracker -initiator1 = TwoStateInitiator(prior, transition_model, two_state_updater) - -# Fuse tracker 1 (fuses tracks from leaf trackers 1 and 2) -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -tracklet_extractor = TrackletExtractor(transition_model=transition_model, - fuse_interval=timedelta(seconds=2)) -pseudomeas_extractor = PseudoMeasExtractor() -fuse_tracker1 = FuseTracker2(initiator=initiator1, predictor=two_state_predictor, - updater=two_state_updater, associator=fuse_associator, - tracklet_extractor=tracklet_extractor, - pseudomeas_extractor=pseudomeas_extractor, death_rate=1e-4, - prob_detect=Probability(prob_detect), - delete_thresh=Probability(0.1)) - -# Fuse tracker 2 (fuses tracks from leaf tracker 3) -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -tracklet_extractor2 = TrackletExtractor(transition_model=transition_model, - fuse_interval=timedelta(seconds=2)) -pseudomeas_extractor2 = PseudoMeasExtractor() -fuse_tracker2 = FuseTracker2(initiator=initiator1, 
predictor=two_state_predictor, - updater=two_state_updater, associator=fuse_associator, - tracklet_extractor=tracklet_extractor2, - pseudomeas_extractor=pseudomeas_extractor2, death_rate=1e-4, - prob_detect=Probability(prob_detect), - delete_thresh=Probability(0.1)) - -# Root tracker (fuses tracks from the branch trackers) -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -tracklet_extractor3 = TrackletExtractor(transition_model=transition_model, - fuse_interval=timedelta(seconds=4)) -pseudomeas_extractor3 = PseudoMeasExtractor() -fuse_tracker3 = FuseTracker2(initiator=initiator1, predictor=two_state_predictor, - updater=two_state_updater, associator=fuse_associator, - tracklet_extractor=tracklet_extractor3, - pseudomeas_extractor=pseudomeas_extractor3, death_rate=1e-4, - prob_detect=Probability(prob_detect), - delete_thresh=Probability(0.1)) - -# Run the simulation -# ------------------ -sim_start_time = datetime.now() -tracks = set() -if PLOT: - plt.figure(figsize=(10, 10)) - plt.ion() - -# We use the leaf trackers as our clock for the simulation. Each leaf tracker provides an -# iterator over the tracks it generates at each time step. We use the `zip` function to -# iterate over the tracks from all the leaf trackers simultaneously. 
-for (timestamp, tracks1), (_, tracks2), (_, tracks3) in zip(*leaf_trackers): - - # Run Fuse tracker 1 - # ~~~~~~~~~~~~~~~~~~~~ - # Group tracks from leaf trackers 1 and 2 - alltracks1 = [SensorTracks(tracks, i, transition_model) for i, tracks - in enumerate([tracks1, tracks2])] - # Extract tracklets - tracklets1 = tracklet_extractor.extract(alltracks1, timestamp) - # Extract pseudo-measurements - scans1 = pseudomeas_extractor.extract(tracklets1, timestamp) - # Generate fused tracks - ctracks1 = fuse_tracker1.process_scans(scans1) - # The above steps can be combined into a single function call - # ctracks1 = fuse_tracker1.process_tracks(alltracks1, timestamp) - - # Run Fuse tracker 2 - # ~~~~~~~~~~~~~~~~~~~~ - # Group tracks from leaf tracker 3 - alltracks2 = [SensorTracks(tracks3, 2, transition_model)] - # Extract tracklets - tracklets2 = tracklet_extractor2.extract(alltracks2, timestamp) - # Extract pseudo-measurements - scans2 = pseudomeas_extractor2.extract(tracklets2, timestamp) - # Generate fused tracks - ctracks2 = fuse_tracker2.process_scans(scans2) - # The above steps can be combined into a single function call - # ctracks2 = fuse_tracker2.process_tracks(alltracks2, timestamp) - - # Run Root tracker - # ~~~~~~~~~~~~~~~~ - # Convert two-state tracks to single-state tracks - ctracks11 = to_single_state(ctracks1) - ctracks22 = to_single_state(ctracks2) - # Group tracks from Fuse trackers 1 and 2 - alltracks3 = [SensorTracks(tracks, i, transition_model) for i, tracks - in enumerate([ctracks11, ctracks22])] - # Extract tracklets - tracklets3 = tracklet_extractor3.extract(alltracks3, timestamp) - # Extract pseudo-measurements - scans3 = pseudomeas_extractor3.extract(tracklets3, timestamp) - # Generate fused tracks - ctracks3 = fuse_tracker3.process_scans(scans3) - # The above steps can be combined into a single function call - # ctracks3 = fuse_tracker3.process_tracks(alltracks3, timestamp) - - # Store tracks - tracks.update(ctracks3) - - # Print progress - 
print(f'{timestamp - start_time} - No. Tracks: {len(ctracks3)}') - - # Plot - if PLOT: - plt.clf() - colors = ['r', 'g', 'b'] - - # Plot groundtruth - data = np.array([state.state_vector for state in target]) - plt.plot(data[:, 0], data[:, 2], '--k', label='Groundtruth (Target)') - for i, (platform, color) in enumerate(zip(platforms, colors)): - data = np.array([state.state_vector for state in platform]) - plt.plot(data[:, 0], data[:, 2], f'--{color}') - - # Plot sensor FOVs - ax1 = plt.gca() - for j, platform in enumerate(platforms): - sensor = platform.sensors[0] - circle = plt.Circle((sensor.position[0], sensor.position[1]), radius=sensor.fov_radius, - color=colors[j], - fill=False, - label=f'Sensor {j + 1}') - ax1.add_artist(circle, ) - - # Plot detections - all_detections = [detector.detections for detector in all_detectors] - for i, (detections, color) in enumerate(zip(all_detections, colors)): - for detection in detections: - model = detection.measurement_model - x, y = detection.state_vector[0], detection.state_vector[1] - plt.plot(x, y, f'{color}x') - - # Plot tracks from Fuse tracker 1 - for track in ctracks1: - data = np.array([state.state_vector for state in track]) - plot_cov_ellipse(track.covar[[6, 8], :][:, [6, 8]], track.state_vector[[6, 8], :], - edgecolor='r', facecolor='none', ax=ax1) - plt.plot(data[:, 6], data[:, 8], '-*c') - - # Plot tracks from Fuse tracker 2 - for track in ctracks2: - data = np.array([state.state_vector for state in track]) - plot_cov_ellipse(track.covar[[6, 8], :][:, [6, 8]], track.state_vector[[6, 8], :], - edgecolor='r', facecolor='none', ax=ax1) - plt.plot(data[:, 6], data[:, 8], '-*r') - - # Plot tracks from Root tracker - for track in tracks: - data = np.array([state.state_vector for state in track]) - plot_cov_ellipse(track.covar[[6, 8], :][:, [6, 8]], track.state_vector[[6, 8], :], - edgecolor='r', facecolor='none', ax=ax1) - plt.plot(data[:, 6], data[:, 8], '-*m') - - # Add legend info - for i, color in 
enumerate(colors): - plt.plot([], [], f'--{color}', label=f'Groundtruth (Sensor {i + 1})') - plt.plot([], [], f'x{color}', label=f'Detections (Sensor {i + 1})') - plt.plot([], [], '-*c', label=f'Fused Tracks (Fuse Tracker 1)') - plt.plot([], [], '-*r', label=f'Fused Tracks (Fuse Tracker 2)') - plt.plot([], [], f'-*m', label=f'Fused Tracks (Top Tracker)') - - plt.legend(loc='upper right') - plt.xlim((-200, 200)) - plt.ylim((-200, 200)) - plt.pause(0.01) - a=2 - -print(datetime.now() - sim_start_time) diff --git a/examples/reactive-isr/movable_uav_camera_actions.py b/examples/reactive-isr/movable_uav_camera_actions.py deleted file mode 100644 index 41a06aec8..000000000 --- a/examples/reactive-isr/movable_uav_camera_actions.py +++ /dev/null @@ -1,88 +0,0 @@ -import itertools -from datetime import datetime - -import numpy as np - -from stonesoup.custom.sensor.movable import MovableUAVCamera -from stonesoup.types.angle import Angle -from stonesoup.types.array import StateVector - -# The camera is initially positioned at x=10, y=10, z=100 -position = StateVector([10., 10., 100.]) - -# We can also set the resolution of each actionable property. The resolution is used when -# discretising the action space. In this case, we set the resolution of both the X and Y locations -# to 10 units. -resolutions = {'location_x': 10., 'location_y': 10.} - -# Furthermore, we can specify the limits of the action space. In this case, we set the limits of -# both the X and Y locations to [-100, 100]. This means that the action space will contain values -# in the range [-100, 100] with a step size of 10 units for each property (based on the resolution -# specified above). 
-limits = {'location_x': [-100, 100], 'location_y': [-90, 90]} - -# Create a camera object -sensor = MovableUAVCamera(ndim_state=6, mapping=[0, 2, 4], - noise_covar=np.diag([0.05, 0.05, 0.05]), - location_x=position[0], location_y=position[1], - resolutions=resolutions, - position=position, - fov_radius=100, - fov_in_km=False, - limits=limits) - -# Set a query time -timestamp = datetime.now() - -# Calling sensor.actions() will return a set of action generators. Each action generator is an -# object that contains all the actions that can be performed by the sensor at a given time. In this -# case, the sensor has two actionable properties: X and Y location. Hence, the result of -# sensor.actions() is a set of two action generators: one for moving on the X-axis and one for -# moving on the Y-axis. -action_generators = sensor.actions(timestamp) - -# Let's look at the action generators -# The first action generator is for the X location. We can extract the action generator by -# searching for the action generator that controls the 'location_x' property. So, the following -# line of code simply filters the action generators that control 'location_x' (the for-if -# statement) and then selects the first action generator (since there is only one), via the next() -# statement. -x_action_generator = next(ag for ag in action_generators if ag.attribute == 'location_x') -# The second action generator is for the Y location. We can extract the action generator by -# searching for the action generator that controls 'location_y'. -y_action_generator = next(ag for ag in action_generators if ag.attribute == 'location_y') - -# We can now look at the actions that can be performed by the action generators. The action -# generators provide a Python "iterator" interface. This means that we can iterate over the action -# generators to get the actions that can be performed (e.g. with a "for" loop). 
Instead, we can -# also use the list() function to get a list of all the actions that can be performed. -possible_x_actions = list(x_action_generator) -possible_y_actions = list(y_action_generator) - -# Each action has a "target_value" property that specifies the value that the property will be -# set to if the action is performed. The following line of code prints the target values of the -# 10th action for pan and tilt. -print(possible_x_actions[9].target_value) -print(possible_y_actions[9].target_value) - -# To get all the possible combinations of actions, we can use the itertools.product() function. -possible_action_combinations = list(itertools.product(possible_x_actions, possible_y_actions)) - -# Let us now select the 10th action combination and task the sensor to perform the action. -chosen_action_combination = possible_action_combinations[9] -sensor.add_actions(chosen_action_combination) -sensor.act(timestamp) - -# We can also create a custom action combination. For example, we can move the camera to the -# location (0, 10, 100) by generating an action that sets the X location to 0 and an action that -# sets the Y location to 10. We can then combine these two actions into a single action combination -# and task the sensor to perform the action. -custom_action_x = x_action_generator.action_from_value(0) # Action that sets the X location to 0 -custom_action_y = y_action_generator.action_from_value(10) # Action that sets the Y location to 10 -custom_action_combination = (custom_action_x, custom_action_y) -sensor.add_actions(custom_action_combination) -sensor.act(timestamp) - -# The statement below is just an extra statement to allow us to breakpoint the code and inspect -# the possible actions. 
-end = True diff --git a/examples/reactive-isr/multi-sonar-ehm-fuse-3.py b/examples/reactive-isr/multi-sonar-ehm-fuse-3.py deleted file mode 100644 index f8ccbea61..000000000 --- a/examples/reactive-isr/multi-sonar-ehm-fuse-3.py +++ /dev/null @@ -1,254 +0,0 @@ -""" -multi-sonar-ehm-fuse.py - -This example script simulates 3 moving platforms, each equipped with a single active sonar sensor -(StoneSoup does not have an implementation of an active sonar so a radar is used instead), and 1 -target. Each sensor generates detections of all other objects (excluding itself). - -The tracking configuration is as follows: -- For each sensor whose index in the 'all_detectors' list is not in 'bias_tracker_idx', a - local tracker is configured that acts like a contact follower and generates Track objects. The - outputs of these trackers are the fed into the Fusion engine. -- For all other sensors, their data is fed directly into the Fusion engine. Note that the - TrackletExtractorWithTracker is used here, meaning that a (local) bias estimation tracker is run - on the data read from each sensor, before it is fed into the main Fuse Tracker (i.e. the - component of the Fusion Engine that produces the fused tracks). -- The data association algorithm used for both the local and fuse trackers is JPDA with EHM. 
- -""" -import numpy as np -from datetime import datetime, timedelta -from copy import deepcopy, copy -import matplotlib.pyplot as plt -from matplotlib.patches import Ellipse - -from stonesoup.custom.sensor.movable import MovableUAVCamera -from stonesoup.custom.tracker import SMCPHD_JIPDA -from stonesoup.custom.types.tracklet import SensorTracks -from stonesoup.custom.initiator.twostate import TwoStateInitiator -from stonesoup.types.numeric import Probability -from stonesoup.types.state import State, GaussianState -from stonesoup.types.array import StateVector, CovarianceMatrix -from stonesoup.platform.base import MovingPlatform -from stonesoup.models.transition.linear import (CombinedLinearGaussianTransitionModel, - ConstantVelocity, KnownTurnRate, NthDerivativeDecay, - OrnsteinUhlenbeck) -from stonesoup.platform.base import MultiTransitionMovingPlatform -from stonesoup.simulator.simple import DummyGroundTruthSimulator -from stonesoup.types.update import Update -from stonesoup.gater.distance import DistanceGater -from stonesoup.plugins.pyehm import JPDAWithEHM2 -from stonesoup.measures import Mahalanobis - -from utils import plot_cov_ellipse - -from stonesoup.custom.hypothesiser.probability import PDAHypothesiser -from stonesoup.custom.simulator.platform import PlatformTargetDetectionSimulator -from stonesoup.custom.predictor.twostate import TwoStatePredictor -from stonesoup.custom.updater.twostate import TwoStateKalmanUpdater -from stonesoup.custom.reader.tracklet import TrackletExtractor, PseudoMeasExtractor -from stonesoup.custom.tracker.fuse import FuseTracker2 - -# Parameters -np.random.seed(1000) -clutter_rate = 1 # Mean number of clutter points per scan -max_range = 130 # Max range of sensor (meters) -surveillance_area = np.pi*max_range**2 # Surveillance region area -clutter_density = clutter_rate/surveillance_area # Mean number of clutter points per unit area -prob_detect = 0.9 # Probability of Detection -num_timesteps = 101 # Number of simulation 
timesteps -PLOT = True - -# Simulation start time -start_time = datetime.now() - -# Define transition model and position for 3D platform -platform_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), - ConstantVelocity(0.), - ConstantVelocity(0.)]) - -# Create platforms -init_states = [State(StateVector([-50, 0, -25, 1, 0, 0]), start_time), - State(StateVector([50, 0, -25, 1, 0, 0]), start_time), - State(StateVector([-25, 1, 50, 0, 0, 0]), start_time)] -platforms = [] -for i, init_state in enumerate(init_states): - # Platform - platform = MovingPlatform(states=init_state, - position_mapping=(0, 2, 4), - velocity_mapping=(1, 3, 5), - transition_model=platform_transition_model) - - # Sensor - sensor = MovableUAVCamera(ndim_state=6, mapping=[0, 2, 4], - noise_covar=np.diag([3, 3, 3]), - mounting_offset=StateVector([0, 0, 0]), - rotation_offset=StateVector([0, 0, 0]), - fov_radius=max_range, - limits=None, - fov_in_km=False) - platform.add_sensor(sensor) - platforms.append(platform) - - -# Simulation components - -# The target -cv_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)]) -ct_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)]) -manoeuvres = [cv_model, ct_model] -manoeuvre_times = [timedelta(seconds=4), timedelta(seconds=4)] -init_state_gnd = State(StateVector([25, -1, 25, -1, 0, 0]), start_time) -target = MultiTransitionMovingPlatform(transition_models=manoeuvres, - transition_times=manoeuvre_times, - states=init_state_gnd, - position_mapping=(0, 2, 4), - velocity_mapping=(1, 3, 5), - sensors=None) - -times = np.arange(0, num_timesteps, 1) -timestamps = [start_time + timedelta(seconds=float(elapsed_time)) for elapsed_time in times] - -gnd_simulator = DummyGroundTruthSimulator(times=timestamps) - -# Detection simulators (1 for each platform) -detector1 = 
PlatformTargetDetectionSimulator(groundtruth=gnd_simulator, platforms=[platforms[0]], - targets=[platforms[1], platforms[2], target]) -detector2 = PlatformTargetDetectionSimulator(groundtruth=gnd_simulator, platforms=[platforms[1]], - targets=[platforms[0], platforms[2], target]) -detector3 = PlatformTargetDetectionSimulator(groundtruth=gnd_simulator, platforms=[platforms[2]], - targets=[platforms[0], platforms[1], target]) - -all_detectors = [detector1, detector2, detector3] - -# Multi-Target Trackers (1 per platform) -base_trackers = [] -for i, detector in enumerate(all_detectors): - transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(1)]*3) - birth_density = GaussianState(StateVector([0, 0, 0, 0, 0, 0]), - CovarianceMatrix(np.diag([50, 2, 50, 2, 0, 0]))) - prob_death = Probability(0.01) # Probability of death - prob_birth = Probability(0.1) # Probability of birth - prob_survive = Probability(0.99) # Probability of survival - birth_rate = 0.02 - num_particles = 2 ** 11 - birth_scheme = 'mixture' - tracker = SMCPHD_JIPDA(birth_density=birth_density, transition_model=transition_model, - measurement_model=None, prob_detection=prob_detect, - prob_death=prob_death, prob_birth=prob_birth, - birth_rate=birth_rate, clutter_intensity=clutter_density, - num_samples=num_particles, birth_scheme=birth_scheme, - start_time=start_time, detector=detector, use_ismcphd=True) - base_trackers.append(tracker) - -# Fusion Tracker -transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(1)]*3) -prior = GaussianState(StateVector([0, 0, 0, 0, 0, 0]), - CovarianceMatrix(np.diag([50, 5, 50, 5, 0, 0]))) -tracklet_extractor = TrackletExtractor(trackers=None, - transition_model=transition_model, - fuse_interval=timedelta(seconds=3)) -pseudomeas_extractor = PseudoMeasExtractor(None, state_idx_to_use=None, use_prior=False) - -two_state_predictor = TwoStatePredictor(transition_model) -two_state_updater = TwoStateKalmanUpdater(None, True) -hypothesiser1 = 
PDAHypothesiser(predictor=None, - updater=two_state_updater, - clutter_spatial_density=Probability(-80, log_value=True), - prob_detect=Probability(prob_detect), - prob_gate=Probability(0.99), - predict=False, - per_measurement=True) -hypothesiser1 = DistanceGater(hypothesiser1, Mahalanobis(), 10) # Uncomment to use JPDA+EHM2 -fuse_associator = JPDAWithEHM2(hypothesiser1) # in Fuse tracker -# fuse_associator = GNNWith2DAssignment(hypothesiser1) # Uncomment for GNN in Fuse Tracker -initiator1 = TwoStateInitiator(prior, transition_model, two_state_updater) -fuse_tracker = FuseTracker2(initiator=initiator1, predictor=two_state_predictor, - updater=two_state_updater, associator=fuse_associator, - tracklet_extractor=tracklet_extractor, - pseudomeas_extractor=detector, death_rate=1e-4, - prob_detect=Probability(prob_detect), - delete_thresh=Probability(0.1)) - -sim_start_time = datetime.now() -tracks = set() - -if PLOT: - plt.figure(figsize=(10, 10)) - plt.ion() -for (timestamp, tracks1), (_, tracks2), (_, tracks3) in zip(*base_trackers): - - alltracks = [SensorTracks(tracks, i, transition_model) for i, tracks - in enumerate([tracks1, tracks2, tracks3])] - - # Perform fusion - - # _, ctracks = fuse_tracker.process_tracks(alltracks, timestamp) - - # Extract tracklets - tracklets = tracklet_extractor.extract(alltracks, timestamp) - - # Extract pseudo-measurements - scans = pseudomeas_extractor.extract(tracklets, timestamp) - - # Process pseudo-measurements - ctracks = fuse_tracker.process_scans(scans) - - # Update tracks - tracks.update(ctracks) - - print(f'{timestamp-start_time} - No. 
Tracks: {len(ctracks)}') - tracks.update(ctracks) - # Plot - if PLOT: - plt.clf() - all_detections = [detector.detections for detector in all_detectors] - colors = ['r', 'g', 'b'] - data = np.array([state.state_vector for state in target]) - plt.plot(data[:, 0], data[:, 2], '--k', label='Groundtruth (Target)') - for i, (platform, color) in enumerate(zip(platforms, colors)): - data = np.array([state.state_vector for state in platform]) - plt.plot(data[:, 0], data[:, 2], f'--{color}') - - ax1 = plt.gca() - for j, platform in enumerate(platforms): - sensor = platform.sensors[0] - circle = plt.Circle((sensor.position[0], sensor.position[1]), radius=sensor.fov_radius, - color=colors[j], - fill=False, - label=f'Sensor {j+1}') - ax1.add_artist(circle, ) - - for i, (detections, color) in enumerate(zip(all_detections, colors)): - for detection in detections: - model = detection.measurement_model - x, y = detection.state_vector[0], detection.state_vector[1] - plt.plot(x, y, f'{color}x') - - for i, (tracklets, color) in enumerate(zip(tracklet_extractor.current[1], colors)): - for tracklet in tracklets: - data = np.array([s.mean for s in tracklet.states if isinstance(s, Update)]) - plt.plot(data[:, 6], data[:, 8], f':{color}') - - for track in tracks: - data = np.array([state.state_vector for state in track]) - plot_cov_ellipse(track.covar[[6, 8], :][:, [6, 8]], track.state_vector[[6, 8], :], - edgecolor='r', facecolor='none', ax=ax1) - plt.plot(data[:, 6], data[:, 8], '-*m') - - # Add legend info - for i, color in enumerate(colors): - plt.plot([], [], f'--{color}', label=f'Groundtruth (Sensor {i + 1})') - plt.plot([], [], f':{color}', label=f'Tracklets (Sensor {i + 1})') - plt.plot([], [], f'x{color}', label=f'Detections (Sensor {i + 1})') - plt.plot([], [], f'-*m', label=f'Fused Tracks') - - # state_smc = non_bias_trackers[0]._initiator._state - # plt.plot(state_smc.state_vector[0, :], state_smc.state_vector[2, :], 'r.') - - plt.legend(loc='upper right') - plt.xlim((-200, 
200)) - plt.ylim((-200, 200)) - plt.pause(0.01) - -print(datetime.now() - sim_start_time) \ No newline at end of file diff --git a/examples/reactive-isr/risr-demo.py b/examples/reactive-isr/risr-demo.py deleted file mode 100644 index 334fa3fd6..000000000 --- a/examples/reactive-isr/risr-demo.py +++ /dev/null @@ -1,318 +0,0 @@ -import uuid -from datetime import datetime, timedelta -import warnings -warnings.simplefilter(action='ignore', category=FutureWarning) - -import numpy as np -from matplotlib import pyplot as plt -from matplotlib.widgets import Button -from ordered_set import OrderedSet -from shapely.geometry import Point, Polygon - -from stonesoup.custom.functions import calculate_num_targets_dist, geodesic_point_buffer -from stonesoup.custom.sensor.movable import MovableUAVCamera -from stonesoup.custom.sensormanager.base import UniqueBruteForceSensorManager -from stonesoup.custom.sensormanager.reward import RolloutPriorityRewardFunction, \ - RolloutPriorityRewardFunction2 -from stonesoup.types.angle import Angle -from stonesoup.types.array import StateVector -from stonesoup.types.numeric import Probability -from stonesoup.types.state import GaussianState, ParticleState -from stonesoup.custom.tracker import SMCPHD_JIPDA, SMCPHD_IGNN -from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \ - ConstantVelocity -from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState - -from reactive_isr_core.data import RFI, TaskType, GeoRegion, GeoLocation, PriorityOverTime, \ - ThresholdOverTime, TargetSpecification, TargetType - -from utils import plot_cov_ellipse, _prob_detect_func - -# np.random.seed(5547) -np.random.seed(95146) - -# Parameters -# ========== -start_time = datetime.now() # Simulation start time -prob_detect = Probability(.9) # 90% chance of detection. 
-prob_death = Probability(0.01) # Probability of death -prob_birth = Probability(0.1) # Probability of birth -prob_survive = Probability(0.99) # Probability of survival -birth_rate = 0.02 # Birth-rate (Mean number of new targets per scan) -clutter_rate = 2 # Clutter-rate (Mean number of clutter measurements per scan) -surveillance_region = [[-5, -2], # The surveillance region - [50.1, 53.2]] -surveillance_area = (surveillance_region[0][1] - surveillance_region[0][0]) \ - * (surveillance_region[1][1] - surveillance_region[1][0]) # Surveillance volume -clutter_intensity = clutter_rate / surveillance_area # Clutter intensity per unit volume/area -birth_density = GaussianState( - StateVector(np.array([-2.5, 0.0, 51, 0.0, 0.0, 0.0])), - np.diag([3. ** 2, .01 ** 2, 3. ** 2, .01 ** 2, 0., 0.])) # Birth density -birth_scheme = 'mixture' # Birth scheme. Possible values are 'expansion' and 'mixture' -num_particles = 2 ** 8 # Number of particles used by the PHD filter -num_iter = 200 # Number of simulation steps -total_no_sensors = 3 # Total number of sensors -PLOT = True # Set [True | False] to turn plotting [ON | OFF] -MANUAL_RFI = True # Set [True | False] to turn manual RFI [ON | OFF] -colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k'] # Colors for plotting - -# Simulate Groundtruth -# ==================== -gnd_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), - ConstantVelocity(0.), - ConstantVelocity(0.)]) -timestamps = [] -for k in range(0, num_iter + 1, 2): - timestamps.append(start_time + timedelta(seconds=k)) - -truths = set() -truth = GroundTruthPath([GroundTruthState([-3.7, 0.0, 52.0, 0.01, 0, 0], timestamp=start_time)]) -for timestamp in timestamps[1:]: - truth.append(GroundTruthState( - gnd_transition_model.function(truth[-1], noise=True, time_interval=timedelta(seconds=1)), - timestamp=timestamp)) -truths.add(truth) - -truth = GroundTruthPath([GroundTruthState([-4.6, 0.01, 52.1, -0.01, 0, 0], timestamp=start_time)]) -for timestamp in 
timestamps[1:]: - truth.append(GroundTruthState( - gnd_transition_model.function(truth[-1], noise=True, time_interval=timedelta(seconds=1)), - timestamp=timestamp)) -truths.add(truth) - -truth = GroundTruthPath([GroundTruthState([-3.5, 0, 51.3, -0.01, 0, 0], timestamp=start_time)]) -for timestamp in timestamps[1:]: - truth.append(GroundTruthState( - gnd_transition_model.function(truth[-1], noise=True, time_interval=timedelta(seconds=1)), - timestamp=timestamp)) -truths.add(truth) - -# Create sensors -# ============== -sensors = [] -for i in range(0, total_no_sensors): - rotation_offset = StateVector( - [Angle(0), Angle(-np.pi / 2), Angle(0)]) # Camera rotation offset - pan_tilt = StateVector([Angle(0), Angle(-np.pi / 32)]) # Camera pan and tilt - increment = 1.0*i - x = -4.5 + increment - y = 51.5 if i == 0 else 51.5 + increment - position = StateVector([-4.5+increment, 51.5, 100.]) - resolutions = {'location_x': 1, 'location_y': 1} - limits = {'location_x': [surveillance_region[0][0]+0.5, surveillance_region[0][1]-0.5], - 'location_y': [round(surveillance_region[1][0])+0.5, round(surveillance_region[1][1])-0.5]} - sensor = MovableUAVCamera(ndim_state=6, mapping=[0, 2, 4], - noise_covar=np.diag([0.0001, 0.0001, 0.0001]), - location_x=position[0], location_y=position[1], - resolutions=resolutions, - position=position, - fov_radius=70, - limits=limits) - sensors.append(sensor) -for sensor in sensors: - sensor.timestamp = start_time - -# Plot groundtruth and sensors -# ============================ -fig = plt.figure(figsize=(10, 6)) -ax = fig.add_subplot(111) -ax.set_xlim(surveillance_region[0][0]-1, surveillance_region[0][1]+1) -ax.set_ylim(surveillance_region[1][0], surveillance_region[1][1]) -ax.set_xlabel('Longitude') -ax.set_ylabel('Latitude') -ax.set_title('Groundtruth and initial sensor locations') -ax.grid(True) -ax.set_aspect('equal') -for i, track in enumerate(truths): - ax.plot([state.state_vector[0] for state in track], - [state.state_vector[2] for state in 
track], - color=colors[i], linestyle='--', linewidth=2, label=f'Truth {i+1}') -for j, sensor in enumerate(sensors): - coords = geodesic_point_buffer(sensor.position[1], sensor.position[0], sensor.fov_radius).exterior.coords[:] - ax.plot([coord[0] for coord in coords], [coord[1] for coord in coords], - color=colors[j], linewidth=2, label=f'Sensor {j+1} FOV') - # circle = plt.Circle((sensor.position[0], sensor.position[1]), radius=sensor.fov_radius, - # color=colors[j], - # fill=False, - # label=f'Sensor {j + 1}') - # ax.add_artist(circle, ) -ax.legend() -plt.show() - -# Tracking Components -# =================== -# Transition model -transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.000001), - ConstantVelocity(0.000001), - ConstantVelocity(0.000001)]) - -# Main tracker -tracker = SMCPHD_IGNN(birth_density=birth_density, transition_model=transition_model, - measurement_model=None, prob_detection=prob_detect, - prob_death=prob_death, prob_birth=prob_birth, - birth_rate=birth_rate, clutter_intensity=clutter_intensity, - num_samples=num_particles, birth_scheme=birth_scheme, - start_time=start_time) - -# Evaluator tracker -tracker2 = SMCPHD_IGNN(birth_density=birth_density, transition_model=transition_model, - measurement_model=None, prob_detection=prob_detect, - prob_death=prob_death, prob_birth=prob_birth, - birth_rate=birth_rate, clutter_intensity=clutter_intensity, - num_samples=num_particles, birth_scheme=birth_scheme, - start_time=start_time) -# tracker2 = copy.deepcopy(tracker) - - -# Sensor Management Components -# ============================ -# Reward function -roi = GeoRegion(corners=[ - GeoLocation( - longitude=surveillance_region[0][0], - latitude=surveillance_region[1][0], - altitude=0), - GeoLocation( - longitude=surveillance_region[0][1], - latitude=surveillance_region[1][1], - altitude=0)] -) -rfi = RFI(id=uuid.uuid4(), - task_type=TaskType.COUNT, - region_of_interest=roi, - start_time=datetime.now(), - end_time=datetime.now(), 
- priority_over_time=PriorityOverTime(timescale=[datetime.now()], priority=[5]), - targets=[], #TargetSpecification(target_type=TargetType.VEHICLE, existence_probability=0.9) - threshold_over_time=ThresholdOverTime(timescale=[datetime.now()], threshold=[.00001])) -rfis = [rfi] if not MANUAL_RFI else [] -reward_function = RolloutPriorityRewardFunction2(tracker2, 0, - num_samples=100, interval=timedelta(seconds=5), - rfis=rfis) -sensor_manager = UniqueBruteForceSensorManager(sensors, reward_function) - - -# Estimate -# ======== -# Plotting setup -if PLOT: - fig1 = plt.figure(figsize=(20, 7)) - ax1, ax2 = fig1.subplots(1, 2) - ax1.set_title('Simulation') - ax2.set_title('Variance') - fig1.subplots_adjust(bottom=0.2) - axbtn = fig1.add_axes([0.81, 0.05, 0.15, 0.075]) - btn = Button(axbtn, 'New RFI') - def set_rfis(*args, **kwargs): - print("Added RFI") - reward_function.rfis = [rfi] - btn.on_clicked(set_rfis) - plt.ion() - -# Main tracking loop -tracks = set() -vars = [] -for k, timestamp in enumerate(timestamps): - - if k == 20: - sensors.pop(1) - - sensor_detections = [] - tracks = list(tracks) - truth_states = OrderedSet(truth[timestamp] for truth in truths) - - # Compute variance of number of targets - region_corners = rfi.region_of_interest.corners - xmin, ymin = region_corners[0].longitude, region_corners[0].latitude - xmax, ymax = region_corners[1].longitude, region_corners[1].latitude - geom = Polygon([(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]) - _, var = calculate_num_targets_dist(tracks, geom) - vars.append(var) - - # Check if RFI is satisfied and remove it - if MANUAL_RFI and len(reward_function.rfis) > 0: - if var < rfi.threshold_over_time.threshold[0]: - reward_function.rfis.remove(rfi) - - # Generate chosen configuration - chosen_actions = sensor_manager.choose_actions(tracks, timestamp) - for chosen_action in chosen_actions: - for sensor, actions in chosen_action.items(): - sensor.add_actions(actions) - - # Cue sensors - for sensor in 
sensors: - sensor.act(timestamp) - - # For each sensor - for j, sensor in enumerate(sensors): - - # Compute probability of detection - # center = (sensor.position[0], sensor.position[1]) - # radius = sensor.fov_radius - # p = Point(center).buffer(radius) - p = geodesic_point_buffer(sensor.position[1], sensor.position[0], sensor.fov_radius) - tracker.prob_detect = _prob_detect_func(prob_detect, [p]) - - # Observe the ground truth - detections = sensor.measure(truth_states, noise=True) - for detection in detections: - detection.metadata['target_type_confidences'] = { - 'person': 1.0 - } - sensor_detections.append(detections) - - detections = list(detections) - num_tracks = len(tracks) - num_detections = len(detections) - - # Track using main tracker - tracks = tracker.track(detections, timestamp) - - # Print debug info - tracks = list(tracks) - print(f'\n Sensor {j+1} ===========================================') - for track in tracks: - print(f'Track {track.id} - Exist prob: {track.exist_prob}') - - # Plot output - if PLOT: - ax1.cla() - ax2.cla() - ax2.plot([i for i in range(k+1)], vars, 'r') - ax2.set_xlabel('Time') - ax2.set_ylabel('Variance') - ax1.set_title('Simulation') - ax2.set_title('Variance') - for j, sensor in enumerate(sensors): - coords = geodesic_point_buffer(sensor.position[1], sensor.position[0], - sensor.fov_radius).exterior.coords[:] - ax1.plot([coord[0] for coord in coords], [coord[1] for coord in coords], - color=colors[j], linewidth=2, label=f'Sensor {j + 1} FOV') - # circle = plt.Circle((sensor.position[0], sensor.position[1]), radius=sensor.fov_radius, - # color=colors[j], - # fill=False, - # label=f'Sensor {j+1}') - # ax1.add_artist(circle, ) - detections = sensor_detections[j] - if len(detections): - det_data = np.array([det.state_vector for det in detections]) - ax1.plot(det_data[:, 0], det_data[:, 1], f'*{colors[j]}', label='Detections') - - for i, truth in enumerate(truths): - data = np.array([s.state_vector for s in truth[:k + 1]]) - 
ax1.plot(data[:, 0], data[:, 2], '--', label=f'Groundtruth Track {i+1}') - - for i, track in enumerate(tracks): - data = np.array([s.state_vector for s in track]) - ax1.plot(data[:, 0], data[:, 2], label=f'Track {i}') - plot_cov_ellipse(track.covar[[0, 2], :][:, [0, 2]], track.state_vector[[0, 2], :], - edgecolor='r', facecolor='none', ax=ax1) - ax1.set_aspect('equal', adjustable='box', anchor='C') - ax1.set_xlim(np.array(surveillance_region[0]) + np.array([-1, 1])) - ax1.set_ylim(surveillance_region[1]) - ax1.set_xlabel('Longitude') - ax1.set_ylabel('Latitude') - ax1.legend(loc='upper right') - plt.pause(0.1) - diff --git a/examples/reactive-isr/sensor_management.py b/examples/reactive-isr/sensor_management.py deleted file mode 100644 index a73421f78..000000000 --- a/examples/reactive-isr/sensor_management.py +++ /dev/null @@ -1,240 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 - - -import numpy as np -import random -from datetime import datetime, timedelta -import time - -import matplotlib.pyplot as plt -from matplotlib.patches import Rectangle -from ordered_set import OrderedSet - -from stonesoup.custom.functions import get_camera_footprint -from stonesoup.plotter import Plotter -from stonesoup.types.state import StateVector -from stonesoup.custom.sensor.pan_tilt import PanTiltUAVCamera -from stonesoup.types.angle import Angle -from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \ - ConstantVelocity -from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState -from stonesoup.predictor.kalman import KalmanPredictor -from stonesoup.updater.kalman import ExtendedKalmanUpdater -from stonesoup.types.state import GaussianState -from stonesoup.types.track import Track -from stonesoup.sensormanager.reward import UncertaintyRewardFunction -from stonesoup.sensormanager import BruteForceSensorManager, OptimizeBruteSensorManager -from stonesoup.hypothesiser.distance import DistanceHypothesiser -from stonesoup.measures import 
Mahalanobis -from stonesoup.dataassociator.neighbour import GNNWith2DAssignment -from stonesoup.metricgenerator.tracktotruthmetrics import SIAPMetrics -from stonesoup.measures import Euclidean -from stonesoup.dataassociator.tracktotrack import TrackToTruth -from stonesoup.metricgenerator.uncertaintymetric import SumofCovarianceNormsMetric -from stonesoup.metricgenerator.manager import SimpleManager - -np.random.seed(1990) -random.seed(1990) - -# Parameters -# ========== # Simulation start time -num_iter = 100 # Number of simulation steps -ntruths = 2 # Number of ground truths -total_no_sensors = 1 -start_time = datetime.now() - -# Models -# ======l -transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.005), - ConstantVelocity(0.005), - ConstantVelocity(0)]) - -# Simulate Groundtruth -# ==================== -gnd_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), - ConstantVelocity(0.), - ConstantVelocity(0.)]) -truths = set() -truth = GroundTruthPath([GroundTruthState([0, 0.2, 0, 0.2, 0, 0], timestamp=start_time)]) -for k in range(1, num_iter + 1): - truth.append(GroundTruthState( - gnd_transition_model.function(truth[k - 1], noise=True, - time_interval=timedelta(seconds=1)), - timestamp=start_time + timedelta(seconds=k))) -truths.add(truth) - -truth = GroundTruthPath([GroundTruthState([0, 0.2, 20, -0.2, 0, 0], timestamp=start_time)]) -for k in range(1, num_iter + 1): - truth.append(GroundTruthState( - gnd_transition_model.function(truth[k - 1], noise=True, - time_interval=timedelta(seconds=1)), - timestamp=start_time + timedelta(seconds=k))) -truths.add(truth) - -# Create sensors -# ============== -sensors = set() -for n in range(0, total_no_sensors): - rotation_offset = StateVector( - [Angle(0), Angle(-np.pi / 2), Angle(0)]) # Camera rotation offset - pan_tilt = StateVector([Angle(0), Angle(-np.pi / 32)]) # Camera pan and tilt - - sensor = PanTiltUAVCamera(ndim_state=6, mapping=[0, 2, 4], - 
noise_covar=np.diag([0.001, 0.001, 0.001]), - fov_angle=[np.radians(15), np.radians(10)], - rotation_offset=rotation_offset, - pan=pan_tilt[0], tilt=pan_tilt[1], - position=StateVector([10., 10., 100.])) - sensors.add(sensor) -for sensor in sensors: - sensor.timestamp = start_time - -# Predctor and Updater -# ==================== -predictor = KalmanPredictor(transition_model) -updater = ExtendedKalmanUpdater(measurement_model=None) -# measurement model is added to detections by the sensor - -# Initialise tracks -# ================= -tracks = [] -for truth in truths: - sv = truth[0].state_vector - prior = GaussianState(sv, np.diag([0.5, 0.5, 0.5, 0.5, 0.5, 0.5]), timestamp=start_time) - tracks.append(Track([prior])) - -# Initialise sensor manager -# ========================= -reward_function = UncertaintyRewardFunction(predictor, updater) -sensor_manager = BruteForceSensorManager(sensors, reward_function) -# sensor_manager = OptimizeBruteSensorManager(sensors, -# reward_function=reward_function, -# n_grid_points=15, -# finish=True) - -# Hypothesiser and Data Associator -# ================================ -hypothesiser = DistanceHypothesiser(predictor, updater, measure=Mahalanobis(), missed_distance=5) -data_associator = GNNWith2DAssignment(hypothesiser) - -# Run the sensor manager -# ====================== - -# Start timer for cell execution time -cell_start_time2 = time.time() -timestamps = [] -for k in range(1, num_iter + 1): - timestamps.append(start_time + timedelta(seconds=k)) - -fig = plt.figure(figsize=(10, 6)) -ax = fig.add_subplot(1, 1, 1) -plt.ion() -for k, timestep in enumerate(timestamps): - - print(timestep) - # Generate chosen configuration - chosen_actions = sensor_manager.choose_actions(tracks, timestep) - - # Create empty dictionary for measurements - measurements = [] - - for chosen_action in chosen_actions: - for sensor, actions in chosen_action.items(): - sensor.add_actions(actions) - - ax.cla() - ax.set_xlabel("$x$") - ax.set_ylabel("$y$") - 
ax.set_xlim(-10, 30) - ax.set_ylim(-10, 30) - ax.set_aspect('equal') - - # Fov ranges (min, center, max) - xmin, xmax, ymin, ymax = get_camera_footprint(sensor) - - ax.add_patch( - Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, facecolor='none', edgecolor='b')) - - truth_states = OrderedSet(truth[timestep] for truth in truths) - for sensor in sensors: - sensor.act(timestep) - - # Observe this ground truth - sensor_measurements = sensor.measure(truth_states, noise=True) - measurements.extend(sensor_measurements) - - # Fov ranges (min, center, max) - xmin, xmax, ymin, ymax = get_camera_footprint(sensor) - - ax.add_patch( - Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, facecolor='none', edgecolor='r')) - for truth in truths: - data = np.array([state.state_vector for state in truth[:k + 2]]) - ax.plot(data[:, 0], data[:, 2], '--', label="Ground truth") - - hypotheses = data_associator.associate(tracks, - measurements, - timestep) - for track in tracks: - hypothesis = hypotheses[track] - if hypothesis.measurement: - post = updater.update(hypothesis) - track.append(post) - else: # When data associator says no detections are good enough, we'll keep the prediction - track.append(hypothesis.prediction) - - for track in tracks: - data = np.array([state.state_vector for state in track]) - ax.plot(data[:, 0], data[:, 2], '-', label="Ground truth") - - plt.pause(0.1) -cell_run_time2 = round(time.time() - cell_start_time2, 2) - -# Plot the results -# ================ - -# Plot ground truths, tracks and uncertainty ellipses for each target. 
-plotter = Plotter() -plotter.plot_sensors(sensors) -plotter.plot_ground_truths(truths, [0, 2]) -plotter.plot_tracks(set(tracks), [0, 2], uncertainty=True) - -# Metrics -siap_generator = SIAPMetrics(position_measure=Euclidean((0, 2)), - velocity_measure=Euclidean((1, 3))) -associator = TrackToTruth(association_threshold=30) -uncertainty_generator = SumofCovarianceNormsMetric() - -metric_manager = SimpleManager([siap_generator, uncertainty_generator], - associator=associator) -metric_manager.add_data(truths, tracks) -metricsA = metric_manager.generate_metrics() - -# SIAP metrics -fig, axes = plt.subplots(2) -times = metric_manager.list_timestamps() -pa_metricA = metricsA['SIAP Position Accuracy at times'] -va_metricA = metricsA['SIAP Velocity Accuracy at times'] -axes[0].set(title='Positional Accuracy', xlabel='Time', ylabel='PA') -axes[0].plot(times, [metric.value for metric in pa_metricA.value], - label='BruteForceSensorManager') -axes[0].legend() -axes[1].set(title='Velocity Accuracy', xlabel='Time', ylabel='VA') -axes[1].plot(times, [metric.value for metric in va_metricA.value], - label='BruteForceSensorManager') -axes[1].legend() - -# Uncertainty metrics -uncertainty_metricA = metricsA['Sum of Covariance Norms Metric'] -fig = plt.figure() -ax = fig.add_subplot(1, 1, 1) -ax.plot([i.timestamp for i in uncertainty_metricA.value], - [i.value for i in uncertainty_metricA.value], - label='BruteForceSensorManager') -ax.set_ylabel("Sum of covariance matrix norms") -ax.set_xlabel("Time") -ax.legend() - -# Print run time -print(f'Optimised Brute Force: {cell_run_time2} s') diff --git a/examples/reactive-isr/smcphd_init-example.py b/examples/reactive-isr/smcphd_init-example.py deleted file mode 100644 index 1db9b4025..000000000 --- a/examples/reactive-isr/smcphd_init-example.py +++ /dev/null @@ -1,297 +0,0 @@ -from matplotlib import pyplot as plt -from matplotlib.patches import Ellipse -from pyehm.plugins.stonesoup import JPDAWithEHM2 - -from stonesoup.custom.jipda import 
JIPDAWithEHM2 -from stonesoup.deleter.time import UpdateTimeDeleter -from stonesoup.functions import gm_reduce_single -from stonesoup.gater.distance import DistanceGater -from stonesoup.hypothesiser.probability import IPDAHypothesiser, PDAHypothesiser -from stonesoup.measures import Mahalanobis -from stonesoup.predictor.kalman import KalmanPredictor -from stonesoup.resampler.particle import SystematicResampler -from stonesoup.types.array import StateVector, StateVectors -from stonesoup.types.numeric import Probability -from stonesoup.types.particle import Particle -from stonesoup.types.state import GaussianState, ParticleState -from stonesoup.custom.smcphd import SMCPHDFilter, SMCPHDInitiator - -from datetime import datetime -from datetime import timedelta -import numpy as np -from scipy.stats import uniform, multivariate_normal - -from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \ - ConstantVelocity -from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState -from stonesoup.types.detection import TrueDetection -from stonesoup.types.detection import Clutter -from stonesoup.models.measurement.linear import LinearGaussian -from stonesoup.types.update import GaussianStateUpdate -from stonesoup.updater.kalman import KalmanUpdater - - -def plot_cov_ellipse(cov, pos, nstd=1, ax=None, **kwargs): - """ - Plots an `nstd` sigma error ellipse based on the specified covariance - matrix (`cov`). Additional keyword arguments are passed on to the - ellipse patch artist. - Parameters - ---------- - cov : The 2x2 covariance matrix to base the ellipse on - pos : The location of the center of the ellipse. Expects a 2-element - sequence of [x0, y0]. - nstd : The radius of the ellipse in numbers of standard deviations. - Defaults to 2 standard deviations. - ax : The axis that the ellipse will be plotted on. Defaults to the - current axis. - Additional keyword arguments are pass on to the ellipse patch. 
- Returns - ------- - A matplotlib ellipse artist - """ - - def eigsorted(cov): - vals, vecs = np.linalg.eigh(cov) - order = vals.argsort()[::-1] - return vals[order], vecs[:, order] - - if ax is None: - ax = plt.gca() - - vals, vecs = eigsorted(cov) - theta = np.degrees(np.arctan2(*vecs[:, 0][::-1])) - - # Width and height are "full" widths, not radius - width, height = 2 * nstd * np.sqrt(vals) - ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, - alpha=0.4, **kwargs) - - ax.add_artist(ellip) - return ellip - -# np.random.seed(1991) - -# Parameters -# ========== -start_time = datetime.now() # Simulation start time -prob_detect = Probability(.9) # 90% chance of detection. -prob_death = Probability(0.01) # Probability of death -prob_birth = Probability(0.1) # Probability of birth -prob_survive = Probability(0.99) # Probability of survival -birth_rate = 0.02 # Birth-rate (Mean number of new targets per scan) -clutter_rate = 2 # Clutter-rate (Mean number of clutter measurements per scan) -surveillance_region = [[-10, 30], [0, 30]] # The surveillance region x=[-10, 30], y=[0, 30] -surveillance_area = (surveillance_region[0][1] - surveillance_region[0][0]) \ - * (surveillance_region[1][1] - surveillance_region[1][0]) -clutter_intensity = clutter_rate / surveillance_area # Clutter intensity per unit volume/area -birth_density = GaussianState(StateVector(np.array([10., 0.0, 10., 0.0])), - np.diag([10. ** 2, 1. ** 2, 10. ** 2, 1. ** 2])) # Birth density -birth_scheme = 'mixture' # Birth scheme. 
Possible values are 'expansion' and 'mixture' -num_particles = 2 ** 12 # Number of particles used by the PHD filter -num_iter = 100 # Number of simulation steps -PLOT = True # Set [True | False] to turn plotting [ON | OFF] - -# Models -# ====== -# Transition model -transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.01), - ConstantVelocity(0.01)]) -# Measurement model -measurement_model = LinearGaussian(ndim_state=4, - mapping=(0, 2), - noise_covar=np.array([[0.1, 0], - [0, 0.1]])) - -# Simulate Groundtruth -# ==================== -gnd_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), - ConstantVelocity(0.)]) -truths = set() -truth = GroundTruthPath([GroundTruthState([0, 0.2, 0, 0.2], timestamp=start_time)]) -for k in range(1, num_iter + 1): - truth.append(GroundTruthState( - gnd_transition_model.function(truth[k - 1], noise=True, time_interval=timedelta(seconds=1)), - timestamp=start_time + timedelta(seconds=k))) -truths.add(truth) - -truth = GroundTruthPath([GroundTruthState([0, 0.2, 20, -0.2], timestamp=start_time)]) -for k in range(1, num_iter + 1): - truth.append(GroundTruthState( - gnd_transition_model.function(truth[k - 1], noise=True, time_interval=timedelta(seconds=1)), - timestamp=start_time + timedelta(seconds=k))) -truths.add(truth) - -timestamps = [] -for k in range(1, num_iter + 1): - timestamps.append(start_time + timedelta(seconds=k)) - -# Simulate measurements -# ===================== -scans = [] - -for k in range(num_iter): - measurement_set = set() - - # True detections - for truth in truths: - # Generate actual detection from the state with a 10% chance that no detection is received. 
- if np.random.rand() <= prob_detect: - measurement = measurement_model.function(truth[k], noise=True) - measurement_set.add(TrueDetection(state_vector=measurement, - groundtruth_path=truth, - timestamp=truth[k].timestamp, - measurement_model=measurement_model)) - - # Generate clutter at this time-step - truth_x = truth[k].state_vector[0] - truth_y = truth[k].state_vector[2] - - # Clutter detections - for _ in range(np.random.poisson(clutter_rate)): - x = uniform.rvs(-10, 30) - y = uniform.rvs(0, 25) - measurement_set.add(Clutter(np.array([[x], [y]]), timestamp=truth[k].timestamp, - measurement_model=measurement_model)) - scans.append((timestamps[k], measurement_set)) - -# Predictor & Updater -# =================== -predictor = KalmanPredictor(transition_model) -updater = KalmanUpdater(measurement_model) - -# Hypothesiser & Data Associator -# ============================== -hypothesiser = IPDAHypothesiser(predictor, updater, clutter_intensity, prob_detect=prob_detect, - prob_survive=prob_survive) -# hypothesiser = PDAHypothesiser(predictor, updater, clutter_intensity, prob_detect=prob_detect) -hypothesiser = DistanceGater(hypothesiser, Mahalanobis(), 10) -associator = JIPDAWithEHM2(hypothesiser) - -# Track Deleter -# ============= -deleter = UpdateTimeDeleter(time_since_update=timedelta(minutes=5)) - -# Initiator -# ========= -# Initialise PHD Filter -resampler = SystematicResampler() -phd_filter = SMCPHDFilter(birth_density=birth_density, transition_model=transition_model, - measurement_model=None, prob_detect=prob_detect, - prob_death=prob_death, prob_birth=prob_birth, - birth_rate=birth_rate, clutter_intensity=clutter_intensity, - num_samples=num_particles, resampler=resampler, - birth_scheme=birth_scheme) - -# Sample prior state from birth density -state_vector = StateVectors(multivariate_normal.rvs(birth_density.state_vector.ravel(), - birth_density.covar, - size=num_particles).T) -weight = np.ones((num_particles,)) * Probability(1 / num_particles) -state = 
ParticleState(state_vector=state_vector, weight=weight, timestamp=start_time) - - -initiator = SMCPHDInitiator(filter=phd_filter, prior=state) - -# Estimate -# ======== - -# Plot the prior -if PLOT: - fig1 = plt.figure(figsize=(13, 7)) - ax1 = plt.gca() - ax1.plot(state.state_vector[0, :], state.state_vector[2, :], 'r.') - -# Main tracking loop -tracks = set() -for k, (timestamp, detections) in enumerate(scans): - - tracks = list(tracks) - detections = list(detections) - num_tracks = len(tracks) - num_detections = len(detections) - - # Perform data association - associations = associator.associate(tracks, detections, timestamp) - - assoc_prob_matrix = np.zeros((num_tracks, num_detections + 1)) - for i, track in enumerate(tracks): - for hyp in associations[track]: - if not hyp: - assoc_prob_matrix[i, 0] = hyp.weight - else: - j = next(d_i for d_i, detection in enumerate(detections) - if hyp.measurement == detection) - assoc_prob_matrix[i, j + 1] = hyp.weight - - rho = np.zeros((len(detections))) - for j, detection in enumerate(detections): - rho_tmp = 1 - if len(assoc_prob_matrix): - for i, track in enumerate(tracks): - rho_tmp *= 1 - assoc_prob_matrix[i, j + 1] - rho[j] = rho_tmp - - for track, multihypothesis in associations.items(): - - # calculate each Track's state as a Gaussian Mixture of - # its possible associations with each detection, then - # reduce the Mixture to a single Gaussian State - posterior_states = [] - posterior_state_weights = [] - for hypothesis in multihypothesis: - posterior_state_weights.append(hypothesis.probability) - if hypothesis: - posterior_states.append(updater.update(hypothesis)) - else: - posterior_states.append(hypothesis.prediction) - - # Merge/Collapse to single Gaussian - means = StateVectors([state.state_vector for state in posterior_states]) - covars = np.stack([state.covar for state in posterior_states], axis=2) - weights = np.asarray(posterior_state_weights) - - post_mean, post_covar = gm_reduce_single(means, covars, 
weights) - - track.append(GaussianStateUpdate( - np.array(post_mean), np.array(post_covar), - multihypothesis, - multihypothesis[0].prediction.timestamp)) - - tracks = set(tracks) - new_tracks = initiator.initiate(detections, timestamp, weights=rho) - tracks |= new_tracks - state = initiator._state - - # Delete tracks that have not been updated for a while - del_tracks = set() - for track in tracks: - if track.exist_prob < 0.1: - del_tracks.add(track) - tracks -= del_tracks - - print('\n===========================================') - print(f'Num targets: {np.sum(state.weight)} - Num new targets: {len(new_tracks)}') - for track in tracks: - print(f'Track {track.id} - Exist prob: {track.exist_prob}') - - # Plot resulting density - if PLOT: - ax1.cla() - for i, truth in enumerate(truths): - data = np.array([s.state_vector for s in truth[:k + 1]]) - ax1.plot(data[:, 0], data[:, 2], '--', label=f'Groundtruth Track {i+1}') - if len(detections): - det_data = np.array([det.state_vector for det in detections]) - ax1.plot(det_data[:, 0], det_data[:, 1], '*g', label='Detections') - # ax1.plot(state.state_vector[0, :], state.state_vector[2, :], - # 'r.', label='Particles') - - for track in tracks: - data = np.array([s.state_vector for s in track]) - ax1.plot(data[:, 0], data[:, 2], label=f'Track {track.id}') - plot_cov_ellipse(track.covar[[0, 2], :][:, [0, 2]], track.state_vector[[0, 2], :], - edgecolor='r', facecolor='none', ax=ax1) - plt.axis([*surveillance_region[0], *surveillance_region[1]]) - plt.legend(loc='center right') - plt.pause(0.01) diff --git a/examples/reactive-isr/smcphd_init-sm-example-movable.py b/examples/reactive-isr/smcphd_init-sm-example-movable.py deleted file mode 100644 index af434d2a5..000000000 --- a/examples/reactive-isr/smcphd_init-sm-example-movable.py +++ /dev/null @@ -1,302 +0,0 @@ -from matplotlib import pyplot as plt -from matplotlib.patches import Ellipse, Rectangle -from ordered_set import OrderedSet - -from shapely.geometry import Point 
-from shapely.ops import unary_union - -from stonesoup.custom.sensor.movable import MovableUAVCamera -from stonesoup.sensormanager import BruteForceSensorManager -from stonesoup.sensormanager.reward import UncertaintyRewardFunction, \ - RolloutUncertaintyRewardFunction -from stonesoup.types.angle import Angle -from stonesoup.types.array import StateVector -from stonesoup.types.numeric import Probability -from stonesoup.types.state import GaussianState, ParticleState -from stonesoup.custom.tracker import SMCPHD_JIPDA -from matplotlib.path import Path - -from datetime import datetime -from datetime import timedelta -import numpy as np - -from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \ - ConstantVelocity -from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState -from stonesoup.types.update import GaussianStateUpdate -from stonesoup.updater.kalman import KalmanUpdater - - -def plot_cov_ellipse(cov, pos, nstd=1, ax=None, **kwargs): - """ - Plots an `nstd` sigma error ellipse based on the specified covariance - matrix (`cov`). Additional keyword arguments are passed on to the - ellipse patch artist. - Parameters - ---------- - cov : The 2x2 covariance matrix to base the ellipse on - pos : The location of the center of the ellipse. Expects a 2-element - sequence of [x0, y0]. - nstd : The radius of the ellipse in numbers of standard deviations. - Defaults to 2 standard deviations. - ax : The axis that the ellipse will be plotted on. Defaults to the - current axis. - Additional keyword arguments are pass on to the ellipse patch. 
- Returns - ------- - A matplotlib ellipse artist - """ - - def eigsorted(cov): - vals, vecs = np.linalg.eigh(cov) - order = vals.argsort()[::-1] - return vals[order], vecs[:, order] - - if ax is None: - ax = plt.gca() - - vals, vecs = eigsorted(cov) - theta = np.degrees(np.arctan2(*vecs[:, 0][::-1])) - - # Width and height are "full" widths, not radius - width, height = 2 * nstd * np.sqrt(vals) - ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, - alpha=0.4, **kwargs) - - ax.add_artist(ellip) - return ellip - -# np.random.seed(1991) - -def _prob_detect_func(fovs): - """Closure to return the probability of detection function for a given environment scan""" - - # Get the union of all field of views - fovs_union = unary_union(fovs) - if fovs_union.geom_type == 'MultiPolygon': - fovs = [poly for poly in fovs_union] - else: - fovs = [fovs_union] - - # Probability of detection nested function - def prob_detect_func(state): - for poly in fovs: - if isinstance(state, ParticleState): - prob_detect_arr = np.full((len(state),), Probability(0)) - path_p = Path(poly.boundary) - points = state.state_vector[[0, 2], :].T - inside_points = path_p.contains_points(points) - prob_detect_arr[inside_points] = prob_detect - return prob_detect_arr - else: - point = Point(state.state_vector[0, 0], state.state_vector[2, 0]) - return prob_detect if poly.contains(point) else Probability(0) - - return prob_detect_func - -# if __name__ == '__main__': -# Parameters -# ========== -start_time = datetime.now() # Simulation start time -prob_detect = Probability(.9) # 90% chance of detection. 
-prob_death = Probability(0.01) # Probability of death -prob_birth = Probability(0.1) # Probability of birth -prob_survive = Probability(0.99) # Probability of survival -birth_rate = 0.02 # Birth-rate (Mean number of new targets per scan) -clutter_rate = 2 # Clutter-rate (Mean number of clutter measurements per scan) -surveillance_region = [[-10, 30], [0, 30]] # The surveillance region x=[-10, 30], y=[0, 30] -surveillance_area = (surveillance_region[0][1] - surveillance_region[0][0]) \ - * (surveillance_region[1][1] - surveillance_region[1][0]) -clutter_intensity = clutter_rate / surveillance_area # Clutter intensity per unit volume/area -birth_density = GaussianState(StateVector(np.array([10., 0.0, 10., 0.0, 0.0, 0.0])), - np.diag([10. ** 2, 1. ** 2, 10. ** 2, 1. ** 2, .0, .0])) # Birth density -birth_scheme = 'mixture' # Birth scheme. Possible values are 'expansion' and 'mixture' -num_particles = 2 ** 13 # Number of particles used by the PHD filter -num_iter = 100 # Number of simulation steps -total_no_sensors = 1 -PLOT = True # Set [True | False] to turn plotting [ON | OFF] - -# Models -# ====== -# Transition model -transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.01), - ConstantVelocity(0.01), - ConstantVelocity(0.01)]) - -# Simulate Groundtruth -# ==================== -gnd_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), - ConstantVelocity(0.), - ConstantVelocity(0.)]) -truths = set() -truth = GroundTruthPath([GroundTruthState([0, 0.2, 0, 0.2, 0, 0], timestamp=start_time)]) -for k in range(1, num_iter + 1): - truth.append(GroundTruthState( - gnd_transition_model.function(truth[k - 1], noise=True, time_interval=timedelta(seconds=1)), - timestamp=start_time + timedelta(seconds=k))) -truths.add(truth) - -truth = GroundTruthPath([GroundTruthState([0, 0.2, 20, -0.2, 0, 0], timestamp=start_time)]) -for k in range(1, num_iter + 1): - truth.append(GroundTruthState( - gnd_transition_model.function(truth[k - 1], 
noise=True, time_interval=timedelta(seconds=1)), - timestamp=start_time + timedelta(seconds=k))) -truths.add(truth) - -timestamps = [] -for k in range(1, num_iter + 1): - timestamps.append(start_time + timedelta(seconds=k)) - - -# Create sensors -# ============== -sensors = set() -for i in range(0, total_no_sensors): - rotation_offset = StateVector( - [Angle(0), Angle(-np.pi / 2), Angle(0)]) # Camera rotation offset - pan_tilt = StateVector([Angle(0), Angle(-np.pi / 32)]) # Camera pan and tilt - - position = StateVector([i * 10., 10., 100.]) - resolutions = {'location_x': 5., 'location_y': 5.} - limits = {'location_x': surveillance_region[0], 'location_y': surveillance_region[1]} - sensor = MovableUAVCamera(ndim_state=6, mapping=[0, 2, 4], - noise_covar=np.diag([0.05, 0.05, 0.05]), - location_x=position[0], location_y=position[1], - resolutions=resolutions, - position=position, - fov_radius=10, - limits=limits) - sensors.add(sensor) -for sensor in sensors: - sensor.timestamp = start_time - -# # Predictor & Updater -# # =================== -# predictor = KalmanPredictor(transition_model) -# updater = KalmanUpdater(None) -# -# # Hypothesiser & Data Associator -# # ============================== -# hypothesiser = IPDAHypothesiser(predictor, updater, clutter_intensity, prob_detect=prob_detect, -# prob_survive=prob_survive) -# # hypothesiser = PDAHypothesiser(predictor, updater, clutter_intensity, prob_detect=prob_detect) -# hypothesiser = DistanceGater(hypothesiser, Mahalanobis(), 10) -# associator = JIPDAWithEHM2(hypothesiser) -# -# # Track Deleter -# # ============= -# deleter = UpdateTimeDeleter(time_since_update=timedelta(minutes=5)) -# -# # Initiator -# # ========= -# # Initialise PHD Filter -# resampler = SystematicResampler() -# phd_filter = SMCPHDFilter(birth_density=birth_density, transition_model=transition_model, -# measurement_model=None, prob_detect=prob_detect, -# prob_death=prob_death, prob_birth=prob_birth, -# birth_rate=birth_rate, 
clutter_intensity=clutter_intensity, -# num_samples=num_particles, resampler=resampler, -# birth_scheme=birth_scheme) -# -# # Sample prior state from birth density -# state_vector = StateVectors(multivariate_normal.rvs(birth_density.state_vector.ravel(), -# birth_density.covar, -# size=num_particles).T) -# weight = np.ones((num_particles,)) * Probability(1 / num_particles) -# state = ParticleState(state_vector=state_vector, weight=weight, timestamp=start_time) -# -# -# initiator = SMCPHDInitiator(filter=phd_filter, prior=state) - -tracker = SMCPHD_JIPDA(birth_density=birth_density, transition_model=transition_model, - measurement_model=None, prob_detection=prob_detect, - prob_death=prob_death, prob_birth=prob_birth, - birth_rate=birth_rate, clutter_intensity=clutter_intensity, - num_samples=num_particles, birth_scheme=birth_scheme, - start_time=start_time) - -# Initialise sensor manager -# ========================= -# reward_function = UncertaintyRewardFunction(tracker._predictor, tracker._updater) -reward_function = RolloutUncertaintyRewardFunction(tracker._predictor, tracker._updater, 2, - num_samples=10, interval=timedelta(seconds=5)) -sensor_manager = BruteForceSensorManager(sensors, reward_function) - -# Estimate -# ======== - -# Plot the prior -if PLOT: - fig1 = plt.figure(figsize=(10, 6)) - ax1 = plt.gca() - # ax1.plot(state.state_vector[0, :], state.state_vector[2, :], 'r.') - -# Main tracking loop -tracks = set() -for k, timestamp in enumerate(timestamps): - - tracks = list(tracks) - - # Generate chosen configuration - chosen_actions = sensor_manager.choose_actions(tracks, timestamp) - - # Create empty dictionary for measurements - detections = [] - - for chosen_action in chosen_actions: - for sensor, actions in chosen_action.items(): - sensor.add_actions(actions) - - fovs = [] - truth_states = OrderedSet(truth[timestamp] for truth in truths) - for sensor in sensors: - sensor.act(timestamp) - center = (sensor.position[0], sensor.position[1]) - radius = 
sensor.fov_radius - p = Point(center).buffer(radius) - fovs.append(p) - - tracker.prob_detect = _prob_detect_func(fovs) - - for sensor in sensors: - - # Observe this ground truth - sensor_measurements = sensor.measure(truth_states, noise=True) - detections.extend(sensor_measurements) - - detections = list(detections) - num_tracks = len(tracks) - num_detections = len(detections) - - tracks = tracker.track(detections, timestamp) - - print('\n===========================================') - # print(f'Num targets: {np.sum(state.weight)} - Num new targets: {len(new_tracks)}') - for track in tracks: - print(f'Track {track.id} - Exist prob: {track.exist_prob}') - - # Plot resulting density - if PLOT: - ax1.cla() - - circle = plt.Circle((sensor.position[0], sensor.position[1]), radius=sensor.fov_radius, - color='r', - fill=False) - ax1.add_artist(circle) - for i, truth in enumerate(truths): - data = np.array([s.state_vector for s in truth[:k + 1]]) - ax1.plot(data[:, 0], data[:, 2], '--', label=f'Groundtruth Track {i+1}') - if len(detections): - det_data = np.array([det.state_vector for det in detections]) - ax1.plot(det_data[:, 0], det_data[:, 1], '*g', label='Detections') - # ax1.plot(state.state_vector[0, :], state.state_vector[2, :], - # 'r.', label='Particles') - - for track in tracks: - data = np.array([s.state_vector for s in track]) - ax1.plot(data[:, 0], data[:, 2], label=f'Track {track.id}') - plot_cov_ellipse(track.covar[[0, 2], :][:, [0, 2]], track.state_vector[[0, 2], :], - edgecolor='r', facecolor='none', ax=ax1) - plt.axis([*surveillance_region[0], *surveillance_region[1]]) - plt.legend(loc='upper right') - plt.pause(0.1) diff --git a/examples/reactive-isr/smcphd_init-sm-example.py b/examples/reactive-isr/smcphd_init-sm-example.py deleted file mode 100644 index 02e7bcca1..000000000 --- a/examples/reactive-isr/smcphd_init-sm-example.py +++ /dev/null @@ -1,331 +0,0 @@ -from matplotlib import pyplot as plt -from matplotlib.patches import Ellipse, Rectangle -from 
ordered_set import OrderedSet - -from stonesoup.custom.functions import get_camera_footprint -from stonesoup.custom.jipda import JIPDAWithEHM2 -from stonesoup.custom.sensor.pan_tilt import PanTiltUAVCamera -from stonesoup.deleter.time import UpdateTimeDeleter -from stonesoup.functions import gm_reduce_single -from stonesoup.gater.distance import DistanceGater -from stonesoup.hypothesiser.probability import IPDAHypothesiser -from stonesoup.measures import Mahalanobis -from stonesoup.predictor.kalman import KalmanPredictor -from stonesoup.resampler.particle import SystematicResampler -from stonesoup.sensormanager import BruteForceSensorManager -from stonesoup.sensormanager.reward import UncertaintyRewardFunction -from stonesoup.types.angle import Angle -from stonesoup.types.array import StateVector, StateVectors -from stonesoup.types.numeric import Probability -from stonesoup.types.state import GaussianState, ParticleState -from stonesoup.custom.smcphd import SMCPHDFilter, SMCPHDInitiator -from stonesoup.custom.tracker import SMCPHD_JIPDA - -from datetime import datetime -from datetime import timedelta -import numpy as np -from scipy.stats import uniform, multivariate_normal - -from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \ - ConstantVelocity -from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState -from stonesoup.types.update import GaussianStateUpdate -from stonesoup.updater.kalman import KalmanUpdater - - -def plot_cov_ellipse(cov, pos, nstd=1, ax=None, **kwargs): - """ - Plots an `nstd` sigma error ellipse based on the specified covariance - matrix (`cov`). Additional keyword arguments are passed on to the - ellipse patch artist. - Parameters - ---------- - cov : The 2x2 covariance matrix to base the ellipse on - pos : The location of the center of the ellipse. Expects a 2-element - sequence of [x0, y0]. - nstd : The radius of the ellipse in numbers of standard deviations. 
- Defaults to 2 standard deviations. - ax : The axis that the ellipse will be plotted on. Defaults to the - current axis. - Additional keyword arguments are pass on to the ellipse patch. - Returns - ------- - A matplotlib ellipse artist - """ - - def eigsorted(cov): - vals, vecs = np.linalg.eigh(cov) - order = vals.argsort()[::-1] - return vals[order], vecs[:, order] - - if ax is None: - ax = plt.gca() - - vals, vecs = eigsorted(cov) - theta = np.degrees(np.arctan2(*vecs[:, 0][::-1])) - - # Width and height are "full" widths, not radius - width, height = 2 * nstd * np.sqrt(vals) - ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, - alpha=0.4, **kwargs) - - ax.add_artist(ellip) - return ellip - -# np.random.seed(1991) - - -# Parameters -# ========== -start_time = datetime.now() # Simulation start time -prob_detect = Probability(.9) # 90% chance of detection. -prob_death = Probability(0.01) # Probability of death -prob_birth = Probability(0.1) # Probability of birth -prob_survive = Probability(0.99) # Probability of survival -birth_rate = 0.02 # Birth-rate (Mean number of new targets per scan) -clutter_rate = 2 # Clutter-rate (Mean number of clutter measurements per scan) -surveillance_region = [[-10, 30], [0, 30]] # The surveillance region x=[-10, 30], y=[0, 30] -surveillance_area = (surveillance_region[0][1] - surveillance_region[0][0]) \ - * (surveillance_region[1][1] - surveillance_region[1][0]) -clutter_intensity = clutter_rate / surveillance_area # Clutter intensity per unit volume/area -birth_density = GaussianState(StateVector(np.array([10., 0.0, 10., 0.0, 0.0, 0.0])), - np.diag([10. ** 2, 1. ** 2, 10. ** 2, 1. ** 2, .0, .0])) # Birth density -birth_scheme = 'mixture' # Birth scheme. 
Possible values are 'expansion' and 'mixture' -num_particles = 2 ** 13 # Number of particles used by the PHD filter -num_iter = 100 # Number of simulation steps -total_no_sensors = 1 -PLOT = True # Set [True | False] to turn plotting [ON | OFF] - -# Models -# ====== -# Transition model -transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.01), - ConstantVelocity(0.01), - ConstantVelocity(0.01)]) - -# Simulate Groundtruth -# ==================== -gnd_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), - ConstantVelocity(0.), - ConstantVelocity(0.)]) -truths = set() -truth = GroundTruthPath([GroundTruthState([0, 0.2, 0, 0.2, 0, 0], timestamp=start_time)]) -for k in range(1, num_iter + 1): - truth.append(GroundTruthState( - gnd_transition_model.function(truth[k - 1], noise=True, time_interval=timedelta(seconds=1)), - timestamp=start_time + timedelta(seconds=k))) -truths.add(truth) - -truth = GroundTruthPath([GroundTruthState([0, 0.2, 20, -0.2, 0, 0], timestamp=start_time)]) -for k in range(1, num_iter + 1): - truth.append(GroundTruthState( - gnd_transition_model.function(truth[k - 1], noise=True, time_interval=timedelta(seconds=1)), - timestamp=start_time + timedelta(seconds=k))) -truths.add(truth) - -timestamps = [] -for k in range(1, num_iter + 1): - timestamps.append(start_time + timedelta(seconds=k)) - - -# Create sensors -# ============== -sensors = set() -for n in range(0, total_no_sensors): - rotation_offset = StateVector( - [Angle(0), Angle(-np.pi / 2), Angle(0)]) # Camera rotation offset - pan_tilt = StateVector([Angle(0), Angle(-np.pi / 32)]) # Camera pan and tilt - - sensor = PanTiltUAVCamera(ndim_state=6, mapping=[0, 2, 4], - noise_covar=np.diag([0.05, 0.05, 0.05]), - fov_angle=[np.radians(15), np.radians(10)], - rotation_offset=rotation_offset, - pan=pan_tilt[0], tilt=pan_tilt[1], - position=StateVector([10., 10., 100.])) - sensors.add(sensor) -for sensor in sensors: - sensor.timestamp = start_time - -# 
Predictor & Updater -# =================== -predictor = KalmanPredictor(transition_model) -updater = KalmanUpdater(None) - -# Hypothesiser & Data Associator -# ============================== -hypothesiser = IPDAHypothesiser(predictor, updater, clutter_intensity, prob_detect=prob_detect, - prob_survive=prob_survive) -# hypothesiser = PDAHypothesiser(predictor, updater, clutter_intensity, prob_detect=prob_detect) -hypothesiser = DistanceGater(hypothesiser, Mahalanobis(), 10) -associator = JIPDAWithEHM2(hypothesiser) - -# Track Deleter -# ============= -deleter = UpdateTimeDeleter(time_since_update=timedelta(minutes=5)) - -# Initiator -# ========= -# Initialise PHD Filter -resampler = SystematicResampler() -phd_filter = SMCPHDFilter(birth_density=birth_density, transition_model=transition_model, - measurement_model=None, prob_detect=prob_detect, - prob_death=prob_death, prob_birth=prob_birth, - birth_rate=birth_rate, clutter_intensity=clutter_intensity, - num_samples=num_particles, resampler=resampler, - birth_scheme=birth_scheme) - -# Sample prior state from birth density -state_vector = StateVectors(multivariate_normal.rvs(birth_density.state_vector.ravel(), - birth_density.covar, - size=num_particles).T) -weight = np.ones((num_particles,)) * Probability(1 / num_particles) -state = ParticleState(state_vector=state_vector, weight=weight, timestamp=start_time) - - -initiator = SMCPHDInitiator(filter=phd_filter, prior=state) - -tracker = SMCPHD_JIPDA(birth_density=birth_density, transition_model=transition_model, - measurement_model=None, prob_detect=prob_detect, - prob_death=prob_death, prob_birth=prob_birth, - birth_rate=birth_rate, clutter_intensity=clutter_intensity, - num_samples=num_particles, birth_scheme=birth_scheme, - start_time=start_time) - -# Initialise sensor manager -# ========================= -reward_function = UncertaintyRewardFunction(tracker._predictor, tracker._updater) -sensor_manager = BruteForceSensorManager(sensors, reward_function) - -# 
Estimate -# ======== - -# Plot the prior -if PLOT: - fig1 = plt.figure(figsize=(10, 6)) - ax1 = plt.gca() - # ax1.plot(state.state_vector[0, :], state.state_vector[2, :], 'r.') - -# Main tracking loop -tracks = set() -for k, timestamp in enumerate(timestamps): - - tracks = list(tracks) - - # Generate chosen configuration - chosen_actions = sensor_manager.choose_actions(tracks, timestamp) - - # Create empty dictionary for measurements - detections = [] - - for chosen_action in chosen_actions: - for sensor, actions in chosen_action.items(): - sensor.add_actions(actions) - - # Fov ranges (min, center, max) - foot1 = get_camera_footprint(sensor) - - truth_states = OrderedSet(truth[timestamp] for truth in truths) - for sensor in sensors: - sensor.act(timestamp) - - # Observe this ground truth - sensor_measurements = sensor.measure(truth_states, noise=True) - detections.extend(sensor_measurements) - - foot2 = get_camera_footprint(sensor) - - # tracks = tracker.track(detections, timestamp) - detections = list(detections) - num_tracks = len(tracks) - num_detections = len(detections) - - # Perform data association - associations = associator.associate(tracks, detections, timestamp) - - assoc_prob_matrix = np.zeros((num_tracks, num_detections + 1)) - for i, track in enumerate(tracks): - for hyp in associations[track]: - if not hyp: - assoc_prob_matrix[i, 0] = hyp.weight - else: - j = next(d_i for d_i, detection in enumerate(detections) - if hyp.measurement == detection) - assoc_prob_matrix[i, j + 1] = hyp.weight - - rho = np.zeros((len(detections))) - for j, detection in enumerate(detections): - rho_tmp = 1 - if len(assoc_prob_matrix): - for i, track in enumerate(tracks): - rho_tmp *= 1 - assoc_prob_matrix[i, j + 1] - rho[j] = rho_tmp - - for track, multihypothesis in associations.items(): - - # calculate each Track's state as a Gaussian Mixture of - # its possible associations with each detection, then - # reduce the Mixture to a single Gaussian State - posterior_states = 
[] - posterior_state_weights = [] - for hypothesis in multihypothesis: - posterior_state_weights.append(hypothesis.probability) - if hypothesis: - posterior_states.append(updater.update(hypothesis)) - else: - posterior_states.append(hypothesis.prediction) - - # Merge/Collapse to single Gaussian - means = StateVectors([state.state_vector for state in posterior_states]) - covars = np.stack([state.covar for state in posterior_states], axis=2) - weights = np.asarray(posterior_state_weights) - - post_mean, post_covar = gm_reduce_single(means, covars, weights) - - track.append(GaussianStateUpdate( - np.array(post_mean), np.array(post_covar), - multihypothesis, - multihypothesis[0].prediction.timestamp)) - - tracks = set(tracks) - new_tracks = initiator.initiate(detections, timestamp, weights=rho) - tracks |= new_tracks - state = initiator._state - - # Delete tracks that have not been updated for a while - del_tracks = set() - for track in tracks: - if track.exist_prob < 0.1: - del_tracks.add(track) - tracks -= del_tracks - - print('\n===========================================') - # print(f'Num targets: {np.sum(state.weight)} - Num new targets: {len(new_tracks)}') - for track in tracks: - print(f'Track {track.id} - Exist prob: {track.exist_prob}') - - # Plot resulting density - if PLOT: - ax1.cla() - xmin, xmax, ymin, ymax = foot1 - ax1.add_patch( - Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, facecolor='none', edgecolor='b')) - xmin, xmax, ymin, ymax = foot2 - ax1.add_patch( - Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, facecolor='none', edgecolor='r')) - for i, truth in enumerate(truths): - data = np.array([s.state_vector for s in truth[:k + 1]]) - ax1.plot(data[:, 0], data[:, 2], '--', label=f'Groundtruth Track {i+1}') - if len(detections): - det_data = np.array([det.state_vector for det in detections]) - ax1.plot(det_data[:, 0], det_data[:, 1], '*g', label='Detections') - # ax1.plot(state.state_vector[0, :], state.state_vector[2, :], - # 'r.', 
label='Particles') - - for track in tracks: - data = np.array([s.state_vector for s in track]) - ax1.plot(data[:, 0], data[:, 2], label=f'Track {track.id}') - plot_cov_ellipse(track.covar[[0, 2], :][:, [0, 2]], track.state_vector[[0, 2], :], - edgecolor='r', facecolor='none', ax=ax1) - plt.axis([*surveillance_region[0], *surveillance_region[1]]) - plt.legend(loc='upper right') - plt.pause(0.01) diff --git a/examples/smcphd-example.py b/examples/smcphd-example.py deleted file mode 100644 index 8bd824389..000000000 --- a/examples/smcphd-example.py +++ /dev/null @@ -1,160 +0,0 @@ -from matplotlib import pyplot as plt -from stonesoup.resampler.particle import SystematicResampler -from stonesoup.types.array import StateVector, StateVectors -from stonesoup.types.numeric import Probability -from stonesoup.types.particle import Particle -from stonesoup.types.state import GaussianState, ParticleState -from stonesoup.custom.smcphd import SMCPHDFilter - -from datetime import datetime -from datetime import timedelta -import numpy as np -from scipy.stats import uniform, multivariate_normal - -from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \ - ConstantVelocity -from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState -from stonesoup.types.detection import TrueDetection -from stonesoup.types.detection import Clutter -from stonesoup.models.measurement.linear import LinearGaussian - -np.random.seed(1991) - -# Parameters -# ========== -start_time = datetime.now() # Simulation start time -prob_detect = Probability(.9) # 90% chance of detection. 
-prob_death = Probability(0.01) # Probability of death -prob_birth = Probability(0.1) # Probability of birth -birth_rate = 0.05 # Birth-rate (Mean number of new targets per scan) -clutter_rate = .01 # Clutter-rate (Mean number of clutter measurements per scan) -surveillance_region = [[-10, 30], [0, 30]] # The surveillance region x=[-10, 30], y=[0, 30] -surveillance_area = (surveillance_region[0][1] - surveillance_region[0][0]) \ - * (surveillance_region[1][1] - surveillance_region[1][0]) -clutter_intensity = clutter_rate / surveillance_area # Clutter intensity per unit volume/area -birth_density = GaussianState(StateVector(np.array([10., 0.0, 10., 0.0])), - np.diag([10. ** 2, 1. ** 2, 10. ** 2, 1. ** 2])) # Birth density -birth_scheme = 'mixture' # Birth scheme. Possible values are 'expansion' and 'mixture' -num_particles = 2 ** 12 # Number of particles used by the PHD filter -num_iter = 100 # Number of simulation steps -PLOT = True # Set [True | False] to turn plotting [ON | OFF] - -# Models -# ====== -# Transition model -transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.05), - ConstantVelocity(0.05)]) -# Measurement model -measurement_model = LinearGaussian(ndim_state=4, - mapping=(0, 2), - noise_covar=np.array([[0.02, 0], - [0, 0.02]])) - -# Simulate Groundtruth -# ==================== -gnd_transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.), - ConstantVelocity(0.)]) -truths = set() -truth = GroundTruthPath([GroundTruthState([0, 0.2, 0, 0.2], timestamp=start_time)]) -for k in range(1, num_iter + 1): - truth.append(GroundTruthState( - gnd_transition_model.function(truth[k - 1], noise=True, time_interval=timedelta(seconds=1)), - timestamp=start_time + timedelta(seconds=k))) -truths.add(truth) - -truth = GroundTruthPath([GroundTruthState([0, 0.2, 20, -0.2], timestamp=start_time)]) -for k in range(1, num_iter + 1): - truth.append(GroundTruthState( - gnd_transition_model.function(truth[k - 1], noise=True, 
time_interval=timedelta(seconds=1)), - timestamp=start_time + timedelta(seconds=k))) -truths.add(truth) - -timestamps = [] -for k in range(1, num_iter + 1): - timestamps.append(start_time + timedelta(seconds=k)) - -# Plot ground truth. -if PLOT: - from stonesoup.plotter import Plotter - - plotter = Plotter() - plotter.ax.set_ylim(0, 25) - plotter.plot_ground_truths(truths, [0, 2]) - -# Simulate measurements -# ===================== -scans = [] - -for k in range(num_iter): - measurement_set = set() - - # True detections - for truth in truths: - # Generate actual detection from the state with a 10% chance that no detection is received. - if np.random.rand() <= prob_detect: - measurement = measurement_model.function(truth[k], noise=True) - measurement_set.add(TrueDetection(state_vector=measurement, - groundtruth_path=truth, - timestamp=truth[k].timestamp, - measurement_model=measurement_model)) - - # Generate clutter at this time-step - truth_x = truth[k].state_vector[0] - truth_y = truth[k].state_vector[2] - - # Clutter detections - for _ in range(np.random.poisson(clutter_rate)): - x = uniform.rvs(-10, 30) - y = uniform.rvs(0, 25) - measurement_set.add(Clutter(np.array([[x], [y]]), timestamp=truth[k].timestamp, - measurement_model=measurement_model)) - scans.append((timestamps[k], measurement_set)) - -# Initialise PHD Filter -# ===================== -resampler = SystematicResampler() -phd_filter = SMCPHDFilter(birth_density=birth_density, transition_model=transition_model, - measurement_model=None, prob_detect=prob_detect, - prob_death=prob_death, prob_birth=prob_birth, - birth_rate=birth_rate, clutter_intensity=clutter_intensity + 0.001, - num_samples=num_particles, resampler=resampler, - birth_scheme=birth_scheme) - -# Estimate -# ======== - -# Sample prior state from birth density -state_vector = StateVectors(multivariate_normal.rvs(birth_density.state_vector.ravel(), - birth_density.covar, - size=num_particles).T) -weight = np.ones((num_particles,)) * 
Probability(1 / num_particles) -state = ParticleState(state_vector=state_vector, weight=weight, timestamp=start_time) - -# Plot the prior -if PLOT: - fig1 = plt.figure(figsize=(13, 7)) - ax1 = plt.gca() - ax1.plot(state.state_vector[0, :], state.state_vector[2, :], 'r.') - -# Main tracking loop -for k, (timestamp, detections) in enumerate(scans): - - new_state = phd_filter.iterate(state, detections, timestamp) - state = new_state - - print('Num targets: ', np.sum(state.weight)) - - # Plot resulting density - if PLOT: - ax1.cla() - for i, truth in enumerate(truths): - data = np.array([s.state_vector for s in truth[:k + 1]]) - ax1.plot(data[:, 0], data[:, 2], '--', label=f'Groundtruth Track {i+1}') - if len(detections): - det_data = np.array([det.state_vector for det in detections]) - ax1.plot(det_data[:, 0], det_data[:, 1], '*g', label='Detections') - ax1.plot(new_state.state_vector[0, :], new_state.state_vector[2, :], - 'r.', label='Particles') - plt.axis([*surveillance_region[0], *surveillance_region[1]]) - plt.legend(loc='center right') - plt.pause(0.01) From ec683e5b4958f06870d364264c759c45a8bf3d8d Mon Sep 17 00:00:00 2001 From: prh Date: Tue, 5 Nov 2024 15:28:30 +0000 Subject: [PATCH 87/87] Added missing files --- examples/prh_example/output_files.py | 58 +++++++++++++++++ examples/prh_example/prh_funcs.py | 65 +++++++++++++++++++ .../{reactive-isr => prh_example}/utils.py | 0 3 files changed, 123 insertions(+) create mode 100644 examples/prh_example/output_files.py create mode 100644 examples/prh_example/prh_funcs.py rename examples/{reactive-isr => prh_example}/utils.py (100%) diff --git a/examples/prh_example/output_files.py b/examples/prh_example/output_files.py new file mode 100644 index 000000000..67f435fee --- /dev/null +++ b/examples/prh_example/output_files.py @@ -0,0 +1,58 @@ +import numpy as np + + +def output_tracks(filename, start_time, all_tracks): + # Output all the tracks from the trackers + f = open(filename, "w") + f.write(str(start_time) + 
"\n") # start time + f.write(str(len(all_tracks)) + "\n") # number of hierarchy levels + for i_level, level in enumerate(all_tracks): + f.write(str(i_level) + "\n") # index of this level + f.write(str(len(level)) + "\n") # number of trackers at this level + for i_tracker, tracker in enumerate(level): + f.write(str(i_tracker) + "\n") # index of this tracker + f.write(str(len(tracker)) + "\n") # number of tracks + for i_track, track in enumerate(tracker): + f.write(str(len(track)) + "\n") # number of states in track + for x in track: + f.write(str(x.timestamp) + "\n") # state timestamp + f.write(str(x.state_vector.flatten()) + "\n") # state value + f.write(str(x.covar.flatten()) + "\n") # covariance + f.close() + + +def output_meas(filename, start_time, platform_positions, all_detections): + np.set_printoptions(linewidth=np.inf) + f = open(filename, "w") + f.write(str(start_time) + "\n") + f.write(str(len(platform_positions)) + "\n") + for pos in platform_positions: + f.write(str(pos.flatten()) + "\n") + f.write(str(len(all_detections)) + "\n") + for i_level, level_det in enumerate(all_detections): + f.write(str(i_level) + "\n") + f.write(str(len(level_det)) + "\n") + for i_node, node_det in enumerate(level_det): + f.write(str(i_node) + "\n") + f.write(str(len(node_det)) + "\n") + for det in node_det: + f.write(str(det.timestamp) + "\n") + f.write(str(np.array(det.state_vector).flatten()) + "\n") + f.write(str(np.array(det.measurement_model.noise_covar).flatten()) + "\n") + if i_level > 0: + f.write(str(np.array(det.measurement_model.h_matrix.shape)) + "\n") + f.write(str(np.array(det.measurement_model.h_matrix).flatten()) + "\n") + f.close() + + +def output_truth(filename, start_time, truth): + # Output the truth + f = open(filename, "w") + f.write(str(start_time) + "\n") + f.write(str(len(truth)) + "\n") + for target in truth: + f.write(str(len(target.states)) + "\n") + for x in target.states: + f.write(str(x.timestamp) + "\n") + 
f.write(str(x.state_vector.flatten()) + "\n") + f.close() diff --git a/examples/prh_example/prh_funcs.py b/examples/prh_example/prh_funcs.py new file mode 100644 index 000000000..1ff6504da --- /dev/null +++ b/examples/prh_example/prh_funcs.py @@ -0,0 +1,65 @@ +import numpy as np +from stonesoup.types.array import StateVector, CovarianceMatrix + +def tile_with_circles(minpos, maxpos, numx, numy): + """ + Return centres and radius of a grid of circles with numx columns and numy rows which tile the region defined + by minpos and maxpos + """ + field_size = maxpos - minpos + grid_size = max(field_size[0] / numx, field_size[1] / numy) + radius = grid_size / np.sqrt(2); + grid_start = (field_size - np.array([numx - 1, numy - 1]) * grid_size) / 2.0 + minpos + centres = [] + for i in range(numx): + for j in range(numy): + centres.append(grid_start + np.array([i, j]) * grid_size) + return centres, radius + + +def merge_position_and_velocity(position, velocity, statedim, position_mapping, velocity_mapping): + """ + Create a state by merging a position and a velocity + """ + state = StateVector(np.zeros((1, statedim))) + state[position_mapping, :] = StateVector(position) + state[velocity_mapping, :] = StateVector(velocity) + return state + + +def merge_position_and_velocity_covariance(poscov, velcov, statedim, position_mapping, velocity_mapping): + """ + Create a state covariance by merging a position covariance and a velocity covariance + """ + covariance = CovarianceMatrix(np.zeros((statedim, statedim))) + covariance[np.ix_(position_mapping, position_mapping)] = poscov + covariance[np.ix_(velocity_mapping, velocity_mapping)] = velcov + return covariance + + +def fit_normal_to_uniform(minval, maxval): + """ + """ + mean = StateVector((minval + maxval)/2.0) + cov = CovarianceMatrix(np.diag(np.power(maxval - minval, 2.0)/12.0)) + return mean, cov + + +def to_single_state(tracks, statedim): + """ + Convert a set of tracks with two-state vectors to a set of tracks with one-state 
vectors + """ + new_tracks = set() + for track in tracks: + states = [] + for state in track.states: + if isinstance(state, Update): + new_state = GaussianStateUpdate(state.state_vector[-statedim:], state.covar[-statedim:, -statedim:], + hypothesis=state.hypothesis, + timestamp=state.timestamp) + else: + new_state = GaussianState(state.state_vector[-statedim:], state.covar[-statedim:, -statedim:], + timestamp=state.timestamp) + states.append(new_state) + new_tracks.add(Track(id=track.id, states=states)) + return new_tracks diff --git a/examples/reactive-isr/utils.py b/examples/prh_example/utils.py similarity index 100% rename from examples/reactive-isr/utils.py rename to examples/prh_example/utils.py