diff --git a/examples/prh_example/fusion.py b/examples/prh_example/fusion.py
new file mode 100644
index 000000000..8be36573d
--- /dev/null
+++ b/examples/prh_example/fusion.py
@@ -0,0 +1,71 @@
+from stonesoup.custom.types.tracklet import SensorTracks
+
+from prh_funcs import to_single_state
+
+
+class FusionNode:
+    """
+    Class to represent a node in a fusion hierarchy
+    """
+    def __init__(self, tracker, children, statedim, use_two_state_tracks=True):
+        # Tracker used at this node
+        self.tracker = tracker
+        # Child nodes used to supply tracks for fusion
+        self.children = children
+        # The dimension of a (single) target state
+        self.statedim = statedim
+        # Whether to propagate two-state tracks
+        self.use_two_state_tracks = use_two_state_tracks
+
+    @property
+    def tracks(self):
+        return self.tracker.tracks
+
+    @property
+    def detections(self):
+        if self.is_leaf():
+            return self.tracker.detector.detections
+        else:
+            sensor_scans = [d for scan in self.tracker._scans for d in scan.sensor_scans]
+            detections = {d for sscan in sensor_scans for d in sscan.detections}
+            return detections
+
+    def process_tracks(self, timestamp):
+        """Process the tracks at this node by reading in the child tracks, creating
+        pseudomeasurements and performing tracking"""
+        child_tracks = [child.tracker.tracks for child in self.children]
+        input_tracks = [SensorTracks(tracks, i) for i, tracks in enumerate(child_tracks)]
+        if not self.use_two_state_tracks:
+            input_tracks = [SensorTracks(to_single_state(track, self.statedim), i)
+                            for i, track in enumerate(input_tracks)]
+        two_state_tracks = self.tracker.process_tracks(input_tracks, timestamp)
+        if not self.use_two_state_tracks:
+            return to_single_state(two_state_tracks, self.statedim)
+        else:
+            return two_state_tracks
+
+    def is_leaf(self):
+        return len(self.children) == 0
+
+    def get_leaf_trackers(self):
+        if self.is_leaf():
+            return [self.tracker]
+        else:
+            leaf_trackers = []
+            for child in self.children:
+                leaf_trackers += child.get_leaf_trackers()
+            return leaf_trackers
+
+    # TODO: Try to make the fusion engine more pythonic with some class structure
+    # (e.g. a recursive runTracker(timestamp) that pulls tracks up from the children).
diff --git a/examples/prh_example/output_files.py b/examples/prh_example/output_files.py
new file mode 100644
index 000000000..67f435fee
--- /dev/null
+++ b/examples/prh_example/output_files.py
@@ -0,0 +1,58 @@
+import numpy as np
+
+
+def output_tracks(filename, start_time, all_tracks):
+    # Output all the tracks from the trackers
+    # (keep each flattened array on a single line so the file stays line-oriented)
+    np.set_printoptions(linewidth=np.inf)
+    with open(filename, "w") as f:
+        f.write(str(start_time) + "\n")  # start time
+        f.write(str(len(all_tracks)) + "\n")  # number of hierarchy levels
+        for i_level, level in enumerate(all_tracks):
+            f.write(str(i_level) + "\n")  # index of this level
+            f.write(str(len(level)) + "\n")  # number of trackers at this level
+            for i_tracker, tracker in enumerate(level):
+                f.write(str(i_tracker) + "\n")  # index of this tracker
+                f.write(str(len(tracker)) + "\n")  # number of tracks
+                for i_track, track in enumerate(tracker):
+                    f.write(str(len(track)) + "\n")  # number of states in track
+                    for x in track:
+                        f.write(str(x.timestamp) + "\n")  # state timestamp
+                        f.write(str(x.state_vector.flatten()) + "\n")  # state value
+                        f.write(str(x.covar.flatten()) + "\n")  # covariance
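+
+
+# A minimal reader sketch for the header written above (illustrative only and
+# unused by the example; it assumes the line layout produced by output_tracks).
+def read_tracks_header(filename):
+    with open(filename) as f:
+        start_time = f.readline().strip()  # start time
+        num_levels = int(f.readline())     # number of hierarchy levels
+    return start_time, num_levels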
+
+
+def output_meas(filename, start_time, platform_positions, all_detections):
+    np.set_printoptions(linewidth=np.inf)
+    with open(filename, "w") as f:
+        f.write(str(start_time) + "\n")
+        f.write(str(len(platform_positions)) + "\n")
+        for pos in platform_positions:
+            f.write(str(pos.flatten()) + "\n")
+        f.write(str(len(all_detections)) + "\n")
+        for i_level, level_det in enumerate(all_detections):
+            f.write(str(i_level) + "\n")
+            f.write(str(len(level_det)) + "\n")
+            for i_node, node_det in enumerate(level_det):
+                f.write(str(i_node) + "\n")
+                f.write(str(len(node_det)) + "\n")
+                for det in node_det:
+                    f.write(str(det.timestamp) + "\n")
+                    f.write(str(np.array(det.state_vector).flatten()) + "\n")
+                    f.write(str(np.array(det.measurement_model.noise_covar).flatten()) + "\n")
+                    if i_level > 0:
+                        f.write(str(np.array(det.measurement_model.h_matrix.shape)) + "\n")
+                        f.write(str(np.array(det.measurement_model.h_matrix).flatten()) + "\n")
+
+
+def output_truth(filename, start_time, truth):
+    # Output the truth
+    with open(filename, "w") as f:
+        f.write(str(start_time) + "\n")
+        f.write(str(len(truth)) + "\n")
+        for target in truth:
+            f.write(str(len(target.states)) + "\n")
+            for x in target.states:
+                f.write(str(x.timestamp) + "\n")
+                f.write(str(x.state_vector.flatten()) + "\n")
diff --git a/examples/prh_example/prh_funcs.py b/examples/prh_example/prh_funcs.py
new file mode 100644
index 000000000..1ff6504da
--- /dev/null
+++ b/examples/prh_example/prh_funcs.py
@@ -0,0 +1,65 @@
+import numpy as np
+
+from stonesoup.types.array import StateVector, CovarianceMatrix
+from stonesoup.types.state import GaussianState
+from stonesoup.types.track import Track
+from stonesoup.types.update import Update, GaussianStateUpdate
+
+
+def tile_with_circles(minpos, maxpos, numx, numy):
+    """
+    Return centres and radius of a grid of circles with numx columns and numy rows which
+    tile the region defined by minpos and maxpos
+    """
+    field_size = maxpos - minpos
+    grid_size = max(field_size[0] / numx, field_size[1] / numy)
+    radius = grid_size / np.sqrt(2)
+    grid_start = (field_size - np.array([numx - 1, numy - 1]) * grid_size) / 2.0 + minpos
+    centres = []
+    for i in range(numx):
+        for j in range(numy):
+            centres.append(grid_start + np.array([i, j]) * grid_size)
+    return centres, radius
+
+
+def merge_position_and_velocity(position, velocity, statedim, position_mapping, velocity_mapping):
+    """
+    Create a state by merging a position and a velocity
+    """
+    state = StateVector(np.zeros((1, statedim)))
+    state[position_mapping, :] = StateVector(position)
+    state[velocity_mapping, :] = StateVector(velocity)
+    return state
+
+
+def merge_position_and_velocity_covariance(poscov, velcov, statedim, position_mapping, velocity_mapping):
+    """
+    Create a state covariance by merging a position covariance and a velocity covariance
+    """
+    covariance = CovarianceMatrix(np.zeros((statedim, statedim)))
+    covariance[np.ix_(position_mapping, position_mapping)] = poscov
+    covariance[np.ix_(velocity_mapping, velocity_mapping)] = velcov
+    return covariance
+
+
+def fit_normal_to_uniform(minval, maxval):
+    """
+    Fit a Gaussian to a uniform distribution on [minval, maxval] by matching its
+    mean, (minval + maxval) / 2, and variance, (maxval - minval)**2 / 12
+    """
+    mean = StateVector((minval + maxval)/2.0)
+    cov = CovarianceMatrix(np.diag(np.power(maxval - minval, 2.0)/12.0))
+    return mean, cov
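+
+
+# Quick worked example of the moment matching above (illustrative only): for
+# minval = [0, 0] and maxval = [12, 6], the fitted mean is [6, 3] and the
+# covariance diagonal is [12, 3], since a U(a, b) variable has variance
+# (b - a)**2 / 12.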
+
+
+def to_single_state(tracks, statedim):
+    """
+    Convert a set of tracks with two-state vectors to a set of tracks with one-state vectors
+
+    A two-state vector stacks the state at the start of the fusion interval above the
+    state at its end, so the last statedim elements are the current (end) state.
+    """
+    new_tracks = set()
+    for track in tracks:
+        states = []
+        for state in track.states:
+            if isinstance(state, Update):
+                new_state = GaussianStateUpdate(state.state_vector[-statedim:],
+                                                state.covar[-statedim:, -statedim:],
+                                                hypothesis=state.hypothesis,
+                                                timestamp=state.timestamp)
+            else:
+                new_state = GaussianState(state.state_vector[-statedim:],
+                                          state.covar[-statedim:, -statedim:],
+                                          timestamp=state.timestamp)
+            states.append(new_state)
+        new_tracks.add(Track(id=track.id, states=states))
+    return new_tracks
diff --git a/examples/prh_example/scratch_hierarchical_large.py b/examples/prh_example/scratch_hierarchical_large.py
new file mode 100644
index 000000000..d6b19ac8f
--- /dev/null
+++ b/examples/prh_example/scratch_hierarchical_large.py
@@ -0,0 +1,379 @@
+"""
+Generate a multi-sensor / multi-target simulation and tracking scenario.
+The scenario simulates 8 radar sensors positioned on a 4x2 grid of overlapping FOVs,
+organised into 4 hierarchical levels: 8 leaf trackers, 4 fusion trackers fusing 2
+sensors each, 2 fusing 4 sensors each, and 1 top tracker fusing all 8, with tracks
+passed up the fusion tree.
+
+                                  |----------------|
+                                  |  Top Tracker   |
+                                  |----------------|
+                                          |
+                --------------------------------------------------
+                |                                                |
+       |-----------------|                              |-----------------|
+       | Fuse Tracker 2a |                              | Fuse Tracker 2b |
+       |-----------------|                              |-----------------|
+                |                                                |
+      --------------------                             --------------------
+      |                  |                             |                  |
+|------------------| |------------------| |------------------| |------------------|
+| Fuse Tracker 1a  | | Fuse Tracker 1b  | | Fuse Tracker 1c  | | Fuse Tracker 1d  |
+|------------------| |------------------| |------------------| |------------------|
+    |       |            |       |            |       |            |       |
+|-------| |-------| |-------| |-------| |-------| |-------| |-------| |-------|
+| Leaf  | | Leaf  | | Leaf  | | Leaf  | | Leaf  | | Leaf  | | Leaf  | | Leaf  |
+|Sens. 1| |Sens. 2| |Sens. 3| |Sens. 4| |Sens. 5| |Sens. 6| |Sens. 7| |Sens. 8|
+|-------| |-------| |-------| |-------| |-------| |-------| |-------| |-------|
+"""
+
+
+import os
+from itertools import tee
+
+import matplotlib
+matplotlib.use('TkAgg')
+from matplotlib import pyplot as plt
+
+import numpy as np
+from datetime import datetime, timedelta
+
+from stonesoup.types.array import StateVector, CovarianceMatrix
+from stonesoup.types.state import GaussianState, State
+from stonesoup.types.numeric import Probability
+from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \
+    ConstantVelocity
+from stonesoup.simulator.simple import MultiTargetGroundTruthSimulator
+
+from stonesoup.platform import FixedPlatform
+from stonesoup.sensor.radar import RadarBearingRange
+from stonesoup.simulator.platform import PlatformDetectionSimulator
+#from stonesoup.custom.simulator.platform import PlatformTargetDetectionSimulator
+
+from stonesoup.predictor.kalman import KalmanPredictor
+from stonesoup.updater.kalman import UnscentedKalmanUpdater
+from stonesoup.dataassociator.neighbour import GNNWith2DAssignment
+from stonesoup.initiator.simple import MultiMeasurementInitiator
+from stonesoup.deleter.error import CovarianceBasedDeleter
+from stonesoup.tracker.simple import MultiTargetTracker
+from stonesoup.plugins.pyehm import JPDAWithEHM2  # used by the commented-out JPDA alternative below
+
+from stonesoup.custom.predictor.twostate import TwoStatePredictor
+from stonesoup.custom.updater.twostate import TwoStateKalmanUpdater
+from stonesoup.custom.initiator.twostate import TwoStateInitiator
+from stonesoup.custom.reader.tracklet import TrackletExtractor, PseudoMeasExtractor, \
+    TrackletExtractorTwoState
+from stonesoup.custom.tracker.fuse import FuseTracker2
+
+from stonesoup.custom.hypothesiser.probability import \
+    PDAHypothesiser as PDAHypothesiserLyud  # Lyudmil's custom PDA, which doesn't have to predict
+from stonesoup.hypothesiser.probability import PDAHypothesiser  # used by the leaf trackers
+
+from utils import plot_cov_ellipse  # compute_ellipse is also available
+
+from prh_funcs import tile_with_circles, merge_position_and_velocity, fit_normal_to_uniform, \
+    merge_position_and_velocity_covariance
+from fusion import FusionNode
+
+from output_files import output_tracks, output_meas, output_truth
+
+
+np.random.seed(1991)
+
+# Whether to use one or two state trackers at the upper hierarchy levels
+use_two_state_tracks = True
+# Whether to divide out the prior distribution when calculating pseudomeasurements
+use_prior = True
+
+
+# A function to plot the output tracks, including the lower level tracks fed into the higher level trackers
+def plot_tracks(all_tracks, all_detections, truth, numxy, fusion_level):
+    """
+    Plot results for a level of trackers
+    """
+
+    numx, numy = numxy[0], numxy[1]
+    fig, axs = plt.subplots(numy, numx, squeeze=False)
+
+    for row in range(numy):
+        for col in range(numx):
+
+            i = col * numy + row
+            ax = axs[numy - row - 1, col]
+            fusion_node = fusion_level[i]
+
+            leaf_platforms = [p for x in fusion_node.get_leaf_trackers() for p in x.detector.platforms]
+            platform_states = [np.array(p.state_vector.flatten()) for p in leaf_platforms]
+            platform_ranges = [p.sensors[0].max_range for p in leaf_platforms]
+
+            # Tracks
+            for track in all_tracks[i]:
+                track_x = np.array([x.state_vector.flatten() for x in track.states])
+                ax.plot(track_x[:, -4], track_x[:, -2], color='r', marker='+')
+                for state in track:
+                    mn, cv = state.mean[np.ix_([-4, -2])], state.covar[np.ix_([-4, -2], [-4, -2])]
+                    plot_cov_ellipse(cv, mn, ax=ax)
+            # Ground truth
+            for target in truth:
+                truth_x = np.array([x.state_vector.flatten() for x in target.states])
+                ax.plot(truth_x[:, 0], truth_x[:, 2], color='g', marker='')
+
+            # Platform range
+            for (platform_state, platform_range) in zip(platform_states, platform_ranges):
+                circle = plt.Circle(platform_state[np.ix_([0, 2])], platform_range, color='k', fill=False)
+                ax.add_patch(circle)
+
+            # Detections (will have problems if the pseudomeasurements are inhomogeneous due to having different
+            # rank)
+            if all_detections and all_detections[i]:
+                if fusion_node.is_leaf():
+                    d = np.array([z.measurement_model.inverse_function(z).flatten() for z in all_detections[i]])
+                    meas_x = d[:, 0]
+                    meas_y = d[:, 2]
+                else:
+                    d = np.array([z.state_vector.flatten() for z in all_detections[i]])
+                    meas_x = d[:, -4]
+                    meas_y = d[:, -2]
+                ax.plot(meas_x, meas_y, color='b', marker='.', linestyle='')
+
+            # Axes
+            ax.set_xlim([minpos[0], maxpos[0]])
+            ax.set_ylim([minpos[1], maxpos[1]])
+
+
+# Create a leaf-level tracker (i.e. one which processes raw measurements and provides tracks
+# for higher-level trackers to process)
+def create_leaf_tracker(platforms, gnd_sim, transition_model, prior_state):
+
+    # Create detection simulator
+    detection_sim = PlatformDetectionSimulator(
+        groundtruth=gnd_sim,
+        platforms=platforms)
+
+    this_meas_model = platforms[0].sensors[0].measurement_model  # will this work with multiple platforms/sensors per tracker?
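+    # (Note on the question above: detections generated by Stone Soup sensors carry the
+    #  generating sensor's measurement_model, and Kalman-type updaters use the model
+    #  attached to a detection when one is present, falling back to the model supplied
+    #  here otherwise; so multiple sensors per tracker should still be handled.)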
+
+    predictor = KalmanPredictor(transition_model)
+    updater = UnscentedKalmanUpdater(this_meas_model)
+
+    # Covariance-based deleter
+    deleter = CovarianceBasedDeleter(covar_trace_thresh=10000.0)
+
+    # Create GNN data association
+    hypothesiser = PDAHypothesiser(predictor, updater, prob_detect=0.9, prob_gate=0.999,
+                                   clutter_spatial_density=1.0e-6)
+    data_associator = GNNWith2DAssignment(hypothesiser)
+
+    # Create initiator for this sensor
+    min_detections = 1
+    initiator = MultiMeasurementInitiator(
+        prior_state=prior_state,
+        measurement_model=this_meas_model,
+        deleter=deleter,
+        data_associator=data_associator,
+        updater=updater,
+        min_points=min_detections)
+
+    # Set up the tracker
+    return MultiTargetTracker(
+        initiator=initiator,
+        deleter=deleter,
+        detector=detection_sim,
+        data_associator=data_associator,
+        updater=updater)
+
+
+# Create a fusion tracker, i.e. one which takes in tracks, produces pseudomeasurements from them
+# and carries out tracking on the pseudomeasurements.
+# use_two_state specifies whether the tracks to be fused consist of single target state
+# distributions (as is the case for tracks produced by leaf trackers, or fusion tracks which have
+# been converted to single state) or of distributions over pairs of states for a track over an
+# interval (two-state tracks).
+def create_fuse_tracker(fusion_time_interval, transition_model, prior_state, use_two_state):
+
+    # Fusion tracker components
+    two_state_predictor = TwoStatePredictor(transition_model)
+    two_state_updater = TwoStateKalmanUpdater(None, True)
+    fuse_initiator = TwoStateInitiator(prior_state, transition_model, two_state_updater)
+    # Lyudmil's code uses PDAHypothesiser from stonesoup.custom.hypothesiser.probability
+    fuse_hypothesiser = PDAHypothesiserLyud(predictor=None,
+                                            updater=two_state_updater,
+                                            clutter_spatial_density=Probability(-80, log_value=True),
+                                            prob_detect=Probability(prob_detect),
+                                            prob_gate=Probability(0.99),
+                                            predict=False,
+                                            per_measurement=True)
+    #fuse_associator = JPDAWithEHM2(fuse_hypothesiser)  # in Fuse tracker
+    fuse_associator = GNNWith2DAssignment(fuse_hypothesiser)  # in Fuse tracker
+    if use_two_state:
+        tracklet_extractor = TrackletExtractorTwoState(transition_model=transition_model,
+                                                       fuse_interval=fusion_time_interval)
+    else:
+        tracklet_extractor = TrackletExtractor(transition_model=transition_model,
+                                               fuse_interval=fusion_time_interval)
+
+    pseudomeas_extractor = PseudoMeasExtractor(use_prior=use_prior)  # honour the global flag
+    return FuseTracker2(initiator=fuse_initiator, predictor=two_state_predictor,
+                        updater=two_state_updater, associator=fuse_associator,
+                        tracklet_extractor=tracklet_extractor,
+                        pseudomeas_extractor=pseudomeas_extractor, death_rate=1e-4,
+                        prob_detect=Probability(prob_detect),
+                        delete_thresh=Probability(0.5))  # alternative: delete_thresh=Probability(0.0)
+
+
+# Create a hierarchy of trackers with leaf trackers at the bottom, passing up to multiple layers of fusion trackers
+def create_fusion_hierarchy(platforms, gnd_sims, tracker_hierarchy_indices, fusion_times,
+                            transition_model, prior_state):
+    """
+    Create hierarchy of fusion trackers (all the same for now, with different fusion times)
+    """
+    fusion_hierarchy = []
+
+    # Generate leaf nodes
+    fusion_hierarchy.append([])
+    for i_node, idx in enumerate(tracker_hierarchy_indices[0]):
+        tracker = create_leaf_tracker([platforms[i] for i in idx], gnd_sims[i_node],
+                                      transition_model, prior_state)
+        fusion_hierarchy[-1].append(FusionNode(tracker, [], statedim, use_two_state_tracks))
+
+    # Generate fusion nodes
+    for i_level, (fusion_time, level) in enumerate(zip(fusion_times[1:], tracker_hierarchy_indices[1:])):
+        fusion_hierarchy.append([])
+        for child_idx in level:
+            is_two_state = (i_level > 0 and use_two_state_tracks)
+            tracker = create_fuse_tracker(fusion_time, transition_model, prior_state, is_two_state)
+            children = [fusion_hierarchy[-2][i] for i in child_idx]
+            fusion_hierarchy[-1].append(FusionNode(tracker, children, statedim, use_two_state_tracks))
+
+    return fusion_hierarchy
+
+
+# Specify minimum and maximum extent of surveillance region
+minpos = np.array([-1000, -1000])
+maxpos = np.array([3000, 1000])
+posdim = len(minpos)
+statedim = 2 * posdim
+position_mapping = (0, 2)
+velocity_mapping = (1, 3)
+
+# Specify sensor noise covariance and measurement times
+noise_covar = CovarianceMatrix(np.diag([0.01 ** 2, 10 ** 2]))
+start_time = datetime(year=1970, month=1, day=1)  # datetime.now().replace(microsecond=0)
+timestep_size = timedelta(seconds=1.0)
+simulation_length = timedelta(seconds=300)
+number_of_steps = int(simulation_length / timestep_size) + 1
+prob_detect = 0.9  # Probability of Detection
+
+# Specify initial velocity mean and covariance
+init_velocity_mean = StateVector([[0.0], [0.0]])
+init_velocity_covariance = CovarianceMatrix(np.diag([10.0 ** 2, 10.0 ** 2]))
+
+# Get initial state distribution by fitting a Gaussian to the uniform position distribution
+# and merging with the velocity distribution
+init_position_mean, init_position_covariance = fit_normal_to_uniform(minpos, maxpos)
+s_prior_state_mean = merge_position_and_velocity(init_position_mean, init_velocity_mean, statedim,
+                                                 position_mapping, velocity_mapping)
+s_prior_state_covariance = merge_position_and_velocity_covariance(init_position_covariance, init_velocity_covariance,
+                                                                  statedim, position_mapping, velocity_mapping)
+# Specify prior Gaussian distribution of a new track
+s_prior_state = GaussianState(s_prior_state_mean, s_prior_state_covariance, start_time)
+
+# 2-d Constant Velocity model for target
+transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.1)] * 2)
+
+# Generate the target initial states
+number_of_targets = 20
+init_target_states = []
+for _ in range(number_of_targets):
+    init_pos = np.random.uniform(minpos, maxpos)
+    init_vel = StateVector(np.random.multivariate_normal(init_velocity_mean.flatten(), init_velocity_covariance))
+    init_pos_vel = merge_position_and_velocity(init_pos, init_vel, statedim, position_mapping, velocity_mapping)
+    init_target_states.append(State(init_pos_vel, start_time))
+
+# Ground truth simulator
+ground_truth_simulator = MultiTargetGroundTruthSimulator(
+    transition_model=transition_model,
+    initial_state=s_prior_state,
+    birth_rate=0.0,
+    death_probability=0.0,
+    preexisting_states=[x.state_vector for x in init_target_states],
+    timestep=timestep_size,
+    number_steps=number_of_steps)
+
+# Generate grid of sensors which tile a box
+numx, numy = 4, 2
+platform_positions, max_range = tile_with_circles(minpos, maxpos, numx, numy)
+# Size of tracker grid for each level in the hierarchy
+numxy_hierarchy = [(numx, numy), (numx, numy//2), (numx//2, numy//2), (1, 1)]
+
+# Create sensor platforms
+# (how do I add clutter?)
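+# (One possible answer, sketched but not enabled here: Stone Soup sensors accept an
+#  optional clutter model, e.g.
+#      from stonesoup.models.clutter import ClutterModel
+#      sensor.clutter_model = ClutterModel(
+#          clutter_rate=5,
+#          dist_params=((minpos[0], maxpos[0]), (minpos[1], maxpos[1])))
+#  which would add uniformly distributed false alarms to each scan. Whether the
+#  sensor honours clutter_model depends on the Stone Soup version in use.)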
+platforms = []
+for i_platform, x in enumerate(platform_positions):
+
+    sensor = RadarBearingRange(ndim_state=statedim, noise_covar=noise_covar,
+                               position_mapping=position_mapping, max_range=max_range)
+
+    # Create fixed sensor platform (we need to have 4-dimensional states for the sensors even if they are stationary
+    # because the other sensors try to measure them as if they were targets)
+    platform_state = merge_position_and_velocity(x, [0, 0], statedim,
+                                                 position_mapping, velocity_mapping)
+    platform = FixedPlatform(State(platform_state, timestamp=start_time), position_mapping=position_mapping)
+    platform.add_sensor(sensor)
+    platforms.append(platform)
+
+# Create hierarchy of fusion trackers (all the same for now, with different fusion times)
+fusion_times = [timestep_size, 4*timestep_size, 24*timestep_size, 48*timestep_size]
+# Each level lists, per node, the indices of its children at the level below
+# (platform indices for the leaf level)
+tracker_hierarchy_indices = [[(i,) for i, _ in enumerate(platforms)],
+                             [(0, 1), (2, 3), (4, 5), (6, 7)], [(0, 1), (2, 3)], [(0, 1)]]
+gnd_sims = tee(ground_truth_simulator, len(tracker_hierarchy_indices[0]))
+fusion_hierarchy = create_fusion_hierarchy(platforms, gnd_sims, tracker_hierarchy_indices, fusion_times,
+                                           transition_model, s_prior_state)
+
+# Run the trackers:
+
+all_tracks = [[set() for _ in level] for level in fusion_hierarchy]
+all_detections = [[set() for _ in level] for level in fusion_hierarchy]
+root_node = fusion_hierarchy[-1][0]
+leaf_trackers = root_node.get_leaf_trackers()
+
+for leaf_time_and_tracks in zip(*leaf_trackers):
+
+    # Advancing the zip steps every leaf tracker in lockstep; each element is a
+    # (timestamp, tracks) pair, and fusion nodes pull child tracks themselves
+    timestamp = leaf_time_and_tracks[0][0]
+
+    print("Time: " + str(timestamp))
+
+    # Run fusion level trackers
+    for level in fusion_hierarchy[1:]:
+        for node in level:
+            node.process_tracks(timestamp)
+
+    # Get tracks and detections from fusion hierarchy
+    for i_level, level in enumerate(fusion_hierarchy):
+        for i_node, node in enumerate(level):
+            all_tracks[i_level][i_node].update(node.tracker.tracks)
+            all_detections[i_level][i_node].update(node.detections)
+
+    ntracks_hierarchy = [[len(node.tracks) for node in level] for level in fusion_hierarchy]
+    print(ntracks_hierarchy)
+
+truth = ground_truth_simulator.groundtruth_paths
+
+outdir = "TestData/Large/"
+os.makedirs(outdir, exist_ok=True)  # make sure the output directory exists
+
+output_meas(outdir + "outputfile.txt", start_time, platform_positions, all_detections)
+output_tracks(outdir + "outputtracks.txt", start_time, all_tracks)
+output_truth(outdir + "outputtruth.txt", start_time, truth)
+
+# Plot results:
+for i in range(len(all_tracks)):
+    # (don't plot the detections because they will have different dimensions)
+    plot_tracks(all_tracks[i], None, truth, numxy_hierarchy[i], fusion_hierarchy[i])
+    fig = plt.gcf()
+    fig.suptitle(f'Level {i+1}')
+    #plot_tracks(all_tracks[i], all_detections[i], truth, numxy_hierarchy[i], fusion_hierarchy[i])
+
+plt.show()
diff --git a/examples/prh_example/utils.py b/examples/prh_example/utils.py
new file mode 100644
index 000000000..886a8b942
--- /dev/null
+++ b/examples/prh_example/utils.py
@@ -0,0 +1,107 @@
+import numpy as np
+from matplotlib import pyplot as plt
+from matplotlib.patches import Ellipse
+from matplotlib.path import Path
+from shapely.geometry import Point
+from shapely.ops import unary_union
+
+from stonesoup.types.numeric import Probability
+from stonesoup.types.state import ParticleState
+
+
+def eigsorted(cov):
+    vals, vecs = np.linalg.eigh(cov)
+    order = vals.argsort()[::-1]
+    return vals[order], vecs[:, order]
+
+
+def compute_ellipse(cov, pos, nstd=1, **kwargs):
+    vals, vecs = eigsorted(cov)
+    theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
+
+    # Width and height are "full" widths, not radius
+    width, height = 2 * nstd * np.sqrt(vals)
+    ellip = Ellipse(xy=pos, width=width, height=height, angle=theta,
+                    alpha=0.4, **kwargs)
+    return ellip.get_path()
+
+
+def plot_cov_ellipse(cov, pos, nstd=1, ax=None, **kwargs):
+    """
+    Plots an `nstd` sigma error ellipse based on the specified covariance
+    matrix (`cov`). Additional keyword arguments are passed on to the
+    ellipse patch artist.
+    Parameters
+    ----------
+    cov : The 2x2 covariance matrix to base the ellipse on
+    pos : The location of the center of the ellipse. Expects a 2-element
+        sequence of [x0, y0].
+    nstd : The radius of the ellipse in numbers of standard deviations.
+        Defaults to 1 standard deviation.
+    ax : The axis that the ellipse will be plotted on. Defaults to the
+        current axis.
+    Additional keyword arguments are passed on to the ellipse patch.
+    Returns
+    -------
+    A matplotlib ellipse artist
+    """
+    if ax is None:
+        ax = plt.gca()
+
+    vals, vecs = eigsorted(cov)
+    theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
+
+    # Width and height are "full" widths, not radius
+    width, height = 2 * nstd * np.sqrt(vals)
+    ellip = Ellipse(xy=pos, width=width, height=height, angle=theta,
+                    alpha=0.4, **kwargs)
+
+    ax.add_artist(ellip)
+    return ellip
+
+
+def _prob_detect_func(prob_detect, fovs):
+    """Closure to return the probability of detection function for a given environment scan"""
+
+    # Get the union of all fields of view (note: shapely 2.x requires .geoms to
+    # iterate a MultiPolygon)
+    fovs_union = unary_union(fovs)
+    if fovs_union.geom_type == 'MultiPolygon':
+        fovs = list(fovs_union.geoms)
+    else:
+        fovs = [fovs_union]
+
+    # Probability of detection nested function; a state inside any field of view
+    # gets prob_detect, all others get 0.1
+    def prob_detect_func(state):
+        if isinstance(state, ParticleState):
+            prob_detect_arr = np.full((len(state),), Probability(0.1))
+            points = state.state_vector[[0, 2], :].T
+            for poly in fovs:
+                path_p = Path(poly.boundary.coords)
+                inside_points = path_p.contains_points(points)
+                prob_detect_arr[inside_points] = prob_detect
+            return prob_detect_arr
+        else:
+            point = Point(state.state_vector[0, 0], state.state_vector[2, 0])
+            return prob_detect if any(poly.contains(point) for poly in fovs) \
+                else Probability(0.1)
+
+    return prob_detect_func
\ No newline at end of file
diff --git a/setup.cfg b/setup.cfg
index fd186fa7c..2460ba5dc 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -27,6 +27,11 @@ install_requires =
     rtree
     scipy
     utm
+    pyehm
+    vector3d
+    shapely==2.0.1
+    geopy
+    pyproj==3.4.1
 
 [options.extras_require]
 dev =
diff --git a/stonesoup/custom/__init__.py b/stonesoup/custom/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/stonesoup/custom/dataassociator/__init__.py b/stonesoup/custom/dataassociator/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/stonesoup/custom/dataassociator/jipda.py b/stonesoup/custom/dataassociator/jipda.py
new file mode 100644
index 000000000..70c93cc97
--- /dev/null
+++ b/stonesoup/custom/dataassociator/jipda.py
@@ -0,0 +1,66 @@
+from pyehm.plugins.stonesoup import JPDAWithEHM2
+
+from stonesoup.types.detection import MissedDetection
+from stonesoup.types.hypothesis import SingleProbabilityHypothesis
+from stonesoup.types.multihypothesis import MultipleHypothesis
+from stonesoup.types.numeric import Probability
+
+
+class JIPDAWithEHM2(JPDAWithEHM2):
+
+    @classmethod
+    def _compute_multi_hypotheses(cls, tracks, detections, hypotheses, time):
+
+        # Tracks and detections must be in a list so we can keep track of their order
+        track_list = list(tracks)
+        detection_list = list(detections)
+
+        # Calculate validation and likelihood matrices
+        validation_matrix, likelihood_matrix = \
+            cls._calc_validation_and_likelihood_matrices(track_list, detection_list, hypotheses)
+
+        # Run EHM
+        assoc_prob_matrix = cls._run_ehm(validation_matrix, likelihood_matrix)
+
+        # Calculate MultiMeasurementHypothesis for each Track over all
+        # available Detections with probabilities drawn from the association matrix
+        new_hypotheses = dict()
+
+        for i, track in enumerate(track_list):
+
+            single_measurement_hypotheses = list()
+
+            # Null measurement hypothesis
+            null_hypothesis = next((hyp for hyp in hypotheses[track] if not hyp), None)
+            w = null_hypothesis.metadata['w']
+            prob_misdetect = Probability(assoc_prob_matrix[i, 0])/(1+w)
+            single_measurement_hypotheses.append(
+                SingleProbabilityHypothesis(
+                    hypotheses[track][0].prediction,
+                    MissedDetection(timestamp=time),
+                    measurement_prediction=null_hypothesis.measurement_prediction,
+                    probability=prob_misdetect))
+
+            # True hypotheses
+            for hypothesis in hypotheses[track]:
+                if not hypothesis:
+                    continue
+
+                # Get the detection index
+                j = next(d_i + 1 for d_i, detection in enumerate(detection_list)
+                         if hypothesis.measurement == detection)
+
+                prob_detect_assoc = Probability(assoc_prob_matrix[i, j])
+                single_measurement_hypotheses.append(
+                    SingleProbabilityHypothesis(
+                        hypothesis.prediction,
+                        hypothesis.measurement,
+                        measurement_prediction=hypothesis.measurement_prediction,
+                        probability=prob_detect_assoc))
+
+            # Existence probability is the sum of all association probabilities
+            track.exist_prob = Probability.sum(hyp.probability
+                                               for hyp in single_measurement_hypotheses)
+
+            new_hypotheses[track] = MultipleHypothesis(single_measurement_hypotheses, True, 1)
+
+        return new_hypotheses
\ No newline at end of file
diff --git a/stonesoup/custom/functions/__init__.py b/stonesoup/custom/functions/__init__.py
new file mode 100644
index 000000000..b39b178ab
--- /dev/null
+++ b/stonesoup/custom/functions/__init__.py
@@ -0,0 +1,514 @@
+import math
+from typing import Set, List
+
+import numpy as np
+from numpy import linalg as la
+from scipy.linalg import block_diag
+import pyproj
+from shapely.geometry import Point
+from shapely.ops import transform
+from matplotlib.path import Path
+from scipy.special import logsumexp
+from scipy.stats import multivariate_normal
+from shapely.geometry.base import BaseGeometry
+from vector3d.vector import Vector
+
+from stonesoup.types.angle import Angle
+from stonesoup.types.state import ParticleState
+from stonesoup.types.track import Track
+
+
+class CameraCalculator:
+    """Ported and modified from https://gist.github.com/luipir/dc33864b53cf6634f9cdd2bce712d3d9"""
+
+    @staticmethod
+    def getBoundingPolygon(FOVh, FOVv, altitude, roll, pitch, heading):
+        """Get corners of the polygon captured by the camera on the ground.
+        The calculations are performed in the axes origin (0, 0, altitude)
+        and the points are not yet translated to camera's X-Y coordinates.
+        Parameters:
+            FOVh (float): Horizontal field of view in radians
+            FOVv (float): Vertical field of view in radians
+            altitude (float): Altitude of the camera in meters
+            heading (float): Heading of the camera (z axis) in radians
+            roll (float): Roll of the camera (x axis) in radians
+            pitch (float): Pitch of the camera (y axis) in radians
+        Returns:
+            vector3d.vector.Vector: Array with 4 points defining a polygon
+        """
+        ray11 = CameraCalculator.ray1(FOVh, FOVv)
+        ray22 = CameraCalculator.ray2(FOVh, FOVv)
+        ray33 = CameraCalculator.ray3(FOVh, FOVv)
+        ray44 = CameraCalculator.ray4(FOVh, FOVv)
+
+        rotatedVectors = CameraCalculator.rotateRays(ray11, ray22, ray33, ray44,
+                                                     roll, pitch, heading)
+
+        origin = Vector(0, 0, altitude)
+        intersections = CameraCalculator.getRayGroundIntersections(rotatedVectors, origin)
+
+        return intersections
+
+    @staticmethod
+    def getFovRPH(intersections, altitude):
+        # Calculate unit vectors to the ground, assuming camera is at the origin
+        rotVecs = [Vector(i.x, i.y, -altitude).normalize() for i in intersections]
+
+        # First rotation aligns the centroid of the polygon with the negative z axis
+        centroidVec = (rotVecs[0] + rotVecs[1] + rotVecs[2] + rotVecs[3]).normalize()
+        rot1 = rotation_matrix_from_vectors(Vector(z=-1), centroidVec).T
+
+        # Get vectors after first rotation
+        rotVecs1 = [Vector(*(rot1 @ np.array([[r.x], [r.y], [r.z]])).flatten()) for r in rotVecs]
+
+        # Second rotation aligns the centroid of the polygon with the negative y axis
+        rot2 = rotation_matrix_from_vectors(Vector(y=1), (rotVecs1[0] - rotVecs1[1]).normalize()).T
+
+        # Get final rotation matrix
+        R = rot2 @ rot1
+
+        # Calculate roll, pitch and heading
+        roll, pitch, heading = roll_pitch_yaw_from_matrix(R.T)
+
+        # Calculate FOVv and FOVh
+        rays2 = CameraCalculator.unrotateRays(*rotVecs, roll, pitch, heading)
+        rays2norm = [Vector(ray.x / -ray.z, ray.y / -ray.z, -1) for ray in rays2]
+        FOVv = np.arctan2(rays2norm[0].x, 1) * 2
+        FOVh = np.arctan2(rays2norm[0].y, 1) * 2
+        return roll, pitch, heading, FOVv, FOVh
+
+    # Ray-vectors defining the camera's field of view. FOVh and FOVv are interchangeable
+    # depending on the camera's orientation
+    @staticmethod
+    def ray1(FOVh, FOVv):
+        """
+        Parameters:
+            FOVh (float): Horizontal field of view in radians
+            FOVv (float): Vertical field of view in radians
+        Returns:
+            vector3d.vector.Vector: normalised vector
+        """
+        ray = Vector(math.tan(FOVv / 2), math.tan(FOVh / 2), -1)
+        return ray.normalize()
+
+    @staticmethod
+    def ray2(FOVh, FOVv):
+        """
+        Parameters:
+            FOVh (float): Horizontal field of view in radians
+            FOVv (float): Vertical field of view in radians
+        Returns:
+            vector3d.vector.Vector: normalised vector
+        """
+        ray = Vector(math.tan(FOVv / 2), -math.tan(FOVh / 2), -1)
+        return ray.normalize()
+
+    @staticmethod
+    def ray3(FOVh, FOVv):
+        """
+        Parameters:
+            FOVh (float): Horizontal field of view in radians
+            FOVv (float): Vertical field of view in radians
+        Returns:
+            vector3d.vector.Vector: normalised vector
+        """
+        ray = Vector(-math.tan(FOVv / 2), -math.tan(FOVh / 2), -1)
+        return ray.normalize()
+
+    @staticmethod
+    def ray4(FOVh, FOVv):
+        """
+        Parameters:
+            FOVh (float): Horizontal field of view in radians
+            FOVv (float): Vertical field of view in radians
+        Returns:
+            vector3d.vector.Vector: normalised vector
+        """
+        ray = Vector(-math.tan(FOVv / 2), math.tan(FOVh / 2), -1)
+        return ray.normalize()
+
+    @staticmethod
+    def rotationMatrix(roll, pitch, yaw):
+        """Calculate the ZYX rotation matrix
+        Parameters:
+            roll (float): Roll rotation
+            pitch (float): Pitch rotation
+            yaw (float): Yaw rotation
+        Returns:
+            3x3 rotation matrix
+        """
+        sinAlpha = math.sin(yaw)
+        sinBeta = math.sin(pitch)
+        sinGamma = math.sin(roll)
+        cosAlpha = math.cos(yaw)
+        cosBeta = math.cos(pitch)
+        cosGamma = math.cos(roll)
+        m00 = cosAlpha * cosBeta
+        m01 = cosAlpha * sinBeta * sinGamma - sinAlpha * cosGamma
+        m02 = cosAlpha * sinBeta * cosGamma + sinAlpha * sinGamma
+        m10 = sinAlpha * cosBeta
+        m11 = sinAlpha * sinBeta * sinGamma + cosAlpha * cosGamma
+        m12 = sinAlpha * sinBeta * cosGamma - cosAlpha * sinGamma
+        m20 = -sinBeta
+        m21 = cosBeta * sinGamma
+        m22 = cosBeta * cosGamma
+
+        return np.array([[m00, m01, m02],
+                         [m10, m11, m12],
+                         [m20, m21, m22]])
+
+    @staticmethod
+    def rotateRays(ray1, ray2, ray3, ray4, roll, pitch, yaw):
+        """Rotates the four ray-vectors around all 3 axes
+        Parameters:
+            ray1 (vector3d.vector.Vector): First ray-vector
+            ray2 (vector3d.vector.Vector): Second ray-vector
+            ray3 (vector3d.vector.Vector): Third ray-vector
+            ray4 (vector3d.vector.Vector): Fourth ray-vector
+            roll (float): Roll rotation
+            pitch (float): Pitch rotation
+            yaw (float): Yaw rotation
+        Returns:
+            Returns new rotated ray-vectors
+        """
+        rotationMatrix = CameraCalculator.rotationMatrix(roll, pitch, yaw)
+        rayMatrix = np.array([[ray.x, ray.y, ray.z] for ray in [ray1, ray2, ray3, ray4]]).T
+        rotatedRayMatrix = (rotationMatrix @ rayMatrix).T
+        rayArray = [Vector(*row) for row in rotatedRayMatrix]
+        return rayArray
+
+    @staticmethod
+    def unrotateRays(ray1, ray2, ray3, ray4, roll, pitch, yaw):
+        """Applies the inverse rotation to the four ray-vectors around all 3 axes
+        Parameters:
+            ray1 (vector3d.vector.Vector): First ray-vector
+            ray2 (vector3d.vector.Vector): Second ray-vector
+            ray3 (vector3d.vector.Vector): Third ray-vector
+            ray4 (vector3d.vector.Vector): Fourth ray-vector
+            roll (float): Roll rotation
+            pitch (float): Pitch rotation
+            yaw (float): Yaw rotation
+        Returns:
+            Returns new unrotated ray-vectors
+        """
+        rotationMatrix = CameraCalculator.rotationMatrix(roll, pitch, yaw).T
+        rayMatrix = np.array([[ray.x, ray.y, ray.z] for ray in [ray1, ray2, ray3, ray4]]).T
+        rotatedRayMatrix = (rotationMatrix @ rayMatrix).T
+        rayArray = [Vector(*row) for row in rotatedRayMatrix]
+        return rayArray
+
+    @staticmethod
+    def getRayGroundIntersections(rays, origin):
+        """
+        Finds the intersections of the camera's ray-vectors
+        and the ground approximated by a horizontal plane
+        Parameters:
+            rays (vector3d.vector.Vector[]): Array of 4 ray-vectors
+            origin (vector3d.vector.Vector): Position of the camera. The computations were developed
+                assuming the camera was at the axes origin (0, 0, altitude) and the
+                results translated by the camera's real position afterwards.
+        Returns:
+            vector3d.vector.Vector
+        """
+        return [CameraCalculator.findRayGroundIntersection(ray, origin) for ray in rays]
+
+    @staticmethod
+    def findRayGroundIntersection(ray, origin):
+        """
+        Finds a ray-vector's intersection with the ground approximated by a plane
+        Parameters:
+            ray (vector3d.vector.Vector): Ray-vector
+            origin (vector3d.vector.Vector): Camera's position
+        Returns:
+            vector3d.vector.Vector
+        """
+        # Parametric form of an equation
+        # P = origin + vector * t
+        x = Vector(origin.x, ray.x)
+        y = Vector(origin.y, ray.y)
+        z = Vector(origin.z, ray.z)
+
+        # Equation of the horizontal plane (ground)
+        # -z = 0
+
+        # Calculate t by substituting z
+        t = - (z.x / z.y)
+
+        # Substitute t in the original parametric equations to get points of intersection
+        return Vector(x.x + x.y * t, y.x + y.y * t, z.x + z.y * t)
+
+
+def get_camera_footprint(camera):
+    # altitude = camera.position[2]
+    # try:
+    #     pan, tilt = camera.pan_tilt
+    # except:
+    #     pan, tilt = camera.pan, camera.tilt
+    #
+    # fov_range_pan = (pan - camera.fov_angle[0] / 2, pan, pan + camera.fov_angle[0] / 2)
+    # fov_range_tilt = (tilt - camera.fov_angle[1] / 2, tilt, tilt + camera.fov_angle[1] / 2)
+    # x_min = altitude * np.tan(fov_range_tilt[0]) + camera.position[0]
+    # x_max = altitude * np.tan(fov_range_tilt[2]) + camera.position[0]
+    # y_min = altitude * np.tan(fov_range_pan[0]) + camera.position[1]
+    # y_max = altitude * np.tan(fov_range_pan[2]) + camera.position[1]
+
+    # Once the camera is rotated, the z axis becomes the x axis, and the x axis becomes the z axis
+    # TODO: More testing is needed to make sure this is correct
+    roll, pitch, heading = (camera.orientation[2],
+                            camera.orientation[1] + np.pi / 2,
+                            camera.orientation[0])
+
+    xmin, xmax, ymin, ymax = get_camera_footprint_low(camera.position, roll, pitch, heading,
+                                                      camera.fov_angle)
+    return xmin, xmax, ymin, ymax
+
+
+def get_camera_footprint_low(position, roll, pitch, heading, fov_angle):
+    bpol = CameraCalculator.getBoundingPolygon(fov_angle[0],
+                                               fov_angle[1],
+                                               position[2],
+                                               roll,  # Tested, works
+                                               -pitch,  # Tested, works
+                                               -heading)
+    xvals = np.sort(np.unique(np.round([v.x + position[0] for v in bpol], 2)))
+    yvals = np.sort(np.unique(np.round([v.y + position[1] for v in bpol], 2)))
+
+    xmin = xvals[0]
+    xmax = xvals[-1]
+    ymin = yvals[0]
+    ymax = yvals[-1]
+
+    return xmin, xmax, ymin, ymax
+
+
+def get_roll_pitch_yaw_fov(x_min, x_max, y_min, y_max, altitude):
+    intersections = [Vector(x_max, y_max), Vector(x_max, y_min), Vector(x_min, y_min),
+                     Vector(x_min, y_max)]
+
+    roll, pitch, heading, fov_tilt, fov_pan = CameraCalculator.getFovRPH(intersections, altitude)
+    # Pitch needs to be inverted to get the tilt angle
+    pitch = -pitch
+
+    return Angle(roll), Angle(pitch), Angle(heading), Angle(fov_tilt), Angle(fov_pan)
+
+
+def lla_to_pan_tilt_fov(pos, x_min, x_max, y_min, y_max):
+    """Converts lat, lon, alt to az, el"""
+
+    # We assume that the camera is looking at the ground, with heading pointing east
+    # Hence, panning is along the latitude axis, and tilting is along the longitude axis
+
+    alt = pos[2]
+    phi1 = np.arctan2(x_min - pos[0], alt)
+    phi2 = np.arctan2(x_max - pos[0], alt)
+
+    theta1 = np.arctan2(y_min - pos[1], alt)
+    theta2 = np.arctan2(y_max - pos[1], alt)
+
+    fov_angle = (theta2 - theta1, phi2 - phi1)
+    pan, tilt = (fov_angle[0]) / 2 + theta1, (fov_angle[1]) / 2 + phi1
+
+    return pan, tilt, fov_angle
+
+
+def get_nearest(array, value):
+    array = np.asarray(array)
+    idx = (np.abs(array - value)).argmin()
+    return array[idx]
+
+
+def rotation_matrix_from_vectors(vec1, vec2):
+    """ Find the rotation matrix that aligns vec1 to vec2
+    :param vec1: A 3d "source" vector
+    :param vec2: A 3d "destination" vector
+    :return mat: A transform matrix (3x3) which when applied to vec1, aligns it with vec2.
+    """
+    if isinstance(vec1, Vector):
+        vec1 = np.array([vec1.x, vec1.y, vec1.z])
+    if isinstance(vec2, Vector):
+        vec2 = np.array([vec2.x, vec2.y, vec2.z])
+    if np.allclose(vec1, vec2):
+        return np.eye(3)
+    a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)
+    v = np.cross(a, b)
+    c = np.dot(a, b)
+    s = np.linalg.norm(v)
+    kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
+    rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
+    return rotation_matrix
+
+
+def roll_pitch_yaw_from_matrix(matrix):
+    """ Returns the Euler angles from a rotation matrix
+    :param matrix: A transform matrix (3x3)
+    :return: Euler angles in the form of a tuple (roll, pitch, yaw)
+    """
+    roll = np.arctan2(matrix[2, 1], matrix[2, 2])
+    pitch = np.arctan2(-matrix[2, 0], np.sqrt(matrix[2, 1] ** 2 + matrix[2, 2] ** 2))
+    yaw = np.arctan2(matrix[1, 0], matrix[0, 0])
+    return roll, pitch, yaw
+
+
+def rigid_transform_3D(A, B):
+    assert A.shape == B.shape
+    num_rows, num_cols = A.shape
+    if num_rows != 3:
+        raise Exception(f"matrix A is not 3xN, it is {num_rows}x{num_cols}")
+    num_rows, num_cols = B.shape
+    if num_rows != 3:
+        raise Exception(f"matrix B is not 3xN, it is {num_rows}x{num_cols}")
+    # find mean column wise
+    centroid_A = np.mean(A, axis=1)
+    centroid_B = np.mean(B, axis=1)
+    # ensure centroids are 3x1
+    centroid_A = centroid_A.reshape(-1, 1)
+    centroid_B = centroid_B.reshape(-1, 1)
+    # subtract mean
+    Am = A - centroid_A
+    Bm = B - centroid_B
+    H = Am @ np.transpose(Bm)
+    # sanity check
+    # if linalg.matrix_rank(H) < 3:
+    #     raise ValueError("rank of H = {}, expecting 3".format(linalg.matrix_rank(H)))
+    # find rotation
+    U, S, Vt = np.linalg.svd(H)
+    R = Vt.T @ U.T
+    # special reflection case
+    if np.linalg.det(R) < 0:
+        print("det(R) < 0, reflection detected!, correcting for it ...")
+        Vt[2, :] *= -1
+        R = Vt.T @ U.T
+    t = -R @ centroid_A + centroid_B
+    return R, t
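+
+
+# Illustrative check of rigid_transform_3D (a Kabsch-style fit; not used elsewhere):
+# recovering a known rotation and translation from noiseless points,
+#     R0 = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])  # 90 deg about z
+#     t0 = np.array([[1.], [2.], [3.]])
+#     A = np.random.rand(3, 10)
+#     R, t = rigid_transform_3D(A, R0 @ A + t0)
+# after which np.allclose(R, R0) and np.allclose(t, t0) both hold.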
+def calculate_num_targets_dist(tracks: Set[Track], geom: BaseGeometry,
+                               phd_state: ParticleState = None, target_types: List[str] = None):
+    num_samples = 100
+    valid_tracks = [track for track in tracks
+                    if not target_types
+                    or any(item in track.metadata['target_type_confidences']
+                           for item in target_types)]
+    mu_overall = 0
+    var_overall = 0  # np.inf if len(valid_tracks) == 0 else 0
+    path_p = Path(geom.boundary.coords)
+
+    # Calculate PHD density inside polygon
+    if phd_state is not None:
+        points = phd_state.state_vector[[0, 2], :].T
+        inside_points = path_p.contains_points(points)
+        if np.sum(inside_points) > 0:
+            # The mean of the PHD density inside the polygon is the sum of the weights of the
+            # particles inside the polygon
+            mu_overall = np.exp(logsumexp(np.log(phd_state.weight[inside_points].astype(float))))
+            # The variance of a Poisson distribution is equal to the mean
+            var_overall = mu_overall
+
+    # Calculate number of tracks inside polygon
+    for track in valid_tracks:
+        # Sample points from the track state
+        points = multivariate_normal.rvs(mean=track.state_vector[[0, 2]].ravel(),
+                                         cov=track.covar[[0, 2], :][:, [0, 2]],
+                                         size=num_samples)
+        # Check which points are inside the polygon
+        inside_points = path_p.contains_points(points)
+        # Probability of existence inside the polygon is the fraction of points inside the polygon
+        # times the probability of existence
+        p_success = float(track.exist_prob) * (np.sum(inside_points) / num_samples)
+        # Mean of a Bernoulli distribution is equal to the probability of success
+        mu_overall += p_success
+        # Variance of a Bernoulli distribution is equal to the probability of success,
+        # times the probability of failure
+        var_overall += p_success * (1 - p_success)
+
+    return mu_overall, var_overall
+
+
+proj_wgs84 = pyproj.Proj('+proj=longlat +datum=WGS84')
+
+
+def geodesic_point_buffer(lat, lon, km):
+    # Azimuthal equidistant projection
+    # (pyproj.transform was deprecated and removed in pyproj 3; use a Transformer)
+    aeqd_proj = '+proj=aeqd +lat_0={lat} +lon_0={lon} +x_0=0 +y_0=0'
+    transformer = pyproj.Transformer.from_proj(
+        pyproj.Proj(aeqd_proj.format(lat=lat, lon=lon)), proj_wgs84)
+    buf = Point(0, 0).buffer(km * 1000)  # distance in metres
+    return transform(transformer.transform, buf)
+
+
+def predict_state_to_two_state(old_mean, old_cov, tx_model, dt):
+    # Augment a single-state Gaussian into a joint Gaussian over the state at the
+    # start and end of the interval dt: [x_k; x_{k+1}] with x_{k+1} = A x_k + w
+    A = tx_model.matrix(time_interval=dt)
+    Q = tx_model.covar(time_interval=dt)
+    statedim = A.shape[0]
+    AA = np.concatenate((np.eye(statedim), A))
+    QQ = block_diag(np.zeros((statedim, statedim)), Q)
+    return AA @ old_mean, AA @ old_cov @ AA.T + QQ
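+
+
+# Illustrative shape check for the augmentation above (assumes a Stone Soup linear
+# transition model; not used elsewhere): for a 4-d constant-velocity state,
+#     tx = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.1)] * 2)
+#     mu, P = predict_state_to_two_state(np.zeros((4, 1)), np.eye(4), tx,
+#                                        timedelta(seconds=1))
+# gives an 8-d joint mean and an 8x8 joint covariance, whose top-left 4x4 block
+# equals the original covariance (the start-of-interval marginal).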
+def nearestPD(A):
+    """Find the nearest positive-definite matrix to input
+
+    A Python/Numpy port of John D'Errico's `nearestSPD` MATLAB code [1], which
+    credits [2].
+
+    [1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
+
+    [2] N.J. Higham, "Computing a nearest symmetric positive semidefinite
+    matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
+    """
+
+    B = (A + A.T) / 2
+    _, s, V = la.svd(B)
+
+    H = np.dot(V.T, np.dot(np.diag(s), V))
+
+    A2 = (B + H) / 2
+
+    A3 = (A2 + A2.T) / 2
+
+    if isPD(A3):
+        return A3
+
+    spacing = np.spacing(la.norm(A))
+    # The above is different from [1]. It appears that MATLAB's `chol` Cholesky
+    # decomposition will accept matrixes with exactly 0-eigenvalue, whereas
+    # Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab
+    # for `np.spacing`), we use the above definition. CAVEAT: our `spacing`
+    # will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on
+    # the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas
+    # `spacing` will, for Gaussian random matrixes of small dimension, be on
+    # the order of 1e-16. In practice, both ways converge, as the unit test
+    # below suggests.
+    I = np.eye(A.shape[0])
+    k = 1
+    while not isPD(A3):
+        mineig = np.min(np.real(la.eigvals(A3)))
+        A3 += I * (-mineig * k**2 + spacing)
+        k += 1
+
+    return A3
+
+
+def isPD(B):
+    """Returns true when input is positive-definite, via Cholesky"""
+    try:
+        _ = la.cholesky(B)
+        return True
+    except la.LinAlgError:
+        return False
\ No newline at end of file
diff --git a/stonesoup/custom/functions/rollout.py b/stonesoup/custom/functions/rollout.py
new file mode 100644
index 000000000..556c8104c
--- /dev/null
+++ b/stonesoup/custom/functions/rollout.py
@@ -0,0 +1,653 @@
+import copy
+import itertools
+import math
+from datetime import timedelta, datetime
+from enum import Enum
+from typing import Union
+from uuid import uuid4, UUID
+
+import numpy as np
+from pydantic import BaseModel
+from scipy.stats import poisson
+from shapely import Point, Polygon
+
+from reactive_isr_core.data import TargetType, Availability, ActionStatus, Image, GeoLocation, \
+    Algorithm
+
+from stonesoup.custom.sensor.movable import MovableUAVCamera
+from stonesoup.models.clutter import ClutterModel
+from stonesoup.types.array import StateVector
+from stonesoup.types.numeric import Probability
+from stonesoup.types.state import GaussianState
+from stonesoup.types.track import Track
+
+
+class ActionTupleType(Enum):
+    """Enum for the different types of action tuples"""
+    NO_ACTION = 0
+    ONBOARD = 1
+    REMOTE = 2
+    COMMS_AND_PROC = 3
+    PROC_ONLY = 4
+
+
+class ActionTuple(tuple):
+    def __new__(cls, tup=None):
+        if not tup:
+            coll_action, comms_action, proc_action = None, None, None
+        else:
+            coll_action, comms_action, proc_action = tup
+
+        return tuple.__new__(cls, (coll_action, comms_action, proc_action))
+
+    def __init__(self, *args, **kwargs):
+        if self.coll_action is None and self.comms_action is None and self.proc_action is None:
+            self._action_tuple_type = ActionTupleType.NO_ACTION
+        elif self.coll_action is None:
+            if self.comms_action is None:
+                self._action_tuple_type = ActionTupleType.PROC_ONLY
+            else:
+                self._action_tuple_type = ActionTupleType.COMMS_AND_PROC
+        else:
+            if self.coll_action.node_id == self.proc_action.node_id:
+                self._action_tuple_type = ActionTupleType.ONBOARD
+            else:
+                self._action_tuple_type = ActionTupleType.REMOTE
+
+    def __bool__(self):
+        return any(action is not None for action in self)
+
+    def __str__(self):
+        if self._action_tuple_type == ActionTupleType.NO_ACTION:
+            return "No action"
+        elif self._action_tuple_type == ActionTupleType.PROC_ONLY:
+            return (f"PROC_ONLY(node={self.proc_action.node_id}, "
+                    f"image={self.proc_action.image.id}, "
+                    f"algorithm={self.proc_action.algorithm.name})")
+        elif self._action_tuple_type == ActionTupleType.COMMS_AND_PROC:
+            return (f"COMMS_PROC(from={self.comms_action.source_node_id}, "
+                    f"to={self.comms_action.target_node_id}, "
+                    f"image={self.proc_action.image.id}, "
+                    f"algorithm={self.proc_action.algorithm.name})")
+        elif self._action_tuple_type == ActionTupleType.ONBOARD:
+            return (f"ONBOARD(image={self.coll_action.image.id}, "
+                    f"algorithm={self.proc_action.algorithm.name})")
+        else:
+            return (f"REMOTE(from={self.coll_action.node_id}, "
+                    f"to={self.proc_action.node_id}, "
+                    f"image={self.coll_action.image.id}, "
+                    f"algorithm={self.proc_action.algorithm.name})")
+
+    def __repr__(self):
+        return str(self)
+
+    @property
+    def coll_action(self):
+        return self[0]
+
+    @property
+    def comms_action(self):
+        return self[1]
+
+    @property
+    def proc_action(self):
+        return self[2]
+
+    @property
+    def is_onboard(self):
+        """Check if the processing is
performed onboard""" + return self._action_tuple_type == ActionTupleType.ONBOARD + + @property + def type(self): + return self._action_tuple_type + + +class CollectionAction(BaseModel): + id: UUID + node_id: UUID + location: GeoLocation + status: ActionStatus + image: Image + + +class CommsAction(BaseModel): + id: UUID + source_node_id: UUID + target_node_id: UUID + image: Image + status: ActionStatus + start_time: Union[None, datetime] + dt: timedelta + + @property + def end_time(self): + return self.start_time + self.dt + + +class ProcAction(BaseModel): + id: UUID + node_id: UUID + image: Image + algorithm: Algorithm + status: ActionStatus + start_time: Union[None, datetime] + dt: timedelta + + @property + def end_time(self): + return self.start_time + self.dt + + +def cover_rectangle_with_minimum_overlapping_circles(x1, y1, x2, y2, radius): + """ + https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1343643 + + """ + width = x2 - x1 + height = y2 - y1 + + p = Point(x1 + width/2, y1 + height/2).buffer(radius) + pol = Polygon([(x1, y1), (x2, y1), (x2, y2), (x1, y2)]) + intersection = p.intersection(pol) + if intersection.area >= 0.9*pol.area: + return [(x1 + width/2, y1 + height/2)] + + # if width <= np.sqrt(3)/2*radius and height <= np.sqrt(3)/2*radius: + z1 = height / (np.sqrt(3) * radius) + re1 = z1 - math.floor(z1) + n = math.floor(z1) + if re1 <= 1/2: + n += 1 + else: + n += 2 + + z2 = width / (3/2 * radius) + re2 = z2 - math.floor(z2) + m = math.floor(z2) + if re2 <= 2/3: + m += 1 + else: + m += 2 + + centers = [] + + for k in range(1, n+1): + for l in range(1, m+1): + if l % 2 == 1: + center = ((0.5 + (l-1) * 3/2) * radius, (k-1)*np.sqrt(3)*radius) + else: + center = ((0.5 + (l-1) * 3/2) * radius, (k-1)*np.sqrt(3)*radius + np.sqrt(3)/2*radius) + offset_center = (center[0] + x1, center[1] + y1) + cp = Point(offset_center) + if cp.distance(pol) <= np.sqrt(3)/2*radius: + centers.append(offset_center) + return centers + + +def sample_dt(stats): + dt = np.random.normal(stats.mu, stats.sigma) + if dt < stats.lower_truncation: + dt = stats.lower_truncation + return timedelta(seconds=dt) + + +def extract_rois(rfis): + """Extract all regions of interest from a list of rfis.""" + rois = [] + for rfi in rfis: + for roi in rfi.region_of_interest: + rois.append(roi) + return rois + + +def get_processing_nodes(network_topology): + return [node for node in network_topology.nodes + if len(node.processing_capability.algorithms) > 0] + + +def get_edge(network_topology, source_node_id, target_node_id): + return next(edge for edge in network_topology.edges + if edge.source_node == source_node_id + and edge.target_node == target_node_id) + + +def get_earliest_proc_start_time(ongoing_actions_per_node, node_id, default_time): + """Get the earliest start time for processing actions for a given node. + + If there are no ongoing processing actions for the node, then the earliest start time is the + default time. Otherwise, the earliest start time is the end time of the latest ongoing + processing action. 
+ """ + if len(ongoing_actions_per_node[node_id]): + return ongoing_actions_per_node[node_id][-1].end_time + else: + return default_time + + +def exists_ongoing_comms_proc_action_for_image(ongoing_actions, image_id): + """Check if there is an ongoing comms or processing action for a given image.""" + return (any(action for action in ongoing_actions['comms'] if action.image.id == image_id) + or any(action for action in ongoing_actions['proc'] if action.image.id == image_id)) + + +def enumerate_actions_for_processing_node(node, image_store, network, ongoing_actions, + earliest_proc_start_time, timestamp): + """Enumerate all possible actions for a processing node that is not a sensor. + + If the node is not an asset (sensor), then we don't need to consider collection actions, but + we do need to consider comms and processing actions for existing images + + Arguments + --------- + node : Node + The processing node + image_store : ImageStore + The image store, containing all images that have been collected + network : NetworkTopology + The network topology, containing all nodes and edges + earliest_proc_start_time : datetime + The earliest start time for processing actions + timestamp : datetime + The current timestamp + + Returns + ------- + possible_actions : List[Tuple[CollectionAction, CommsAction, ProcAction]] + A list of possible actions for the node + """ + possible_actions = [ActionTuple()] # No action + node_id = node.id + for image in image_store.images: + + # If there is an ongoing comms or processing action for this image, then we don't need to + # consider processing actions for it + if exists_ongoing_comms_proc_action_for_image(ongoing_actions, image.id): + continue + + # If the image is not on this node, then we need to consider a comms action with + # the appropriate traversal time + if image.node_id != node_id: + edge = get_edge(network, image.node_id, node_id) + dt = sample_dt(edge.traversal_time) + else: + dt = timedelta(seconds=0) + comms_action = CommsAction( + id=uuid4(), + source_node_id=image.node_id, + target_node_id=node_id, + status=ActionStatus.CREATED, + start_time=timestamp, + image=image, + dt=dt + ) + # The earliest start time for processing actions is the end time of the comms + # action or the earliest start time of the processing node + earliest_proc_start_time_tmp = max(comms_action.end_time, earliest_proc_start_time) + # Create a processing action for each algorithm + for algorithm in node.processing_capability.algorithms: + proc_action = ProcAction( + id=uuid4(), + node_id=node.id, + algorithm=algorithm, + status=ActionStatus.CREATED, + start_time=earliest_proc_start_time_tmp, + image=image, + dt=sample_dt(algorithm.processing_statistics) + ) + possible_actions.append( + ActionTuple((None, comms_action, proc_action)) + ) + return possible_actions + + +def enumerate_actions_for_asset(node, fov_radius, network, rois, ongoing_proc_actions_per_node, + earliest_proc_start_time, timestamp): + """Enumerate all possible actions for a processing node that is an asset (sensor). + + If the node is an asset (sensor), then we need to consider collection actions, comms actions + and processing actions. + + NOTE: We assume that a sensor will not consider processing existing images. 
+ + Arguments + --------- + node : Node + The processing node + fov_radius : float + The field of view radius of the sensor (in km) + network : NetworkTopology + The network topology, containing all nodes and edges + rois : List[GeoRegion] + A list of regions of interest + ongoing_proc_actions_per_node : Dict[UUID, List[ProcAction]] + A dictionary of ongoing processing actions per node + earliest_proc_start_time : datetime + The earliest start time for processing actions for the node + timestamp : datetime + The current timestamp + + Returns + ------- + possible_actions : List[Tuple[CollectionAction, CommsAction, ProcAction]] + A list of possible actions for the node + """ + possible_actions = [ActionTuple()] # No action + node_id = node.id + # NOTE: This is an approximation of asset fov in lat/long degrees (1 degree = 111km) + asset_fov_ll = fov_radius / 111 + # Get all possible collect locations + possible_collect_locations = [] + for roi in rois: + x1 = roi.corners[0].longitude + y1 = roi.corners[0].latitude + x2 = roi.corners[1].longitude + y2 = roi.corners[1].latitude + # For each roi, find the minimum number of overlapping circles required to cover it + possible_collect_locations += cover_rectangle_with_minimum_overlapping_circles( + x1, y1, x2, y2, asset_fov_ll + ) + processing_nodes = get_processing_nodes(network) + for location in possible_collect_locations: + geo_location = GeoLocation(latitude=location[1], longitude=location[0], altitude=0) + # Create dummy image + image = Image( + id=uuid4(), + collection_time=timestamp, + size=1, + location=geo_location, + fov_radius=fov_radius, + node_id=node_id + ) + # Create a collection action for each possible location + coll_action = CollectionAction( + id=uuid4(), + node_id=node_id, + location=geo_location, + status=ActionStatus.CREATED, + image=image + ) + # Create comms and processing actions for each processing node + for proc_node in processing_nodes: + dt = timedelta(seconds=0) + earliest_proc_start_time_tmp = earliest_proc_start_time + if proc_node.id != node_id: + # If the processing node is not the same as the collection node, then we + # need to consider a comms action with the appropriate traversal time + edge = get_edge(network, node_id, proc_node.id) + dt = sample_dt(edge.traversal_time) + earliest_proc_start_time_tmp = get_earliest_proc_start_time( + ongoing_proc_actions_per_node, proc_node.id, timestamp + ) + comms_action = CommsAction( + id=uuid4(), + source_node_id=node_id, + target_node_id=proc_node.id, + image=image, + status=ActionStatus.CREATED, + start_time=timestamp, + dt=dt + ) + for algorithm in proc_node.processing_capability.algorithms: + # Create a processing action for each algorithm + proc_action = ProcAction( + id=uuid4(), + node_id=proc_node.id, + algorithm=algorithm, + image=image, + status=ActionStatus.CREATED, + start_time=max(comms_action.end_time, earliest_proc_start_time_tmp), + dt=sample_dt(algorithm.processing_statistics) + ) + # Add triple of actions to list of possible actions + possible_actions.append(ActionTuple((coll_action, comms_action, proc_action))) + + return possible_actions + + +def enumerate_action_configs(image_store, network, assets, rfis, ongoing_actions, timestamp): + + # Extract all rois from rfis + rois = extract_rois(rfis) + processing_nodes = get_processing_nodes(network) + available_assets = [asset for asset in assets.assets + if asset.asset_status.availability == Availability.AVAILABLE] + + ongoing_proc_actions_per_node = { + node.id: sorted([action for action in 
ongoing_actions['proc'] + if action.node_id == node.id], key=lambda a: a.end_time) + for node in processing_nodes + } + + # Iterate over the processing nodes and enumerate all possible actions for each node + possible_actions_per_node = dict() + for node in processing_nodes: + node_id = node.id + # If there are ongoing actions for this node, set the earliest start time to the end time + # of the latest ongoing action + earliest_start_time = get_earliest_proc_start_time( + ongoing_proc_actions_per_node, node_id, timestamp + ) + # Check if the node is an asset (sensor) + asset = next((asset for asset in available_assets + if asset.asset_description.id == node_id), None) + # If the node is not an asset (sensor), then we don't need to consider collection actions + if asset is None: + possible_actions_per_node[node.id] = enumerate_actions_for_processing_node( + node, image_store, network, ongoing_actions, earliest_start_time, + timestamp + ) + # If the node is an asset (sensor), then we need to consider collection actions + else: + possible_actions_per_node[node.id] = enumerate_actions_for_asset( + node, asset.asset_description.fov_radius, network, rois, + ongoing_proc_actions_per_node, earliest_start_time, timestamp + ) + + # Enumerate all possible action configurations + possible_action_configs = [] + num_images_to_be_processed = len(image_store.images) - np.sum([ + exists_ongoing_comms_proc_action_for_image(ongoing_actions, image.id) + for image in image_store.images + ]) + for config in itertools.product(*possible_actions_per_node.values()): + num_collection_actions = sum(1 for action in config if action and action.coll_action) + # If the number of collection actions is less than the number of available assets, then + # this is certainly not an optimal action configuration + if num_collection_actions < len(available_assets): + continue + + # Filter out action combinations where some remote processing nodes do nothing, when + # there are available images to be processed + num_remote_proc_actions = \ + sum(1 for action in config + if action and action.type in (ActionTupleType.COMMS_AND_PROC, + ActionTupleType.PROC_ONLY) + ) + # If the number of remote processing actions is less than the number of images to be + # processed, then this is certainly not an optimal action configuration + if num_remote_proc_actions < num_images_to_be_processed: + continue + + # Make a deep copy of the combination, so that we can modify its elements without + # affecting the original ones + config = copy.deepcopy(config) + + # Get all processing actions in the configuration + proc_actions = [action.proc_action for action in config if action] + + # Ensure that no more than one processing action is performed on an image + image_ids_to_be_processed = [action.image.id for action in proc_actions] + # If an image id appears more than once in the list, then there is more than one + # processing action for that image, hence we discard this action configuration + if len(set(image_ids_to_be_processed)) != len(image_ids_to_be_processed): + continue + + # Check that processing actions are consistent with each other (i.e. no overlapping + # processing actions). This is done by ensuring that the start time of each processing + # action is after the end time of the previous processing action for the same node. 
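+ # Illustrative example (hypothetical times): if node N carries processing actions A + # (ending 10:05) and B (starting 10:03) in the same configuration, the loop below + # defers B so that B.start_time == A.end_time, serialising the work on node N.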
proc_node_ids = [action.node_id for action in proc_actions] + # Get processing actions for each node + proc_actions_per_node = { + node_id: [action for action in proc_actions if action.node_id == node_id] + for node_id in proc_node_ids + } + for node_id, p_actions in proc_actions_per_node.items(): + # Sort processing actions by start time + p_actions.sort(key=lambda a: a.start_time) + if len(p_actions) == 1: + continue + for previous_action, current_action in zip(p_actions, p_actions[1:]): + # If the start time of the current processing action is before the end time of + # the previous processing action, then set the start time of the current + # processing action to the end time of the previous processing action + if current_action.start_time < previous_action.end_time: + current_action.start_time = previous_action.end_time + possible_action_configs.append(config) + + return possible_action_configs + + +def proc_actions_from_config_sequence(config_seq): + proc_actions = [] + for config in config_seq: + for action in config: + if action is not None and action[2] is not None: + proc_actions.append(action[2]) + return proc_actions + + +def queue_actions(config, image_store, ongoing_actions): + """Queue actions for execution + + Add collection, comms and processing actions to the ongoing actions list + """ + for node_actions in config: + if not node_actions: + continue + coll_action, comms_action, proc_action = node_actions + # If collection action is None or collection action is not for the same node as + # processing action, it means that processing is not done on the sensor + if coll_action is None or coll_action.node_id != proc_action.node_id: + # Perform collection action + if coll_action is not None: + image_store.images.append(proc_action.image) + # Perform comms action + if comms_action is not None: + # NOTE: compare against timedelta(0); a bare 0 never equals a timedelta + if comms_action.dt != timedelta(seconds=0): + # If comms action is not instantaneous, add it to ongoing actions + ongoing_actions['comms'].append(comms_action) + else: + # If comms action is instantaneous, find image and update node_id to + # processing node id + image = next(image for image in image_store.images + if image.id == comms_action.image.id) + image.node_id = proc_action.node_id + # Perform processing action + ongoing_actions['proc'].append(proc_action) + + +def rollout_actions(config, image_store, network_topology, assets, rfis, + ongoing_actions, num_samples, num_timesteps, interval, timestamp): + """Roll out actions for a given configuration + + Returns a list of lists of action configs, where each list of action configs is a rollout + """ + + # Initialise list of action configs.
The first action config is the same for all rollouts + all_configs = [[config] for _ in range(num_samples)] + + # For each rollout + for config_list in all_configs: + current_time = timestamp + # Copy image store and ongoing actions, so that they can be modified without affecting + # other rollouts + image_store_tmp = copy.deepcopy(image_store) + ongoing_actions_tmp = copy.deepcopy(ongoing_actions) + # Queue actions for execution + queue_actions(config, image_store_tmp, ongoing_actions_tmp) + # Rollout + for i in range(num_timesteps): + current_time += interval + # Get all possible action configs for the current time + configs = enumerate_action_configs(image_store_tmp, network_topology, assets, + rfis, ongoing_actions_tmp, current_time) + # Select a random action config and add it to the rollout action configs + current_config = configs[np.random.randint(len(configs))] + config_list.append(current_config) + # Queue actions for execution + queue_actions(current_config, image_store_tmp, ongoing_actions_tmp) + return all_configs + + +def get_sensor(location, fov_radius, prob_detection=None, false_alarm_density=None): + # Create a sensor + sensor_position = StateVector([location.longitude, + location.latitude, + location.altitude]) + if prob_detection is not None: + prob_detection = Probability(prob_detection) + sensor = MovableUAVCamera(ndim_state=6, mapping=[0, 2, 4], + noise_covar=np.diag([0.0001, 0.0001, 0.0001]), + location_x=sensor_position[0], + location_y=sensor_position[1], + position=sensor_position, + prob_detect=prob_detection, + fov_radius=fov_radius, + fov_in_km=True) + # Configure the sensor clutter model based on the algorithm false alarm density + # and the image footprint + x, y = sensor.footprint.exterior.coords.xy + min_x, max_x = np.min(x), np.max(x) + min_y, max_y = np.min(y), np.max(y) + if false_alarm_density: + clutter_model = ClutterModel( + clutter_rate=false_alarm_density, + distribution=np.random.default_rng().uniform, + dist_params=((min_x, max_x), (min_y, max_y), (100., 100.)) + ) + sensor.clutter_model = clutter_model + return sensor + + +def simulate_new_tracks(sensor, timestamp, birth_density): + x, y = sensor.footprint.exterior.coords.xy + min_x, max_x = np.min(x), np.max(x) + min_y, max_y = np.min(y), np.max(y) + + # Assume a Poisson process for the number of new tracks. Track positions are generated + # uniformly across the image footprint; velocities are initialised to zero + new_tracks = set() + for _ in range(poisson.rvs(1)): + # Sample position + x = np.random.default_rng().uniform(min_x, max_x) + y = np.random.default_rng().uniform(min_y, max_y) + z = 100.
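+ # NOTE: target altitude is assumed fixed at 100 m, consistent with the (100., 100.) + # altitude band used by the sensor clutter model above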
+ state_vector = StateVector([x, 0, y, 0, z, 0]) + # Extract covariance from birth density and adjust position covariance + covariance = np.copy(birth_density.covar) + covariance[[0, 2], [0, 2]] = np.random.default_rng().uniform(0.01, 0.1) # 0.01 + + # Sample existence probability + exist_prob = Probability(np.random.default_rng().uniform(0.5, 1)) #0.99 + + # Sample target type + all_target_types = list(TargetType) + num_types = np.random.randint(1, len(all_target_types) + 1) + target_type_inds = np.random.choice([i for i in range(len(TargetType))], + size=num_types, replace=False) + target_types = [all_target_types[i] for i in target_type_inds] + target_type_confidences = { + target_type: Probability(np.random.default_rng().uniform(0.1, 1)) + for target_type in target_types + } + + # Create track + init_metadata = { + 'target_type_confidences': target_type_confidences, + } + state = GaussianState(state_vector, covariance, timestamp=timestamp) + track = Track(id=uuid4(), states=[state], init_metadata=init_metadata) + track.exist_prob = exist_prob + new_tracks.add(track) + return new_tracks + diff --git a/stonesoup/custom/hypothesiser/__init__.py b/stonesoup/custom/hypothesiser/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/hypothesiser/probability.py b/stonesoup/custom/hypothesiser/probability.py new file mode 100644 index 000000000..b366f3dd8 --- /dev/null +++ b/stonesoup/custom/hypothesiser/probability.py @@ -0,0 +1,252 @@ +from copy import copy +from typing import Union, Callable + +import numpy as np +from scipy.stats import multivariate_normal as mn + +from stonesoup.base import Property +from stonesoup.functions import nearestSPD +from stonesoup.hypothesiser import Hypothesiser +from stonesoup.measures import SquaredMahalanobis +from stonesoup.predictor import Predictor +from stonesoup.types.detection import MissedDetection +from stonesoup.types.hypothesis import SingleProbabilityHypothesis +from stonesoup.types.multihypothesis import MultipleHypothesis +from stonesoup.types.numeric import Probability +from stonesoup.types.state import State +from stonesoup.updater import Updater + + +class PDAHypothesiser(Hypothesiser): + """Hypothesiser based on Probabilistic Data Association (PDA) + + Generate track predictions at detection times and calculate probabilities + for all prediction-detection pairs for single prediction and multiple + detections. + """ + + predictor: Predictor = Property(doc="Predict tracks to detection times") + updater: Updater = Property(doc="Updater used to get measurement prediction") + clutter_spatial_density: float = Property( + default=None, + doc="Spatial density of clutter - tied to probability of false detection. Default is None " + "where the clutter spatial density is calculated based on assumption that " + "all but one measurement within the validation region of the track are clutter.") + prob_gate: Probability = Property( + default=Probability(0.95), + doc="Gate Probability - prob. 
gate contains true measurement " + "if detected") + prob_detect: Union[Probability, Callable[[State], Probability]] = Property( + default=None, + doc="Target Detection Probability") + predict: bool = Property(default=True, doc="Perform prediction step") + per_measurement: bool = Property(default=False, doc="Generate per measurement predictions") + normalise: bool = Property(default=True, doc="Normalise probabilities") + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.prob_detect is None: + self.prob_detect = lambda x: Probability(0.85) + + def hypothesise(self, track, detections, timestamp, **kwargs): + r"""Evaluate and return all track association hypotheses. + + For a given track and a set of N detections, return a + MultipleHypothesis with N+1 hypotheses (the first being a + 'MissedDetection'), each with an associated probability. + Probabilities are assumed to be exhaustive (sum to 1) and mutually + exclusive (two detections cannot be the correct association at the + same time). + + Detection 0: missed detection, none of the detections are associated + with the track. + Detection :math:`i, i \in {1...N}`: detection i is associated + with the track. + + The probabilities for these detections are calculated as follows: + + .. math:: + + \beta_i(k) = \begin{cases} + \frac{\mathcal{L}_{i}(k)}{1-P_{D}P_{G}+\sum_{j=1}^{m(k)} + \mathcal{L}_{j}(k)}, \quad i=1,...,m(k) \\ + \frac{1-P_{D}P_{G}}{1-P_{D}P_{G}+\sum_{j=1}^{m(k)} + \mathcal{L}_{j}(k)}, \quad i=0 + \end{cases} + + where + + .. math:: + + \mathcal{L}_{i}(k) = \frac{\mathcal{N}[z_{i}(k);\hat{z}(k|k-1), + S(k)]P_{D}}{\lambda} + + :math:`\lambda` is the clutter density + + :math:`P_{D}` is the detection probability + + :math:`P_{G}` is the gate probability + + :math:`\mathcal{N}[z_{i}(k);\hat{z}(k|k-1),S(k)]` is the likelihood + ratio of the measurement :math:`z_{i}(k)` originating from the track + target rather than the clutter. + + NOTE: Since all probabilities have the same denominator and are + normalized later, the denominator can be discarded. + + References: + + [1] "The Probabilistic Data Association Filter: Estimation in the + Presence of Measurement Origin Uncertainty" - + https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=5338565 + + [2] "Robotics 2 Data Association" (Lecture notes) - + http://ais.informatik.uni-freiburg.de/teaching/ws10/robotics2/pdfs/rob2-15-dataassociation.pdf + + Parameters + ---------- + track : Track + The track object to hypothesise on + detections : set of :class:`~.Detection` + The available detections + timestamp : datetime.datetime + A timestamp used when evaluating the state and measurement + predictions. Note that if a given detection has a non-empty + timestamp, then prediction will be performed according to + the timestamp of the detection.
+ + Returns + ------- + : :class:`~.MultipleHypothesis` + A container of :class:`~.SingleProbabilityHypothesis` objects + """ + + hypotheses = list() + + if self.predict: + # Common state & measurement prediction + prediction = self.predictor.predict(track, timestamp=timestamp, **kwargs) + else: + prediction = track.state + # Determine the probability of detection (which may be state dependent) + try: + prob_detect = self.prob_detect(prediction) + except TypeError: + prob_detect = self.prob_detect + # Missed detection hypothesis + probability = Probability(1 - prob_detect*self.prob_gate) + hypotheses.append( + SingleProbabilityHypothesis( + prediction, + MissedDetection(timestamp=timestamp), + probability + )) + + # True detection hypotheses + measurement_prediction = None + for detection in detections: + if self.predict and self.per_measurement: + # Re-evaluate prediction + prediction = self.predictor.predict( + track.state, timestamp=detection.timestamp) + try: + prob_detect = self.prob_detect(prediction) + except TypeError: + prob_detect = self.prob_detect + + if self.per_measurement or measurement_prediction is None: + # Compute measurement prediction and probability measure + measurement_prediction = self.updater.predict_measurement( + prediction, detection.measurement_model, **kwargs) + # Ensure covariance is positive definite + measurement_prediction.covar = nearestSPD(measurement_prediction.covar) + + # Calculate the difference first to handle custom types (mean defaults to zero) + # This is required as logpdf converts arrays to floats + try: + log_pdf = mn.logpdf( + (detection.state_vector - measurement_prediction.state_vector).ravel(), + cov=measurement_prediction.covar) + except np.linalg.LinAlgError: + print('Warning: singular measurement covariance; evaluating likelihood ' + 'with allow_singular=True') + log_pdf = mn.logpdf( + (detection.state_vector - measurement_prediction.state_vector).ravel(), + cov=measurement_prediction.covar, allow_singular=True) + pdf = Probability(log_pdf, log_value=True) + probability = (pdf * prob_detect) / self.clutter_spatial_density + + # True detection hypothesis + hypotheses.append( + SingleProbabilityHypothesis( + prediction, + detection, + probability, + measurement_prediction)) + + return MultipleHypothesis(hypotheses, normalise=self.normalise, total_weight=1) + + +class IPDAHypothesiser(PDAHypothesiser): + """ Integrated PDA Hypothesiser """ + + prob_survive: Probability = Property(doc="Probability of survival", default=Probability(0.99)) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def hypothesise(self, track, detections, timestamp, **kwargs): + r"""Evaluate and return all track association hypotheses.
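+ + In addition to the PDA association weights, the predicted existence probability of + the track is propagated (as implemented below) according to + + .. math:: + + r_{k|k-1} = e^{-(1-P_S)\Delta t}\,r_{k-1} + + where :math:`P_S` is :attr:`prob_survive` and :math:`\Delta t` is the prediction + interval in seconds.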
+ """ + + hypotheses = list() + + if self.predict: + # Common state & measurement prediction + prediction = self.predictor.predict(track, timestamp=timestamp, **kwargs) + # Compute predicted existence + time_interval = timestamp - track.timestamp + prob_survive = np.exp(-(1-self.prob_survive)*time_interval.total_seconds()) + track.exist_prob = prob_survive * track.exist_prob + else: + prediction = track.state + # Missed detection hypothesis + prob_detect = self.prob_detect(prediction) + probability = Probability(1 - prob_detect * self.prob_gate * track.exist_prob) + w = (1 - track.exist_prob) / ((1 - prob_detect * self.prob_gate) * track.exist_prob) + hypotheses.append( + SingleProbabilityHypothesis( + prediction, + MissedDetection(timestamp=timestamp), + probability, + metadata={"w": w} + )) + + # True detection hypotheses + measurement_prediction = None + for detection in detections: + if self.predict and self.per_measurement: + # Re-evaluate prediction + prediction = self.predictor.predict( + track.state, timestamp=detection.timestamp) + prob_detect = self.prob_detect(prediction) + if self.per_measurement or measurement_prediction is None: + # Compute measurement prediction and probability measure + measurement_prediction = self.updater.predict_measurement( + prediction, detection.measurement_model, **kwargs) + # Calculate difference before to handle custom types (mean defaults to zero) + # This is required as log pdf coverts arrays to floats + log_pdf = mn.logpdf( + (detection.state_vector - measurement_prediction.state_vector).ravel(), + cov=measurement_prediction.covar) + pdf = Probability(log_pdf, log_value=True) + probability = (pdf * prob_detect * track.exist_prob)/self.clutter_spatial_density + + # True detection hypothesis + hypotheses.append( + SingleProbabilityHypothesis( + prediction, + detection, + probability, + measurement_prediction)) + + return MultipleHypothesis(hypotheses, normalise=True, total_weight=1) \ No newline at end of file diff --git a/stonesoup/custom/initiator/__init__.py b/stonesoup/custom/initiator/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/initiator/smcphd.py b/stonesoup/custom/initiator/smcphd.py new file mode 100644 index 000000000..ed4342e2c --- /dev/null +++ b/stonesoup/custom/initiator/smcphd.py @@ -0,0 +1,635 @@ +from copy import copy +from typing import List, Any, Union, Callable + +import numpy as np +from scipy.special import logsumexp +from scipy.stats import multivariate_normal + +from stonesoup.base import Base, Property +from stonesoup.initiator import Initiator +from stonesoup.models.measurement import MeasurementModel +from stonesoup.models.transition import TransitionModel +from stonesoup.resampler import Resampler +from stonesoup.types.array import StateVectors +from stonesoup.types.detection import Detection, MissedDetection +from stonesoup.types.hypothesis import SingleProbabilityHypothesis +from stonesoup.types.mixture import GaussianMixture +from stonesoup.types.multihypothesis import MultipleHypothesis +from stonesoup.types.numeric import Probability +from stonesoup.types.prediction import Prediction +from stonesoup.types.state import State +from stonesoup.types.track import Track +from stonesoup.types.update import Update, GaussianStateUpdate + + +class SMCPHDFilter(Base): + """ + Sequential Monte-Carlo (SMC) PHD filter implementation, based on [1]_ + + .. [1] Ba-Ngu Vo, S. Singh and A. 
Doucet, "Sequential Monte Carlo Implementation of the + PHD Filter for Multi-target Tracking," Sixth International Conference of Information + Fusion, 2003. Proceedings of the, 2003, pp. 792-799, doi: 10.1109/ICIF.2003.177320. + .. [2] P. Horridge and S. Maskell, ā€œUsing a probabilistic hypothesis density filter to + confirm tracks in a multi-target environment,ā€ in 2011 Jahrestagung der Gesellschaft + fr Informatik, October 2011. + """ + + transition_model: TransitionModel = Property(doc='The transition model') + measurement_model: MeasurementModel = Property(doc='The measurement model') + prob_detect: Union[Probability, Callable[[State], Probability]] = Property( + default=Probability(0.85), + doc="Target Detection Probability") + prob_death: Probability = Property(doc='The probability of death') + prob_birth: Probability = Property(doc='The probability of birth') + birth_rate: float = Property( + doc='The birth rate (i.e. number of new/born targets at each iteration(') + birth_density: State = Property( + doc='The birth density (i.e. density from which we sample birth particles)') + clutter_intensity: float = Property(doc='The clutter intensity per unit volume') + resampler: Resampler = Property(default=None, doc='Resampler to prevent particle degeneracy') + num_samples: int = Property(doc='The number of samples. Default is 1024', default=1024) + birth_scheme: str = Property( + doc='The scheme for birth particles. Options are "expansion" | "mixture". ' + 'Default is "expansion"', + default='expansion' + ) + scale_birth_weights: bool = Property( + doc="Whether to scale the birth weights by their likelihood, given the birth density. " + "Setting this to True can cause issues if the defined birth density is not a good " + "approximation to the true birth density. 
On the other hand, setting this to False " + "can lead to premature initialization of targets.", + default=False + ) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if not callable(self.prob_detect): + prob_detect = copy(self.prob_detect) + self.prob_detect = lambda state: prob_detect + + def predict(self, state, timestamp): + """ + Predict the next state of the target density + + Parameters + ---------- + state: :class:`~.State` + The current state of the target + timestamp: :class:`datetime.datetime` + The time at which the state is valid + + Returns + ------- + : :class:`~.State` + The predicted next state of the target + """ + + prior_weights = state.weight + time_interval = timestamp - state.timestamp + + # Predict particles forward + pred_particles_sv = self.transition_model.function(state, + time_interval=time_interval, + noise=True) + + if self.birth_scheme == 'expansion': + # Expansion birth scheme, as described in [1] + # Compute number of birth particles (J_k) as a fraction of the number of particles + num_birth = round(float(self.prob_birth) * self.num_samples) + + # Sample birth particles (stacked as rows, i.e. (num_birth, ndim), in both branches + # below, and transposed once when appended to the predicted particles) + birth_weights = np.zeros((0,)) + if isinstance(self.birth_density, GaussianMixture): + birth_particles = np.zeros((0, pred_particles_sv.shape[0])) + particles_per_component = num_birth // len(self.birth_density) + for i, component in enumerate(self.birth_density): + if i == len(self.birth_density) - 1: + particles_per_component += num_birth % len(self.birth_density) + birth_particles_component = np.atleast_2d(multivariate_normal.rvs( + component.mean.ravel(), + component.covar, + particles_per_component)) + birth_weights_component = np.full((particles_per_component,), + Probability(self.birth_rate / num_birth)) + if self.scale_birth_weights: + # Scale birth weights by their likelihood, given the birth density + birth_weights_component *= multivariate_normal.pdf( + birth_particles_component, + component.mean.ravel(), + component.covar, + allow_singular=True) + birth_particles = np.vstack((birth_particles, birth_particles_component)) + birth_weights = np.hstack((birth_weights, birth_weights_component)) + else: + birth_particles = multivariate_normal.rvs(self.birth_density.mean.ravel(), + self.birth_density.covar, + num_birth) + birth_weights = np.full((num_birth,), + Probability(self.birth_rate / num_birth)) + if self.scale_birth_weights: + # Scale birth weights by their likelihood, given the birth density + birth_weights *= multivariate_normal.pdf( + birth_particles, + self.birth_density.mean.ravel(), + self.birth_density.covar, + allow_singular=True) + + # Surviving particle weights + prob_survive = np.exp(-float(self.prob_death) * time_interval.total_seconds()) + pred_weights = prob_survive * prior_weights + + # Append birth particles to predicted ones + pred_particles_sv = StateVectors( + np.concatenate((pred_particles_sv, birth_particles.T), axis=1)) + pred_weights = np.concatenate((pred_weights, birth_weights)) + else: + # Mixture based birth scheme + total_samples = self.num_samples + + # Flip a coin for each particle to decide if it gets replaced by a birth particle + birth_inds = np.flatnonzero(np.random.binomial(1, self.prob_birth, self.num_samples)) + + # Sample birth particles and replace in original state vector matrix + num_birth = len(birth_inds) + birth_particles = np.zeros((pred_particles_sv.shape[0], 0)) + if isinstance(self.birth_density, GaussianMixture): + particles_per_component = num_birth // len(self.birth_density) + for i, component in
enumerate(self.birth_density): + if i == len(self.birth_density) - 1: + particles_per_component += num_birth % len(self.birth_density) + birth_particles_component = multivariate_normal.rvs( + component.mean.ravel(), + component.covar, + particles_per_component).T + birth_particles = np.hstack((birth_particles, birth_particles_component)) + else: + birth_particles = multivariate_normal.rvs(self.birth_density.mean.ravel(), + self.birth_density.covar, + len(birth_inds)).T + pred_particles_sv[:, birth_inds] = birth_particles + + # Process weights + pred_weights = ((1 - self.prob_death) + Probability( + self.birth_rate / total_samples)) * prior_weights + + prediction = Prediction.from_state(state, state_vector=pred_particles_sv, + weight=pred_weights, + timestamp=timestamp, particle_list=None, + transition_model=self.transition_model) + + return prediction + + def update(self, prediction, detections, timestamp, meas_weights=None): + """ + Update the predicted state of the target density with the given detections + + Parameters + ---------- + prediction: :class:`~.State` + The predicted state of the target + detections: :class:`~.Detection` + The detections at the current time step + timestamp: :class:`datetime.datetime` + The time at which the update is valid + meas_weights: :class:`np.ndarray` + The weights of the measurements + + Returns + ------- + : :class:`~.State` + The updated state of the target + """ + + weights_per_hyp = self.get_weights_per_hypothesis(prediction, detections, meas_weights) + + # Construct hypothesis objects (StoneSoup specific) + single_hypotheses = [ + SingleProbabilityHypothesis(prediction, + measurement=MissedDetection(timestamp=timestamp), + probability=weights_per_hyp[:, 0])] + for i, detection in enumerate(detections): + single_hypotheses.append( + SingleProbabilityHypothesis(prediction, + measurement=detection, + probability=weights_per_hyp[:, i + 1]) + ) + hypothesis = MultipleHypothesis(single_hypotheses, normalise=False) + + # Update weights Eq. 
(8) of [1] + # w_k^i = \sum_{z \in Z_k}{w^{n,i}}, where i is the index of z in Z_k + log_post_weights = logsumexp(np.log(weights_per_hyp).astype(float), axis=1) + + # Resample + log_num_targets = logsumexp(log_post_weights) # N_{k|k} + update = copy(prediction) + # Normalize weights + update.weight = Probability.from_log_ufunc(log_post_weights - log_num_targets) + if self.resampler is not None: + update = self.resampler.resample(update, self.num_samples) # Resample + # De-normalize + update.weight = Probability.from_log_ufunc(np.log(update.weight).astype(float) + + log_num_targets) + + return Update.from_state( + state=prediction, + state_vector=update.state_vector, + weight=update.weight, + particle_list=None, + hypothesis=hypothesis, + timestamp=timestamp) + + def iterate(self, state, detections: List[Detection], timestamp): + """ + Iterate the filter over the given state and detections + + Parameters + ---------- + state: :class:`~.State` + The predicted state of the target + detections: :class:`~.Detection` + The detections at the current time step + timestamp: :class:`datetime.datetime` + The time at which the update is valid + + Returns + ------- + : :class:`~.State` + The updated state of the target + """ + prediction = self.predict(state, timestamp) + update = self.update(prediction, detections, timestamp) + return update + + def get_measurement_loglikelihoods(self, prediction, detections, meas_weights): + num_samples = prediction.state_vector.shape[1] + # Compute g(z|x) matrix as in [1] + g = np.zeros((num_samples, len(detections))) + for i, detection in enumerate(detections): + if not meas_weights[i]: + g[:, i] = -np.inf + continue + g[:, i] = detection.measurement_model.logpdf(detection, prediction, + noise=True) + return g + + def get_weights_per_hypothesis(self, prediction, detections, meas_weights, *args, **kwargs): + num_samples = prediction.state_vector.shape[1] + if meas_weights is None: + meas_weights = np.array([Probability(1) for _ in range(len(detections))]) + + # Compute g(z|x) matrix as in [1] + g = self.get_measurement_loglikelihoods(prediction, detections, meas_weights) + + # Get probability of detection + prob_detect = np.asfarray(self.prob_detect(prediction)) + + # Calculate w^{n,i} Eq. 
(20) of [2] + try: + Ck = np.log(prob_detect[:, np.newaxis]) + g \ + + np.log(prediction.weight[:, np.newaxis].astype(float)) + except IndexError: + Ck = np.log(prob_detect) + g \ + + np.log(prediction.weight[:, np.newaxis].astype(float)) + C = logsumexp(Ck, axis=0) + k = np.log([detection.metadata['clutter_density'] + if 'clutter_density' in detection.metadata else self.clutter_intensity + for detection in detections]) + C_plus = np.logaddexp(C, k) + weights_per_hyp = np.full((num_samples, len(detections) + 1), -np.inf) + weights_per_hyp[:, 0] = np.log(1 - prob_detect) + np.log(np.asfarray(prediction.weight)) + if len(detections): + weights_per_hyp[:, 1:] = np.log(np.asfarray(meas_weights)) + Ck - C_plus + + return Probability.from_log_ufunc(weights_per_hyp) + + +class ISMCPHDFilter(SMCPHDFilter): + + def predict(self, state, timestamp): + """ + Predict the next state of the target density + + Parameters + ---------- + state: :class:`~.State` + The current state of the target + timestamp: :class:`datetime.datetime` + The time at which the state is valid + + Returns + ------- + : :class:`~.State` + The predicted next state of the target + """ + + prior_weights = state.weight + time_interval = timestamp - state.timestamp + + # Predict particles forward + pred_particles_sv = self.transition_model.function(state, + time_interval=time_interval, + noise=True) + + # Surviving particle weights + prob_survive = np.exp(-float(self.prob_death) * time_interval.total_seconds()) + pred_weights = prob_survive * prior_weights + + prediction = Prediction.from_state(state, state_vector=pred_particles_sv, + weight=pred_weights, + timestamp=timestamp, particle_list=None, + transition_model=self.transition_model) + prediction.birth_idx = state.birth_idx if hasattr(state, 'birth_idx') else [] + return prediction + + def update(self, prediction, detections, timestamp, meas_weights=None): + """ + Update the predicted state of the target density with the given detections + + Parameters + ---------- + prediction: :class:`~.State` + The predicted state of the target + detections: :class:`~.Detection` + The detections at the current time step + timestamp: :class:`datetime.datetime` + The time at which the update is valid + meas_weights: :class:`np.ndarray` + The weights of the measurements + + Returns + ------- + : :class:`~.State` + The updated state of the target + """ + num_persistent = prediction.state_vector.shape[1] + birth_state = self.get_birth_state(prediction, detections, timestamp) + + weights_per_hyp = self.get_weights_per_hypothesis(prediction, detections, meas_weights, + birth_state) + + # Construct hypothesis objects (StoneSoup specific) + single_hypotheses = [ + SingleProbabilityHypothesis(prediction, + measurement=MissedDetection(timestamp=timestamp), + probability=weights_per_hyp[:num_persistent, 0])] + for i, detection in enumerate(detections): + single_hypotheses.append( + SingleProbabilityHypothesis(prediction, + measurement=detection, + probability=weights_per_hyp[:num_persistent, + i + 1]) + ) + hypothesis = MultipleHypothesis(single_hypotheses, normalise=False) + + # Update weights Eq. 
(8) of [1] + # w_k^i = \sum_{z \in Z_k}{w^{n,i}}, where i is the index of z in Z_k + log_post_weights = logsumexp(np.log(weights_per_hyp).astype(float), axis=1) + log_post_weights_pers = log_post_weights[:num_persistent] + log_post_weights_birth = log_post_weights[num_persistent:] + + # Resample persistent + log_num_targets_pers = logsumexp(log_post_weights_pers) # N_{k|k} + update = copy(prediction) + # Normalize weights + update.weight = Probability.from_log_ufunc(log_post_weights_pers - log_num_targets_pers) + if self.resampler is not None: + update = self.resampler.resample(update, self.num_samples) # Resample + # De-normalize + update.weight = Probability.from_log_ufunc(np.log(update.weight).astype(float) + + log_num_targets_pers) + + if len(detections): + # Resample birth + log_num_targets_birth = logsumexp(log_post_weights_birth) # N_{k|k} + update2 = copy(birth_state) + # Normalize weights + update2.weight = Probability.from_log_ufunc( + log_post_weights_birth - log_num_targets_birth) + if self.resampler is not None: + update2 = self.resampler.resample(update2, + update2.state_vector.shape[1]) # Resample + # De-normalize + update2.weight = Probability.from_log_ufunc(np.log(update2.weight).astype(float) + + log_num_targets_birth) + + full_update = Update.from_state( + state=prediction, + state_vector=StateVectors(np.hstack((update.state_vector, update2.state_vector))), + weight=np.hstack((update.weight, update2.weight)), + particle_list=None, + hypothesis=hypothesis, + timestamp=timestamp) + else: + full_update = Update.from_state( + state=prediction, + state_vector=update.state_vector, + weight=update.weight, + particle_list=None, + hypothesis=hypothesis, + timestamp=timestamp) + full_update.birth_idx = [i for i in range(len(update.weight), len(full_update.weight))] + return full_update + + def get_birth_state(self, prediction, detections, timestamp): + # Sample birth particles + num_birth = round(float(self.prob_birth) * self.num_samples) + birth_particles = np.zeros((prediction.state_vector.shape[0], 0)) + birth_weights = np.zeros((0,)) + if len(detections): + num_birth_per_detection = num_birth // len(detections) + for i, detection in enumerate(detections): + if i == len(detections) - 1: + num_birth_per_detection += num_birth % len(detections) + mu = self.birth_density.mean + mu[0::2] = detection.state_vector + cov = self.birth_density.covar + cov[0::2, 0::2] = detection.measurement_model.covar() + birth_particles_i = multivariate_normal.rvs(mu.ravel(), + cov, + num_birth_per_detection).T + birth_weights_i = np.full((num_birth_per_detection,), + Probability(self.birth_rate / num_birth)) + if self.scale_birth_weights: + birth_weights_i *= multivariate_normal.pdf(birth_particles_i.T, + mu.ravel(), + cov, + allow_singular=True) + birth_particles = np.hstack((birth_particles, birth_particles_i)) + birth_weights = np.hstack((birth_weights, birth_weights_i)) + else: + birth_particles = multivariate_normal.rvs(self.birth_density.mean.ravel(), + self.birth_density.covar, + num_birth).T + birth_weights = multivariate_normal.pdf(birth_particles.T, + self.birth_density.mean.ravel(), + self.birth_density.covar, + allow_singular=True) * Probability( + self.birth_rate / num_birth) + # birth_weights = np.full((num_birth,), Probability(self.birth_rate / num_birth)) + birth_particles = StateVectors(birth_particles) + birth_state = Prediction.from_state(prediction, + state_vector=birth_particles, + weight=birth_weights, + timestamp=timestamp, particle_list=None, + 
transition_model=self.transition_model) + return birth_state + + def get_weights_per_hypothesis(self, prediction, detections, meas_weights, birth_state, + *args, **kwargs): + num_samples = prediction.state_vector.shape[1] + if meas_weights is None: + meas_weights = np.array([Probability(1) for _ in range(len(detections))]) + + # Compute g(z|x) matrix as in [1] + g = self.get_measurement_loglikelihoods(prediction, detections, meas_weights) + + # Get probability of detection + prob_detect = np.asfarray(self.prob_detect(prediction)) + + # Calculate w^{n,i} Eq. (20) of [2] + try: + Ck = np.log(prob_detect[:, np.newaxis]) + g \ + + np.log(prediction.weight[:, np.newaxis].astype(float)) + except IndexError: + Ck = np.log(prob_detect) + g \ + + np.log(prediction.weight[:, np.newaxis].astype(float)) + C = logsumexp(Ck, axis=0) + Ck_birth = np.tile(np.log(np.asfarray(birth_state.weight)[:, np.newaxis]), len(detections)) + C_birth = logsumexp(Ck_birth, axis=0) + + k = np.log([detection.metadata['clutter_density'] + if 'clutter_density' in detection.metadata else self.clutter_intensity + for detection in detections]) + C_plus = np.logaddexp(C, k) + L = np.logaddexp(C_plus, C_birth) + + weights_per_hyp = np.full((num_samples + birth_state.state_vector.shape[1], + len(detections) + 1), -np.inf) + weights_per_hyp[:num_samples, 0] = np.log(1 - prob_detect) + np.log( + np.asfarray(prediction.weight)) + if len(detections): + weights_per_hyp[:num_samples, 1:] = np.log(np.asfarray(meas_weights)) + Ck - L + weights_per_hyp[num_samples:, 1:] = np.log(np.asfarray(meas_weights)) + Ck_birth - L + return Probability.from_log_ufunc(weights_per_hyp) + + +class SMCPHDInitiator(Initiator): + filter: SMCPHDFilter = Property(doc='The PHD filter') + prior: Any = Property(doc='The prior state') + threshold: Probability = Property(doc='The threshold probability for initiation', + default=Probability(0.9)) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._state = self.prior + + def initiate(self, detections, timestamp, weights=None, **kwargs): + tracks = set() + + if self._state.timestamp is None: + self._state.timestamp = timestamp + # Predict forward + prediction = self.filter.predict(self._state, timestamp) + + # Calculate weights per hypothesis + weights_per_hyp = self.filter.get_weights_per_hypothesis(prediction, detections, weights) + log_weights_per_hyp = np.log(weights_per_hyp).astype(float) + + # Calculate intensity per hypothesis + log_intensity_per_hyp = logsumexp(log_weights_per_hyp, axis=0) + + # Find detections with intensity above threshold and initiate + valid_inds = np.flatnonzero(np.exp(log_intensity_per_hyp) > self.threshold) + for idx in valid_inds: + if not idx: + continue + + particles_sv = copy(prediction.state_vector) + weight = np.exp(log_weights_per_hyp[:, idx] - log_intensity_per_hyp[idx]) + + mu = np.average(particles_sv, + axis=1, + weights=weight) + cov = np.cov(particles_sv, ddof=0, aweights=weight) + + hypothesis = SingleProbabilityHypothesis(prediction, + measurement=detections[idx - 1], + probability=weights_per_hyp[:, idx]) + + track_state = GaussianStateUpdate(mu, cov, hypothesis=hypothesis, + timestamp=timestamp) + + # if np.trace(track_state.covar) < 10: + weights_per_hyp[:, idx] = Probability(0) + track = Track([track_state]) + track.exist_prob = Probability(log_intensity_per_hyp[idx], log_value=True) + tracks.add(track) + + weights[idx - 1] = 0 + + # Update filter + self._state = self.filter.update(prediction, detections, timestamp, weights) + +
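+ # NOTE: measurement weights for initiated detections were zeroed above, so the + # filter update above effectively ignores those detections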
return tracks + + +class ISMCPHDInitiator(SMCPHDInitiator): + filter: ISMCPHDFilter = Property(doc='The PHD filter') + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._state = self.prior + + def initiate(self, detections, timestamp, weights=None, **kwargs): + tracks = set() + + if self._state.timestamp is None: + self._state.timestamp = timestamp + # Predict forward + prediction = self.filter.predict(self._state, timestamp) + + # Calculate weights per hypothesis + birth_state = self.filter.get_birth_state(prediction, detections, timestamp) + weights_per_hyp = self.filter.get_weights_per_hypothesis(prediction, detections, weights, + birth_state) + log_weights_per_hyp = np.log(weights_per_hyp[:self.filter.num_samples, :]).astype(float) + + # Calculate intensity per hypothesis + log_intensity_per_hyp = logsumexp(log_weights_per_hyp, axis=0) + # Find detections with intensity above threshold and initiate + valid_inds = np.flatnonzero(np.exp(log_intensity_per_hyp) > self.threshold) + for idx in valid_inds: + if not idx: + continue + + particles_sv = copy( + prediction.state_vector[:, :len(prediction) - len(prediction.birth_idx)]) + weight = np.exp( + log_weights_per_hyp[:self.filter.num_samples, idx] - log_intensity_per_hyp[idx]) + + mu = np.average(particles_sv, + axis=1, + weights=weight) + cov = np.cov(particles_sv, ddof=0, aweights=weight) + + hypothesis = SingleProbabilityHypothesis(prediction, + measurement=detections[idx - 1], + probability=weights_per_hyp[ + :self.filter.num_samples, idx]) + + track_state = GaussianStateUpdate(mu, cov, hypothesis=hypothesis, + timestamp=timestamp) + + # if np.trace(track_state.covar) < 10: + weights_per_hyp[:, idx] = Probability(0) + track = Track([track_state]) + track.exist_prob = Probability(log_intensity_per_hyp[idx], log_value=True) + tracks.add(track) + + weights[idx - 1] = 0 + + # Update filter + self._state = self.filter.update(prediction, detections, timestamp, weights) + + return tracks diff --git a/stonesoup/custom/initiator/twostate.py b/stonesoup/custom/initiator/twostate.py new file mode 100644 index 000000000..b67146be5 --- /dev/null +++ b/stonesoup/custom/initiator/twostate.py @@ -0,0 +1,112 @@ +import numpy as np + +from ...base import Base, Property +from ...models.base import LinearModel, ReversibleModel +from ...models.transition import TransitionModel +from ...types.state import GaussianState, State +from ...updater import Updater +from ..functions import predict_state_to_two_state, nearestPD, isPD +from ...types.hypothesis import SingleProbabilityHypothesis +from ...types.track import Track +from ...types.numeric import Probability + +from ..types.state import TwoStateGaussianState + + +class TwoStateInitiator(Base): + """Initiates two-state (fused) tracks by updating a two-state prior with each detection""" + + def __init__(self, *args, **kwargs): + super(TwoStateInitiator, self).__init__(*args, **kwargs) + self._max_track_id = 0 + + prior: GaussianState = Property(doc='The prior used to initiate fused tracks') + transition_model: TransitionModel = Property(doc='The transition model') + updater: Updater = Property(doc='Updater used to update fused tracks') + + def initiate(self, detections, start_time, end_time, **kwargs): + init_mean = self.prior.mean + init_cov = self.prior.covar + init_mean, init_cov = predict_state_to_two_state(init_mean, init_cov, + self.transition_model, + end_time - start_time) + + prior = TwoStateGaussianState(init_mean, init_cov, start_time=start_time, + end_time=end_time) + new_tracks = set() + for detection in detections:
+ hyp = SingleProbabilityHypothesis(prediction=prior, measurement=detection, + probability=Probability(1.0)) + state = self.updater.update(hyp) + track = Track([state], id=self._max_track_id) + track.exist_prob = Probability(1) + self._max_track_id += 1 + new_tracks.add(track) + + return new_tracks + + + +class TwoStateMeasurementInitiator(TwoStateInitiator): + + skip_non_reversible: bool = Property(default=False) + diag_load: float = Property(default=0.0, doc="Positive float value for diagonal loading") + + def initiate(self, detections, start_time, end_time, **kwargs): + + new_tracks = set() + for detection in detections: + measurement_model = detection.measurement_model + + if isinstance(measurement_model, LinearModel): + model_matrix = measurement_model.matrix() + inv_model_matrix = np.linalg.pinv(model_matrix) + state_vector = inv_model_matrix @ detection.state_vector + else: + if isinstance(measurement_model, ReversibleModel): + try: + state_vector = measurement_model.inverse_function(detection) + except NotImplementedError: + if not self.skip_non_reversible: + raise + else: + continue + model_matrix = measurement_model.jacobian(State(state_vector)) + inv_model_matrix = np.linalg.pinv(model_matrix) + elif self.skip_non_reversible: + continue + else: + raise TypeError("Invalid measurement model used. Must be an instance " + "of a linear or reversible model.") + + model_covar = measurement_model.covar() + + init_mean = self.prior.state_vector.copy() + init_cov = self.prior.covar.copy() + + init_mean, init_cov = predict_state_to_two_state(init_mean, init_cov, + self.transition_model, + end_time - start_time) + mapped_dimensions = measurement_model.mapping + + init_mean[mapped_dimensions, :] = 0 + # Zero both rows and columns so that the prior covariance stays symmetric + init_cov[mapped_dimensions, :] = 0 + init_cov[:, mapped_dimensions] = 0 + C0 = inv_model_matrix @ model_covar @ inv_model_matrix.T + C0 = C0 + init_cov + np.diag(np.array([self.diag_load] * C0.shape[0])) + if not isPD(C0): + C0 = nearestPD(C0) + init_mean = init_mean + state_vector + prior = TwoStateGaussianState(init_mean, C0, start_time=start_time, + end_time=end_time) + hyp = SingleProbabilityHypothesis(prediction=prior, measurement=detection, + probability=Probability(1.0)) + state = self.updater.update(hyp) + track = Track([state], id=self._max_track_id) + track.exist_prob = Probability(1) + self._max_track_id += 1 + new_tracks.add(track) + + return new_tracks + + + diff --git a/stonesoup/custom/models/__init__.py b/stonesoup/custom/models/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/models/measurement/__init__.py b/stonesoup/custom/models/measurement/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/models/measurement/linear.py b/stonesoup/custom/models/measurement/linear.py new file mode 100644 index 000000000..75d9f12ea --- /dev/null +++ b/stonesoup/custom/models/measurement/linear.py @@ -0,0 +1,97 @@ +from stonesoup.base import Property +from stonesoup.models.base import LinearModel, GaussianModel +from stonesoup.models.measurement import MeasurementModel +from stonesoup.types.array import Matrix, CovarianceMatrix + + +class LinearGaussianPredefinedH(MeasurementModel, LinearModel, GaussianModel): + r"""This is a class implementation of a time-invariant + Linear-Gaussian measurement model with a predefined (user-supplied) model matrix. + + The model is described by the following equations: + + .. math:: + + y_t = H_k*x_t + v_k,\ \ \ \ v(k)\sim \mathcal{N}(0,R) + + where ``H_k`` is a (:py:attr:`~ndim_meas`, :py:attr:`~ndim_state`) \ + matrix and ``v_k`` is Gaussian distributed.
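+ + Example (a minimal sketch; the 2x4 ``h_matrix``, ``noise_covar`` and ``mapping`` + values below are illustrative only): + + >>> import numpy as np + >>> model = LinearGaussianPredefinedH( + ... h_matrix=Matrix([[1., 0., 0., 0.], [0., 0., 1., 0.]]), + ... noise_covar=CovarianceMatrix(0.5 * np.eye(2)), + ... mapping=(0, 2)) + >>> model.ndim_state, model.ndim_meas + (4, 2)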
+ + """ + + h_matrix: Matrix = Property(doc="The model matrix") + noise_covar: CovarianceMatrix = Property(doc="Noise covariance") + + @property + def ndim_state(self): + """ndim_meas getter method + + Returns + ------- + :class:`int` + The number of measurement dimensions + """ + + return self.h_matrix.shape[1] + + @property + def ndim_meas(self): + """ndim_meas getter method + + Returns + ------- + :class:`int` + The number of measurement dimensions + """ + + return len(self.mapping) + + def matrix(self, **kwargs): + """Model matrix :math:`H(t)` + + Returns + ------- + :class:`numpy.ndarray` of shape \ + (:py:attr:`~ndim_meas`, :py:attr:`~ndim_state`) + The model matrix evaluated given the provided time interval. + """ + + return self.h_matrix + + def function(self, state, noise=False, **kwargs): + """Model function :math:`h(t,x(t),w(t))` + + Parameters + ---------- + state: :class:`~.State` + An input state + noise: :class:`numpy.ndarray` or bool + An externally generated random process noise sample (the default is + `False`, in which case no noise will be added + if 'True', the output of :meth:`~.Model.rvs` is added) + + Returns + ------- + :class:`numpy.ndarray` of shape (:py:attr:`~ndim_meas`, 1) + The model function evaluated given the provided time interval. + """ + + if isinstance(noise, bool) or noise is None: + if noise: + noise = self.rvs() + else: + noise = 0 + + return self.matrix(**kwargs)@state.state_vector + noise + + def covar(self, **kwargs): + """Returns the measurement model noise covariance matrix. + + Returns + ------- + :class:`~.CovarianceMatrix` of shape\ + (:py:attr:`~ndim_meas`, :py:attr:`~ndim_meas`) + The measurement noise covariance. + """ + + return self.noise_covar \ No newline at end of file diff --git a/stonesoup/custom/predictor/__init__.py b/stonesoup/custom/predictor/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/predictor/twostate.py b/stonesoup/custom/predictor/twostate.py new file mode 100644 index 000000000..4e8d875ff --- /dev/null +++ b/stonesoup/custom/predictor/twostate.py @@ -0,0 +1,29 @@ +from stonesoup.predictor import Predictor +from stonesoup.predictor._utils import predict_lru_cache +from stonesoup.types.prediction import Prediction + +from stonesoup.custom.functions import predict_state_to_two_state + +class TwoStatePredictor(Predictor): + + @predict_lru_cache() + def predict(self, prior, current_end_time=None, new_start_time=None, new_end_time=None, + **kwargs): + statedim = self.transition_model.ndim_state + mu = prior.mean[-statedim:] + C = prior.covar[-statedim:, -statedim:] + if new_start_time > current_end_time: + dt = new_start_time - current_end_time + A = self.transition_model.matrix(time_interval=dt) + Q = self.transition_model.covar(time_interval=dt) + mu = A @ mu + C = A @ C @ A.T + Q + elif new_start_time < current_end_time: + raise ValueError('newStartTime < currentEndTime - scan times messed up!') + + two_state_mu, two_state_cov = predict_state_to_two_state(mu, C, self.transition_model, + new_end_time - new_start_time) + + return Prediction.from_state(prior, two_state_mu, two_state_cov, + start_time=new_start_time, + end_time=new_end_time) diff --git a/stonesoup/custom/reader/__init__.py b/stonesoup/custom/reader/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/reader/track.py b/stonesoup/custom/reader/track.py new file mode 100644 index 000000000..2cb7e13a6 --- /dev/null +++ b/stonesoup/custom/reader/track.py @@ -0,0 +1,61 @@ +import 
threading +from copy import copy + +from ...base import Property +from ...models.transition import TransitionModel +from ...reader.base import Reader +from ...tracker.base import Tracker +from ..types.tracklet import SensorTracks +from ...buffered_generator import BufferedGenerator + + +class TrackReader(Reader): + tracker: Tracker = Property(doc='Tracker from which to read tracks') + run_async: bool = Property( + doc="If set to ``True``, the reader will read tracks from the tracker asynchronously " + "and only yield the latest set of tracks when iterated. Defaults to ``False``", + default=False) + transition_model: TransitionModel = Property(doc='Transition model used by the tracker', + default=None) + sensor_id: str = Property(doc='The id of the sensor', default=None) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + # Variables used in async mode + if self.run_async: + self._buffer = None + # Initialise frame capture thread + self._capture_thread = threading.Thread(target=self._capture) + self._capture_thread.daemon = True + self._thread_lock = threading.Lock() + self._capture_thread.start() + + @property + def tracks(self): + return self.current[1] + + @BufferedGenerator.generator_method + def tracks_gen(self): + if self.run_async: + yield from self._tracks_gen_async() + else: + yield from self._tracks_gen() + + def _capture(self): + for timestamp, tracks in self.tracker: + self._thread_lock.acquire() + self._buffer = (timestamp, SensorTracks(tracks, self.sensor_id, self.transition_model)) + self._thread_lock.release() + + def _tracks_gen(self): + for timestamp, tracks in self.tracker: + yield timestamp, SensorTracks(tracks, self.sensor_id, self.transition_model) + + def _tracks_gen_async(self): + while self._capture_thread.is_alive(): + if self._buffer is not None: + self._thread_lock.acquire() + timestamp, tracks = copy(self._buffer) + self._buffer = None + self._thread_lock.release() + yield timestamp, tracks diff --git a/stonesoup/custom/reader/tracklet.py b/stonesoup/custom/reader/tracklet.py new file mode 100644 index 000000000..2e4396a5e --- /dev/null +++ b/stonesoup/custom/reader/tracklet.py @@ -0,0 +1,666 @@ +from typing import List +import datetime +from copy import deepcopy + +import numpy as np +from scipy.linalg import block_diag, inv + +from .track import TrackReader +from ...base import Base, Property +from ...buffered_generator import BufferedGenerator +from ...detector import Detector +from ...models.transition.base import TransitionModel +from ..models.measurement.linear import LinearGaussianPredefinedH +from ...tracker.base import Tracker +from ...types.mixture import GaussianMixture +from ...types.numeric import Probability +from ..types.prediction import TwoStateGaussianStatePrediction, Prediction +from ..types.tracklet import Tracklet, SensorTracklets, Scan, SensorScan +from ...types.state import GaussianState +from ...types.array import StateVector +from ...types.detection import Detection +from ..functions import predict_state_to_two_state +from ...predictor.kalman import ExtendedKalmanPredictor +from ..types.update import TwoStateGaussianStateUpdate, Update + + +class TrackletExtractor(Base, BufferedGenerator): + transition_model: TransitionModel = Property(doc='Transition model') + fuse_interval: datetime.timedelta = Property(doc='Fusion interval') + trackers: List[Tracker] = Property( + doc='List of trackers from which to extract tracks.', + default=None + ) + real_time: bool = Property(doc='Flag indicating whether the extractor 
should report '
+                                             'real time', default=False)
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._tracklets = []
+        self._fuse_times = []
+
+    @property
+    def tracklets(self):
+        return self.current[1]
+
+    @BufferedGenerator.generator_method
+    def tracklets_gen(self):
+        """Returns a generator of tracklets for each fusion time step.
+
+        Yields
+        ------
+        : :class:`datetime.datetime`
+            Datetime of current time step
+        : list of :class:`~.SensorTracklets`
+            Tracklets extracted up to the time step
+        """
+        for data in zip(*self.trackers):
+            timestamp = data[0][0]
+            alltracks = [d[1] for d in data]
+            if self.real_time:
+                timestamp = datetime.datetime.now()
+            if not len(self._fuse_times) or timestamp - self._fuse_times[-1] >= self.fuse_interval:
+                # Append current fuse time to fuse times
+                self._fuse_times.append(timestamp)
+                yield timestamp, self.get_tracklets_seq(alltracks, timestamp)
+
+    def extract(self, alltracks, timestamp):
+        if not len(self._fuse_times) or timestamp - self._fuse_times[-1] >= self.fuse_interval:
+            # Append current fuse time to fuse times
+            self._fuse_times.append(timestamp)
+            self._tracklets = self.get_tracklets_seq(alltracks, timestamp)
+        self.current = (timestamp, self._tracklets)
+        return self._tracklets
+
+    def get_tracklets_seq(self, alltracks, timestamp):
+        # Iterate over the local tracks of each sensor
+        for sensor_tracks in alltracks:
+            sensor_id = sensor_tracks.sensor_id
+            # Get tracklets for sensor
+            idx = next((i for i, t in enumerate(self._tracklets)
+                        if t.sensor_id == sensor_id), None)
+            sensor_tracklets = self._tracklets[idx] if idx is not None else []
+            # Temporary tracklet list
+            tracklets_tmp = []
+            # Transition model
+            transition_model = self.transition_model
+            if sensor_tracks.transition_model is not None:
+                transition_model = sensor_tracks.transition_model
+            # For each local track
+            for track in sensor_tracks:
+                tracklet = next((t for t in sensor_tracklets if track.id == t.id), None)
+                # If the tracklet doesn't already exist
+                if tracklet is None and len(self._fuse_times) > 1:
+                    # Create it
+                    tracklet = self.init_tracklet(track, transition_model,
+                                                  np.array(self._fuse_times), sensor_id)
+                elif tracklet is not None:
+                    # Else simply augment it
+                    self.augment_tracklet(tracklet, track, transition_model, timestamp)
+                # Append tracklet to temporary tracklets
+                if tracklet:
+                    tracklets_tmp.append(tracklet)
+            # If a tracklet set for the sensor doesn't already exist
+            if idx is None:
+                # Add it
+                self._tracklets.append(SensorTracklets(tracklets_tmp, sensor_id))
+            else:
+                # Else replace the existing one
+                self._tracklets[idx] = SensorTracklets(tracklets_tmp, sensor_id)
+        # Return the stored tracklets
+        return self._tracklets
+
+    def get_tracklets_batch(self, alltracks, fuse_times):
+        tracklets = []
+        for tracks in alltracks:
+            tracklets_tmp = []
+            # Transition model
+            transition_model = self.transition_model
+            if tracks.transition_model is not None:
+                transition_model = tracks.transition_model
+            for track in tracks:
+                tracklet = self.init_tracklet(track, transition_model, fuse_times)
+                if tracklet:
+                    tracklets_tmp.append(tracklet)
+            tracklets.append(tracklets_tmp)
+        return tracklets
+
+    # PRH: new init_tracklet and augment_tracklet functions - should be the same for single
+    # and two-state trackers
+    @classmethod
+    def init_tracklet(cls, track, tx_model, fuse_times, sensor_id=None):
+        track_times = np.array([s.timestamp for s in track])
+        idx0 = np.flatnonzero(fuse_times >= track_times[0])
+        idx1 = np.flatnonzero(fuse_times <= track_times[-1])
+
+        if not len(idx0) or not len(idx1):
+            return None
+        else:
+            idx0 = idx0[0]
+            idx1 = idx1[-1]
+
+        states = []
+        cnt = 0
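+        # Each fusion interval [fuse_times[i], fuse_times[i + 1]] that contains at least one
+        # filter update produces a two-state (interval start, interval end) prior/posterior
+        # pair below; these pairs are what the downstream PseudoMeasExtractor consumes.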
+        for i in range(idx0, idx1):
+            start_time = fuse_times[i]
+            end_time = fuse_times[i + 1]
+            nupd = np.sum(np.logical_and(track_times > start_time, track_times <= end_time))
+            if nupd > 0:
+                cnt += 1
+                # Indices of end-states that are just before the start and end times
+                ind0 = np.flatnonzero(track_times <= start_time)[-1]
+                ind1 = np.flatnonzero(track_times <= end_time)[-1]
+
+                post_mean, post_cov, prior_mean, prior_cov = \
+                    cls.get_interval_dist(track[ind0:ind1 + 1], tx_model, start_time, end_time)
+
+                prior = TwoStateGaussianStatePrediction(prior_mean, prior_cov,
+                                                        start_time=start_time,
+                                                        end_time=end_time)
+                posterior = TwoStateGaussianStateUpdate(post_mean, post_cov,
+                                                        hypothesis=None,
+                                                        start_time=start_time,
+                                                        end_time=end_time)
+
+                states.append(prior)
+                states.append(posterior)
+
+        if not cnt:
+            return None
+
+        tracklet = Tracklet(id=track.id, states=states, init_metadata={'sensor_id': sensor_id})
+
+        return tracklet
+
+    def augment_tracklet(self, tracklet, track, transition_model, timestamp):
+        track_times = np.array([s.timestamp for s in track])
+
+        start_time = tracklet.states[-1].timestamp
+        end_time = timestamp
+        nupd = np.sum(np.logical_and(track_times > start_time, track_times <= end_time))
+        if nupd > 0:
+            # Indices of end-states that are just before the start and end times
+            ind0 = np.flatnonzero(track_times <= start_time)[-1]
+            ind1 = np.flatnonzero(track_times <= end_time)[-1]
+
+            post_mean, post_cov, prior_mean, prior_cov = \
+                self.get_interval_dist(track[ind0:ind1 + 1], transition_model,
+                                       start_time, end_time)
+
+            prior = TwoStateGaussianStatePrediction(prior_mean, prior_cov,
+                                                    start_time=start_time,
+                                                    end_time=end_time)
+            posterior = TwoStateGaussianStateUpdate(post_mean, post_cov,
+                                                    hypothesis=None,
+                                                    start_time=start_time,
+                                                    end_time=end_time)
+            tracklet.states.append(prior)
+            tracklet.states.append(posterior)
+
+    @classmethod
+    def get_interval_dist(cls, filtered_states, transition_model, start_time, end_time):
+
+        # Get filtered distributions at start and end of interval
+        predictor = ExtendedKalmanPredictor(transition_model)
+
+        start_state = predictor.predict(filtered_states[0], start_time)
+        end_state = predictor.predict(filtered_states[-1], end_time)
+
+        # Predict prior mean
+        prior_mean, prior_cov = predict_state_to_two_state(start_state.mean, start_state.covar,
+                                                           transition_model,
+                                                           end_time - start_time)
+
+        # Get posterior mean by running the smoother over the interval end-points
+        mn = np.concatenate([start_state.mean] + [x.mean for x in filtered_states[1:-1]]
+                            + [end_state.mean], 1)
+        cv = np.stack([start_state.covar] + [x.covar for x in filtered_states[1:-1]]
+                      + [end_state.covar], 2)
+        t = [start_time] +
[x.timestamp for x in filtered_states[1:-1]] + [end_time] + post_mean, post_cov = cls.rts_smoother_endpoints(mn, cv, t, transition_model) + + return post_mean, post_cov, prior_mean, prior_cov + + @classmethod + def rts_smoother_endpoints(cls, filtered_means, filtered_covs, times, tx_model): + statedim, ntimesteps = filtered_means.shape + + joint_smoothed_mean = np.tile(filtered_means[:, -1], (1, 2)).T + joint_smoothed_cov = np.tile(filtered_covs[:, :, -1], (2, 2)) + + for k in reversed(range(ntimesteps - 1)): + dt = times[k + 1] - times[k] + A = tx_model.matrix(time_interval=dt) + Q = tx_model.covar(time_interval=dt) + # Filtered distribution + m = filtered_means[:, k][:, np.newaxis] + P = filtered_covs[:, :, k] + # Get transition model x_{k+1} -> x_k + # p(x_k | x_{k+1}, y_{1:T}) = Norm(x_k; Fx_{k+1} + b, Omega) + F = P @ A.T @ inv(A @ P @ A.T + Q) + b = m - F @ A @ m + Omega = P - F @ (A @ P @ A.T + Q) @ F.T + # Two-state transition model (x_{k+1}, x_T) -> (x_k, x_T) + F2 = block_diag(F, np.eye(statedim)) + b2 = np.concatenate((b, np.zeros((statedim, 1)))) + Omega2 = block_diag(Omega, np.zeros((statedim, statedim))) + # Predict back + joint_smoothed_mean = F2 @ joint_smoothed_mean + b2 + joint_smoothed_cov = F2 @ joint_smoothed_cov @ F2.T + Omega2 + return joint_smoothed_mean, joint_smoothed_cov + +class PseudoMeasExtractor(Base, BufferedGenerator): + tracklet_extractor: TrackletExtractor = Property(doc='The tracket extractor', default=None) + target_state_dim: int = Property(doc='The target state dim', default=None) + state_idx_to_use: List[int] = Property(doc='The indices of the state corresponding to pos/vel', + default=None) + use_prior: bool = Property(doc="", default=False) + + def __init__(self, *args, **kwargs): + super(PseudoMeasExtractor, self).__init__(*args, **kwargs) + self._last_scan = None + + @property + def scans(self): + return self.current[1] + + @BufferedGenerator.generator_method + def scans_gen(self): + """Returns a generator of detections for each time step. 
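+
+        A minimal usage sketch (illustrative only; ``tracklet_extractor`` is assumed to be a
+        configured :class:`~.TrackletExtractor` driving this object)::
+
+            extractor = PseudoMeasExtractor(tracklet_extractor=tracklet_extractor)
+            for timestamp, scans in extractor.scans_gen():
+                for scan in scans:
+                    detections = [d for ss in scan.sensor_scans for d in ss.detections]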
+ + Yields + ------ + : :class:`datetime.datetime` + Datetime of current time step + : set of :class:`~.Detection` + Detections generate in the time step + """ + for timestamp, tracklets in self.tracklet_extractor: + scans = self.get_scans_from_tracklets(tracklets, timestamp) + yield timestamp, scans + # for scan in scans: + # yield timestamp, scan + + def extract(self, tracklets, timestamp): + scans = self.get_scans_from_tracklets(tracklets, timestamp) + self.current = timestamp, scans + return scans + + def get_scans_from_tracklets(self, tracklets, timestamp): + measdata = self.get_pseudomeas(tracklets) + scans = self.get_scans_from_measdata(measdata) + # if not len(scans): + # scans = [Scan(start_time=self._last_scan, + # end_time=timestamp, + # sensor_scans=[])] + self._last_scan = timestamp + # Sort the scans by start time + scans.sort(key=lambda x: x.start_time) + return scans + + def get_pseudomeas(self, all_tracklets): + measurements = [] + for i, tracklets in enumerate(all_tracklets): + for j, tracklet in enumerate(tracklets): + measdata = self.get_pseudomeas_from_tracklet(tracklet, i, self._last_scan) + measurements += measdata + # Sort the measurements by end time + measurements.sort(key=lambda x: x.end_time) + return measurements + + @classmethod + def get_scans_from_measdata(cls, measdata): + if not len(measdata): + return [] + + start = np.min([m.start_time for m in measdata]) + times = np.array([[(m.end_time - start).total_seconds(), + (m.start_time - start).total_seconds()] for m in measdata]) + true_times = np.array([[m.end_time, m.start_time] for m in measdata]) + end_start_times, idx = np.unique(times, return_index=True, axis=0) + idx2 = [] + for previous, current in zip(idx, idx[1:]): + idx2.append([i for i in range(previous, current)]) + else: + idx2.append([i for i in range(idx[-1], len(measdata))]) + nscans = len(idx) + + scans = [] + for i in range(nscans): + thesescans = [measdata[j] for j in idx2[i]] + if not len(thesescans): + continue + start_time = true_times[idx[i], 1] # end_start_times[i, 1] + end_time = true_times[idx[i], 0] + sens_ids = [m.metadata['sensor_id'] for m in thesescans] + sens_ids, sidx = np.unique(sens_ids, return_index=True) + sidx2 = [] + for previous, current in zip(sidx, sidx[1:]): + sidx2.append([i for i in range(previous, current)]) + else: + sidx2.append([i for i in range(sidx[-1], len(thesescans))]) + nsensscans = len(sidx) + scan = Scan(start_time, end_time, []) + for s in range(nsensscans): + sensor_id = sens_ids[s] + sscan = SensorScan(sensor_id, []) + sscan.detections = [thesescans[j] for j in sidx2[s]] + for detection in sscan.detections: + detection.metadata['scan_id'] = scan.id + detection.metadata['sensor_scan_id'] = sscan.id + detection.metadata['clutter_density'] = Probability(-70, log_value=True) + scan.sensor_scans.append(sscan) + scans.append(scan) + return scans + + def get_pseudomeas_from_tracklet(self, tracklet, sensor_id, last_scan=None): + + priors = [s for s in tracklet.states if isinstance(s, Prediction)] + posteriors = [s for s in tracklet.states if isinstance(s, Update)] + + if last_scan is None: + inds = [i for i in range(len(posteriors))] + else: + inds = [i for i, p in enumerate(posteriors) if p.timestamp > last_scan] + + measdata = [] + + state_dim = posteriors[-1].state_vector.shape[0] + if self.state_idx_to_use is not None: + state_idx = list(self.state_idx_to_use) + offset = state_dim//2 + for i in self.state_idx_to_use: + state_idx.append(offset+i) + else: + state_idx = [i for i in range(state_dim)] + + for 
k in inds:
+            post_mean = posteriors[k].mean[state_idx, :]
+            post_cov = posteriors[k].covar[state_idx, :][:, state_idx]
+            prior_mean = priors[k].mean[state_idx, :]
+            prior_cov = priors[k].covar[state_idx, :][:, state_idx]
+
+            H, z, R, _ = self.get_pseudomeasurement(post_mean, post_cov, prior_mean, prior_cov)
+
+            if len(H):
+                num_rows, num_cols = H.shape
+                if self.target_state_dim is not None:
+                    # Add zero columns for bias state indices
+                    col_diff = (self.target_state_dim - num_cols) // 2
+                    H1 = H[:, :num_cols // 2]
+                    H2 = H[:, num_cols // 2:]
+                    H1 = np.append(H1, np.zeros((H.shape[0], col_diff)), axis=1)
+                    H2 = np.append(H2, np.zeros((H.shape[0], col_diff)), axis=1)
+                    H = np.append(H1, H2, axis=1)
+                meas_model = LinearGaussianPredefinedH(h_matrix=H, noise_covar=R,
+                                                       mapping=[i for i in range(H.shape[0])])
+                # Copy the metadata so that the keys added below do not mutate the
+                # tracklet's own metadata dict
+                detection = Detection(state_vector=StateVector(z), measurement_model=meas_model,
+                                      timestamp=posteriors[k].timestamp,
+                                      metadata=dict(tracklet.metadata))
+                detection.metadata['track_id'] = tracklet.id
+                detection.start_time = posteriors[k].start_time
+                detection.end_time = posteriors[k].end_time
+                measdata.append(detection)
+
+        return measdata
+
+    def get_pseudomeasurement(self, mu1, C1, mu2, C2):
+        # The pseudo-measurement (H, z, R) is constructed so that N(z; Hx, R) is
+        # proportional to N(x; mu1, C1) / N(x; mu2, C2), i.e. it carries the information
+        # gained between the prior (mu2, C2) and the posterior (mu1, C1)
+        eigthresh = 1e-6
+        matthresh = 1e-6
+
+        statedim = mu1.shape[0]
+
+        if not self.use_prior:
+            # Without the prior, the pseudo-measurement is simply the posterior itself
+            H = np.eye(statedim)
+            z = mu1
+            R = C1
+            return H, z, R, None
+
+        invC1 = inv(C1)
+        invC2 = inv(C2)
+        # Ensure inverses are symmetric
+        invC1 = (invC1 + invC1.T) / 2
+        invC2 = (invC2 + invC2.T) / 2
+        invC = invC1 - invC2
+
+        D, v = np.linalg.eig(invC)
+        D = np.diag(D)
+        Htilde = v.T
+        evals = np.diag(D)
+
+        idx = np.flatnonzero(np.abs(evals) > eigthresh)
+
+        H = Htilde[idx, :]
+
+        if np.max(np.abs(C1.flatten() - C2.flatten())) < matthresh:
+            # Discard - matrices too similar
+            H = np.zeros((0, statedim))
+            z = np.zeros((0, 1))
+            R = np.zeros((0, 0))
+            return H, z, R, evals
+
+        if np.all(np.abs(evals) <= eigthresh):
+            # Discard - all eigenvalues zero
+            H = np.zeros((0, statedim))
+            z = np.zeros((0, 1))
+            R = np.zeros((0, 0))
+            return H, z, R, evals
+
+        R = inv(D[idx, :][:, idx])
+        z = R @ (H @ invC1 @ mu1 - H @ invC2 @ mu2)
+
+        # Discard measurement if R is not positive definite
+        try:
+            np.linalg.cholesky(R)
+        except np.linalg.LinAlgError:
+            H = np.zeros((0, statedim))
+            z = np.zeros((0, 1))
+            R = np.zeros((0, 0))
+            return H, z, R, evals
+
+        return H, z, R, evals
+
+
+# =====================================================================================
+
+# PRH: Added code
+
+class TrackletExtractorTwoState(TrackletExtractor):
+
+    @classmethod
+    def get_interval_dist(cls, filtered_states, transition_model, start_time, end_time):
+        """
+        Get the prior (using measurements up to start_time) and posterior (using
+        measurements up to end_time) over the joint (start, end) state from filtered_states.
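+
+        In the smoothing notation assumed here (not spelled out elsewhere in this module),
+        with :math:`y_{1:s}` the measurements up to ``start_time`` and :math:`y_{1:e}` those
+        up to ``end_time``, the prior is :math:`p(x_s, x_e \mid y_{1:s})` and the posterior
+        is :math:`p(x_s, x_e \mid y_{1:e})`.
+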
+        Currently, we assume that the end time of the first state is the start time and the
+        end time of the last state is the end time.
+        """
+
+        # PRH: Assume first and last state times correspond with fuse times for now
+        # TODO: Generalise so start_time could be between the start_time and end_time of
+        #  filtered_states[0], and end_time could be between the start_time and end_time of
+        #  filtered_states[-1]
+        assert (filtered_states[0].end_time == start_time)
+        assert (filtered_states[-1].end_time == end_time)
+
+        # Dimension of a single state (as opposed to a two-state)
+        one_statedim = transition_model.ndim
+
+        # Get single state mean and covariance at start of interval
+        # (to generalise, we could get the mean and covariance at a point in the interval)
+        start_single_state_mean = filtered_states[0].mean[one_statedim:]
+        start_single_state_covar = filtered_states[0].covar[one_statedim:, one_statedim:]
+
+        # Predict prior mean over the time interval
+        prior_mean, prior_cov = predict_state_to_two_state(start_single_state_mean,
+                                                           start_single_state_covar,
+                                                           transition_model,
+                                                           end_time - start_time)
+
+        # Get mean and covariance of the last period up to end_time
+        # (to generalise, could get the two-state distribution between
+        # filtered_states[-1].start_time and end_time)
+        end_two_state_mean = filtered_states[-1].mean
+        end_two_state_covar = filtered_states[-1].covar
+
+        # Get posterior mean by running the smoother (note that we ignore
+        # filtered_states[0], since this covers an interval before start_time)
+        # TODO: Generalise to have a two-state distribution at the start if
+        #  start_time != filtered_states[0].end_time
+        mn = np.concatenate([x.mean for x in filtered_states[1:-1]] + [end_two_state_mean], 1)
+        cv = np.stack([x.covar for x in filtered_states[1:-1]] + [end_two_state_covar], 2)
+        post_mean, post_cov = cls._rts_smoother_endpoints_two_state(mn, cv)
+
+        return post_mean, post_cov, prior_mean, prior_cov
+
+    @classmethod
+    def _rts_smoother_endpoints_two_state(cls, filtered_means, filtered_covs):
+        """
+        Given joint distributions (x[k], x[k+1]) for k=0,...,T-1, compute the joint
+        distribution (x[0], x[T])
+        """
+        two_statedim, ntimesteps = filtered_means.shape
+        statedim = two_statedim // 2
+        smoothed_mean, smoothed_cov = filtered_means[:, -1], filtered_covs[:, :, -1]
+
+        for k in reversed(range(ntimesteps - 1)):
+
+            mu_x = filtered_means[:statedim, k]
+            mu_y = filtered_means[statedim:, k]
+            Pxx = filtered_covs[:statedim, :statedim, k]
+            Pyy = filtered_covs[statedim:, statedim:, k]
+            Pxy = filtered_covs[:statedim, statedim:, k]
+
+            # Backward model for x[k] given x[k+1]: mean F x[k+1] + b, covariance Omega
+            F = Pxy @ inv(Pyy)
+            b = mu_x - F @ mu_y
+            Omega = Pxx - Pxy @ inv(Pyy) @ Pxy.T
+
+            F2 = block_diag(F, np.eye(statedim))
+            b2 = np.concatenate((b, np.zeros((statedim,))))
+            Omega2 = block_diag(Omega, np.zeros((statedim, statedim)))
+            smoothed_mean = F2 @ smoothed_mean + b2
+            smoothed_cov = F2 @ smoothed_cov @ F2.T + Omega2
+
+        return smoothed_mean, smoothed_cov
diff --git a/stonesoup/custom/sensor/__init__.py b/stonesoup/custom/sensor/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/stonesoup/custom/sensor/action/__init__.py b/stonesoup/custom/sensor/action/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/stonesoup/custom/sensor/action/angle.py b/stonesoup/custom/sensor/action/angle.py
new file mode 100644
index 000000000..68331672c
--- /dev/null
+++ b/stonesoup/custom/sensor/action/angle.py
@@ -0,0 +1,148 @@
+from typing import Iterator
+
+import numpy as np
+
+from stonesoup.base import Property
+from stonesoup.custom.functions import get_nearest
+from stonesoup.sensor.action import Action, RealNumberActionGenerator
+from stonesoup.types.angle import Angle
+
+
+class ChangeAngleAction(Action):
+    """The action of changing an angle attribute (e.g. pan or tilt) of a sensor, where the
+    angle is an :class:`~.ActionableProperty`"""
+
+    increasing_angle: bool = Property(
+        default=None, readonly=True,
+        doc="Indicates the direction of change of the angle: ``True`` if increasing, "
+            "``False`` if decreasing, ``None`` if no change is required.")
+
+    def act(self, current_time, timestamp, init_value):
+        """Assumes that duration keeps within the action end time
+
+        Parameters
+        ----------
+        current_time: datetime.datetime
+            Current time
+        timestamp: datetime.datetime
+            Modification of attribute ends at this time stamp
+        init_value: Any
+            Current value of the angle
+
+        Returns
+        -------
+        Any
+            The new value of the angle"""
+
+        if timestamp >= self.end_time:
+            return self.target_value  # target angle
+        else:
+            return init_value  # same angle
+
+
+class ChangePanAction(ChangeAngleAction):
+    pass
+
+
+class ChangeTiltAction(ChangeAngleAction):
+    pass
+
+
+class AngleUAVActionsGenerator(RealNumberActionGenerator):
+    """Generates possible actions for changing an angle (pan or tilt) of a sensor in a given
+    time period."""
+
+    owner: object = Property(doc="Object with `timestamp` and the actionable angle attribute")
+    resolution: Angle = Property(default=np.radians(10), doc="Resolution of action space")
+
+    _action_cls = ChangeAngleAction
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    @property
+    def default_action(self):
+        return self._action_cls(generator=self,
+                                end_time=self.end_time,
+                                target_value=self.current_value,
+                                increasing_angle=None)
+
+    def __call__(self, resolution=None, epsilon=None):
+        """
+        Parameters
+        ----------
+        resolution : Angle
+            Resolution of yielded action target values
+        epsilon: float
+            Tolerance of equality check in iteration
+        """
+        if resolution is not None:
+            self.resolution = resolution
+        if epsilon is not None:
+            self.epsilon = epsilon
+
+    @property
+    def initial_value(self):
+        return self.current_value
+
+    @property
+    def min(self):
+        # Both pan and tilt are limited to +/- 90 degrees here
+        return Angle(-np.pi / 2)
+
+    @property
+    def max(self):
+        # Both pan and tilt are limited to +/- 90 degrees here
+        return Angle(np.pi / 2)
+
+    def __contains__(self, item):
+
+        if isinstance(item, self._action_cls):
+            item = item.target_value
+
+        return self.min <= item <= self.max
+
+    def _get_direction(self, angle):
+        angle = Angle(angle)
+
+        if self.initial_value - self.resolution / 2 \
+                <= angle \
+                <= self.initial_value + self.resolution / 2:
+            return None  # no rotation, target angle achieved
+
+        return angle > self.initial_value
+
+    def __iter__(self) -> Iterator[ChangeAngleAction]:
+        """Returns all possible ChangeAngleAction types"""
+        possible_angles = np.arange(self.min, self.max, self.resolution)
+
+        yield self.default_action
+        for angle in possible_angles:
+            increasing = self._get_direction(angle)
+            yield self._action_cls(generator=self,
+                                   end_time=self.end_time,
+                                   target_value=angle,
+                                   increasing_angle=increasing)
+
+    def action_from_value(self, value):
+        if value not in self:
+            return None
+        possible_angles = np.arange(self.min, self.max, self.resolution)
+        angle = get_nearest(possible_angles, value)
+        increasing = self._get_direction(angle)
+        return self._action_cls(generator=self,
+                                end_time=self.end_time,
+                                target_value=angle,
+                                increasing_angle=increasing)
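+
+# A minimal usage sketch (illustrative only; assumes ``sensor`` exposes pan/tilt as
+# ActionableProperty attributes wired to the generators below):
+#
+#     generators = sensor.actions(timestamp=end_time)
+#     pan_gen = next(g for g in generators if isinstance(g, PanUAVActionsGenerator))
+#     actions = list(pan_gen)                         # default plus one action per grid angle
+#     action = pan_gen.action_from_value(Angle(0.5))  # snaps to the nearest grid angle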
+
+
+class PanUAVActionsGenerator(AngleUAVActionsGenerator):
+    _action_cls = ChangePanAction
+
+
+class TiltUAVActionsGenerator(AngleUAVActionsGenerator):
+    _action_cls = ChangeTiltAction
+
diff --git a/stonesoup/custom/sensor/action/location.py b/stonesoup/custom/sensor/action/location.py
new file mode 100644
index 000000000..d947af892
--- /dev/null
+++ b/stonesoup/custom/sensor/action/location.py
@@ -0,0 +1,123 @@
+from typing import Iterator
+
+import numpy as np
+
+from stonesoup.base import Property
+from stonesoup.custom.functions import get_nearest
+from stonesoup.sensor.action import Action, RealNumberActionGenerator
+from stonesoup.types.array import StateVector
+
+
+class ChangeLocationAction(Action):
+    def act(self, current_time, timestamp, init_value):
+        """Assumes that duration keeps within the action end time
+
+        Parameters
+        ----------
+        current_time: datetime.datetime
+            Current time
+        timestamp: datetime.datetime
+            Modification of attribute ends at this time stamp
+        init_value: Any
+            Current value of the location coordinate
+
+        Returns
+        -------
+        Any
+            The new value of the location coordinate"""
+
+        if timestamp >= self.end_time:
+            return self.target_value  # target location
+        else:
+            return init_value  # same location
+
+
+class LocationActionGenerator(RealNumberActionGenerator):
+    """Generates possible actions for changing the location of a sensor in a given
+    time period."""
+
+    owner: object = Property(doc="Object with `timestamp` and the actionable location "
+                                 "attribute")
+    resolution: float = Property(default=10, doc="Resolution of action space")
+    limits: StateVector = Property(doc="Min and max values of the action space",
+                                   default=StateVector([-100, 100]))
+
+    _action_cls = ChangeLocationAction
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    @property
+    def default_action(self):
+        return self._action_cls(generator=self,
+                                end_time=self.end_time,
+                                target_value=self.current_value)
+
+    def __call__(self, resolution=None, epsilon=None):
+        """
+        Parameters
+        ----------
+        resolution : float
+            Resolution of yielded action target values
+        epsilon: float
+            Tolerance of equality check in iteration
+        """
+        if resolution is not None:
+            self.resolution = resolution
+        if epsilon is not None:
+            self.epsilon = epsilon
+
+    @property
+    def initial_value(self):
+        return self.current_value
+
+    @property
+    def min(self):
+        # Lower limit of the location action space
+        return self.limits[0]
+
+    @property
+    def max(self):
+        # Upper limit of the location action space
+        return self.limits[1]
+
+    def __contains__(self, item):
+
+        if isinstance(item, self._action_cls):
+            item = item.target_value
+
+        possible_values = np.arange(self.min, self.max + self.resolution, self.resolution,
+                                    dtype=float)
+        possible_values = np.append(possible_values, self.current_value)
+        possible_values.sort()
+        return possible_values[0] <= item <= possible_values[-1]
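+
+    # Sketch of the action grid iterated below (illustrative): with the default limits of
+    # StateVector([-100, 100]) and resolution 10, candidate target values are
+    # -100, -90, ..., 100, and the sensor's current location is yielded as the default.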
+    def __iter__(self) -> Iterator[ChangeLocationAction]:
+        """Returns all possible ChangeLocationAction types"""
+        possible_values = np.arange(self.min, self.max + self.resolution, self.resolution,
+                                    dtype=float)
+
+        yield self.default_action
+        for value in possible_values:
+            if value == self.current_value:
+                continue
+            yield self._action_cls(generator=self,
+                                   end_time=self.end_time,
+                                   target_value=value)
+
+    def action_from_value(self, value):
+        if value not in self:
+            return None
+        possible_values = np.arange(self.min, self.max + self.resolution, self.resolution,
+                                    dtype=float)
+        possible_values = np.append(possible_values, self.current_value)
+        target = get_nearest(possible_values, value)
+        return self._action_cls(generator=self,
+                                end_time=self.end_time,
+                                target_value=target)
\ No newline at end of file
diff --git a/stonesoup/custom/sensor/action/processing.py b/stonesoup/custom/sensor/action/processing.py
new file mode 100644
index 000000000..17c7ba2cb
--- /dev/null
+++ b/stonesoup/custom/sensor/action/processing.py
@@ -0,0 +1,108 @@
+from enum import Enum
+from uuid import UUID
+
+import numpy as np
+from scipy.stats import norm
+
+from stonesoup.base import Base, Property
+from stonesoup.sensor.action import Action, ActionGenerator
+from stonesoup.types.state import GaussianState
+
+
+class ProcessOutput(Base):
+    run_time: float = Property(doc="Processing time in seconds")
+
+
+class ProcessingJobState(Enum):
+    PENDING = 0
+    RUNNING = 1
+    COMPLETED = 2
+
+
+class ProcessingJob(Base):
+    id: UUID = Property(doc="Unique identifier for the job")
+    algorithm: str = Property(doc="Algorithm to be used")
+    probability_of_detection: float = Property(doc="Probability of detection")
+    clutter_density: float = Property(doc="Clutter density")
+    processing_time: GaussianState = Property(doc="Distribution of the processing time in "
+                                                  "seconds")
+    state: ProcessingJobState = Property(doc="State of the job",
+                                         default=ProcessingJobState.PENDING)
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._start_time = None
+        self._end_time = None
+
+    @property
+    def start_time(self):
+        return self._start_time
+
+    @property
+    def end_time(self):
+        return self._end_time
+
+    def start(self, timestamp):
+        self.state = ProcessingJobState.RUNNING
+        self._start_time = timestamp
+        noise = np.max([0, norm.rvs(self.processing_time.mean,
+                                    np.sqrt(self.processing_time.covar))])
+        self._end_time = timestamp + noise
+
+
+class ProcessingAction(Action):
+    target_value: UUID = Property(doc="Target value.")
+    processing_time: GaussianState = Property(doc="Distribution of the action's processing "
+                                                  "time in seconds", default=None)
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        # Sample a (non-negative) completion delay; zero if no distribution is given
+        self._end_time = 0. if self.processing_time is None \
+            else np.max([0, norm.rvs(self.processing_time.mean,
+                                     np.sqrt(self.processing_time.covar))])
+
+    @property
+    def end_time(self):
+        return self._end_time
+
+    def act(self, current_time, timestamp, init_value):
+        if current_time >= self.end_time:
+            return True
+        else:
+            return False
+
+
+class ProcessingActionGenerator(ActionGenerator):
+    """Generates possible actions for processing data in a given time period."""
+
+    # NOTE: the defaults below are placeholders; the action space bounds and
+    # resolution are application-specific
+    resolution: float = Property(doc="Resolution of the action space", default=1.)
+    epsilon: float = Property(doc="Probability of yielding only the default action",
+                              default=0.1)
+    limits: tuple = Property(doc="Min and max values of the action space", default=(0., 1.))
+
+    _action_cls = ProcessingAction
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    @property
+    def default_action(self):
+        return ProcessingAction(generator=self,
+                                end_time=self.end_time,
+                                target_value=True)
+
+    def __call__(self, resolution=None, epsilon=None):
+        """
+        Parameters
+        ----------
+        resolution: float
+            Resolution of the action space
+        epsilon: float
+            Probability of yielding only the default action
+
+        Yields
+        ------
+        :class:`.Action`
+            Possible actions
+        """
+        if resolution is None:
+            resolution = self.resolution
+        if epsilon is None:
+            epsilon = self.epsilon
+
+        if np.random.random() < epsilon:
+            yield self.default_action
+        else:
+            for value in np.arange(self.limits[0], self.limits[1], resolution):
+                yield self._action_cls(generator=self,
+                                       end_time=self.end_time,
+                                       target_value=value)
+
diff
--git a/stonesoup/custom/sensor/movable.py b/stonesoup/custom/sensor/movable.py new file mode 100644 index 000000000..bf1021bde --- /dev/null +++ b/stonesoup/custom/sensor/movable.py @@ -0,0 +1,199 @@ +import datetime +from typing import Union, List, Set + +import numpy as np +import geopy.distance +from shapely import Point + +from stonesoup.base import Property +from stonesoup.custom.functions import geodesic_point_buffer +from stonesoup.custom.sensor.action.location import LocationActionGenerator +from stonesoup.models.clutter import ClutterModel +from stonesoup.models.measurement.linear import LinearGaussian +from stonesoup.sensor.action import ActionGenerator +from stonesoup.sensor.actionable import ActionableProperty +from stonesoup.sensor.sensor import Sensor +from stonesoup.types.array import CovarianceMatrix +from stonesoup.types.detection import TrueDetection +from stonesoup.types.groundtruth import GroundTruthState +from stonesoup.types.numeric import Probability + + +class MovableUAVCamera(Sensor): + """A movable UAV camera sensor.""" + + ndim_state: int = Property( + doc="Number of state dimensions. This is utilised by (and follows in\ + format) the underlying :class:`~.CartesianToElevationBearing`\ + model") + mapping: np.ndarray = Property( + doc="Mapping between the targets state space and the sensors\ + measurement capability") + noise_covar: CovarianceMatrix = Property( + doc="The sensor noise covariance matrix. This is utilised by\ + (and follow in format) the underlying \ + :class:`~.CartesianToElevationBearing` model") + fov_radius: Union[float, List[float]] = Property( + doc="The detection field of view radius of the sensor") + prob_detect: Probability = Property( + default=None, + doc="The probability of detection of the sensor. Defaults to 1.0") + clutter_model: ClutterModel = Property( + default=None, + doc="An optional clutter generator that adds a set of simulated " + ":class:`Clutter` objects to the measurements at each time step. " + "The clutter is simulated according to the provided distribution.") + location_x: float = ActionableProperty( + doc="The sensor x location. Defaults to zero", + default=0, + generator_cls=LocationActionGenerator + ) + location_y: float = ActionableProperty( + doc="The sensor y location. 
Defaults to zero", + default=0, + generator_cls=LocationActionGenerator + ) + limits: dict = Property( + doc="The sensor min max location", + default=None + ) + fov_in_km: bool = Property( + doc="Whether the FOV radius is in kilo-meters or degrees", + default=True) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.prob_detect is None: + self.prob_detect = Probability(1) + self._footprint = None + + @location_x.setter + def location_x(self, value): + self._property_location_x = value + if not self.movement_controller: + return + new_position = self.movement_controller.position.copy() + new_position[0] = value + self.movement_controller.position = new_position + + @location_y.setter + def location_y(self, value): + self._property_location_y = value + if not self.movement_controller: + return + new_position = self.movement_controller.position.copy() + new_position[1] = value + self.movement_controller.position = new_position + + @property + def measurement_model(self): + return LinearGaussian( + ndim_state=self.ndim_state, + mapping=self.mapping, + noise_covar=self.noise_covar) + + @property + def footprint(self): + if self._footprint is None: + if self.fov_in_km: + self._footprint = geodesic_point_buffer(*np.flip(self.position[0:2]), + self.fov_radius) + else: + self._footprint = Point(self.position[0:2]).buffer(self.fov_radius) + return self._footprint + + def act(self, timestamp: datetime.datetime): + super().act(timestamp) + if self.fov_in_km: + self._footprint = geodesic_point_buffer(*np.flip(self.position[0:2]), self.fov_radius) + else: + self._footprint = Point(self.position[0:2]).buffer(self.fov_radius) + + def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True, + **kwargs) -> Set[TrueDetection]: + + detections = set() + measurement_model = self.measurement_model + + for truth in ground_truths: + # Transform state to measurement space and generate random noise + measurement_vector = measurement_model.function(truth, noise=noise, **kwargs) + + if self.fov_in_km: + # distance = geopy.distance.distance(np.flip(self.position[0:2]), + # np.flip(measurement_vector[0:2])).km + if not self.footprint.contains(Point(measurement_vector[0:2])): + continue + else: + # Normalise measurement vector relative to sensor position + norm_measurement_vector = measurement_vector.astype(float) - self.position.astype( + float) + distance = np.linalg.norm(norm_measurement_vector[0:2]) + + # Do not measure if state not in FOV + if distance > self.fov_radius: + continue + + detection = TrueDetection(measurement_vector, + measurement_model=measurement_model, + timestamp=truth.timestamp, + groundtruth_path=truth) + + # Generate detection with probability of detection + if np.random.rand() <= self.prob_detect: + detections.add(detection) + + # Generate clutter at this time step + if self.clutter_model is not None: + self.clutter_model.measurement_model = measurement_model + clutter = self.clutter_model.function(ground_truths, **kwargs) + detections |= clutter + + return detections + + def _default_action(self, name, property_, timestamp): + """Returns the default action of the action generator associated with the property + (assumes the property is an :class:`~.ActionableProperty`).""" + generator = self._get_generator(name, property_, timestamp, self.timestamp) + return generator.default_action + + def actions(self, timestamp: datetime.datetime, start_timestamp: datetime.datetime = None + ) -> Set[ActionGenerator]: + """Method to return a 
set of action generators available up to a provided timestamp. + + A generator is returned for each actionable property that the sensor has. + + Parameters + ---------- + timestamp: datetime.datetime + Time of action finish. + start_timestamp: datetime.datetime, optional + Time of action start. + + Returns + ------- + : set of :class:`~.ActionGenerator` + Set of action generators, that describe the bounds of each action space. + """ + + if not self.validate_timestamp(): + self.timestamp = timestamp + + if start_timestamp is None: + start_timestamp = self.timestamp + + generators = {self._get_generator(name, property_, timestamp, start_timestamp) + for name, property_ in self._actionable_properties.items()} + + return generators + + def _get_generator(self, name, prop, timestamp, start_timestamp): + """Returns the action generator associated with the """ + kwargs = {'owner': self, 'attribute': name, 'start_time': start_timestamp, + 'end_time': timestamp} + if self.resolutions and name in self.resolutions.keys(): + kwargs['resolution'] = self.resolutions[name] + if self.limits and name in self.limits.keys(): + kwargs['limits'] = self.limits[name] + generator = prop.generator_cls(**kwargs) + return generator diff --git a/stonesoup/custom/sensor/pan_tilt.py b/stonesoup/custom/sensor/pan_tilt.py new file mode 100644 index 000000000..2a14ccbd6 --- /dev/null +++ b/stonesoup/custom/sensor/pan_tilt.py @@ -0,0 +1,128 @@ +import datetime +from copy import copy +from typing import Sequence, Iterator, Set, Union, Optional, List +from itertools import product + +import numpy as np + +from stonesoup.base import Property +from stonesoup.custom.sensor.action.angle import PanUAVActionsGenerator, TiltUAVActionsGenerator +from stonesoup.functions import cart2sphere +from stonesoup.models.clutter import ClutterModel +from stonesoup.models.measurement.linear import LinearGaussian +from stonesoup.sensor.action import Action, RealNumberActionGenerator +from stonesoup.sensor.actionable import ActionableProperty +from stonesoup.sensor.passive import PassiveElevationBearing +from stonesoup.sensor.sensor import Sensor +from stonesoup.types.angle import Angle, Bearing, Elevation +from stonesoup.types.array import StateVector, CovarianceMatrix +from stonesoup.types.detection import TrueDetection +from stonesoup.types.groundtruth import GroundTruthState +from stonesoup.functions import build_rotation_matrix + + + +class PanTiltUAVCamera(Sensor): + """A camera that can pan and tilt.""" + ndim_state: int = Property( + doc="Number of state dimensions. This is utilised by (and follows in\ + format) the underlying :class:`~.CartesianToElevationBearing`\ + model") + mapping: np.ndarray = Property( + doc="Mapping between the targets state space and the sensors\ + measurement capability") + noise_covar: CovarianceMatrix = Property( + doc="The sensor noise covariance matrix. This is utilised by\ + (and follow in format) the underlying \ + :class:`~.CartesianToElevationBearing` model") + fov_angle: Union[float, List[float]] = Property( + doc="The field of view (FOV) angle (in radians). If provided in a list, the first element " + "is the pan FOV angle and the second element is the tilt FOV angle. Else, the same " + "FOV angle is used for both pan and tilt.") + pan: Angle = ActionableProperty( + doc="The sensor pan. Defaults to zero", + default=Angle(0), + generator_cls=PanUAVActionsGenerator) + tilt: Angle = ActionableProperty( + doc="The sensor tilt. 
Defaults to zero", + default=Angle(0), + generator_cls=TiltUAVActionsGenerator) + clutter_model: ClutterModel = Property( + default=None, + doc="An optional clutter generator that adds a set of simulated " + ":class:`Clutter` objects to the measurements at each time step. " + "The clutter is simulated according to the provided distribution.") + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if isinstance(self.fov_angle, float): + self.fov_angle = [self.fov_angle, self.fov_angle] + + @property + def measurement_model(self): + return LinearGaussian( + ndim_state=self.ndim_state, + mapping=self.mapping, + noise_covar=self.noise_covar) + + def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True, + **kwargs) -> Set[TrueDetection]: + + detections = set() + measurement_model = self.measurement_model + + for truth in ground_truths: + # Transform state to measurement space and generate random noise + measurement_vector = measurement_model.function(truth, noise=noise, **kwargs) + + # Normalise measurement vector relative to sensor position + norm_measurement_vector = measurement_vector.astype(float) - self.position.astype(float) + + # Rotate measurement vector relative to sensor orientation + rotation_matrix = build_rotation_matrix(self.orientation) + norm_rotated_measurement_vector = rotation_matrix @ norm_measurement_vector + + # Convert to spherical coordinates + _, bearing_t, elevation_t = cart2sphere(*norm_rotated_measurement_vector) + + # Check if state falls within sensor's FOV + fov_min = -np.array(self.fov_angle) / 2 + fov_max = +np.array(self.fov_angle) / 2 + + # Do not measure if state not in FOV + if (not fov_min[0] <= bearing_t <= fov_max[0]) \ + or (not fov_min[1] <= elevation_t <= fov_max[1]): + continue + + detection = TrueDetection(measurement_vector, + measurement_model=measurement_model, + timestamp=truth.timestamp, + groundtruth_path=truth) + detections.add(detection) + + # Generate clutter at this time step + if self.clutter_model is not None: + self.clutter_model.measurement_model = measurement_model + clutter = self.clutter_model.function(ground_truths) + detections |= clutter + + return detections + + @property + def orientation(self) -> Optional[StateVector]: + """A 3x1 StateVector of angles (rad), specifying the sensor orientation in terms of the + counter-clockwise rotation around each Cartesian axis in the order :math:`x,y,z`. + The rotation angles are positive if the rotation is in the counter-clockwise + direction when viewed by an observer looking along the respective rotation axis, + towards the origin. + + .. note:: + This property delegates the actual calculation of orientation to the Sensor's + :attr:`movement_controller` + + It is settable if, and only if, the sensor holds its own internal movement_controller. 
+ """ + if self.movement_controller is None: + return None + return self.movement_controller.orientation + self.rotation_offset \ + + StateVector([0, self.tilt, self.pan]) \ No newline at end of file diff --git a/stonesoup/custom/sensor/processing.py b/stonesoup/custom/sensor/processing.py new file mode 100644 index 000000000..0b613c52a --- /dev/null +++ b/stonesoup/custom/sensor/processing.py @@ -0,0 +1,103 @@ +import datetime +import random +from queue import Queue +from typing import Set, Union +from uuid import UUID + +import numpy as np + +from stonesoup.base import Property +from stonesoup.custom.sensor.action.processing import ProcessingActionInput, \ + ProcessingActionGenerator, ProcessingJobState +from stonesoup.models.clutter import ClutterModel +from stonesoup.models.measurement.linear import LinearGaussian +from stonesoup.sensor.actionable import ActionableProperty +from stonesoup.sensor.sensor import Sensor +from stonesoup.types.array import CovarianceMatrix +from stonesoup.types.detection import TrueDetection +from stonesoup.types.groundtruth import GroundTruthState + + +class ProcessingNode(Sensor): + ndim_state: int = Property( + doc="Number of state dimensions. This is utilised by (and follows in\ + format) the underlying :class:`~.CartesianToElevationBearing`\ + model") + mapping: np.ndarray = Property( + doc="Mapping between the targets state space and the sensors\ + measurement capability") + noise_covar: CovarianceMatrix = Property( + doc="The sensor noise covariance matrix. This is utilised by\ + (and follow in format) the underlying \ + :class:`~.LinearGaussian` model") + current_job_id: UUID = ActionableProperty( + doc="The current job id", + default=None, + generator_cls=ProcessingActionGenerator + ) + clutter_model: ClutterModel = Property( + default=None, + doc="An optional clutter generator that adds a set of simulated " + ":class:`Clutter` objects to the measurements at each time step. 
" + "The clutter is simulated according to the provided distribution.") + + def __init__(self, *args, **kwargs): + self._current_job = None + super().__init__(*args, **kwargs) + self._job_queue = [] + + @current_job_id.setter + def current_job_id(self, value): + self._property_current_job_id = value + if self.current_job is not None: + self._current_job = next((job for job in self.job_queue if job.id == value), None) + else: + self._current_job = None + + @property + def job_queue(self): + return self._job_queue + + @property + def current_job(self): + return self._current_job + + @property + def measurement_model(self): + return LinearGaussian( + ndim_state=self.ndim_state, + mapping=self.mapping, + noise_covar=self.noise_covar) + + def act(self, timestamp: datetime.datetime): + if not self.validate_timestamp(): + self.timestamp = timestamp + return + + if self.current_job is None or self.current_job.state == ProcessingJobState.COMPLETED: + self._job_queue.remove(self.current_job) + self.current_job_id = None + elif self.current_job is not None: + if self.current_job.state == ProcessingJobState.RUNNING \ + and timestamp >= self.current_job.end_time: + self.current_job.state = ProcessingJobState.COMPLETED + self.timestamp = timestamp + + + def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True, + **kwargs) -> Set[TrueDetection]: + detections = set() + measurement_model = self.measurement_model + + if self.current_job is not None and self.current_job.state == ProcessingJobState.COMPLETED: + for ground_truth in ground_truths: + if random.random() < self.current_job.probability_of_detection: + measurement = measurement_model.function(ground_truth.state_vector, noise, **kwargs) + detection = TrueDetection(state_vector=measurement, + groundtruth_path=ground_truth, + sensor_state=self) + detections.add(detection) + if self.clutter_model is not None: + + + diff --git a/stonesoup/custom/sensormanager/__init__.py b/stonesoup/custom/sensormanager/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stonesoup/custom/sensormanager/base.py b/stonesoup/custom/sensormanager/base.py new file mode 100644 index 000000000..be82cddc8 --- /dev/null +++ b/stonesoup/custom/sensormanager/base.py @@ -0,0 +1,193 @@ +import numpy as np +import itertools as it + +from ...base import Property +from ...sensormanager import SensorManager, BruteForceSensorManager + + +class UniqueBruteForceSensorManager(SensorManager): + """A sensor manager which returns a choice of action from those available. The sensor manager + iterates through every possible configuration of sensors and actions and + selects the configuration which returns the maximum reward as calculated by a reward function. + + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def choose_actions(self, tracks, timestamp, nchoose=1, **kwargs): + """Returns a chosen [list of] action(s) from the action set for each sensor. + Chosen action(s) is selected by finding the configuration of sensors: actions which returns + the maximum reward, as calculated by a reward function. + + Parameters + ---------- + tracks: set of :class:`~Track` + Set of tracks at given time. Used in reward function. 
+ timestamp: :class:`datetime.datetime` + Time at which the actions are carried out until + nchoose : int + Number of actions from the set to choose (default is 1) + + Returns + ------- + : dict + The pairs of :class:`~.Sensor`: [:class:`~.Action`] selected + """ + + all_action_choices = dict() + + for sensor in self.sensors: + # get action 'generator(s)' + action_generators = sensor.actions(timestamp) + # list possible action combinations for the sensor + action_choices = list(it.product(*action_generators)) + # dictionary of sensors: list(action combinations) + all_action_choices[sensor] = action_choices + + configs = [] + poss = [] + for actionconfig in it.product(*all_action_choices.values()): + cfg = dict() + pos = set() + for sensor, actions in zip(all_action_choices.keys(), actionconfig): + action_x = next( + action for action in actions if action.generator.attribute == 'location_x') + action_y = next( + action for action in actions if action.generator.attribute == 'location_y') + cfg[sensor] = actions + pos.add((action_x.target_value, action_y.target_value)) + if pos not in poss: + configs.append(cfg) + poss.append(pos) + + best_rewards = np.zeros(nchoose) - np.inf + selected_configs = [None] * nchoose + rewards = [] + + for i, config in enumerate(configs): + reward = self.reward_function(config, tracks, timestamp) + rewards.append(reward) + # vars.append(var) + if reward > min(best_rewards): + selected_configs[np.argmin(best_rewards)] = config + best_rewards[np.argmin(best_rewards)] = reward + + # Return mapping of sensors and chosen actions for sensors + return selected_configs + + +class SampleBruteForceSensorManager(BruteForceSensorManager): + """A sensor manager which returns a choice of action from those available. The sensor manager + iterates through every possible configuration of sensors and actions and + selects the configuration which returns the maximum reward as calculated by a reward function. + + """ + + num_samples: int = Property(doc="Number of samples to take for each timestep", default=10) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def choose_actions(self, tracks, timestamp, nchoose=1, **kwargs): + """Returns a chosen [list of] action(s) from the action set for each sensor. + Chosen action(s) is selected by finding the configuration of sensors: actions which returns + the maximum reward, as calculated by a reward function. + + Parameters + ---------- + tracks: set of :class:`~Track` + Set of tracks at given time. Used in reward function. 
+ timestamp: :class:`datetime.datetime` + Time at which the actions are carried out until + nchoose : int + Number of actions from the set to choose (default is 1) + + Returns + ------- + : dict + The pairs of :class:`~.Sensor`: [:class:`~.Action`] selected + """ + + all_action_choices = dict() + + for sensor in self.sensors: + # get action 'generator(s)' + action_generators = sensor.actions(timestamp) + # list possible action combinations for the sensor + action_choices = list(it.product(*action_generators)) + # dictionary of sensors: list(action combinations) + all_action_choices[sensor] = action_choices + + # get tuple of dictionaries of sensors: actions + configs = list({sensor: action + for sensor, action in zip(all_action_choices.keys(), actionconfig)} + for actionconfig in it.product(*all_action_choices.values())) + cfgs = [] + poss = [] + for actionconfig in it.product(*all_action_choices.values()): + cfg = dict() + pos = set() + for sensor, actions in zip(all_action_choices.keys(), actionconfig): + action_x = next( + action for action in actions if action.generator.attribute == 'location_x') + action_y = next( + action for action in actions if action.generator.attribute == 'location_y') + cfg[sensor] = actions + pos.add((action_x.target_value, action_y.target_value)) + if pos not in poss: + cfgs.append(cfg) + poss.append(pos) + + idx = np.random.choice(len(configs), self.num_samples) + + configs = np.array([configs[i] for i in idx]) + + best_rewards = np.zeros(nchoose) - np.inf + selected_configs = [None] * nchoose + rewards = [] + for config in configs: + # calculate reward for dictionary of sensors: actions + reward = self.reward_function(config, tracks, timestamp) + rewards.append(reward) + # if reward > min(best_rewards): + # selected_configs[np.argmin(best_rewards)] = config + # best_rewards[np.argmin(best_rewards)] = reward + max_idx = np.argwhere(rewards == np.amax(rewards)).flatten() + best_configs = configs[max_idx] + if best_configs.size == 1: + best_config = best_configs[0] + else: + best_config = None + min_dist = np.inf + for config in best_configs: + dist = 0 + for sensor, actions in config.items(): + action_x = next( + action for action in actions if action.generator.attribute == 'location_x') + action_y = next( + action for action in actions if action.generator.attribute == 'location_y') + sensor_loc = sensor.position[0:2].flatten() + action_loc = np.array([action_x.target_value, action_y.target_value]) + dist += np.linalg.norm(sensor_loc - action_loc) + if dist < min_dist: + min_dist = dist + best_config = config + # Return mapping of sensors and chosen actions for sensors + return [best_config] + + +def is_valid_config(config, **kwargs): + num_sensors = int(len(kwargs)/2) + actions_sets = list(config.values()) + for i in range(num_sensors): + x = kwargs[f'x{i+1}'] + y = kwargs[f'y{i+1}'] + actions = actions_sets[i] + action_x = next( + action for action in actions if action.generator.attribute == 'location_x') + action_y = next( + action for action in actions if action.generator.attribute == 'location_y') + if action_x.target_value != x or action_y.target_value != y: + return False + return True \ No newline at end of file diff --git a/stonesoup/custom/sensormanager/reward.py b/stonesoup/custom/sensormanager/reward.py new file mode 100644 index 000000000..ee5dd8e9d --- /dev/null +++ b/stonesoup/custom/sensormanager/reward.py @@ -0,0 +1,866 @@ +import copy +import datetime +from typing import Mapping, Sequence, Set, List, Any +import itertools as it + +import 
numpy as np +from matplotlib.path import Path +from shapely.geometry import Point, Polygon +from shapely.ops import unary_union + +from reactive_isr_core.data import TaskType +from stonesoup.base import Property +from stonesoup.custom.functions import calculate_num_targets_dist, geodesic_point_buffer, eval_rfi +from stonesoup.custom.tracker import SMCPHD_JIPDA +from stonesoup.functions import gm_reduce_single +from stonesoup.predictor.kalman import KalmanPredictor +from stonesoup.sensor.action import Action +from stonesoup.sensor.sensor import Sensor +from stonesoup.sensormanager.reward import RewardFunction +from stonesoup.tracker import Tracker +from stonesoup.types.array import StateVectors +from stonesoup.types.detection import TrueDetection +from stonesoup.types.hypothesis import SingleHypothesis +from stonesoup.types.numeric import Probability +from stonesoup.types.state import ParticleState +from stonesoup.types.track import Track +from stonesoup.types.update import GaussianStateUpdate +from stonesoup.updater.kalman import ExtendedKalmanUpdater + + +class RolloutUncertaintyRewardFunction(RewardFunction): + """A reward function which calculates the potential reduction in the uncertainty of track estimates + if a particular action is taken by a sensor or group of sensors. + + Given a configuration of sensors and actions, a metric is calculated for the potential + reduction in the uncertainty of the tracks that would occur if the sensing configuration + were used to make an observation. A larger value indicates a greater reduction in + uncertainty. + """ + + predictor: KalmanPredictor = Property(doc="Predictor used to predict the track to a new state") + updater: ExtendedKalmanUpdater = Property(doc="Updater used to update " + "the track to the new state.") + timesteps: int = Property(doc="Number of timesteps to rollout") + num_samples: int = Property(doc="Number of samples to take for each timestep", default=30) + interval: datetime.timedelta = Property(doc="Interval between timesteps", + default=datetime.timedelta(seconds=1)) + + def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track], + metric_time: datetime.datetime, *args, **kwargs): + """ + For a given configuration of sensors and actions this reward function calculates the + potential uncertainty reduction of each track by + computing the difference between the covariance matrix norms of the prediction + and the posterior assuming a predicted measurement corresponding to that prediction. + + This requires a mapping of sensors to action(s) + to be evaluated by reward function, a set of tracks at given time and the time at which + the actions would be carried out until. + + The metric returned is the total potential reduction in uncertainty across all tracks. 
+
+        Returns
+        -------
+        : float
+            Metric of uncertainty for given configuration
+
+        """
+
+        # Reward value
+        end_time = metric_time + self.timesteps * self.interval
+        config_metric = self._rollout(config, tracks, metric_time, end_time)
+
+        # Return value of configuration metric
+        return config_metric
+
+    def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track],
+                 timestamp: datetime.datetime, end_time: datetime.datetime):
+        """
+        For a given configuration of sensors and actions, this method calculates the potential
+        uncertainty reduction of each track by computing the difference between the covariance
+        matrix norms of the prediction and the posterior, assuming a predicted measurement
+        corresponding to that prediction, and then recurses over sampled follow-on
+        configurations until `end_time` is reached.
+
+        This requires a mapping of sensors to action(s) to be evaluated by the reward function,
+        a set of tracks at a given time and the time at which the actions would be carried out
+        until.
+
+        The metric returned is the total potential reduction in uncertainty across all tracks.
+
+        Returns
+        -------
+        : float
+            Metric of uncertainty for given configuration
+
+        """
+
+        # Reward value
+        config_metric = 0
+
+        predicted_sensors = list()
+        memo = {}
+
+        # For each sensor in the configuration
+        for sensor, actions in config.items():
+            predicted_sensor = copy.deepcopy(sensor, memo)
+            predicted_sensor.add_actions(actions)
+            predicted_sensor.act(timestamp)
+            if isinstance(sensor, Sensor):  # checks if it's a sensor
+                predicted_sensors.append(predicted_sensor)
+
+        # Create set of predictions for the tracks in the configuration
+        predicted_tracks = set()
+        for track in tracks:
+            predicted_track = copy.copy(track)
+            predicted_track.append(self.predictor.predict(predicted_track, timestamp=timestamp))
+            predicted_tracks.add(predicted_track)
+
+        for sensor in predicted_sensors:
+
+            # Assumes one detection per track
+            detections = {detection.groundtruth_path: detection
+                          for detection in sensor.measure(predicted_tracks, noise=False)
+                          if isinstance(detection, TrueDetection)}
+
+            for predicted_track, detection in detections.items():
+                # Generate hypothesis based on prediction/previous update and detection
+                hypothesis = SingleHypothesis(predicted_track.state, detection)
+
+                # Do the update based on this hypothesis and store covariance matrix
+                update = self.updater.update(hypothesis)
+
+                previous_cov_norm = np.linalg.norm(predicted_track.covar)
+                update_cov_norm = np.linalg.norm(update.covar)
+
+                # Replace prediction with update
+                predicted_track.append(update)
+
+                # Calculate metric for the track observation and add to the metric
+                # for the configuration
+                metric = previous_cov_norm - update_cov_norm
+                config_metric += metric
+
+        if timestamp == end_time:
+            return config_metric
+
+        timestamp = timestamp + self.interval
+
+        all_action_choices = dict()
+        for sensor in predicted_sensors:
+            # get action 'generator(s)'
+            action_generators = sensor.actions(timestamp)
+            # list possible action combinations for the sensor
+            action_choices = list(it.product(*action_generators))
+            # dictionary of sensors: list(action combinations)
+            all_action_choices[sensor] = action_choices
+
+        configs = list({sensor: action
+                        for sensor, action in zip(all_action_choices.keys(), actionconfig)}
+                       for actionconfig in it.product(*all_action_choices.values()))
+
+        if len(configs) > self.num_samples:
+            idx = np.random.choice(len(configs), self.num_samples, replace=False)
+            configs = [configs[i] for i in idx]
+
+        rewards = [self._rollout(config, predicted_tracks, timestamp, end_time)
+                   for config in configs]
+        config_metric += np.max(rewards)
+
+        return config_metric
+
+
+class RolloutPriorityRewardFunction(RewardFunction):
+    """A reward function which calculates the priority-weighted reward that would be accrued
+    against the active RFIs (requests for information) if a particular action is taken by a
+    sensor or group of sensors.
+
+    Given a configuration of sensors and actions, the tracking of the targets is rolled out
+    over a number of timesteps and reward is accumulated whenever an RFI criterion (e.g. a
+    variance threshold) is met. A larger value indicates a higher-value configuration.
+    """
+
+    tracker: Tracker = Property(doc="Tracker used to track the tracks")
+    timesteps: int = Property(doc="Number of timesteps to rollout")
+    num_samples: int = Property(doc="Number of samples to take for each timestep", default=30)
+    interval: datetime.timedelta = Property(doc="Interval between timesteps",
+                                            default=datetime.timedelta(seconds=1))
+    rfis: List[Any] = Property(doc="List of RFIs (requests for information) used for "
+                                   "prioritisation", default=None)
+    prob_survive: Probability = Property(doc="Probability of survival", default=Probability(0.99))
+    use_variance: bool = Property(doc="Use variance in prioritisation", default=False)
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        if self.rfis is None:
+            self.rfis = []
+
+    def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track],
+                 metric_time: datetime.datetime, *args, **kwargs):
+        """
+        For a given configuration of sensors and actions, this reward function calculates a
+        priority-weighted reward by rolling out the tracking of the targets and evaluating the
+        active RFIs against the resulting tracks.
+
+        This requires a mapping of sensors to action(s) to be evaluated by the reward function,
+        a set of tracks at a given time and the time at which the actions would be carried out
+        until.
+
+        Returns
+        -------
+        : float
+            Metric of priority for given configuration
+
+        """
+
+        # Reward value
+        end_time = metric_time + self.timesteps * self.interval
+
+        config_metric, _ = self._rollout(config, tracks, metric_time, end_time)
+
+        # Return value of configuration metric
+        return config_metric
+
+    def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track],
+                 timestamp: datetime.datetime, end_time: datetime.datetime):
+        """
+        For a given configuration of sensors and actions, this method simulates the tracking of
+        the targets over a single timestep and evaluates the active RFIs against the resulting
+        tracks, before recursing over sampled follow-on configurations until `end_time` is
+        reached.
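+
+        Note that the rollout branches: each level draws up to :attr:`num_samples`
+        follow-on configurations and recurses until `end_time`, so the number of
+        reward evaluations grows on the order of ``num_samples ** timesteps``
+        (roughly ``30 ** 3`` with the default ``num_samples`` and ``timesteps=3``).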
+
+        Returns
+        -------
+        : float
+            Metric of priority for given configuration
+        : float
+            Variance associated with the last evaluated RFI
+
+        """
+
+        # Reward value
+        config_metric = 0
+
+        predicted_sensors = list()
+        memo = {}
+
+        if not len(self.rfis):
+            return 0, np.inf
+
+        # For each sensor in the configuration
+        for sensor, actions in config.items():
+            predicted_sensor = copy.deepcopy(sensor, memo)
+            predicted_sensor.add_actions(actions)
+            predicted_sensor.act(timestamp)
+            if isinstance(sensor, Sensor):  # checks if it's a sensor
+                predicted_sensors.append(predicted_sensor)
+
+        # Create set of predictions for the tracks in the configuration
+        predicted_tracks = set()
+        for track in tracks:
+            predicted_track = copy.copy(track)
+            time_interval = timestamp - predicted_track.timestamp
+            predicted_track.append(
+                self.tracker._predictor.predict(predicted_track, timestamp=timestamp))
+            prob_survive = np.exp(-self.tracker.prob_death * time_interval.total_seconds())
+            predicted_track.exist_prob = prob_survive * predicted_track.exist_prob
+            predicted_tracks.add(predicted_track)
+
+        tracks_copy = [copy.copy(track) for track in tracks]
+
+        for sensor in predicted_sensors:
+
+            # Assumes one detection per track
+            detections = {detection
+                          for detection in sensor.measure(predicted_tracks, noise=False)
+                          if isinstance(detection, TrueDetection)}
+
+            center = (sensor.position[1], sensor.position[0])
+            radius = sensor.fov_radius
+            p = geodesic_point_buffer(*center, radius)
+            self.tracker.prob_detect = _prob_detect_func([p])
+
+            associations = self.tracker._associator.associate(tracks_copy, detections, timestamp)
+
+            for track, multihypothesis in associations.items():
+                if isinstance(self.tracker, SMCPHD_JIPDA):
+                    # calculate each Track's state as a Gaussian Mixture of
+                    # its possible associations with each detection, then
+                    # reduce the Mixture to a single Gaussian State
+                    posterior_states = []
+                    posterior_state_weights = []
+                    for hypothesis in multihypothesis:
+                        posterior_state_weights.append(hypothesis.probability)
+                        if hypothesis:
+                            posterior_states.append(self.tracker._updater.update(hypothesis))
+                        else:
+                            posterior_states.append(hypothesis.prediction)
+
+                    # Merge/Collapse to single Gaussian
+                    means = StateVectors([state.state_vector for state in posterior_states])
+                    covars = np.stack([state.covar for state in posterior_states], axis=2)
+                    weights = np.asarray(posterior_state_weights)
+
+                    post_mean, post_covar = gm_reduce_single(means, covars, weights)
+
+                    track.append(GaussianStateUpdate(
+                        np.array(post_mean), np.array(post_covar),
+                        multihypothesis,
+                        multihypothesis[0].prediction.timestamp))
+                else:
+                    if multihypothesis:
+                        # Update track
+                        state_post = self.tracker._updater.update(multihypothesis)
+                        track.append(state_post)
+                        track.exist_prob = Probability(1.)
+                    else:
+                        time_interval = timestamp - track.timestamp
+                        track.append(multihypothesis.prediction)
+                        non_exist_weight = 1 - track.exist_prob
+                        prob_survive = np.exp(
+                            -self.tracker.prob_death * time_interval.total_seconds())
+                        non_det_weight = prob_survive * track.exist_prob
+                        track.exist_prob = non_det_weight / (non_exist_weight + non_det_weight)
+
+        var = np.inf
+        for rfi in self.rfis:
+            xmin, ymin = (rfi.region_of_interest.corners[0].longitude,
+                          rfi.region_of_interest.corners[0].latitude)
+            xmax, ymax = (rfi.region_of_interest.corners[1].longitude,
+                          rfi.region_of_interest.corners[1].latitude)
+            geom = Polygon([(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)])
+
+            if rfi.task_type == TaskType.COUNT:
+                target_types = [t.target_type.value for t in rfi.targets]
+                _, var = calculate_num_targets_dist(tracks_copy, geom, target_types=target_types)
+                if var < rfi.threshold_over_time.threshold[0]:
+                    # TODO: Need to select the priority
+                    config_metric += rfi.priority_over_time.priority[0]
+                    if self.use_variance:
+                        config_metric += 1/var
+            elif rfi.task_type == TaskType.FOLLOW:
+                for target in rfi.targets:
+                    track = next((track for track in tracks_copy
+                                  if track.id == str(target.target_UUID)), None)
+                    if track is not None:
+                        var = track.covar[0, 0] + track.covar[2, 2]
+                        if var < rfi.threshold_over_time.threshold[0]:
+                            config_metric += rfi.priority_over_time.priority[0]
+
+        if timestamp == end_time:
+            return config_metric, 0
+
+        timestamp = timestamp + self.interval
+
+        all_action_choices = dict()
+        for sensor in predicted_sensors:
+            # get action 'generator(s)'
+            action_generators = sensor.actions(timestamp)
+            # list possible action combinations for the sensor
+            action_choices = list(it.product(*action_generators))
+            # dictionary of sensors: list(action combinations)
+            all_action_choices[sensor] = action_choices
+
+        configs = []
+        poss = []
+        for actionconfig in it.product(*all_action_choices.values()):
+            cfg = dict()
+            pos = set()
+            for sensor, actions in zip(all_action_choices.keys(), actionconfig):
+                action_x = next(
+                    action for action in actions if action.generator.attribute == 'location_x')
+                action_y = next(
+                    action for action in actions if action.generator.attribute == 'location_y')
+                cfg[sensor] = actions
+                pos.add((action_x.target_value, action_y.target_value))
+            if pos not in poss:
+                configs.append(cfg)
+                poss.append(pos)
+
+        if len(configs) > self.num_samples:
+            idx = np.random.choice(len(configs), self.num_samples, replace=False)
+            configs = [configs[i] for i in idx]
+
+        rewards = [self._rollout(config, tracks_copy, timestamp, end_time)[0]
+                   for config in configs]
+        config_metric += np.max(rewards)
+
+        return config_metric, var
+
+
+class RolloutPriorityRewardFunction2(RewardFunction):
+    """A reward function which calculates the priority-weighted reward that would be accrued
+    against the active RFIs (requests for information) if a particular action is taken by a
+    sensor or group of sensors.
+
+    Given a configuration of sensors and actions, the tracking of the targets is rolled out
+    over a number of timesteps and reward is accumulated whenever an RFI criterion is met.
+    Unlike :class:`RolloutPriorityRewardFunction`, the rollout beyond the first timestep
+    follows a single randomly sampled configuration per step.
+    """
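+
+    # The RFI objects (from reactive_isr_core) are consumed by the reward functions in
+    # this module through the following fields:
+    #   rfi.task_type                       - TaskType.COUNT or TaskType.FOLLOW
+    #   rfi.region_of_interest.corners      - two lon/lat corners bounding the ROI
+    #   rfi.targets                         - target descriptors (type / UUID)
+    #   rfi.threshold_over_time.threshold   - sequence of variance thresholds
+    #   rfi.priority_over_time.priority     - sequence of priorities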
+ """ + + tracker: Tracker = Property(doc="Tracker used to track the tracks") + timesteps: int = Property(doc="Number of timesteps to rollout") + num_samples: int = Property(doc="Number of samples to take for each timestep", default=30) + interval: datetime.timedelta = Property(doc="Interval between timesteps", + default=datetime.timedelta(seconds=1)) + rfis: List[Any] = Property(doc="List of reward functions to use for prioritisation", + default=None) + prob_survive: Probability = Property(doc="Probability of survival", default=Probability(0.99)) + use_variance: bool = Property(doc="Use variance in prioritisation", default=False) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.rfis is None: + self.rfis = [] + + def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track], + metric_time: datetime.datetime, *args, **kwargs): + """ + For a given configuration of sensors and actions this reward function calculates the + potential uncertainty reduction of each track by + computing the difference between the covariance matrix norms of the prediction + and the posterior assuming a predicted measurement corresponding to that prediction. + + This requires a mapping of sensors to action(s) + to be evaluated by reward function, a set of tracks at given time and the time at which + the actions would be carried out until. + + The metric returned is the total potential reduction in uncertainty across all tracks. + + Returns + ------- + : float + Metric of uncertainty for given configuration + + """ + + if not len(self.rfis): + return 0 + + # Reward value + end_time = metric_time + self.timesteps * self.interval + + # Reward value + config_metric, updated_tracks, predicted_sensors = self._compute_metric(config, tracks, + metric_time) + + if metric_time == end_time: + return config_metric + + timestamp = metric_time + self.interval + + all_action_choices = dict() + for sensor in predicted_sensors: + # get action 'generator(s)' + action_generators = sensor.actions(timestamp) + # list possible action combinations for the sensor + action_choices = list(it.product(*action_generators)) + # dictionary of sensors: list(action combinations) + all_action_choices[sensor] = action_choices + + # configs = list({sensor: action + # for sensor, action in zip(all_action_choices.keys(), actionconfig)} + # for actionconfig in it.product(*all_action_choices.values())) + configs = [] + poss = [] + for actionconfig in it.product(*all_action_choices.values()): + cfg = dict() + pos = set() + for sensor, actions in zip(all_action_choices.keys(), actionconfig): + action_x = next( + action for action in actions if action.generator.attribute == 'location_x') + action_y = next( + action for action in actions if action.generator.attribute == 'location_y') + cfg[sensor] = actions + pos.add((action_x.target_value, action_y.target_value)) + if pos not in poss: + configs.append(cfg) + poss.append(pos) + + if len(configs) > self.num_samples: + idx = np.random.choice(len(configs), self.num_samples, replace=False) + configs = [configs[i] for i in idx] + + rewards = [config_metric + self._rollout(config, updated_tracks, timestamp, end_time) + for config in configs] + + # Return value of configuration metric + return np.max(rewards) + + def _compute_metric(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track], + timestamp: datetime.datetime): + + # Reward value + config_metric = 0 + + predicted_sensors = list() + memo = {} + + # For each sensor in the configuration + 
+        for sensor, actions in config.items():
+            predicted_sensor = copy.deepcopy(sensor, memo)
+            predicted_sensor.add_actions(actions)
+            predicted_sensor.act(timestamp)
+            if isinstance(sensor, Sensor):  # checks if it's a sensor
+                predicted_sensors.append(predicted_sensor)
+
+        # Create set of predictions for the tracks in the configuration
+        predicted_tracks = set()
+        for track in tracks:
+            predicted_track = copy.copy(track)
+            predicted_track.append(
+                self.tracker._predictor.predict(predicted_track, timestamp=timestamp))
+            predicted_tracks.add(predicted_track)
+
+        tracks_copy = [copy.copy(track) for track in predicted_tracks]
+
+        for sensor in predicted_sensors:
+
+            # Assumes one detection per track
+            detections = {detection
+                          for detection in sensor.measure(predicted_tracks, noise=False)
+                          if isinstance(detection, TrueDetection)}
+
+            p = sensor.footprint
+            self.tracker.prob_detect = _prob_detect_func([p])
+
+            associations = self.tracker._associator.associate(tracks_copy, detections, timestamp)
+
+            for track, multihypothesis in associations.items():
+                if isinstance(self.tracker, SMCPHD_JIPDA):
+                    # calculate each Track's state as a Gaussian Mixture of
+                    # its possible associations with each detection, then
+                    # reduce the Mixture to a single Gaussian State
+                    posterior_states = []
+                    posterior_state_weights = []
+                    for hypothesis in multihypothesis:
+                        posterior_state_weights.append(hypothesis.probability)
+                        if hypothesis:
+                            posterior_states.append(self.tracker._updater.update(hypothesis))
+                        else:
+                            posterior_states.append(hypothesis.prediction)
+
+                    # Merge/Collapse to single Gaussian
+                    means = StateVectors([state.state_vector for state in posterior_states])
+                    covars = np.stack([state.covar for state in posterior_states], axis=2)
+                    weights = np.asarray(posterior_state_weights)
+
+                    post_mean, post_covar = gm_reduce_single(means, covars, weights)
+
+                    track.append(GaussianStateUpdate(
+                        np.array(post_mean), np.array(post_covar),
+                        multihypothesis,
+                        multihypothesis[0].prediction.timestamp))
+                else:
+                    if multihypothesis:
+                        # Update track
+                        state_post = self.tracker._updater.update(multihypothesis)
+                        track.append(state_post)
+                        track.exist_prob = Probability(1.)
+                    else:
+                        timestamp_m1 = track.timestamp if self.tracker.predict \
+                            else track[-2].timestamp
+                        time_interval = timestamp - timestamp_m1
+                        track.append(multihypothesis.prediction)
+                        prob_survive = np.exp(
+                            -self.tracker.prob_death * time_interval.total_seconds())
+                        track.exist_prob *= prob_survive
+                        non_exist_weight = 1 - track.exist_prob
+                        non_det_weight = (1 - self.tracker.prob_detect(
+                            multihypothesis.prediction)) * track.exist_prob
+                        track.exist_prob = non_det_weight / (non_exist_weight + non_det_weight)
+
+        for rfi in self.rfis:
+            config_metric += eval_rfi(rfi, tracks_copy, predicted_sensors,
+                                      use_variance=self.use_variance)
+
+        return config_metric, tracks_copy, predicted_sensors
+
+    def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track],
+                 timestamp: datetime.datetime, end_time: datetime.datetime):
+        """
+        For a given configuration of sensors and actions, this method simulates the tracking of
+        the targets over a single timestep and evaluates the active RFIs against the resulting
+        tracks, before recursing along a single randomly sampled configuration per timestep
+        until `end_time` is reached.
+
+        This requires a mapping of sensors to action(s) to be evaluated by the reward function,
+        a set of tracks at a given time and the time at which the actions would be carried out
+        until.
+
+        The metric returned is the total priority-weighted reward accumulated over the rollout.
+
+        Returns
+        -------
+        : float
+            Metric of priority for given configuration
+
+        """
+
+        if not len(self.rfis):
+            return 0
+
+        # Reward value
+        config_metric, updated_tracks, predicted_sensors = self._compute_metric(config, tracks,
+                                                                                timestamp)
+
+        if timestamp == end_time:
+            return config_metric
+
+        timestamp += self.interval
+
+        all_action_choices = dict()
+        for sensor in predicted_sensors:
+            # get action 'generator(s)'
+            action_generators = sensor.actions(timestamp)
+            # list possible action combinations for the sensor
+            action_choices = list(it.product(*action_generators))
+            # dictionary of sensors: list(action combinations)
+            all_action_choices[sensor] = action_choices
+
+        configs = []
+        poss = []
+        for actionconfig in it.product(*all_action_choices.values()):
+            cfg = dict()
+            pos = set()
+            for sensor, actions in zip(all_action_choices.keys(), actionconfig):
+                action_x = next(
+                    action for action in actions if action.generator.attribute == 'location_x')
+                action_y = next(
+                    action for action in actions if action.generator.attribute == 'location_y')
+                cfg[sensor] = actions
+                pos.add((action_x.target_value, action_y.target_value))
+            if pos not in poss:
+                configs.append(cfg)
+                poss.append(pos)
+
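+        # From here on a single configuration is sampled per timestep (a Monte Carlo
+        # rollout), so the recursion costs roughly num_samples * timesteps reward
+        # evaluations (the num_samples branching happens once, in __call__) rather
+        # than num_samples ** timesteps.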
+ """ + + predictor: KalmanPredictor = Property(doc="Predictor used to predict the track to a new state") + updater: ExtendedKalmanUpdater = Property(doc="Updater used to update " + "the track to the new state.") + timesteps: int = Property(doc="Number of timesteps to rollout") + num_samples: int = Property(doc="Number of samples to take for each timestep", default=30) + interval: datetime.timedelta = Property(doc="Interval between timesteps", + default=datetime.timedelta(seconds=1)) + rfis: List[Any] = Property(doc="List of reward functions to use for prioritisation", + default=None) + prob_survive: Probability = Property(doc="Probability of survival", default=Probability(0.99)) + prob_death: Probability = Property(doc="Probability of death", default=Probability(0.01)) + use_variance: bool = Property(doc="Use variance in prioritisation", default=False) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.rfis is None: + self.rfis = [] + + def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track], + metric_time: datetime.datetime, *args, **kwargs): + """ + For a given configuration of sensors and actions this reward function calculates the + potential uncertainty reduction of each track by + computing the difference between the covariance matrix norms of the prediction + and the posterior assuming a predicted measurement corresponding to that prediction. + + This requires a mapping of sensors to action(s) + to be evaluated by reward function, a set of tracks at given time and the time at which + the actions would be carried out until. + + The metric returned is the total potential reduction in uncertainty across all tracks. + + Returns + ------- + : float + Metric of uncertainty for given configuration + + """ + + # Reward value + end_time = metric_time + datetime.timedelta(seconds=self.timesteps) + + config_metric = self._rollout(config, tracks, metric_time, end_time) + + # Return value of configuration metric + return config_metric + + def _rollout(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track], timestamp: datetime.datetime, end_time: datetime.datetime): + """ + For a given configuration of sensors and actions this reward function calculates the + potential uncertainty reduction of each track by + computing the difference between the covariance matrix norms of the prediction + and the posterior assuming a predicted measurement corresponding to that prediction. + + This requires a mapping of sensors to action(s) + to be evaluated by reward function, a set of tracks at given time and the time at which + the actions would be carried out until. + + The metric returned is the total potential reduction in uncertainty across all tracks. 
+
+        Returns
+        -------
+        : float
+            Metric of priority for given configuration
+        : float
+            Variance associated with the last evaluated RFI
+
+        """
+
+        # Reward value
+        config_metric = 0
+
+        predicted_sensors = list()
+        memo = {}
+
+        if not len(self.rfis):
+            return 0, np.inf
+
+        # For each sensor in the configuration
+        for sensor, actions in config.items():
+            predicted_sensor = copy.deepcopy(sensor, memo)
+            predicted_sensor.add_actions(actions)
+            predicted_sensor.act(timestamp)
+            if isinstance(sensor, Sensor):  # checks if it's a sensor
+                predicted_sensors.append(predicted_sensor)
+
+        # Create set of predictions for the tracks in the configuration
+        predicted_tracks = set()
+        for track in tracks:
+            predicted_track = copy.copy(track)
+            time_interval = timestamp - predicted_track.timestamp
+            predicted_track.append(self.predictor.predict(predicted_track, timestamp=timestamp))
+            prob_survive = np.exp(-self.prob_death * time_interval.total_seconds())
+            predicted_track.exist_prob = prob_survive * predicted_track.exist_prob
+            predicted_tracks.add(predicted_track)
+
+        detected_tracks = set()
+        for sensor in predicted_sensors:
+
+            # Assumes one detection per track
+            detections = {detection.groundtruth_path: detection
+                          for detection in sensor.measure(predicted_tracks, noise=False)
+                          if isinstance(detection, TrueDetection)}
+
+            for predicted_track, detection in detections.items():
+                # Generate hypothesis based on prediction/previous update and detection
+                hypothesis = SingleHypothesis(predicted_track.state, detection)
+
+                # Do the update based on this hypothesis and store covariance matrix
+                update = self.updater.update(hypothesis)
+
+                # Replace prediction with update
+                predicted_track.append(update)
+                predicted_track.exist_prob = Probability(1.)
+                detected_tracks.add(predicted_track)
+
+        non_detected_tracks = predicted_tracks - detected_tracks
+        for track in non_detected_tracks:
+            time_interval = timestamp - track.timestamp
+            non_exist_weight = 1 - track.exist_prob
+            prob_survive = np.exp(-self.prob_death * time_interval.total_seconds())
+            non_det_weight = prob_survive * track.exist_prob
+            track.exist_prob = non_det_weight / (non_exist_weight + non_det_weight)
+
+        var = np.inf
+        for rfi in self.rfis:
+            xmin, ymin = (rfi.region_of_interest.corners[0].longitude,
+                          rfi.region_of_interest.corners[0].latitude)
+            xmax, ymax = (rfi.region_of_interest.corners[1].longitude,
+                          rfi.region_of_interest.corners[1].latitude)
+            geom = Polygon([(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)])
+            _, var = calculate_num_targets_dist(predicted_tracks, geom)
+            if var < rfi.threshold:
+                # TODO: Need to select the priority
+                config_metric += rfi.priority_over_time.priority[0]
+                if self.use_variance:
+                    config_metric += 1/var
+
+        if timestamp == end_time:
+            return config_metric, 0
+
+        timestamp = timestamp + self.interval
+
+        all_action_choices = dict()
+        for sensor in predicted_sensors:
+            # get action 'generator(s)'
+            action_generators = sensor.actions(timestamp)
+            # list possible action combinations for the sensor
+            action_choices = list(it.product(*action_generators))
+            # dictionary of sensors: list(action combinations)
+            all_action_choices[sensor] = action_choices
+
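+        # Configurations that steer the sensors to the same set of (x, y) positions
+        # are interchangeable for location actions, so only the first configuration
+        # per unique position set is kept.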
+        configs = []
+        poss = []
+        for actionconfig in it.product(*all_action_choices.values()):
+            cfg = dict()
+            pos = set()
+            for sensor, actions in zip(all_action_choices.keys(), actionconfig):
+                action_x = next(
+                    action for action in actions if action.generator.attribute == 'location_x')
+                action_y = next(
+                    action for action in actions if action.generator.attribute == 'location_y')
+                cfg[sensor] = actions
+                pos.add((action_x.target_value, action_y.target_value))
+            if pos not in poss:
+                configs.append(cfg)
+                poss.append(pos)
+
+        if len(configs) > self.num_samples:
+            idx = np.random.choice(len(configs), self.num_samples, replace=False)
+            configs = [configs[i] for i in idx]
+
+        rewards = [self._rollout(config, predicted_tracks, timestamp, end_time)[0]
+                   for config in configs]
+        config_metric += np.max(rewards)
+
+        return config_metric, var
+
+
+def _prob_detect_func(fovs):
+    """Closure to return the probability of detection function for a given environment scan"""
+    prob_detect = Probability(0.9)
+    # Get the union of all fields of view
+    fovs_union = unary_union(fovs)
+    if fovs_union.geom_type == 'MultiPolygon':
+        fovs = [poly for poly in fovs_union]
+    else:
+        fovs = [fovs_union]
+
+    paths = [Path(poly.boundary.coords) for poly in fovs]
+
+    # Probability of detection nested function
+    def prob_detect_func(state):
+        if isinstance(state, ParticleState):
+            prob_detect_arr = np.full((len(state),), Probability(0.1))
+            points = state.state_vector[[0, 2], :].T
+            for path_p in paths:
+                inside_points = path_p.contains_points(points)
+                prob_detect_arr[inside_points] = prob_detect
+            return prob_detect_arr
+        else:
+            points = state.state_vector[[0, 2], :].T
+            return prob_detect if any(np.all(path_p.contains_points(points))
+                                      for path_p in paths) else Probability(0)
+
+    return prob_detect_func
\ No newline at end of file
diff --git a/stonesoup/custom/simulator/__init__.py b/stonesoup/custom/simulator/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/stonesoup/custom/simulator/platform.py b/stonesoup/custom/simulator/platform.py
new file mode 100644
index 000000000..0be4398dc
--- /dev/null
+++ b/stonesoup/custom/simulator/platform.py
@@ -0,0 +1,31 @@
+from typing import Sequence
+
+from stonesoup.base import Property
+from stonesoup.buffered_generator import BufferedGenerator
+from stonesoup.platform import Platform
+from stonesoup.simulator.platform import PlatformDetectionSimulator
+
+
+class PlatformTargetDetectionSimulator(PlatformDetectionSimulator):
+    """A simple platform detection simulator.
+
+    Processes ground truth data and generates :class:`~.Detection` data
+    according to a list of platforms by calling each sensor in these platforms.
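+
+    Example (sketch; `groundtruth_reader`, `sensing_platform` and `target_platform`
+    are placeholder names):
+
+    >>> sim = PlatformTargetDetectionSimulator(groundtruth=groundtruth_reader,
+    ...                                        platforms=[sensing_platform],
+    ...                                        targets=[target_platform])
+    >>> for timestamp, detections in sim:
+    ...     pass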
+ + """ + targets: Sequence[Platform] = Property( + doc='List of target platforms to be detected' + ) + + @BufferedGenerator.generator_method + def detections_gen(self): + for time, truths in self.groundtruth: + for platform in self.platforms: + platform.move(time) + for platform in self.targets: + platform.move(time) + for platform in self.platforms: + for sensor in platform.sensors: + truths_to_be_measured = truths.union(self.targets) + detections = sensor.measure(truths_to_be_measured, timestamp=time) + yield time, detections \ No newline at end of file diff --git a/stonesoup/custom/tracker/__init__.py b/stonesoup/custom/tracker/__init__.py new file mode 100644 index 000000000..275359133 --- /dev/null +++ b/stonesoup/custom/tracker/__init__.py @@ -0,0 +1,332 @@ +from abc import abstractmethod +from copy import copy +from datetime import datetime, timezone + +import numpy as np +from scipy.stats import multivariate_normal + +from stonesoup.base import Property, Base +from stonesoup.custom.dataassociator.jipda import JIPDAWithEHM2 +from stonesoup.custom.initiator.smcphd import SMCPHDFilter, SMCPHDInitiator, ISMCPHDFilter, \ + ISMCPHDInitiator +from stonesoup.dataassociator.neighbour import GNNWith2DAssignment +from stonesoup.functions import gm_reduce_single +from stonesoup.gater.distance import DistanceGater +from stonesoup.custom.hypothesiser.probability import PDAHypothesiser, IPDAHypothesiser +from stonesoup.hypothesiser.distance import DistanceHypothesiser +from stonesoup.measures import Mahalanobis +from stonesoup.models.measurement import MeasurementModel +from stonesoup.models.transition import TransitionModel +from stonesoup.predictor.kalman import KalmanPredictor +from stonesoup.resampler.particle import SystematicResampler +from stonesoup.types.array import StateVectors +from stonesoup.types.mixture import GaussianMixture +from stonesoup.types.numeric import Probability +from stonesoup.types.state import State, ParticleState +from stonesoup.types.update import GaussianStateUpdate +from stonesoup.updater.kalman import KalmanUpdater + + +class _BaseTracker(Base): + transition_model: TransitionModel = Property(doc='The transition model') + measurement_model: MeasurementModel = Property(doc='The measurement model') + prob_detection: Probability = Property(doc='The probability of detection') + prob_death: Probability = Property(doc='The probability of death') + prob_birth: Probability = Property(doc='The probability of birth') + birth_rate: float = Property( + doc='The birth rate (i.e. number of new/born targets at each iteration(') + birth_density: State = Property( + doc='The birth density (i.e. density from which we sample birth particles)') + clutter_intensity: float = Property(doc='The clutter intensity per unit volume') + num_samples: int = Property(doc='The number of samples. Default is 1024', default=1024) + birth_scheme: str = Property( + doc='The scheme for birth particles. Options are "expansion" | "mixture". 
' + 'Default is "expansion"', + default='expansion' + ) + start_time: datetime = Property(doc='Start time of the tracker', default=None) + + def __init__(self, *args, **kwargs): + self._clutter_intensity = kwargs.pop('clutter_intensity', None) + super().__init__(*args, **kwargs) + self.prob_detect = self.prob_detection + self._tracks = set() + + @property + def tracks(self): + return self._tracks + + @property + def prob_detect(self): + return self._prob_detect + + @prob_detect.setter + def prob_detect(self, prob_detect): + if not callable(prob_detect): + self.prob_detection = prob_detect + self._prob_detect = self._prob_detect_simple + else: + self._prob_detect = copy(prob_detect) + if hasattr(self, '_hypothesiser'): + if hasattr(self._hypothesiser, 'hypothesiser'): + self._hypothesiser.hypothesiser.prob_detect = self._prob_detect + else: + self._hypothesiser.prob_detect = self._prob_detect + if hasattr(self, '_initiator'): + self._initiator.filter.prob_detect = self._prob_detect + + @property + def clutter_intensity(self): + return self._clutter_intensity + + @clutter_intensity.setter + def clutter_intensity(self, clutter_intensity): + self._clutter_intensity = clutter_intensity + if hasattr(self, '_hypothesiser'): + self._hypothesiser.clutter_spatial_density = self._clutter_intensity + if hasattr(self, '_initiator'): + self._initiator.filter.clutter_intensity = self._clutter_intensity + + @abstractmethod + def track(self, detections, timestamp, *args, **kwargs): + raise NotImplementedError + + def _prob_detect_simple(self, state_vector): + """A simple probability of detection function.""" + return self.prob_detection + + +class SMCPHD_JIPDA(_BaseTracker): + """A JIPDA tracker using an SMC-PHD filter as the track initiator.""" + + detector: Base = Property(doc='The detector used to generate detections', default=None) + use_ismcphd: bool = Property(doc='Use ISMC-PHD filter for track initiation', default=True) + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._predictor = KalmanPredictor(self.transition_model) + self._updater = KalmanUpdater(self.measurement_model) + self._hypothesiser = IPDAHypothesiser(self._predictor, self._updater, + self.clutter_intensity, + prob_detect=self.prob_detect, + prob_survive=1 - self.prob_death) + self._hypothesiser = DistanceGater(self._hypothesiser, Mahalanobis(), 10) + self._associator = JIPDAWithEHM2(self._hypothesiser) + + resampler = SystematicResampler() + if self.use_ismcphd: + phd_filter = ISMCPHDFilter(birth_density=self.birth_density, + transition_model=self.transition_model, + measurement_model=self.measurement_model, + prob_detect=self.prob_detect, + prob_death=self.prob_death, + prob_birth=self.prob_birth, + birth_rate=self.birth_rate, + clutter_intensity=self.clutter_intensity, + num_samples=self.num_samples, + resampler=resampler, + birth_scheme=self.birth_scheme, + scale_birth_weights=True) + else: + phd_filter = SMCPHDFilter(birth_density=self.birth_density, + transition_model=self.transition_model, + measurement_model=self.measurement_model, + prob_detect=self.prob_detect, + prob_death=self.prob_death, + prob_birth=self.prob_birth, + birth_rate=self.birth_rate, + clutter_intensity=self.clutter_intensity, + num_samples=self.num_samples, + resampler=resampler, + birth_scheme=self.birth_scheme, + scale_birth_weights=True) + # Sample prior state from birth density + if isinstance(self.birth_density, GaussianMixture): + state_vector = np.zeros((self.transition_model.ndim_state, 0)) + particles_per_component = 
self.num_samples // len(self.birth_density)
+            for i, component in enumerate(self.birth_density):
+                if i == len(self.birth_density) - 1:
+                    particles_per_component += self.num_samples % len(self.birth_density)
+                particles_component = multivariate_normal.rvs(
+                    component.mean.ravel(),
+                    component.covar,
+                    particles_per_component).T
+                state_vector = np.hstack((state_vector, particles_component))
+            state_vector = StateVectors(state_vector)
+        else:
+            state_vector = StateVectors(
+                multivariate_normal.rvs(self.birth_density.state_vector.ravel(),
+                                        self.birth_density.covar,
+                                        size=self.num_samples).T)
+        weight = np.full((self.num_samples,), Probability(1 / self.num_samples)) * self.birth_rate
+        state = ParticleState(state_vector=state_vector, weight=weight, timestamp=self.start_time)
+
+        if self.use_ismcphd:
+            self._initiator = ISMCPHDInitiator(filter=phd_filter, prior=state)
+        else:
+            self._initiator = SMCPHDInitiator(filter=phd_filter, prior=state)
+
+    def __iter__(self):
+        self.detector_iter = iter(self.detector)
+        return self
+
+    def __next__(self):
+        timestamp, detections = next(self.detector_iter)
+        return timestamp, self.track(detections, timestamp)
+
+    def track(self, detections, timestamp, *args, **kwargs):
+        tracks = list(self.tracks)
+        detections = list(detections)
+        num_tracks = len(tracks)
+        num_detections = len(detections)
+
+        # Perform data association
+        associations = self._associator.associate(tracks, detections, timestamp)
+
+        # Compute measurement weights
+        assoc_prob_matrix = np.zeros((num_tracks, num_detections + 1))
+        rho = np.zeros((len(detections)))
+        for i, track in enumerate(tracks):
+            for hyp in associations[track]:
+                if not hyp:
+                    assoc_prob_matrix[i, 0] = hyp.weight
+                else:
+                    j = next(d_i for d_i, detection in enumerate(detections)
+                             if hyp.measurement == detection)
+                    assoc_prob_matrix[i, j + 1] = hyp.weight
+        for j, detection in enumerate(detections):
+            rho_tmp = 1
+            if len(assoc_prob_matrix):
+                for i, track in enumerate(tracks):
+                    rho_tmp *= 1 - assoc_prob_matrix[i, j + 1]
+            rho[j] = rho_tmp
+
+        # Update tracks
+        for track, multihypothesis in associations.items():
+
+            # calculate each Track's state as a Gaussian Mixture of
+            # its possible associations with each detection, then
+            # reduce the Mixture to a single Gaussian State
+            posterior_states = []
+            posterior_state_weights = []
+            for hypothesis in multihypothesis:
+                posterior_state_weights.append(hypothesis.probability)
+                if hypothesis:
+                    posterior_states.append(self._updater.update(hypothesis))
+                else:
+                    posterior_states.append(hypothesis.prediction)
+
+            # Merge/Collapse to single Gaussian
+            means = StateVectors([state.state_vector for state in posterior_states])
+            covars = np.stack([state.covar for state in posterior_states], axis=2)
+            weights = np.asarray(posterior_state_weights)
+
+            post_mean, post_covar = gm_reduce_single(means, covars, weights)
+
+            track.append(GaussianStateUpdate(
+                np.array(post_mean), np.array(post_covar),
+                multihypothesis,
+                multihypothesis[0].prediction.timestamp))
+
+        # Initiate new tracks
+        tracks = set(tracks)
+        new_tracks = self._initiator.initiate(detections, timestamp, weights=rho)
+        tracks |= new_tracks
+
+        # Delete tracks whose existence probability has fallen below threshold
+        del_tracks = set()
+        for track in tracks:
+            if track.exist_prob < 0.01:
+                del_tracks.add(track)
+        tracks -= del_tracks
+
+        self._tracks = set(tracks)
+        return self._tracks
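+
+
+# A minimal usage sketch for SMCPHD_JIPDA (the model, probability and detector
+# arguments are placeholders to be supplied by the caller):
+#
+#   tracker = SMCPHD_JIPDA(transition_model=transition_model,
+#                          measurement_model=measurement_model,
+#                          prob_detection=0.9, prob_death=0.01, prob_birth=0.1,
+#                          birth_rate=0.05, birth_density=birth_density,
+#                          clutter_intensity=1e-6, detector=detector)
+#   for timestamp, tracks in tracker:
+#       ...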
+
+
+class SMCPHD_IGNN(_BaseTracker):
+    """An IGNN tracker using an SMC-PHD filter as the track initiator."""
+
+    predict: bool = Property(default=True, doc="Whether to predict tracks")
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._predictor = KalmanPredictor(self.transition_model)
+        self._updater = KalmanUpdater(self.measurement_model)
+        self._hypothesiser = PDAHypothesiser(self._predictor, self._updater,
+                                             self.clutter_intensity,
+                                             prob_detect=self.prob_detect,
+                                             predict=self.predict,
+                                             normalise=False)
+        self._associator = GNNWith2DAssignment(self._hypothesiser)
+
+        resampler = SystematicResampler()
+        phd_filter = ISMCPHDFilter(birth_density=self.birth_density,
+                                   transition_model=self.transition_model,
+                                   measurement_model=self.measurement_model,
+                                   prob_detect=self.prob_detect,
+                                   prob_death=self.prob_death,
+                                   prob_birth=self.prob_birth,
+                                   birth_rate=self.birth_rate,
+                                   clutter_intensity=self.clutter_intensity,
+                                   num_samples=self.num_samples,
+                                   resampler=resampler,
+                                   birth_scheme=self.birth_scheme)
+
+        # Sample prior state from birth density
+        state_vector = StateVectors(
+            multivariate_normal.rvs(self.birth_density.state_vector.ravel(),
+                                    self.birth_density.covar,
+                                    size=self.num_samples).T)
+        weight = np.full((self.num_samples,), Probability(1 / self.num_samples))
+        state = ParticleState(state_vector=state_vector, weight=weight, timestamp=self.start_time)
+
+        self._initiator = ISMCPHDInitiator(filter=phd_filter, prior=state)
+
+    def track(self, detections, timestamp, *args, **kwargs):
+
+        tracks = list(self.tracks)
+        detections = list(detections)
+
+        # Perform data association
+        associations = self._associator.associate(tracks, detections, timestamp)
+
+        # Compute measurement weights
+        rho = np.ones((len(detections)))
+        for i, track in enumerate(tracks):
+            hyp = associations[track]
+            if hyp:
+                j = next(d_i for d_i, detection in enumerate(detections)
+                         if hyp.measurement == detection)
+                rho[j] = 0
+
+        # Update tracks
+        for track, hypothesis in associations.items():
+            if hypothesis:
+                # Update track
+                state_post = self._updater.update(hypothesis)
+                track.append(state_post)
+                track.exist_prob = Probability(1.)
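+            # If the track was not associated to a measurement, decay its existence
+            # probability: with survival probability P_S = exp(-prob_death * dt) and
+            # detection probability P_D,
+            #   p' = (1 - P_D) * P_S * p / ((1 - P_S * p) + (1 - P_D) * P_S * p)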
+ else: + time_interval = timestamp - track.timestamp + track.append(hypothesis.prediction) + prob_survive = np.exp(-self.prob_death * time_interval.total_seconds()) + track.exist_prob *= prob_survive + non_exist_weight = 1 - track.exist_prob + non_det_weight = (1-self.prob_detect(hypothesis.prediction)) * track.exist_prob + track.exist_prob = non_det_weight / (non_exist_weight + non_det_weight) + + # Initiate new tracks + tracks = set(tracks) + new_tracks = self._initiator.initiate(detections, timestamp, weights=rho) + tracks |= new_tracks + + # Delete tracks that have not been updated for a while + del_tracks = set() + for track in tracks: + if track.exist_prob < 0.01: + del_tracks.add(track) + tracks -= del_tracks + + self._tracks = set(tracks) + return self._tracks diff --git a/stonesoup/custom/tracker/fuse.py b/stonesoup/custom/tracker/fuse.py new file mode 100644 index 000000000..6257ff5e4 --- /dev/null +++ b/stonesoup/custom/tracker/fuse.py @@ -0,0 +1,299 @@ +import numpy as np + +from ..types.hypothesis import MultiHypothesis +from ..types.tracklet import Scan +from ...base import Base, Property +from ...dataassociator.probability import JPDA +from ...tracker import Tracker +from ...predictor import Predictor +from ...types.mixture import GaussianMixture +from ...types.multihypothesis import MultipleHypothesis +from ...updater import Updater +from ...dataassociator import DataAssociator +from ...types.numeric import Probability +from ...types.prediction import Prediction +from ...types.array import StateVectors +from ...types.update import Update +from ...initiator import Initiator +from ...functions import gm_reduce_single + + +from ..reader.tracklet import PseudoMeasExtractor, TrackletExtractor +from ..types.update import TwoStateGaussianStateUpdate + + +class _BaseFuseTracker(Base): + initiator: Initiator = Property(doc='The initiator used to initiate fused tracks') + predictor: Predictor = Property(doc='Predictor used to predict fused tracks') + updater: Updater = Property(doc='Updater used to update fused tracks') + associator: DataAssociator = Property(doc='Associator used to associate fused tracks with' + 'pseudomeasurements') + death_rate: float = Property(doc='The exponential death rate of tracks. Default is 1e-4', + default=1e-4) + prob_detect: Probability = Property(doc='The probability of detection', default=0.9) + delete_thresh: Probability = Property(doc='The existence probability deletion threshold', + default=0.1) + + def __init__(self, *args, **kwargs): + super(_BaseFuseTracker, self).__init__(*args, **kwargs) + self._max_track_id = 0 + + def process_scan(self, scan, tracks, current_end_time): + new_start_time = scan.start_time + new_end_time = scan.end_time + if current_end_time and new_start_time < current_end_time: + print('Scans out of order! 
Skipping a scan...') + return tracks, current_end_time + + if hasattr(self.initiator, 'predict'): + self.initiator.predict(new_start_time, new_end_time) + self.initiator.current_end_time = new_end_time + + # Predict two-state tracks forward + for track in tracks: + self.predict_track(track, current_end_time, new_start_time, new_end_time, + self.death_rate) + + current_start_time = new_start_time + current_end_time = new_end_time + + if not len(scan.sensor_scans): + tracks = list(tracks) + detections = set() + + # Perform data association + associations = self.associator.associate(tracks, detections, + timestamp=current_end_time) + # Update tracks + for track in tracks: + self.update_track(track, associations[track], scan.id) + + # Initiate new tracks on unassociated detections + if isinstance(self.associator, JPDA): + assoc_detections = set( + [h.measurement for hyp in associations.values() for h in hyp if h]) + else: + assoc_detections = set( + [hyp.measurement for hyp in associations.values() if hyp]) + + tracks = set(tracks) + + for sensor_scan in scan.sensor_scans: + tracks = list(tracks) + detections = set(sensor_scan.detections) + + # Perform data association + associations = self.associator.associate(tracks, detections, + timestamp=current_end_time) + # Update tracks + for track in tracks: + self.update_track(track, associations[track], scan.id) + + # Initiate new tracks on unassociated detections + if isinstance(self.associator, JPDA): + assoc_detections = set( + [h.measurement for hyp in associations.values() for h in hyp if h]) + else: + assoc_detections = set( + [hyp.measurement for hyp in associations.values() if hyp]) + + + tracks = set(tracks) + unassoc_detections = set(detections) - assoc_detections + if isinstance(sensor_scan.sensor_id, str): + tracks |= self.initiator.initiate(unassoc_detections, sensor_scan.timestamp, + sensor_scan.timestamp, + sensor_id=sensor_scan.sensor_id) + else: + tracks |= self.initiator.initiate(unassoc_detections, current_start_time, + current_end_time, sensor_id=sensor_scan.sensor_id) + try: + self.initiator.current_end_time = current_end_time + except AttributeError: + pass + + tracks -= self.delete_tracks(tracks) + return tracks, current_end_time + + def predict_track(self, track, current_end_time, new_start_time, new_end_time, + death_rate=0.): + + # Predict existence + survive_prob = np.exp(-death_rate * (new_end_time - current_end_time).total_seconds()) + track.exist_prob = track.exist_prob * survive_prob + + # Predict forward + # p(x_k, x_{k+\Delta} | y^{1:S}_{1:k}) + if not isinstance(track.state, GaussianMixture): + prediction = self.predictor.predict(track.state, current_end_time=current_end_time, + new_start_time=new_start_time, + new_end_time=new_end_time) + else: + pred_components = [] + for component in track.state: + pred_components.append(self.predictor.predict(component, + current_end_time=current_end_time, + new_start_time=new_start_time, + new_end_time=new_end_time)) + prediction = GaussianMixture(pred_components) + # Append prediction to track history + track.append(prediction) + + def update_track(self, track, hypothesis, scan_id): + last_state = track.states[-1] + + if isinstance(self.associator, JPDA): + # calculate each Track's state as a Gaussian Mixture of + # its possible associations with each detection, then + # reduce the Mixture to a single Gaussian State + posterior_states = [] + posterior_state_weights = [] + for hyp in hypothesis: + if not hyp: + posterior_states.append(hyp.prediction) + # Ensure null hyp weight 
is at index 0
+                    posterior_states.insert(0, posterior_states.pop())
+                    posterior_state_weights.insert(0, hyp.probability)
+                else:
+                    posterior_states.append(
+                        self.updater.update(hyp))
+                    posterior_state_weights.append(
+                        hyp.probability)
+                    if 'track_id' in hyp.measurement.metadata:
+                        try:
+                            track.track_ids.add(hyp.measurement.metadata['track_id'])
+                        except AttributeError:
+                            track.track_ids = {hyp.measurement.metadata['track_id']}
+
+            means = StateVectors([state.state_vector for state in posterior_states])
+            covars = np.stack([state.covar for state in posterior_states], axis=2)
+            weights = np.asarray(posterior_state_weights)
+
+            post_mean, post_covar = gm_reduce_single(means, covars, weights)
+
+            update = TwoStateGaussianStateUpdate(post_mean, post_covar,
+                                                 start_time=posterior_states[0].start_time,
+                                                 end_time=posterior_states[0].end_time,
+                                                 hypothesis=hypothesis)
+            track[-1] = update
+            # Compute existence probability
+            non_exist_weight = 1 - track.exist_prob
+            non_det_weight = (1 - self.prob_detect) * track.exist_prob
+            null_exist_weight = non_det_weight / (non_exist_weight + non_det_weight)
+            exist_probs = np.array([null_exist_weight, *[1. for i in range(len(weights) - 1)]])
+            track.exist_prob = Probability.sum(exist_probs * weights)
+        else:
+            if hypothesis:
+                # Perform update using the hypothesis
+                update = self.updater.update(hypothesis)
+                # Modify track states depending on type of last state
+                if isinstance(last_state, Update) and last_state.timestamp == update.timestamp:
+                    # If the last scan was an update with the same timestamp, we need to modify
+                    # this state to reflect the computed mean and covariance, as well as the
+                    # hypotheses that resulted in it
+                    hyp = last_state.hypothesis
+                    try:
+                        hyp.measurements.append(hypothesis.measurement)
+                    except AttributeError:
+                        hyp = MultiHypothesis(prediction=hypothesis.prediction,
+                                              measurements=[hyp.measurement,
+                                                            hypothesis.measurement])
+                    update.hypothesis = hyp  # Update the hypothesis
+                    track[-1] = update  # Replace the last state
+                elif isinstance(last_state,
+                                Prediction) and last_state.timestamp == update.timestamp:
+                    # If the last state was a prediction with the same timestamp, it means that
+                    # the state was created by a sensor scan in the same overall scan, due to
+                    # the track not having been associated to any measurement. Therefore, we
+                    # replace the prediction with the update
+                    update.hypothesis = MultiHypothesis(prediction=hypothesis.prediction,
+                                                        measurements=[hypothesis.measurement])
+                    track[-1] = update
+                else:
+                    # Else simply append the update to the track history
+                    update.hypothesis = MultiHypothesis(prediction=hypothesis.prediction,
+                                                        measurements=[hypothesis.measurement])
+                    track.append(update)
+                # Set existence probability to 1
+                track.exist_prob = 1
+                if 'track_id' in hypothesis.measurement.metadata:
+                    try:
+                        track.track_ids.add(hypothesis.measurement.metadata['track_id'])
+                    except AttributeError:
+                        track.track_ids = {hypothesis.measurement.metadata['track_id']}
+            else:
+                # If the track was not associated to any measurement, simply update the
+                # existence probability
+                non_exist_weight = 1 - track.exist_prob
+                non_det_weight = (1 - self.prob_detect) * track.exist_prob
+                track.exist_prob = non_det_weight / (non_exist_weight + non_det_weight)
+
+    def delete_tracks(self, tracks):
+        del_tracks = set([track for track in tracks if track.exist_prob < self.delete_thresh])
+        return del_tracks
+
+
+class FuseTracker(Tracker, _BaseFuseTracker):
+    """Fuse tracker that processes scans of pseudo-measurements generated by a
+    :class:`~.PseudoMeasExtractor` detector."""
+
+    detector: PseudoMeasExtractor = Property(doc='The pseudo-measurement extractor')
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._tracks = set()
+        self._current_end_time = None
+
+    @property
+    def tracks(self):
+        return self._tracks
+
+    def __iter__(self):
+        self.detector_iter = iter(self.detector)
+        return super().__iter__()
+
+    def __next__(self):
+        timestamp, scans = next(self.detector_iter)
+        for scan in scans:
+            self._tracks, self._current_end_time = self.process_scan(scan, self.tracks,
+                                                                     self._current_end_time)
+        return timestamp, self.tracks
+
+
+class FuseTracker2(_BaseFuseTracker):
+
+    tracklet_extractor: TrackletExtractor = Property(doc='The tracklet extractor')
+    pseudomeas_extractor: PseudoMeasExtractor = Property(doc='The pseudo-measurement extractor')
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._tracks = set()
+        self._current_end_time = None
+        self._scans = []  # PRH: Added to get scans
+
+    @property
+    def tracks(self):
+        return self._tracks
+
+    def process_tracks(self, alltracks, timestamp):
+        # Extract tracklets
+        tracklets = self.tracklet_extractor.extract(alltracks, timestamp)
+        # Extract pseudo-measurements
+        scans = self.pseudomeas_extractor.extract(tracklets, timestamp)
+        if not len(scans) and self._current_end_time \
+                and timestamp - self._current_end_time >= self.tracklet_extractor.fuse_interval:
+            scans = [Scan(self._current_end_time, timestamp, [])]
+
+        for scan in scans:
+            self._tracks, self._current_end_time = self.process_scan(scan, self.tracks,
+                                                                     self._current_end_time)
+
+        self._scans = scans  # PRH: Added to get scans
+
+        return self.tracks
+
+    def process_scans(self, scans):
+        for scan in scans:
+            self._tracks, self._current_end_time = self.process_scan(scan, self.tracks,
+                                                                     self._current_end_time)
+        return self.tracks
diff --git a/stonesoup/custom/types/__init__.py b/stonesoup/custom/types/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/stonesoup/custom/types/hypothesis.py b/stonesoup/custom/types/hypothesis.py
new file mode 100644
index 000000000..9e52e33a3
--- /dev/null
+++ b/stonesoup/custom/types/hypothesis.py
@@ -0,0 +1,14 @@
+from typing import List
+
+from stonesoup.base import Property
+from stonesoup.types.detection import Detection
+from stonesoup.types.hypothesis import Hypothesis
+from stonesoup.types.prediction import Prediction, MeasurementPrediction
+
+
diff --git a/stonesoup/custom/types/__init__.py new file mode 100644 index 000000000..e69de29bb
diff --git a/stonesoup/custom/types/hypothesis.py new file mode 100644 index 000000000..9e52e33a3 --- /dev/null +++ b/stonesoup/custom/types/hypothesis.py
@@ -0,0 +1,14 @@
+from typing import List
+
+from stonesoup.base import Property
+from stonesoup.types.detection import Detection
+from stonesoup.types.hypothesis import Hypothesis
+from stonesoup.types.prediction import Prediction, MeasurementPrediction
+
+
+class MultiHypothesis(Hypothesis):
+    """A hypothesis based on multiple measurements. """
+    prediction: Prediction = Property(doc="Predicted track state")
+    measurements: List[Detection] = Property(doc="Detections used for hypothesis and updating")
+    measurement_prediction: MeasurementPrediction = Property(
+        default=None, doc="Optional track prediction in measurement space")
\ No newline at end of file
diff --git a/stonesoup/custom/types/prediction.py new file mode 100644 index 000000000..024aca9d2 --- /dev/null +++ b/stonesoup/custom/types/prediction.py
@@ -0,0 +1,7 @@
+from stonesoup.custom.types.state import TwoStateGaussianState
+from stonesoup.types.prediction import Prediction
+
+
+class TwoStateGaussianStatePrediction(Prediction, TwoStateGaussianState):
+    """ A Gaussian state object representing the predicted distribution
+    :math:`p(x_{k+T}, x_{k} | Y)` """
\ No newline at end of file
diff --git a/stonesoup/custom/types/state.py new file mode 100644 index 000000000..6eff39140 --- /dev/null +++ b/stonesoup/custom/types/state.py
@@ -0,0 +1,18 @@
+import datetime
+
+from stonesoup.base import Property
+from stonesoup.types.numeric import Probability
+from stonesoup.types.state import GaussianState
+
+
+class TwoStateGaussianState(GaussianState):
+    """ A Gaussian state object representing the joint distribution :math:`p(x_{k+T}, x_{k} | Y)` """
+    start_time: datetime.datetime = Property(doc='Timestamp at t_k')
+    end_time: datetime.datetime = Property(doc='Timestamp at t_{k+T}')
+    weight: Probability = Property(default=0, doc="Weight of the Gaussian State.")
+    tag: str = Property(default=None, doc="Unique tag of the Gaussian State.")
+
+    @property
+    def timestamp(self):
+        # Report the end of the interval as the state's timestamp
+        return self.end_time
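+
+    # Illustrative construction (dimensions and values are made up): the joint
+    # state vector stacks the states at the two times, so a 2D single-target
+    # state gives a 4D two-state vector:
+    #
+    #     state = TwoStateGaussianState(StateVector([0., 0., 1., 1.]), np.eye(4),
+    #                                   start_time=t0,
+    #                                   end_time=t0 + datetime.timedelta(seconds=5))
+    #     assert state.timestamp == state.end_time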
\ No newline at end of file
diff --git a/stonesoup/custom/types/tracklet.py new file mode 100644 index 000000000..6038a1cf1 --- /dev/null +++ b/stonesoup/custom/types/tracklet.py
@@ -0,0 +1,69 @@
+import datetime
+import uuid
+from typing import Set, List
+import collections.abc
+
+from ...base import Property
+from ...types.base import Type
+from ...types.track import Track
+from ...types.detection import Detection
+from ...models.transition import TransitionModel
+
+
+class Tracklet(Track):
+    pass
+
+
+class SensorTracks(Type, collections.abc.Set):
+    """ A container object for tracks relating to a particular sensor """
+    tracks: Set[Track] = Property(doc='The set of tracks', default=None)
+    sensor_id: str = Property(doc='The id of the sensor', default=None)
+    transition_model: TransitionModel = Property(doc='Transition model used by the tracker',
+                                                 default=None)
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        if self.tracks is None:
+            self.tracks = set()
+
+    def __iter__(self):
+        return iter(self.tracks)
+
+    def __len__(self):
+        return len(self.tracks)
+
+    def __contains__(self, item):
+        return item in self.tracks
+
+
+class SensorTracklets(SensorTracks):
+    """ A container object for tracklets relating to a particular sensor """
+    pass
+
+
+class SensorScan(Type):
+    """ A wrapper around a set of detections produced by a particular sensor """
+    sensor_id: str = Property(doc='The id of the sensor')
+    detections: Set[Detection] = Property(doc='The detections contained in the scan')
+    id: str = Property(default=None, doc="The unique scan ID")
+    timestamp: datetime.datetime = Property(default=None, doc='The scan timestamp')
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        if self.id is None:
+            self.id = str(uuid.uuid4())
+
+
+class Scan(Type):
+    """ A wrapper around a set of sensor scans within a given time interval """
+    start_time: datetime.datetime = Property(doc='The scan start time')
+    end_time: datetime.datetime = Property(doc='The scan end time')
+    sensor_scans: List[SensorScan] = Property(doc='The sensor scans')
+    id: str = Property(default=None, doc="The unique scan ID")
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        if self.id is None:
+            self.id = str(uuid.uuid4())
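+
+
+# Illustrative assembly (ids and times are made up): a Scan groups the
+# per-sensor SensorScans that fall within one fusion interval:
+#
+#     sscan = SensorScan('radar-1', detections, timestamp=t1)
+#     scan = Scan(start_time=t0, end_time=t1, sensor_scans=[sscan])
+#
+# `scan.id` is auto-generated via uuid4 when not supplied.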
+
+
diff --git a/stonesoup/custom/types/update.py new file mode 100644 index 000000000..23124b949 --- /dev/null +++ b/stonesoup/custom/types/update.py
@@ -0,0 +1,7 @@
+from stonesoup.custom.types.state import TwoStateGaussianState
+from stonesoup.types.update import Update
+
+
+class TwoStateGaussianStateUpdate(Update, TwoStateGaussianState):
+    """ A Gaussian state object representing the posterior distribution
+    :math:`p(x_{k+T}, x_{k} | Y)` """
\ No newline at end of file
diff --git a/stonesoup/custom/updater/__init__.py new file mode 100644 index 000000000..e69de29bb
diff --git a/stonesoup/custom/updater/twostate.py new file mode 100644 index 000000000..4a23ddfdd --- /dev/null +++ b/stonesoup/custom/updater/twostate.py
@@ -0,0 +1,62 @@
+from ...types.update import Update
+from ...updater.kalman import ExtendedKalmanUpdater
+
+
+class TwoStateKalmanUpdater(ExtendedKalmanUpdater):
+
+    def update(self, hypothesis, **kwargs):
+        r"""The Kalman update method. Given a hypothesised association between
+        a predicted state or predicted measurement and an actual measurement,
+        calculate the posterior state.
+
+        Parameters
+        ----------
+        hypothesis : :class:`~.SingleHypothesis`
+            the prediction-measurement association hypothesis. This hypothesis
+            may carry a predicted measurement, or a predicted state. In the
+            latter case a predicted measurement will be calculated.
+        **kwargs : various
+            These are passed to :meth:`predict_measurement`
+
+        Returns
+        -------
+        : :class:`~.GaussianStateUpdate`
+            The posterior state Gaussian with mean :math:`\mathbf{x}_{k|k}` and
+            covariance :math:`P_{x|x}`
+
+        """
+        # Get the predicted state out of the hypothesis
+        predicted_state = hypothesis.prediction
+
+        # If there is no measurement prediction in the hypothesis then do the
+        # measurement prediction (and attach it back to the hypothesis).
+        if hypothesis.measurement_prediction is None:
+            # Get the measurement model out of the measurement if it's there.
+            # If not, use the one native to the updater (which might still be
+            # None)
+            measurement_model = hypothesis.measurement.measurement_model
+            measurement_model = self._check_measurement_model(
+                measurement_model)
+
+            # Attach the measurement prediction to the hypothesis
+            hypothesis.measurement_prediction = self.predict_measurement(
+                predicted_state, measurement_model=measurement_model, **kwargs)
+
+        # Kalman gain and posterior covariance
+        posterior_covariance, kalman_gain = self._posterior_covariance(hypothesis)
+
+        # Posterior mean
+        posterior_mean = predicted_state.state_vector + \
+            kalman_gain@(hypothesis.measurement.state_vector -
+                         hypothesis.measurement_prediction.state_vector)
+
+        if self.force_symmetric_covariance:
+            posterior_covariance = \
+                (posterior_covariance + posterior_covariance.T)/2
+
+        return Update.from_state(
+            hypothesis.prediction,
+            posterior_mean, posterior_covariance,
+            start_time=hypothesis.prediction.start_time,
+            end_time=hypothesis.measurement.timestamp,
+            hypothesis=hypothesis)
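+
+
+# Timing note (illustrative): for a two-state prediction spanning [t0, t0+5s]
+# updated with a pseudo-measurement stamped t0+5s, the returned update keeps
+# start_time=t0 and takes end_time from the measurement timestamp, so
+# `update.timestamp` (== end_time) still reports the interval end.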
diff --git a/stonesoup/dataassociator/neighbour.py index 2ea26f6da..f332e4f3a 100644 --- a/stonesoup/dataassociator/neighbour.py +++ b/stonesoup/dataassociator/neighbour.py
@@ -145,7 +145,7 @@ class GNNWith2DAssignment(DataAssociator):
     hypothesiser: Hypothesiser = Property(
         doc="Generate a set of hypotheses for each prediction-detection pair")
 
-    def associate(self, tracks, detections, timestamp, **kwargs):
+    def associate(self, tracks, detections, timestamp, hypotheses=None, **kwargs):
         """Associate a set of detections with predicted states.
 
         Parameters
@@ -163,8 +163,9 @@ def associate(self, tracks, detections, timestamp, **kwargs):
             Key value pair of tracks with associated detection
         """
 
-        # Generate a set of hypotheses for each track on each detection
-        hypotheses = self.generate_hypotheses(tracks, detections, timestamp, **kwargs)
+        if hypotheses is None:
+            # Generate a set of hypotheses for each track on each detection
+            hypotheses = self.generate_hypotheses(tracks, detections, timestamp, **kwargs)
 
         # Create dictionary for associations
         associations = {}
diff --git a/stonesoup/functions/__init__.py index f18e14ab9..9222682a5 100644 --- a/stonesoup/functions/__init__.py +++ b/stonesoup/functions/__init__.py
@@ -2,6 +2,7 @@
 import copy
 
 import numpy as np
+from numpy import linalg as la
 
 from ..types.numeric import Probability
 from ..types.array import StateVector, StateVectors, CovarianceMatrix
@@ -670,3 +671,59 @@ def sde_euler_maruyama_integration(fun, t_values, state_x0):
         a, b = fun(state_x, t)
         state_x.state_vector = state_x.state_vector + a*delta_t + b@delta_w
     return state_x.state_vector
+
+
+def nearestSPD(A):
+    """Find the nearest positive-semidefinite matrix to input
+
+    A Python/Numpy port of John D'Errico's `nearestSPD` MATLAB code [1], which
+    credits [2].
+
+    [1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
+
+    [2] N.J. Higham, "Computing a nearest symmetric positive semidefinite
+    matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
+    """
+
+    if isPD(A):
+        return A
+
+    B = (A + A.T) / 2
+    _, s, V = la.svd(B)
+
+    H = np.dot(V.T, np.dot(np.diag(s), V))
+
+    A2 = (B + H) / 2
+
+    A3 = (A2 + A2.T) / 2
+
+    if isPD(A3):
+        return A3
+
+    spacing = np.spacing(la.norm(A))
+    # The above is different from [1]. It appears that MATLAB's `chol` Cholesky
+    # decomposition will accept matrices with exactly 0-eigenvalue, whereas
+    # Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab
+    # for `np.spacing`), we use the above definition. CAVEAT: our `spacing`
+    # will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on
+    # the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas
+    # `spacing` will, for Gaussian random matrices of small dimension, be on
+    # the order of 1e-16. In practice, both ways converge, as the unit tests
+    # in the original port suggest.
+    I = np.eye(A.shape[0])
+    k = 1
+    while not isPD(A3):
+        mineig = np.min(np.real(la.eigvals(A3)))
+        A3 += I * (-mineig * k**2 + spacing)
+        k += 1
+
+    return CovarianceMatrix(A3)
+
+
+def isPD(B):
+    """Returns true when input is positive-definite, via Cholesky"""
+    try:
+        _ = la.cholesky(B)
+        return True
+    except la.LinAlgError:
+        return False
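+
+
+# Example (illustrative, made-up matrix): repair an indefinite 'covariance'
+# before a Cholesky-based operation:
+#     >>> C = np.array([[1., 2.], [2., 1.]])  # eigenvalues 3 and -1, not PD
+#     >>> isPD(C)
+#     False
+#     >>> isPD(nearestSPD(C))
+#     True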
\ No newline at end of file
diff --git a/stonesoup/models/clutter/clutter.py index ec6aa7efa..3a820c083 100644 --- a/stonesoup/models/clutter/clutter.py +++ b/stonesoup/models/clutter/clutter.py
@@ -1,3 +1,5 @@
+import datetime
+
 import numpy as np
 from scipy.stats import poisson
 from typing import Set, Union, Callable, Tuple, Optional
@@ -57,7 +59,8 @@ def __init__(self, *args, **kwargs):
         else:
             self.random_state = None
 
-    def function(self, ground_truths: Set[GroundTruthState], **kwargs) -> Set[Clutter]:
+    def function(self, ground_truths: Set[GroundTruthState], timestamp: datetime.datetime,
+                 **kwargs) -> Set[Clutter]:
         """
         Use the defined distribution and parameters to create simulated clutter
         for the current time step. Return this clutter to the calling sensor so
@@ -73,12 +76,6 @@ def function(self, ground_truths: Set[GroundTruthState], **kwargs) -> Set[Clutte
         : set of :class:`~.Clutter`
             The simulated clutter.
         """
-        # Extract the timestamp from the ground_truths. Groundtruth is
-        # necessary to get the proper timestamp. If there is no
-        # groundtruth return a set of no Clutter.
-        if not ground_truths:
-            return set()
-        timestamp = next(iter(ground_truths)).timestamp
 
         # Generate the clutter for this time step
         clutter = set()
diff --git a/stonesoup/resampler/particle.py index dc35e575c..427948175 100644 --- a/stonesoup/resampler/particle.py +++ b/stonesoup/resampler/particle.py
@@ -8,7 +8,7 @@
 
 class SystematicResampler(Resampler):
 
-    def resample(self, particles):
+    def resample(self, particles, n_particles=None):
         """Resample the particles
 
         Parameters
@@ -24,7 +24,8 @@ def resample(self, particles):
         if not isinstance(particles, ParticleState):
             particles = ParticleState(None, particle_list=particles)
 
-        n_particles = len(particles)
+        if n_particles is None:
+            n_particles = len(particles)
         weight = Probability(1 / n_particles)
 
         log_weights = np.asfarray(np.log(particles.weight))
diff --git a/stonesoup/sensormanager/base.py index 6c9a6fc40..c3c8dbe60 100644 --- a/stonesoup/sensormanager/base.py +++ b/stonesoup/sensormanager/base.py
@@ -1,4 +1,5 @@
 from abc import abstractmethod, ABC
+from multiprocessing import Pool, cpu_count
 from typing import Callable, Set
 import random
 import numpy as np
@@ -129,18 +130,45 @@ def choose_actions(self, tracks, timestamp, nchoose=1, **kwargs):
             all_action_choices[sensor] = action_choices
 
         # get tuple of dictionaries of sensors: actions
-        configs = ({sensor: action
+        configs = list({sensor: action
                     for sensor, action in zip(all_action_choices.keys(), actionconfig)}
                    for actionconfig in it.product(*all_action_choices.values()))
 
         best_rewards = np.zeros(nchoose) - np.inf
         selected_configs = [None] * nchoose
+        rewards = []
         for config in configs:
             # calculate reward for dictionary of sensors: actions
             reward = self.reward_function(config, tracks, timestamp)
+            rewards.append(reward)
             if reward > min(best_rewards):
                 selected_configs[np.argmin(best_rewards)] = config
                 best_rewards[np.argmin(best_rewards)] = reward
 
+        # Alternative parallel evaluation of the reward function, kept for
+        # reference:
+        # inputs = [(config, tracks, timestamp) for config in configs]
+        # with Pool(cpu_count()) as pool:
+        #     rewards = pool.starmap(self.reward_function, inputs)
+        # selected_configs = [configs[i] for i in np.argsort(rewards)[-nchoose:]]
+
         # Return mapping of sensors and chosen actions for sensors
         return selected_configs
+
+
+def is_valid_config(config, **kwargs):
+    """Check that each sensor's chosen location actions match the target
+    coordinates supplied as keyword arguments x1, y1, x2, y2, ..."""
+    num_sensors = len(kwargs) // 2
+    actions_sets = list(config.values())
+    for i in range(num_sensors):
+        x = kwargs[f'x{i+1}']
+        y = kwargs[f'y{i+1}']
+        actions = actions_sets[i]
+        action_x = next(
+            action for action in actions if action.generator.attribute == 'location_x')
+        action_y = next(
+            action for action in actions if action.generator.attribute == 'location_y')
+        if action_x.target_value != x or action_y.target_value != y:
+            return False
+    return True
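+
+
+# Usage sketch (coordinates are hypothetical): check whether a candidate
+# config points two sensors at (-4.5, 51.5) and (-4.0, 51.0):
+#
+#     is_valid_config(config, x1=-4.5, y1=51.5, x2=-4.0, y2=51.0)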
\ No newline at end of file
diff --git a/stonesoup/types/hypothesis.py index 12834bd14..ba1219ba3 100644 --- a/stonesoup/types/hypothesis.py +++ b/stonesoup/types/hypothesis.py
@@ -1,6 +1,6 @@
 from abc import abstractmethod
 from collections import UserDict
-from typing import Sequence
+from typing import Sequence, Dict
 
 import numpy as np
 
@@ -58,6 +58,7 @@ class SingleHypothesis(Hypothesis):
     measurement: Detection = Property(doc="Detection used for hypothesis and updating")
     measurement_prediction: MeasurementPrediction = Property(
         default=None, doc="Optional track prediction in measurement space")
+    metadata: Dict = Property(default=None, doc="Optional metadata")
 
     def __bool__(self):
         return (not isinstance(self.measurement, MissedDetection)) and \
diff --git a/stonesoup/types/track.py index 99baa22ee..4895b1978 100644 --- a/stonesoup/types/track.py +++ b/stonesoup/types/track.py
@@ -6,6 +6,7 @@
 from .state import State, StateMutableSequence
 from .update import Update
 from ..base import Property
+from ..custom.types.hypothesis import MultiHypothesis
 
 
 class Track(StateMutableSequence):
@@ -133,6 +134,12 @@ def _update_metadata_from_state(self, state):
             if hypothesis \
                     and hypothesis.measurement.metadata is not None:
                 self.metadata.update(hypothesis.measurement.metadata)
+            elif isinstance(state.hypothesis, MultiHypothesis):
+                hypothesis = state.hypothesis
+                # Merge the metadata of every measurement in the multi-hypothesis
+                for measurement in hypothesis.measurements:
+                    if measurement.metadata is not None:
+                        self.metadata.update(measurement.metadata)
             else:
                 hypothesis = state.hypothesis
                 if hypothesis and hypothesis.measurement.metadata is not None: