aslam_cv_geometric_vision/include/aslam/geometric-vision/match-outlier-rejection-twopt.h
@@ -1,7 +1,9 @@
#ifndef ASLAM_MATCH_OUTLIER_REJECTION_TWOPT_H_
#define ASLAM_MATCH_OUTLIER_REJECTION_TWOPT_H_

#include <Eigen/Dense>
#include <unordered_set>
#include <vector>

#include <Eigen/Core>
#include <glog/logging.h>

#include <aslam/common/pose-types.h>
@@ -11,29 +13,25 @@ namespace aslam {
class VisualFrame;

namespace geometric_vision {
using BearingVectors =
std::vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d>>;

/// \brief Runs two RANSAC schemes over the specified match list to separate the matches into
/// inliers and outliers. Both a translation-only and a rotation-only RANSAC are computed.
/// The final set of inliers is the union of the two inlier sets.
/// @param[in] frame_kp1 Current frame.
/// @param[in] frame_k Previous frame.
/// @param[in] q_Ckp1_Ck Rotation taking points from the camera frame k to the camera frame k+1.
/// @param[in] matches_kp1_k The matches between the frames.
/// @param[in] fix_random_seed Use a fixed random seed for RANSAC.
/// @param[in] ransac_threshold RANSAC threshold to consider a sample as inlier.
/// The threshold is defined as: 1 - cos(max_ray_disparity_angle).
/// @param[in] ransac_max_iterations Max. RANSAC iterations.
/// @param[out] inlier_matches_kp1_k The list of inlier matches.
/// @param[out] outlier_matches_kp1_k The list of outlier matches.
/// @return RANSAC successful?
// RANSAC threshold can be defined as: 1 - cos(max_ray_disparity_angle).
bool rejectOutlierFeatureMatchesTranslationRotationSAC(
const aslam::VisualFrame& frame_kp1, const aslam::VisualFrame& frame_k,
const aslam::Quaternion& q_Ckp1_Ck,
const aslam::FrameToFrameMatchesWithScore& matches_kp1_k, bool fix_random_seed,
double ransac_threshold, size_t ransac_max_iterations,
const aslam::FrameToFrameMatchesWithScore& matches_kp1_k,
bool fix_random_seed, double ransac_threshold, size_t ransac_max_iterations,
aslam::FrameToFrameMatchesWithScore* inlier_matches_kp1_k,
aslam::FrameToFrameMatchesWithScore* outlier_matches_kp1_k);

bool rejectOutlierFeatureMatchesTranslationRotationSAC(
const BearingVectors& bearing_vectors_kp1,
const BearingVectors& bearing_vectors_k,
const aslam::Quaternion& q_Ckp1_Ck, bool fix_random_seed,
double ransac_threshold, size_t ransac_max_iterations,
std::unordered_set<int>* inlier_indices);
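A minimal usage sketch of the frame-based overload, assuming that frame_kp1, frame_k, a rotation prior q_Ckp1_Ck (e.g. integrated from gyroscope measurements) and a match list matches_kp1_k are already available; the disparity angle and iteration count below are illustrative values only.

// Sketch only (assumed inputs): reject outlier matches between frames k and k+1.
const double kMaxRayDisparityAngleRad = 2.5 * M_PI / 180.0;  // assumed value
const double ransac_threshold = 1.0 - std::cos(kMaxRayDisparityAngleRad);
const size_t kRansacMaxIterations = 100u;                    // assumed value

aslam::FrameToFrameMatchesWithScore inlier_matches_kp1_k;
aslam::FrameToFrameMatchesWithScore outlier_matches_kp1_k;
const bool ransac_success =
    aslam::geometric_vision::rejectOutlierFeatureMatchesTranslationRotationSAC(
        frame_kp1, frame_k, q_Ckp1_Ck, matches_kp1_k,
        /*fix_random_seed=*/false, ransac_threshold, kRansacMaxIterations,
        &inlier_matches_kp1_k, &outlier_matches_kp1_k);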

} // namespace geometric_vision

} // namespace aslam
92 changes: 55 additions & 37 deletions aslam_cv_geometric_vision/src/match-outlier-rejection-twopt.cc
@@ -1,4 +1,7 @@
#include "aslam/geometric-vision/match-outlier-rejection-twopt.h"

#include <memory>
#include <unordered_set>
#include <vector>

#include <aslam/common/pose-types.h>
@@ -16,38 +19,65 @@ namespace geometric_vision {
bool rejectOutlierFeatureMatchesTranslationRotationSAC(
const aslam::VisualFrame& frame_kp1, const aslam::VisualFrame& frame_k,
const aslam::Quaternion& q_Ckp1_Ck,
const aslam::FrameToFrameMatchesWithScore& matches_kp1_k, bool fix_random_seed,
double ransac_threshold, size_t ransac_max_iterations,
const aslam::FrameToFrameMatchesWithScore& matches_kp1_k,
bool fix_random_seed, double ransac_threshold, size_t ransac_max_iterations,
aslam::FrameToFrameMatchesWithScore* inlier_matches_kp1_k,
aslam::FrameToFrameMatchesWithScore* outlier_matches_kp1_k) {

BearingVectors bearing_vectors_kp1;
BearingVectors bearing_vectors_k;

aslam::FrameToFrameMatches matches_without_score_kp1_k;
aslam::convertMatchesWithScoreToMatches<aslam::FrameToFrameMatchWithScore,
aslam::FrameToFrameMatch>(matches_kp1_k, &matches_without_score_kp1_k);
aslam::getBearingVectorsFromMatches(frame_kp1, frame_k,
matches_without_score_kp1_k,
&bearing_vectors_kp1, &bearing_vectors_k);

std::unordered_set<int> inlier_indices;
const bool success = rejectOutlierFeatureMatchesTranslationRotationSAC(
bearing_vectors_kp1, bearing_vectors_k, q_Ckp1_Ck, fix_random_seed,
ransac_threshold, ransac_max_iterations, &inlier_indices);

// Remove the outliers from the matches list.
int match_index = 0;
for (const aslam::FrameToFrameMatchWithScore& match : matches_kp1_k) {
if (inlier_indices.count(match_index)) {
inlier_matches_kp1_k->emplace_back(match);
} else {
outlier_matches_kp1_k->emplace_back(match);
}
++match_index;
}
CHECK_EQ(inlier_matches_kp1_k->size() + outlier_matches_kp1_k->size(),
matches_kp1_k.size());
return success;
}

bool rejectOutlierFeatureMatchesTranslationRotationSAC(
const BearingVectors& bearing_vectors_kp1,
const BearingVectors& bearing_vectors_k,
const aslam::Quaternion& q_Ckp1_Ck, bool fix_random_seed,
double ransac_threshold, size_t ransac_max_iterations,
std::unordered_set<int>* inlier_indices) {
CHECK_GT(ransac_threshold, 0.0);
CHECK_GT(ransac_max_iterations, 0u);
inlier_matches_kp1_k->clear();
outlier_matches_kp1_k->clear();
CHECK_NOTNULL(inlier_indices)->clear();
CHECK_EQ(bearing_vectors_kp1.size(), bearing_vectors_k.size());

// Handle the case with too few matches to distinguish between out-/inliers.
static constexpr size_t kMinKeypointCorrespondences = 6u;
if (matches_kp1_k.size() < kMinKeypointCorrespondences) {
if (bearing_vectors_kp1.size() < kMinKeypointCorrespondences) {
VLOG(1) << "Too few matches to run RANSAC.";
*outlier_matches_kp1_k = matches_kp1_k;
inlier_indices->clear(); // Treat all as outliers.
return false;
}

opengv::bearingVectors_t bearing_vectors_kp1;
opengv::bearingVectors_t bearing_vectors_k;

aslam::FrameToFrameMatches matches_without_score_kp1_k;
aslam::convertMatchesWithScoreToMatches<aslam::FrameToFrameMatchWithScore,
aslam::FrameToFrameMatch>(
matches_kp1_k, &matches_without_score_kp1_k);
aslam::getBearingVectorsFromMatches(frame_kp1, frame_k, matches_without_score_kp1_k,
&bearing_vectors_kp1, &bearing_vectors_k);
using opengv::relative_pose::CentralRelativeAdapter;
CentralRelativeAdapter adapter(bearing_vectors_kp1, bearing_vectors_k,
q_Ckp1_Ck.getRotationMatrix());

typedef opengv::sac_problems::relative_pose::RotationOnlySacProblem
RotationOnlySacProblem;
typedef opengv::sac_problems::relative_pose::RotationOnlySacProblem RotationOnlySacProblem;
std::shared_ptr<RotationOnlySacProblem> rotation_sac_problem(
new RotationOnlySacProblem(adapter, !fix_random_seed));

@@ -57,11 +87,9 @@ bool rejectOutlierFeatureMatchesTranslationRotationSAC(
rotation_ransac.max_iterations_ = ransac_max_iterations;
rotation_ransac.computeModel();

typedef opengv::sac_problems::relative_pose::TranslationOnlySacProblem
TranslationOnlySacProblem;
typedef opengv::sac_problems::relative_pose::TranslationOnlySacProblem TranslationOnlySacProblem;
std::shared_ptr<TranslationOnlySacProblem> translation_sac_problem(
new TranslationOnlySacProblem(
adapter, !fix_random_seed));
new TranslationOnlySacProblem(adapter, !fix_random_seed));
opengv::sac::Ransac<TranslationOnlySacProblem> translation_ransac;
translation_ransac.sac_model_ = translation_sac_problem;
translation_ransac.threshold_ = ransac_threshold;
@@ -74,27 +102,17 @@ bool rejectOutlierFeatureMatchesTranslationRotationSAC(
// closer to the boundary of the image. On the contrary, rotation only
// ransac erroneously discards many matches close to the border of the image
// but it correctly classifies matches in the center of the image.
std::unordered_set<int> inlier_indices(
rotation_ransac.inliers_.begin(), rotation_ransac.inliers_.end());
inlier_indices.insert(translation_ransac.inliers_.begin(), translation_ransac.inliers_.end());
inlier_indices->insert(rotation_ransac.inliers_.begin(),
rotation_ransac.inliers_.end());
inlier_indices->insert(translation_ransac.inliers_.begin(),
translation_ransac.inliers_.end());

if (inlier_indices.size() < kMinKeypointCorrespondences) {
if (inlier_indices->size() < kMinKeypointCorrespondences) {
VLOG(1) << "Too few inliers to reliably classify outlier matches.";
*outlier_matches_kp1_k = matches_kp1_k;
inlier_indices->clear(); // Treat all as outliers.
return false;
}

// Remove the outliers from the matches list.
int match_index = 0;
for (const aslam::FrameToFrameMatchWithScore& match : matches_kp1_k) {
if (inlier_indices.count(match_index)) {
inlier_matches_kp1_k->emplace_back(match);
} else {
outlier_matches_kp1_k->emplace_back(match);
}
++match_index;
}
CHECK_EQ(inlier_matches_kp1_k->size() + outlier_matches_kp1_k->size(), matches_kp1_k.size());
return true;
}
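A corresponding sketch for the bearing-vector overload, assuming both lists hold matched, unit-norm rays (bearing_vectors_kp1[i] corresponds to bearing_vectors_k[i]) and that q_Ckp1_Ck, ransac_threshold and ransac_max_iterations are defined as above:

// Sketch only: run the combined rotation-/translation-only RANSAC directly on
// bearing vectors and read back the union of both inlier sets.
std::unordered_set<int> inlier_indices;
const bool ransac_success =
    aslam::geometric_vision::rejectOutlierFeatureMatchesTranslationRotationSAC(
        bearing_vectors_kp1, bearing_vectors_k, q_Ckp1_Ck,
        /*fix_random_seed=*/false, ransac_threshold, ransac_max_iterations,
        &inlier_indices);
// Match index i is classified as an inlier iff inlier_indices.count(i) > 0.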

12 changes: 12 additions & 0 deletions aslam_cv_matcher/include/aslam/matcher/match-helpers-inl.h
@@ -62,6 +62,18 @@ void convertMatchesWithScoreToMatches(
matches_with_score_A_B, matches_A_B);
}

template<typename TypedMatches>
void convertFrameToFrameMatchesToMatches(
const TypedMatches& matches_typed_A_B, Matches* raw_matches_A_B) {
CHECK_NOTNULL(raw_matches_A_B)->clear();

raw_matches_A_B->reserve(matches_typed_A_B.size());
for (const aslam::FrameToFrameMatch& match_A_B : matches_typed_A_B) {
raw_matches_A_B->emplace_back(
aslam::Match(match_A_B.getKeypointIndexAppleFrame(),
match_A_B.getKeypointIndexBananaFrame()));
}
}
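A brief usage sketch of this helper; the input container and its contents are assumed, and the apple/banana frames follow the naming used by the getters above (apple presumably being frame (k+1), banana frame k):

// Sketch only: strip the typed wrapper and keep plain keypoint-index pairs.
aslam::FrameToFrameMatches typed_matches_kp1_k;  // assumed to be filled elsewhere
aslam::Matches raw_matches_kp1_k;
aslam::convertFrameToFrameMatchesToMatches(typed_matches_kp1_k, &raw_matches_kp1_k);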
} // namespace aslam

#endif // ASLAM_MATCHER_MATCH_HELPERS_INL_H_
15 changes: 13 additions & 2 deletions aslam_cv_matcher/include/aslam/matcher/match-helpers.h
@@ -1,6 +1,7 @@
#ifndef ASLAM_MATCHER_MATCH_HELPERS_H_
#define ASLAM_MATCHER_MATCH_HELPERS_H_

#include <aslam/cameras/camera.h>
#include <aslam/common/pose-types.h>

#include "aslam/matcher/match.h"
@@ -34,17 +35,22 @@ void convertMatchesWithScoreToOpenCvMatches(
const Aligned<std::vector, MatchWithScore>& matches_with_score_A_B,
OpenCvMatches* matches_A_B);

template<typename TypedMatches>
void convertFrameToFrameMatchesToMatches(
const TypedMatches& matches_typed_A_B, Matches* raw_matches_A_B);

/// Select and return N random matches for each camera in the rig.
void pickNRandomRigMatches(
size_t n_per_camera, const FrameToFrameMatchesList& rig_matches,
FrameToFrameMatchesList* selected_rig_matches);

/// Get the matches based on the track id channels for one VisualFrame.
size_t extractMatchesFromTrackIdChannel(const Eigen::VectorXi& track_ids_kp1,
const Eigen::VectorXi& track_ids_k,
FrameToFrameMatches* matches_kp1_kp);
size_t extractMatchesFromTrackIdChannel(const VisualFrame& frame_kp1,
const VisualFrame& frame_k,
FrameToFrameMatches* matches_kp1_kp);
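To illustrate the track-id based overloads declared above, a hedged toy example (assuming, as elsewhere in this code base, that negative track ids mark untracked keypoints):

// Sketch only: keypoints are matched iff they carry the same non-negative track id.
Eigen::VectorXi track_ids_kp1(3);
track_ids_kp1 << 7, -1, 9;
Eigen::VectorXi track_ids_k(3);
track_ids_k << 5, 7, 9;

aslam::FrameToFrameMatches matches_kp1_k;
const size_t num_matches = aslam::extractMatchesFromTrackIdChannel(
    track_ids_kp1, track_ids_k, &matches_kp1_k);
// Under these assumptions: num_matches == 2, pairing keypoints (0, 1) and (2, 2).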

/// Get the matches based on the track id channels for one VisualNFrame.
size_t extractMatchesFromTrackIdChannels(
const VisualNFrame& nframe_kp1, const VisualNFrame& nframe_k,
FrameToFrameMatchesList* rig_matches_kp1_kp);
@@ -75,6 +81,11 @@ void predictKeypointsByRotation(const VisualFrame& frame_k,
const aslam::Quaternion& q_Ckp1_Ck,
Eigen::Matrix2Xd* predicted_keypoints_kp1,
std::vector<unsigned char>* prediction_success);
void predictKeypointsByRotation(const aslam::Camera& camera,
const Eigen::Matrix2Xd& keypoints_k,
const aslam::Quaternion& q_Ckp1_Ck,
Eigen::Matrix2Xd* predicted_keypoints_kp1,
std::vector<unsigned char>* prediction_success);
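A minimal sketch of the camera-level prediction overload, assuming a camera model, the keypoint matrix of frame k and a rotation prior (e.g. from gyroscope integration) are available:

// Sketch only: predict where the keypoints of frame k reappear in frame (k+1)
// under a pure rotation (backproject, rotate by q_Ckp1_Ck, reproject).
Eigen::Matrix2Xd predicted_keypoints_kp1;
std::vector<unsigned char> prediction_success;
aslam::predictKeypointsByRotation(camera, keypoints_k, q_Ckp1_Ck,
                                  &predicted_keypoints_kp1, &prediction_success);
// Failed back-/re-projections are flagged in prediction_success and fall back
// to the unrotated input keypoint.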

} // namespace aslam

48 changes: 37 additions & 11 deletions aslam_cv_matcher/src/match-helpers.cc
@@ -32,11 +32,21 @@ void pickNRandomRigMatches(
size_t extractMatchesFromTrackIdChannel(const VisualFrame& frame_kp1,
const VisualFrame& frame_k,
FrameToFrameMatches* matches_kp1_kp) {
CHECK_NOTNULL(matches_kp1_kp);
CHECK_EQ(frame_kp1.getRawCameraGeometry().get(), frame_k.getRawCameraGeometry().get());
CHECK_EQ(frame_kp1.getRawCameraGeometry().get(),
frame_k.getRawCameraGeometry().get());
const Eigen::VectorXi& track_ids_kp1 = frame_kp1.getTrackIds();
const Eigen::VectorXi& track_ids_k = frame_k.getTrackIds();

return extractMatchesFromTrackIdChannel(
track_ids_kp1, track_ids_k, matches_kp1_kp);
}


size_t extractMatchesFromTrackIdChannel(const Eigen::VectorXi& track_ids_kp1,
const Eigen::VectorXi& track_ids_k,
FrameToFrameMatches* matches_kp1_kp) {
CHECK_NOTNULL(matches_kp1_kp);

// Build trackid <-> keypoint_idx lookup table.
typedef std::unordered_map<int, size_t> TrackIdKeypointIdxMap;
TrackIdKeypointIdxMap track_id_kp1_keypoint_idx_kp1_map;
@@ -47,7 +57,8 @@ size_t extractMatchesFromTrackIdChannel(const VisualFrame& frame_kp1,
continue;
track_id_kp1_keypoint_idx_kp1_map.insert(std::make_pair(track_id_kp1, keypoint_idx_kp1));
}
CHECK_LE(track_id_kp1_keypoint_idx_kp1_map.size(), frame_kp1.getNumKeypointMeasurements());

CHECK_LE(track_id_kp1_keypoint_idx_kp1_map.size(), track_ids_kp1.rows());

// Create indices matches vector using the lookup table.
matches_kp1_kp->clear();
@@ -206,43 +217,58 @@ void getBearingVectorsFromMatches(
keypoint_indices_k, &success), bearing_vectors_k);
}


void predictKeypointsByRotation(const VisualFrame& frame_k,
const aslam::Quaternion& q_Ckp1_Ck,
Eigen::Matrix2Xd* predicted_keypoints_kp1,
std::vector<unsigned char>* prediction_success) {
CHECK_NOTNULL(predicted_keypoints_kp1);
CHECK_NOTNULL(prediction_success)->clear();
CHECK(frame_k.hasKeypointMeasurements());
if (frame_k.getNumKeypointMeasurements() == 0u) {

const aslam::Camera& camera =
*CHECK_NOTNULL(frame_k.getCameraGeometry().get());

predictKeypointsByRotation(camera, frame_k.getKeypointMeasurements(),
q_Ckp1_Ck, predicted_keypoints_kp1,
prediction_success);
}

void predictKeypointsByRotation(
const aslam::Camera& camera, const Eigen::Matrix2Xd& keypoints_k,
const aslam::Quaternion& q_Ckp1_Ck,
Eigen::Matrix2Xd* predicted_keypoints_kp1,
std::vector<unsigned char>* prediction_success) {
CHECK_NOTNULL(predicted_keypoints_kp1);
CHECK_NOTNULL(prediction_success)->clear();
if (keypoints_k.cols() == 0u) {
return;
}
const aslam::Camera& camera = *CHECK_NOTNULL(frame_k.getCameraGeometry().get());

// Early exit for identity rotation.
if (std::abs(q_Ckp1_Ck.w() - 1.0) < 1e-8) {
*predicted_keypoints_kp1 = frame_k.getKeypointMeasurements();
*predicted_keypoints_kp1 = keypoints_k;
prediction_success->resize(predicted_keypoints_kp1->size(), true);
}

// Backproject the keypoints to bearing vectors.
Eigen::Matrix3Xd bearing_vectors_k;
camera.backProject3Vectorized(frame_k.getKeypointMeasurements(), &bearing_vectors_k,
camera.backProject3Vectorized(keypoints_k, &bearing_vectors_k,
prediction_success);
CHECK_EQ(static_cast<int>(prediction_success->size()), bearing_vectors_k.cols());
CHECK_EQ(static_cast<int>(frame_k.getNumKeypointMeasurements()), bearing_vectors_k.cols());
CHECK_EQ(keypoints_k.cols(), bearing_vectors_k.cols());

// Rotate the bearing vectors into the frame_kp1 coordinates.
// Rotate the bearing vectors into the keypoints_kp1 coordinates.
const Eigen::Matrix3Xd bearing_vectors_kp1 = q_Ckp1_Ck.rotateVectorized(bearing_vectors_k);

// Project the bearing vectors to the frame_kp1.
// Project the bearing vectors to the keypoints_kp1.
std::vector<ProjectionResult> projection_results;
camera.project3Vectorized(bearing_vectors_kp1, predicted_keypoints_kp1, &projection_results);
CHECK_EQ(predicted_keypoints_kp1->cols(), bearing_vectors_k.cols());
CHECK_EQ(static_cast<int>(projection_results.size()), bearing_vectors_k.cols());

// Set the success based on the backprojection and projection results and output the initial
// unrotated keypoint for failed predictions.
const Eigen::Matrix2Xd& keypoints_k = frame_k.getKeypointMeasurements();
CHECK_EQ(keypoints_k.cols(), predicted_keypoints_kp1->cols());

for (size_t idx = 0u; idx < projection_results.size(); ++idx) {
@@ -11,20 +11,22 @@ void visualizeMatches(const aslam::VisualFrame& frame_kp1,
const aslam::VisualFrame& frame_k,
const MatchesWithScore& matches_with_score,
cv::Mat* image) {
aslam::Matches matches;
aslam::convertMatchesWithScoreToMatches<MatchesWithScore>(
matches_with_score, &matches);
visualizeMatchesWithoutScore(frame_kp1, frame_k, matches, image);
}

inline void visualizeMatchesWithoutScore(
const aslam::VisualFrame& frame_kp1, const aslam::VisualFrame& frame_k,
const aslam::Matches& matches, cv::Mat* image) {
CHECK_NOTNULL(image);

cv::cvtColor(frame_kp1.getRawImage(), *image, CV_GRAY2BGR);

VLOG(4) << "Converted raw image from grayscale to color.";

aslam::Matches matches;
aslam::convertMatchesWithScoreToMatches<MatchesWithScore>(matches_with_score, &matches);
VLOG(4) << "Converted the matches.";

CHECK_NOTNULL(image->data);

drawKeypointMatches(
frame_kp1, frame_k, matches, cv::Scalar(255, 255, 0), cv::Scalar(255, 0, 255), image);
frame_kp1, frame_k, matches, cv::Scalar(255, 255, 0),
cv::Scalar(255, 0, 255), image);
}

} // namespace aslam_cv_visualization
@@ -43,6 +43,9 @@ void visualizeMatches(const aslam::VisualFrame& frame_kp1,
const aslam::VisualFrame& frame_k,
const MatchesWithScore& matches,
cv::Mat* image);
void visualizeMatchesWithoutScore(
const aslam::VisualFrame& frame_kp1, const aslam::VisualFrame& frame_k,
const aslam::Matches& matches, cv::Mat* image);
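A short usage sketch of the newly exposed overload; the frames and the raw match list are assumed to come from the tracking pipeline, and frame_kp1 is assumed to carry a grayscale raw image (the function converts it to color internally):

// Sketch only: render the matches and write the result with standard OpenCV.
cv::Mat match_image;
aslam_cv_visualization::visualizeMatchesWithoutScore(
    frame_kp1, frame_k, raw_matches_kp1_k, &match_image);
cv::imwrite("matches_kp1_k.png", match_image);  // hypothetical output path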

////////////////////////////////////////////////
/// Low-Level functions - They only draw some visualization onto an image.