From 18c51c1895fc64c60d1082c54d1f73879fc7036f Mon Sep 17 00:00:00 2001
From: Patrick Geneva
Date: Tue, 15 Jun 2021 14:47:48 -0400
Subject: [PATCH] Clang format the project

---
 .clang-format                                |   137 +
 ov_core/src/cpi/CpiBase.h                    |   270 +-
 ov_core/src/cpi/CpiV1.h                      |   685 +-
 ov_core/src/cpi/CpiV2.h                      |   879 +-
 ov_core/src/dummy.cpp                        |    20 +-
 ov_core/src/feat/Feature.cpp                 |   106 +-
 ov_core/src/feat/Feature.h                   |   101 +-
 ov_core/src/feat/FeatureDatabase.h           |   714 +-
 ov_core/src/feat/FeatureInitializer.cpp      |   711 +-
 ov_core/src/feat/FeatureInitializer.h        |   244 +-
 ov_core/src/feat/FeatureInitializerOptions.h |    95 +-
 ov_core/src/init/InertialInitializer.cpp     |   249 +-
 ov_core/src/init/InertialInitializer.h       |   158 +-
 ov_core/src/sim/BsplineSE3.cpp               |   652 +-
 ov_core/src/sim/BsplineSE3.h                 |   376 +-
 ov_core/src/test_tracking.cpp                |   488 +-
 ov_core/src/test_webcam.cpp                  |   305 +-
 ov_core/src/track/Grider_DOG.h               |   283 +-
 ov_core/src/track/Grider_FAST.h              |   228 +-
 ov_core/src/track/TrackAruco.cpp             |   284 +-
 ov_core/src/track/TrackAruco.h               |   172 +-
 ov_core/src/track/TrackBase.cpp              |   299 +-
 ov_core/src/track/TrackBase.h                |   667 +-
 ov_core/src/track/TrackDescriptor.cpp        |   828 +-
 ov_core/src/track/TrackDescriptor.h          |   275 +-
 ov_core/src/track/TrackKLT.cpp               |  1066 +-
 ov_core/src/track/TrackKLT.h                 |   247 +-
 ov_core/src/track/TrackSIM.cpp               |    75 +-
 ov_core/src/track/TrackSIM.h                 |   111 +-
 ov_core/src/types/IMU.h                      |   444 +-
 ov_core/src/types/JPLQuat.h                  |   236 +-
 ov_core/src/types/Landmark.cpp               |   234 +-
 ov_core/src/types/Landmark.h                 |   157 +-
 ov_core/src/types/LandmarkRepresentation.h   |   169 +-
 ov_core/src/types/PoseJPL.h                  |   346 +-
 ov_core/src/types/Type.h                     |   240 +-
 ov_core/src/types/Vec.h                      |    73 +-
 ov_core/src/utils/CLI11.hpp                  | 12791 ++++++++--------
 ov_core/src/utils/colors.h                   |    41 +-
 ov_core/src/utils/dataset_reader.h           |   302 +-
 ov_core/src/utils/lambda_body.h              |    31 +-
 ov_core/src/utils/quat_ops.h                 |   896 +-
 ov_core/src/utils/sensor_data.h              |    85 +-
 ov_eval/src/alignment/AlignTrajectory.cpp    |   230 +-
 ov_eval/src/alignment/AlignTrajectory.h      |   190 +-
 ov_eval/src/alignment/AlignUtils.cpp         |   304 +-
 ov_eval/src/alignment/AlignUtils.h           |   170 +-
 ov_eval/src/calc/ResultSimulation.cpp        |   846 +-
 ov_eval/src/calc/ResultSimulation.h          |   452 +-
 ov_eval/src/calc/ResultTrajectory.cpp        |   671 +-
 ov_eval/src/calc/ResultTrajectory.h          |   335 +-
 ov_eval/src/dummy.cpp                        |    16 +-
 ov_eval/src/error_comparison.cpp             |   661 +-
 ov_eval/src/error_dataset.cpp                |   553 +-
 ov_eval/src/error_simulation.cpp             |    55 +-
 ov_eval/src/error_singlerun.cpp              |   554 +-
 ov_eval/src/format_converter.cpp             |   232 +-
 ov_eval/src/plot/matplotlibcpp.h             |  3107 ++--
 ov_eval/src/plot_trajectories.cpp            |   296 +-
 ov_eval/src/pose_to_file.cpp                 |    92 +-
 ov_eval/src/timing_comparison.cpp            |   236 +-
 ov_eval/src/timing_flamegraph.cpp            |   193 +-
 ov_eval/src/timing_percentages.cpp           |   382 +-
 ov_eval/src/utils/Colors.h                   |    37 +-
 ov_eval/src/utils/Loader.cpp                 |   566 +-
 ov_eval/src/utils/Loader.h                   |   137 +-
 ov_eval/src/utils/Math.h                     |   990 +-
 ov_eval/src/utils/Recorder.h                 |   325 +-
 ov_eval/src/utils/Statistics.h               |   217 +-
 ov_msckf/src/core/RosVisualizer.cpp          |  1797 ++-
 ov_msckf/src/core/RosVisualizer.h            |   237 +-
 ov_msckf/src/core/VioManager.cpp             |  1884 ++-
 ov_msckf/src/core/VioManager.h               |   619 +-
 ov_msckf/src/core/VioManagerOptions.h        |   495 +-
 ov_msckf/src/dummy.cpp                       |    20 +-
 ov_msckf/src/ros_serial_msckf.cpp            |   417 +-
 ov_msckf/src/ros_subscribe_msckf.cpp         |   327 +-
 ov_msckf/src/run_simulation.cpp              |   182 +-
 ov_msckf/src/sim/Simulator.cpp               |  1078 +-
 ov_msckf/src/sim/Simulator.h                 |   340 +-
 ov_msckf/src/state/Propagator.cpp            |   948 +-
 ov_msckf/src/state/Propagator.h              |   507 +-
 ov_msckf/src/state/State.cpp                 |   112 +-
 ov_msckf/src/state/State.h                   |   189 +-
 ov_msckf/src/state/StateHelper.cpp           |  1019 +-
 ov_msckf/src/state/StateHelper.h             |   467 +-
 ov_msckf/src/state/StateOptions.h            |   119 +-
 ov_msckf/src/test_sim_meas.cpp               |    84 +-
 ov_msckf/src/test_sim_repeat.cpp             |   202 +-
 ov_msckf/src/update/UpdaterHelper.cpp        |  1130 +-
 ov_msckf/src/update/UpdaterHelper.h          |   256 +-
 ov_msckf/src/update/UpdaterMSCKF.cpp         |   429 +-
 ov_msckf/src/update/UpdaterMSCKF.h           |   148 +-
 ov_msckf/src/update/UpdaterOptions.h         |    40 +-
 ov_msckf/src/update/UpdaterSLAM.cpp          |  1065 +-
 ov_msckf/src/update/UpdaterSLAM.h            |   215 +-
 ov_msckf/src/update/UpdaterZeroVelocity.cpp  |   301 +-
 ov_msckf/src/update/UpdaterZeroVelocity.h    |   237 +-
 ov_msckf/src/utils/parse_cmd.h               |   564 +-
 ov_msckf/src/utils/parse_ros.h               |   522 +-
 run_format.sh                                |     8 +
 101 files changed, 26462 insertions(+), 28113 deletions(-)
 create mode 100644 .clang-format
 create mode 100755 run_format.sh

diff --git a/.clang-format b/.clang-format
new file mode 100644
index 000000000..daa0ac88f
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,137 @@
+---
+Language: Cpp
+# BasedOnStyle: LLVM
+AccessModifierOffset: -2
+AlignAfterOpenBracket: Align
+AlignConsecutiveMacros: false
+AlignConsecutiveAssignments: false
+AlignConsecutiveDeclarations: false
+AlignEscapedNewlines: Right
+AlignOperands: true
+AlignTrailingComments: true
+AllowAllArgumentsOnNextLine: true
+AllowAllConstructorInitializersOnNextLine: true
+AllowAllParametersOfDeclarationOnNextLine: true
+AllowShortBlocksOnASingleLine: Never
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: All
+AllowShortLambdasOnASingleLine: All
+AllowShortIfStatementsOnASingleLine: Never
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakAfterDefinitionReturnType: None
+AlwaysBreakAfterReturnType: None
+AlwaysBreakBeforeMultilineStrings: false
+AlwaysBreakTemplateDeclarations: MultiLine
+BinPackArguments: true
+BinPackParameters: true
+BraceWrapping:
+  AfterCaseLabel: false
+  AfterClass: false
+  AfterControlStatement: false
+  AfterEnum: false
+  AfterFunction: false
+  AfterNamespace: false
+  AfterObjCDeclaration: false
+  AfterStruct: false
+  AfterUnion: false
+  AfterExternBlock: false
+  BeforeCatch: false
+  BeforeElse: false
+  IndentBraces: false
+  SplitEmptyFunction: true
+  SplitEmptyRecord: true
+  SplitEmptyNamespace: true
+BreakBeforeBinaryOperators: None
+BreakBeforeBraces: Attach
+BreakBeforeInheritanceComma: false
+BreakInheritanceList: BeforeColon
+BreakBeforeTernaryOperators: true
+BreakConstructorInitializersBeforeComma: false
+BreakConstructorInitializers: BeforeColon
+BreakAfterJavaFieldAnnotations: false
+BreakStringLiterals: true
+ColumnLimit: 140
+CommentPragmas: '^ IWYU pragma:'
+CompactNamespaces: false
+ConstructorInitializerAllOnOneLineOrOnePerLine: false
+ConstructorInitializerIndentWidth: 4
+ContinuationIndentWidth: 4
+Cpp11BracedListStyle: true
+DeriveLineEnding: true
+DerivePointerAlignment: false
+DisableFormat: false
+ExperimentalAutoDetectBinPacking: false
+FixNamespaceComments: true
+ForEachMacros:
+  - foreach
+  - Q_FOREACH
+  - BOOST_FOREACH
+IncludeBlocks: Preserve
+IncludeCategories:
+  - Regex: '^"(llvm|llvm-c|clang|clang-c)/'
+    Priority: 2
+    SortPriority: 0
+  - Regex: '^(<|"(gtest|gmock|isl|json)/)'
+    Priority: 3
+    SortPriority: 0
+  - Regex: '.*'
+    Priority: 1
+    SortPriority: 0
+IncludeIsMainRegex: '(Test)?$'
+IncludeIsMainSourceRegex: ''
+IndentCaseLabels: false
+IndentGotoLabels: true
+IndentPPDirectives: None
+IndentWidth: 2
+IndentWrappedFunctionNames: false
+JavaScriptQuotes: Leave
+JavaScriptWrapImports: true
+KeepEmptyLinesAtTheStartOfBlocks: true
+MacroBlockBegin: ''
+MacroBlockEnd: ''
+MaxEmptyLinesToKeep: 1
+NamespaceIndentation: None
+ObjCBinPackProtocolList: Auto
+ObjCBlockIndentWidth: 2
+ObjCSpaceAfterProperty: false
+ObjCSpaceBeforeProtocolList: true
+PenaltyBreakAssignment: 2
+PenaltyBreakBeforeFirstCallParameter: 19
+PenaltyBreakComment: 300
+PenaltyBreakFirstLessLess: 120
+PenaltyBreakString: 1000
+PenaltyBreakTemplateDeclaration: 10
+PenaltyExcessCharacter: 1000000
+PenaltyReturnTypeOnItsOwnLine: 60
+PointerAlignment: Right
+ReflowComments: true
+SortIncludes: true
+SortUsingDeclarations: true
+SpaceAfterCStyleCast: false
+SpaceAfterLogicalNot: false
+SpaceAfterTemplateKeyword: true
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeCpp11BracedList: false
+SpaceBeforeCtorInitializerColon: true
+SpaceBeforeInheritanceColon: true
+SpaceBeforeParens: ControlStatements
+SpaceBeforeRangeBasedForLoopColon: true
+SpaceInEmptyBlock: false
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 1
+SpacesInAngles: false
+SpacesInConditionalStatement: false
+SpacesInContainerLiterals: true
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+SpaceBeforeSquareBrackets: false
+Standard: Latest
+StatementMacros:
+  - Q_UNUSED
+  - QT_REQUIRE_VERSION
+TabWidth: 2
+UseCRLF: false
+UseTab: Never
+...

diff --git a/ov_core/src/cpi/CpiBase.h b/ov_core/src/cpi/CpiBase.h
index eb4cb5232..0dc6c60bb 100644
--- a/ov_core/src/cpi/CpiBase.h
+++ b/ov_core/src/cpi/CpiBase.h
@@ -26,9 +26,8 @@
  * SOFTWARE.
  */
-
-#include <Eigen/Dense>
 #include "utils/quat_ops.h"
+#include <Eigen/Dense>
 
 /**
  * @namespace ov_core
  */
 namespace ov_core {
 
-    /**
-     * @brief Base class for continuous preintegration integrators.
-     *
-     * This is the base class that both continuous-time preintegrators extend.
-     * Please take a look at the derived classes CpiV1 and CpiV2 for the actual implementation.
-     * Please see the following publication for details on the theory @cite Eckenhoff2019IJRR :
-     * > Continuous Preintegration Theory for Graph-based Visual-Inertial Navigation
-     * > Authors: Kevin Eckenhoff, Patrick Geneva, and Guoquan Huang
-     * > http://udel.edu/~ghuang/papers/tr_cpi.pdf
-     *
-     * The steps to use this preintegration class are as follows:
-     * 1. call setLinearizationPoints() to set the bias/orientation linearization point
-     * 2. call feed_IMU() will all IMU measurements you want to precompound over
-     * 3.
access public varibles, to get means, Jacobians, and measurement covariance - */ - class CpiBase { - - public: - - - /** - * @brief Default constructor - * @param sigma_w gyroscope white noise density (rad/s/sqrt(hz)) - * @param sigma_wb gyroscope random walk (rad/s^2/sqrt(hz)) - * @param sigma_a accelerometer white noise density (m/s^2/sqrt(hz)) - * @param sigma_ab accelerometer random walk (m/s^3/sqrt(hz)) - * @param imu_avg_ if we want to average the imu measurements (IJRR paper did not do this) - */ - CpiBase(double sigma_w, double sigma_wb, double sigma_a, double sigma_ab, bool imu_avg_ = false) { - // Calculate our covariance matrix - Q_c.block(0, 0, 3, 3) = std::pow(sigma_w, 2) * eye3; - Q_c.block(3, 3, 3, 3) = std::pow(sigma_wb, 2) * eye3; - Q_c.block(6, 6, 3, 3) = std::pow(sigma_a, 2) * eye3; - Q_c.block(9, 9, 3, 3) = std::pow(sigma_ab, 2) * eye3; - imu_avg = imu_avg_; - // Calculate our unit vectors, and their skews (used in bias jacobian calcs) - e_1 << 1, 0, 0; - e_2 << 0, 1, 0; - e_3 << 0, 0, 1; - e_1x = skew_x(e_1); - e_2x = skew_x(e_2); - e_3x = skew_x(e_3); - } - - - /** - * @brief Set linearization points of the integration. - * @param[in] b_w_lin_ gyroscope bias linearization point - * @param[in] b_a_lin_ accelerometer bias linearization point - * @param[in] q_k_lin_ orientation linearization point (only model 2 uses) - * @param[in] grav_ global gravity at the current timestep - * - * This function sets the linearization points we are to preintegrate about. - * For model 2 we will also pass the q_GtoK and current gravity estimate. - */ - void setLinearizationPoints(Eigen::Matrix b_w_lin_, Eigen::Matrix b_a_lin_, - Eigen::Matrix q_k_lin_ = Eigen::Matrix::Zero(), - Eigen::Matrix grav_ = Eigen::Matrix::Zero()) { - b_w_lin = b_w_lin_; - b_a_lin = b_a_lin_; - q_k_lin = q_k_lin_; - grav = grav_; - } - - - /** - * @brief Main function that will sequentially compute the preintegration measurement. - * @param[in] t_0 first IMU timestamp - * @param[in] t_1 second IMU timestamp - * @param[in] w_m_0 first imu gyroscope measurement - * @param[in] a_m_0 first imu acceleration measurement - * @param[in] w_m_1 second imu gyroscope measurement - * @param[in] a_m_1 second imu acceleration measurement - * - * This new IMU messages and will precompound our measurements, jacobians, and measurement covariance. - * Please see both CpiV1 and CpiV2 classes for implementation details on how this works. 
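// The interface above is used roughly as follows (a minimal sketch: the Eigen template
// parameters were stripped from this excerpt and are assumed to be Eigen::Matrix<double, 3, 1>
// for vectors and Eigen::Matrix<double, 15, 15> for P_meas; the noise densities are
// illustrative EuRoC-like values, and CpiV1 is one of the two derived classes):
#include <vector>
#include <Eigen/Dense>
#include "cpi/CpiV1.h"

void example_preintegration(const std::vector<double> &times, const std::vector<Eigen::Matrix<double, 3, 1>> &w_m,
                            const std::vector<Eigen::Matrix<double, 3, 1>> &a_m) {
  // 1. construct with the continuous-time IMU noise densities
  ov_core::CpiV1 cpi(1.6968e-04, 1.9393e-05, 2.0000e-03, 3.0000e-03);
  // 2. set the bias linearization points (zero biases here for illustration)
  cpi.setLinearizationPoints(Eigen::Matrix<double, 3, 1>::Zero(), Eigen::Matrix<double, 3, 1>::Zero());
  // 3. precompound over each pair of consecutive IMU readings
  for (size_t i = 0; i + 1 < times.size(); i++)
    cpi.feed_IMU(times[i], times[i + 1], w_m[i], a_m[i], w_m[i + 1], a_m[i + 1]);
  // 4. read back the means and the measurement covariance (bias Jacobians are also public)
  Eigen::Matrix<double, 3, 1> alpha = cpi.alpha_tau; // preintegrated alpha (position) mean
  Eigen::Matrix<double, 3, 1> beta = cpi.beta_tau;   // preintegrated beta (velocity) mean
  Eigen::Matrix<double, 15, 15> P = cpi.P_meas;      // final measurement covariance
  (void)alpha; (void)beta; (void)P;
}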
- */ - virtual void feed_IMU(double t_0, double t_1, Eigen::Matrix w_m_0, Eigen::Matrix a_m_0, - Eigen::Matrix w_m_1 = Eigen::Matrix::Zero(), - Eigen::Matrix a_m_1 = Eigen::Matrix::Zero()) = 0; - - - // Flag if we should perform IMU averaging or not - // For version 1 we should average the measurement - // For version 2 we average the local true - bool imu_avg = false; - - - // Measurement Means - double DT = 0; ///< measurement integration time - Eigen::Matrix alpha_tau = Eigen::Matrix::Zero(); ///< alpha measurement mean - Eigen::Matrix beta_tau = Eigen::Matrix::Zero(); ///< beta measurement mean - Eigen::Matrix q_k2tau; ///< orientation measurement mean - Eigen::Matrix R_k2tau = Eigen::Matrix::Identity(); ///< orientation measurement mean - - // Jacobians - Eigen::Matrix J_q = Eigen::Matrix::Zero(); ///< orientation Jacobian wrt b_w - Eigen::Matrix J_a = Eigen::Matrix::Zero(); ///< alpha Jacobian wrt b_w - Eigen::Matrix J_b = Eigen::Matrix::Zero(); ///< beta Jacobian wrt b_w - Eigen::Matrix H_a = Eigen::Matrix::Zero(); ///< alpha Jacobian wrt b_a - Eigen::Matrix H_b = Eigen::Matrix::Zero(); ///< beta Jacobian wrt b_a - - // Linearization points - Eigen::Matrix b_w_lin; ///< b_w linearization point (gyroscope) - Eigen::Matrix b_a_lin; ///< b_a linearization point (accelerometer) - Eigen::Matrix q_k_lin; ///< q_k linearization point (only model 2 uses) - - /// Global gravity - Eigen::Matrix grav = Eigen::Matrix::Zero(); - - /// Our continous-time measurement noise matrix (computed from contructor noise values) - Eigen::Matrix Q_c = Eigen::Matrix::Zero(); - - /// Our final measurement covariance - Eigen::Matrix P_meas = Eigen::Matrix::Zero(); - - - //========================================================================== - // HELPER VARIABLES - //========================================================================== - - // 3x3 identity matrix - Eigen::Matrix eye3 = Eigen::Matrix::Identity(); - - // Simple unit vectors (used in bias jacobian calculations) - Eigen::Matrix e_1;// = Eigen::Matrix::Constant(1,0,0); - Eigen::Matrix e_2;// = Eigen::Matrix::Constant(0,1,0); - Eigen::Matrix e_3;// = Eigen::Matrix::Constant(0,0,1); - - // Calculate the skew-symetric of our unit vectors - Eigen::Matrix e_1x;// = skew_x(e_1); - Eigen::Matrix e_2x;// = skew_x(e_2); - Eigen::Matrix e_3x;// = skew_x(e_3); - - - }; - -} +/** + * @brief Base class for continuous preintegration integrators. + * + * This is the base class that both continuous-time preintegrators extend. + * Please take a look at the derived classes CpiV1 and CpiV2 for the actual implementation. + * Please see the following publication for details on the theory @cite Eckenhoff2019IJRR : + * > Continuous Preintegration Theory for Graph-based Visual-Inertial Navigation + * > Authors: Kevin Eckenhoff, Patrick Geneva, and Guoquan Huang + * > http://udel.edu/~ghuang/papers/tr_cpi.pdf + * + * The steps to use this preintegration class are as follows: + * 1. call setLinearizationPoints() to set the bias/orientation linearization point + * 2. call feed_IMU() will all IMU measurements you want to precompound over + * 3. 
access public varibles, to get means, Jacobians, and measurement covariance + */ +class CpiBase { + +public: + /** + * @brief Default constructor + * @param sigma_w gyroscope white noise density (rad/s/sqrt(hz)) + * @param sigma_wb gyroscope random walk (rad/s^2/sqrt(hz)) + * @param sigma_a accelerometer white noise density (m/s^2/sqrt(hz)) + * @param sigma_ab accelerometer random walk (m/s^3/sqrt(hz)) + * @param imu_avg_ if we want to average the imu measurements (IJRR paper did not do this) + */ + CpiBase(double sigma_w, double sigma_wb, double sigma_a, double sigma_ab, bool imu_avg_ = false) { + // Calculate our covariance matrix + Q_c.block(0, 0, 3, 3) = std::pow(sigma_w, 2) * eye3; + Q_c.block(3, 3, 3, 3) = std::pow(sigma_wb, 2) * eye3; + Q_c.block(6, 6, 3, 3) = std::pow(sigma_a, 2) * eye3; + Q_c.block(9, 9, 3, 3) = std::pow(sigma_ab, 2) * eye3; + imu_avg = imu_avg_; + // Calculate our unit vectors, and their skews (used in bias jacobian calcs) + e_1 << 1, 0, 0; + e_2 << 0, 1, 0; + e_3 << 0, 0, 1; + e_1x = skew_x(e_1); + e_2x = skew_x(e_2); + e_3x = skew_x(e_3); + } + + /** + * @brief Set linearization points of the integration. + * @param[in] b_w_lin_ gyroscope bias linearization point + * @param[in] b_a_lin_ accelerometer bias linearization point + * @param[in] q_k_lin_ orientation linearization point (only model 2 uses) + * @param[in] grav_ global gravity at the current timestep + * + * This function sets the linearization points we are to preintegrate about. + * For model 2 we will also pass the q_GtoK and current gravity estimate. + */ + void setLinearizationPoints(Eigen::Matrix b_w_lin_, Eigen::Matrix b_a_lin_, + Eigen::Matrix q_k_lin_ = Eigen::Matrix::Zero(), + Eigen::Matrix grav_ = Eigen::Matrix::Zero()) { + b_w_lin = b_w_lin_; + b_a_lin = b_a_lin_; + q_k_lin = q_k_lin_; + grav = grav_; + } + + /** + * @brief Main function that will sequentially compute the preintegration measurement. + * @param[in] t_0 first IMU timestamp + * @param[in] t_1 second IMU timestamp + * @param[in] w_m_0 first imu gyroscope measurement + * @param[in] a_m_0 first imu acceleration measurement + * @param[in] w_m_1 second imu gyroscope measurement + * @param[in] a_m_1 second imu acceleration measurement + * + * This new IMU messages and will precompound our measurements, jacobians, and measurement covariance. + * Please see both CpiV1 and CpiV2 classes for implementation details on how this works. 
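// The constructor above stacks the squared noise densities on the block diagonal of the
// continuous-time noise matrix Q_c; assuming the stripped template size is
// Eigen::Matrix<double, 12, 12> (four 3x3 blocks), an equivalent standalone construction is:
#include <cmath>
#include <Eigen/Dense>

Eigen::Matrix<double, 12, 12> make_Qc(double sigma_w, double sigma_wb, double sigma_a, double sigma_ab) {
  Eigen::Matrix<double, 12, 12> Q_c = Eigen::Matrix<double, 12, 12>::Zero();
  Q_c.block(0, 0, 3, 3) = std::pow(sigma_w, 2) * Eigen::Matrix3d::Identity();  // gyroscope white noise
  Q_c.block(3, 3, 3, 3) = std::pow(sigma_wb, 2) * Eigen::Matrix3d::Identity(); // gyroscope bias random walk
  Q_c.block(6, 6, 3, 3) = std::pow(sigma_a, 2) * Eigen::Matrix3d::Identity();  // accelerometer white noise
  Q_c.block(9, 9, 3, 3) = std::pow(sigma_ab, 2) * Eigen::Matrix3d::Identity(); // accelerometer bias random walk
  return Q_c;
}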
+ */ + virtual void feed_IMU(double t_0, double t_1, Eigen::Matrix w_m_0, Eigen::Matrix a_m_0, + Eigen::Matrix w_m_1 = Eigen::Matrix::Zero(), + Eigen::Matrix a_m_1 = Eigen::Matrix::Zero()) = 0; + + // Flag if we should perform IMU averaging or not + // For version 1 we should average the measurement + // For version 2 we average the local true + bool imu_avg = false; + + // Measurement Means + double DT = 0; ///< measurement integration time + Eigen::Matrix alpha_tau = Eigen::Matrix::Zero(); ///< alpha measurement mean + Eigen::Matrix beta_tau = Eigen::Matrix::Zero(); ///< beta measurement mean + Eigen::Matrix q_k2tau; ///< orientation measurement mean + Eigen::Matrix R_k2tau = Eigen::Matrix::Identity(); ///< orientation measurement mean + + // Jacobians + Eigen::Matrix J_q = Eigen::Matrix::Zero(); ///< orientation Jacobian wrt b_w + Eigen::Matrix J_a = Eigen::Matrix::Zero(); ///< alpha Jacobian wrt b_w + Eigen::Matrix J_b = Eigen::Matrix::Zero(); ///< beta Jacobian wrt b_w + Eigen::Matrix H_a = Eigen::Matrix::Zero(); ///< alpha Jacobian wrt b_a + Eigen::Matrix H_b = Eigen::Matrix::Zero(); ///< beta Jacobian wrt b_a + + // Linearization points + Eigen::Matrix b_w_lin; ///< b_w linearization point (gyroscope) + Eigen::Matrix b_a_lin; ///< b_a linearization point (accelerometer) + Eigen::Matrix q_k_lin; ///< q_k linearization point (only model 2 uses) + + /// Global gravity + Eigen::Matrix grav = Eigen::Matrix::Zero(); + + /// Our continous-time measurement noise matrix (computed from contructor noise values) + Eigen::Matrix Q_c = Eigen::Matrix::Zero(); + + /// Our final measurement covariance + Eigen::Matrix P_meas = Eigen::Matrix::Zero(); + + //========================================================================== + // HELPER VARIABLES + //========================================================================== + + // 3x3 identity matrix + Eigen::Matrix eye3 = Eigen::Matrix::Identity(); + + // Simple unit vectors (used in bias jacobian calculations) + Eigen::Matrix e_1; // = Eigen::Matrix::Constant(1,0,0); + Eigen::Matrix e_2; // = Eigen::Matrix::Constant(0,1,0); + Eigen::Matrix e_3; // = Eigen::Matrix::Constant(0,0,1); + + // Calculate the skew-symetric of our unit vectors + Eigen::Matrix e_1x; // = skew_x(e_1); + Eigen::Matrix e_2x; // = skew_x(e_2); + Eigen::Matrix e_3x; // = skew_x(e_3); +}; + +} // namespace ov_core #endif /* CPI_BASE_H */ \ No newline at end of file diff --git a/ov_core/src/cpi/CpiV1.h b/ov_core/src/cpi/CpiV1.h index 4ed92207c..fdf64b311 100644 --- a/ov_core/src/cpi/CpiV1.h +++ b/ov_core/src/cpi/CpiV1.h @@ -26,364 +26,341 @@ * SOFTWARE. */ - -#include #include "CpiBase.h" #include "utils/quat_ops.h" - +#include namespace ov_core { - - /** - * @brief Model 1 of continuous preintegration. - * - * This model is the "piecewise constant measurement assumption" which was first presented in: - * > Eckenhoff, Kevin, Patrick Geneva, and Guoquan Huang. - * > "High-accuracy preintegration for visual inertial navigation." - * > International Workshop on the Algorithmic Foundations of Robotics. 2016. - * Please see the following publication for details on the theory @cite Eckenhoff2019IJRR : - * > Continuous Preintegration Theory for Graph-based Visual-Inertial Navigation - * > Authors: Kevin Eckenhoff, Patrick Geneva, and Guoquan Huang - * > http://udel.edu/~ghuang/papers/tr_cpi.pdf - * - * The steps to use this preintegration class are as follows: - * 1. call setLinearizationPoints() to set the bias/orientation linearization point - * 2. 
call feed_IMU() will all IMU measurements you want to precompound over - * 3. access public varibles, to get means, Jacobians, and measurement covariance - */ - class CpiV1 : public CpiBase { - - public: - - /** - * @brief Default constructor for our Model 1 preintegration (piecewise constant measurement assumption) - * @param sigma_w gyroscope white noise density (rad/s/sqrt(hz)) - * @param sigma_wb gyroscope random walk (rad/s^2/sqrt(hz)) - * @param sigma_a accelerometer white noise density (m/s^2/sqrt(hz)) - * @param sigma_ab accelerometer random walk (m/s^3/sqrt(hz)) - * @param imu_avg_ if we want to average the imu measurements (IJRR paper did not do this) - */ - CpiV1(double sigma_w, double sigma_wb, double sigma_a, double sigma_ab, bool imu_avg_ = false) : - CpiBase(sigma_w, sigma_wb, sigma_a, sigma_ab, imu_avg_) {} - - - /** - * @brief Our precompound function for Model 1 - * @param[in] t_0 first IMU timestamp - * @param[in] t_1 second IMU timestamp - * @param[in] w_m_0 first imu gyroscope measurement - * @param[in] a_m_0 first imu acceleration measurement - * @param[in] w_m_1 second imu gyroscope measurement - * @param[in] a_m_1 second imu acceleration measurement - * - * We will first analytically integrate our meansurements and Jacobians. - * Then we perform numerical integration for our measurement covariance. - */ - void feed_IMU(double t_0, double t_1, Eigen::Matrix w_m_0, Eigen::Matrix a_m_0, - Eigen::Matrix w_m_1 = Eigen::Matrix::Zero(), - Eigen::Matrix a_m_1 = Eigen::Matrix::Zero()) { - - - // Get time difference - double delta_t = t_1 - t_0; - DT += delta_t; - - //If no time has passed do nothing - if (delta_t == 0) { - return; - } - - // Get estimated imu readings - Eigen::Matrix w_hat = w_m_0 - b_w_lin; - Eigen::Matrix a_hat = a_m_0 - b_a_lin; - - // If averaging, average - if (imu_avg) { - w_hat += w_m_1 - b_w_lin; - w_hat = 0.5 * w_hat; - a_hat += a_m_1 - b_a_lin; - a_hat = .5 * a_hat; - } - - // Get angle change w*dt - Eigen::Matrix w_hatdt = w_hat * delta_t; - - // Get entries of w_hat - double w_1 = w_hat(0, 0); - double w_2 = w_hat(1, 0); - double w_3 = w_hat(2, 0); - - // Get magnitude of w and wdt - double mag_w = w_hat.norm(); - double w_dt = mag_w * delta_t; - - // Threshold to determine if equations will be unstable - bool small_w = (mag_w < 0.008726646); - - // Get some of the variables used in the preintegration equations - double dt_2 = pow(delta_t, 2); - double cos_wt = cos(w_dt); - double sin_wt = sin(w_dt); - - Eigen::Matrix w_x = skew_x(w_hat); - Eigen::Matrix a_x = skew_x(a_hat); - Eigen::Matrix w_tx = skew_x(w_hatdt); - Eigen::Matrix w_x_2 = w_x * w_x; - - - //========================================================================== - // MEASUREMENT MEANS - //========================================================================== - - // Get relative rotation - Eigen::Matrix R_tau2tau1 = small_w ? 
eye3 - delta_t * w_x + (pow(delta_t, 2) / 2) * w_x_2 : - eye3 - (sin_wt / mag_w) * w_x + ((1.0 - cos_wt) / (pow(mag_w, 2.0))) * w_x_2; - - // Updated rotation and its transpose - Eigen::Matrix R_k2tau1 = R_tau2tau1 * R_k2tau; - Eigen::Matrix R_tau12k = R_k2tau1.transpose(); - - //Intermediate variables for evaluating the measurement/bias Jacobian update - double f_1; - double f_2; - double f_3; - double f_4; - - if (small_w) { - f_1 = -(pow(delta_t, 3) / 3); - f_2 = (pow(delta_t, 4) / 8); - f_3 = -(pow(delta_t, 2) / 2); - f_4 = (pow(delta_t, 3) / 6); - } else { - f_1 = (w_dt * cos_wt - sin_wt) / (pow(mag_w, 3)); - f_2 = (pow(w_dt, 2) - 2 * cos_wt - 2 * w_dt * sin_wt + 2) / (2 * pow(mag_w, 4)); - f_3 = -(1 - cos_wt) / pow(mag_w, 2); - f_4 = (w_dt - sin_wt) / pow(mag_w, 3); - } - - // Compute the main part of our analytical means - Eigen::Matrix alpha_arg = ((dt_2 / 2.0) * eye3 + f_1 * w_x + f_2 * w_x_2); - Eigen::Matrix Beta_arg = (delta_t * eye3 + f_3 * w_x + f_4 * w_x_2); - - // Matrices that will multiply the a_hat in the update expressions - Eigen::MatrixXd H_al = R_tau12k * alpha_arg; - Eigen::MatrixXd H_be = R_tau12k * Beta_arg; - - // Update the measurement means - alpha_tau += beta_tau * delta_t + H_al * a_hat; - beta_tau += H_be * a_hat; - - - //========================================================================== - // BIAS JACOBIANS (ANALYTICAL) - //========================================================================== - - // Get right Jacobian - Eigen::Matrix J_r_tau1 = small_w ? eye3 - .5 * w_tx + (1.0 / 6.0) * w_tx * w_tx : - eye3 - ((1 - cos_wt) / (pow((w_dt), 2.0))) * w_tx + - ((w_dt - sin_wt) / (pow(w_dt, 3.0))) * w_tx * w_tx; - - // Update orientation in respect to gyro bias Jacobians - J_q = R_tau2tau1 * J_q + J_r_tau1 * delta_t; - - // Update alpha and beta in respect to accel bias Jacobians - H_a -= H_al; - H_a += delta_t * H_b; - H_b -= H_be; - - // Derivatives of R_tau12k wrt bias_w entries - Eigen::MatrixXd d_R_bw_1 = -R_tau12k * skew_x(J_q * e_1); - Eigen::MatrixXd d_R_bw_2 = -R_tau12k * skew_x(J_q * e_2); - Eigen::MatrixXd d_R_bw_3 = -R_tau12k * skew_x(J_q * e_3); - - // Now compute the gyro bias Jacobian terms - double df_1_dbw_1; - double df_1_dbw_2; - double df_1_dbw_3; - - double df_2_dbw_1; - double df_2_dbw_2; - double df_2_dbw_3; - - double df_3_dbw_1; - double df_3_dbw_2; - double df_3_dbw_3; - - double df_4_dbw_1; - double df_4_dbw_2; - double df_4_dbw_3; - - if (small_w) { - double df_1_dw_mag = -(pow(delta_t, 5) / 15); - df_1_dbw_1 = w_1 * df_1_dw_mag; - df_1_dbw_2 = w_2 * df_1_dw_mag; - df_1_dbw_3 = w_3 * df_1_dw_mag; - - double df_2_dw_mag = (pow(delta_t, 6) / 72); - df_2_dbw_1 = w_1 * df_2_dw_mag; - df_2_dbw_2 = w_2 * df_2_dw_mag; - df_2_dbw_3 = w_3 * df_2_dw_mag; - - double df_3_dw_mag = -(pow(delta_t, 4) / 12); - df_3_dbw_1 = w_1 * df_3_dw_mag; - df_3_dbw_2 = w_2 * df_3_dw_mag; - df_3_dbw_3 = w_3 * df_3_dw_mag; - - double df_4_dw_mag = (pow(delta_t, 5) / 60); - df_4_dbw_1 = w_1 * df_4_dw_mag; - df_4_dbw_2 = w_2 * df_4_dw_mag; - df_4_dbw_3 = w_3 * df_4_dw_mag; - } else { - double df_1_dw_mag = (pow(w_dt, 2) * sin_wt - 3 * sin_wt + 3 * w_dt * cos_wt) / pow(mag_w, 5); - df_1_dbw_1 = w_1 * df_1_dw_mag; - df_1_dbw_2 = w_2 * df_1_dw_mag; - df_1_dbw_3 = w_3 * df_1_dw_mag; - - - double df_2_dw_mag = (pow(w_dt, 2) - 4 * cos_wt - 4 * w_dt * sin_wt + pow(w_dt, 2) * cos_wt + 4) / (pow(mag_w, 6)); - df_2_dbw_1 = w_1 * df_2_dw_mag; - df_2_dbw_2 = w_2 * df_2_dw_mag; - df_2_dbw_3 = w_3 * df_2_dw_mag; - - double df_3_dw_mag = (2 * (cos_wt - 1) + w_dt * 
sin_wt) / (pow(mag_w, 4)); - df_3_dbw_1 = w_1 * df_3_dw_mag; - df_3_dbw_2 = w_2 * df_3_dw_mag; - df_3_dbw_3 = w_3 * df_3_dw_mag; - - double df_4_dw_mag = (2 * w_dt + w_dt * cos_wt - 3 * sin_wt) / (pow(mag_w, 5)); - df_4_dbw_1 = w_1 * df_4_dw_mag; - df_4_dbw_2 = w_2 * df_4_dw_mag; - df_4_dbw_3 = w_3 * df_4_dw_mag; - } - - // Update alpha and beta gyro bias Jacobians - J_a += J_b * delta_t; - J_a.block(0, 0, 3, 1) += (d_R_bw_1 * alpha_arg + R_tau12k * (df_1_dbw_1 * w_x - f_1 * e_1x + - df_2_dbw_1 * w_x_2 - - f_2 * (e_1x * w_x + w_x * e_1x))) * a_hat; - J_a.block(0, 1, 3, 1) += (d_R_bw_2 * alpha_arg + R_tau12k * (df_1_dbw_2 * w_x - f_1 * e_2x + - df_2_dbw_2 * w_x_2 - - f_2 * (e_2x * w_x + w_x * e_2x))) * a_hat; - J_a.block(0, 2, 3, 1) += (d_R_bw_3 * alpha_arg + R_tau12k * (df_1_dbw_3 * w_x - f_1 * e_3x + - df_2_dbw_3 * w_x_2 - - f_2 * (e_3x * w_x + w_x * e_3x))) * a_hat; - J_b.block(0, 0, 3, 1) += (d_R_bw_1 * Beta_arg + R_tau12k * (df_3_dbw_1 * w_x - f_3 * e_1x + - df_4_dbw_1 * w_x_2 - - f_4 * (e_1x * w_x + w_x * e_1x))) * a_hat; - J_b.block(0, 1, 3, 1) += (d_R_bw_2 * Beta_arg + R_tau12k * (df_3_dbw_2 * w_x - f_3 * e_2x + - df_4_dbw_2 * w_x_2 - - f_4 * (e_2x * w_x + w_x * e_2x))) * a_hat; - J_b.block(0, 2, 3, 1) += (d_R_bw_3 * Beta_arg + R_tau12k * (df_3_dbw_3 * w_x - f_3 * e_3x + - df_4_dbw_3 * w_x_2 - - f_4 * (e_3x * w_x + w_x * e_3x))) * a_hat; - - - //========================================================================== - // MEASUREMENT COVARIANCE - //========================================================================== - - // Going to need orientation at intermediate time i.e. at .5*dt; - Eigen::Matrix R_mid = small_w ? eye3 - .5 * delta_t * w_x + (pow(.5 * delta_t, 2) / 2) * w_x_2 : - eye3 - (sin(mag_w * .5 * delta_t) / mag_w) * w_x + ((1.0 - cos(mag_w * .5 * delta_t)) / (pow(mag_w, 2.0))) * w_x_2; - R_mid = R_mid * R_k2tau; - - - //Compute covariance (in this implementation, we use RK4) - //k1------------------------------------------------------------------------------------------------- - - // Build state Jacobian - Eigen::Matrix F_k1 = Eigen::Matrix::Zero(); - F_k1.block(0, 0, 3, 3) = -w_x; - F_k1.block(0, 3, 3, 3) = -eye3; - F_k1.block(6, 0, 3, 3) = -R_k2tau.transpose() * a_x; - F_k1.block(6, 9, 3, 3) = -R_k2tau.transpose(); - F_k1.block(12, 6, 3, 3) = eye3; - - // Build noise Jacobian - Eigen::Matrix G_k1 = Eigen::Matrix::Zero(); - G_k1.block(0, 0, 3, 3) = -eye3; - G_k1.block(3, 3, 3, 3) = eye3; - G_k1.block(6, 6, 3, 3) = -R_k2tau.transpose(); - G_k1.block(9, 9, 3, 3) = eye3; - - // Get covariance derivative - Eigen::Matrix P_dot_k1 = F_k1 * P_meas + P_meas * F_k1.transpose() + G_k1 * Q_c * G_k1.transpose(); - - - //k2------------------------------------------------------------------------------------------------- - - // Build state Jacobian - Eigen::Matrix F_k2 = Eigen::Matrix::Zero(); - F_k2.block(0, 0, 3, 3) = -w_x; - F_k2.block(0, 3, 3, 3) = -eye3; - F_k2.block(6, 0, 3, 3) = -R_mid.transpose() * a_x; - F_k2.block(6, 9, 3, 3) = -R_mid.transpose(); - F_k2.block(12, 6, 3, 3) = eye3; - - // Build noise Jacobian - Eigen::Matrix G_k2 = Eigen::Matrix::Zero(); - G_k2.block(0, 0, 3, 3) = -eye3; - G_k2.block(3, 3, 3, 3) = eye3; - G_k2.block(6, 6, 3, 3) = -R_mid.transpose(); - G_k2.block(9, 9, 3, 3) = eye3; - - // Get covariance derivative - Eigen::Matrix P_k2 = P_meas + P_dot_k1 * delta_t / 2.0; - Eigen::Matrix P_dot_k2 = F_k2 * P_k2 + P_k2 * F_k2.transpose() + G_k2 * Q_c * G_k2.transpose(); - - 
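// The k1..k4 blocks in this section are a standard fourth-order Runge-Kutta integration of
// the covariance ODE P_dot = F P + P F^T + G Q_c G^T, with F and G evaluated at the start,
// midpoint (shared by k2 and k3), and end of the interval. (As a side note, the small_w
// threshold 0.008726646 used above is 0.5 degrees expressed in radians.) A generic sketch
// of the same pattern, assuming the stripped sizes are 15x15 and 15x12:
#include <Eigen/Dense>
using Mat15 = Eigen::Matrix<double, 15, 15>;
using Mat15x12 = Eigen::Matrix<double, 15, 12>;

Mat15 rk4_covariance(const Mat15 &P0, double dt, const Mat15 &F0, const Mat15 &Fm, const Mat15 &F1, const Mat15x12 &G0,
                     const Mat15x12 &Gm, const Mat15x12 &G1, const Eigen::Matrix<double, 12, 12> &Qc) {
  // covariance time derivative for a given linearization point
  auto Pdot = [&](const Mat15 &F, const Mat15x12 &G, const Mat15 &P) -> Mat15 {
    return F * P + P * F.transpose() + G * Qc * G.transpose();
  };
  Mat15 k1 = Pdot(F0, G0, P0);
  Mat15 k2 = Pdot(Fm, Gm, P0 + 0.5 * dt * k1); // k2 and k3 share the midpoint F and G,
  Mat15 k3 = Pdot(Fm, Gm, P0 + 0.5 * dt * k2); // exactly as in the code above
  Mat15 k4 = Pdot(F1, G1, P0 + dt * k3);
  Mat15 P1 = P0 + (dt / 6.0) * (k1 + 2.0 * k2 + 2.0 * k3 + k4);
  return 0.5 * (P1 + P1.transpose()); // re-symmetrize to keep the result positive definite
}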
//k3------------------------------------------------------------------------------------------------- - - // Our state and noise Jacobians are the same as k2 - // Since k2 and k3 correspond to the same estimates for the midpoint - Eigen::Matrix F_k3 = F_k2; - Eigen::Matrix G_k3 = G_k2; - - // Get covariance derivative - Eigen::Matrix P_k3 = P_meas + P_dot_k2 * delta_t / 2.0; - Eigen::Matrix P_dot_k3 = F_k3 * P_k3 + P_k3 * F_k3.transpose() + G_k3 * Q_c * G_k3.transpose(); - - //k4------------------------------------------------------------------------------------------------- - - // Build state Jacobian - Eigen::Matrix F_k4 = Eigen::Matrix::Zero(); - F_k4.block(0, 0, 3, 3) = -w_x; - F_k4.block(0, 3, 3, 3) = -eye3; - F_k4.block(6, 0, 3, 3) = -R_k2tau1.transpose() * a_x; - F_k4.block(6, 9, 3, 3) = -R_k2tau1.transpose(); - F_k4.block(12, 6, 3, 3) = eye3; - - // Build noise Jacobian - Eigen::Matrix G_k4 = Eigen::Matrix::Zero(); - G_k4.block(0, 0, 3, 3) = -eye3; - G_k4.block(3, 3, 3, 3) = eye3; - G_k4.block(6, 6, 3, 3) = -R_k2tau1.transpose(); - G_k4.block(9, 9, 3, 3) = eye3; - - // Get covariance derivative - Eigen::Matrix P_k4 = P_meas + P_dot_k3 * delta_t; - Eigen::Matrix P_dot_k4 = F_k4 * P_k4 + P_k4 * F_k4.transpose() + G_k4 * Q_c * G_k4.transpose(); - - - //done------------------------------------------------------------------------------------------------- - - // Collect covariance solution - // Ensure it is positive definite - P_meas += (delta_t / 6.0) * (P_dot_k1 + 2.0 * P_dot_k2 + 2.0 * P_dot_k3 + P_dot_k4); - P_meas = 0.5 * (P_meas + P_meas.transpose()); - - // Update rotation mean - // Note we had to wait to do this, since we use the old orientation in our covariance calculation - R_k2tau = R_k2tau1; - q_k2tau = rot_2_quat(R_k2tau); - - - } - - - }; - -} - +/** + * @brief Model 1 of continuous preintegration. + * + * This model is the "piecewise constant measurement assumption" which was first presented in: + * > Eckenhoff, Kevin, Patrick Geneva, and Guoquan Huang. + * > "High-accuracy preintegration for visual inertial navigation." + * > International Workshop on the Algorithmic Foundations of Robotics. 2016. + * Please see the following publication for details on the theory @cite Eckenhoff2019IJRR : + * > Continuous Preintegration Theory for Graph-based Visual-Inertial Navigation + * > Authors: Kevin Eckenhoff, Patrick Geneva, and Guoquan Huang + * > http://udel.edu/~ghuang/papers/tr_cpi.pdf + * + * The steps to use this preintegration class are as follows: + * 1. call setLinearizationPoints() to set the bias/orientation linearization point + * 2. call feed_IMU() will all IMU measurements you want to precompound over + * 3. 
access public varibles, to get means, Jacobians, and measurement covariance + */ +class CpiV1 : public CpiBase { + +public: + /** + * @brief Default constructor for our Model 1 preintegration (piecewise constant measurement assumption) + * @param sigma_w gyroscope white noise density (rad/s/sqrt(hz)) + * @param sigma_wb gyroscope random walk (rad/s^2/sqrt(hz)) + * @param sigma_a accelerometer white noise density (m/s^2/sqrt(hz)) + * @param sigma_ab accelerometer random walk (m/s^3/sqrt(hz)) + * @param imu_avg_ if we want to average the imu measurements (IJRR paper did not do this) + */ + CpiV1(double sigma_w, double sigma_wb, double sigma_a, double sigma_ab, bool imu_avg_ = false) + : CpiBase(sigma_w, sigma_wb, sigma_a, sigma_ab, imu_avg_) {} + + /** + * @brief Our precompound function for Model 1 + * @param[in] t_0 first IMU timestamp + * @param[in] t_1 second IMU timestamp + * @param[in] w_m_0 first imu gyroscope measurement + * @param[in] a_m_0 first imu acceleration measurement + * @param[in] w_m_1 second imu gyroscope measurement + * @param[in] a_m_1 second imu acceleration measurement + * + * We will first analytically integrate our meansurements and Jacobians. + * Then we perform numerical integration for our measurement covariance. + */ + void feed_IMU(double t_0, double t_1, Eigen::Matrix w_m_0, Eigen::Matrix a_m_0, + Eigen::Matrix w_m_1 = Eigen::Matrix::Zero(), + Eigen::Matrix a_m_1 = Eigen::Matrix::Zero()) { + + // Get time difference + double delta_t = t_1 - t_0; + DT += delta_t; + + // If no time has passed do nothing + if (delta_t == 0) { + return; + } + + // Get estimated imu readings + Eigen::Matrix w_hat = w_m_0 - b_w_lin; + Eigen::Matrix a_hat = a_m_0 - b_a_lin; + + // If averaging, average + if (imu_avg) { + w_hat += w_m_1 - b_w_lin; + w_hat = 0.5 * w_hat; + a_hat += a_m_1 - b_a_lin; + a_hat = .5 * a_hat; + } + + // Get angle change w*dt + Eigen::Matrix w_hatdt = w_hat * delta_t; + + // Get entries of w_hat + double w_1 = w_hat(0, 0); + double w_2 = w_hat(1, 0); + double w_3 = w_hat(2, 0); + + // Get magnitude of w and wdt + double mag_w = w_hat.norm(); + double w_dt = mag_w * delta_t; + + // Threshold to determine if equations will be unstable + bool small_w = (mag_w < 0.008726646); + + // Get some of the variables used in the preintegration equations + double dt_2 = pow(delta_t, 2); + double cos_wt = cos(w_dt); + double sin_wt = sin(w_dt); + + Eigen::Matrix w_x = skew_x(w_hat); + Eigen::Matrix a_x = skew_x(a_hat); + Eigen::Matrix w_tx = skew_x(w_hatdt); + Eigen::Matrix w_x_2 = w_x * w_x; + + //========================================================================== + // MEASUREMENT MEANS + //========================================================================== + + // Get relative rotation + Eigen::Matrix R_tau2tau1 = small_w ? 
eye3 - delta_t * w_x + (pow(delta_t, 2) / 2) * w_x_2 + : eye3 - (sin_wt / mag_w) * w_x + ((1.0 - cos_wt) / (pow(mag_w, 2.0))) * w_x_2; + + // Updated rotation and its transpose + Eigen::Matrix R_k2tau1 = R_tau2tau1 * R_k2tau; + Eigen::Matrix R_tau12k = R_k2tau1.transpose(); + + // Intermediate variables for evaluating the measurement/bias Jacobian update + double f_1; + double f_2; + double f_3; + double f_4; + + if (small_w) { + f_1 = -(pow(delta_t, 3) / 3); + f_2 = (pow(delta_t, 4) / 8); + f_3 = -(pow(delta_t, 2) / 2); + f_4 = (pow(delta_t, 3) / 6); + } else { + f_1 = (w_dt * cos_wt - sin_wt) / (pow(mag_w, 3)); + f_2 = (pow(w_dt, 2) - 2 * cos_wt - 2 * w_dt * sin_wt + 2) / (2 * pow(mag_w, 4)); + f_3 = -(1 - cos_wt) / pow(mag_w, 2); + f_4 = (w_dt - sin_wt) / pow(mag_w, 3); + } + + // Compute the main part of our analytical means + Eigen::Matrix alpha_arg = ((dt_2 / 2.0) * eye3 + f_1 * w_x + f_2 * w_x_2); + Eigen::Matrix Beta_arg = (delta_t * eye3 + f_3 * w_x + f_4 * w_x_2); + + // Matrices that will multiply the a_hat in the update expressions + Eigen::MatrixXd H_al = R_tau12k * alpha_arg; + Eigen::MatrixXd H_be = R_tau12k * Beta_arg; + + // Update the measurement means + alpha_tau += beta_tau * delta_t + H_al * a_hat; + beta_tau += H_be * a_hat; + + //========================================================================== + // BIAS JACOBIANS (ANALYTICAL) + //========================================================================== + + // Get right Jacobian + Eigen::Matrix J_r_tau1 = + small_w ? eye3 - .5 * w_tx + (1.0 / 6.0) * w_tx * w_tx + : eye3 - ((1 - cos_wt) / (pow((w_dt), 2.0))) * w_tx + ((w_dt - sin_wt) / (pow(w_dt, 3.0))) * w_tx * w_tx; + + // Update orientation in respect to gyro bias Jacobians + J_q = R_tau2tau1 * J_q + J_r_tau1 * delta_t; + + // Update alpha and beta in respect to accel bias Jacobians + H_a -= H_al; + H_a += delta_t * H_b; + H_b -= H_be; + + // Derivatives of R_tau12k wrt bias_w entries + Eigen::MatrixXd d_R_bw_1 = -R_tau12k * skew_x(J_q * e_1); + Eigen::MatrixXd d_R_bw_2 = -R_tau12k * skew_x(J_q * e_2); + Eigen::MatrixXd d_R_bw_3 = -R_tau12k * skew_x(J_q * e_3); + + // Now compute the gyro bias Jacobian terms + double df_1_dbw_1; + double df_1_dbw_2; + double df_1_dbw_3; + + double df_2_dbw_1; + double df_2_dbw_2; + double df_2_dbw_3; + + double df_3_dbw_1; + double df_3_dbw_2; + double df_3_dbw_3; + + double df_4_dbw_1; + double df_4_dbw_2; + double df_4_dbw_3; + + if (small_w) { + double df_1_dw_mag = -(pow(delta_t, 5) / 15); + df_1_dbw_1 = w_1 * df_1_dw_mag; + df_1_dbw_2 = w_2 * df_1_dw_mag; + df_1_dbw_3 = w_3 * df_1_dw_mag; + + double df_2_dw_mag = (pow(delta_t, 6) / 72); + df_2_dbw_1 = w_1 * df_2_dw_mag; + df_2_dbw_2 = w_2 * df_2_dw_mag; + df_2_dbw_3 = w_3 * df_2_dw_mag; + + double df_3_dw_mag = -(pow(delta_t, 4) / 12); + df_3_dbw_1 = w_1 * df_3_dw_mag; + df_3_dbw_2 = w_2 * df_3_dw_mag; + df_3_dbw_3 = w_3 * df_3_dw_mag; + + double df_4_dw_mag = (pow(delta_t, 5) / 60); + df_4_dbw_1 = w_1 * df_4_dw_mag; + df_4_dbw_2 = w_2 * df_4_dw_mag; + df_4_dbw_3 = w_3 * df_4_dw_mag; + } else { + double df_1_dw_mag = (pow(w_dt, 2) * sin_wt - 3 * sin_wt + 3 * w_dt * cos_wt) / pow(mag_w, 5); + df_1_dbw_1 = w_1 * df_1_dw_mag; + df_1_dbw_2 = w_2 * df_1_dw_mag; + df_1_dbw_3 = w_3 * df_1_dw_mag; + + double df_2_dw_mag = (pow(w_dt, 2) - 4 * cos_wt - 4 * w_dt * sin_wt + pow(w_dt, 2) * cos_wt + 4) / (pow(mag_w, 6)); + df_2_dbw_1 = w_1 * df_2_dw_mag; + df_2_dbw_2 = w_2 * df_2_dw_mag; + df_2_dbw_3 = w_3 * df_2_dw_mag; + + double df_3_dw_mag = (2 * (cos_wt - 1) + w_dt * sin_wt) 
/ (pow(mag_w, 4)); + df_3_dbw_1 = w_1 * df_3_dw_mag; + df_3_dbw_2 = w_2 * df_3_dw_mag; + df_3_dbw_3 = w_3 * df_3_dw_mag; + + double df_4_dw_mag = (2 * w_dt + w_dt * cos_wt - 3 * sin_wt) / (pow(mag_w, 5)); + df_4_dbw_1 = w_1 * df_4_dw_mag; + df_4_dbw_2 = w_2 * df_4_dw_mag; + df_4_dbw_3 = w_3 * df_4_dw_mag; + } + + // Update alpha and beta gyro bias Jacobians + J_a += J_b * delta_t; + J_a.block(0, 0, 3, 1) += + (d_R_bw_1 * alpha_arg + R_tau12k * (df_1_dbw_1 * w_x - f_1 * e_1x + df_2_dbw_1 * w_x_2 - f_2 * (e_1x * w_x + w_x * e_1x))) * a_hat; + J_a.block(0, 1, 3, 1) += + (d_R_bw_2 * alpha_arg + R_tau12k * (df_1_dbw_2 * w_x - f_1 * e_2x + df_2_dbw_2 * w_x_2 - f_2 * (e_2x * w_x + w_x * e_2x))) * a_hat; + J_a.block(0, 2, 3, 1) += + (d_R_bw_3 * alpha_arg + R_tau12k * (df_1_dbw_3 * w_x - f_1 * e_3x + df_2_dbw_3 * w_x_2 - f_2 * (e_3x * w_x + w_x * e_3x))) * a_hat; + J_b.block(0, 0, 3, 1) += + (d_R_bw_1 * Beta_arg + R_tau12k * (df_3_dbw_1 * w_x - f_3 * e_1x + df_4_dbw_1 * w_x_2 - f_4 * (e_1x * w_x + w_x * e_1x))) * a_hat; + J_b.block(0, 1, 3, 1) += + (d_R_bw_2 * Beta_arg + R_tau12k * (df_3_dbw_2 * w_x - f_3 * e_2x + df_4_dbw_2 * w_x_2 - f_4 * (e_2x * w_x + w_x * e_2x))) * a_hat; + J_b.block(0, 2, 3, 1) += + (d_R_bw_3 * Beta_arg + R_tau12k * (df_3_dbw_3 * w_x - f_3 * e_3x + df_4_dbw_3 * w_x_2 - f_4 * (e_3x * w_x + w_x * e_3x))) * a_hat; + + //========================================================================== + // MEASUREMENT COVARIANCE + //========================================================================== + + // Going to need orientation at intermediate time i.e. at .5*dt; + Eigen::Matrix R_mid = + small_w ? eye3 - .5 * delta_t * w_x + (pow(.5 * delta_t, 2) / 2) * w_x_2 + : eye3 - (sin(mag_w * .5 * delta_t) / mag_w) * w_x + ((1.0 - cos(mag_w * .5 * delta_t)) / (pow(mag_w, 2.0))) * w_x_2; + R_mid = R_mid * R_k2tau; + + // Compute covariance (in this implementation, we use RK4) + // k1------------------------------------------------------------------------------------------------- + + // Build state Jacobian + Eigen::Matrix F_k1 = Eigen::Matrix::Zero(); + F_k1.block(0, 0, 3, 3) = -w_x; + F_k1.block(0, 3, 3, 3) = -eye3; + F_k1.block(6, 0, 3, 3) = -R_k2tau.transpose() * a_x; + F_k1.block(6, 9, 3, 3) = -R_k2tau.transpose(); + F_k1.block(12, 6, 3, 3) = eye3; + + // Build noise Jacobian + Eigen::Matrix G_k1 = Eigen::Matrix::Zero(); + G_k1.block(0, 0, 3, 3) = -eye3; + G_k1.block(3, 3, 3, 3) = eye3; + G_k1.block(6, 6, 3, 3) = -R_k2tau.transpose(); + G_k1.block(9, 9, 3, 3) = eye3; + + // Get covariance derivative + Eigen::Matrix P_dot_k1 = F_k1 * P_meas + P_meas * F_k1.transpose() + G_k1 * Q_c * G_k1.transpose(); + + // k2------------------------------------------------------------------------------------------------- + + // Build state Jacobian + Eigen::Matrix F_k2 = Eigen::Matrix::Zero(); + F_k2.block(0, 0, 3, 3) = -w_x; + F_k2.block(0, 3, 3, 3) = -eye3; + F_k2.block(6, 0, 3, 3) = -R_mid.transpose() * a_x; + F_k2.block(6, 9, 3, 3) = -R_mid.transpose(); + F_k2.block(12, 6, 3, 3) = eye3; + + // Build noise Jacobian + Eigen::Matrix G_k2 = Eigen::Matrix::Zero(); + G_k2.block(0, 0, 3, 3) = -eye3; + G_k2.block(3, 3, 3, 3) = eye3; + G_k2.block(6, 6, 3, 3) = -R_mid.transpose(); + G_k2.block(9, 9, 3, 3) = eye3; + + // Get covariance derivative + Eigen::Matrix P_k2 = P_meas + P_dot_k1 * delta_t / 2.0; + Eigen::Matrix P_dot_k2 = F_k2 * P_k2 + P_k2 * F_k2.transpose() + G_k2 * Q_c * G_k2.transpose(); + + // k3------------------------------------------------------------------------------------------------- 
+ + // Our state and noise Jacobians are the same as k2 + // Since k2 and k3 correspond to the same estimates for the midpoint + Eigen::Matrix F_k3 = F_k2; + Eigen::Matrix G_k3 = G_k2; + + // Get covariance derivative + Eigen::Matrix P_k3 = P_meas + P_dot_k2 * delta_t / 2.0; + Eigen::Matrix P_dot_k3 = F_k3 * P_k3 + P_k3 * F_k3.transpose() + G_k3 * Q_c * G_k3.transpose(); + + // k4------------------------------------------------------------------------------------------------- + + // Build state Jacobian + Eigen::Matrix F_k4 = Eigen::Matrix::Zero(); + F_k4.block(0, 0, 3, 3) = -w_x; + F_k4.block(0, 3, 3, 3) = -eye3; + F_k4.block(6, 0, 3, 3) = -R_k2tau1.transpose() * a_x; + F_k4.block(6, 9, 3, 3) = -R_k2tau1.transpose(); + F_k4.block(12, 6, 3, 3) = eye3; + + // Build noise Jacobian + Eigen::Matrix G_k4 = Eigen::Matrix::Zero(); + G_k4.block(0, 0, 3, 3) = -eye3; + G_k4.block(3, 3, 3, 3) = eye3; + G_k4.block(6, 6, 3, 3) = -R_k2tau1.transpose(); + G_k4.block(9, 9, 3, 3) = eye3; + + // Get covariance derivative + Eigen::Matrix P_k4 = P_meas + P_dot_k3 * delta_t; + Eigen::Matrix P_dot_k4 = F_k4 * P_k4 + P_k4 * F_k4.transpose() + G_k4 * Q_c * G_k4.transpose(); + + // done------------------------------------------------------------------------------------------------- + + // Collect covariance solution + // Ensure it is positive definite + P_meas += (delta_t / 6.0) * (P_dot_k1 + 2.0 * P_dot_k2 + 2.0 * P_dot_k3 + P_dot_k4); + P_meas = 0.5 * (P_meas + P_meas.transpose()); + + // Update rotation mean + // Note we had to wait to do this, since we use the old orientation in our covariance calculation + R_k2tau = R_k2tau1; + q_k2tau = rot_2_quat(R_k2tau); + } +}; + +} // namespace ov_core #endif /* CPI_V1_H */ \ No newline at end of file diff --git a/ov_core/src/cpi/CpiV2.h b/ov_core/src/cpi/CpiV2.h index ac5dac977..8a358501e 100644 --- a/ov_core/src/cpi/CpiV2.h +++ b/ov_core/src/cpi/CpiV2.h @@ -26,469 +26,430 @@ * SOFTWARE. */ - -#include #include "CpiBase.h" #include "utils/quat_ops.h" - +#include namespace ov_core { - - /** - * @brief Model 2 of continuous preintegration. - * - * This model is the "piecewise constant local acceleration assumption." - * Please see the following publication for details on the theory @cite Eckenhoff2019IJRR : - * > Continuous Preintegration Theory for Graph-based Visual-Inertial Navigation - * > Authors: Kevin Eckenhoff, Patrick Geneva, and Guoquan Huang - * > http://udel.edu/~ghuang/papers/tr_cpi.pdf - * - * The steps to use this preintegration class are as follows: - * 1. call setLinearizationPoints() to set the bias/orientation linearization point - * 2. call feed_IMU() will all IMU measurements you want to precompound over - * 3. access public varibles, to get means, Jacobians, and measurement covariance - */ - class CpiV2 : public CpiBase { - - private: - - // Extended covariance used to handle the sampling model - Eigen::Matrix P_big = Eigen::Matrix::Zero(); - - // Our large compounded state transition Jacobian matrix - Eigen::Matrix Discrete_J_b = Eigen::Matrix::Identity(); - - - public: - - - /** - * @brief - * If we want to use analytical jacobians or not. - * In the paper we just numerically integrated the jacobians - * If set to false, we use a closed form version similar to model 1. 
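// A sketch of how this flag and the extra Model 2 linearization points would be set (hedged:
// the quaternion is assumed to be a 4x1 JPL [x, y, z, w] vector as elsewhere in ov_core, and
// the numeric values are illustrative only):
#include <Eigen/Dense>
#include "cpi/CpiV2.h"

void example_cpi_v2() {
  ov_core::CpiV2 cpi(1.6968e-04, 1.9393e-05, 2.0000e-03, 3.0000e-03);
  cpi.state_transition_jacobians = true; // numerically integrated Jacobians (paper default)
  Eigen::Matrix<double, 4, 1> q_GtoK;    // orientation linearization point (identity here)
  q_GtoK << 0, 0, 0, 1;
  Eigen::Matrix<double, 3, 1> grav_global(0.0, 0.0, 9.81); // current global gravity estimate
  cpi.setLinearizationPoints(Eigen::Matrix<double, 3, 1>::Zero(), Eigen::Matrix<double, 3, 1>::Zero(), q_GtoK, grav_global);
}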
- */ - bool state_transition_jacobians = true; - - - // Alpha and beta Jacobians wrt linearization orientation - Eigen::Matrix O_a = Eigen::Matrix::Zero(); - Eigen::Matrix O_b = Eigen::Matrix::Zero(); - - - /** - * @brief Default constructor for our Model 2 preintegration (piecewise constant local acceleration assumption) - * @param sigma_w gyroscope white noise density (rad/s/sqrt(hz)) - * @param sigma_wb gyroscope random walk (rad/s^2/sqrt(hz)) - * @param sigma_a accelerometer white noise density (m/s^2/sqrt(hz)) - * @param sigma_ab accelerometer random walk (m/s^3/sqrt(hz)) - * @param imu_avg_ if we want to average the imu measurements (IJRR paper did not do this) - */ - CpiV2(double sigma_w, double sigma_wb, double sigma_a, double sigma_ab, bool imu_avg_ = false) : - CpiBase(sigma_w, sigma_wb, sigma_a, sigma_ab, imu_avg_) {} - - - /** - * @brief Our precompound function for Model 2 - * @param[in] t_0 first IMU timestamp - * @param[in] t_1 second IMU timestamp - * @param[in] w_m_0 first imu gyroscope measurement - * @param[in] a_m_0 first imu acceleration measurement - * @param[in] w_m_1 second imu gyroscope measurement - * @param[in] a_m_1 second imu acceleration measurement - * - * We will first analytically integrate our meansurement. - * We can numerically or analytically integrate our bias jacobians. - * Then we perform numerical integration for our measurement covariance. - */ - void feed_IMU(double t_0, double t_1, Eigen::Matrix w_m_0, Eigen::Matrix a_m_0, - Eigen::Matrix w_m_1 = Eigen::Matrix::Zero(), - Eigen::Matrix a_m_1 = Eigen::Matrix::Zero()) { - - //Get time difference - double delta_t = t_1 - t_0; - DT += delta_t; - - // If no time has passed do nothing - if (delta_t == 0) { - return; - } - - // Get estimated imu readings - Eigen::Matrix w_hat = w_m_0 - b_w_lin; - Eigen::Matrix a_hat = a_m_0 - b_a_lin - R_k2tau * quat_2_Rot(q_k_lin) * grav; - - // If averaging, average - // Note: we will average the LOCAL acceleration after getting the relative rotation - if (imu_avg) { - w_hat += w_m_1 - b_w_lin; - w_hat = 0.5 * w_hat; - } - - // Get angle change w*dt - Eigen::Matrix w_hatdt = w_hat * delta_t; - - // Get entries of w_hat - double w_1 = w_hat(0, 0); - double w_2 = w_hat(1, 0); - double w_3 = w_hat(2, 0); - - //Get magnitude of w and wdt - double mag_w = w_hat.norm(); - double w_dt = mag_w * delta_t; - - //Threshold to determine if equations will be unstable - bool small_w = (mag_w < 0.008726646); - - //Get some of the variables used in the preintegration equations - double dt_2 = pow(delta_t, 2); - double cos_wt = cos(w_dt); - double sin_wt = sin(w_dt); - - Eigen::Matrix w_x = skew_x(w_hat); - Eigen::Matrix w_tx = skew_x(w_hatdt); - Eigen::Matrix w_x_2 = w_x * w_x; - - - //========================================================================== - // MEASUREMENT MEANS - //========================================================================== - - // Get relative rotation - Eigen::Matrix R_tau2tau1 = small_w ? 
eye3 - delta_t * w_x + (pow(delta_t, 2) / 2) * w_x_2 : - eye3 - (sin_wt / mag_w) * w_x + ((1.0 - cos_wt) / (pow(mag_w, 2.0))) * w_x_2; - - // Updated roation and its transpose - Eigen::Matrix R_k2tau1 = R_tau2tau1 * R_k2tau; - Eigen::Matrix R_tau12k = R_k2tau1.transpose(); - - // If averaging, average the LOCAL acceleration - if (imu_avg) { - a_hat += a_m_1 - b_a_lin - R_k2tau1 * quat_2_Rot(q_k_lin) * grav; - a_hat = 0.5 * a_hat; - } - Eigen::Matrix a_x = skew_x(a_hat); - - // Intermediate variables for evaluating the measurement/bias Jacobian update - double f_1; - double f_2; - double f_3; - double f_4; - - if (small_w) { - f_1 = -(pow(delta_t, 3) / 3); - f_2 = (pow(delta_t, 4) / 8); - f_3 = -(pow(delta_t, 2) / 2); - f_4 = (pow(delta_t, 3) / 6); - } else { - f_1 = (w_dt * cos_wt - sin_wt) / (pow(mag_w, 3)); - f_2 = (pow(w_dt, 2) - 2 * cos_wt - 2 * w_dt * sin_wt + 2) / (2 * pow(mag_w, 4)); - f_3 = -(1 - cos_wt) / pow(mag_w, 2); - f_4 = (w_dt - sin_wt) / pow(mag_w, 3); - } - - // Compute the main part of our analytical means - Eigen::Matrix alpha_arg = ((dt_2 / 2.0) * eye3 + f_1 * w_x + f_2 * w_x_2); - Eigen::Matrix Beta_arg = (delta_t * eye3 + f_3 * w_x + f_4 * w_x_2); - - // Matrices that will multiply the a_hat in the update expressions - Eigen::Matrix H_al = R_tau12k * alpha_arg; - Eigen::Matrix H_be = R_tau12k * Beta_arg; - - // Update the measurements - alpha_tau += beta_tau * delta_t + H_al * a_hat; - beta_tau += H_be * a_hat; - - - //========================================================================== - // BIAS JACOBIANS (ANALYTICAL) - //========================================================================== - - //Get right Jacobian - Eigen::Matrix J_r_tau1 = small_w ? eye3 - .5 * w_tx + (1.0 / 6.0) * w_tx * w_tx : - eye3 - ((1 - cos_wt) / (pow((w_dt), 2.0))) * w_tx + - ((w_dt - sin_wt) / (pow(w_dt, 3.0))) * w_tx * w_tx; - - // Update orientation in respect to gyro bias Jacobians - Eigen::Matrix J_save = J_q; - J_q = R_tau2tau1 * J_q + J_r_tau1 * delta_t; - - // Update alpha and beta in respect to accel bias Jacobian - H_a -= H_al; - H_a += delta_t * H_b; - H_b -= H_be; - - // Update alpha and beta in respect to q_GtoLIN Jacobian - Eigen::Matrix g_k = quat_2_Rot(q_k_lin) * grav; - O_a += delta_t * O_b; - O_a += -H_al * R_k2tau * skew_x(g_k); - O_b += -H_be * R_k2tau * skew_x(g_k); - - // Derivatives of R_tau12k wrt bias_w entries - Eigen::MatrixXd d_R_bw_1 = -R_tau12k * skew_x(J_q * e_1); - Eigen::MatrixXd d_R_bw_2 = -R_tau12k * skew_x(J_q * e_2); - Eigen::MatrixXd d_R_bw_3 = -R_tau12k * skew_x(J_q * e_3); - - - // Now compute the gyro bias Jacobian terms - double df_1_dbw_1; - double df_1_dbw_2; - double df_1_dbw_3; - - double df_2_dbw_1; - double df_2_dbw_2; - double df_2_dbw_3; - - double df_3_dbw_1; - double df_3_dbw_2; - double df_3_dbw_3; - - double df_4_dbw_1; - double df_4_dbw_2; - double df_4_dbw_3; - - - if (small_w) { - double df_1_dw_mag = -(pow(delta_t, 5) / 15); - df_1_dbw_1 = w_1 * df_1_dw_mag; - df_1_dbw_2 = w_2 * df_1_dw_mag; - df_1_dbw_3 = w_3 * df_1_dw_mag; - - double df_2_dw_mag = (pow(delta_t, 6) / 72); - df_2_dbw_1 = w_1 * df_2_dw_mag; - df_2_dbw_2 = w_2 * df_2_dw_mag; - df_2_dbw_3 = w_3 * df_2_dw_mag; - - double df_3_dw_mag = -(pow(delta_t, 4) / 12); - df_3_dbw_1 = w_1 * df_3_dw_mag; - df_3_dbw_2 = w_2 * df_3_dw_mag; - df_3_dbw_3 = w_3 * df_3_dw_mag; - - double df_4_dw_mag = (pow(delta_t, 5) / 60); - df_4_dbw_1 = w_1 * df_4_dw_mag; - df_4_dbw_2 = w_2 * df_4_dw_mag; - df_4_dbw_3 = w_3 * df_4_dw_mag; - } else { - double df_1_dw_mag = (pow(w_dt, 2) * 
sin_wt - 3 * sin_wt + 3 * w_dt * cos_wt) / pow(mag_w, 5); - df_1_dbw_1 = w_1 * df_1_dw_mag; - df_1_dbw_2 = w_2 * df_1_dw_mag; - df_1_dbw_3 = w_3 * df_1_dw_mag; - - - double df_2_dw_mag = - (pow(w_dt, 2) - 4 * cos_wt - 4 * w_dt * sin_wt + pow(w_dt, 2) * cos_wt + 4) / (pow(mag_w, 6)); - df_2_dbw_1 = w_1 * df_2_dw_mag; - df_2_dbw_2 = w_2 * df_2_dw_mag; - df_2_dbw_3 = w_3 * df_2_dw_mag; - - double df_3_dw_mag = (2 * (cos_wt - 1) + w_dt * sin_wt) / (pow(mag_w, 4)); - df_3_dbw_1 = w_1 * df_3_dw_mag; - df_3_dbw_2 = w_2 * df_3_dw_mag; - df_3_dbw_3 = w_3 * df_3_dw_mag; - - double df_4_dw_mag = (2 * w_dt + w_dt * cos_wt - 3 * sin_wt) / (pow(mag_w, 5)); - df_4_dbw_1 = w_1 * df_4_dw_mag; - df_4_dbw_2 = w_2 * df_4_dw_mag; - df_4_dbw_3 = w_3 * df_4_dw_mag; - } - - // Gravity rotated into the "tau'th" frame (i.e. start of the measurement interval) - Eigen::Matrix g_tau = R_k2tau * quat_2_Rot(q_k_lin) * grav; - - - // Update gyro bias Jacobians - J_a += J_b * delta_t; - J_a.block(0, 0, 3, 1) += (d_R_bw_1 * alpha_arg + R_tau12k * (df_1_dbw_1 * w_x - f_1 * e_1x + - df_2_dbw_1 * w_x_2 - - f_2 * (e_1x * w_x + w_x * e_1x))) * a_hat - - H_al * skew_x((J_save * e_1)) * g_tau; - J_a.block(0, 1, 3, 1) += (d_R_bw_2 * alpha_arg + R_tau12k * (df_1_dbw_2 * w_x - f_1 * e_2x + - df_2_dbw_2 * w_x_2 - - f_2 * (e_2x * w_x + w_x * e_2x))) * a_hat - - H_al * skew_x((J_save * e_2)) * g_tau; - J_a.block(0, 2, 3, 1) += (d_R_bw_3 * alpha_arg + R_tau12k * (df_1_dbw_3 * w_x - f_1 * e_3x + - df_2_dbw_3 * w_x_2 - - f_2 * (e_3x * w_x + w_x * e_3x))) * a_hat - - H_al * skew_x((J_save * e_3)) * g_tau; - J_b.block(0, 0, 3, 1) += (d_R_bw_1 * Beta_arg + R_tau12k * (df_3_dbw_1 * w_x - f_3 * e_1x + - df_4_dbw_1 * w_x_2 - - f_4 * (e_1x * w_x + w_x * e_1x))) * a_hat - - -H_be * skew_x((J_save * e_1)) * g_tau; - J_b.block(0, 1, 3, 1) += (d_R_bw_2 * Beta_arg + R_tau12k * (df_3_dbw_2 * w_x - f_3 * e_2x + - df_4_dbw_2 * w_x_2 - - f_4 * (e_2x * w_x + w_x * e_2x))) * a_hat - - H_be * skew_x((J_save * e_2)) * g_tau; - J_b.block(0, 2, 3, 1) += (d_R_bw_3 * Beta_arg + R_tau12k * (df_3_dbw_3 * w_x - f_3 * e_3x + - df_4_dbw_3 * w_x_2 - - f_4 * (e_3x * w_x + w_x * e_3x))) * a_hat - - H_be * skew_x((J_save * e_3)) * g_tau; - - - - - //========================================================================== - // MEASUREMENT COVARIANCE - //========================================================================== - - // Going to need orientation at intermediate time i.e. at .5*dt; - Eigen::Matrix R_G_to_k = quat_2_Rot(q_k_lin); - double dt_mid = delta_t / 2.0; - double w_dt_mid = mag_w * dt_mid; - Eigen::Matrix R_mid; - - // The middle of this interval (i.e., rotation from k to mid) - R_mid = small_w ? 
eye3 - dt_mid * w_x + (pow(dt_mid, 2) / 2) * w_x_2 : - eye3 - (sin(w_dt_mid) / mag_w) * w_x + ((1.0 - cos(w_dt_mid)) / (pow(mag_w, 2.0))) * w_x_2; - R_mid = R_mid * R_k2tau; - - - // Compute covariance (in this implementation, we use RK4) - //k1------------------------------------------------------------------------------------------------- - - // Build state Jacobian - Eigen::Matrix F_k1 = Eigen::Matrix::Zero(); - F_k1.block(0, 0, 3, 3) = -w_x; - F_k1.block(0, 3, 3, 3) = -eye3; - F_k1.block(6, 0, 3, 3) = -R_k2tau.transpose() * a_x; - F_k1.block(6, 9, 3, 3) = -R_k2tau.transpose(); - F_k1.block(6, 15, 3, 3) = -R_k2tau.transpose() * skew_x(R_k2tau * R_G_to_k * grav); - F_k1.block(6, 18, 3, 3) = -R_k2tau.transpose() * R_k2tau * skew_x(R_G_to_k * grav); - F_k1.block(12, 6, 3, 3) = eye3; - - // Build noise Jacobian - Eigen::Matrix G_k1 = Eigen::Matrix::Zero(); - G_k1.block(0, 0, 3, 3) = -eye3; - G_k1.block(3, 3, 3, 3) = eye3; - G_k1.block(6, 6, 3, 3) = -R_k2tau.transpose(); - G_k1.block(9, 9, 3, 3) = eye3; - - // Get state transition and covariance derivative - Eigen::Matrix Phi_dot_k1 = F_k1; - Eigen::Matrix P_dot_k1 = F_k1 * P_big + P_big * F_k1.transpose() + G_k1 * Q_c * G_k1.transpose(); - - - //k2------------------------------------------------------------------------------------------------- - - // Build state Jacobian - Eigen::Matrix F_k2 = Eigen::Matrix::Zero(); - F_k2.block(0, 0, 3, 3) = -w_x; - F_k2.block(0, 3, 3, 3) = -eye3; - F_k2.block(6, 0, 3, 3) = -R_mid.transpose() * a_x; - F_k2.block(6, 9, 3, 3) = -R_mid.transpose(); - F_k2.block(6, 15, 3, 3) = -R_mid.transpose() * skew_x(R_k2tau * R_G_to_k * grav); - F_k2.block(6, 18, 3, 3) = -R_mid.transpose() * R_k2tau * skew_x(R_G_to_k * grav); - F_k2.block(12, 6, 3, 3) = eye3; - - // Build noise Jacobian - Eigen::Matrix G_k2 = Eigen::Matrix::Zero(); - G_k2.block(0, 0, 3, 3) = -eye3; - G_k2.block(3, 3, 3, 3) = eye3; - G_k2.block(6, 6, 3, 3) = -R_mid.transpose(); - G_k2.block(9, 9, 3, 3) = eye3; - - // Get state transition and covariance derivative - Eigen::Matrix Phi_k2 = Eigen::Matrix::Identity() + Phi_dot_k1 * dt_mid; - Eigen::Matrix P_k2 = P_big + P_dot_k1 * dt_mid; - Eigen::Matrix Phi_dot_k2 = F_k2 * Phi_k2; - Eigen::Matrix P_dot_k2 = F_k2 * P_k2 + P_k2 * F_k2.transpose() + G_k2 * Q_c * G_k2.transpose(); - - - //k3------------------------------------------------------------------------------------------------- - - // Our state and noise Jacobians are the same as k2 - // Since k2 and k3 correspond to the same estimates for the midpoint - Eigen::Matrix F_k3 = F_k2; - Eigen::Matrix G_k3 = G_k2; - - // Get state transition and covariance derivative - Eigen::Matrix Phi_k3 = Eigen::Matrix::Identity() + Phi_dot_k2 * dt_mid; - Eigen::Matrix P_k3 = P_big + P_dot_k2 * dt_mid; - Eigen::Matrix Phi_dot_k3 = F_k3 * Phi_k3; - Eigen::Matrix P_dot_k3 = F_k3 * P_k3 + P_k3 * F_k3.transpose() + G_k3 * Q_c * G_k3.transpose(); - - - //k4------------------------------------------------------------------------------------------------- - - // Build state Jacobian - Eigen::Matrix F_k4 = Eigen::Matrix::Zero(); - F_k4.block(0, 0, 3, 3) = -w_x; - F_k4.block(0, 3, 3, 3) = -eye3; - F_k4.block(6, 0, 3, 3) = -R_k2tau1.transpose() * a_x; - F_k4.block(6, 9, 3, 3) = -R_k2tau1.transpose(); - F_k4.block(6, 15, 3, 3) = -R_k2tau1.transpose() * skew_x(R_k2tau * R_G_to_k * grav); - F_k4.block(6, 18, 3, 3) = -R_k2tau1.transpose() * R_k2tau * skew_x(R_G_to_k * grav); - F_k4.block(12, 6, 3, 3) = eye3; - - // Build noise Jacobian - Eigen::Matrix G_k4 = Eigen::Matrix::Zero(); - 
G_k4.block(0, 0, 3, 3) = -eye3; - G_k4.block(3, 3, 3, 3) = eye3; - G_k4.block(6, 6, 3, 3) = -R_k2tau1.transpose(); - G_k4.block(9, 9, 3, 3) = eye3; - - // Get state transition and covariance derivative - Eigen::Matrix Phi_k4 = Eigen::Matrix::Identity() + Phi_dot_k3 * delta_t; - Eigen::Matrix P_k4 = P_big + P_dot_k3 * delta_t; - Eigen::Matrix Phi_dot_k4 = F_k4 * Phi_k4; - Eigen::Matrix P_dot_k4 = F_k4 * P_k4 + P_k4 * F_k4.transpose() + G_k4 * Q_c * G_k4.transpose(); - - - //done------------------------------------------------------------------------------------------------- - - // Collect covariance solution - // Ensure it is positive definite - P_big += (delta_t / 6.0) * (P_dot_k1 + 2.0 * P_dot_k2 + 2.0 * P_dot_k3 + P_dot_k4); - P_big = 0.5 * (P_big + P_big.transpose()); - - // Calculate the state transition from time k to tau - Eigen::Matrix Phi = Eigen::Matrix::Identity() - + (delta_t / 6.0) * (Phi_dot_k1 + 2.0 * Phi_dot_k2 + 2.0 * Phi_dot_k3 + Phi_dot_k4); - - - - - //========================================================================== - // CLONE TO NEW SAMPLE TIME AND MARGINALIZE OLD SAMPLE TIME - //========================================================================== - - // Matrix that performs the clone and mariginalization - Eigen::Matrix B_k = Eigen::Matrix::Identity(); - B_k.block(15, 15, 3, 3).setZero(); - B_k.block(15, 0, 3, 3) = Eigen::Matrix::Identity(); - - // Change our measurement covariance and state transition - P_big = B_k * P_big * B_k.transpose(); - P_big = 0.5 * (P_big + P_big.transpose()); - Discrete_J_b = B_k * Phi * Discrete_J_b; - - // Our measurement covariance is the top 15x15 of our large covariance - P_meas = P_big.block(0, 0, 15, 15); - - // If we are using the state transition Jacobian, then we should overwrite the analytical versions - // Note: we flip the sign for J_q to match the Model 1 derivation - if (state_transition_jacobians) { - J_q = -Discrete_J_b.block(0, 3, 3, 3); - J_a = Discrete_J_b.block(12, 3, 3, 3); - J_b = Discrete_J_b.block(6, 3, 3, 3); - H_a = Discrete_J_b.block(12, 9, 3, 3); - H_b = Discrete_J_b.block(6, 9, 3, 3); - O_a = Discrete_J_b.block(12, 18, 3, 3); - O_b = Discrete_J_b.block(6, 18, 3, 3); - } - - - // Update rotation mean - // Note we had to wait to do this, since we use the old orientation in our covariance calculation - R_k2tau = R_k2tau1; - q_k2tau = rot_2_quat(R_k2tau); - - - } - - - }; - -} +/** + * @brief Model 2 of continuous preintegration. + * + * This model is the "piecewise constant local acceleration assumption." + * Please see the following publication for details on the theory @cite Eckenhoff2019IJRR : + * > Continuous Preintegration Theory for Graph-based Visual-Inertial Navigation + * > Authors: Kevin Eckenhoff, Patrick Geneva, and Guoquan Huang + * > http://udel.edu/~ghuang/papers/tr_cpi.pdf + * + * The steps to use this preintegration class are as follows: + * 1. call setLinearizationPoints() to set the bias/orientation linearization point + * 2. call feed_IMU() with all IMU measurements you want to precompound over + * 3. access public variables, to get means, Jacobians, and measurement covariance + */ +class CpiV2 : public CpiBase { + +private: + // Extended covariance used to handle the sampling model + Eigen::Matrix P_big = Eigen::Matrix::Zero(); + + // Our large compounded state transition Jacobian matrix + Eigen::Matrix Discrete_J_b = Eigen::Matrix::Identity(); + +public: + /** + * @brief + * If we want to use analytical Jacobians or not.
+ * In the paper we just numerically integrated the Jacobians. + * If set to false, we use a closed-form version similar to Model 1. + */ + bool state_transition_jacobians = true; + + // Alpha and beta Jacobians wrt linearization orientation + Eigen::Matrix O_a = Eigen::Matrix::Zero(); + Eigen::Matrix O_b = Eigen::Matrix::Zero(); + + /** + * @brief Default constructor for our Model 2 preintegration (piecewise constant local acceleration assumption) + * @param sigma_w gyroscope white noise density (rad/s/sqrt(hz)) + * @param sigma_wb gyroscope random walk (rad/s^2/sqrt(hz)) + * @param sigma_a accelerometer white noise density (m/s^2/sqrt(hz)) + * @param sigma_ab accelerometer random walk (m/s^3/sqrt(hz)) + * @param imu_avg_ if we want to average the imu measurements (IJRR paper did not do this) + */ + CpiV2(double sigma_w, double sigma_wb, double sigma_a, double sigma_ab, bool imu_avg_ = false) + : CpiBase(sigma_w, sigma_wb, sigma_a, sigma_ab, imu_avg_) {} + + /** + * @brief Our precompound function for Model 2 + * @param[in] t_0 first IMU timestamp + * @param[in] t_1 second IMU timestamp + * @param[in] w_m_0 first imu gyroscope measurement + * @param[in] a_m_0 first imu acceleration measurement + * @param[in] w_m_1 second imu gyroscope measurement + * @param[in] a_m_1 second imu acceleration measurement + * + * We will first analytically integrate our measurement. + * We can numerically or analytically integrate our bias Jacobians. + * Then we perform numerical integration for our measurement covariance. + */ + void feed_IMU(double t_0, double t_1, Eigen::Matrix w_m_0, Eigen::Matrix a_m_0, + Eigen::Matrix w_m_1 = Eigen::Matrix::Zero(), + Eigen::Matrix a_m_1 = Eigen::Matrix::Zero()) { + + // Get time difference + double delta_t = t_1 - t_0; + DT += delta_t; + + // If no time has passed do nothing + if (delta_t == 0) { + return; + } + + // Get estimated imu readings + Eigen::Matrix w_hat = w_m_0 - b_w_lin; + Eigen::Matrix a_hat = a_m_0 - b_a_lin - R_k2tau * quat_2_Rot(q_k_lin) * grav; + + // If averaging, average + // Note: we will average the LOCAL acceleration after getting the relative rotation + if (imu_avg) { + w_hat += w_m_1 - b_w_lin; + w_hat = 0.5 * w_hat; + } + + // Get angle change w*dt + Eigen::Matrix w_hatdt = w_hat * delta_t; + + // Get entries of w_hat + double w_1 = w_hat(0, 0); + double w_2 = w_hat(1, 0); + double w_3 = w_hat(2, 0); + + // Get magnitude of w and wdt + double mag_w = w_hat.norm(); + double w_dt = mag_w * delta_t; + + // Threshold to determine if equations will be unstable + bool small_w = (mag_w < 0.008726646); + + // Get some of the variables used in the preintegration equations + double dt_2 = pow(delta_t, 2); + double cos_wt = cos(w_dt); + double sin_wt = sin(w_dt); + + Eigen::Matrix w_x = skew_x(w_hat); + Eigen::Matrix w_tx = skew_x(w_hatdt); + Eigen::Matrix w_x_2 = w_x * w_x; + + //========================================================================== + // MEASUREMENT MEANS + //========================================================================== + + // Get relative rotation + Eigen::Matrix R_tau2tau1 = small_w ? 
eye3 - delta_t * w_x + (pow(delta_t, 2) / 2) * w_x_2 + : eye3 - (sin_wt / mag_w) * w_x + ((1.0 - cos_wt) / (pow(mag_w, 2.0))) * w_x_2; + + // Updated rotation and its transpose + Eigen::Matrix R_k2tau1 = R_tau2tau1 * R_k2tau; + Eigen::Matrix R_tau12k = R_k2tau1.transpose(); + + // If averaging, average the LOCAL acceleration + if (imu_avg) { + a_hat += a_m_1 - b_a_lin - R_k2tau1 * quat_2_Rot(q_k_lin) * grav; + a_hat = 0.5 * a_hat; + } + Eigen::Matrix a_x = skew_x(a_hat); + + // Intermediate variables for evaluating the measurement/bias Jacobian update + double f_1; + double f_2; + double f_3; + double f_4; + + if (small_w) { + f_1 = -(pow(delta_t, 3) / 3); + f_2 = (pow(delta_t, 4) / 8); + f_3 = -(pow(delta_t, 2) / 2); + f_4 = (pow(delta_t, 3) / 6); + } else { + f_1 = (w_dt * cos_wt - sin_wt) / (pow(mag_w, 3)); + f_2 = (pow(w_dt, 2) - 2 * cos_wt - 2 * w_dt * sin_wt + 2) / (2 * pow(mag_w, 4)); + f_3 = -(1 - cos_wt) / pow(mag_w, 2); + f_4 = (w_dt - sin_wt) / pow(mag_w, 3); + } + + // Compute the main part of our analytical means + Eigen::Matrix alpha_arg = ((dt_2 / 2.0) * eye3 + f_1 * w_x + f_2 * w_x_2); + Eigen::Matrix Beta_arg = (delta_t * eye3 + f_3 * w_x + f_4 * w_x_2); + + // Matrices that will multiply the a_hat in the update expressions + Eigen::Matrix H_al = R_tau12k * alpha_arg; + Eigen::Matrix H_be = R_tau12k * Beta_arg; + + // Update the measurements + alpha_tau += beta_tau * delta_t + H_al * a_hat; + beta_tau += H_be * a_hat; + + //========================================================================== + // BIAS JACOBIANS (ANALYTICAL) + //========================================================================== + + // Get right Jacobian + Eigen::Matrix J_r_tau1 = + small_w ? eye3 - .5 * w_tx + (1.0 / 6.0) * w_tx * w_tx + : eye3 - ((1 - cos_wt) / (pow((w_dt), 2.0))) * w_tx + ((w_dt - sin_wt) / (pow(w_dt, 3.0))) * w_tx * w_tx; + + // Update orientation with respect to gyro bias Jacobians + Eigen::Matrix J_save = J_q; + J_q = R_tau2tau1 * J_q + J_r_tau1 * delta_t; + + // Update alpha and beta with respect to accel bias Jacobian + H_a -= H_al; + H_a += delta_t * H_b; + H_b -= H_be; + + // Update alpha and beta with respect to q_GtoLIN Jacobian + Eigen::Matrix g_k = quat_2_Rot(q_k_lin) * grav; + O_a += delta_t * O_b; + O_a += -H_al * R_k2tau * skew_x(g_k); + O_b += -H_be * R_k2tau * skew_x(g_k); + + // Derivatives of R_tau12k wrt bias_w entries + Eigen::MatrixXd d_R_bw_1 = -R_tau12k * skew_x(J_q * e_1); + Eigen::MatrixXd d_R_bw_2 = -R_tau12k * skew_x(J_q * e_2); + Eigen::MatrixXd d_R_bw_3 = -R_tau12k * skew_x(J_q * e_3); + + // Now compute the gyro bias Jacobian terms + double df_1_dbw_1; + double df_1_dbw_2; + double df_1_dbw_3; + + double df_2_dbw_1; + double df_2_dbw_2; + double df_2_dbw_3; + + double df_3_dbw_1; + double df_3_dbw_2; + double df_3_dbw_3; + + double df_4_dbw_1; + double df_4_dbw_2; + double df_4_dbw_3; + + if (small_w) { + double df_1_dw_mag = -(pow(delta_t, 5) / 15); + df_1_dbw_1 = w_1 * df_1_dw_mag; + df_1_dbw_2 = w_2 * df_1_dw_mag; + df_1_dbw_3 = w_3 * df_1_dw_mag; + + double df_2_dw_mag = (pow(delta_t, 6) / 72); + df_2_dbw_1 = w_1 * df_2_dw_mag; + df_2_dbw_2 = w_2 * df_2_dw_mag; + df_2_dbw_3 = w_3 * df_2_dw_mag; + + double df_3_dw_mag = -(pow(delta_t, 4) / 12); + df_3_dbw_1 = w_1 * df_3_dw_mag; + df_3_dbw_2 = w_2 * df_3_dw_mag; + df_3_dbw_3 = w_3 * df_3_dw_mag; + + double df_4_dw_mag = (pow(delta_t, 5) / 60); + df_4_dbw_1 = w_1 * df_4_dw_mag; + df_4_dbw_2 = w_2 * df_4_dw_mag; + df_4_dbw_3 = w_3 * df_4_dw_mag; + } else { + double df_1_dw_mag = (pow(w_dt, 2) * sin_wt 
- 3 * sin_wt + 3 * w_dt * cos_wt) / pow(mag_w, 5); + df_1_dbw_1 = w_1 * df_1_dw_mag; + df_1_dbw_2 = w_2 * df_1_dw_mag; + df_1_dbw_3 = w_3 * df_1_dw_mag; + + double df_2_dw_mag = (pow(w_dt, 2) - 4 * cos_wt - 4 * w_dt * sin_wt + pow(w_dt, 2) * cos_wt + 4) / (pow(mag_w, 6)); + df_2_dbw_1 = w_1 * df_2_dw_mag; + df_2_dbw_2 = w_2 * df_2_dw_mag; + df_2_dbw_3 = w_3 * df_2_dw_mag; + + double df_3_dw_mag = (2 * (cos_wt - 1) + w_dt * sin_wt) / (pow(mag_w, 4)); + df_3_dbw_1 = w_1 * df_3_dw_mag; + df_3_dbw_2 = w_2 * df_3_dw_mag; + df_3_dbw_3 = w_3 * df_3_dw_mag; + + double df_4_dw_mag = (2 * w_dt + w_dt * cos_wt - 3 * sin_wt) / (pow(mag_w, 5)); + df_4_dbw_1 = w_1 * df_4_dw_mag; + df_4_dbw_2 = w_2 * df_4_dw_mag; + df_4_dbw_3 = w_3 * df_4_dw_mag; + } + + // Gravity rotated into the "tau'th" frame (i.e. start of the measurement interval) + Eigen::Matrix g_tau = R_k2tau * quat_2_Rot(q_k_lin) * grav; + + // Update gyro bias Jacobians + J_a += J_b * delta_t; + J_a.block(0, 0, 3, 1) += + (d_R_bw_1 * alpha_arg + R_tau12k * (df_1_dbw_1 * w_x - f_1 * e_1x + df_2_dbw_1 * w_x_2 - f_2 * (e_1x * w_x + w_x * e_1x))) * a_hat - + H_al * skew_x((J_save * e_1)) * g_tau; + J_a.block(0, 1, 3, 1) += + (d_R_bw_2 * alpha_arg + R_tau12k * (df_1_dbw_2 * w_x - f_1 * e_2x + df_2_dbw_2 * w_x_2 - f_2 * (e_2x * w_x + w_x * e_2x))) * a_hat - + H_al * skew_x((J_save * e_2)) * g_tau; + J_a.block(0, 2, 3, 1) += + (d_R_bw_3 * alpha_arg + R_tau12k * (df_1_dbw_3 * w_x - f_1 * e_3x + df_2_dbw_3 * w_x_2 - f_2 * (e_3x * w_x + w_x * e_3x))) * a_hat - + H_al * skew_x((J_save * e_3)) * g_tau; + J_b.block(0, 0, 3, 1) += + (d_R_bw_1 * Beta_arg + R_tau12k * (df_3_dbw_1 * w_x - f_3 * e_1x + df_4_dbw_1 * w_x_2 - f_4 * (e_1x * w_x + w_x * e_1x))) * a_hat - + -H_be * skew_x((J_save * e_1)) * g_tau; + J_b.block(0, 1, 3, 1) += + (d_R_bw_2 * Beta_arg + R_tau12k * (df_3_dbw_2 * w_x - f_3 * e_2x + df_4_dbw_2 * w_x_2 - f_4 * (e_2x * w_x + w_x * e_2x))) * a_hat - + H_be * skew_x((J_save * e_2)) * g_tau; + J_b.block(0, 2, 3, 1) += + (d_R_bw_3 * Beta_arg + R_tau12k * (df_3_dbw_3 * w_x - f_3 * e_3x + df_4_dbw_3 * w_x_2 - f_4 * (e_3x * w_x + w_x * e_3x))) * a_hat - + H_be * skew_x((J_save * e_3)) * g_tau; + + //========================================================================== + // MEASUREMENT COVARIANCE + //========================================================================== + + // Going to need orientation at intermediate time i.e. at .5*dt; + Eigen::Matrix R_G_to_k = quat_2_Rot(q_k_lin); + double dt_mid = delta_t / 2.0; + double w_dt_mid = mag_w * dt_mid; + Eigen::Matrix R_mid; + + // The middle of this interval (i.e., rotation from k to mid) + R_mid = small_w ? 
eye3 - dt_mid * w_x + (pow(dt_mid, 2) / 2) * w_x_2 + : eye3 - (sin(w_dt_mid) / mag_w) * w_x + ((1.0 - cos(w_dt_mid)) / (pow(mag_w, 2.0))) * w_x_2; + R_mid = R_mid * R_k2tau; + + // Compute covariance (in this implementation, we use RK4) + // k1------------------------------------------------------------------------------------------------- + + // Build state Jacobian + Eigen::Matrix F_k1 = Eigen::Matrix::Zero(); + F_k1.block(0, 0, 3, 3) = -w_x; + F_k1.block(0, 3, 3, 3) = -eye3; + F_k1.block(6, 0, 3, 3) = -R_k2tau.transpose() * a_x; + F_k1.block(6, 9, 3, 3) = -R_k2tau.transpose(); + F_k1.block(6, 15, 3, 3) = -R_k2tau.transpose() * skew_x(R_k2tau * R_G_to_k * grav); + F_k1.block(6, 18, 3, 3) = -R_k2tau.transpose() * R_k2tau * skew_x(R_G_to_k * grav); + F_k1.block(12, 6, 3, 3) = eye3; + + // Build noise Jacobian + Eigen::Matrix G_k1 = Eigen::Matrix::Zero(); + G_k1.block(0, 0, 3, 3) = -eye3; + G_k1.block(3, 3, 3, 3) = eye3; + G_k1.block(6, 6, 3, 3) = -R_k2tau.transpose(); + G_k1.block(9, 9, 3, 3) = eye3; + + // Get state transition and covariance derivative + Eigen::Matrix Phi_dot_k1 = F_k1; + Eigen::Matrix P_dot_k1 = F_k1 * P_big + P_big * F_k1.transpose() + G_k1 * Q_c * G_k1.transpose(); + + // k2------------------------------------------------------------------------------------------------- + + // Build state Jacobian + Eigen::Matrix F_k2 = Eigen::Matrix::Zero(); + F_k2.block(0, 0, 3, 3) = -w_x; + F_k2.block(0, 3, 3, 3) = -eye3; + F_k2.block(6, 0, 3, 3) = -R_mid.transpose() * a_x; + F_k2.block(6, 9, 3, 3) = -R_mid.transpose(); + F_k2.block(6, 15, 3, 3) = -R_mid.transpose() * skew_x(R_k2tau * R_G_to_k * grav); + F_k2.block(6, 18, 3, 3) = -R_mid.transpose() * R_k2tau * skew_x(R_G_to_k * grav); + F_k2.block(12, 6, 3, 3) = eye3; + + // Build noise Jacobian + Eigen::Matrix G_k2 = Eigen::Matrix::Zero(); + G_k2.block(0, 0, 3, 3) = -eye3; + G_k2.block(3, 3, 3, 3) = eye3; + G_k2.block(6, 6, 3, 3) = -R_mid.transpose(); + G_k2.block(9, 9, 3, 3) = eye3; + + // Get state transition and covariance derivative + Eigen::Matrix Phi_k2 = Eigen::Matrix::Identity() + Phi_dot_k1 * dt_mid; + Eigen::Matrix P_k2 = P_big + P_dot_k1 * dt_mid; + Eigen::Matrix Phi_dot_k2 = F_k2 * Phi_k2; + Eigen::Matrix P_dot_k2 = F_k2 * P_k2 + P_k2 * F_k2.transpose() + G_k2 * Q_c * G_k2.transpose(); + + // k3------------------------------------------------------------------------------------------------- + + // Our state and noise Jacobians are the same as k2 + // Since k2 and k3 correspond to the same estimates for the midpoint + Eigen::Matrix F_k3 = F_k2; + Eigen::Matrix G_k3 = G_k2; + + // Get state transition and covariance derivative + Eigen::Matrix Phi_k3 = Eigen::Matrix::Identity() + Phi_dot_k2 * dt_mid; + Eigen::Matrix P_k3 = P_big + P_dot_k2 * dt_mid; + Eigen::Matrix Phi_dot_k3 = F_k3 * Phi_k3; + Eigen::Matrix P_dot_k3 = F_k3 * P_k3 + P_k3 * F_k3.transpose() + G_k3 * Q_c * G_k3.transpose(); + + // k4------------------------------------------------------------------------------------------------- + + // Build state Jacobian + Eigen::Matrix F_k4 = Eigen::Matrix::Zero(); + F_k4.block(0, 0, 3, 3) = -w_x; + F_k4.block(0, 3, 3, 3) = -eye3; + F_k4.block(6, 0, 3, 3) = -R_k2tau1.transpose() * a_x; + F_k4.block(6, 9, 3, 3) = -R_k2tau1.transpose(); + F_k4.block(6, 15, 3, 3) = -R_k2tau1.transpose() * skew_x(R_k2tau * R_G_to_k * grav); + F_k4.block(6, 18, 3, 3) = -R_k2tau1.transpose() * R_k2tau * skew_x(R_G_to_k * grav); + F_k4.block(12, 6, 3, 3) = eye3; + + // Build noise Jacobian + Eigen::Matrix G_k4 = Eigen::Matrix::Zero(); + 
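// ----------------------------------------------------------------------------
// [Editorial sketch, not part of the original patch] The k1..k4 blocks in this
// hunk perform one RK4 step on the continuous-time covariance and state
// transition equations, Pdot = F*P + P*F' + G*Qc*G' and Phidot = F*Phi, with
// the Jacobians evaluated at the start (k1), the midpoint (k2 and k3 share the
// same midpoint estimate), and the end (k4) of the interval. A minimal
// self-contained version of the covariance update, assuming only Eigen:
#if 0
#include <Eigen/Dense>

// One RK4 step of Pdot = F*P + P*F^T + G*Qc*G^T over an interval dt.
// F1/G1, F2/G2, F4/G4 are the state/noise Jacobians at the start, midpoint,
// and end of the interval (the k3 stage reuses the midpoint Jacobians).
static Eigen::MatrixXd rk4_covariance_step(const Eigen::MatrixXd &P, double dt,
                                           const Eigen::MatrixXd &F1, const Eigen::MatrixXd &G1,
                                           const Eigen::MatrixXd &F2, const Eigen::MatrixXd &G2,
                                           const Eigen::MatrixXd &F4, const Eigen::MatrixXd &G4,
                                           const Eigen::MatrixXd &Qc) {
  // Covariance time derivative for a given pair of Jacobians
  auto Pdot = [&](const Eigen::MatrixXd &F, const Eigen::MatrixXd &G, const Eigen::MatrixXd &Pk) {
    return Eigen::MatrixXd(F * Pk + Pk * F.transpose() + G * Qc * G.transpose());
  };
  Eigen::MatrixXd k1 = Pdot(F1, G1, P);
  Eigen::MatrixXd k2 = Pdot(F2, G2, P + 0.5 * dt * k1);
  Eigen::MatrixXd k3 = Pdot(F2, G2, P + 0.5 * dt * k2);
  Eigen::MatrixXd k4 = Pdot(F4, G4, P + dt * k3);
  Eigen::MatrixXd P_new = P + (dt / 6.0) * (k1 + 2.0 * k2 + 2.0 * k3 + k4);
  return 0.5 * (P_new + P_new.transpose()); // re-symmetrize, as the code below does
}
#endif
// ----------------------------------------------------------------------------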
G_k4.block(0, 0, 3, 3) = -eye3; + G_k4.block(3, 3, 3, 3) = eye3; + G_k4.block(6, 6, 3, 3) = -R_k2tau1.transpose(); + G_k4.block(9, 9, 3, 3) = eye3; + + // Get state transition and covariance derivative + Eigen::Matrix Phi_k4 = Eigen::Matrix::Identity() + Phi_dot_k3 * delta_t; + Eigen::Matrix P_k4 = P_big + P_dot_k3 * delta_t; + Eigen::Matrix Phi_dot_k4 = F_k4 * Phi_k4; + Eigen::Matrix P_dot_k4 = F_k4 * P_k4 + P_k4 * F_k4.transpose() + G_k4 * Q_c * G_k4.transpose(); + + // done------------------------------------------------------------------------------------------------- + + // Collect covariance solution + // Ensure it is positive definite + P_big += (delta_t / 6.0) * (P_dot_k1 + 2.0 * P_dot_k2 + 2.0 * P_dot_k3 + P_dot_k4); + P_big = 0.5 * (P_big + P_big.transpose()); + + // Calculate the state transition from time k to tau + Eigen::Matrix Phi = + Eigen::Matrix::Identity() + (delta_t / 6.0) * (Phi_dot_k1 + 2.0 * Phi_dot_k2 + 2.0 * Phi_dot_k3 + Phi_dot_k4); + + //========================================================================== + // CLONE TO NEW SAMPLE TIME AND MARGINALIZE OLD SAMPLE TIME + //========================================================================== + + // Matrix that performs the clone and marginalization + Eigen::Matrix B_k = Eigen::Matrix::Identity(); + B_k.block(15, 15, 3, 3).setZero(); + B_k.block(15, 0, 3, 3) = Eigen::Matrix::Identity(); + + // Change our measurement covariance and state transition + P_big = B_k * P_big * B_k.transpose(); + P_big = 0.5 * (P_big + P_big.transpose()); + Discrete_J_b = B_k * Phi * Discrete_J_b; + + // Our measurement covariance is the top 15x15 of our large covariance + P_meas = P_big.block(0, 0, 15, 15); + + // If we are using the state transition Jacobian, then we should overwrite the analytical versions + // Note: we flip the sign for J_q to match the Model 1 derivation + if (state_transition_jacobians) { + J_q = -Discrete_J_b.block(0, 3, 3, 3); + J_a = Discrete_J_b.block(12, 3, 3, 3); + J_b = Discrete_J_b.block(6, 3, 3, 3); + H_a = Discrete_J_b.block(12, 9, 3, 3); + H_b = Discrete_J_b.block(6, 9, 3, 3); + O_a = Discrete_J_b.block(12, 18, 3, 3); + O_b = Discrete_J_b.block(6, 18, 3, 3); + } + + // Update rotation mean + // Note we had to wait to do this, since we use the old orientation in our covariance calculation + R_k2tau = R_k2tau1; + q_k2tau = rot_2_quat(R_k2tau); + } +}; + +} // namespace ov_core #endif /* CPI_V2_H */ \ No newline at end of file diff --git a/ov_core/src/dummy.cpp b/ov_core/src/dummy.cpp index fbd16dbc5..0b43aa229 100644 --- a/ov_core/src/dummy.cpp +++ b/ov_core/src/dummy.cpp @@ -19,14 +19,13 @@ * along with this program. If not, see . */ - /** * @namespace ov_core * * This has the core algorithms that all projects within the OpenVINS ecosystem leverage. - * The purpose is to allow for the reuse of code that could be shared between different localization systems (i.e. msckf-based, batch-based, etc.). - * These algorithms are the foundation which is necessary before we can even write an estimator that can perform localization. - * The key components of the ov_core codebase are the following: + * The purpose is to allow for the reuse of code that could be shared between different localization systems (i.e. msckf-based, batch-based, + * etc.). These algorithms are the foundation which is necessary before we can even write an estimator that can perform localization. 
The + * key components of the ov_core codebase are the following: * * - 3d feature initialization (see @ref ov_core::FeatureInitializer) * - Inertial state initialization (see @ref ov_core::InertialInitializer) @@ -42,8 +41,7 @@ * If you are looking for the different types please take a look at the ov_type namespace for the ones we have. * */ -namespace ov_core { } - +namespace ov_core {} /** * @namespace ov_type @@ -71,12 +69,4 @@ namespace ov_core { } * * */ -namespace ov_type { } - - - - - - - - +namespace ov_type {} diff --git a/ov_core/src/feat/Feature.cpp b/ov_core/src/feat/Feature.cpp index 39472d650..d389d96df 100644 --- a/ov_core/src/feat/Feature.cpp +++ b/ov_core/src/feat/Feature.cpp @@ -20,72 +20,62 @@ */ #include "Feature.h" - using namespace ov_core; - - - void Feature::clean_old_measurements(std::vector valid_times) { - - // Loop through each of the cameras we have - for(auto const &pair : timestamps) { - - // Assert that we have all the parts of a measurement - assert(timestamps[pair.first].size() == uvs[pair.first].size()); - assert(timestamps[pair.first].size() == uvs_norm[pair.first].size()); - - // Our iterators - auto it1 = timestamps[pair.first].begin(); - auto it2 = uvs[pair.first].begin(); - auto it3 = uvs_norm[pair.first].begin(); - - // Loop through measurement times, remove ones that are not in our timestamps - while (it1 != timestamps[pair.first].end()) { - if (std::find(valid_times.begin(),valid_times.end(),*it1) == valid_times.end()) { - it1 = timestamps[pair.first].erase(it1); - it2 = uvs[pair.first].erase(it2); - it3 = uvs_norm[pair.first].erase(it3); - } else { - ++it1; - ++it2; - ++it3; - } - } + // Loop through each of the cameras we have + for (auto const &pair : timestamps) { + + // Assert that we have all the parts of a measurement + assert(timestamps[pair.first].size() == uvs[pair.first].size()); + assert(timestamps[pair.first].size() == uvs_norm[pair.first].size()); + + // Our iterators + auto it1 = timestamps[pair.first].begin(); + auto it2 = uvs[pair.first].begin(); + auto it3 = uvs_norm[pair.first].begin(); + + // Loop through measurement times, remove ones that are not in our timestamps + while (it1 != timestamps[pair.first].end()) { + if (std::find(valid_times.begin(), valid_times.end(), *it1) == valid_times.end()) { + it1 = timestamps[pair.first].erase(it1); + it2 = uvs[pair.first].erase(it2); + it3 = uvs_norm[pair.first].erase(it3); + } else { + ++it1; + ++it2; + ++it3; + } } - + } } void Feature::clean_older_measurements(double timestamp) { - - // Loop through each of the cameras we have - for(auto const &pair : timestamps) { - - // Assert that we have all the parts of a measurement - assert(timestamps[pair.first].size() == uvs[pair.first].size()); - assert(timestamps[pair.first].size() == uvs_norm[pair.first].size()); - - // Our iterators - auto it1 = timestamps[pair.first].begin(); - auto it2 = uvs[pair.first].begin(); - auto it3 = uvs_norm[pair.first].begin(); - - // Loop through measurement times, remove ones that are older then the specified one - while (it1 != timestamps[pair.first].end()) { - if (*it1 <= timestamp) { - it1 = timestamps[pair.first].erase(it1); - it2 = uvs[pair.first].erase(it2); - it3 = uvs_norm[pair.first].erase(it3); - } else { - ++it1; - ++it2; - ++it3; - } - } + // Loop through each of the cameras we have + for (auto const &pair : timestamps) { + + // Assert that we have all the parts of a measurement + assert(timestamps[pair.first].size() == uvs[pair.first].size()); + assert(timestamps[pair.first].size() == 
uvs_norm[pair.first].size()); + + // Our iterators + auto it1 = timestamps[pair.first].begin(); + auto it2 = uvs[pair.first].begin(); + auto it3 = uvs_norm[pair.first].begin(); + + // Loop through measurement times, remove ones that are older than the specified one + while (it1 != timestamps[pair.first].end()) { + if (*it1 <= timestamp) { + it1 = timestamps[pair.first].erase(it1); + it2 = uvs[pair.first].erase(it2); + it3 = uvs_norm[pair.first].erase(it3); + } else { + ++it1; + ++it2; + ++it3; + } } + } } - - diff --git a/ov_core/src/feat/Feature.h b/ov_core/src/feat/Feature.h index f87343917..ba0d89b34 100644 --- a/ov_core/src/feat/Feature.h +++ b/ov_core/src/feat/Feature.h @@ -21,75 +21,70 @@ #ifndef OV_CORE_FEATURE_H #define OV_CORE_FEATURE_H - -#include +#include #include #include -#include +#include namespace ov_core { - /** - * @brief Sparse feature class used to collect measurements - * - * This feature class allows for holding of all tracking information for a given feature. - * Each feature has a unique ID assigned to it, and should have a set of feature tracks alongside it. - * See the FeatureDatabase class for details on how we load information into this, and how we delete features. - */ - class Feature { - - public: - - /// Unique ID of this feature - size_t featid; - - /// If this feature should be deleted - bool to_delete; - - /// UV coordinates that this feature has been seen from (mapped by camera ID) - std::unordered_map> uvs; +/** + * @brief Sparse feature class used to collect measurements + * + * This feature class allows for holding of all tracking information for a given feature. + * Each feature has a unique ID assigned to it, and should have a set of feature tracks alongside it. + * See the FeatureDatabase class for details on how we load information into this, and how we delete features. + */ +class Feature { - /// UV normalized coordinates that this feature has been seen from (mapped by camera ID) - std::unordered_map> uvs_norm; +public: + /// Unique ID of this feature + size_t featid; - /// Timestamps of each UV measurement (mapped by camera ID) - std::unordered_map> timestamps; + /// If this feature should be deleted + bool to_delete; - /// What camera ID our pose is anchored in!! By default the first measurement is the anchor. - int anchor_cam_id = -1; + /// UV coordinates that this feature has been seen from (mapped by camera ID) + std::unordered_map> uvs; - /// Timestamp of anchor clone - double anchor_clone_timestamp; + /// UV normalized coordinates that this feature has been seen from (mapped by camera ID) + std::unordered_map> uvs_norm; - /// Triangulated position of this feature, in the anchor frame - Eigen::Vector3d p_FinA; + /// Timestamps of each UV measurement (mapped by camera ID) + std::unordered_map> timestamps; - /// Triangulated position of this feature, in the global frame - Eigen::Vector3d p_FinG; + /// What camera ID our pose is anchored in!! By default the first measurement is the anchor. + int anchor_cam_id = -1; + /// Timestamp of anchor clone + double anchor_clone_timestamp; - /** - * @brief Remove measurements that do not occur at passed timestamps. - * - * Given a series of valid timestamps, this will remove all measurements that have not occurred at these times. - * This would normally be used to ensure that the measurements that we have occur at our clone times.
- * - * @param valid_times Vector of timestamps that our measurements must occur at - */ - void clean_old_measurements(std::vector valid_times); + /// Triangulated position of this feature, in the anchor frame + Eigen::Vector3d p_FinA; - /** - * @brief Remove measurements that are older then the specified timestamp. - * - * Given a valid timestamp, this will remove all measurements that have occured earlier then this. - * - * @param timestamp Timestamps that our measurements must occur after - */ - void clean_older_measurements(double timestamp); + /// Triangulated position of this feature, in the global frame + Eigen::Vector3d p_FinG; - }; + /** + * @brief Remove measurements that do not occur at passed timestamps. + * + * Given a series of valid timestamps, this will remove all measurements that have not occurred at these times. + * This would normally be used to ensure that the measurements that we have occur at our clone times. + * + * @param valid_times Vector of timestamps that our measurements must occur at + */ + void clean_old_measurements(std::vector valid_times); -} + /** + * @brief Remove measurements that are older than the specified timestamp. + * + * Given a valid timestamp, this will remove all measurements that have occurred earlier than this. + * + * @param timestamp Timestamp that our measurements must occur after + */ + void clean_older_measurements(double timestamp); +}; +} // namespace ov_core #endif /* OV_CORE_FEATURE_H */ \ No newline at end of file diff --git a/ov_core/src/feat/FeatureDatabase.h b/ov_core/src/feat/FeatureDatabase.h index 5c08c7584..2dc30d5e6 100644 --- a/ov_core/src/feat/FeatureDatabase.h +++ b/ov_core/src/feat/FeatureDatabase.h @@ -21,381 +21,369 @@ #ifndef OV_CORE_FEATURE_DATABASE_H #define OV_CORE_FEATURE_DATABASE_H - -#include -#include -#include #include +#include +#include +#include #include "Feature.h" - namespace ov_core { - /** - * @brief Database containing features we are currently tracking. - * - * Each visual tracker has this database in it and it contains all features that we are tracking. - * The trackers will insert information into this database when they get new measurements from doing tracking. - * A user would then query this database for features that can be used for update and remove them after they have been processed. - * - * - * @m_class{m-note m-warning} - * - * @par A Note on Multi-Threading Support - * There is some support for asynchronous multi-threaded access. - * Since each feature is a pointer just directly returning and using them is not thread safe. - * Thus, to be thread safe, use the "remove" flag for each function which will remove it from this feature database. - * This prevents the trackers from adding new measurements and editing the feature information. - * For example, if you are asynchronous tracking cameras and you chose to update the state, then remove all features you will use in update. - * The feature trackers will continue to add features while you update, whose measurements can be used in the next update step! - * - */ - class FeatureDatabase { - - public: - - /** - * @brief Default constructor - */ - FeatureDatabase() {} - - /** - * @brief Get a specified feature - * @param id What feature we want to get - * @param remove Set to true if you want to remove the feature from the database (you will need to handle the freeing of memory) - * @return Either a feature object, or null if it is not in the database.
- */ - std::shared_ptr get_feature(size_t id, bool remove=false) { - std::unique_lock lck(mtx); - if (features_idlookup.find(id) != features_idlookup.end()) { - std::shared_ptr temp = features_idlookup.at(id); - if(remove) features_idlookup.erase(id); - return temp; - } else { - return nullptr; - } - } - - - /** - * @brief Update a feature object - * @param id ID of the feature we will update - * @param timestamp time that this measurement occured at - * @param cam_id which camera this measurement was from - * @param u raw u coordinate - * @param v raw v coordinate - * @param u_n undistorted/normalized u coordinate - * @param v_n undistorted/normalized v coordinate - * - * This will update a given feature based on the passed ID it has. - * It will create a new feature, if it is an ID that we have not seen before. - */ - void update_feature(size_t id, double timestamp, size_t cam_id, - float u, float v, float u_n, float v_n) { - - // Find this feature using the ID lookup - std::unique_lock lck(mtx); - if (features_idlookup.find(id) != features_idlookup.end()) { - // Get our feature - std::shared_ptr feat = features_idlookup.at(id); - // Append this new information to it! - feat->uvs[cam_id].push_back(Eigen::Vector2f(u, v)); - feat->uvs_norm[cam_id].push_back(Eigen::Vector2f(u_n, v_n)); - feat->timestamps[cam_id].push_back(timestamp); - return; - } - - // Debug info - //ROS_INFO("featdb - adding new feature %d",(int)id); - - // Else we have not found the feature, so lets make it be a new one! - std::shared_ptr feat = std::make_shared(); - feat->featid = id; - feat->uvs[cam_id].push_back(Eigen::Vector2f(u, v)); - feat->uvs_norm[cam_id].push_back(Eigen::Vector2f(u_n, v_n)); - feat->timestamps[cam_id].push_back(timestamp); - - // Append this new feature into our database - features_idlookup[id] = feat; - } - - - /** - * @brief Get features that do not have newer measurement then the specified time. - * - * This function will return all features that do not a measurement at a time greater than the specified time. - * For example this could be used to get features that have not been successfully tracked into the newest frame. - * All features returned will not have any measurements occurring at a time greater then the specified. 
- */ - std::vector> features_not_containing_newer(double timestamp, bool remove=false, bool skip_deleted=false) { - - // Our vector of features that do not have measurements after the specified time - std::vector> feats_old; - - // Now lets loop through all features, and just make sure they are not old - std::unique_lock lck(mtx); - for (auto it = features_idlookup.begin(); it != features_idlookup.end();) { - // Skip if already deleted - if(skip_deleted && (*it).second->to_delete) { - it++; - continue; - } - // Loop through each camera - bool has_newer_measurement = false; - for (auto const &pair : (*it).second->timestamps) { - // If we have a measurement greater-than or equal to the specified, this measurement is find - if (!pair.second.empty() && pair.second.at(pair.second.size()-1) >= timestamp) { - has_newer_measurement = true; - break; - } - } - // If it is not being actively tracked, then it is old - if (!has_newer_measurement) { - feats_old.push_back((*it).second); - if(remove) features_idlookup.erase(it++); - else it++; - } else { - it++; - } - } - - // Debugging - //std::cout << "feature db size = " << features_idlookup.size() << std::endl; - - // Return the old features - return feats_old; - - } - - - /** - * @brief Get features that has measurements older then the specified time. - * - * This will collect all features that have measurements occurring before the specified timestamp. - * For example, we would want to remove all features older then the last clone/state in our sliding window. - */ - std::vector> features_containing_older(double timestamp, bool remove=false, bool skip_deleted=false) { - - // Our vector of old features - std::vector> feats_old; - - // Now lets loop through all features, and just make sure they are not old - std::unique_lock lck(mtx); - for (auto it = features_idlookup.begin(); it != features_idlookup.end();) { - // Skip if already deleted - if(skip_deleted && (*it).second->to_delete) { - it++; - continue; - } - // Loop through each camera - bool found_containing_older = false; - for (auto const &pair : (*it).second->timestamps) { - if (!pair.second.empty() && pair.second.at(0) < timestamp) { - found_containing_older = true; - break; - } - } - // If it has an older timestamp, then add it - if(found_containing_older) { - feats_old.push_back((*it).second); - if(remove) features_idlookup.erase(it++); - else it++; - } else { - it++; - } - } - - // Debugging - //std::cout << "feature db size = " << features_idlookup.size() << std::endl; - - // Return the old features - return feats_old; - - } - - /** - * @brief Get features that has measurements at the specified time. - * - * This function will return all features that have the specified time in them. - * This would be used to get all features that occurred at a specific clone/state. 
- */ - std::vector> features_containing(double timestamp, bool remove=false, bool skip_deleted=false) { - - // Our vector of old features - std::vector> feats_has_timestamp; - - // Now lets loop through all features, and just make sure they are not - std::unique_lock lck(mtx); - for (auto it = features_idlookup.begin(); it != features_idlookup.end();) { - // Skip if already deleted - if(skip_deleted && (*it).second->to_delete) { - it++; - continue; - } - // Boolean if it has the timestamp - bool has_timestamp = false; - for (auto const &pair : (*it).second->timestamps) { - // Loop through all timestamps, and see if it has it - for (auto &timefeat : pair.second) { - if (timefeat == timestamp) { - has_timestamp = true; - break; - } - } - // Break out if we found a single timestamp that is equal to the specified time - if (has_timestamp) { - break; - } - } - // Remove this feature if it contains the specified timestamp - if (has_timestamp) { - feats_has_timestamp.push_back((*it).second); - if(remove) features_idlookup.erase(it++); - else it++; - } else { - it++; - } - } - - // Debugging - //std::cout << "feature db size = " << features_idlookup.size() << std::endl; - //std::cout << "return vector = " << feats_has_timestamp.size() << std::endl; - - // Return the features - return feats_has_timestamp; - - } - - /** - * @brief This function will delete all features that have been used up. - * - * If a feature was unable to be used, it will still remain since it will not have a delete flag set - */ - void cleanup() { - // Debug - //int sizebefore = (int)features_idlookup.size(); - // Loop through all features - std::unique_lock lck(mtx); - for (auto it = features_idlookup.begin(); it != features_idlookup.end();) { - // If delete flag is set, then delete it - // NOTE: if we are using a shared pointer, then no need to do this! - if ((*it).second->to_delete) { - //delete (*it).second; - features_idlookup.erase(it++); - } else { - it++; - } - } - // Debug - //std::cout << "feat db = " << sizebefore << " -> " << (int)features_idlookup.size() << std::endl; +/** + * @brief Database containing features we are currently tracking. + * + * Each visual tracker has this database in it and it contains all features that we are tracking. + * The trackers will insert information into this database when they get new measurements from doing tracking. + * A user would then query this database for features that can be used for update and remove them after they have been processed. + * + * + * @m_class{m-note m-warning} + * + * @par A Note on Multi-Threading Support + * There is some support for asynchronous multi-threaded access. + * Since each feature is a pointer just directly returning and using them is not thread safe. + * Thus, to be thread safe, use the "remove" flag for each function which will remove it from this feature database. + * This prevents the trackers from adding new measurements and editing the feature information. + * For example, if you are asynchronous tracking cameras and you chose to update the state, then remove all features you will use in update. + * The feature trackers will continue to add features while you update, whose measurements can be used in the next update step! 
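 * (Editorial sketch, not part of the original patch: a minimal example of the
 * thread-safe pattern described above; the feature ID, timestamps, and pixel
 * values are made up for illustration.)
 * @code
 * ov_core::FeatureDatabase db;
 * // tracker thread: append a measurement of feature 42 seen from camera 0
 * db.update_feature(42, 0.10, 0, 100.f, 50.f, 0.31f, 0.12f);
 * // update thread: take (and remove) features that lost track at this time,
 * // so the trackers cannot edit them while the state update runs
 * auto feats = db.features_not_containing_newer(0.10, true);
 * @endcode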
+ * + */ +class FeatureDatabase { + +public: + /** + * @brief Default constructor + */ + FeatureDatabase() {} + + /** + * @brief Get a specified feature + * @param id What feature we want to get + * @param remove Set to true if you want to remove the feature from the database (you will need to handle the freeing of memory) + * @return Either a feature object, or null if it is not in the database. + */ + std::shared_ptr get_feature(size_t id, bool remove = false) { + std::unique_lock lck(mtx); + if (features_idlookup.find(id) != features_idlookup.end()) { + std::shared_ptr temp = features_idlookup.at(id); + if (remove) + features_idlookup.erase(id); + return temp; + } else { + return nullptr; + } + } + + /** + * @brief Update a feature object + * @param id ID of the feature we will update + * @param timestamp time that this measurement occurred at + * @param cam_id which camera this measurement was from + * @param u raw u coordinate + * @param v raw v coordinate + * @param u_n undistorted/normalized u coordinate + * @param v_n undistorted/normalized v coordinate + * + * This will update a given feature based on the passed ID it has. + * It will create a new feature, if it is an ID that we have not seen before. + */ + void update_feature(size_t id, double timestamp, size_t cam_id, float u, float v, float u_n, float v_n) { + + // Find this feature using the ID lookup + std::unique_lock lck(mtx); + if (features_idlookup.find(id) != features_idlookup.end()) { + // Get our feature + std::shared_ptr feat = features_idlookup.at(id); + // Append this new information to it! + feat->uvs[cam_id].push_back(Eigen::Vector2f(u, v)); + feat->uvs_norm[cam_id].push_back(Eigen::Vector2f(u_n, v_n)); + feat->timestamps[cam_id].push_back(timestamp); + return; + } + + // Debug info + // ROS_INFO("featdb - adding new feature %d",(int)id); + + // Else we have not found the feature, so lets make it be a new one! + std::shared_ptr feat = std::make_shared(); + feat->featid = id; + feat->uvs[cam_id].push_back(Eigen::Vector2f(u, v)); + feat->uvs_norm[cam_id].push_back(Eigen::Vector2f(u_n, v_n)); + feat->timestamps[cam_id].push_back(timestamp); + + // Append this new feature into our database + features_idlookup[id] = feat; + } + + /** + * @brief Get features that do not have a newer measurement than the specified time. + * + * This function will return all features that do not have a measurement at a time greater than the specified time. + * For example this could be used to get features that have not been successfully tracked into the newest frame. + * All features returned will not have any measurements occurring at a time greater than the specified time. 
+ */ + std::vector> features_not_containing_newer(double timestamp, bool remove = false, bool skip_deleted = false) { + + // Our vector of features that do not have measurements after the specified time + std::vector> feats_old; + + // Now lets loop through all features, and just make sure they are not old + std::unique_lock lck(mtx); + for (auto it = features_idlookup.begin(); it != features_idlookup.end();) { + // Skip if already deleted + if (skip_deleted && (*it).second->to_delete) { + it++; + continue; + } + // Loop through each camera + bool has_newer_measurement = false; + for (auto const &pair : (*it).second->timestamps) { + // If we have a measurement greater-than or equal to the specified, this measurement is fine + if (!pair.second.empty() && pair.second.at(pair.second.size() - 1) >= timestamp) { + has_newer_measurement = true; + break; + } + } + // If it is not being actively tracked, then it is old + if (!has_newer_measurement) { + feats_old.push_back((*it).second); + if (remove) + features_idlookup.erase(it++); + else + it++; + } else { + it++; + } + } + + // Debugging + // std::cout << "feature db size = " << features_idlookup.size() << std::endl; + + // Return the old features + return feats_old; + } + + /** + * @brief Get features that have measurements older than the specified time. + * + * This will collect all features that have measurements occurring before the specified timestamp. + * For example, we would want to remove all features older than the last clone/state in our sliding window. 
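 * (Editorial sketch, not part of the original patch: t_marg is a hypothetical
 * marginalization time and db a database instance.)
 * @code
 * auto stale = db.features_containing_older(t_marg);
 * db.cleanup_measurements(t_marg); // then drop the old measurements themselves
 * @endcode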
+ */ + std::vector> features_containing_older(double timestamp, bool remove = false, bool skip_deleted = false) { + + // Our vector of old features + std::vector> feats_old; + + // Now lets loop through all features, and just make sure they are not old + std::unique_lock lck(mtx); + for (auto it = features_idlookup.begin(); it != features_idlookup.end();) { + // Skip if already deleted + if (skip_deleted && (*it).second->to_delete) { + it++; + continue; + } + // Loop through each camera + bool found_containing_older = false; + for (auto const &pair : (*it).second->timestamps) { + if (!pair.second.empty() && pair.second.at(0) < timestamp) { + found_containing_older = true; + break; } - - - /** - * @brief Returns the size of the feature database - */ - size_t size() { - std::unique_lock lck(mtx); - return features_idlookup.size(); + } + // If it has an older timestamp, then add it + if (found_containing_older) { + feats_old.push_back((*it).second); + if (remove) + features_idlookup.erase(it++); + else + it++; + } else { + it++; + } + } + + // Debugging + // std::cout << "feature db size = " << features_idlookup.size() << std::endl; + + // Return the old features + return feats_old; + } + + /** + * @brief Get features that has measurements at the specified time. + * + * This function will return all features that have the specified time in them. + * This would be used to get all features that occurred at a specific clone/state. + */ + std::vector> features_containing(double timestamp, bool remove = false, bool skip_deleted = false) { + + // Our vector of old features + std::vector> feats_has_timestamp; + + // Now lets loop through all features, and just make sure they are not + std::unique_lock lck(mtx); + for (auto it = features_idlookup.begin(); it != features_idlookup.end();) { + // Skip if already deleted + if (skip_deleted && (*it).second->to_delete) { + it++; + continue; + } + // Boolean if it has the timestamp + bool has_timestamp = false; + for (auto const &pair : (*it).second->timestamps) { + // Loop through all timestamps, and see if it has it + for (auto &timefeat : pair.second) { + if (timefeat == timestamp) { + has_timestamp = true; + break; + } } - - - /** - * @brief Returns the internal data (should not normally be used) - */ - std::unordered_map> get_internal_data() { - std::unique_lock lck(mtx); - return features_idlookup; + // Break out if we found a single timestamp that is equal to the specified time + if (has_timestamp) { + break; } - - - /** - * @brief Will update the passed database with this database's latest feature information. 
- */ - void append_new_measurements(const std::shared_ptr& database) { - std::unique_lock lck(mtx); - - // Loop through the other database's internal database - //int sizebefore = (int)features_idlookup.size(); - for(const auto &feat : database->get_internal_data()) { - if (features_idlookup.find(feat.first) != features_idlookup.end()) { - - // For this feature, now try to append the new measurement data - std::shared_ptr temp = features_idlookup.at(feat.first); - for(const auto × : feat.second->timestamps) { - // Append the whole camera vector is not seen - // Otherwise need to loop through each and append - size_t cam_id = times.first; - if(temp->timestamps.find(cam_id)==temp->timestamps.end()) { - temp->timestamps[cam_id] = feat.second->timestamps.at(cam_id); - temp->uvs[cam_id] = feat.second->uvs.at(cam_id); - temp->uvs_norm[cam_id] = feat.second->uvs_norm.at(cam_id); - } else { - auto temp_times = temp->timestamps.at(cam_id); - for(size_t i=0; itimestamps.at(cam_id).size(); i++) { - double time_to_find = feat.second->timestamps.at(cam_id).at(i); - if(std::find(temp_times.begin(),temp_times.end(),time_to_find)==temp_times.end()) { - temp->timestamps.at(cam_id).push_back(feat.second->timestamps.at(cam_id).at(i)); - temp->uvs.at(cam_id).push_back(feat.second->uvs.at(cam_id).at(i)); - temp->uvs_norm.at(cam_id).push_back(feat.second->uvs_norm.at(cam_id).at(i)); - } - } - } - } - - } else { - - // Else we have not found the feature, so lets make it be a new one! - std::shared_ptr temp = std::make_shared(); - temp->featid = feat.second->featid; - temp->timestamps = feat.second->timestamps; - temp->uvs = feat.second->uvs; - temp->uvs_norm = feat.second->uvs_norm; - features_idlookup[feat.first] = temp; - - } + } + // Remove this feature if it contains the specified timestamp + if (has_timestamp) { + feats_has_timestamp.push_back((*it).second); + if (remove) + features_idlookup.erase(it++); + else + it++; + } else { + it++; + } + } + + // Debugging + // std::cout << "feature db size = " << features_idlookup.size() << std::endl; + // std::cout << "return vector = " << feats_has_timestamp.size() << std::endl; + + // Return the features + return feats_has_timestamp; + } + + /** + * @brief This function will delete all features that have been used up. + * + * If a feature was unable to be used, it will still remain since it will not have a delete flag set + */ + void cleanup() { + // Debug + // int sizebefore = (int)features_idlookup.size(); + // Loop through all features + std::unique_lock lck(mtx); + for (auto it = features_idlookup.begin(); it != features_idlookup.end();) { + // If delete flag is set, then delete it + // NOTE: if we are using a shared pointer, then no need to do this! 
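// ----------------------------------------------------------------------------
// [Editorial note, not part of the original patch] The loop below erases from
// a std::unordered_map while iterating via the post-increment idiom
// features_idlookup.erase(it++), which advances the iterator before the erased
// node is invalidated. Since C++11 the equivalent form uses erase's return
// value:
#if 0
for (auto it = features_idlookup.begin(); it != features_idlookup.end();) {
  if (it->second->to_delete)
    it = features_idlookup.erase(it); // erase() returns the next valid iterator
  else
    ++it;
}
#endif
// ----------------------------------------------------------------------------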
+ if ((*it).second->to_delete) { + // delete (*it).second; + features_idlookup.erase(it++); + } else { + it++; + } + } + // Debug + // std::cout << "feat db = " << sizebefore << " -> " << (int)features_idlookup.size() << std::endl; + } + + /** + * @brief This function will delete all feature measurements that are older then the specified timestamp + */ + void cleanup_measurements(double timestamp) { + std::unique_lock lck(mtx); + for (auto it = features_idlookup.begin(); it != features_idlookup.end();) { + // Remove the older measurements + (*it).second->clean_older_measurements(timestamp); + // Count how many measurements + int ct_meas = 0; + for (const auto &pair : (*it).second->timestamps) { + ct_meas += (*it).second->timestamps.at(pair.first).size(); + } + // If delete flag is set, then delete it + // NOTE: if we are using a shared pointer, then no need to do this! + if (ct_meas < 1) { + // delete (*it).second; + features_idlookup.erase(it++); + } else { + it++; + } + } + } + + /** + * @brief Returns the size of the feature database + */ + size_t size() { + std::unique_lock lck(mtx); + return features_idlookup.size(); + } + + /** + * @brief Returns the internal data (should not normally be used) + */ + std::unordered_map> get_internal_data() { + std::unique_lock lck(mtx); + return features_idlookup; + } + + /** + * @brief Will update the passed database with this database's latest feature information. + */ + void append_new_measurements(const std::shared_ptr &database) { + std::unique_lock lck(mtx); + + // Loop through the other database's internal database + // int sizebefore = (int)features_idlookup.size(); + for (const auto &feat : database->get_internal_data()) { + if (features_idlookup.find(feat.first) != features_idlookup.end()) { + + // For this feature, now try to append the new measurement data + std::shared_ptr temp = features_idlookup.at(feat.first); + for (const auto × : feat.second->timestamps) { + // Append the whole camera vector is not seen + // Otherwise need to loop through each and append + size_t cam_id = times.first; + if (temp->timestamps.find(cam_id) == temp->timestamps.end()) { + temp->timestamps[cam_id] = feat.second->timestamps.at(cam_id); + temp->uvs[cam_id] = feat.second->uvs.at(cam_id); + temp->uvs_norm[cam_id] = feat.second->uvs_norm.at(cam_id); + } else { + auto temp_times = temp->timestamps.at(cam_id); + for (size_t i = 0; i < feat.second->timestamps.at(cam_id).size(); i++) { + double time_to_find = feat.second->timestamps.at(cam_id).at(i); + if (std::find(temp_times.begin(), temp_times.end(), time_to_find) == temp_times.end()) { + temp->timestamps.at(cam_id).push_back(feat.second->timestamps.at(cam_id).at(i)); + temp->uvs.at(cam_id).push_back(feat.second->uvs.at(cam_id).at(i)); + temp->uvs_norm.at(cam_id).push_back(feat.second->uvs_norm.at(cam_id).at(i)); + } } - //std::cout << "feat db = " << sizebefore << " -> " << (int)features_idlookup.size() << std::endl; - + } } - protected: - - /// Mutex lock for our map - std::mutex mtx; - - /// Our lookup array that allow use to query based on ID - std::unordered_map> features_idlookup; - - - }; - - -} + } else { + + // Else we have not found the feature, so lets make it be a new one! 
+ std::shared_ptr temp = std::make_shared(); + temp->featid = feat.second->featid; + temp->timestamps = feat.second->timestamps; + temp->uvs = feat.second->uvs; + temp->uvs_norm = feat.second->uvs_norm; + features_idlookup[feat.first] = temp; + } + } + // std::cout << "feat db = " << sizebefore << " -> " << (int)features_idlookup.size() << std::endl; + } + +protected: + /// Mutex lock for our map + std::mutex mtx; + + /// Our lookup array that allow use to query based on ID + std::unordered_map> features_idlookup; +}; + +} // namespace ov_core #endif /* OV_CORE_FEATURE_DATABASE_H */ \ No newline at end of file diff --git a/ov_core/src/feat/FeatureInitializer.cpp b/ov_core/src/feat/FeatureInitializer.cpp index ab010d26f..57173974f 100644 --- a/ov_core/src/feat/FeatureInitializer.cpp +++ b/ov_core/src/feat/FeatureInitializer.cpp @@ -22,407 +22,386 @@ using namespace ov_core; - - - -bool FeatureInitializer::single_triangulation(Feature* feat, std::unordered_map> &clonesCAM) { - - - // Total number of measurements - // Also set the first measurement to be the anchor frame - int total_meas = 0; - size_t anchor_most_meas = 0; - size_t most_meas = 0; - for (auto const& pair : feat->timestamps) { - total_meas += (int)pair.second.size(); - if(pair.second.size() > most_meas) { - anchor_most_meas = pair.first; - most_meas = pair.second.size(); - } +bool FeatureInitializer::single_triangulation(Feature *feat, std::unordered_map> &clonesCAM) { + + // Total number of measurements + // Also set the first measurement to be the anchor frame + int total_meas = 0; + size_t anchor_most_meas = 0; + size_t most_meas = 0; + for (auto const &pair : feat->timestamps) { + total_meas += (int)pair.second.size(); + if (pair.second.size() > most_meas) { + anchor_most_meas = pair.first; + most_meas = pair.second.size(); } - feat->anchor_cam_id = anchor_most_meas; - feat->anchor_clone_timestamp = feat->timestamps.at(feat->anchor_cam_id).back(); - - // Our linear system matrices - Eigen::Matrix3d A = Eigen::Matrix3d::Zero(); - Eigen::Vector3d b = Eigen::Vector3d::Zero(); - - // Get the position of the anchor pose - ClonePose anchorclone = clonesCAM.at(feat->anchor_cam_id).at(feat->anchor_clone_timestamp); - const Eigen::Matrix &R_GtoA = anchorclone.Rot(); - const Eigen::Matrix &p_AinG = anchorclone.pos(); - - // Loop through each camera for this feature - for (auto const& pair : feat->timestamps) { - - // Add CAM_I features - for (size_t m = 0; m < feat->timestamps.at(pair.first).size(); m++) { - - // Get the position of this clone in the global - const Eigen::Matrix &R_GtoCi = clonesCAM.at(pair.first).at(feat->timestamps.at(pair.first).at(m)).Rot(); - const Eigen::Matrix &p_CiinG = clonesCAM.at(pair.first).at(feat->timestamps.at(pair.first).at(m)).pos(); - - // Convert current position relative to anchor - Eigen::Matrix R_AtoCi; - R_AtoCi.noalias() = R_GtoCi*R_GtoA.transpose(); - Eigen::Matrix p_CiinA; - p_CiinA.noalias() = R_GtoA*(p_CiinG-p_AinG); - - // Get the UV coordinate normal - Eigen::Matrix b_i; - b_i << feat->uvs_norm.at(pair.first).at(m)(0), feat->uvs_norm.at(pair.first).at(m)(1), 1; - b_i = R_AtoCi.transpose() * b_i; - b_i = b_i / b_i.norm(); - Eigen::Matrix3d Bperp = skew_x(b_i); - - // Append to our linear system - Eigen::Matrix3d Ai = Bperp.transpose() * Bperp; - A += Ai; - b += Ai * p_CiinA; - - } - } - - // Solve the linear system - Eigen::MatrixXd p_f = A.colPivHouseholderQr().solve(b); - - // Check A and p_f - Eigen::JacobiSVD svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV); - Eigen::MatrixXd 
singularValues; - singularValues.resize(svd.singularValues().rows(), 1); - singularValues = svd.singularValues(); - double condA = singularValues(0, 0) / singularValues(singularValues.rows() - 1, 0); - - // If we have a bad condition number, or it is too close - // Then set the flag for bad (i.e. set z-axis to nan) - if (std::abs(condA) > _options.max_cond_number || p_f(2,0) < _options.min_dist || p_f(2,0) > _options.max_dist || std::isnan(p_f.norm())) { - return false; + } + feat->anchor_cam_id = anchor_most_meas; + feat->anchor_clone_timestamp = feat->timestamps.at(feat->anchor_cam_id).back(); + + // Our linear system matrices + Eigen::Matrix3d A = Eigen::Matrix3d::Zero(); + Eigen::Vector3d b = Eigen::Vector3d::Zero(); + + // Get the position of the anchor pose + ClonePose anchorclone = clonesCAM.at(feat->anchor_cam_id).at(feat->anchor_clone_timestamp); + const Eigen::Matrix &R_GtoA = anchorclone.Rot(); + const Eigen::Matrix &p_AinG = anchorclone.pos(); + + // Loop through each camera for this feature + for (auto const &pair : feat->timestamps) { + + // Add CAM_I features + for (size_t m = 0; m < feat->timestamps.at(pair.first).size(); m++) { + + // Get the position of this clone in the global + const Eigen::Matrix &R_GtoCi = clonesCAM.at(pair.first).at(feat->timestamps.at(pair.first).at(m)).Rot(); + const Eigen::Matrix &p_CiinG = clonesCAM.at(pair.first).at(feat->timestamps.at(pair.first).at(m)).pos(); + + // Convert current position relative to anchor + Eigen::Matrix R_AtoCi; + R_AtoCi.noalias() = R_GtoCi * R_GtoA.transpose(); + Eigen::Matrix p_CiinA; + p_CiinA.noalias() = R_GtoA * (p_CiinG - p_AinG); + + // Get the UV coordinate normal + Eigen::Matrix b_i; + b_i << feat->uvs_norm.at(pair.first).at(m)(0), feat->uvs_norm.at(pair.first).at(m)(1), 1; + b_i = R_AtoCi.transpose() * b_i; + b_i = b_i / b_i.norm(); + Eigen::Matrix3d Bperp = skew_x(b_i); + + // Append to our linear system + Eigen::Matrix3d Ai = Bperp.transpose() * Bperp; + A += Ai; + b += Ai * p_CiinA; } - - // Store it in our feature object - feat->p_FinA = p_f; - feat->p_FinG = R_GtoA.transpose()*feat->p_FinA + p_AinG; - return true; - + } + + // Solve the linear system + Eigen::MatrixXd p_f = A.colPivHouseholderQr().solve(b); + + // Check A and p_f + Eigen::JacobiSVD svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV); + Eigen::MatrixXd singularValues; + singularValues.resize(svd.singularValues().rows(), 1); + singularValues = svd.singularValues(); + double condA = singularValues(0, 0) / singularValues(singularValues.rows() - 1, 0); + + // If we have a bad condition number, or it is too close + // Then set the flag for bad (i.e. 
set z-axis to nan)
+  if (std::abs(condA) > _options.max_cond_number || p_f(2, 0) < _options.min_dist || p_f(2, 0) > _options.max_dist ||
+      std::isnan(p_f.norm())) {
+    return false;
+  }
+
+  // Store it in our feature object
+  feat->p_FinA = p_f;
+  feat->p_FinG = R_GtoA.transpose() * feat->p_FinA + p_AinG;
+  return true;
 }
-
-bool FeatureInitializer::single_triangulation_1d(Feature* feat, std::unordered_map<size_t, std::unordered_map<double, ClonePose>> &clonesCAM) {
-
-
-    // Total number of measurements
-    // Also set the first measurement to be the anchor frame
-    int total_meas = 0;
-    size_t anchor_most_meas = 0;
-    size_t most_meas = 0;
-    for (auto const& pair : feat->timestamps) {
-        total_meas += (int)pair.second.size();
-        if(pair.second.size() > most_meas) {
-            anchor_most_meas = pair.first;
-            most_meas = pair.second.size();
-        }
-    }
-    feat->anchor_cam_id = anchor_most_meas;
-    feat->anchor_clone_timestamp = feat->timestamps.at(feat->anchor_cam_id).back();
-    size_t idx_anchor_bearing = feat->timestamps.at(feat->anchor_cam_id).size()-1;
-
-    // Our linear system matrices
-    double A = 0.0;
-    double b = 0.0;
-
-    // Get the position of the anchor pose
-    ClonePose anchorclone = clonesCAM.at(feat->anchor_cam_id).at(feat->anchor_clone_timestamp);
-    const Eigen::Matrix<double, 3, 3> &R_GtoA = anchorclone.Rot();
-    const Eigen::Matrix<double, 3, 1> &p_AinG = anchorclone.pos();
-
-    // Get bearing in anchor frame
-    Eigen::Matrix<double, 3, 1> bearing_inA;
-    bearing_inA << feat->uvs_norm.at(feat->anchor_cam_id).at(idx_anchor_bearing)(0),
-            feat->uvs_norm.at(feat->anchor_cam_id).at(idx_anchor_bearing)(1), 1;
-    bearing_inA = bearing_inA / bearing_inA.norm();
-
-    // Loop through each camera for this feature
-    for (auto const& pair : feat->timestamps) {
-
-        // Add CAM_I features
-        for (size_t m = 0; m < feat->timestamps.at(pair.first).size(); m++) {
-
-            // Skip the anchor bearing
-            if((int)pair.first == feat->anchor_cam_id && m == idx_anchor_bearing)
-                continue;
-
-            // Get the position of this clone in the global
-            const Eigen::Matrix<double, 3, 3> &R_GtoCi = clonesCAM.at(pair.first).at(feat->timestamps.at(pair.first).at(m)).Rot();
-            const Eigen::Matrix<double, 3, 1> &p_CiinG = clonesCAM.at(pair.first).at(feat->timestamps.at(pair.first).at(m)).pos();
-
-            // Convert current position relative to anchor
-            Eigen::Matrix<double, 3, 3> R_AtoCi;
-            R_AtoCi.noalias() = R_GtoCi*R_GtoA.transpose();
-            Eigen::Matrix<double, 3, 1> p_CiinA;
-            p_CiinA.noalias() = R_GtoA*(p_CiinG-p_AinG);
-
-            // Get the UV coordinate normal
-            Eigen::Matrix<double, 3, 1> b_i;
-            b_i << feat->uvs_norm.at(pair.first).at(m)(0), feat->uvs_norm.at(pair.first).at(m)(1), 1;
-            b_i = R_AtoCi.transpose() * b_i;
-            b_i = b_i / b_i.norm();
-            Eigen::Matrix3d Bperp = skew_x(b_i);
-
-            // Append to our linear system
-            Eigen::Vector3d BperpBanchor = Bperp * bearing_inA;
-            A += BperpBanchor.dot(BperpBanchor);
-            b += BperpBanchor.dot(Bperp * p_CiinA);
-
-        }
+bool FeatureInitializer::single_triangulation_1d(Feature *feat,
+                                                 std::unordered_map<size_t, std::unordered_map<double, ClonePose>> &clonesCAM) {
+
+  // Total number of measurements
+  // Also set the first measurement to be the anchor frame
+  int total_meas = 0;
+  size_t anchor_most_meas = 0;
+  size_t most_meas = 0;
+  for (auto const &pair : feat->timestamps) {
+    total_meas += (int)pair.second.size();
+    if (pair.second.size() > most_meas) {
+      anchor_most_meas = pair.first;
+      most_meas = pair.second.size();
+    }
+  }
+  feat->anchor_cam_id = anchor_most_meas;
+  feat->anchor_clone_timestamp = feat->timestamps.at(feat->anchor_cam_id).back();
+  size_t idx_anchor_bearing = feat->timestamps.at(feat->anchor_cam_id).size() - 1;
+
+  // Our linear system matrices
+  double A = 0.0;
+  double b = 0.0;
+
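
(A short aside for readers tracing the two solvers; the helper below is ours, not part of the patch. Each observation in single_triangulation() above contributes one rank-deficient block to the 3x3 normal equations A p = b, while this 1d variant collapses the same constraints onto the fixed anchor bearing so only the scalars A and b survive. A minimal sketch of the per-measurement update, assuming Eigen:)

#include <Eigen/Dense>

// One unit bearing b_i (anchor frame) observed from clone position p_CiinA adds
// N^T N to A and N^T N p_CiinA to b, where N = skew(b_i) projects onto the
// plane perpendicular to the viewing ray (the constraint is ray-to-point distance).
inline void accumulate(const Eigen::Vector3d &b_i, const Eigen::Vector3d &p_CiinA, Eigen::Matrix3d &A, Eigen::Vector3d &b) {
  Eigen::Matrix3d N;
  N << 0, -b_i(2), b_i(1),
      b_i(2), 0, -b_i(0),
      -b_i(1), b_i(0), 0;
  Eigen::Matrix3d Ai = N.transpose() * N;
  A += Ai;
  b += Ai * p_CiinA;
}
// Afterwards p_FinA = A.colPivHouseholderQr().solve(b), accepted only if the
// condition-number and depth thresholds pass.

+  // Get the position of the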
anchor pose + ClonePose anchorclone = clonesCAM.at(feat->anchor_cam_id).at(feat->anchor_clone_timestamp); + const Eigen::Matrix &R_GtoA = anchorclone.Rot(); + const Eigen::Matrix &p_AinG = anchorclone.pos(); + + // Get bearing in anchor frame + Eigen::Matrix bearing_inA; + bearing_inA << feat->uvs_norm.at(feat->anchor_cam_id).at(idx_anchor_bearing)(0), + feat->uvs_norm.at(feat->anchor_cam_id).at(idx_anchor_bearing)(1), 1; + bearing_inA = bearing_inA / bearing_inA.norm(); + + // Loop through each camera for this feature + for (auto const &pair : feat->timestamps) { + + // Add CAM_I features + for (size_t m = 0; m < feat->timestamps.at(pair.first).size(); m++) { + + // Skip the anchor bearing + if ((int)pair.first == feat->anchor_cam_id && m == idx_anchor_bearing) + continue; + + // Get the position of this clone in the global + const Eigen::Matrix &R_GtoCi = clonesCAM.at(pair.first).at(feat->timestamps.at(pair.first).at(m)).Rot(); + const Eigen::Matrix &p_CiinG = clonesCAM.at(pair.first).at(feat->timestamps.at(pair.first).at(m)).pos(); + + // Convert current position relative to anchor + Eigen::Matrix R_AtoCi; + R_AtoCi.noalias() = R_GtoCi * R_GtoA.transpose(); + Eigen::Matrix p_CiinA; + p_CiinA.noalias() = R_GtoA * (p_CiinG - p_AinG); + + // Get the UV coordinate normal + Eigen::Matrix b_i; + b_i << feat->uvs_norm.at(pair.first).at(m)(0), feat->uvs_norm.at(pair.first).at(m)(1), 1; + b_i = R_AtoCi.transpose() * b_i; + b_i = b_i / b_i.norm(); + Eigen::Matrix3d Bperp = skew_x(b_i); + + // Append to our linear system + Eigen::Vector3d BperpBanchor = Bperp * bearing_inA; + A += BperpBanchor.dot(BperpBanchor); + b += BperpBanchor.dot(Bperp * p_CiinA); } + } - // Solve the linear system - double depth = b / A; - Eigen::MatrixXd p_f = depth * bearing_inA; + // Solve the linear system + double depth = b / A; + Eigen::MatrixXd p_f = depth * bearing_inA; - // Then set the flag for bad (i.e. set z-axis to nan) - if (p_f(2,0) < _options.min_dist || p_f(2,0) > _options.max_dist || std::isnan(p_f.norm())) { - return false; - } - - // Store it in our feature object - feat->p_FinA = p_f; - feat->p_FinG = R_GtoA.transpose()*feat->p_FinA + p_AinG; - return true; + // Then set the flag for bad (i.e. 
set z-axis to nan)
+  if (p_f(2, 0) < _options.min_dist || p_f(2, 0) > _options.max_dist || std::isnan(p_f.norm())) {
+    return false;
+  }
+
+  // Store it in our feature object
+  feat->p_FinA = p_f;
+  feat->p_FinG = R_GtoA.transpose() * feat->p_FinA + p_AinG;
+  return true;
 }
+
+bool FeatureInitializer::single_gaussnewton(Feature *feat, std::unordered_map<size_t, std::unordered_map<double, ClonePose>> &clonesCAM) {
+  // Get into inverse depth
+  double rho = 1 / feat->p_FinA(2);
+  double alpha = feat->p_FinA(0) / feat->p_FinA(2);
+  double beta = feat->p_FinA(1) / feat->p_FinA(2);
+
+  // Optimization parameters
+  double lam = _options.init_lamda;
+  double eps = 10000;
+  int runs = 0;
+
-bool FeatureInitializer::single_gaussnewton(Feature* feat, std::unordered_map<size_t, std::unordered_map<double, ClonePose>> &clonesCAM) {
-
-    //Get into inverse depth
-    double rho = 1/feat->p_FinA(2);
-    double alpha = feat->p_FinA(0)/feat->p_FinA(2);
-    double beta = feat->p_FinA(1)/feat->p_FinA(2);
-
-    // Optimization parameters
-    double lam = _options.init_lamda;
-    double eps = 10000;
-    int runs = 0;
-
-    // Variables used in the optimization
-    bool recompute = true;
-    Eigen::Matrix<double, 3, 3> Hess = Eigen::Matrix<double, 3, 3>::Zero();
-    Eigen::Matrix<double, 3, 1> grad = Eigen::Matrix<double, 3, 1>::Zero();
-
-    // Cost at the last iteration
-    double cost_old = compute_error(clonesCAM,feat,alpha,beta,rho);
-
-    // Get the position of the anchor pose
-    const Eigen::Matrix<double, 3, 3> &R_GtoA = clonesCAM.at(feat->anchor_cam_id).at(feat->anchor_clone_timestamp).Rot();
-    const Eigen::Matrix<double, 3, 1> &p_AinG = clonesCAM.at(feat->anchor_cam_id).at(feat->anchor_clone_timestamp).pos();
-
-    // Loop till we have either
-    // 1. Reached our max iteration count
-    // 2. System is unstable
-    // 3. System has converged
-    while (runs < _options.max_runs && lam < _options.max_lamda && eps > _options.min_dx) {
-
-        // Triggers a recomputation of jacobians/information/gradients
-        if (recompute) {
-
-            Hess.setZero();
-            grad.setZero();
-
-            double err = 0;
-
-            // Loop through each camera for this feature
-            for (auto const& pair : feat->timestamps) {
-
-                // Add CAM_I features
-                for (size_t m = 0; m < feat->timestamps.at(pair.first).size(); m++) {
-
-                    //=====================================================================================
-                    //=====================================================================================
+  // Variables used in the optimization
+  bool recompute = true;
+  Eigen::Matrix<double, 3, 3> Hess = Eigen::Matrix<double, 3, 3>::Zero();
+  Eigen::Matrix<double, 3, 1> grad = Eigen::Matrix<double, 3, 1>::Zero();
+
+  // Cost at the last iteration
+  double cost_old = compute_error(clonesCAM, feat, alpha, beta, rho);
+
+  // Get the position of the anchor pose
+  const Eigen::Matrix<double, 3, 3> &R_GtoA = clonesCAM.at(feat->anchor_cam_id).at(feat->anchor_clone_timestamp).Rot();
+  const Eigen::Matrix<double, 3, 1> &p_AinG = clonesCAM.at(feat->anchor_cam_id).at(feat->anchor_clone_timestamp).pos();
+
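
(A tiny self-contained illustration of the inverse-depth variables this function optimizes and inverts again at the end; the values and the standalone form are made up, not part of OpenVINS:)

#include <Eigen/Dense>

int main() {
  Eigen::Vector3d p_FinA(0.4, -0.2, 2.0);   // feature in the anchor frame
  double rho = 1.0 / p_FinA(2);             // inverse depth, 0.5
  double alpha = p_FinA(0) / p_FinA(2);     // x/z, 0.2
  double beta = p_FinA(1) / p_FinA(2);      // y/z, -0.1
  // After refinement the code recovers the point as (alpha/rho, beta/rho, 1/rho)
  Eigen::Vector3d back(alpha / rho, beta / rho, 1.0 / rho);
  return back.isApprox(p_FinA) ? 0 : 1;     // the round trip is exact
}

+  // Loop till we have either
+  // 1. Reached our max iteration count
+  // 2. System is unstable
+  // 3.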
System has converged + while (runs < _options.max_runs && lam < _options.max_lamda && eps > _options.min_dx) { - // Get the position of this clone in the global - const Eigen::Matrix &R_GtoCi = clonesCAM.at(pair.first).at(feat->timestamps[pair.first].at(m)).Rot(); - const Eigen::Matrix &p_CiinG = clonesCAM.at(pair.first).at(feat->timestamps[pair.first].at(m)).pos(); - // Convert current position relative to anchor - Eigen::Matrix R_AtoCi; - R_AtoCi.noalias() = R_GtoCi*R_GtoA.transpose(); - Eigen::Matrix p_CiinA; - p_CiinA.noalias() = R_GtoA*(p_CiinG-p_AinG); - Eigen::Matrix p_AinCi; - p_AinCi.noalias() = -R_AtoCi*p_CiinA; + // Triggers a recomputation of jacobians/information/gradients + if (recompute) { - //===================================================================================== - //===================================================================================== + Hess.setZero(); + grad.setZero(); - // Middle variables of the system - double hi1 = R_AtoCi(0, 0) * alpha + R_AtoCi(0, 1) * beta + R_AtoCi(0, 2) + rho * p_AinCi(0, 0); - double hi2 = R_AtoCi(1, 0) * alpha + R_AtoCi(1, 1) * beta + R_AtoCi(1, 2) + rho * p_AinCi(1, 0); - double hi3 = R_AtoCi(2, 0) * alpha + R_AtoCi(2, 1) * beta + R_AtoCi(2, 2) + rho * p_AinCi(2, 0); - // Calculate jacobian - double d_z1_d_alpha = (R_AtoCi(0, 0) * hi3 - hi1 * R_AtoCi(2, 0)) / (pow(hi3, 2)); - double d_z1_d_beta = (R_AtoCi(0, 1) * hi3 - hi1 * R_AtoCi(2, 1)) / (pow(hi3, 2)); - double d_z1_d_rho = (p_AinCi(0, 0) * hi3 - hi1 * p_AinCi(2, 0)) / (pow(hi3, 2)); - double d_z2_d_alpha = (R_AtoCi(1, 0) * hi3 - hi2 * R_AtoCi(2, 0)) / (pow(hi3, 2)); - double d_z2_d_beta = (R_AtoCi(1, 1) * hi3 - hi2 * R_AtoCi(2, 1)) / (pow(hi3, 2)); - double d_z2_d_rho = (p_AinCi(1, 0) * hi3 - hi2 * p_AinCi(2, 0)) / (pow(hi3, 2)); - Eigen::Matrix H; - H << d_z1_d_alpha, d_z1_d_beta, d_z1_d_rho, d_z2_d_alpha, d_z2_d_beta, d_z2_d_rho; - // Calculate residual - Eigen::Matrix z; - z << hi1 / hi3, hi2 / hi3; - Eigen::Matrix res = feat->uvs_norm.at(pair.first).at(m) - z; + double err = 0; - //===================================================================================== - //===================================================================================== + // Loop through each camera for this feature + for (auto const &pair : feat->timestamps) { - // Append to our summation variables - err += std::pow(res.norm(), 2); - grad.noalias() += H.transpose() * res.cast(); - Hess.noalias() += H.transpose() * H; - } - - } - - } - - // Solve Levenberg iteration - Eigen::Matrix Hess_l = Hess; - for (size_t r=0; r < (size_t)Hess.rows(); r++) { - Hess_l(r,r) *= (1.0+lam); - } - - Eigen::Matrix dx = Hess_l.colPivHouseholderQr().solve(grad); - //Eigen::Matrix dx = (Hess+lam*Eigen::MatrixXd::Identity(Hess.rows(), Hess.rows())).colPivHouseholderQr().solve(grad); - - // Check if error has gone down - double cost = compute_error(clonesCAM,feat,alpha+dx(0,0),beta+dx(1,0),rho+dx(2,0)); - - // Debug print - //cout << "run = " << runs << " | cost = " << dx.norm() << " | lamda = " << lam << " | depth = " << 1/rho << endl; + // Add CAM_I features + for (size_t m = 0; m < feat->timestamps.at(pair.first).size(); m++) { - // Check if converged - if (cost <= cost_old && (cost_old-cost)/cost_old < _options.min_dcost) { - alpha += dx(0, 0); - beta += dx(1, 0); - rho += dx(2, 0); - eps = 0; - break; + //===================================================================================== + //===================================================================================== + + // Get 
the position of this clone in the global + const Eigen::Matrix &R_GtoCi = clonesCAM.at(pair.first).at(feat->timestamps[pair.first].at(m)).Rot(); + const Eigen::Matrix &p_CiinG = clonesCAM.at(pair.first).at(feat->timestamps[pair.first].at(m)).pos(); + // Convert current position relative to anchor + Eigen::Matrix R_AtoCi; + R_AtoCi.noalias() = R_GtoCi * R_GtoA.transpose(); + Eigen::Matrix p_CiinA; + p_CiinA.noalias() = R_GtoA * (p_CiinG - p_AinG); + Eigen::Matrix p_AinCi; + p_AinCi.noalias() = -R_AtoCi * p_CiinA; + + //===================================================================================== + //===================================================================================== + + // Middle variables of the system + double hi1 = R_AtoCi(0, 0) * alpha + R_AtoCi(0, 1) * beta + R_AtoCi(0, 2) + rho * p_AinCi(0, 0); + double hi2 = R_AtoCi(1, 0) * alpha + R_AtoCi(1, 1) * beta + R_AtoCi(1, 2) + rho * p_AinCi(1, 0); + double hi3 = R_AtoCi(2, 0) * alpha + R_AtoCi(2, 1) * beta + R_AtoCi(2, 2) + rho * p_AinCi(2, 0); + // Calculate jacobian + double d_z1_d_alpha = (R_AtoCi(0, 0) * hi3 - hi1 * R_AtoCi(2, 0)) / (pow(hi3, 2)); + double d_z1_d_beta = (R_AtoCi(0, 1) * hi3 - hi1 * R_AtoCi(2, 1)) / (pow(hi3, 2)); + double d_z1_d_rho = (p_AinCi(0, 0) * hi3 - hi1 * p_AinCi(2, 0)) / (pow(hi3, 2)); + double d_z2_d_alpha = (R_AtoCi(1, 0) * hi3 - hi2 * R_AtoCi(2, 0)) / (pow(hi3, 2)); + double d_z2_d_beta = (R_AtoCi(1, 1) * hi3 - hi2 * R_AtoCi(2, 1)) / (pow(hi3, 2)); + double d_z2_d_rho = (p_AinCi(1, 0) * hi3 - hi2 * p_AinCi(2, 0)) / (pow(hi3, 2)); + Eigen::Matrix H; + H << d_z1_d_alpha, d_z1_d_beta, d_z1_d_rho, d_z2_d_alpha, d_z2_d_beta, d_z2_d_rho; + // Calculate residual + Eigen::Matrix z; + z << hi1 / hi3, hi2 / hi3; + Eigen::Matrix res = feat->uvs_norm.at(pair.first).at(m) - z; + + //===================================================================================== + //===================================================================================== + + // Append to our summation variables + err += std::pow(res.norm(), 2); + grad.noalias() += H.transpose() * res.cast(); + Hess.noalias() += H.transpose() * H; } + } + } - // If cost is lowered, accept step - // Else inflate lambda (try to make more stable) - if (cost <= cost_old) { - recompute = true; - cost_old = cost; - alpha += dx(0, 0); - beta += dx(1, 0); - rho += dx(2, 0); - runs++; - lam = lam/_options.lam_mult; - eps = dx.norm(); - } else { - recompute = false; - lam = lam*_options.lam_mult; - continue; - } + // Solve Levenberg iteration + Eigen::Matrix Hess_l = Hess; + for (size_t r = 0; r < (size_t)Hess.rows(); r++) { + Hess_l(r, r) *= (1.0 + lam); } - // Revert to standard, and set to all - feat->p_FinA(0) = alpha/rho; - feat->p_FinA(1) = beta/rho; - feat->p_FinA(2) = 1/rho; + Eigen::Matrix dx = Hess_l.colPivHouseholderQr().solve(grad); + // Eigen::Matrix dx = (Hess+lam*Eigen::MatrixXd::Identity(Hess.rows(), Hess.rows())).colPivHouseholderQr().solve(grad); - // Get tangent plane to x_hat - Eigen::HouseholderQR qr(feat->p_FinA); - Eigen::MatrixXd Q = qr.householderQ(); + // Check if error has gone down + double cost = compute_error(clonesCAM, feat, alpha + dx(0, 0), beta + dx(1, 0), rho + dx(2, 0)); - // Max baseline we have between poses - double base_line_max = 0.0; + // Debug print + // cout << "run = " << runs << " | cost = " << dx.norm() << " | lamda = " << lam << " | depth = " << 1/rho << endl; - // Check maximum baseline - // Loop through each camera for this feature - for (auto const& pair : feat->timestamps) { - // Loop 
through the other clones to see what the max baseline is
-        for (size_t m = 0; m < feat->timestamps.at(pair.first).size(); m++) {
-            // Get the position of this clone in the global
-            const Eigen::Matrix<double, 3, 1> &p_CiinG = clonesCAM.at(pair.first).at(feat->timestamps.at(pair.first).at(m)).pos();
-            // Convert current position relative to anchor
-            Eigen::Matrix<double, 3, 1> p_CiinA = R_GtoA*(p_CiinG-p_AinG);
-            // Dot product camera pose and nullspace
-            double base_line = ((Q.block(0,1,3,2)).transpose() * p_CiinA).norm();
-            if (base_line > base_line_max) base_line_max = base_line;
-        }
-    }
-
-    // Check if this feature is bad or not
-    // 1. If the feature is too close
-    // 2. If the feature is invalid
-    // 3. If the baseline ratio is large
-    if(feat->p_FinA(2) < _options.min_dist
-       || feat->p_FinA(2) > _options.max_dist
-       || (feat->p_FinA.norm() / base_line_max) > _options.max_baseline
-       || std::isnan(feat->p_FinA.norm())) {
-        return false;
-    }
-
-    // Finally get position in global frame
-    feat->p_FinG = R_GtoA.transpose()*feat->p_FinA + p_AinG;
-    return true;
-
+    // Check if converged
+    if (cost <= cost_old && (cost_old - cost) / cost_old < _options.min_dcost) {
+      alpha += dx(0, 0);
+      beta += dx(1, 0);
+      rho += dx(2, 0);
+      eps = 0;
+      break;
+    }
+
+    // If cost is lowered, accept step
+    // Else inflate lambda (try to make more stable)
+    if (cost <= cost_old) {
+      recompute = true;
+      cost_old = cost;
+      alpha += dx(0, 0);
+      beta += dx(1, 0);
+      rho += dx(2, 0);
+      runs++;
+      lam = lam / _options.lam_mult;
+      eps = dx.norm();
+    } else {
+      recompute = false;
+      lam = lam * _options.lam_mult;
+      continue;
+    }
+  }
+
+  // Revert to standard, and set to all
+  feat->p_FinA(0) = alpha / rho;
+  feat->p_FinA(1) = beta / rho;
+  feat->p_FinA(2) = 1 / rho;
+
+  // Get tangent plane to x_hat
+  Eigen::HouseholderQR<Eigen::MatrixXd> qr(feat->p_FinA);
+  Eigen::MatrixXd Q = qr.householderQ();
+
+  // Max baseline we have between poses
+  double base_line_max = 0.0;
+
+  // Check maximum baseline
+  // Loop through each camera for this feature
+  for (auto const &pair : feat->timestamps) {
+    // Loop through the other clones to see what the max baseline is
+    for (size_t m = 0; m < feat->timestamps.at(pair.first).size(); m++) {
+      // Get the position of this clone in the global
+      const Eigen::Matrix<double, 3, 1> &p_CiinG = clonesCAM.at(pair.first).at(feat->timestamps.at(pair.first).at(m)).pos();
+      // Convert current position relative to anchor
+      Eigen::Matrix<double, 3, 1> p_CiinA = R_GtoA * (p_CiinG - p_AinG);
+      // Dot product camera pose and nullspace
+      double base_line = ((Q.block(0, 1, 3, 2)).transpose() * p_CiinA).norm();
+      if (base_line > base_line_max)
+        base_line_max = base_line;
+    }
+  }
+
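
(The baseline test just assembled can be read in isolation. A hedged sketch with illustrative names, assuming Eigen: the last two columns of Q span the plane orthogonal to the feature direction, so projecting each clone position onto them measures the usable baseline.)

#include <algorithm>
#include <vector>
#include <Eigen/Dense>

double max_baseline(const Eigen::Vector3d &p_FinA, const std::vector<Eigen::Vector3d> &clones_inA) {
  Eigen::HouseholderQR<Eigen::MatrixXd> qr(p_FinA);
  Eigen::MatrixXd Q = qr.householderQ(); // 3x3, first column parallel to p_FinA
  double best = 0.0;
  for (const auto &p : clones_inA)
    best = std::max(best, (Q.block(0, 1, 3, 2).transpose() * p).norm());
  return best; // the feature is rejected if ||p_FinA|| / best exceeds max_baseline
}

+  // Check if this feature is bad or not
+  // 1. If the feature is too close
+  // 2. If the feature is invalid
+  // 3.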
If the baseline ratio is large + if (feat->p_FinA(2) < _options.min_dist || feat->p_FinA(2) > _options.max_dist || + (feat->p_FinA.norm() / base_line_max) > _options.max_baseline || std::isnan(feat->p_FinA.norm())) { + return false; + } + + // Finally get position in global frame + feat->p_FinG = R_GtoA.transpose() * feat->p_FinA + p_AinG; + return true; } - -double FeatureInitializer::compute_error(std::unordered_map> &clonesCAM, - Feature* feat, double alpha, double beta, double rho) { - - // Total error - double err = 0; - - // Get the position of the anchor pose - const Eigen::Matrix &R_GtoA = clonesCAM.at(feat->anchor_cam_id).at(feat->anchor_clone_timestamp).Rot(); - const Eigen::Matrix &p_AinG = clonesCAM.at(feat->anchor_cam_id).at(feat->anchor_clone_timestamp).pos(); - - // Loop through each camera for this feature - for (auto const& pair : feat->timestamps) { - // Add CAM_I features - for (size_t m = 0; m < feat->timestamps.at(pair.first).size(); m++) { - - //===================================================================================== - //===================================================================================== - - // Get the position of this clone in the global - const Eigen::Matrix &R_GtoCi = clonesCAM.at(pair.first).at(feat->timestamps.at(pair.first).at(m)).Rot(); - const Eigen::Matrix &p_CiinG = clonesCAM.at(pair.first).at(feat->timestamps.at(pair.first).at(m)).pos(); - // Convert current position relative to anchor - Eigen::Matrix R_AtoCi; - R_AtoCi.noalias() = R_GtoCi*R_GtoA.transpose(); - Eigen::Matrix p_CiinA; - p_CiinA.noalias() = R_GtoA*(p_CiinG-p_AinG); - Eigen::Matrix p_AinCi; - p_AinCi.noalias() = -R_AtoCi*p_CiinA; - - //===================================================================================== - //===================================================================================== - - // Middle variables of the system - double hi1 = R_AtoCi(0, 0) * alpha + R_AtoCi(0, 1) * beta + R_AtoCi(0, 2) + rho * p_AinCi(0, 0); - double hi2 = R_AtoCi(1, 0) * alpha + R_AtoCi(1, 1) * beta + R_AtoCi(1, 2) + rho * p_AinCi(1, 0); - double hi3 = R_AtoCi(2, 0) * alpha + R_AtoCi(2, 1) * beta + R_AtoCi(2, 2) + rho * p_AinCi(2, 0); - // Calculate residual - Eigen::Matrix z; - z << hi1 / hi3, hi2 / hi3; - Eigen::Matrix res = feat->uvs_norm.at(pair.first).at(m) - z; - // Append to our summation variables - err += pow(res.norm(), 2); - } +double FeatureInitializer::compute_error(std::unordered_map> &clonesCAM, Feature *feat, + double alpha, double beta, double rho) { + + // Total error + double err = 0; + + // Get the position of the anchor pose + const Eigen::Matrix &R_GtoA = clonesCAM.at(feat->anchor_cam_id).at(feat->anchor_clone_timestamp).Rot(); + const Eigen::Matrix &p_AinG = clonesCAM.at(feat->anchor_cam_id).at(feat->anchor_clone_timestamp).pos(); + + // Loop through each camera for this feature + for (auto const &pair : feat->timestamps) { + // Add CAM_I features + for (size_t m = 0; m < feat->timestamps.at(pair.first).size(); m++) { + + //===================================================================================== + //===================================================================================== + + // Get the position of this clone in the global + const Eigen::Matrix &R_GtoCi = clonesCAM.at(pair.first).at(feat->timestamps.at(pair.first).at(m)).Rot(); + const Eigen::Matrix &p_CiinG = clonesCAM.at(pair.first).at(feat->timestamps.at(pair.first).at(m)).pos(); + // Convert current position relative to anchor + Eigen::Matrix R_AtoCi; + 
R_AtoCi.noalias() = R_GtoCi * R_GtoA.transpose(); + Eigen::Matrix p_CiinA; + p_CiinA.noalias() = R_GtoA * (p_CiinG - p_AinG); + Eigen::Matrix p_AinCi; + p_AinCi.noalias() = -R_AtoCi * p_CiinA; + + //===================================================================================== + //===================================================================================== + + // Middle variables of the system + double hi1 = R_AtoCi(0, 0) * alpha + R_AtoCi(0, 1) * beta + R_AtoCi(0, 2) + rho * p_AinCi(0, 0); + double hi2 = R_AtoCi(1, 0) * alpha + R_AtoCi(1, 1) * beta + R_AtoCi(1, 2) + rho * p_AinCi(1, 0); + double hi3 = R_AtoCi(2, 0) * alpha + R_AtoCi(2, 1) * beta + R_AtoCi(2, 2) + rho * p_AinCi(2, 0); + // Calculate residual + Eigen::Matrix z; + z << hi1 / hi3, hi2 / hi3; + Eigen::Matrix res = feat->uvs_norm.at(pair.first).at(m) - z; + // Append to our summation variables + err += pow(res.norm(), 2); } + } - return err; - + return err; } - - - - diff --git a/ov_core/src/feat/FeatureInitializer.h b/ov_core/src/feat/FeatureInitializer.h index 8826a0b5d..0ac850d83 100644 --- a/ov_core/src/feat/FeatureInitializer.h +++ b/ov_core/src/feat/FeatureInitializer.h @@ -29,131 +29,119 @@ namespace ov_core { - /** - * @brief Class that triangulates feature - * - * This class has the functions needed to triangulate and then refine a given 3D feature. - * As in the standard MSCKF, we know the clones of the camera from propagation and past updates. - * Thus, we just need to triangulate a feature in 3D with the known poses and then refine it. - * One should first call the single_triangulation() function afterwhich single_gaussnewton() allows for refinement. - * Please see the @ref update-featinit page for detailed derivations. - */ - class FeatureInitializer - { - - public: - - /** - * @brief Structure which stores pose estimates for use in triangulation - * - * - R_GtoC - rotation from global to camera - * - p_CinG - position of camera in global frame - */ - struct ClonePose { - - /// Rotation - Eigen::Matrix _Rot; - - /// Position - Eigen::Matrix _pos; - - /// Constructs pose from rotation and position - ClonePose(const Eigen::Matrix &R, const Eigen::Matrix &p) { - _Rot = R; - _pos = p; - } - - /// Constructs pose from quaternion and position - ClonePose(const Eigen::Matrix &q, const Eigen::Matrix &p) { - _Rot = quat_2_Rot(q); - _pos = p; - } - - /// Default constructor - ClonePose() { - _Rot = Eigen::Matrix::Identity(); - _pos = Eigen::Matrix::Zero(); - } - - /// Accessor for rotation - const Eigen::Matrix &Rot() { - return _Rot; - } - - /// Accessor for position - const Eigen::Matrix &pos() { - return _pos; - } - - }; - - - /** - * @brief Default constructor - * @param options Options for the initializer - */ - FeatureInitializer(FeatureInitializerOptions &options) : _options(options) {} - - /** - * @brief Uses a linear triangulation to get initial estimate for the feature - * - * The derivations for this method can be found in the @ref featinit-linear documentation page. - * - * @param feat Pointer to feature - * @param clonesCAM Map between camera ID to map of timestamp to camera pose estimate (rotation from global to camera, position of camera in global frame) - * @return Returns false if it fails to triangulate (based on the thresholds) - */ - bool single_triangulation(Feature* feat, std::unordered_map> &clonesCAM); - - /** - * @brief Uses a linear triangulation to get initial estimate for the feature, treating the anchor observation as a true bearing. 
- * - * The derivations for this method can be found in the @ref featinit-linear-1d documentation page. - * This function should be used if you want speed, or know your anchor bearing is reasonably accurate. - * - * @param feat Pointer to feature - * @param clonesCAM Map between camera ID to map of timestamp to camera pose estimate (rotation from global to camera, position of camera in global frame) - * @return Returns false if it fails to triangulate (based on the thresholds) - */ - bool single_triangulation_1d(Feature* feat, std::unordered_map> &clonesCAM); - - /** - * @brief Uses a nonlinear triangulation to refine initial linear estimate of the feature - * @param feat Pointer to feature - * @param clonesCAM Map between camera ID to map of timestamp to camera pose estimate (rotation from global to camera, position of camera in global frame) - * @return Returns false if it fails to be optimize (based on the thresholds) - */ - bool single_gaussnewton(Feature* feat, std::unordered_map> &clonesCAM); - - /** - * @brief Gets the current configuration of the feature initializer - * @return Const feature initializer config - */ - const FeatureInitializerOptions config() { - return _options; - } - - - protected: - - /// Contains options for the initializer process - FeatureInitializerOptions _options; - - /** - * @brief Helper function for the gauss newton method that computes error of the given estimate - * @param clonesCAM Map between camera ID to map of timestamp to camera pose estimate - * @param feat Pointer to the feature - * @param alpha x/z in anchor - * @param beta y/z in anchor - * @param rho 1/z inverse depth - */ - double compute_error(std::unordered_map> &clonesCAM,Feature* feat,double alpha,double beta,double rho); - - }; - - - -} - - -#endif //OPEN_VINS_FEATUREINITIALIZER_H \ No newline at end of file +/** + * @brief Class that triangulates feature + * + * This class has the functions needed to triangulate and then refine a given 3D feature. + * As in the standard MSCKF, we know the clones of the camera from propagation and past updates. + * Thus, we just need to triangulate a feature in 3D with the known poses and then refine it. + * One should first call the single_triangulation() function afterwhich single_gaussnewton() allows for refinement. + * Please see the @ref update-featinit page for detailed derivations. 
+ */
+class FeatureInitializer {
+
+public:
+  /**
+   * @brief Structure which stores pose estimates for use in triangulation
+   *
+   * - R_GtoC - rotation from global to camera
+   * - p_CinG - position of camera in global frame
+   */
+  struct ClonePose {
+
+    /// Rotation
+    Eigen::Matrix<double, 3, 3> _Rot;
+
+    /// Position
+    Eigen::Matrix<double, 3, 1> _pos;
+
+    /// Constructs pose from rotation and position
+    ClonePose(const Eigen::Matrix<double, 3, 3> &R, const Eigen::Matrix<double, 3, 1> &p) {
+      _Rot = R;
+      _pos = p;
+    }
+
+    /// Constructs pose from quaternion and position
+    ClonePose(const Eigen::Matrix<double, 4, 1> &q, const Eigen::Matrix<double, 3, 1> &p) {
+      _Rot = quat_2_Rot(q);
+      _pos = p;
+    }
+
+    /// Default constructor
+    ClonePose() {
+      _Rot = Eigen::Matrix<double, 3, 3>::Identity();
+      _pos = Eigen::Matrix<double, 3, 1>::Zero();
+    }
+
+    /// Accessor for rotation
+    const Eigen::Matrix<double, 3, 3> &Rot() { return _Rot; }
+
+    /// Accessor for position
+    const Eigen::Matrix<double, 3, 1> &pos() { return _pos; }
+  };
+
+  /**
+   * @brief Default constructor
+   * @param options Options for the initializer
+   */
+  FeatureInitializer(FeatureInitializerOptions &options) : _options(options) {}
+
+  /**
+   * @brief Uses a linear triangulation to get initial estimate for the feature
+   *
+   * The derivations for this method can be found in the @ref featinit-linear documentation page.
+   *
+   * @param feat Pointer to feature
+   * @param clonesCAM Map between camera ID to map of timestamp to camera pose estimate (rotation from global to camera, position of camera
+   * in global frame)
+   * @return Returns false if it fails to triangulate (based on the thresholds)
+   */
+  bool single_triangulation(Feature *feat, std::unordered_map<size_t, std::unordered_map<double, ClonePose>> &clonesCAM);
+
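
(For orientation, this is one way a caller could assemble the clonesCAM argument taken by the declarations here, mapping camera id to timestamp to pose; the add_clone helper and its setup are hypothetical, not OpenVINS API:)

#include <unordered_map>
#include <Eigen/Dense>

using ov_core::FeatureInitializer;

std::unordered_map<size_t, std::unordered_map<double, FeatureInitializer::ClonePose>> clonesCAM;

void add_clone(size_t cam_id, double stamp, const Eigen::Matrix<double, 3, 3> &R_GtoC, const Eigen::Matrix<double, 3, 1> &p_CinG) {
  clonesCAM[cam_id][stamp] = FeatureInitializer::ClonePose(R_GtoC, p_CinG);
}

+  /**
+   * @brief Uses a linear triangulation to get initial estimate for the feature, treating the anchor observation as a true bearing.
+   *
+   * The derivations for this method can be found in the @ref featinit-linear-1d documentation page.
+   * This function should be used if you want speed, or know your anchor bearing is reasonably accurate.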
+   *
+   * @param feat Pointer to feature
+   * @param clonesCAM Map between camera ID to map of timestamp to camera pose estimate (rotation from global to camera, position of camera
+   * in global frame)
+   * @return Returns false if it fails to triangulate (based on the thresholds)
+   */
+  bool single_triangulation_1d(Feature *feat, std::unordered_map<size_t, std::unordered_map<double, ClonePose>> &clonesCAM);
+
+  /**
+   * @brief Uses a nonlinear triangulation to refine initial linear estimate of the feature
+   * @param feat Pointer to feature
+   * @param clonesCAM Map between camera ID to map of timestamp to camera pose estimate (rotation from global to camera, position of camera
+   * in global frame)
+   * @return Returns false if it fails to be optimized (based on the thresholds)
+   */
+  bool single_gaussnewton(Feature *feat, std::unordered_map<size_t, std::unordered_map<double, ClonePose>> &clonesCAM);
+
+  /**
+   * @brief Gets the current configuration of the feature initializer
+   * @return Const feature initializer config
+   */
+  const FeatureInitializerOptions config() { return _options; }
+
+protected:
+  /// Contains options for the initializer process
+  FeatureInitializerOptions _options;
+
+  /**
+   * @brief Helper function for the gauss newton method that computes error of the given estimate
+   * @param clonesCAM Map between camera ID to map of timestamp to camera pose estimate
+   * @param feat Pointer to the feature
+   * @param alpha x/z in anchor
+   * @param beta y/z in anchor
+   * @param rho 1/z inverse depth
+   */
+  double compute_error(std::unordered_map<size_t, std::unordered_map<double, ClonePose>> &clonesCAM, Feature *feat, double alpha,
+                       double beta, double rho);
+};
+
+} // namespace ov_core
+
+#endif // OPEN_VINS_FEATUREINITIALIZER_H
\ No newline at end of file
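
(Before the options header, a sketch of the call order the class documentation above implies; the try_init wrapper and the include paths are our assumptions, not part of OpenVINS:)

#include <unordered_map>
#include "feat/Feature.h"
#include "feat/FeatureInitializer.h"
#include "feat/FeatureInitializerOptions.h"

bool try_init(ov_core::Feature *feat,
              std::unordered_map<size_t, std::unordered_map<double, ov_core::FeatureInitializer::ClonePose>> &clonesCAM) {
  ov_core::FeatureInitializerOptions options;
  ov_core::FeatureInitializer initializer(options);
  // Linear solve first, then optional Levenberg-Marquardt style refinement
  bool ok = options.triangulate_1d ? initializer.single_triangulation_1d(feat, clonesCAM)
                                   : initializer.single_triangulation(feat, clonesCAM);
  if (ok && options.refine_features)
    ok = initializer.single_gaussnewton(feat, clonesCAM);
  return ok; // feat->p_FinA and feat->p_FinG are only meaningful when this is true
}

diff --git a/ov_core/src/feat/FeatureInitializerOptions.h b/ov_core/src/feat/FeatureInitializerOptions.h
index 2cd78e16a..0acaaacae 100644
--- a/ov_core/src/feat/FeatureInitializerOptions.h
+++ b/ov_core/src/feat/FeatureInitializerOptions.h
@@ -21,69 +21,66 @@
 #ifndef OV_CORE_INITIALIZEROPTIONS_H
 #define OV_CORE_INITIALIZEROPTIONS_H
-
 namespace ov_core {
+
+/**
+ * @brief Struct which stores all our feature initializer options
+ */
+struct FeatureInitializerOptions {
-    /**
-     * @brief Struct which stores all our feature initializer options
-     */
-    struct FeatureInitializerOptions {
-
-        /// If we should perform 1d triangulation instead of 3d
-        bool triangulate_1d = false;
-
-        /// If we should perform Levenberg-Marquardt refinement
-        bool refine_features = true;
+  /// If we should perform 1d triangulation instead of 3d
+  bool triangulate_1d = false;
-        /// Max runs for Levenberg-Marquardt
-        int max_runs = 5;
+  /// If we should perform Levenberg-Marquardt refinement
+  bool refine_features = true;
-        /// Init lambda for Levenberg-Marquardt optimization
-        double init_lamda = 1e-3;
+  /// Max runs for Levenberg-Marquardt
+  int max_runs = 5;
-        /// Max lambda for Levenberg-Marquardt optimization
-        double max_lamda = 1e10;
+  /// Init lambda for Levenberg-Marquardt optimization
+  double init_lamda = 1e-3;
-        /// Cutoff for dx increment to consider as converged
-        double min_dx = 1e-6;
+  /// Max lambda for Levenberg-Marquardt optimization
+  double max_lamda = 1e10;
-        /// Cutoff for cost decrement to consider as converged
-        double min_dcost = 1e-6;
+  /// Cutoff for dx increment to consider as converged
+  double min_dx = 1e-6;
-        /// Multiplier to increase/decrease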
lambda + double lam_mult = 10; - /// Minimum distance to accept triangulated features - double max_dist = 60; + /// Minimum distance to accept triangulated features + double min_dist = 0.10; - /// Max baseline ratio to accept triangulated features - double max_baseline = 40; + /// Minimum distance to accept triangulated features + double max_dist = 60; - /// Max condition number of linear triangulation matrix accept triangulated features - double max_cond_number = 10000; + /// Max baseline ratio to accept triangulated features + double max_baseline = 40; - /// Nice print function of what parameters we have loaded - void print() { - printf("\t- triangulate_1d: %d\n", triangulate_1d); - printf("\t- refine_features: %d\n", refine_features); - printf("\t- max_runs: %d\n", max_runs); - printf("\t- init_lamda: %.3f\n", init_lamda); - printf("\t- max_lamda: %.3f\n", max_lamda); - printf("\t- min_dx: %.7f\n", min_dx); - printf("\t- min_dcost: %.7f\n", min_dcost); - printf("\t- lam_mult: %.3f\n", lam_mult); - printf("\t- min_dist: %.3f\n", min_dist); - printf("\t- max_dist: %.3f\n", max_dist); - printf("\t- max_baseline: %.3f\n", max_baseline); - printf("\t- max_cond_number: %.3f\n", max_cond_number); - } + /// Max condition number of linear triangulation matrix accept triangulated features + double max_cond_number = 10000; - }; + /// Nice print function of what parameters we have loaded + void print() { + printf("\t- triangulate_1d: %d\n", triangulate_1d); + printf("\t- refine_features: %d\n", refine_features); + printf("\t- max_runs: %d\n", max_runs); + printf("\t- init_lamda: %.3f\n", init_lamda); + printf("\t- max_lamda: %.3f\n", max_lamda); + printf("\t- min_dx: %.7f\n", min_dx); + printf("\t- min_dcost: %.7f\n", min_dcost); + printf("\t- lam_mult: %.3f\n", lam_mult); + printf("\t- min_dist: %.3f\n", min_dist); + printf("\t- max_dist: %.3f\n", max_dist); + printf("\t- max_baseline: %.3f\n", max_baseline); + printf("\t- max_cond_number: %.3f\n", max_cond_number); + } +}; -} +} // namespace ov_core -#endif //OV_CORE_INITIALIZEROPTIONS_H \ No newline at end of file +#endif // OV_CORE_INITIALIZEROPTIONS_H \ No newline at end of file diff --git a/ov_core/src/init/InertialInitializer.cpp b/ov_core/src/init/InertialInitializer.cpp index ad7b81e11..9df83857f 100644 --- a/ov_core/src/init/InertialInitializer.cpp +++ b/ov_core/src/init/InertialInitializer.cpp @@ -20,145 +20,136 @@ */ #include "InertialInitializer.h" - using namespace ov_core; - void InertialInitializer::feed_imu(const ImuData &message) { - // Append it to our vector - imu_data.push_back(message); - - // Delete all measurements older than three of our initialization windows - auto it0 = imu_data.begin(); - while(it0 != imu_data.end() && it0->timestamp < message.timestamp-3*_window_length) { - it0 = imu_data.erase(it0); - } + // Append it to our vector + imu_data.push_back(message); + // Delete all measurements older than three of our initialization windows + auto it0 = imu_data.begin(); + while (it0 != imu_data.end() && it0->timestamp < message.timestamp - 3 * _window_length) { + it0 = imu_data.erase(it0); + } } +bool InertialInitializer::initialize_with_imu(double &time0, Eigen::Matrix &q_GtoI0, Eigen::Matrix &b_w0, + Eigen::Matrix &v_I0inG, Eigen::Matrix &b_a0, + Eigen::Matrix &p_I0inG, bool wait_for_jerk) { + // Return if we don't have any measurements + if (imu_data.empty()) { + return false; + } + // Newest imu timestamp + double newesttime = imu_data.at(imu_data.size() - 1).timestamp; - -bool 
InertialInitializer::initialize_with_imu(double &time0, Eigen::Matrix &q_GtoI0, Eigen::Matrix &b_w0, - Eigen::Matrix &v_I0inG, Eigen::Matrix &b_a0, Eigen::Matrix &p_I0inG, bool wait_for_jerk) { - - // Return if we don't have any measurements - if(imu_data.empty()) { - return false; - } - - // Newest imu timestamp - double newesttime = imu_data.at(imu_data.size()-1).timestamp; - - // First lets collect a window of IMU readings from the newest measurement to the oldest - std::vector window_newest, window_secondnew; - for(const ImuData& data : imu_data) { - if(data.timestamp > newesttime-1*_window_length && data.timestamp <= newesttime-0*_window_length) { - window_newest.push_back(data); - } - if(data.timestamp > newesttime-2*_window_length && data.timestamp <= newesttime-1*_window_length) { - window_secondnew.push_back(data); - } - } - - // Return if both of these failed - if(window_newest.empty() || window_secondnew.empty()) { - //printf(YELLOW "InertialInitializer::initialize_with_imu(): unable to select window of IMU readings, not enough readings\n" RESET); - return false; - } - - // Calculate the sample variance for the newest one - Eigen::Matrix a_avg = Eigen::Matrix::Zero(); - for(const ImuData& data : window_newest) { - a_avg += data.am; + // First lets collect a window of IMU readings from the newest measurement to the oldest + std::vector window_newest, window_secondnew; + for (const ImuData &data : imu_data) { + if (data.timestamp > newesttime - 1 * _window_length && data.timestamp <= newesttime - 0 * _window_length) { + window_newest.push_back(data); } - a_avg /= (int)window_newest.size(); - double a_var = 0; - for(const ImuData& data : window_newest) { - a_var += (data.am-a_avg).dot(data.am-a_avg); - } - a_var = std::sqrt(a_var/((int)window_newest.size()-1)); - - // If it is below the threshold and we want to wait till we detect a jerk - if(a_var < _imu_excite_threshold && wait_for_jerk) { - printf(YELLOW "InertialInitializer::initialize_with_imu(): no IMU excitation, below threshold %.4f < %.4f\n" RESET,a_var,_imu_excite_threshold); - return false; + if (data.timestamp > newesttime - 2 * _window_length && data.timestamp <= newesttime - 1 * _window_length) { + window_secondnew.push_back(data); } - - // Return if we don't have any measurements - //if(imu_data.size() < 200) { - // return false; - //} - - // Sum up our current accelerations and velocities - Eigen::Vector3d linsum = Eigen::Vector3d::Zero(); - Eigen::Vector3d angsum = Eigen::Vector3d::Zero(); - for(size_t i=0; i _imu_excite_threshold || a_var2 > _imu_excite_threshold) && !wait_for_jerk) { - printf(YELLOW "InertialInitializer::initialize_with_imu(): to much IMU excitation, above threshold %.4f,%.4f > %.4f\n" RESET,a_var,a_var2,_imu_excite_threshold); - return false; - } - - // Get z axis, which alines with -g (z_in_G=0,0,1) - Eigen::Vector3d z_axis = linavg/linavg.norm(); - - // Create an x_axis - Eigen::Vector3d e_1(1,0,0); - - // Make x_axis perpendicular to z - Eigen::Vector3d x_axis = e_1-z_axis*z_axis.transpose()*e_1; - x_axis= x_axis/x_axis.norm(); - - // Get z from the cross product of these two - Eigen::Matrix y_axis = skew_x(z_axis)*x_axis; - - // From these axes get rotation - Eigen::Matrix Ro; - Ro.block(0,0,3,1) = x_axis; - Ro.block(0,1,3,1) = y_axis; - Ro.block(0,2,3,1) = z_axis; - - // Create our state variables - Eigen::Matrix q_GtoI = rot_2_quat(Ro); - - // Set our biases equal to our noise (subtract our gravity from accelerometer bias) - Eigen::Matrix bg = angavg; - Eigen::Matrix ba = linavg - 
quat_2_Rot(q_GtoI)*_gravity;
-
-    // Set our state variables
-    time0 = window_secondnew.at(window_secondnew.size()-1).timestamp;
-    q_GtoI0 = q_GtoI;
-    b_w0 = bg;
-    v_I0inG = Eigen::Matrix<double, 3, 1>::Zero();
-    b_a0 = ba;
-    p_I0inG = Eigen::Matrix<double, 3, 1>::Zero();
-
-    // Done!!!
-    return true;
-
+  }
+
+  // Return if both of these failed
+  if (window_newest.empty() || window_secondnew.empty()) {
+    // printf(YELLOW "InertialInitializer::initialize_with_imu(): unable to select window of IMU readings, not enough readings\n" RESET);
+    return false;
+  }
+
+  // Calculate the sample variance for the newest one
+  Eigen::Matrix<double, 3, 1> a_avg = Eigen::Matrix<double, 3, 1>::Zero();
+  for (const ImuData &data : window_newest) {
+    a_avg += data.am;
+  }
+  a_avg /= (int)window_newest.size();
+  double a_var = 0;
+  for (const ImuData &data : window_newest) {
+    a_var += (data.am - a_avg).dot(data.am - a_avg);
+  }
+  a_var = std::sqrt(a_var / ((int)window_newest.size() - 1));
+
+  // If it is below the threshold and we want to wait till we detect a jerk
+  if (a_var < _imu_excite_threshold && wait_for_jerk) {
+    printf(YELLOW "InertialInitializer::initialize_with_imu(): no IMU excitation, below threshold %.4f < %.4f\n" RESET, a_var,
+           _imu_excite_threshold);
+    return false;
+  }
+
+  // Return if we don't have any measurements
+  // if(imu_data.size() < 200) {
+  //  return false;
+  //}
+
+  // Sum up our current accelerations and angular velocities
+  Eigen::Vector3d linsum = Eigen::Vector3d::Zero();
+  Eigen::Vector3d angsum = Eigen::Vector3d::Zero();
+  for (size_t i = 0; i < window_secondnew.size(); i++) {
+    linsum += window_secondnew.at(i).am;
+    angsum += window_secondnew.at(i).wm;
+  }
+
+  // Calculate the mean of the linear acceleration and angular velocity
+  Eigen::Vector3d linavg = Eigen::Vector3d::Zero();
+  Eigen::Vector3d angavg = Eigen::Vector3d::Zero();
+  linavg = linsum / window_secondnew.size();
+  angavg = angsum / window_secondnew.size();
+
+  // Calculate the sample variance of the second window
+  double a_var2 = 0;
+  for (const ImuData &data : window_secondnew) {
+    a_var2 += (data.am - linavg).dot(data.am - linavg);
+  }
+  a_var2 = std::sqrt(a_var2 / ((int)window_secondnew.size() - 1));
+
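
(The excitation metric computed twice above is just the sample standard deviation of the accelerometer readings over a window; as a standalone sketch with illustrative names, not part of the patch:)

#include <cmath>
#include <vector>
#include <Eigen/Dense>

double accel_stddev(const std::vector<Eigen::Vector3d> &am) {
  Eigen::Vector3d avg = Eigen::Vector3d::Zero();
  for (const auto &a : am)
    avg += a;
  avg /= (double)am.size();
  double var = 0;
  for (const auto &a : am)
    var += (a - avg).dot(a - avg);
  // Unbiased (n-1) sample estimate; compared against _imu_excite_threshold
  return std::sqrt(var / ((double)am.size() - 1));
}

+  // If it is above the threshold and we are not waiting for a jerk
+  // Then we are not stationary (i.e.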
moving) so we should wait till we are
+  if ((a_var > _imu_excite_threshold || a_var2 > _imu_excite_threshold) && !wait_for_jerk) {
+    printf(YELLOW "InertialInitializer::initialize_with_imu(): too much IMU excitation, above threshold %.4f,%.4f > %.4f\n" RESET, a_var,
+           a_var2, _imu_excite_threshold);
+    return false;
+  }
+
+  // Get z axis, which aligns with -g (z_in_G=0,0,1)
+  Eigen::Vector3d z_axis = linavg / linavg.norm();
+
+  // Create an x_axis
+  Eigen::Vector3d e_1(1, 0, 0);
+
+  // Make x_axis perpendicular to z
+  Eigen::Vector3d x_axis = e_1 - z_axis * z_axis.transpose() * e_1;
+  x_axis = x_axis / x_axis.norm();
+
+  // Get y from the cross product of these two
+  Eigen::Matrix<double, 3, 1> y_axis = skew_x(z_axis) * x_axis;
+
+  // From these axes get rotation
+  Eigen::Matrix<double, 3, 3> Ro;
+  Ro.block(0, 0, 3, 1) = x_axis;
+  Ro.block(0, 1, 3, 1) = y_axis;
+  Ro.block(0, 2, 3, 1) = z_axis;
+
+  // Create our state variables
+  Eigen::Matrix<double, 4, 1> q_GtoI = rot_2_quat(Ro);
+
+  // Set our biases equal to our noise (subtract our gravity from accelerometer bias)
+  Eigen::Matrix<double, 3, 1> bg = angavg;
+  Eigen::Matrix<double, 3, 1> ba = linavg - quat_2_Rot(q_GtoI) * _gravity;
+
+  // Set our state variables
+  time0 = window_secondnew.at(window_secondnew.size() - 1).timestamp;
+  q_GtoI0 = q_GtoI;
+  b_w0 = bg;
+  v_I0inG = Eigen::Matrix<double, 3, 1>::Zero();
+  b_a0 = ba;
+  p_I0inG = Eigen::Matrix<double, 3, 1>::Zero();
+
+  // Done!!!
+  return true;
 }
-
-
-
-
diff --git a/ov_core/src/init/InertialInitializer.h b/ov_core/src/init/InertialInitializer.h
index b73201438..60d8d4ee2 100644
--- a/ov_core/src/init/InertialInitializer.h
+++ b/ov_core/src/init/InertialInitializer.h
@@ -21,91 +21,83 @@
 #ifndef OV_CORE_INERTIALINITIALIZER_H
 #define OV_CORE_INERTIALINITIALIZER_H
-#include "utils/quat_ops.h"
 #include "utils/colors.h"
+#include "utils/quat_ops.h"
 #include "utils/sensor_data.h"
 namespace ov_core {
-
-
-
-    /**
-     * @brief Initializer for visual-inertial system.
-     *
-     * This class has a series of functions that can be used to initialize your system.
-     * Right now we have our implementation that assumes that the imu starts from standing still.
-     * In the future we plan to add support for structure-from-motion dynamic initialization.
-     *
-     * To initialize from standstill:
-     * 1. Collect all inertial measurements
-     * 2. See if within the last window there was a jump in acceleration
-     * 3. If the jump is past our threshold we should init (i.e. we have started moving)
-     * 4. Use the *previous* window, which should have been stationary to initialize orientation
-     * 5. Return a roll and pitch aligned with gravity and biases.
-     *
-     */
-    class InertialInitializer {
-
-    public:
-
-        /**
-         * @brief Default constructor
-         * @param gravity Gravity in the global frame of reference
-         * @param window_length Amount of time we will initialize over (seconds)
-         * @param imu_excite_threshold Variance threshold on our acceleration to be classified as moving
-         */
-        InertialInitializer(Eigen::Matrix<double, 3, 1> gravity, double window_length, double imu_excite_threshold) :
-                _gravity(gravity), _window_length(window_length), _imu_excite_threshold(imu_excite_threshold) {}
-
-        /**
-         * @brief Feed function for inertial data
-         * @param message Contains our timestamp and inertial information
-         */
-        void feed_imu(const ImuData &message);
-
-
-        /**
-         * @brief Try to initialize the system using just the imu
-         *
-         * This will check if we have had a large enough jump in our acceleration.
-         * If we have then we will use the period of time before this jump to initialize the state.
- * This assumes that our imu is sitting still and is not moving (so this would fail if we are experiencing constant acceleration). - * - * In the case that we do not wait for a jump (i.e. `wait_for_jerk` is false), then the system will try to initialize as soon as possible. - * This is only recommended if you have zero velocity update enabled to handle the stationary cases. - * To initialize in this case, we need to have the average angular variance be below the set threshold (i.e. we need to be stationary). - * - * @param[out] time0 Timestamp that the returned state is at - * @param[out] q_GtoI0 Orientation at initialization - * @param[out] b_w0 Gyro bias at initialization - * @param[out] v_I0inG Velocity at initialization - * @param[out] b_a0 Acceleration bias at initialization - * @param[out] p_I0inG Position at initialization - * @param wait_for_jerk If true we will wait for a "jerk" - * @return True if we have successfully initialized our system - */ - bool initialize_with_imu(double &time0, Eigen::Matrix &q_GtoI0, Eigen::Matrix &b_w0, - Eigen::Matrix &v_I0inG, Eigen::Matrix &b_a0, Eigen::Matrix &p_I0inG, bool wait_for_jerk=true); - - - protected: - - /// Gravity vector - Eigen::Matrix _gravity; - - /// Amount of time we will initialize over (seconds) - double _window_length; - - /// Variance threshold on our acceleration to be classified as moving - double _imu_excite_threshold; - - /// Our history of IMU messages (time, angular, linear) - std::vector imu_data; - - - }; - - -} - -#endif //OV_CORE_INERTIALINITIALIZER_H +/** + * @brief Initializer for visual-inertial system. + * + * This class has a series of functions that can be used to initialize your system. + * Right now we have our implementation that assumes that the imu starts from standing still. + * In the future we plan to add support for structure-from-motion dynamic initialization. + * + * To initialize from standstill: + * 1. Collect all inertial measurements + * 2. See if within the last window there was a jump in acceleration + * 3. If the jump is past our threshold we should init (i.e. we have started moving) + * 4. Use the *previous* window, which should have been stationary to initialize orientation + * 5. Return a roll and pitch aligned with gravity and biases. + * + */ +class InertialInitializer { + +public: + /** + * @brief Default constructor + * @param gravity Gravity in the global frame of reference + * @param window_length Amount of time we will initialize over (seconds) + * @param imu_excite_threshold Variance threshold on our acceleration to be classified as moving + */ + InertialInitializer(Eigen::Matrix gravity, double window_length, double imu_excite_threshold) + : _gravity(gravity), _window_length(window_length), _imu_excite_threshold(imu_excite_threshold) {} + + /** + * @brief Feed function for inertial data + * @param message Contains our timestamp and inertial information + */ + void feed_imu(const ImuData &message); + + /** + * @brief Try to initialize the system using just the imu + * + * This will check if we have had a large enough jump in our acceleration. + * If we have then we will use the period of time before this jump to initialize the state. + * This assumes that our imu is sitting still and is not moving (so this would fail if we are experiencing constant acceleration). + * + * In the case that we do not wait for a jump (i.e. `wait_for_jerk` is false), then the system will try to initialize as soon as possible. 
+ * This is only recommended if you have zero velocity update enabled to handle the stationary cases. + * To initialize in this case, we need to have the average angular variance be below the set threshold (i.e. we need to be stationary). + * + * @param[out] time0 Timestamp that the returned state is at + * @param[out] q_GtoI0 Orientation at initialization + * @param[out] b_w0 Gyro bias at initialization + * @param[out] v_I0inG Velocity at initialization + * @param[out] b_a0 Acceleration bias at initialization + * @param[out] p_I0inG Position at initialization + * @param wait_for_jerk If true we will wait for a "jerk" + * @return True if we have successfully initialized our system + */ + bool initialize_with_imu(double &time0, Eigen::Matrix &q_GtoI0, Eigen::Matrix &b_w0, + Eigen::Matrix &v_I0inG, Eigen::Matrix &b_a0, Eigen::Matrix &p_I0inG, + bool wait_for_jerk = true); + +protected: + /// Gravity vector + Eigen::Matrix _gravity; + + /// Amount of time we will initialize over (seconds) + double _window_length; + + /// Variance threshold on our acceleration to be classified as moving + double _imu_excite_threshold; + + /// Our history of IMU messages (time, angular, linear) + std::vector imu_data; +}; + +} // namespace ov_core + +#endif // OV_CORE_INERTIALINITIALIZER_H diff --git a/ov_core/src/sim/BsplineSE3.cpp b/ov_core/src/sim/BsplineSE3.cpp index 65d636206..e75f1bb96 100644 --- a/ov_core/src/sim/BsplineSE3.cpp +++ b/ov_core/src/sim/BsplineSE3.cpp @@ -20,362 +20,336 @@ */ #include "BsplineSE3.h" - using namespace ov_core; - - - - void BsplineSE3::feed_trajectory(std::vector traj_points) { - - // Find the average frequency to use as our uniform timesteps - double sumdt = 0; - for(size_t i=0; i= timestamp_min) { - timestamp_max = pose.first; - } + // Find the average frequency to use as our uniform timesteps + double sumdt = 0; + for (size_t i = 0; i < traj_points.size() - 1; i++) { + sumdt += traj_points.at(i + 1)(0) - traj_points.at(i)(0); + } + dt = sumdt / (traj_points.size() - 1); + dt = (dt < 0.05) ? 
0.05 : dt; + printf("[B-SPLINE]: control point dt = %.3f (original dt of %.3f)\n", dt, sumdt / (traj_points.size() - 1)); + + // convert all our trajectory points into SE(3) matrices + // we are given [timestamp, p_IinG, q_GtoI] + AlignedEigenMat4d trajectory_points; + for (size_t i = 0; i < traj_points.size() - 1; i++) { + Eigen::Matrix4d T_IinG = Eigen::Matrix4d::Identity(); + T_IinG.block(0, 0, 3, 3) = quat_2_Rot(traj_points.at(i).block(4, 0, 4, 1)).transpose(); + T_IinG.block(0, 3, 3, 1) = traj_points.at(i).block(1, 0, 3, 1); + trajectory_points.insert({traj_points.at(i)(0), T_IinG}); + } + + // Get the oldest timestamp + double timestamp_min = INFINITY; + double timestamp_max = -INFINITY; + for (const auto &pose : trajectory_points) { + if (pose.first <= timestamp_min) { + timestamp_min = pose.first; } - printf("[B-SPLINE]: trajectory start time = %.6f\n",timestamp_min); - printf("[B-SPLINE]: trajectory end time = %.6f\n",timestamp_max); - - - // then create spline control points - double timestamp_curr = timestamp_min; - while(true) { - - // Get bounding posed for the current time - double t0, t1; - Eigen::Matrix4d pose0, pose1; - bool success = find_bounding_poses(timestamp_curr, trajectory_points, t0, pose0, t1, pose1); - //printf("[SIM]: time curr = %.6f | lambda = %.3f | dt = %.3f | dtmeas = %.3f\n",timestamp_curr,(timestamp_curr-t0)/(t1-t0),dt,(t1-t0)); - - // If we didn't find a bounding pose, then that means we are at the end of the dataset - // Thus break out of this loop since we have created our max number of control points - if(!success) - break; - - // Linear interpolation and append to our control points - double lambda = (timestamp_curr-t0)/(t1-t0); - Eigen::Matrix4d pose_interp = exp_se3(lambda*log_se3(pose1*Inv_se3(pose0)))*pose0; - control_points.insert({timestamp_curr, pose_interp}); - timestamp_curr += dt; - //std::cout << pose_interp(0,3) << "," << pose_interp(1,3) << "," << pose_interp(2,3) << std::endl; - + if (pose.first >= timestamp_min) { + timestamp_max = pose.first; } - - // The start time of the system is two dt in since we need at least two older control points - timestamp_start = timestamp_min + 2*dt; - printf("[B-SPLINE]: start trajectory time of %.6f\n",timestamp_start); - + } + printf("[B-SPLINE]: trajectory start time = %.6f\n", timestamp_min); + printf("[B-SPLINE]: trajectory end time = %.6f\n", timestamp_max); + + // then create spline control points + double timestamp_curr = timestamp_min; + while (true) { + + // Get bounding posed for the current time + double t0, t1; + Eigen::Matrix4d pose0, pose1; + bool success = find_bounding_poses(timestamp_curr, trajectory_points, t0, pose0, t1, pose1); + // printf("[SIM]: time curr = %.6f | lambda = %.3f | dt = %.3f | dtmeas = + // %.3f\n",timestamp_curr,(timestamp_curr-t0)/(t1-t0),dt,(t1-t0)); + + // If we didn't find a bounding pose, then that means we are at the end of the dataset + // Thus break out of this loop since we have created our max number of control points + if (!success) + break; + + // Linear interpolation and append to our control points + double lambda = (timestamp_curr - t0) / (t1 - t0); + Eigen::Matrix4d pose_interp = exp_se3(lambda * log_se3(pose1 * Inv_se3(pose0))) * pose0; + control_points.insert({timestamp_curr, pose_interp}); + timestamp_curr += dt; + // std::cout << pose_interp(0,3) << "," << pose_interp(1,3) << "," << pose_interp(2,3) << std::endl; + } + + // The start time of the system is two dt in since we need at least two older control points + timestamp_start = 
timestamp_min + 2 * dt;
+  printf("[B-SPLINE]: start trajectory time of %.6f\n", timestamp_start);
 }
-
-
-
-
 bool BsplineSE3::get_pose(double timestamp, Eigen::Matrix3d &R_GtoI, Eigen::Vector3d &p_IinG) {
-
-    // Get the bounding poses for the desired timestamp
-    double t0, t1, t2, t3;
-    Eigen::Matrix4d pose0, pose1, pose2, pose3;
-    bool success = find_bounding_control_points(timestamp, control_points, t0, pose0, t1, pose1, t2, pose2, t3, pose3);
-    //printf("[SIM]: time curr = %.6f | dt1 = %.3f | dt2 = %.3f | dt3 = %.3f | dt4 = %.3f | success = %d\n",timestamp,t0-timestamp,t1-timestamp,t2-timestamp,t3-timestamp,(int)success);
-
-    // Return failure if we can't get bounding poses
-    if(!success) {
-        R_GtoI.setIdentity();
-        p_IinG.setZero();
-        return false;
-    }
-
-    // Our De Boor-Cox matrix scalars
-    double DT = (t2-t1);
-    double u = (timestamp-t1)/DT;
-    double b0 = 1.0/6.0*(5+3*u-3*u*u+u*u*u);
-    double b1 = 1.0/6.0*(1+3*u+3*u*u-2*u*u*u);
-    double b2 = 1.0/6.0*(u*u*u);
-
-    // Calculate interpolated poses
-    Eigen::Matrix4d A0 = exp_se3(b0*log_se3(Inv_se3(pose0)*pose1));
-    Eigen::Matrix4d A1 = exp_se3(b1*log_se3(Inv_se3(pose1)*pose2));
-    Eigen::Matrix4d A2 = exp_se3(b2*log_se3(Inv_se3(pose2)*pose3));
-
-    // Finally get the interpolated pose
-    Eigen::Matrix4d pose_interp = pose0*A0*A1*A2;
-    R_GtoI = pose_interp.block(0,0,3,3).transpose();
-    p_IinG = pose_interp.block(0,3,3,1);
-    return true;
-
+  // Get the bounding poses for the desired timestamp
+  double t0, t1, t2, t3;
+  Eigen::Matrix4d pose0, pose1, pose2, pose3;
+  bool success = find_bounding_control_points(timestamp, control_points, t0, pose0, t1, pose1, t2, pose2, t3, pose3);
+  // printf("[SIM]: time curr = %.6f | dt1 = %.3f | dt2 = %.3f | dt3 = %.3f | dt4 = %.3f | success =
+  // %d\n",timestamp,t0-timestamp,t1-timestamp,t2-timestamp,t3-timestamp,(int)success);
+
+  // Return failure if we can't get bounding poses
+  if (!success) {
+    R_GtoI.setIdentity();
+    p_IinG.setZero();
+    return false;
+  }
+
+  // Our De Boor-Cox matrix scalars
+  double DT = (t2 - t1);
+  double u = (timestamp - t1) / DT;
+  double b0 = 1.0 / 6.0 * (5 + 3 * u - 3 * u * u + u * u * u);
+  double b1 = 1.0 / 6.0 * (1 + 3 * u + 3 * u * u - 2 * u * u * u);
+  double b2 = 1.0 / 6.0 * (u * u * u);
+
+  // Calculate interpolated poses
+  Eigen::Matrix4d A0 = exp_se3(b0 * log_se3(Inv_se3(pose0) * pose1));
+  Eigen::Matrix4d A1 = exp_se3(b1 * log_se3(Inv_se3(pose1) * pose2));
+  Eigen::Matrix4d A2 = exp_se3(b2 * log_se3(Inv_se3(pose2) * pose3));
+
+  // Finally get the interpolated pose
+  Eigen::Matrix4d pose_interp = pose0 * A0 * A1 * A2;
+  R_GtoI = pose_interp.block(0, 0, 3, 3).transpose();
+  p_IinG = pose_interp.block(0, 3, 3, 1);
+  return true;
 }
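
(A numeric sanity check, ours rather than part of the patch, that the analytic b0dot/b1dot/b2dot terms used by get_velocity() below are the time derivatives of the cumulative De Boor-Cox scalars in get_pose() above, remembering u = (t - t1) / DT:)

#include <cassert>
#include <cmath>

int main() {
  double DT = 0.1, u = 0.37, h = 1e-7;
  auto b0 = [](double u) { return (5 + 3 * u - 3 * u * u + u * u * u) / 6.0; };
  auto b1 = [](double u) { return (1 + 3 * u + 3 * u * u - 2 * u * u * u) / 6.0; };
  auto b2 = [](double u) { return (u * u * u) / 6.0; };
  double b0dot = (3 - 6 * u + 3 * u * u) / (6.0 * DT);
  double b1dot = (3 + 6 * u - 6 * u * u) / (6.0 * DT);
  double b2dot = (3 * u * u) / (6.0 * DT);
  // d/dt = (1/DT) d/du, so a finite difference in u divided by DT must agree
  assert(std::fabs((b0(u + h) - b0(u)) / (h * DT) - b0dot) < 1e-4);
  assert(std::fabs((b1(u + h) - b1(u)) / (h * DT) - b1dot) < 1e-4);
  assert(std::fabs((b2(u + h) - b2(u)) / (h * DT) - b2dot) < 1e-4);
  return 0;
}

-
-
-
-bool BsplineSE3::get_velocity(double timestamp, Eigen::Matrix3d &R_GtoI, Eigen::Vector3d &p_IinG, Eigen::Vector3d &w_IinI, Eigen::Vector3d &v_IinG) {
-
-    // Get the bounding poses for the desired timestamp
-    double t0, t1, t2, t3;
-    Eigen::Matrix4d pose0, pose1, pose2, pose3;
-    bool success = find_bounding_control_points(timestamp, control_points, t0, pose0, t1, pose1, t2, pose2, t3, pose3);
-    //printf("[SIM]: time curr = %.6f | dt1 = %.3f | dt2 = %.3f | dt3 = %.3f | dt4 = %.3f | success = %d\n",timestamp,t0-timestamp,t1-timestamp,t2-timestamp,t3-timestamp,(int)success);
-
-    // Return failure if we can't get bounding poses
-    if(!success) {
-        w_IinI.setZero();
-        v_IinG.setZero();
-        return false;
-    }
-
-    // Our De Boor-Cox matrix scalars
-    double DT = (t2-t1);
-    double u = (timestamp-t1)/DT;
-    double b0 = 1.0/6.0*(5+3*u-3*u*u+u*u*u);
-    double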
b1 = 1.0/6.0*(1+3*u+3*u*u-2*u*u*u);
-    double b2 = 1.0/6.0*(u*u*u);
-    double b0dot = 1.0/(6.0*DT)*(3-6*u+3*u*u);
-    double b1dot = 1.0/(6.0*DT)*(3+6*u-6*u*u);
-    double b2dot = 1.0/(6.0*DT)*(3*u*u);
-
-    // Cache some values we use a lot
-    Eigen::Matrix<double,6,1> omega_10 = log_se3(Inv_se3(pose0)*pose1);
-    Eigen::Matrix<double,6,1> omega_21 = log_se3(Inv_se3(pose1)*pose2);
-    Eigen::Matrix<double,6,1> omega_32 = log_se3(Inv_se3(pose2)*pose3);
-
-    // Calculate interpolated poses
-    Eigen::Matrix4d A0 = exp_se3(b0*omega_10);
-    Eigen::Matrix4d A1 = exp_se3(b1*omega_21);
-    Eigen::Matrix4d A2 = exp_se3(b2*omega_32);
-    Eigen::Matrix4d A0dot = b0dot*hat_se3(omega_10)*A0;
-    Eigen::Matrix4d A1dot = b1dot*hat_se3(omega_21)*A1;
-    Eigen::Matrix4d A2dot = b2dot*hat_se3(omega_32)*A2;
-
-    // Get the interpolated pose
-    Eigen::Matrix4d pose_interp = pose0*A0*A1*A2;
-    R_GtoI = pose_interp.block(0,0,3,3).transpose();
-    p_IinG = pose_interp.block(0,3,3,1);
-
-    // Finally get the interpolated velocities
-    // NOTE: Rdot = R*skew(omega) => R^T*Rdot = skew(omega)
-    Eigen::Matrix4d vel_interp = pose0*(A0dot*A1*A2+A0*A1dot*A2+A0*A1*A2dot);
-    w_IinI = vee(pose_interp.block(0,0,3,3).transpose()*vel_interp.block(0,0,3,3));
-    v_IinG = vel_interp.block(0,3,3,1);
-    return true;
-
+bool BsplineSE3::get_velocity(double timestamp, Eigen::Matrix3d &R_GtoI, Eigen::Vector3d &p_IinG, Eigen::Vector3d &w_IinI,
+                              Eigen::Vector3d &v_IinG) {
+
+  // Get the bounding poses for the desired timestamp
+  double t0, t1, t2, t3;
+  Eigen::Matrix4d pose0, pose1, pose2, pose3;
+  bool success = find_bounding_control_points(timestamp, control_points, t0, pose0, t1, pose1, t2, pose2, t3, pose3);
+  // printf("[SIM]: time curr = %.6f | dt1 = %.3f | dt2 = %.3f | dt3 = %.3f | dt4 = %.3f | success =
+  // %d\n",timestamp,t0-timestamp,t1-timestamp,t2-timestamp,t3-timestamp,(int)success);
+
+  // Return failure if we can't get bounding poses
+  if (!success) {
+    w_IinI.setZero();
+    v_IinG.setZero();
+    return false;
+  }
+
+  // Our De Boor-Cox matrix scalars
+  double DT = (t2 - t1);
+  double u = (timestamp - t1) / DT;
+  double b0 = 1.0 / 6.0 * (5 + 3 * u - 3 * u * u + u * u * u);
+  double b1 = 1.0 / 6.0 * (1 + 3 * u + 3 * u * u - 2 * u * u * u);
+  double b2 = 1.0 / 6.0 * (u * u * u);
+  double b0dot = 1.0 / (6.0 * DT) * (3 - 6 * u + 3 * u * u);
+  double b1dot = 1.0 / (6.0 * DT) * (3 + 6 * u - 6 * u * u);
+  double b2dot = 1.0 / (6.0 * DT) * (3 * u * u);
+
+  // Cache some values we use a lot
+  Eigen::Matrix<double, 6, 1> omega_10 = log_se3(Inv_se3(pose0) * pose1);
+  Eigen::Matrix<double, 6, 1> omega_21 = log_se3(Inv_se3(pose1) * pose2);
+  Eigen::Matrix<double, 6, 1> omega_32 = log_se3(Inv_se3(pose2) * pose3);
+
+  // Calculate interpolated poses
+  Eigen::Matrix4d A0 = exp_se3(b0 * omega_10);
+  Eigen::Matrix4d A1 = exp_se3(b1 * omega_21);
+  Eigen::Matrix4d A2 = exp_se3(b2 * omega_32);
+  Eigen::Matrix4d A0dot = b0dot * hat_se3(omega_10) * A0;
+  Eigen::Matrix4d A1dot = b1dot * hat_se3(omega_21) * A1;
+  Eigen::Matrix4d A2dot = b2dot * hat_se3(omega_32) * A2;
+
+  // Get the interpolated pose
+  Eigen::Matrix4d pose_interp = pose0 * A0 * A1 * A2;
+  R_GtoI = pose_interp.block(0, 0, 3, 3).transpose();
+  p_IinG = pose_interp.block(0, 3, 3, 1);
+
+  // Finally get the interpolated velocities
+  // NOTE: Rdot = R*skew(omega) => R^T*Rdot = skew(omega)
+  Eigen::Matrix4d vel_interp = pose0 * (A0dot * A1 * A2 + A0 * A1dot * A2 + A0 * A1 * A2dot);
+  w_IinI = vee(pose_interp.block(0, 0, 3, 3).transpose() * vel_interp.block(0, 0, 3, 3));
+  v_IinG = vel_interp.block(0, 3, 3, 1);
+  return true;
}
-
-
-
-bool BsplineSE3::get_acceleration(double timestamp, Eigen::Matrix3d &R_GtoI, Eigen::Vector3d &p_IinG,
-                                  Eigen::Vector3d &w_IinI, Eigen::Vector3d &v_IinG,
-                                  Eigen::Vector3d &alpha_IinI, Eigen::Vector3d &a_IinG) {
-
-    // Get the bounding poses for the desired timestamp
-    double t0, t1, t2, t3;
-    Eigen::Matrix4d pose0, pose1, pose2, pose3;
-    bool success = find_bounding_control_points(timestamp, control_points, t0, pose0, t1, pose1, t2, pose2, t3, pose3);
-
-    // Return failure if we can't get bounding poses
-    if(!success) {
-        alpha_IinI.setZero();
-        a_IinG.setZero();
-        return false;
-    }
-
-    // Our De Boor-Cox matrix scalars
-    double DT = (t2-t1);
-    double u = (timestamp-t1)/DT;
-    double b0 = 1.0/6.0*(5+3*u-3*u*u+u*u*u);
-    double b1 = 1.0/6.0*(1+3*u+3*u*u-2*u*u*u);
-    double b2 = 1.0/6.0*(u*u*u);
-    double b0dot = 1.0/(6.0*DT)*(3-6*u+3*u*u);
-    double b1dot = 1.0/(6.0*DT)*(3+6*u-6*u*u);
-    double b2dot = 1.0/(6.0*DT)*(3*u*u);
-    double b0dotdot = 1.0/(6.0*DT*DT)*(-6+6*u);
-    double b1dotdot = 1.0/(6.0*DT*DT)*(6-12*u);
-    double b2dotdot = 1.0/(6.0*DT*DT)*(6*u);
-
-    // Cache some values we use a lot
-    Eigen::Matrix<double,6,1> omega_10 = log_se3(Inv_se3(pose0)*pose1);
-    Eigen::Matrix<double,6,1> omega_21 = log_se3(Inv_se3(pose1)*pose2);
-    Eigen::Matrix<double,6,1> omega_32 = log_se3(Inv_se3(pose2)*pose3);
-    Eigen::Matrix4d omega_10_hat = hat_se3(omega_10);
-    Eigen::Matrix4d omega_21_hat = hat_se3(omega_21);
-    Eigen::Matrix4d omega_32_hat = hat_se3(omega_32);
-
-    // Calculate interpolated poses
-    Eigen::Matrix4d A0 = exp_se3(b0*omega_10);
-    Eigen::Matrix4d A1 = exp_se3(b1*omega_21);
-    Eigen::Matrix4d A2 = exp_se3(b2*omega_32);
-    Eigen::Matrix4d A0dot = b0dot*omega_10_hat*A0;
-    Eigen::Matrix4d A1dot = b1dot*omega_21_hat*A1;
-    Eigen::Matrix4d A2dot = b2dot*omega_32_hat*A2;
-    Eigen::Matrix4d A0dotdot = b0dot*omega_10_hat*A0dot+b0dotdot*omega_10_hat*A0;
-    Eigen::Matrix4d A1dotdot = b1dot*omega_21_hat*A1dot+b1dotdot*omega_21_hat*A1;
-    Eigen::Matrix4d A2dotdot = b2dot*omega_32_hat*A2dot+b2dotdot*omega_32_hat*A2;
-
-    // Get the interpolated pose
-    Eigen::Matrix4d pose_interp = pose0*A0*A1*A2;
-    R_GtoI = pose_interp.block(0,0,3,3).transpose();
-    p_IinG = pose_interp.block(0,3,3,1);
-
-    // Get the interpolated velocities
-    // NOTE: Rdot = R*skew(omega) => R^T*Rdot = skew(omega)
-    Eigen::Matrix4d vel_interp = pose0*(A0dot*A1*A2+A0*A1dot*A2+A0*A1*A2dot);
-    w_IinI = vee(pose_interp.block(0,0,3,3).transpose()*vel_interp.block(0,0,3,3));
-    v_IinG = vel_interp.block(0,3,3,1);
-
-    // Finally get the interpolated accelerations
-    // NOTE: Rdot = R*skew(omega)
-    // NOTE: Rdotdot = Rdot*skew(omega) + R*skew(alpha) => R^T*(Rdotdot-Rdot*skew(omega))=skew(alpha)
-    Eigen::Matrix4d acc_interp = pose0*(A0dotdot*A1*A2+A0*A1dotdot*A2+A0*A1*A2dotdot
-                                        +2*A0dot*A1dot*A2+2*A0*A1dot*A2dot+2*A0dot*A1*A2dot);
-    Eigen::Matrix3d omegaskew = pose_interp.block(0,0,3,3).transpose()*vel_interp.block(0,0,3,3);
-    alpha_IinI = vee(pose_interp.block(0,0,3,3).transpose()*(acc_interp.block(0,0,3,3)-vel_interp.block(0,0,3,3)*omegaskew));
-    a_IinG = acc_interp.block(0,3,3,1);
-    return true;
-
+bool BsplineSE3::get_acceleration(double timestamp, Eigen::Matrix3d &R_GtoI, Eigen::Vector3d &p_IinG, Eigen::Vector3d &w_IinI,
+                                  Eigen::Vector3d &v_IinG, Eigen::Vector3d &alpha_IinI, Eigen::Vector3d &a_IinG) {
+
+  // Get the bounding poses for the desired timestamp
+  double t0, t1, t2, t3;
+  Eigen::Matrix4d pose0, pose1, pose2, pose3;
+  bool success = find_bounding_control_points(timestamp, control_points, t0, pose0, t1, pose1, t2, pose2, t3, pose3);
+
+  // Return failure if we can't get bounding poses
+  if (!success) {
+    alpha_IinI.setZero();
+    a_IinG.setZero();
+    return false;
+  }
+
+  // Our De Boor-Cox matrix scalars
+  double DT = (t2 - t1);
+  double u = (timestamp - t1) / DT;
+  double b0 = 1.0 / 6.0 * (5 + 3 * u - 3 * u * u + u * u * u);
+  double b1 = 1.0 / 6.0 * (1 + 3 * u + 3 * u * u - 2 * u * u * u);
+  double b2 = 1.0 / 6.0 * (u * u * u);
+  double b0dot = 1.0 / (6.0 * DT) * (3 - 6 * u + 3 * u * u);
+  double b1dot = 1.0 / (6.0 * DT) * (3 + 6 * u - 6 * u * u);
+  double b2dot = 1.0 / (6.0 * DT) * (3 * u * u);
+  double b0dotdot = 1.0 / (6.0 * DT * DT) * (-6 + 6 * u);
+  double b1dotdot = 1.0 / (6.0 * DT * DT) * (6 - 12 * u);
+  double b2dotdot = 1.0 / (6.0 * DT * DT) * (6 * u);
+
+  // Cache some values we use a lot
+  Eigen::Matrix<double, 6, 1> omega_10 = log_se3(Inv_se3(pose0) * pose1);
+  Eigen::Matrix<double, 6, 1> omega_21 = log_se3(Inv_se3(pose1) * pose2);
+  Eigen::Matrix<double, 6, 1> omega_32 = log_se3(Inv_se3(pose2) * pose3);
+  Eigen::Matrix4d omega_10_hat = hat_se3(omega_10);
+  Eigen::Matrix4d omega_21_hat = hat_se3(omega_21);
+  Eigen::Matrix4d omega_32_hat = hat_se3(omega_32);
+
+  // Calculate interpolated poses
+  Eigen::Matrix4d A0 = exp_se3(b0 * omega_10);
+  Eigen::Matrix4d A1 = exp_se3(b1 * omega_21);
+  Eigen::Matrix4d A2 = exp_se3(b2 * omega_32);
+  Eigen::Matrix4d A0dot = b0dot * omega_10_hat * A0;
+  Eigen::Matrix4d A1dot = b1dot * omega_21_hat * A1;
+  Eigen::Matrix4d A2dot = b2dot * omega_32_hat * A2;
+  Eigen::Matrix4d A0dotdot = b0dot * omega_10_hat * A0dot + b0dotdot * omega_10_hat * A0;
+  Eigen::Matrix4d A1dotdot = b1dot * omega_21_hat * A1dot + b1dotdot * omega_21_hat * A1;
+  Eigen::Matrix4d A2dotdot = b2dot * omega_32_hat * A2dot + b2dotdot * omega_32_hat * A2;
+
+  // Get the interpolated pose
+  Eigen::Matrix4d pose_interp = pose0 * A0 * A1 * A2;
+  R_GtoI = pose_interp.block(0, 0, 3, 3).transpose();
+  p_IinG = pose_interp.block(0, 3, 3, 1);
+
+  // Get the interpolated velocities
+  // NOTE: Rdot = R*skew(omega) => R^T*Rdot = skew(omega)
+  Eigen::Matrix4d vel_interp = pose0 * (A0dot * A1 * A2 + A0 * A1dot * A2 + A0 * A1 * A2dot);
+  w_IinI = vee(pose_interp.block(0, 0, 3, 3).transpose() * vel_interp.block(0, 0, 3, 3));
+  v_IinG = vel_interp.block(0, 3, 3, 1);
+
+  // Finally get the interpolated accelerations
+  // NOTE: Rdot = R*skew(omega)
+  // NOTE: Rdotdot = Rdot*skew(omega) + R*skew(alpha) => R^T*(Rdotdot-Rdot*skew(omega))=skew(alpha)
+  Eigen::Matrix4d acc_interp = pose0 * (A0dotdot * A1 * A2 + A0 * A1dotdot * A2 + A0 * A1 * A2dotdot + 2 * A0dot * A1dot * A2 +
+                                        2 * A0 * A1dot * A2dot + 2 * A0dot * A1 * A2dot);
+  Eigen::Matrix3d omegaskew = pose_interp.block(0, 0, 3, 3).transpose() * vel_interp.block(0, 0, 3, 3);
+  alpha_IinI = vee(pose_interp.block(0, 0, 3, 3).transpose() * (acc_interp.block(0, 0, 3, 3) - vel_interp.block(0, 0, 3, 3) * omegaskew));
+  a_IinG = acc_interp.block(0, 3, 3, 1);
+  return true;
}
-
-bool BsplineSE3::find_bounding_poses(const double timestamp, const AlignedEigenMat4d &poses,
-                                     double &t0, Eigen::Matrix4d &pose0, double &t1, Eigen::Matrix4d &pose1) {
-
-    // Set the default values
-    t0 = -1;
-    t1 = -1;
-    pose0 = Eigen::Matrix4d::Identity();
-    pose1 = Eigen::Matrix4d::Identity();
-
-    // Find the bounding poses
-    bool found_older = false;
-    bool found_newer = false;
-
-    // Find the bounding poses for interpolation.
-    auto lower_bound = poses.lower_bound(timestamp); // Finds timestamp or next(timestamp) if not available
-    auto upper_bound = poses.upper_bound(timestamp); // Finds next(timestamp)
-
-    if(lower_bound != poses.end()) {
-        // Check that the lower bound is the timestamp.
- // If not then we move iterator to previous timestamp so that the timestamp is bounded - if(lower_bound->first == timestamp) { - found_older = true; - } else if(lower_bound != poses.begin()) { - --lower_bound; - found_older = true; - } - } - - if(upper_bound != poses.end()) { - found_newer = true; - } - - // If we found the older one, set it - if (found_older) { - t0 = lower_bound->first; - pose0 = lower_bound->second; - } - - // If we found the newest one, set it - if (found_newer) { - t1 = upper_bound->first; - pose1 = upper_bound->second; +bool BsplineSE3::find_bounding_poses(const double timestamp, const AlignedEigenMat4d &poses, double &t0, Eigen::Matrix4d &pose0, double &t1, + Eigen::Matrix4d &pose1) { + + // Set the default values + t0 = -1; + t1 = -1; + pose0 = Eigen::Matrix4d::Identity(); + pose1 = Eigen::Matrix4d::Identity(); + + // Find the bounding poses + bool found_older = false; + bool found_newer = false; + + // Find the bounding poses for interpolation. + auto lower_bound = poses.lower_bound(timestamp); // Finds timestamp or next(timestamp) if not available + auto upper_bound = poses.upper_bound(timestamp); // Finds next(timestamp) + + if (lower_bound != poses.end()) { + // Check that the lower bound is the timestamp. + // If not then we move iterator to previous timestamp so that the timestamp is bounded + if (lower_bound->first == timestamp) { + found_older = true; + } else if (lower_bound != poses.begin()) { + --lower_bound; + found_older = true; } - - // Assert the timestamps - if (found_older && found_newer) - assert(t0 < t1); - - // Return true if we found both bounds - return (found_older && found_newer); - + } + + if (upper_bound != poses.end()) { + found_newer = true; + } + + // If we found the older one, set it + if (found_older) { + t0 = lower_bound->first; + pose0 = lower_bound->second; + } + + // If we found the newest one, set it + if (found_newer) { + t1 = upper_bound->first; + pose1 = upper_bound->second; + } + + // Assert the timestamps + if (found_older && found_newer) + assert(t0 < t1); + + // Return true if we found both bounds + return (found_older && found_newer); } - - -bool BsplineSE3::find_bounding_control_points(const double timestamp, const AlignedEigenMat4d &poses, - double &t0, Eigen::Matrix4d &pose0, double &t1, Eigen::Matrix4d &pose1, - double &t2, Eigen::Matrix4d &pose2, double &t3, Eigen::Matrix4d &pose3) { - - // Set the default values - t0 = -1; - t1 = -1; - t2 = -1; - t3 = -1; - pose0 = Eigen::Matrix4d::Identity(); - pose1 = Eigen::Matrix4d::Identity(); - pose2 = Eigen::Matrix4d::Identity(); - pose3 = Eigen::Matrix4d::Identity(); - - // Get the two bounding poses - bool success = find_bounding_poses(timestamp, poses, t1, pose1, t2, pose2); - - // Return false if this was a failure - if (!success) - return false; - - // Now find the poses that are below and above - auto iter_t1 = poses.find(t1); - auto iter_t2 = poses.find(t2); - - // Check that t1 is not the first timestamp - if(iter_t1 == poses.begin()) { - return false; - } - - // Move the older pose backwards in time - // Move the newer one forwards in time - auto iter_t0 = --iter_t1; - auto iter_t3 = ++iter_t2; - - // Check that it is valid - if (iter_t3 == poses.end()) { - return false; - } - - // Set the oldest one - t0 = iter_t0->first; - pose0 = iter_t0->second; - - // Set the newest one - t3 = iter_t3->first; - pose3 = iter_t3->second; - - // Assert the timestamps - if (success) { - assert(t0 < t1); - assert(t1 < t2); - assert(t2 < t3); - } - - // Return true if we found all 
four bounding poses - return success; - +bool BsplineSE3::find_bounding_control_points(const double timestamp, const AlignedEigenMat4d &poses, double &t0, Eigen::Matrix4d &pose0, + double &t1, Eigen::Matrix4d &pose1, double &t2, Eigen::Matrix4d &pose2, double &t3, + Eigen::Matrix4d &pose3) { + + // Set the default values + t0 = -1; + t1 = -1; + t2 = -1; + t3 = -1; + pose0 = Eigen::Matrix4d::Identity(); + pose1 = Eigen::Matrix4d::Identity(); + pose2 = Eigen::Matrix4d::Identity(); + pose3 = Eigen::Matrix4d::Identity(); + + // Get the two bounding poses + bool success = find_bounding_poses(timestamp, poses, t1, pose1, t2, pose2); + + // Return false if this was a failure + if (!success) + return false; + + // Now find the poses that are below and above + auto iter_t1 = poses.find(t1); + auto iter_t2 = poses.find(t2); + + // Check that t1 is not the first timestamp + if (iter_t1 == poses.begin()) { + return false; + } + + // Move the older pose backwards in time + // Move the newer one forwards in time + auto iter_t0 = --iter_t1; + auto iter_t3 = ++iter_t2; + + // Check that it is valid + if (iter_t3 == poses.end()) { + return false; + } + + // Set the oldest one + t0 = iter_t0->first; + pose0 = iter_t0->second; + + // Set the newest one + t3 = iter_t3->first; + pose3 = iter_t3->second; + + // Assert the timestamps + if (success) { + assert(t0 < t1); + assert(t1 < t2); + assert(t2 < t3); + } + + // Return true if we found all four bounding poses + return success; } - - diff --git a/ov_core/src/sim/BsplineSE3.h b/ov_core/src/sim/BsplineSE3.h index 36bd35eff..ddafee009 100644 --- a/ov_core/src/sim/BsplineSE3.h +++ b/ov_core/src/sim/BsplineSE3.h @@ -21,206 +21,190 @@ #ifndef OV_CORE_BSPLINESE3_H #define OV_CORE_BSPLINESE3_H +#include #include #include -#include #include "utils/quat_ops.h" namespace ov_core { - - - /** - * @brief B-Spline which performs interpolation over SE(3) manifold. - * - * This class implements the b-spline functionality that allows for interpolation over the \f$\mathbb{SE}(3)\f$ manifold. - * This is based off of the derivations from [Continuous-Time Visual-Inertial Odometry for Event Cameras](https://ieeexplore.ieee.org/abstract/document/8432102/) - * and [A Spline-Based Trajectory Representation for Sensor Fusion and Rolling Shutter Cameras](https://link.springer.com/article/10.1007/s11263-015-0811-3) - * with some additional derivations being available in [these notes](http://udel.edu/~pgeneva/downloads/notes/2018_notes_mueffler2017arxiv.pdf). - * The use of b-splines for \f$\mathbb{SE}(3)\f$ interpolation has the following properties: - * - * 1. Local control, allowing the system to function online as well as in batch - * 2. \f$C^2\f$-continuity to enable inertial predictions and calculations - * 3. Good approximation of minimal torque trajectories - * 4. A parameterization of rigid-body motion devoid of singularities - * - * The key idea is to convert a set of trajectory points into a continuous-time *uniform cubic cumulative* b-spline. - * As compared to standard b-spline representations, the cumulative form ensures local continuity which is needed for on-manifold interpolation. - * We leverage the cubic b-spline to ensure \f$C^2\f$-continuity to ensure that we can calculate accelerations at any point along the trajectory. 
- * The general equations are the following - * - * \f{align*}{ - * {}^{w}_{s}\mathbf{T}(u(t)) - * &= {}^{w}_{i-1}\mathbf{T}~\mathbf{A}_0~\mathbf{A}_1~\mathbf{A}_2 \\ - * \empty - * {}^{w}_{s}\dot{\mathbf{T}}(u(t)) &= - * {}^{w}_{i-1}\mathbf{T} - * \Big( - * \dot{\mathbf{A}}_0~\mathbf{A}_1~\mathbf{A}_2 + - * \mathbf{A}_0~\dot{\mathbf{A}}_1~\mathbf{A}_2 + - * \mathbf{A}_0~\mathbf{A}_1~\dot{\mathbf{A}}_2 - * \Big) \\ - * \empty - * {}^{w}_{s}\ddot{\mathbf{T}}(u(t)) &= - * {}^{w}_{i-1}\mathbf{T} - * \Big( - * \ddot{\mathbf{A}}_0~\mathbf{A}_1~\mathbf{A}_2 + - * \mathbf{A}_0~\ddot{\mathbf{A}}_1~\mathbf{A}_2 + - * \mathbf{A}_0~\mathbf{A}_1~\ddot{\mathbf{A}}_2 \nonumber\\ - * &\hspace{4cm} - * + 2\dot{\mathbf{A}}_0\dot{\mathbf{A}}_1\mathbf{A}_2 + - * 2\mathbf{A}_0\dot{\mathbf{A}}_1\dot{\mathbf{A}}_2 + - * 2\dot{\mathbf{A}}_0\mathbf{A}_1\dot{\mathbf{A}}_2 - * \Big) \\[1em] - * \empty - * {}^{i-1}_{i}\mathbf{\Omega} &= \mathrm{log}\big( {}^{w}_{i-1}\mathbf{T}^{-1}~{}^{w}_{i}\mathbf{T} \big) \\ - * \mathbf{A}_j &= \mathrm{exp}\Big({B}_j(u(t))~{}^{i-1+j}_{i+j}\mathbf{\Omega} \Big) \\ - * \dot{\mathbf{A}}_j &= \dot{B}_j(u(t)) ~{}^{i-1+j}_{i+j}\mathbf{\Omega}^\wedge ~\mathbf{A}_j \\ - * \ddot{\mathbf{A}}_j &= - * \dot{B}_j(u(t)) ~{}^{i-1+j}_{i+j}\mathbf{\Omega}^\wedge ~\dot{\mathbf{A}}_j + - * \ddot{B}_j(u(t)) ~{}^{i-1+j}_{i+j}\mathbf{\Omega}^\wedge ~\mathbf{A}_j \\[1em] - * \empty - * {B}_0(u(t)) &= \frac{1}{3!}~(5+3u-3u^2+u^3) \\ - * {B}_1(u(t)) &= \frac{1}{3!}~(1+3u+3u^2-2u^3) \\ - * {B}_2(u(t)) &= \frac{1}{3!}~(u^3) \\[1em] - * \empty - * \dot{{B}}_0(u(t)) &= \frac{1}{3!}~\frac{1}{\Delta t}~(3-6u+3u^2) \\ - * \dot{{B}}_1(u(t)) &= \frac{1}{3!}~\frac{1}{\Delta t}~(3+6u-6u^2) \\ - * \dot{{B}}_2(u(t)) &= \frac{1}{3!}~\frac{1}{\Delta t}~(3u^2) \\[1em] - * \empty - * \ddot{{B}}_0(u(t)) &= \frac{1}{3!}~\frac{1}{\Delta t^2}~(-6+6u) \\ - * \ddot{{B}}_1(u(t)) &= \frac{1}{3!}~\frac{1}{\Delta t^2}~(6-12u) \\ - * \ddot{{B}}_2(u(t)) &= \frac{1}{3!}~\frac{1}{\Delta t^2}~(6u) - * \f} - * - * where \f$u(t_s)=(t_s-t_i)/\Delta t=(t_s-t_i)/(t_{i+1}-t_i)\f$ is used for all values of *u*. - * Note that one needs to ensure that they use the SE(3) matrix expodential, logorithm, and hat operation for all above equations. - * The indexes correspond to the the two poses that are older and two poses that are newer then the current time we want to get (i.e. i-1 and i are less than s, while i+1 and i+2 are both greater than time s). - * Some additional derivations are available in [these notes](http://udel.edu/~pgeneva/downloads/notes/2018_notes_mueffler2017arxiv.pdf). - */ - class BsplineSE3 { - - public: - - - /** - * @brief Default constructor - */ - BsplineSE3() {} - - - /** - * @brief Will feed in a series of poses that we will then convert into control points. - * - * Our control points need to be uniformly spaced over the trajectory, thus given a trajectory we will - * uniformly sample based on the average spacing between the pose points specified. 
- * - * @param traj_points Trajectory poses that we will convert into control points (timestamp(s), q_GtoI, p_IinG) - */ - void feed_trajectory(std::vector traj_points); - - - /** - * @brief Gets the orientation and position at a given timestamp - * @param timestamp Desired time to get the pose at - * @param R_GtoI SO(3) orientation of the pose in the global frame - * @param p_IinG Position of the pose in the global - * @return False if we can't find it - */ - bool get_pose(double timestamp, Eigen::Matrix3d &R_GtoI, Eigen::Vector3d &p_IinG); - - - /** - * @brief Gets the angular and linear velocity at a given timestamp - * @param timestamp Desired time to get the pose at - * @param R_GtoI SO(3) orientation of the pose in the global frame - * @param p_IinG Position of the pose in the global - * @param w_IinI Angular velocity in the inertial frame - * @param v_IinG Linear velocity in the global frame - * @return False if we can't find it - */ - bool get_velocity(double timestamp, Eigen::Matrix3d &R_GtoI, Eigen::Vector3d &p_IinG, Eigen::Vector3d &w_IinI, Eigen::Vector3d &v_IinG); - - - /** - * @brief Gets the angular and linear acceleration at a given timestamp - * @param timestamp Desired time to get the pose at - * @param R_GtoI SO(3) orientation of the pose in the global frame - * @param p_IinG Position of the pose in the global - * @param w_IinI Angular velocity in the inertial frame - * @param v_IinG Linear velocity in the global frame - * @param alpha_IinI Angular acceleration in the inertial frame - * @param a_IinG Linear acceleration in the global frame - * @return False if we can't find it - */ - bool get_acceleration(double timestamp, Eigen::Matrix3d &R_GtoI, Eigen::Vector3d &p_IinG, - Eigen::Vector3d &w_IinI, Eigen::Vector3d &v_IinG, - Eigen::Vector3d &alpha_IinI, Eigen::Vector3d &a_IinG); - - - /// Returns the simulation start time that we should start simulating from - double get_start_time() { - return timestamp_start; - } - - - protected: - - /// Uniform sampling time for our control points - double dt; - - /// Start time of the system - double timestamp_start; - - /// Type defintion of our aligned eigen4d matrix: https://eigen.tuxfamily.org/dox/group__TopicStlContainers.html - typedef std::map, - Eigen::aligned_allocator>> AlignedEigenMat4d; - - /// Our control SE3 control poses (R_ItoG, p_IinG) - AlignedEigenMat4d control_points; - - - /** - * @brief Will find the two bounding poses for a given timestamp. - * - * This will loop through the passed map of poses and find two bounding poses. - * If there are no bounding poses then this will return false. 
- * - * @param timestamp Desired timestamp we want to get two bounding poses of - * @param poses Map of poses and timestamps - * @param t0 Timestamp of the first pose - * @param pose0 SE(3) pose of the first pose - * @param t1 Timestamp of the second pose - * @param pose1 SE(3) pose of the second pose - * @return False if we are unable to find bounding poses - */ - static bool find_bounding_poses(const double timestamp, const AlignedEigenMat4d &poses, - double &t0, Eigen::Matrix4d &pose0, double &t1, Eigen::Matrix4d &pose1); - - - /** - * @brief Will find two older poses and two newer poses for the current timestamp - * - * @param timestamp Desired timestamp we want to get four bounding poses of - * @param poses Map of poses and timestamps - * @param t0 Timestamp of the first pose - * @param pose0 SE(3) pose of the first pose - * @param t1 Timestamp of the second pose - * @param pose1 SE(3) pose of the second pose - * @param t2 Timestamp of the third pose - * @param pose2 SE(3) pose of the third pose - * @param t3 Timestamp of the fourth pose - * @param pose3 SE(3) pose of the fourth pose - * @return False if we are unable to find bounding poses - */ - static bool find_bounding_control_points(const double timestamp, const AlignedEigenMat4d &poses, - double &t0, Eigen::Matrix4d &pose0, double &t1, Eigen::Matrix4d &pose1, - double &t2, Eigen::Matrix4d &pose2, double &t3, Eigen::Matrix4d &pose3); - - }; - - -} - -#endif //OV_CORE_BSPLINESE3_H +/** + * @brief B-Spline which performs interpolation over SE(3) manifold. + * + * This class implements the b-spline functionality that allows for interpolation over the \f$\mathbb{SE}(3)\f$ manifold. + * This is based off of the derivations from [Continuous-Time Visual-Inertial Odometry for Event + * Cameras](https://ieeexplore.ieee.org/abstract/document/8432102/) and [A Spline-Based Trajectory Representation for Sensor Fusion and + * Rolling Shutter Cameras](https://link.springer.com/article/10.1007/s11263-015-0811-3) with some additional derivations being available in + * [these notes](http://udel.edu/~pgeneva/downloads/notes/2018_notes_mueffler2017arxiv.pdf). The use of b-splines for \f$\mathbb{SE}(3)\f$ + * interpolation has the following properties: + * + * 1. Local control, allowing the system to function online as well as in batch + * 2. \f$C^2\f$-continuity to enable inertial predictions and calculations + * 3. Good approximation of minimal torque trajectories + * 4. A parameterization of rigid-body motion devoid of singularities + * + * The key idea is to convert a set of trajectory points into a continuous-time *uniform cubic cumulative* b-spline. + * As compared to standard b-spline representations, the cumulative form ensures local continuity which is needed for on-manifold + * interpolation. We leverage the cubic b-spline to ensure \f$C^2\f$-continuity to ensure that we can calculate accelerations at any point + * along the trajectory. 
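+ *
+ * A minimal usage sketch (an editorial illustration, not part of this patch; it assumes each trajectory
+ * row packs the timestamp, orientation q_GtoI, and position p_IinG in the layout feed_trajectory() expects):
+ *
+ * \code{.cpp}
+ * ov_core::BsplineSE3 spline;
+ * std::vector<Eigen::VectorXd> traj_points; // rows of (timestamp(s), q_GtoI, p_IinG)
+ * spline.feed_trajectory(traj_points);
+ * Eigen::Matrix3d R_GtoI;
+ * Eigen::Vector3d p_IinG, w_IinI, v_IinG;
+ * double t = spline.get_start_time() + 1.0;
+ * if (spline.get_pose(t, R_GtoI, p_IinG) && spline.get_velocity(t, R_GtoI, p_IinG, w_IinI, v_IinG)) {
+ *   // interpolated pose and its analytic time derivatives at time t
+ * }
+ * \endcode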
+ * The general equations are the following
+ *
+ * \f{align*}{
+ * {}^{w}_{s}\mathbf{T}(u(t))
+ * &= {}^{w}_{i-1}\mathbf{T}~\mathbf{A}_0~\mathbf{A}_1~\mathbf{A}_2 \\
+ * \empty
+ * {}^{w}_{s}\dot{\mathbf{T}}(u(t)) &=
+ * {}^{w}_{i-1}\mathbf{T}
+ * \Big(
+ * \dot{\mathbf{A}}_0~\mathbf{A}_1~\mathbf{A}_2 +
+ * \mathbf{A}_0~\dot{\mathbf{A}}_1~\mathbf{A}_2 +
+ * \mathbf{A}_0~\mathbf{A}_1~\dot{\mathbf{A}}_2
+ * \Big) \\
+ * \empty
+ * {}^{w}_{s}\ddot{\mathbf{T}}(u(t)) &=
+ * {}^{w}_{i-1}\mathbf{T}
+ * \Big(
+ * \ddot{\mathbf{A}}_0~\mathbf{A}_1~\mathbf{A}_2 +
+ * \mathbf{A}_0~\ddot{\mathbf{A}}_1~\mathbf{A}_2 +
+ * \mathbf{A}_0~\mathbf{A}_1~\ddot{\mathbf{A}}_2 \nonumber\\
+ * &\hspace{4cm}
+ * + 2\dot{\mathbf{A}}_0\dot{\mathbf{A}}_1\mathbf{A}_2 +
+ * 2\mathbf{A}_0\dot{\mathbf{A}}_1\dot{\mathbf{A}}_2 +
+ * 2\dot{\mathbf{A}}_0\mathbf{A}_1\dot{\mathbf{A}}_2
+ * \Big) \\[1em]
+ * \empty
+ * {}^{i-1}_{i}\mathbf{\Omega} &= \mathrm{log}\big( {}^{w}_{i-1}\mathbf{T}^{-1}~{}^{w}_{i}\mathbf{T} \big) \\
+ * \mathbf{A}_j &= \mathrm{exp}\Big({B}_j(u(t))~{}^{i-1+j}_{i+j}\mathbf{\Omega} \Big) \\
+ * \dot{\mathbf{A}}_j &= \dot{B}_j(u(t)) ~{}^{i-1+j}_{i+j}\mathbf{\Omega}^\wedge ~\mathbf{A}_j \\
+ * \ddot{\mathbf{A}}_j &=
+ * \dot{B}_j(u(t)) ~{}^{i-1+j}_{i+j}\mathbf{\Omega}^\wedge ~\dot{\mathbf{A}}_j +
+ * \ddot{B}_j(u(t)) ~{}^{i-1+j}_{i+j}\mathbf{\Omega}^\wedge ~\mathbf{A}_j \\[1em]
+ * \empty
+ * {B}_0(u(t)) &= \frac{1}{3!}~(5+3u-3u^2+u^3) \\
+ * {B}_1(u(t)) &= \frac{1}{3!}~(1+3u+3u^2-2u^3) \\
+ * {B}_2(u(t)) &= \frac{1}{3!}~(u^3) \\[1em]
+ * \empty
+ * \dot{{B}}_0(u(t)) &= \frac{1}{3!}~\frac{1}{\Delta t}~(3-6u+3u^2) \\
+ * \dot{{B}}_1(u(t)) &= \frac{1}{3!}~\frac{1}{\Delta t}~(3+6u-6u^2) \\
+ * \dot{{B}}_2(u(t)) &= \frac{1}{3!}~\frac{1}{\Delta t}~(3u^2) \\[1em]
+ * \empty
+ * \ddot{{B}}_0(u(t)) &= \frac{1}{3!}~\frac{1}{\Delta t^2}~(-6+6u) \\
+ * \ddot{{B}}_1(u(t)) &= \frac{1}{3!}~\frac{1}{\Delta t^2}~(6-12u) \\
+ * \ddot{{B}}_2(u(t)) &= \frac{1}{3!}~\frac{1}{\Delta t^2}~(6u)
+ * \f}
+ *
+ * where \f$u(t_s)=(t_s-t_i)/\Delta t=(t_s-t_i)/(t_{i+1}-t_i)\f$ is used for all values of *u*.
+ * Note that one needs to ensure that they use the SE(3) matrix exponential, logarithm, and hat operations for all of the above equations.
+ * The indexes correspond to the two poses that are older and two poses that are newer than the current time we want to get (i.e. i-1
+ * and i are less than s, while i+1 and i+2 are both greater than time s). Some additional derivations are available in [these
+ * notes](http://udel.edu/~pgeneva/downloads/notes/2018_notes_mueffler2017arxiv.pdf).
+ */
+class BsplineSE3 {
+
+public:
+  /**
+   * @brief Default constructor
+   */
+  BsplineSE3() {}
+
+  /**
+   * @brief Will feed in a series of poses that we will then convert into control points.
+   *
+   * Our control points need to be uniformly spaced over the trajectory, thus given a trajectory we will
+   * uniformly sample based on the average spacing between the pose points specified.
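+   *
+   * For reference, the "average spacing" can be computed as in this editorial sketch (it assumes the
+   * timestamp is the first entry of each row, which is an assumption of this illustration):
+   *
+   * \code{.cpp}
+   * // assumes at least two poses are present
+   * double sumdt = 0.0;
+   * for (size_t i = 0; i + 1 < traj_points.size(); i++)
+   *   sumdt += traj_points.at(i + 1)(0) - traj_points.at(i)(0);
+   * double dt_avg = sumdt / (double)(traj_points.size() - 1); // candidate uniform control-point spacing
+   * \endcode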
+   *
+   * @param traj_points Trajectory poses that we will convert into control points (timestamp(s), q_GtoI, p_IinG)
+   */
+  void feed_trajectory(std::vector<Eigen::VectorXd> traj_points);
+
+  /**
+   * @brief Gets the orientation and position at a given timestamp
+   * @param timestamp Desired time to get the pose at
+   * @param R_GtoI SO(3) orientation of the pose in the global frame
+   * @param p_IinG Position of the pose in the global frame
+   * @return False if we can't find it
+   */
+  bool get_pose(double timestamp, Eigen::Matrix3d &R_GtoI, Eigen::Vector3d &p_IinG);
+
+  /**
+   * @brief Gets the angular and linear velocity at a given timestamp
+   * @param timestamp Desired time to get the pose at
+   * @param R_GtoI SO(3) orientation of the pose in the global frame
+   * @param p_IinG Position of the pose in the global frame
+   * @param w_IinI Angular velocity in the inertial frame
+   * @param v_IinG Linear velocity in the global frame
+   * @return False if we can't find it
+   */
+  bool get_velocity(double timestamp, Eigen::Matrix3d &R_GtoI, Eigen::Vector3d &p_IinG, Eigen::Vector3d &w_IinI, Eigen::Vector3d &v_IinG);
+
+  /**
+   * @brief Gets the angular and linear acceleration at a given timestamp
+   * @param timestamp Desired time to get the pose at
+   * @param R_GtoI SO(3) orientation of the pose in the global frame
+   * @param p_IinG Position of the pose in the global frame
+   * @param w_IinI Angular velocity in the inertial frame
+   * @param v_IinG Linear velocity in the global frame
+   * @param alpha_IinI Angular acceleration in the inertial frame
+   * @param a_IinG Linear acceleration in the global frame
+   * @return False if we can't find it
+   */
+  bool get_acceleration(double timestamp, Eigen::Matrix3d &R_GtoI, Eigen::Vector3d &p_IinG, Eigen::Vector3d &w_IinI,
+                        Eigen::Vector3d &v_IinG, Eigen::Vector3d &alpha_IinI, Eigen::Vector3d &a_IinG);
+
+  /// Returns the simulation start time that we should start simulating from
+  double get_start_time() { return timestamp_start; }
+
+protected:
+  /// Uniform sampling time for our control points
+  double dt;
+
+  /// Start time of the system
+  double timestamp_start;
+
+  /// Type definition of our aligned eigen4d matrix: https://eigen.tuxfamily.org/dox/group__TopicStlContainers.html
+  typedef std::map<double, Eigen::Matrix4d, std::less<double>, Eigen::aligned_allocator<std::pair<const double, Eigen::Matrix4d>>>
+      AlignedEigenMat4d;
+
+  /// Our SE(3) control poses (R_ItoG, p_IinG)
+  AlignedEigenMat4d control_points;
+
+  /**
+   * @brief Will find the two bounding poses for a given timestamp.
+   *
+   * This will loop through the passed map of poses and find two bounding poses.
+   * If there are no bounding poses then this will return false.
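+   *
+   * The bracketing leans on std::map's ordering; an editorial sketch (illustration only, not part of
+   * this patch) of the same lower_bound/upper_bound logic on plain doubles:
+   *
+   * \code{.cpp}
+   * // requires <map>
+   * std::map<double, int> poses = {{0.0, 0}, {1.0, 1}, {2.0, 2}};
+   * auto lb = poses.lower_bound(1.5); // first key >= 1.5, i.e. 2.0
+   * auto ub = poses.upper_bound(1.5); // first key >  1.5, i.e. 2.0
+   * if (lb != poses.begin() && lb->first != 1.5)
+   *   --lb;                          // step back so [lb, ub] = [1.0, 2.0] brackets the query
+   * \endcode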
+ * + * @param timestamp Desired timestamp we want to get two bounding poses of + * @param poses Map of poses and timestamps + * @param t0 Timestamp of the first pose + * @param pose0 SE(3) pose of the first pose + * @param t1 Timestamp of the second pose + * @param pose1 SE(3) pose of the second pose + * @return False if we are unable to find bounding poses + */ + static bool find_bounding_poses(const double timestamp, const AlignedEigenMat4d &poses, double &t0, Eigen::Matrix4d &pose0, double &t1, + Eigen::Matrix4d &pose1); + + /** + * @brief Will find two older poses and two newer poses for the current timestamp + * + * @param timestamp Desired timestamp we want to get four bounding poses of + * @param poses Map of poses and timestamps + * @param t0 Timestamp of the first pose + * @param pose0 SE(3) pose of the first pose + * @param t1 Timestamp of the second pose + * @param pose1 SE(3) pose of the second pose + * @param t2 Timestamp of the third pose + * @param pose2 SE(3) pose of the third pose + * @param t3 Timestamp of the fourth pose + * @param pose3 SE(3) pose of the fourth pose + * @return False if we are unable to find bounding poses + */ + static bool find_bounding_control_points(const double timestamp, const AlignedEigenMat4d &poses, double &t0, Eigen::Matrix4d &pose0, + double &t1, Eigen::Matrix4d &pose1, double &t2, Eigen::Matrix4d &pose2, double &t3, + Eigen::Matrix4d &pose3); +}; + +} // namespace ov_core + +#endif // OV_CORE_BSPLINESE3_H diff --git a/ov_core/src/test_tracking.cpp b/ov_core/src/test_tracking.cpp index b3eb03504..da0eba174 100644 --- a/ov_core/src/test_tracking.cpp +++ b/ov_core/src/test_tracking.cpp @@ -19,33 +19,33 @@ * along with this program. If not, see . */ #include -#include #include -#include #include +#include #include #include +#include +#include #include #include #include #include -#include #include #include -#include #include +#include -#include "track/TrackKLT.h" -#include "track/TrackDescriptor.h" #include "track/TrackAruco.h" +#include "track/TrackDescriptor.h" +#include "track/TrackKLT.h" using namespace ov_core; // Our feature extractor -TrackBase* extractor; +TrackBase *extractor; // FPS counter, and other statistics // https://gamedev.stackexchange.com/a/83174 @@ -60,263 +60,249 @@ ros::Time time_start; // Our master function for tracking void handle_stereo(double time0, double time1, cv::Mat img0, cv::Mat img1, bool use_stereo); - // Main function -int main(int argc, char** argv) -{ - ros::init(argc, argv, "test_tracking"); - ros::NodeHandle nh("~"); - - // Our camera topics (left and right stereo) - std::string topic_camera0; - std::string topic_camera1; - nh.param("topic_camera0", topic_camera0, "/cam0/image_raw"); - nh.param("topic_camera1", topic_camera1, "/cam1/image_raw"); - - // Location of the ROS bag we want to read in - std::string path_to_bag; - nh.param("path_bag", path_to_bag, "/home/patrick/datasets/eth/V1_01_easy.bag"); - //nh.param("path_bag", path_to_bag, "/home/patrick/datasets/eth/V2_03_difficult.bag"); - printf("ros bag path is: %s\n", path_to_bag.c_str()); - - // Get our start location and how much of the bag we want to play - // Make the bag duration < 0 to just process to the end of the bag - double bag_start, bag_durr; - nh.param("bag_start", bag_start, 0); - nh.param("bag_durr", bag_durr, -1); - - - //=================================================================================== - //=================================================================================== - 
//=================================================================================== - - - // Parameters for our extractor - int num_pts, num_aruco, fast_threshold, grid_x, grid_y, min_px_dist; - double knn_ratio; - bool do_downsizing, use_stereo; - nh.param("num_pts", num_pts, 800); - nh.param("num_aruco", num_aruco, 1024); - nh.param("clone_states", clone_states, 11); - nh.param("fast_threshold", fast_threshold, 10); - nh.param("grid_x", grid_x, 9); - nh.param("grid_y", grid_y, 7); - nh.param("min_px_dist", min_px_dist, 3); - nh.param("knn_ratio", knn_ratio, 0.85); - nh.param("downsize_aruco", do_downsizing, false); - nh.param("use_stereo", use_stereo, false); - - // Debug print! - printf("max features: %d\n", num_pts); - printf("max aruco: %d\n", num_aruco); - printf("clone states: %d\n", clone_states); - printf("grid size: %d x %d\n", grid_x, grid_y); - printf("fast threshold: %d\n", fast_threshold); - printf("min pixel distance: %d\n", min_px_dist); - printf("downsize aruco image: %d\n", do_downsizing); - - // Fake camera info (we don't need this, as we are not using the normalized coordinates for anything) - Eigen::Matrix cam0_calib; - cam0_calib << 1,1,0,0,0,0,0,0; - - // Create our n-camera vectors - std::map camera_fisheye; - std::map camera_calibration; - camera_fisheye.insert({0,false}); - camera_calibration.insert({0,cam0_calib}); - camera_fisheye.insert({1,false}); - camera_calibration.insert({1,cam0_calib}); - - // Lets make a feature extractor - extractor = new TrackKLT(num_pts,num_aruco,fast_threshold,grid_x,grid_y,min_px_dist); - //extractor = new TrackDescriptor(num_pts,num_aruco,true,fast_threshold,grid_x,grid_y,knn_ratio); - //extractor = new TrackAruco(num_aruco,true,do_downsizing); - extractor->set_calibration(camera_calibration, camera_fisheye); - - - //=================================================================================== - //=================================================================================== - //=================================================================================== - - - // Load rosbag here, and find messages we can play - rosbag::Bag bag; - bag.open(path_to_bag, rosbag::bagmode::Read); - - // We should load the bag as a view - // Here we go from beginning of the bag to the end of the bag - rosbag::View view_full; - rosbag::View view; - - // Start a few seconds in from the full view time - // If we have a negative duration then use the full bag length - view_full.addQuery(bag); - ros::Time time_init = view_full.getBeginTime(); - time_init += ros::Duration(bag_start); - ros::Time time_finish = (bag_durr < 0)? view_full.getEndTime() : time_init + ros::Duration(bag_durr); - printf("time start = %.6f\n", time_init.toSec()); - printf("time end = %.6f\n", time_finish.toSec()); - view.addQuery(bag, time_init, time_finish); - - // Check to make sure we have data to play - if (view.size() == 0) { - printf(RED "No messages to play on specified topics. 
Exiting.\n" RESET); - ros::shutdown(); - return EXIT_FAILURE; +int main(int argc, char **argv) { + ros::init(argc, argv, "test_tracking"); + ros::NodeHandle nh("~"); + + // Our camera topics (left and right stereo) + std::string topic_camera0; + std::string topic_camera1; + nh.param("topic_camera0", topic_camera0, "/cam0/image_raw"); + nh.param("topic_camera1", topic_camera1, "/cam1/image_raw"); + + // Location of the ROS bag we want to read in + std::string path_to_bag; + nh.param("path_bag", path_to_bag, "/home/patrick/datasets/eth/V1_01_easy.bag"); + // nh.param("path_bag", path_to_bag, "/home/patrick/datasets/eth/V2_03_difficult.bag"); + printf("ros bag path is: %s\n", path_to_bag.c_str()); + + // Get our start location and how much of the bag we want to play + // Make the bag duration < 0 to just process to the end of the bag + double bag_start, bag_durr; + nh.param("bag_start", bag_start, 0); + nh.param("bag_durr", bag_durr, -1); + + //=================================================================================== + //=================================================================================== + //=================================================================================== + + // Parameters for our extractor + int num_pts, num_aruco, fast_threshold, grid_x, grid_y, min_px_dist; + double knn_ratio; + bool do_downsizing, use_stereo; + nh.param("num_pts", num_pts, 800); + nh.param("num_aruco", num_aruco, 1024); + nh.param("clone_states", clone_states, 11); + nh.param("fast_threshold", fast_threshold, 10); + nh.param("grid_x", grid_x, 9); + nh.param("grid_y", grid_y, 7); + nh.param("min_px_dist", min_px_dist, 3); + nh.param("knn_ratio", knn_ratio, 0.85); + nh.param("downsize_aruco", do_downsizing, false); + nh.param("use_stereo", use_stereo, false); + + // Debug print! 
+ printf("max features: %d\n", num_pts); + printf("max aruco: %d\n", num_aruco); + printf("clone states: %d\n", clone_states); + printf("grid size: %d x %d\n", grid_x, grid_y); + printf("fast threshold: %d\n", fast_threshold); + printf("min pixel distance: %d\n", min_px_dist); + printf("downsize aruco image: %d\n", do_downsizing); + + // Fake camera info (we don't need this, as we are not using the normalized coordinates for anything) + Eigen::Matrix cam0_calib; + cam0_calib << 1, 1, 0, 0, 0, 0, 0, 0; + + // Create our n-camera vectors + std::map camera_fisheye; + std::map camera_calibration; + camera_fisheye.insert({0, false}); + camera_calibration.insert({0, cam0_calib}); + camera_fisheye.insert({1, false}); + camera_calibration.insert({1, cam0_calib}); + + // Lets make a feature extractor + extractor = new TrackKLT(num_pts, num_aruco, fast_threshold, grid_x, grid_y, min_px_dist); + // extractor = new TrackDescriptor(num_pts,num_aruco,true,fast_threshold,grid_x,grid_y,knn_ratio); + // extractor = new TrackAruco(num_aruco,true,do_downsizing); + extractor->set_calibration(camera_calibration, camera_fisheye); + + //=================================================================================== + //=================================================================================== + //=================================================================================== + + // Load rosbag here, and find messages we can play + rosbag::Bag bag; + bag.open(path_to_bag, rosbag::bagmode::Read); + + // We should load the bag as a view + // Here we go from beginning of the bag to the end of the bag + rosbag::View view_full; + rosbag::View view; + + // Start a few seconds in from the full view time + // If we have a negative duration then use the full bag length + view_full.addQuery(bag); + ros::Time time_init = view_full.getBeginTime(); + time_init += ros::Duration(bag_start); + ros::Time time_finish = (bag_durr < 0) ? view_full.getEndTime() : time_init + ros::Duration(bag_durr); + printf("time start = %.6f\n", time_init.toSec()); + printf("time end = %.6f\n", time_finish.toSec()); + view.addQuery(bag, time_init, time_finish); + + // Check to make sure we have data to play + if (view.size() == 0) { + printf(RED "No messages to play on specified topics. 
Exiting.\n" RESET); + ros::shutdown(); + return EXIT_FAILURE; + } + + // Record the start time for our FPS counter + time_start = ros::Time::now(); + + // Our stereo pair we have + bool has_left = false; + bool has_right = false; + cv::Mat img0, img1; + double time0 = time_init.toSec(); + double time1 = time_init.toSec(); + + // Step through the rosbag + for (const rosbag::MessageInstance &m : view) { + + // If ros is wants us to stop, break out + if (!ros::ok()) + break; + + // Handle LEFT camera + sensor_msgs::Image::ConstPtr s0 = m.instantiate(); + if (s0 != nullptr && m.getTopic() == topic_camera0) { + // Get the image + cv_bridge::CvImageConstPtr cv_ptr; + try { + cv_ptr = cv_bridge::toCvShare(s0, sensor_msgs::image_encodings::MONO8); + } catch (cv_bridge::Exception &e) { + printf(RED "cv_bridge exception: %s\n" RESET, e.what()); + continue; + } + // Save to our temp variable + has_left = true; + cv::equalizeHist(cv_ptr->image, img0); + // img0 = cv_ptr->image.clone(); + time0 = cv_ptr->header.stamp.toSec(); } - // Record the start time for our FPS counter - time_start = ros::Time::now(); - - // Our stereo pair we have - bool has_left = false; - bool has_right = false; - cv::Mat img0, img1; - double time0 = time_init.toSec(); - double time1 = time_init.toSec(); - - // Step through the rosbag - for (const rosbag::MessageInstance& m : view) { - - // If ros is wants us to stop, break out - if (!ros::ok()) - break; - - // Handle LEFT camera - sensor_msgs::Image::ConstPtr s0 = m.instantiate(); - if (s0 != nullptr && m.getTopic() == topic_camera0) { - // Get the image - cv_bridge::CvImageConstPtr cv_ptr; - try { - cv_ptr = cv_bridge::toCvShare(s0, sensor_msgs::image_encodings::MONO8); - } catch (cv_bridge::Exception &e) { - printf(RED "cv_bridge exception: %s\n" RESET, e.what()); - continue; - } - // Save to our temp variable - has_left = true; - cv::equalizeHist(cv_ptr->image, img0); - //img0 = cv_ptr->image.clone(); - time0 = cv_ptr->header.stamp.toSec(); - } - - // Handle RIGHT camera - sensor_msgs::Image::ConstPtr s1 = m.instantiate(); - if (s1 != nullptr && m.getTopic() == topic_camera1) { - // Get the image - cv_bridge::CvImageConstPtr cv_ptr; - try { - cv_ptr = cv_bridge::toCvShare(s1, sensor_msgs::image_encodings::MONO8); - } catch (cv_bridge::Exception &e) { - printf(RED "cv_bridge exception: %s\n" RESET, e.what()); - continue; - } - // Save to our temp variable - has_right = true; - cv::equalizeHist(cv_ptr->image, img1); - //img1 = cv_ptr->image.clone(); - time1 = cv_ptr->header.stamp.toSec(); - } - - - // If we have both left and right, then process - if(has_left && has_right) { - // process - handle_stereo(time0, time1, img0, img1, use_stereo); - // reset bools - has_left = false; - has_right = false; - } - + // Handle RIGHT camera + sensor_msgs::Image::ConstPtr s1 = m.instantiate(); + if (s1 != nullptr && m.getTopic() == topic_camera1) { + // Get the image + cv_bridge::CvImageConstPtr cv_ptr; + try { + cv_ptr = cv_bridge::toCvShare(s1, sensor_msgs::image_encodings::MONO8); + } catch (cv_bridge::Exception &e) { + printf(RED "cv_bridge exception: %s\n" RESET, e.what()); + continue; + } + // Save to our temp variable + has_right = true; + cv::equalizeHist(cv_ptr->image, img1); + // img1 = cv_ptr->image.clone(); + time1 = cv_ptr->header.stamp.toSec(); + } + // If we have both left and right, then process + if (has_left && has_right) { + // process + handle_stereo(time0, time1, img0, img1, use_stereo); + // reset bools + has_left = false; + has_right = false; } + } - // Done! 
- return EXIT_SUCCESS; + // Done! + return EXIT_SUCCESS; } - - /** * This function will process the new stereo pair with the extractor! */ void handle_stereo(double time0, double time1, cv::Mat img0, cv::Mat img1, bool use_stereo) { - // Process this new image - if(use_stereo) { - extractor->feed_stereo(time0, img0, img1, 0, 1); - } else { - extractor->feed_monocular(time0, img0, 0); - extractor->feed_monocular(time0, img1, 1); - } - - - // Display the resulting tracks - cv::Mat img_active, img_history; - extractor->display_active(img_active,255,0,0,0,0,255); - extractor->display_history(img_history,0,255,255,255,255,255); - - // Show our image! - cv::imshow("Active Tracks", img_active); - cv::imshow("Track History", img_history); - cv::waitKey(1); - - // Get lost tracks - std::shared_ptr database = extractor->get_feature_database(); - std::vector> feats_lost = database->features_not_containing_newer(time0); - num_lostfeats += feats_lost.size(); - - // Mark theses feature pointers as deleted - for(size_t i=0; itimestamps) { - total_meas += (int)pair.second.size(); - } - // Update stats - featslengths += total_meas; - feats_lost[i]->to_delete = true; - } - - // Push back the current time, as a clone time - clonetimes.push_back(time0); - - // Marginalized features if we have reached 5 frame tracks - if((int)clonetimes.size() >= clone_states) { - // Remove features that have reached their max track length - double margtime = clonetimes.at(0); - clonetimes.pop_front(); - std::vector> feats_marg = database->features_containing(margtime); - num_margfeats += feats_marg.size(); - // Delete theses feature pointers - for(size_t i=0; ito_delete = true; - } + // Process this new image + if (use_stereo) { + extractor->feed_stereo(time0, img0, img1, 0, 1); + } else { + extractor->feed_monocular(time0, img0, 0); + extractor->feed_monocular(time0, img1, 1); + } + + // Display the resulting tracks + cv::Mat img_active, img_history; + extractor->display_active(img_active, 255, 0, 0, 0, 0, 255); + extractor->display_history(img_history, 0, 255, 255, 255, 255, 255); + + // Show our image! 
+ cv::imshow("Active Tracks", img_active); + cv::imshow("Track History", img_history); + cv::waitKey(1); + + // Get lost tracks + std::shared_ptr database = extractor->get_feature_database(); + std::vector> feats_lost = database->features_not_containing_newer(time0); + num_lostfeats += feats_lost.size(); + + // Mark theses feature pointers as deleted + for (size_t i = 0; i < feats_lost.size(); i++) { + // Total number of measurements + int total_meas = 0; + for (auto const &pair : feats_lost[i]->timestamps) { + total_meas += (int)pair.second.size(); } - - // Tell the feature database to delete old features - database->cleanup(); - - // Debug print out what our current processing speed it - // We want the FPS to be as high as possible - ros::Time time_curr = ros::Time::now(); - //if (time_curr.toSec()-time_start.toSec() > 2) { - if (frames > 60) { - // Calculate the FPS - double fps = (double) frames / (time_curr.toSec()-time_start.toSec()); - double lpf = (double) num_lostfeats / frames; - double fpf = (double) featslengths / num_lostfeats; - double mpf = (double) num_margfeats / frames; - // DEBUG PRINT OUT - printf("fps = %.2f | lost_feats/frame = %.2f | track_length/lost_feat = %.2f | marg_tracks/frame = %.2f\n",fps,lpf,fpf,mpf); - // Reset variables - frames = 0; - time_start = time_curr; - num_lostfeats = 0; - num_margfeats = 0; - featslengths = 0; + // Update stats + featslengths += total_meas; + feats_lost[i]->to_delete = true; + } + + // Push back the current time, as a clone time + clonetimes.push_back(time0); + + // Marginalized features if we have reached 5 frame tracks + if ((int)clonetimes.size() >= clone_states) { + // Remove features that have reached their max track length + double margtime = clonetimes.at(0); + clonetimes.pop_front(); + std::vector> feats_marg = database->features_containing(margtime); + num_margfeats += feats_marg.size(); + // Delete theses feature pointers + for (size_t i = 0; i < feats_marg.size(); i++) { + feats_marg[i]->to_delete = true; } - frames++; - + } + + // Tell the feature database to delete old features + database->cleanup(); + + // Debug print out what our current processing speed it + // We want the FPS to be as high as possible + ros::Time time_curr = ros::Time::now(); + // if (time_curr.toSec()-time_start.toSec() > 2) { + if (frames > 60) { + // Calculate the FPS + double fps = (double)frames / (time_curr.toSec() - time_start.toSec()); + double lpf = (double)num_lostfeats / frames; + double fpf = (double)featslengths / num_lostfeats; + double mpf = (double)num_margfeats / frames; + // DEBUG PRINT OUT + printf("fps = %.2f | lost_feats/frame = %.2f | track_length/lost_feat = %.2f | marg_tracks/frame = %.2f\n", fps, lpf, fpf, mpf); + // Reset variables + frames = 0; + time_start = time_curr; + num_lostfeats = 0; + num_margfeats = 0; + featslengths = 0; + } + frames++; } - diff --git a/ov_core/src/test_webcam.cpp b/ov_core/src/test_webcam.cpp index 0cb0b0cf8..3a7e4bf9a 100644 --- a/ov_core/src/test_webcam.cpp +++ b/ov_core/src/test_webcam.cpp @@ -19,184 +19,175 @@ * along with this program. If not, see . 
*/ #include -#include #include -#include #include +#include #include #include +#include #include #include -#include #include +#include -#include "track/TrackKLT.h" -#include "track/TrackDescriptor.h" #include "track/TrackAruco.h" +#include "track/TrackDescriptor.h" +#include "track/TrackKLT.h" #include "utils/CLI11.hpp" using namespace ov_core; // Our feature extractor -TrackBase* extractor; - +TrackBase *extractor; // Main function -int main(int argc, char** argv) -{ - - // Create our command line parser - CLI::App app{"test_webcam"}; - - // Defaults - int num_pts = 500; - int num_aruco = 1024; - int clone_states = 20; - int fast_threshold = 10; - int grid_x = 5; - int grid_y = 3; - int min_px_dist = 10; - double knn_ratio = 0.85; - bool do_downsizing = false; - - // Parameters for our extractor - app.add_option("--num_pts", num_pts, "Number of feature tracks"); - app.add_option("--num_aruco", num_aruco, "Number of aruco tag ids we have"); - app.add_option("--clone_states", clone_states, "Amount of clones to visualize track length with"); - app.add_option("--fast_threshold", fast_threshold, "Fast extraction threshold"); - app.add_option("--grid_x", grid_x, "Grid x size"); - app.add_option("--grid_y", grid_y, "Grid y size"); - app.add_option("--min_px_dist", min_px_dist, "Minimum number of pixels between different tracks"); - app.add_option("--knn_ratio", knn_ratio, "Knn descriptor ratio threshold"); - app.add_option("--do_downsizing", do_downsizing, "If we should downsize our arucotag images"); - - // Finally actually parse the command line and load it - try { - app.parse(argc, argv); - } catch (const CLI::ParseError &e) { - return app.exit(e); +int main(int argc, char **argv) { + + // Create our command line parser + CLI::App app{"test_webcam"}; + + // Defaults + int num_pts = 500; + int num_aruco = 1024; + int clone_states = 20; + int fast_threshold = 10; + int grid_x = 5; + int grid_y = 3; + int min_px_dist = 10; + double knn_ratio = 0.85; + bool do_downsizing = false; + + // Parameters for our extractor + app.add_option("--num_pts", num_pts, "Number of feature tracks"); + app.add_option("--num_aruco", num_aruco, "Number of aruco tag ids we have"); + app.add_option("--clone_states", clone_states, "Amount of clones to visualize track length with"); + app.add_option("--fast_threshold", fast_threshold, "Fast extraction threshold"); + app.add_option("--grid_x", grid_x, "Grid x size"); + app.add_option("--grid_y", grid_y, "Grid y size"); + app.add_option("--min_px_dist", min_px_dist, "Minimum number of pixels between different tracks"); + app.add_option("--knn_ratio", knn_ratio, "Knn descriptor ratio threshold"); + app.add_option("--do_downsizing", do_downsizing, "If we should downsize our arucotag images"); + + // Finally actually parse the command line and load it + try { + app.parse(argc, argv); + } catch (const CLI::ParseError &e) { + return app.exit(e); + } + + // Debug print! 
+ printf("max features: %d\n", num_pts); + printf("max aruco: %d\n", num_aruco); + printf("clone states: %d\n", clone_states); + printf("grid size: %d x %d\n", grid_x, grid_y); + printf("fast threshold: %d\n", fast_threshold); + printf("min pixel distance: %d\n", min_px_dist); + printf("downsize aruco image: %d\n", do_downsizing); + + // Fake camera info (we don't need this, as we are not using the normalized coordinates for anything) + Eigen::Matrix cam0_calib; + cam0_calib << 1, 1, 0, 0, 0, 0, 0, 0; + + // Create our n-camera vectors + std::map camera_fisheye; + std::map camera_calibration; + camera_fisheye.insert({0, false}); + camera_calibration.insert({0, cam0_calib}); + camera_fisheye.insert({1, false}); + camera_calibration.insert({1, cam0_calib}); + + // Lets make a feature extractor + extractor = new TrackKLT(num_pts, num_aruco, fast_threshold, grid_x, grid_y, min_px_dist); + // extractor = new TrackDescriptor(num_pts,num_aruco,true,fast_threshold,grid_x,grid_y,knn_ratio); + // extractor = new TrackAruco(num_aruco,true,do_downsizing); + extractor->set_calibration(camera_calibration, camera_fisheye); + + //=================================================================================== + //=================================================================================== + //=================================================================================== + + // Open the first webcam (0=laptop cam, 1=usb device) + cv::VideoCapture cap; + if (!cap.open(0)) { + printf(RED "Unable to open a webcam feed!\n" RESET); + return EXIT_FAILURE; + } + + //=================================================================================== + //=================================================================================== + //=================================================================================== + + // Loop forever until we break out + double current_time = 0.0; + std::deque clonetimes; + while (true) { + + // Get the next frame (and fake advance time forward) + cv::Mat frame; + cap >> frame; + current_time += 1.0 / 30.0; + + // Stop capture if no more image feed + if (frame.empty()) + break; + + // Stop capturing by pressing ESC + if (cv::waitKey(10) == 27) + break; + + // Convert to grayscale if not + if (frame.channels() != 1) + cv::cvtColor(frame, frame, cv::COLOR_GRAY2RGB); + + // Else lets track this image + extractor->feed_monocular(current_time, frame, 0); + + // Display the resulting tracks + cv::Mat img_active, img_history; + extractor->display_active(img_active, 255, 0, 0, 0, 0, 255); + extractor->display_history(img_history, 0, 255, 255, 255, 255, 255); + + // Show our image! + cv::imshow("Active Tracks", img_active); + cv::imshow("Track History", img_history); + cv::waitKey(1); + + // Get lost tracks + std::shared_ptr database = extractor->get_feature_database(); + std::vector> feats_lost = database->features_not_containing_newer(current_time); + + // Mark theses feature pointers as deleted + for (size_t i = 0; i < feats_lost.size(); i++) { + // Total number of measurements + int total_meas = 0; + for (auto const &pair : feats_lost[i]->timestamps) { + total_meas += (int)pair.second.size(); + } + // Update stats + feats_lost[i]->to_delete = true; } - // Debug print! 
- printf("max features: %d\n", num_pts); - printf("max aruco: %d\n", num_aruco); - printf("clone states: %d\n", clone_states); - printf("grid size: %d x %d\n", grid_x, grid_y); - printf("fast threshold: %d\n", fast_threshold); - printf("min pixel distance: %d\n", min_px_dist); - printf("downsize aruco image: %d\n", do_downsizing); - - // Fake camera info (we don't need this, as we are not using the normalized coordinates for anything) - Eigen::Matrix cam0_calib; - cam0_calib << 1,1,0,0,0,0,0,0; - - // Create our n-camera vectors - std::map camera_fisheye; - std::map camera_calibration; - camera_fisheye.insert({0,false}); - camera_calibration.insert({0,cam0_calib}); - camera_fisheye.insert({1,false}); - camera_calibration.insert({1,cam0_calib}); - - // Lets make a feature extractor - extractor = new TrackKLT(num_pts,num_aruco,fast_threshold,grid_x,grid_y,min_px_dist); - //extractor = new TrackDescriptor(num_pts,num_aruco,true,fast_threshold,grid_x,grid_y,knn_ratio); - //extractor = new TrackAruco(num_aruco,true,do_downsizing); - extractor->set_calibration(camera_calibration, camera_fisheye); - - - //=================================================================================== - //=================================================================================== - //=================================================================================== - - // Open the first webcam (0=laptop cam, 1=usb device) - cv::VideoCapture cap; - if(!cap.open(0)) { - printf(RED "Unable to open a webcam feed!\n" RESET); - return EXIT_FAILURE; + // Push back the current time, as a clone time + clonetimes.push_back(current_time); + + // Marginalized features if we have reached 5 frame tracks + if ((int)clonetimes.size() >= clone_states) { + // Remove features that have reached their max track length + double margtime = clonetimes.at(0); + clonetimes.pop_front(); + std::vector> feats_marg = database->features_containing(margtime); + // Delete theses feature pointers + for (size_t i = 0; i < feats_marg.size(); i++) { + feats_marg[i]->to_delete = true; + } } - //=================================================================================== - //=================================================================================== - //=================================================================================== - - // Loop forever until we break out - double current_time = 0.0; - std::deque clonetimes; - while(true) { - - // Get the next frame (and fake advance time forward) - cv::Mat frame; - cap >> frame; - current_time += 1.0/30.0; - - // Stop capture if no more image feed - if(frame.empty()) - break; - - // Stop capturing by pressing ESC - if(cv::waitKey(10) == 27) - break; - - // Convert to grayscale if not - if(frame.channels() != 1) - cv::cvtColor(frame, frame, cv::COLOR_GRAY2RGB); - - // Else lets track this image - extractor->feed_monocular(current_time, frame, 0); - - // Display the resulting tracks - cv::Mat img_active, img_history; - extractor->display_active(img_active,255,0,0,0,0,255); - extractor->display_history(img_history,0,255,255,255,255,255); - - // Show our image! 
- cv::imshow("Active Tracks", img_active); - cv::imshow("Track History", img_history); - cv::waitKey(1); - - // Get lost tracks - std::shared_ptr database = extractor->get_feature_database(); - std::vector> feats_lost = database->features_not_containing_newer(current_time); - - // Mark theses feature pointers as deleted - for(size_t i=0; itimestamps) { - total_meas += (int)pair.second.size(); - } - // Update stats - feats_lost[i]->to_delete = true; - } - - // Push back the current time, as a clone time - clonetimes.push_back(current_time); - - // Marginalized features if we have reached 5 frame tracks - if((int)clonetimes.size() >= clone_states) { - // Remove features that have reached their max track length - double margtime = clonetimes.at(0); - clonetimes.pop_front(); - std::vector> feats_marg = database->features_containing(margtime); - // Delete theses feature pointers - for(size_t i=0; ito_delete = true; - } - } - - // Tell the feature database to delete old features - database->cleanup(); - - - } + // Tell the feature database to delete old features + database->cleanup(); + } - - - // Done! - return EXIT_SUCCESS; + // Done! + return EXIT_SUCCESS; } - - diff --git a/ov_core/src/track/Grider_DOG.h b/ov_core/src/track/Grider_DOG.h index 665587515..74c79e69c 100644 --- a/ov_core/src/track/Grider_DOG.h +++ b/ov_core/src/track/Grider_DOG.h @@ -21,165 +21,148 @@ #ifndef OV_CORE_GRIDER_DOG_H #define OV_CORE_GRIDER_DOG_H - -#include -#include #include +#include +#include #include #include - namespace ov_core { - /** - * @brief Does Difference of Gaussian (DoG) in a grid pattern. - * - * This does "Difference of Gaussian" detection in a grid pattern to try to get good features. - * We then pick the top features in each grid, and return the top features collected over the entire image. - * This class hasn't been tested that much, as we normally use the Grider_FAST class instead. - * - * @m_class{m-block m-danger} - * - * @par Improve and Test This! - * If you are interested in using this grider, then please give it a try and test if for us. - * Please open a pull request with any impromements, or an issue if you have had any success. - * We still recommend the Grider_FAST class since that is what we normally use. - * - */ - class Grider_DOG { - - public: - - - /** - * @brief Compare keypoints based on their response value. - * @param first First keypoint - * @param second Second keypoint - * - * We want to have the keypoints with the highest values! - * See: https://stackoverflow.com/a/10910921 - */ - static bool compare_response(cv::KeyPoint first, cv::KeyPoint second) { - return first.response > second.response; - } - - - /** - * @brief For a given small image region this will do the Difference of Gaussian (DOG) detection - * - * Will return the vector of keypoints with the averaged response for that given UV. 
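
For intuition on the detector documented above: the filter is the difference of two 1-D Gaussian kernels, a band-pass that responds most strongly to blob-like structure between the two scales. A small standalone sketch (kernel size and sigmas are illustrative assumptions, not values from this code):

    cv::Mat gau_big = cv::getGaussianKernel(9, 2.0, CV_32F);   // wider blur
    cv::Mat gau_small = cv::getGaussianKernel(9, 1.0, CV_32F); // narrower blur
    cv::Mat dog = gau_small - gau_big; // 9x1 band-pass kernel, applied separably
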
- * See: https://github.com/jrdi/opencv-examples/blob/master/dog/main.cpp
- */
- static void detect(const cv::Mat &img, std::vector<cv::KeyPoint> &pts, int ksize,
- float sigma_small, float sigma_big, float threshold) {
-
- // Calculate the two kernels we will use
- cv::Mat gauBig = cv::getGaussianKernel(ksize, sigma_big, CV_32F);
- cv::Mat gauSmall = cv::getGaussianKernel(ksize, sigma_small, CV_32F);
- cv::Mat DoG = gauSmall - gauBig;
-
- // Apply the kernel to our image
- cv::Mat img_filtered;
- cv::sepFilter2D(img, img_filtered, CV_32F, DoG.t(), DoG);
- img_filtered = cv::abs(img_filtered);
-
- // Assert we are a grey scaled image
- assert(img_filtered.channels() == 1);
-
- // Loop through and record all points above our threshold
- for (int j = 0; j < img_filtered.rows; j++) {
- for (int i = 0; i < img_filtered.cols; i++) {
- // Calculate the response at this u,v coordinate
- float response = img_filtered.at<float>(j, i);
- // Check to see if it is able our specified threshold
- if (response >= threshold) {
- cv::KeyPoint pt;
- pt.pt.x = i;
- pt.pt.y = j;
- pt.response = response;
- pts.push_back(pt);
- }
- }
- }
-
- }
-
+/**
+ * @brief Does Difference of Gaussian (DoG) in a grid pattern.
+ *
+ * This does "Difference of Gaussian" detection in a grid pattern to try to get good features.
+ * We then pick the top features in each grid, and return the top features collected over the entire image.
+ * This class hasn't been tested that much, as we normally use the Grider_FAST class instead.
+ *
+ * @m_class{m-block m-danger}
+ *
+ * @par Improve and Test This!
+ * If you are interested in using this grider, then please give it a try and test it for us.
+ * Please open a pull request with any improvements, or an issue if you have had any success.
+ * We still recommend the Grider_FAST class since that is what we normally use.
+ *
+ */
+class Grider_DOG {
+
+public:
+  /**
+   * @brief Compare keypoints based on their response value.
+   * @param first First keypoint
+   * @param second Second keypoint
+   *
+   * We want to have the keypoints with the highest values!
+   * See: https://stackoverflow.com/a/10910921
+   */
+  static bool compare_response(cv::KeyPoint first, cv::KeyPoint second) { return first.response > second.response; }
+
+  /**
+   * @brief For a given small image region this will do the Difference of Gaussian (DOG) detection
+   *
+   * Will return the vector of keypoints with the averaged response for that given UV.
+   * See: https://github.com/jrdi/opencv-examples/blob/master/dog/main.cpp
+   */
+  static void detect(const cv::Mat &img, std::vector<cv::KeyPoint> &pts, int ksize, float sigma_small, float sigma_big, float threshold) {
+
+    // Calculate the two kernels we will use
+    cv::Mat gauBig = cv::getGaussianKernel(ksize, sigma_big, CV_32F);
+    cv::Mat gauSmall = cv::getGaussianKernel(ksize, sigma_small, CV_32F);
+    cv::Mat DoG = gauSmall - gauBig;
+
+    // Apply the kernel to our image
+    cv::Mat img_filtered;
+    cv::sepFilter2D(img, img_filtered, CV_32F, DoG.t(), DoG);
+    img_filtered = cv::abs(img_filtered);
+
+    // Assert we are a grey scaled image
+    assert(img_filtered.channels() == 1);
+
+    // Loop through and record all points above our threshold
+    for (int j = 0; j < img_filtered.rows; j++) {
+      for (int i = 0; i < img_filtered.cols; i++) {
+        // Calculate the response at this u,v coordinate
+        float response = img_filtered.at<float>(j, i);
+        // Check to see if it is above our specified threshold
+        if (response >= threshold) {
+          cv::KeyPoint pt;
+          pt.pt.x = i;
+          pt.pt.y = j;
+          pt.response = response;
+          pts.push_back(pt);
+        }
+      }
+    }
+  }
+
+  /**
+   * @brief This function will perform grid extraction using Difference of Gaussian (DOG)
+   * @param img Image we will do DOG extraction on
+   * @param pts vector of extracted points we will return
+   * @param num_features max number of features we want to extract
+   * @param grid_x size of grid in the x-direction / u-direction
+   * @param grid_y size of grid in the y-direction / v-direction
+   * @param ksize kernel size
+   * @param sigma_small small gaussian sigma
+   * @param sigma_big big gaussian sigma
+   * @param threshold response threshold
+   *
+   * Given a specified grid size, this will try to extract DoG features from each grid.
+   * It will then return the best from each grid in the return vector.
- */ - static void perform_griding(const cv::Mat &img, std::vector &pts, int num_features, - int grid_x, int grid_y, int ksize, float sigma_small, float sigma_big, - float threshold) { - - // Calculate the size our extraction boxes should be - int size_x = img.cols / grid_x; - int size_y = img.rows / grid_y; - - // Make sure our sizes are not zero - assert(size_x > 0); - assert(size_y > 0); - - // We want to have equally distributed features - auto num_features_grid = (int) (num_features / (grid_x * grid_y)) + 1; - - // Lets loop through each grid and extract features - for (int x = 0; x < img.cols; x += size_x) { - for (int y = 0; y < img.rows; y += size_y) { - - // Skip if we are out of bounds - if (x + size_x > img.cols || y + size_y > img.rows) - continue; - - // Calculate where we should be extracting from - cv::Rect img_roi = cv::Rect(x, y, size_x, size_y); - - // Extract DOG features for this part of the image - std::vector pts_new; - Grider_DOG::detect(img(img_roi), pts_new, ksize, sigma_small, sigma_big, threshold); - - // Now lets get the top number from this - std::sort(pts_new.begin(), pts_new.end(), Grider_DOG::compare_response); - - // Debug print out the response values - //for (auto pt : pts_new) { - // std::cout << pt.response << std::endl; - //} - //std::cout << "======================" << std::endl; - - // Append the "best" ones to our vector - // Note that we need to "correct" the point u,v since we extracted it in a ROI - // So we should append the location of that ROI in the image - for (size_t i = 0; i < (size_t) num_features_grid && i < pts_new.size(); i++) { - cv::KeyPoint pt_cor = pts_new.at(i); - pt_cor.pt.x += x; - pt_cor.pt.y += y; - pts.push_back(pt_cor); - } - - - } - } - - + } + } + } + + /** + * @brief This function will perform grid extraction using Difference of Gaussian (DOG) + * @param img Image we will do FAST extraction on + * @param pts vector of extracted points we will return + * @param num_features max number of features we want to extract + * @param grid_x size of grid in the x-direction / u-direction + * @param grid_y size of grid in the y-direction / v-direction + * @param ksize kernel size + * @param sigma_small small gaussian sigma + * @param sigma_big big gaussian sigma + * @param threshold response threshold + * + * Given a specified grid size, this will try to extract fast features from each grid. + * It will then return the best from each grid in the return vector. 
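
A hedged usage sketch of the griding interface whose reformatted body follows below. With the test_webcam defaults (500 features over a 5 x 3 grid) the per-cell budget works out to 500 / 15 + 1 = 34 keypoints; `gray_img` and the ksize/sigma/threshold values are illustrative assumptions:

    std::vector<cv::KeyPoint> pts;
    Grider_DOG::perform_griding(gray_img, pts, 500, 5, 3, 9, 1.0f, 2.0f, 10.0f);
    // pts now holds at most ~34 of the strongest DoG responses per grid cell
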
+ */ + static void perform_griding(const cv::Mat &img, std::vector &pts, int num_features, int grid_x, int grid_y, int ksize, + float sigma_small, float sigma_big, float threshold) { + + // Calculate the size our extraction boxes should be + int size_x = img.cols / grid_x; + int size_y = img.rows / grid_y; + + // Make sure our sizes are not zero + assert(size_x > 0); + assert(size_y > 0); + + // We want to have equally distributed features + auto num_features_grid = (int)(num_features / (grid_x * grid_y)) + 1; + + // Lets loop through each grid and extract features + for (int x = 0; x < img.cols; x += size_x) { + for (int y = 0; y < img.rows; y += size_y) { + + // Skip if we are out of bounds + if (x + size_x > img.cols || y + size_y > img.rows) + continue; + + // Calculate where we should be extracting from + cv::Rect img_roi = cv::Rect(x, y, size_x, size_y); + + // Extract DOG features for this part of the image + std::vector pts_new; + Grider_DOG::detect(img(img_roi), pts_new, ksize, sigma_small, sigma_big, threshold); + + // Now lets get the top number from this + std::sort(pts_new.begin(), pts_new.end(), Grider_DOG::compare_response); + + // Debug print out the response values + // for (auto pt : pts_new) { + // std::cout << pt.response << std::endl; + //} + // std::cout << "======================" << std::endl; + + // Append the "best" ones to our vector + // Note that we need to "correct" the point u,v since we extracted it in a ROI + // So we should append the location of that ROI in the image + for (size_t i = 0; i < (size_t)num_features_grid && i < pts_new.size(); i++) { + cv::KeyPoint pt_cor = pts_new.at(i); + pt_cor.pt.x += x; + pt_cor.pt.y += y; + pts.push_back(pt_cor); } + } + } + } +}; - - }; - -} +} // namespace ov_core #endif /* OV_CORE_GRIDER_DOG_H */ \ No newline at end of file diff --git a/ov_core/src/track/Grider_FAST.h b/ov_core/src/track/Grider_FAST.h index 63fa00da8..78dfa6969 100644 --- a/ov_core/src/track/Grider_FAST.h +++ b/ov_core/src/track/Grider_FAST.h @@ -21,143 +21,133 @@ #ifndef OV_CORE_GRIDER_FAST_H #define OV_CORE_GRIDER_FAST_H - -#include -#include -#include #include +#include +#include +#include -#include #include #include +#include #include "utils/lambda_body.h" namespace ov_core { - /** - * @brief Extracts FAST features in a grid pattern. - * - * As compared to just extracting fast features over the entire image, - * we want to have as uniform of extractions as possible over the image plane. - * Thus we split the image into a bunch of small grids, and extract points in each. - * We then pick enough top points in each grid so that we have the total number of desired points. - */ - class Grider_FAST { - - public: - - - /** - * @brief Compare keypoints based on their response value. - * @param first First keypoint - * @param second Second keypoint - * - * We want to have the keypoints with the highest values! - * See: https://stackoverflow.com/a/10910921 - */ - static bool compare_response(cv::KeyPoint first, cv::KeyPoint second) { - return first.response > second.response; - } - - - /** - * @brief This function will perform grid extraction using FAST. 
- * @param img Image we will do FAST extraction on - * @param pts vector of extracted points we will return - * @param num_features max number of features we want to extract - * @param grid_x size of grid in the x-direction / u-direction - * @param grid_y size of grid in the y-direction / v-direction - * @param threshold FAST threshold paramter (10 is a good value normally) - * @param nonmaxSuppression if FAST should perform non-max suppression (true normally) - * - * Given a specified grid size, this will try to extract fast features from each grid. - * It will then return the best from each grid in the return vector. - */ - static void perform_griding(const cv::Mat &img, std::vector &pts, int num_features, - int grid_x, int grid_y, int threshold, bool nonmaxSuppression) { - - // Calculate the size our extraction boxes should be - int size_x = img.cols / grid_x; - int size_y = img.rows / grid_y; - - // Make sure our sizes are not zero - assert(size_x > 0); - assert(size_y > 0); - - // We want to have equally distributed features - auto num_features_grid = (int) (num_features / (grid_x * grid_y)) + 1; - - // Parallelize our 2d grid extraction!! - int ct_cols = std::floor(img.cols/size_x); - int ct_rows = std::floor(img.rows/size_y); - std::vector> collection(ct_cols*ct_rows); - parallel_for_(cv::Range(0, ct_cols*ct_rows), LambdaBody([&](const cv::Range& range) { - for (int r = range.start; r < range.end; r++) { - // Calculate what cell xy value we are in - int x = r%ct_cols*size_x; - int y = r/ct_cols*size_y; - - // Skip if we are out of bounds - if (x + size_x > img.cols || y + size_y > img.rows) +/** + * @brief Extracts FAST features in a grid pattern. + * + * As compared to just extracting fast features over the entire image, + * we want to have as uniform of extractions as possible over the image plane. + * Thus we split the image into a bunch of small grids, and extract points in each. + * We then pick enough top points in each grid so that we have the total number of desired points. + */ +class Grider_FAST { + +public: + /** + * @brief Compare keypoints based on their response value. + * @param first First keypoint + * @param second Second keypoint + * + * We want to have the keypoints with the highest values! + * See: https://stackoverflow.com/a/10910921 + */ + static bool compare_response(cv::KeyPoint first, cv::KeyPoint second) { return first.response > second.response; } + + /** + * @brief This function will perform grid extraction using FAST. + * @param img Image we will do FAST extraction on + * @param pts vector of extracted points we will return + * @param num_features max number of features we want to extract + * @param grid_x size of grid in the x-direction / u-direction + * @param grid_y size of grid in the y-direction / v-direction + * @param threshold FAST threshold paramter (10 is a good value normally) + * @param nonmaxSuppression if FAST should perform non-max suppression (true normally) + * + * Given a specified grid size, this will try to extract fast features from each grid. + * It will then return the best from each grid in the return vector. 
+ */ + static void perform_griding(const cv::Mat &img, std::vector &pts, int num_features, int grid_x, int grid_y, int threshold, + bool nonmaxSuppression) { + + // Calculate the size our extraction boxes should be + int size_x = img.cols / grid_x; + int size_y = img.rows / grid_y; + + // Make sure our sizes are not zero + assert(size_x > 0); + assert(size_y > 0); + + // We want to have equally distributed features + auto num_features_grid = (int)(num_features / (grid_x * grid_y)) + 1; + + // Parallelize our 2d grid extraction!! + int ct_cols = std::floor(img.cols / size_x); + int ct_rows = std::floor(img.rows / size_y); + std::vector> collection(ct_cols * ct_rows); + parallel_for_(cv::Range(0, ct_cols * ct_rows), LambdaBody([&](const cv::Range &range) { + for (int r = range.start; r < range.end; r++) { + // Calculate what cell xy value we are in + int x = r % ct_cols * size_x; + int y = r / ct_cols * size_y; + + // Skip if we are out of bounds + if (x + size_x > img.cols || y + size_y > img.rows) continue; - // Calculate where we should be extracting from - cv::Rect img_roi = cv::Rect(x, y, size_x, size_y); + // Calculate where we should be extracting from + cv::Rect img_roi = cv::Rect(x, y, size_x, size_y); - // Extract FAST features for this part of the image - std::vector pts_new; - cv::FAST(img(img_roi), pts_new, threshold, nonmaxSuppression); + // Extract FAST features for this part of the image + std::vector pts_new; + cv::FAST(img(img_roi), pts_new, threshold, nonmaxSuppression); - // Now lets get the top number from this - std::sort(pts_new.begin(), pts_new.end(), Grider_FAST::compare_response); + // Now lets get the top number from this + std::sort(pts_new.begin(), pts_new.end(), Grider_FAST::compare_response); - // Append the "best" ones to our vector - // Note that we need to "correct" the point u,v since we extracted it in a ROI - // So we should append the location of that ROI in the image - for(size_t i=0; i<(size_t)num_features_grid && i pts_refined; - for(size_t i=0; i pts_refined; + for (size_t i = 0; i < pts.size(); i++) { + pts_refined.push_back(pts.at(i).pt); + } + + // Finally get sub-pixel for all extracted features + cv::cornerSubPix(img, pts_refined, win_size, zero_zone, term_crit); + + // Save the refined points! 
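    // (Aside, not from this patch: win_size / zero_zone / term_crit used above are
    // the standard cv::cornerSubPix knobs. A plausible configuration, assumed here
    // rather than taken from this code, is a 5x5 search window, no dead zone, and
    // ~20 refinement iterations:
    //   cv::Size win_size = cv::Size(5, 5);
    //   cv::Size zero_zone = cv::Size(-1, -1); // (-1, -1) disables the ignore region
    //   cv::TermCriteria term_crit = cv::TermCriteria(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.001);)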
+ for (size_t i = 0; i < pts.size(); i++) { + pts.at(i).pt = pts_refined.at(i); + } + } +}; + +} // namespace ov_core #endif /* OV_CORE_GRIDER_FAST_H */ diff --git a/ov_core/src/track/TrackAruco.cpp b/ov_core/src/track/TrackAruco.cpp index 74328f535..acf62b016 100644 --- a/ov_core/src/track/TrackAruco.cpp +++ b/ov_core/src/track/TrackAruco.cpp @@ -22,162 +22,154 @@ using namespace ov_core; - void TrackAruco::feed_monocular(double timestamp, cv::Mat &imgin, size_t cam_id) { - // Start timing - rT1 = boost::posix_time::microsec_clock::local_time(); - - // Lock this data feed for this camera - std::unique_lock lck(mtx_feeds.at(cam_id)); - - // Histogram equalize - cv::Mat img; - cv::equalizeHist(imgin, img); - - // Clear the old data from the last timestep - ids_aruco[cam_id].clear(); - corners[cam_id].clear(); - rejects[cam_id].clear(); - - // If we are downsizing, then downsize - cv::Mat img0; - if(do_downsizing) { - cv::pyrDown(img,img0,cv::Size(img.cols/2,img.rows/2)); - } else { - img0 = img; + // Start timing + rT1 = boost::posix_time::microsec_clock::local_time(); + + // Lock this data feed for this camera + std::unique_lock lck(mtx_feeds.at(cam_id)); + + // Histogram equalize + cv::Mat img; + cv::equalizeHist(imgin, img); + + // Clear the old data from the last timestep + ids_aruco[cam_id].clear(); + corners[cam_id].clear(); + rejects[cam_id].clear(); + + // If we are downsizing, then downsize + cv::Mat img0; + if (do_downsizing) { + cv::pyrDown(img, img0, cv::Size(img.cols / 2, img.rows / 2)); + } else { + img0 = img; + } + + //=================================================================================== + //=================================================================================== + + // Perform extraction + cv::aruco::detectMarkers(img0, aruco_dict, corners[cam_id], ids_aruco[cam_id], aruco_params, rejects[cam_id]); + rT2 = boost::posix_time::microsec_clock::local_time(); + + //=================================================================================== + //=================================================================================== + + // If we downsized, scale all our u,v measurements by a factor of two + // Note: we do this so we can use these results for visulization later + // Note: and so that the uv added is in the same image size + if (do_downsizing) { + for (size_t i = 0; i < corners[cam_id].size(); i++) { + for (size_t j = 0; j < corners[cam_id].at(i).size(); j++) { + corners[cam_id].at(i).at(j).x *= 2; + corners[cam_id].at(i).at(j).y *= 2; + } } - - - //=================================================================================== - //=================================================================================== - - // Perform extraction - cv::aruco::detectMarkers(img0,aruco_dict,corners[cam_id],ids_aruco[cam_id],aruco_params,rejects[cam_id]); - rT2 = boost::posix_time::microsec_clock::local_time(); - - - //=================================================================================== - //=================================================================================== - - // If we downsized, scale all our u,v measurements by a factor of two - // Note: we do this so we can use these results for visulization later - // Note: and so that the uv added is in the same image size - if(do_downsizing) { - for(size_t i=0; i ids_new; - - // Append to our feature database this new information - for(size_t i=0; i max_tag_id) - continue; - // Assert we have 4 points (we will only use one of them) - assert(corners[cam_id].at(i).size()==4); 
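
Pausing the removed lines for a moment: the core call on both sides of this hunk is cv::aruco::detectMarkers. A hedged standalone sketch with the same contrib-module API (`gray` is an assumed grayscale cv::Mat):

    cv::Ptr<cv::aruco::Dictionary> dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
    cv::Ptr<cv::aruco::DetectorParameters> params = cv::aruco::DetectorParameters::create();
    std::vector<int> ids;
    std::vector<std::vector<cv::Point2f>> corners, rejected;
    cv::aruco::detectMarkers(gray, dict, corners, ids, params, rejected);
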
- // Try to undistort the point - cv::Point2f npt_l = undistort_point(corners[cam_id].at(i).at(0), cam_id); - // Append to the ids vector and database - ids_new.push_back((size_t)ids_aruco[cam_id].at(i)); - database->update_feature((size_t)ids_aruco[cam_id].at(i), timestamp, cam_id, - corners[cam_id].at(i).at(0).x, corners[cam_id].at(i).at(0).y, - npt_l.x, npt_l.y); + for (size_t i = 0; i < rejects[cam_id].size(); i++) { + for (size_t j = 0; j < rejects[cam_id].at(i).size(); j++) { + rejects[cam_id].at(i).at(j).x *= 2; + rejects[cam_id].at(i).at(j).y *= 2; + } } - - - // Move forward in time - img_last[cam_id] = img.clone(); - ids_last[cam_id] = ids_new; - rT3 = boost::posix_time::microsec_clock::local_time(); - - // Timing information - //printf("[TIME-ARUCO]: %.4f seconds for detection\n",(rT2-rT1).total_microseconds() * 1e-6); - //printf("[TIME-ARUCO]: %.4f seconds for feature DB update (%d features)\n",(rT3-rT2).total_microseconds() * 1e-6, (int)good_left.size()); - //printf("[TIME-ARUCO]: %.4f seconds for total\n",(rT3-rT1).total_microseconds() * 1e-6); - + } + + //=================================================================================== + //=================================================================================== + + // ID vectors, of all currently tracked IDs + std::vector ids_new; + + // Append to our feature database this new information + for (size_t i = 0; i < ids_aruco[cam_id].size(); i++) { + // Skip if ID is greater then our max + if (ids_aruco[cam_id].at(i) > max_tag_id) + continue; + // Assert we have 4 points (we will only use one of them) + assert(corners[cam_id].at(i).size() == 4); + // Try to undistort the point + cv::Point2f npt_l = undistort_point(corners[cam_id].at(i).at(0), cam_id); + // Append to the ids vector and database + ids_new.push_back((size_t)ids_aruco[cam_id].at(i)); + database->update_feature((size_t)ids_aruco[cam_id].at(i), timestamp, cam_id, corners[cam_id].at(i).at(0).x, + corners[cam_id].at(i).at(0).y, npt_l.x, npt_l.y); + } + + // Move forward in time + img_last[cam_id] = img.clone(); + ids_last[cam_id] = ids_new; + rT3 = boost::posix_time::microsec_clock::local_time(); + + // Timing information + // printf("[TIME-ARUCO]: %.4f seconds for detection\n",(rT2-rT1).total_microseconds() * 1e-6); + // printf("[TIME-ARUCO]: %.4f seconds for feature DB update (%d features)\n",(rT3-rT2).total_microseconds() * 1e-6, + // (int)good_left.size()); printf("[TIME-ARUCO]: %.4f seconds for total\n",(rT3-rT1).total_microseconds() * 1e-6); } - void TrackAruco::feed_stereo(double timestamp, cv::Mat &img_leftin, cv::Mat &img_rightin, size_t cam_id_left, size_t cam_id_right) { - // There is not such thing as stereo tracking for aruco - // Thus here we should just call the monocular version two times - parallel_for_(cv::Range(0, 2), LambdaBody([&](const cv::Range& range) { - for (int i = range.start; i < range.end; i++) { - bool is_left = (i == 0); - feed_monocular( - timestamp, - is_left ? img_leftin : img_rightin, - is_left ? cam_id_left : cam_id_right - ); - } - })); - + // There is not such thing as stereo tracking for aruco + // Thus here we should just call the monocular version two times + parallel_for_(cv::Range(0, 2), LambdaBody([&](const cv::Range &range) { + for (int i = range.start; i < range.end; i++) { + bool is_left = (i == 0); + feed_monocular(timestamp, is_left ? img_leftin : img_rightin, is_left ? 
cam_id_left : cam_id_right); + } + })); } - void TrackAruco::display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2) { - // Cache the images to prevent other threads from editing while we viz (which can be slow) - std::map img_last_cache; - for(auto const& pair : img_last) { - img_last_cache.insert({pair.first,pair.second.clone()}); - } - - // Get the largest width and height - int max_width = -1; - int max_height = -1; - for(auto const& pair : img_last_cache) { - if(max_width < pair.second.cols) max_width = pair.second.cols; - if(max_height < pair.second.rows) max_height = pair.second.rows; - } - - // If the image is "small" thus we shoudl use smaller display codes - bool is_small = (std::min(max_width,max_height) < 400); - - // If the image is "new" then draw the images from scratch - // Otherwise, we grab the subset of the main image and draw on top of it - bool image_new = ((int)img_last_cache.size()*max_width != img_out.cols || max_height != img_out.rows); - - // If new, then resize the current image - if(image_new) img_out = cv::Mat(max_height,(int)img_last_cache.size()*max_width,CV_8UC3,cv::Scalar(0,0,0)); - - // Loop through each image, and draw - int index_cam = 0; - for(auto const& pair : img_last_cache) { - // Lock this image - std::unique_lock lck(mtx_feeds.at(pair.first)); - // select the subset of the image - cv::Mat img_temp; - if(image_new) cv::cvtColor(img_last_cache[pair.first], img_temp, cv::COLOR_GRAY2RGB); - else img_temp = img_out(cv::Rect(max_width*index_cam,0,max_width,max_height)); - // draw... - if(!ids_aruco[pair.first].empty()) cv::aruco::drawDetectedMarkers(img_temp, corners[pair.first], ids_aruco[pair.first]); - if(!rejects[pair.first].empty()) cv::aruco::drawDetectedMarkers(img_temp, rejects[pair.first], cv::noArray(), cv::Scalar(100,0,255)); - // Draw what camera this is - auto txtpt = (is_small)? cv::Point(10,30) : cv::Point(30,60); - cv::putText(img_temp, "CAM:"+std::to_string((int)pair.first), txtpt, cv::FONT_HERSHEY_COMPLEX_SMALL, (is_small) ? 
1.5 : 3.0, cv::Scalar(0,255,0), 3); - // Replace the output image - img_temp.copyTo(img_out(cv::Rect(max_width*index_cam,0,img_last_cache[pair.first].cols,img_last_cache[pair.first].rows))); - index_cam++; - } - + // Cache the images to prevent other threads from editing while we viz (which can be slow) + std::map img_last_cache; + for (auto const &pair : img_last) { + img_last_cache.insert({pair.first, pair.second.clone()}); + } + + // Get the largest width and height + int max_width = -1; + int max_height = -1; + for (auto const &pair : img_last_cache) { + if (max_width < pair.second.cols) + max_width = pair.second.cols; + if (max_height < pair.second.rows) + max_height = pair.second.rows; + } + + // If the image is "small" thus we shoudl use smaller display codes + bool is_small = (std::min(max_width, max_height) < 400); + + // If the image is "new" then draw the images from scratch + // Otherwise, we grab the subset of the main image and draw on top of it + bool image_new = ((int)img_last_cache.size() * max_width != img_out.cols || max_height != img_out.rows); + + // If new, then resize the current image + if (image_new) + img_out = cv::Mat(max_height, (int)img_last_cache.size() * max_width, CV_8UC3, cv::Scalar(0, 0, 0)); + + // Loop through each image, and draw + int index_cam = 0; + for (auto const &pair : img_last_cache) { + // Lock this image + std::unique_lock lck(mtx_feeds.at(pair.first)); + // select the subset of the image + cv::Mat img_temp; + if (image_new) + cv::cvtColor(img_last_cache[pair.first], img_temp, cv::COLOR_GRAY2RGB); + else + img_temp = img_out(cv::Rect(max_width * index_cam, 0, max_width, max_height)); + // draw... + if (!ids_aruco[pair.first].empty()) + cv::aruco::drawDetectedMarkers(img_temp, corners[pair.first], ids_aruco[pair.first]); + if (!rejects[pair.first].empty()) + cv::aruco::drawDetectedMarkers(img_temp, rejects[pair.first], cv::noArray(), cv::Scalar(100, 0, 255)); + // Draw what camera this is + auto txtpt = (is_small) ? cv::Point(10, 30) : cv::Point(30, 60); + cv::putText(img_temp, "CAM:" + std::to_string((int)pair.first), txtpt, cv::FONT_HERSHEY_COMPLEX_SMALL, (is_small) ? 1.5 : 3.0, + cv::Scalar(0, 255, 0), 3); + // Replace the output image + img_temp.copyTo(img_out(cv::Rect(max_width * index_cam, 0, img_last_cache[pair.first].cols, img_last_cache[pair.first].rows))); + index_cam++; + } } diff --git a/ov_core/src/track/TrackAruco.h b/ov_core/src/track/TrackAruco.h index a20b30c83..14dbd85ed 100644 --- a/ov_core/src/track/TrackAruco.h +++ b/ov_core/src/track/TrackAruco.h @@ -21,100 +21,94 @@ #ifndef OV_CORE_TRACK_ARUCO_H #define OV_CORE_TRACK_ARUCO_H - #include #include "TrackBase.h" namespace ov_core { - /** - * @brief Tracking of OpenCV Aruoc tags. - * - * This class handles the tracking of [OpenCV Aruco tags](https://github.com/opencv/opencv_contrib/tree/master/modules/aruco). - * We track the top left corner of the tag as compared to the pose of the tag or any other corners. - * Right now we hardcode the dictionary to be `cv::aruco::DICT_6X6_25`, so please generate tags in this family of tags. 
- */ - class TrackAruco : public TrackBase { - - public: - - /** - * @brief Public default constructor - */ - TrackAruco() : TrackBase(), max_tag_id(1024), do_downsizing(false) { - aruco_dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250); - aruco_params = cv::aruco::DetectorParameters::create(); - //aruco_params->cornerRefinementMethod = cv::aruco::CornerRefineMethod::CORNER_REFINE_SUBPIX; // people with newer opencv might fail here - } - - /** - * @brief Public constructor with configuration variables - * @param numaruco the max id of the arucotags, we don't use any tags greater than this value even if we extract them - * @param do_downsizing we can scale the image by 1/2 to increase Aruco tag extraction speed - */ - explicit TrackAruco(int numaruco, bool do_downsizing) : - TrackBase(0, numaruco), max_tag_id(numaruco), do_downsizing(do_downsizing) { - aruco_dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250); - aruco_params = cv::aruco::DetectorParameters::create(); - //aruco_params->cornerRefinementMethod = cv::aruco::CornerRefineMethod::CORNER_REFINE_SUBPIX; // people with newer opencv might fail here - } - - /** - * @brief Process a new monocular image - * @param timestamp timestamp the new image occurred at - * @param img new cv:Mat grayscale image - * @param cam_id the camera id that this new image corresponds too - */ - void feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) override; - - /** - * @brief Process new stereo pair of images - * @param timestamp timestamp this pair occured at (stereo is synchronised) - * @param img_left first grayscaled image - * @param img_right second grayscaled image - * @param cam_id_left first image camera id - * @param cam_id_right second image camera id - */ - void feed_stereo(double timestamp, cv::Mat &img_left, cv::Mat &img_right, size_t cam_id_left, size_t cam_id_right) override; - - /** - * @brief We override the display equation so we can show the tags we extract. - * @param img_out image to which we will overlayed features on - * @param r1,g1,b1 first color to draw in - * @param r2,g2,b2 second color to draw in - */ - void display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2) override; - - - protected: - - // Timing variables - boost::posix_time::ptime rT1, rT2, rT3, rT4, rT5, rT6, rT7; - - // Max tag ID we should extract from (i.e., number of aruco tags starting from zero) - int max_tag_id; - - // If we should downsize the image - bool do_downsizing; - - // Our dictionary that we will extract aruco tags with - cv::Ptr aruco_dict; - - // Parameters the opencv extractor uses - cv::Ptr aruco_params; - - // Mutex for our ids_aruco, corners, rejects which we use for drawing - std::mutex mtx_aruco; - - // Our tag IDs and corner we will get from the extractor - std::unordered_map> ids_aruco; - std::unordered_map>> corners, rejects; - - - }; - -} - +/** + * @brief Tracking of OpenCV Aruoc tags. + * + * This class handles the tracking of [OpenCV Aruco tags](https://github.com/opencv/opencv_contrib/tree/master/modules/aruco). + * We track the top left corner of the tag as compared to the pose of the tag or any other corners. + * Right now we hardcode the dictionary to be `cv::aruco::DICT_6X6_25`, so please generate tags in this family of tags. 
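
The doc comment above asks users to generate tags from this dictionary family (the constructors below use cv::aruco::DICT_6X6_250). As a hedged aside, the same contrib module can render a printable marker; the id, pixel size, and filename here are made up:

    cv::Ptr<cv::aruco::Dictionary> dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
    cv::Mat marker;
    cv::aruco::drawMarker(dict, 23, 200, marker, 1); // id 23, 200x200 px, 1-bit border
    cv::imwrite("aruco_id23.png", marker);
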
+ */ +class TrackAruco : public TrackBase { + +public: + /** + * @brief Public default constructor + */ + TrackAruco() : TrackBase(), max_tag_id(1024), do_downsizing(false) { + aruco_dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250); + aruco_params = cv::aruco::DetectorParameters::create(); + // aruco_params->cornerRefinementMethod = cv::aruco::CornerRefineMethod::CORNER_REFINE_SUBPIX; // people with newer opencv might fail + // here + } + + /** + * @brief Public constructor with configuration variables + * @param numaruco the max id of the arucotags, we don't use any tags greater than this value even if we extract them + * @param do_downsizing we can scale the image by 1/2 to increase Aruco tag extraction speed + */ + explicit TrackAruco(int numaruco, bool do_downsizing) : TrackBase(0, numaruco), max_tag_id(numaruco), do_downsizing(do_downsizing) { + aruco_dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250); + aruco_params = cv::aruco::DetectorParameters::create(); + // aruco_params->cornerRefinementMethod = cv::aruco::CornerRefineMethod::CORNER_REFINE_SUBPIX; // people with newer opencv might fail + // here + } + + /** + * @brief Process a new monocular image + * @param timestamp timestamp the new image occurred at + * @param img new cv:Mat grayscale image + * @param cam_id the camera id that this new image corresponds too + */ + void feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) override; + + /** + * @brief Process new stereo pair of images + * @param timestamp timestamp this pair occured at (stereo is synchronised) + * @param img_left first grayscaled image + * @param img_right second grayscaled image + * @param cam_id_left first image camera id + * @param cam_id_right second image camera id + */ + void feed_stereo(double timestamp, cv::Mat &img_left, cv::Mat &img_right, size_t cam_id_left, size_t cam_id_right) override; + + /** + * @brief We override the display equation so we can show the tags we extract. 
+ * @param img_out image to which we will overlayed features on + * @param r1,g1,b1 first color to draw in + * @param r2,g2,b2 second color to draw in + */ + void display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2) override; + +protected: + // Timing variables + boost::posix_time::ptime rT1, rT2, rT3, rT4, rT5, rT6, rT7; + + // Max tag ID we should extract from (i.e., number of aruco tags starting from zero) + int max_tag_id; + + // If we should downsize the image + bool do_downsizing; + + // Our dictionary that we will extract aruco tags with + cv::Ptr aruco_dict; + + // Parameters the opencv extractor uses + cv::Ptr aruco_params; + + // Mutex for our ids_aruco, corners, rejects which we use for drawing + std::mutex mtx_aruco; + + // Our tag IDs and corner we will get from the extractor + std::unordered_map> ids_aruco; + std::unordered_map>> corners, rejects; +}; + +} // namespace ov_core #endif /* OV_CORE_TRACK_ARUCO_H */ diff --git a/ov_core/src/track/TrackBase.cpp b/ov_core/src/track/TrackBase.cpp index 6ff9bc290..e82412604 100644 --- a/ov_core/src/track/TrackBase.cpp +++ b/ov_core/src/track/TrackBase.cpp @@ -20,162 +20,167 @@ */ #include "TrackBase.h" - using namespace ov_core; - void TrackBase::display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2) { - // Cache the images to prevent other threads from editing while we viz (which can be slow) - std::map img_last_cache; - for(auto const& pair : img_last) { - img_last_cache.insert({pair.first,pair.second.clone()}); + // Cache the images to prevent other threads from editing while we viz (which can be slow) + std::map img_last_cache; + for (auto const &pair : img_last) { + img_last_cache.insert({pair.first, pair.second.clone()}); + } + + // Get the largest width and height + int max_width = -1; + int max_height = -1; + for (auto const &pair : img_last_cache) { + if (max_width < pair.second.cols) + max_width = pair.second.cols; + if (max_height < pair.second.rows) + max_height = pair.second.rows; + } + + // Return if we didn't have a last image + if (max_width == -1 || max_height == -1) + return; + + // If the image is "small" thus we should use smaller display codes + bool is_small = (std::min(max_width, max_height) < 400); + + // If the image is "new" then draw the images from scratch + // Otherwise, we grab the subset of the main image and draw on top of it + bool image_new = ((int)img_last_cache.size() * max_width != img_out.cols || max_height != img_out.rows); + + // If new, then resize the current image + if (image_new) + img_out = cv::Mat(max_height, (int)img_last_cache.size() * max_width, CV_8UC3, cv::Scalar(0, 0, 0)); + + // Loop through each image, and draw + int index_cam = 0; + for (auto const &pair : img_last_cache) { + // Lock this image + std::unique_lock lck(mtx_feeds.at(pair.first)); + // select the subset of the image + cv::Mat img_temp; + if (image_new) + cv::cvtColor(img_last_cache[pair.first], img_temp, cv::COLOR_GRAY2RGB); + else + img_temp = img_out(cv::Rect(max_width * index_cam, 0, max_width, max_height)); + // draw, loop through all keypoints + for (size_t i = 0; i < pts_last[pair.first].size(); i++) { + // Get bounding pts for our boxes + cv::Point2f pt_l = pts_last[pair.first].at(i).pt; + // Draw the extracted points and ID + cv::circle(img_temp, pt_l, (is_small) ? 
1 : 2, cv::Scalar(r1, g1, b1), cv::FILLED); + // cv::putText(img_out, std::to_string(ids_left_last.at(i)), pt_l, cv::FONT_HERSHEY_SIMPLEX,0.5,cv::Scalar(0,0,255),1,cv::LINE_AA); + // Draw rectangle around the point + cv::Point2f pt_l_top = cv::Point2f(pt_l.x - 5, pt_l.y - 5); + cv::Point2f pt_l_bot = cv::Point2f(pt_l.x + 5, pt_l.y + 5); + cv::rectangle(img_temp, pt_l_top, pt_l_bot, cv::Scalar(r2, g2, b2), 1); } - - // Get the largest width and height - int max_width = -1; - int max_height = -1; - for(auto const& pair : img_last_cache) { - if(max_width < pair.second.cols) max_width = pair.second.cols; - if(max_height < pair.second.rows) max_height = pair.second.rows; - } - - // Return if we didn't have a last image - if(max_width==-1 || max_height==-1) - return; - - // If the image is "small" thus we should use smaller display codes - bool is_small = (std::min(max_width,max_height) < 400); - - // If the image is "new" then draw the images from scratch - // Otherwise, we grab the subset of the main image and draw on top of it - bool image_new = ((int)img_last_cache.size()*max_width != img_out.cols || max_height != img_out.rows); - - // If new, then resize the current image - if(image_new) img_out = cv::Mat(max_height,(int)img_last_cache.size()*max_width,CV_8UC3,cv::Scalar(0,0,0)); - - // Loop through each image, and draw - int index_cam = 0; - for(auto const& pair : img_last_cache) { - // Lock this image - std::unique_lock lck(mtx_feeds.at(pair.first)); - // select the subset of the image - cv::Mat img_temp; - if(image_new) cv::cvtColor(img_last_cache[pair.first], img_temp, cv::COLOR_GRAY2RGB); - else img_temp = img_out(cv::Rect(max_width*index_cam,0,max_width,max_height)); - // draw, loop through all keypoints - for(size_t i=0; i highlighted) { - // Cache the images to prevent other threads from editing while we viz (which can be slow) - std::map img_last_cache; - for(auto const& pair : img_last) { - img_last_cache.insert({pair.first,pair.second.clone()}); - } - - // Get the largest width and height - int max_width = -1; - int max_height = -1; - for(auto const& pair : img_last_cache) { - if(max_width < pair.second.cols) max_width = pair.second.cols; - if(max_height < pair.second.rows) max_height = pair.second.rows; - } - - // Return if we didn't have a last image - if(max_width==-1 || max_height==-1) - return; - - // If the image is "small" thus we shoudl use smaller display codes - bool is_small = (std::min(max_width,max_height) < 400); - - // If the image is "new" then draw the images from scratch - // Otherwise, we grab the subset of the main image and draw on top of it - bool image_new = ((int)img_last_cache.size()*max_width != img_out.cols || max_height != img_out.rows); - - // If new, then resize the current image - if(image_new) img_out = cv::Mat(max_height,(int)img_last_cache.size()*max_width,CV_8UC3,cv::Scalar(0,0,0)); - - // Max tracks to show (otherwise it clutters up the screen) - size_t maxtracks = 50; - - // Loop through each image, and draw - int index_cam = 0; - for(auto const& pair : img_last_cache) { - // Lock this image - std::unique_lock lck(mtx_feeds.at(pair.first)); - // select the subset of the image - cv::Mat img_temp; - if(image_new) cv::cvtColor(img_last_cache[pair.first], img_temp, cv::COLOR_GRAY2RGB); - else img_temp = img_out(cv::Rect(max_width*index_cam,0,max_width,max_height)); - // draw, loop through all keypoints - for(size_t i=0; i feat = database->get_feature(ids_last[pair.first].at(i)); - // Skip if the feature is null - if(feat == nullptr || 
feat->uvs[pair.first].empty() || feat->to_delete) - continue; - // Draw the history of this point (start at the last inserted one) - for(size_t z=feat->uvs[pair.first].size()-1; z>0; z--) { - // Check if we have reached the max - if(feat->uvs[pair.first].size()-z > maxtracks) - break; - // Calculate what color we are drawing in - bool is_stereo = (feat->uvs.size() > 1); - int color_r = (is_stereo? b2 : r2)-(int)((is_stereo? b1 : r1)/feat->uvs[pair.first].size()*z); - int color_g = (is_stereo? r2 : g2)-(int)((is_stereo? r1 : g1)/feat->uvs[pair.first].size()*z); - int color_b = (is_stereo? g2 : b2)-(int)((is_stereo? g1 : b1)/feat->uvs[pair.first].size()*z); - // Draw current point - cv::Point2f pt_c(feat->uvs[pair.first].at(z)(0),feat->uvs[pair.first].at(z)(1)); - cv::circle(img_temp, pt_c, (is_small)? 1 : 2, cv::Scalar(color_r,color_g,color_b), cv::FILLED); - // If there is a next point, then display the line from this point to the next - if(z+1 < feat->uvs[pair.first].size()) { - cv::Point2f pt_n(feat->uvs[pair.first].at(z+1)(0),feat->uvs[pair.first].at(z+1)(1)); - cv::line(img_temp, pt_c, pt_n, cv::Scalar(color_r,color_g,color_b)); - } - // If the first point, display the ID - if(z==feat->uvs[pair.first].size()-1) { - //cv::putText(img_out0, std::to_string(feat->featid), pt_c, cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 255), 1, cv::LINE_AA); - //cv::circle(img_out0, pt_c, 2, cv::Scalar(color,color,255), CV_FILLED); - } - } + // Cache the images to prevent other threads from editing while we viz (which can be slow) + std::map img_last_cache; + for (auto const &pair : img_last) { + img_last_cache.insert({pair.first, pair.second.clone()}); + } + + // Get the largest width and height + int max_width = -1; + int max_height = -1; + for (auto const &pair : img_last_cache) { + if (max_width < pair.second.cols) + max_width = pair.second.cols; + if (max_height < pair.second.rows) + max_height = pair.second.rows; + } + + // Return if we didn't have a last image + if (max_width == -1 || max_height == -1) + return; + + // If the image is "small" thus we shoudl use smaller display codes + bool is_small = (std::min(max_width, max_height) < 400); + + // If the image is "new" then draw the images from scratch + // Otherwise, we grab the subset of the main image and draw on top of it + bool image_new = ((int)img_last_cache.size() * max_width != img_out.cols || max_height != img_out.rows); + + // If new, then resize the current image + if (image_new) + img_out = cv::Mat(max_height, (int)img_last_cache.size() * max_width, CV_8UC3, cv::Scalar(0, 0, 0)); + + // Max tracks to show (otherwise it clutters up the screen) + size_t maxtracks = 50; + + // Loop through each image, and draw + int index_cam = 0; + for (auto const &pair : img_last_cache) { + // Lock this image + std::unique_lock lck(mtx_feeds.at(pair.first)); + // select the subset of the image + cv::Mat img_temp; + if (image_new) + cv::cvtColor(img_last_cache[pair.first], img_temp, cv::COLOR_GRAY2RGB); + else + img_temp = img_out(cv::Rect(max_width * index_cam, 0, max_width, max_height)); + // draw, loop through all keypoints + for (size_t i = 0; i < ids_last[pair.first].size(); i++) { + // If a highlighted point, then put a nice box around it + if (std::find(highlighted.begin(), highlighted.end(), ids_last[pair.first].at(i)) != highlighted.end()) { + cv::Point2f pt_c = pts_last[pair.first].at(i).pt; + cv::Point2f pt_l_top = cv::Point2f(pt_c.x - ((is_small) ? 3 : 5), pt_c.y - ((is_small) ? 
3 : 5)); + cv::Point2f pt_l_bot = cv::Point2f(pt_c.x + ((is_small) ? 3 : 5), pt_c.y + ((is_small) ? 3 : 5)); + cv::rectangle(img_temp, pt_l_top, pt_l_bot, cv::Scalar(0, 255, 0), 1); + cv::circle(img_temp, pt_c, (is_small) ? 1 : 2, cv::Scalar(0, 255, 0), cv::FILLED); + } + // Get the feature from the database + std::shared_ptr feat = database->get_feature(ids_last[pair.first].at(i)); + // Skip if the feature is null + if (feat == nullptr || feat->uvs[pair.first].empty() || feat->to_delete) + continue; + // Draw the history of this point (start at the last inserted one) + for (size_t z = feat->uvs[pair.first].size() - 1; z > 0; z--) { + // Check if we have reached the max + if (feat->uvs[pair.first].size() - z > maxtracks) + break; + // Calculate what color we are drawing in + bool is_stereo = (feat->uvs.size() > 1); + int color_r = (is_stereo ? b2 : r2) - (int)((is_stereo ? b1 : r1) / feat->uvs[pair.first].size() * z); + int color_g = (is_stereo ? r2 : g2) - (int)((is_stereo ? r1 : g1) / feat->uvs[pair.first].size() * z); + int color_b = (is_stereo ? g2 : b2) - (int)((is_stereo ? g1 : b1) / feat->uvs[pair.first].size() * z); + // Draw current point + cv::Point2f pt_c(feat->uvs[pair.first].at(z)(0), feat->uvs[pair.first].at(z)(1)); + cv::circle(img_temp, pt_c, (is_small) ? 1 : 2, cv::Scalar(color_r, color_g, color_b), cv::FILLED); + // If there is a next point, then display the line from this point to the next + if (z + 1 < feat->uvs[pair.first].size()) { + cv::Point2f pt_n(feat->uvs[pair.first].at(z + 1)(0), feat->uvs[pair.first].at(z + 1)(1)); + cv::line(img_temp, pt_c, pt_n, cv::Scalar(color_r, color_g, color_b)); } - // Draw what camera this is - auto txtpt = (is_small)? cv::Point(10,30) : cv::Point(30,60); - cv::putText(img_temp, "CAM:"+std::to_string((int)pair.first), txtpt, cv::FONT_HERSHEY_COMPLEX_SMALL, (is_small)? 1.5 : 3.0, cv::Scalar(0,255,0), 3); - // Replace the output image - img_temp.copyTo(img_out(cv::Rect(max_width*index_cam,0,img_last_cache[pair.first].cols,img_last_cache[pair.first].rows))); - index_cam++; + // If the first point, display the ID + if (z == feat->uvs[pair.first].size() - 1) { + // cv::putText(img_out0, std::to_string(feat->featid), pt_c, cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 255), 1, + // cv::LINE_AA); cv::circle(img_out0, pt_c, 2, cv::Scalar(color,color,255), CV_FILLED); + } + } } - + // Draw what camera this is + auto txtpt = (is_small) ? cv::Point(10, 30) : cv::Point(30, 60); + cv::putText(img_temp, "CAM:" + std::to_string((int)pair.first), txtpt, cv::FONT_HERSHEY_COMPLEX_SMALL, (is_small) ? 1.5 : 3.0, + cv::Scalar(0, 255, 0), 3); + // Replace the output image + img_temp.copyTo(img_out(cv::Rect(max_width * index_cam, 0, img_last_cache[pair.first].cols, img_last_cache[pair.first].rows))); + index_cam++; + } } - - diff --git a/ov_core/src/track/TrackBase.h b/ov_core/src/track/TrackBase.h index b94cff118..a568f8cdf 100644 --- a/ov_core/src/track/TrackBase.h +++ b/ov_core/src/track/TrackBase.h @@ -21,363 +21,348 @@ #ifndef OV_CORE_TRACK_BASE_H #define OV_CORE_TRACK_BASE_H - +#include #include -#include #include -#include +#include #include #include -#include #include #include +#include -#include "Grider_FAST.h" #include "Grider_DOG.h" +#include "Grider_FAST.h" #include "feat/FeatureDatabase.h" #include "utils/colors.h" #include "utils/lambda_body.h" - namespace ov_core { - - /** - * @brief Visual feature tracking base class - * - * This is the base class for all our visual trackers. 
- * The goal here is to provide a common interface so all underlying trackers can simply hide away all the complexities. - * We have something called the "feature database" which has all the tracking information inside of it. - * The user can ask this database for features which can then be used in an MSCKF or batch-based setting. - * The feature tracks store both the raw (distorted) and undistorted/normalized values. - * Right now we just support two camera models, see: undistort_point_brown() and undistort_point_fisheye(). - * - * @m_class{m-note m-warning} - * - * @par A Note on Multi-Threading Support - * There is some support for asynchronous multi-threaded feature tracking of independent cameras. - * The key assumption during implementation is that the user will not try to track on the same camera in parallel, and instead call on different cameras. - * For example, if I have two cameras, I can either sequentially call the feed function, or I spin each of these into separate threads and wait for their return. - * The @ref currid is atomic to allow for multiple threads to access it without issue and ensure that all features have unique id values. - * We also have mutex for access for the calibration and previous images and tracks (used during visualization). - * It should be noted that if a thread calls visualization, it might hang or the feed thread might, due to acquiring the mutex for that specific camera id / feed. - * - * This base class also handles most of the heavy lifting with the visualization, but the sub-classes can override - * this and do their own logic if they want (i.e. the TrackAruco has its own logic for visualization). - * This visualization needs access to the prior images and their tracks, thus must synchronise in the case of multi-threading. - * This shouldn't impact performance, but high frequency visualization calls can negatively effect the performance. - */ - class TrackBase { - - public: - - /** - * @brief Public default constructor - */ - TrackBase() : database(new FeatureDatabase()), num_features(200), currid(0) { } - - /** - * @brief Public constructor with configuration variables - * @param numfeats number of features we want want to track (i.e. track 200 points from frame to frame) - * @param numaruco the max id of the arucotags, so we ensure that we start our non-auroc features above this value - */ - TrackBase(int numfeats, int numaruco) : - database(new FeatureDatabase()), num_features(numfeats) { - // Our current feature ID should be larger then the number of aruco tags we have - currid = (size_t) numaruco + 1; - } - - virtual ~TrackBase() { } - - - /** - * @brief Given a the camera intrinsic values, this will set what we should normalize points with. - * This will also update the feature database with corrected normalized values. - * Normally this would only be needed if we are optimizing our camera parameters, and thus should re-normalize. 
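
The set_calibration() interface described above (its parameter list continues just below) packs each camera's intrinsics into one 8x1 vector, [fx, fy, cx, cy, d1, d2, d3, d4], which the body then splits into an OpenCV camera matrix and distortion vector. A hedged numeric sketch of that convention, with illustrative values:

    Eigen::Matrix<double, 8, 1> calib;
    calib << 458.654, 457.296, 367.215, 248.375, -0.283, 0.074, 0.0002, 1.76e-05;
    cv::Matx33d K(calib(0), 0.0, calib(2),
                  0.0, calib(1), calib(3),
                  0.0, 0.0, 1.0);
    cv::Vec4d D(calib(4), calib(5), calib(6), calib(7));
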
- * @param camera_calib Calibration parameters for all cameras [fx,fy,cx,cy,d1,d2,d3,d4] - * @param camera_fisheye Map of camera_id => bool if we should do radtan or fisheye distortion model - * @param correct_active If we should re-undistort active features in our database - */ - void set_calibration(std::map camera_calib, - std::map camera_fisheye, bool correct_active=false) { - - // Assert vectors are equal - assert(camera_calib.size()==camera_fisheye.size()); - - // Create our mutex array based on the number of cameras we have - // See https://stackoverflow.com/a/24170141/7718197 - if(mtx_feeds.empty() || mtx_feeds.size() != camera_calib.size()) { - std::vector list(camera_calib.size()); - mtx_feeds.swap(list); - } - - // Initialize our mutex and camera intrinsic values if we are just starting up - // The number of cameras should not change over time, thus we just need to check if our initial data is empty - if(camera_k_OPENCV.empty() || camera_d_OPENCV.empty() || this->camera_fisheye.empty()) { - // Overwrite our fisheye calibration - this->camera_fisheye = camera_fisheye; - // Convert values to the OpenCV format - for (auto const &cam : camera_calib) { - // Assert we are of size eight - assert(cam.second.rows()==8); - // Camera matrix - cv::Matx33d tempK; - tempK(0, 0) = cam.second(0); - tempK(0, 1) = 0; - tempK(0, 2) = cam.second(2); - tempK(1, 0) = 0; - tempK(1, 1) = cam.second(1); - tempK(1, 2) = cam.second(3); - tempK(2, 0) = 0; - tempK(2, 1) = 0; - tempK(2, 2) = 1; - camera_k_OPENCV.insert({cam.first, tempK}); - // Distortion parameters - cv::Vec4d tempD; - tempD(0) = cam.second(4); - tempD(1) = cam.second(5); - tempD(2) = cam.second(6); - tempD(3) = cam.second(7); - camera_d_OPENCV.insert({cam.first, tempD}); - } - return; - } - - // assert that the number of cameras can not change - assert(camera_k_OPENCV.size()==camera_calib.size()); - assert(camera_k_OPENCV.size()==camera_fisheye.size()); - assert(camera_k_OPENCV.size()==camera_d_OPENCV.size()); - - // Convert values to the OpenCV format - for (auto const &cam : camera_calib) { - // Lock this image feed - std::unique_lock lck(mtx_feeds.at(cam.first)); - // Fisheye value - this->camera_fisheye.at(cam.first) = camera_fisheye.at(cam.first); - // Assert we are of size eight - assert(cam.second.rows()==8); - // Camera matrix - cv::Matx33d tempK; - tempK(0, 0) = cam.second(0); - tempK(0, 1) = 0; - tempK(0, 2) = cam.second(2); - tempK(1, 0) = 0; - tempK(1, 1) = cam.second(1); - tempK(1, 2) = cam.second(3); - tempK(2, 0) = 0; - tempK(2, 1) = 0; - tempK(2, 2) = 1; - camera_k_OPENCV.at(cam.first) = tempK; - // Distortion parameters - cv::Vec4d tempD; - tempD(0) = cam.second(4); - tempD(1) = cam.second(5); - tempD(2) = cam.second(6); - tempD(3) = cam.second(7); - camera_d_OPENCV.at(cam.first) = tempD; - } - - // If we are calibrating camera intrinsics our normalize coordinates will be stale - // This is because we appended them to the database with the current best guess *at that timestep* - // Thus here since we have a change in calibration, re-normalize all the features we have - if(correct_active) { - - // Get all features in this database - std::unordered_map> features_idlookup = database->get_internal_data(); - - // Loop through and correct each one - for(const auto& pair_feat : features_idlookup) { - // Get our feature - std::shared_ptr feat = pair_feat.second; - // Loop through each camera for this feature - for (auto const& meas_pair : feat->timestamps) { - size_t camid = meas_pair.first; - std::unique_lock 
lck(mtx_feeds.at(camid)); - for(size_t m=0; muvs.at(camid).size(); m++) { - cv::Point2f pt(feat->uvs.at(camid).at(m)(0), feat->uvs.at(camid).at(m)(1)); - cv::Point2f pt_n = undistort_point(pt,camid); - feat->uvs_norm.at(camid).at(m)(0) = pt_n.x; - feat->uvs_norm.at(camid).at(m)(1) = pt_n.y; - } - } - } - - } - - - } - - /** - * @brief Process a new monocular image - * @param timestamp timestamp the new image occurred at - * @param img new cv:Mat grayscale image - * @param cam_id the camera id that this new image corresponds too - */ - virtual void feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) = 0; - - /** - * @brief Process new stereo pair of images - * @param timestamp timestamp this pair occured at (stereo is synchronised) - * @param img_left first grayscaled image - * @param img_right second grayscaled image - * @param cam_id_left first image camera id - * @param cam_id_right second image camera id - */ - virtual void feed_stereo(double timestamp, cv::Mat &img_left, cv::Mat &img_right, size_t cam_id_left, size_t cam_id_right) = 0; - - /** - * @brief Shows features extracted in the last image - * @param img_out image to which we will overlayed features on - * @param r1,g1,b1 first color to draw in - * @param r2,g2,b2 second color to draw in - */ - virtual void display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2); - - /** - * @brief Shows a "trail" for each feature (i.e. its history) - * @param img_out image to which we will overlayed features on - * @param r1,g1,b1 first color to draw in - * @param r2,g2,b2 second color to draw in - * @param highlighted unique ids which we wish to highlight - */ - virtual void display_history(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2, std::vector highlighted={}); - - /** - * @brief Get the feature database with all the track information - * @return FeatureDatabase pointer that one can query for features - */ - std::shared_ptr get_feature_database() { - return database; - } - - /** - * @brief Changes the ID of an actively tracked feature to another one - * @param id_old Old id we want to change - * @param id_new Id we want to change the old id to - */ - void change_feat_id(size_t id_old, size_t id_new) { - - // If found in db then replace - if(database->get_internal_data().find(id_old)!=database->get_internal_data().end()) { - std::shared_ptr feat = database->get_internal_data().at(id_old); - database->get_internal_data().erase(id_old); - feat->featid = id_new; - database->get_internal_data().insert({id_new, feat}); - } - - // Update current track IDs - for(auto &cam_ids_pair : ids_last) { - for(size_t i=0; icamera_k_OPENCV.at(cam_id); - cv::Vec4d camD = this->camera_d_OPENCV.at(cam_id); - // Call on the fisheye if we should! - if (this->camera_fisheye.at(cam_id)) { - return undistort_point_fisheye(pt_in, camK, camD); - } - return undistort_point_brown(pt_in, camK, camD); - } - - protected: - - /** - * @brief Undistort function RADTAN/BROWN. - * - * Given a uv point, this will undistort it based on the camera matrices. - * To equate this to Kalibr's models, this is what you would use for `pinhole-radtan`. - */ - static cv::Point2f undistort_point_brown(cv::Point2f pt_in, cv::Matx33d &camK, cv::Vec4d &camD) { - // Convert to opencv format - cv::Mat mat(1, 2, CV_32F); - mat.at(0, 0) = pt_in.x; - mat.at(0, 1) = pt_in.y; - mat = mat.reshape(2); // Nx1, 2-channel - // Undistort it! 
- cv::undistortPoints(mat, mat, camK, camD); - // Construct our return vector - cv::Point2f pt_out; - mat = mat.reshape(1); // Nx2, 1-channel - pt_out.x = mat.at(0, 0); - pt_out.y = mat.at(0, 1); - return pt_out; +/** + * @brief Visual feature tracking base class + * + * This is the base class for all our visual trackers. + * The goal here is to provide a common interface so all underlying trackers can simply hide away all the complexities. + * We have something called the "feature database" which has all the tracking information inside of it. + * The user can ask this database for features which can then be used in an MSCKF or batch-based setting. + * The feature tracks store both the raw (distorted) and undistorted/normalized values. + * Right now we just support two camera models, see: undistort_point_brown() and undistort_point_fisheye(). + * + * @m_class{m-note m-warning} + * + * @par A Note on Multi-Threading Support + * There is some support for asynchronous multi-threaded feature tracking of independent cameras. + * The key assumption during implementation is that the user will not try to track on the same camera in parallel, and instead call on + * different cameras. For example, if I have two cameras, I can either sequentially call the feed function, or I spin each of these into + * separate threads and wait for their return. The @ref currid is atomic to allow for multiple threads to access it without issue and ensure + * that all features have unique id values. We also have mutex for access for the calibration and previous images and tracks (used during + * visualization). It should be noted that if a thread calls visualization, it might hang or the feed thread might, due to acquiring the + * mutex for that specific camera id / feed. + * + * This base class also handles most of the heavy lifting with the visualization, but the sub-classes can override + * this and do their own logic if they want (i.e. the TrackAruco has its own logic for visualization). + * This visualization needs access to the prior images and their tracks, thus must synchronise in the case of multi-threading. + * This shouldn't impact performance, but high frequency visualization calls can negatively effect the performance. + */ +class TrackBase { + +public: + /** + * @brief Public default constructor + */ + TrackBase() : database(new FeatureDatabase()), num_features(200), currid(0) {} + + /** + * @brief Public constructor with configuration variables + * @param numfeats number of features we want want to track (i.e. track 200 points from frame to frame) + * @param numaruco the max id of the arucotags, so we ensure that we start our non-auroc features above this value + */ + TrackBase(int numfeats, int numaruco) : database(new FeatureDatabase()), num_features(numfeats) { + // Our current feature ID should be larger then the number of aruco tags we have + currid = (size_t)numaruco + 1; + } + + virtual ~TrackBase() {} + + /** + * @brief Given a the camera intrinsic values, this will set what we should normalize points with. + * This will also update the feature database with corrected normalized values. + * Normally this would only be needed if we are optimizing our camera parameters, and thus should re-normalize. 
+  /**
+   * @brief Given the camera intrinsic values, this will set what we should normalize points with.
+   * This will also update the feature database with corrected normalized values.
+   * Normally this would only be needed if we are optimizing our camera parameters, and thus should re-normalize.
+   * @param camera_calib Calibration parameters for all cameras [fx,fy,cx,cy,d1,d2,d3,d4]
+   * @param camera_fisheye Map of camera_id => bool if we should do radtan or fisheye distortion model
+   * @param correct_active If we should re-undistort active features in our database
+   */
+  void set_calibration(std::map<size_t, Eigen::VectorXd> camera_calib, std::map<size_t, bool> camera_fisheye, bool correct_active = false) {
+
+    // Assert vectors are equal
+    assert(camera_calib.size() == camera_fisheye.size());
+
+    // Create our mutex array based on the number of cameras we have
+    // See https://stackoverflow.com/a/24170141/7718197
+    if (mtx_feeds.empty() || mtx_feeds.size() != camera_calib.size()) {
+      std::vector<std::mutex> list(camera_calib.size());
+      mtx_feeds.swap(list);
+    }
+
+    // Initialize our mutex and camera intrinsic values if we are just starting up
+    // The number of cameras should not change over time, thus we just need to check if our initial data is empty
+    if (camera_k_OPENCV.empty() || camera_d_OPENCV.empty() || this->camera_fisheye.empty()) {
+      // Overwrite our fisheye calibration
+      this->camera_fisheye = camera_fisheye;
+      // Convert values to the OpenCV format
+      for (auto const &cam : camera_calib) {
+        // Assert we are of size eight
+        assert(cam.second.rows() == 8);
+        // Camera matrix
+        cv::Matx33d tempK;
+        tempK(0, 0) = cam.second(0);
+        tempK(0, 1) = 0;
+        tempK(0, 2) = cam.second(2);
+        tempK(1, 0) = 0;
+        tempK(1, 1) = cam.second(1);
+        tempK(1, 2) = cam.second(3);
+        tempK(2, 0) = 0;
+        tempK(2, 1) = 0;
+        tempK(2, 2) = 1;
+        camera_k_OPENCV.insert({cam.first, tempK});
+        // Distortion parameters
+        cv::Vec4d tempD;
+        tempD(0) = cam.second(4);
+        tempD(1) = cam.second(5);
+        tempD(2) = cam.second(6);
+        tempD(3) = cam.second(7);
+        camera_d_OPENCV.insert({cam.first, tempD});
+      }
+      return;
+    }
+
+    // assert that the number of cameras can not change
+    assert(camera_k_OPENCV.size() == camera_calib.size());
+    assert(camera_k_OPENCV.size() == camera_fisheye.size());
+    assert(camera_k_OPENCV.size() == camera_d_OPENCV.size());
+
+    // Convert values to the OpenCV format
+    for (auto const &cam : camera_calib) {
+      // Lock this image feed
+      std::unique_lock<std::mutex> lck(mtx_feeds.at(cam.first));
+      // Fisheye value
+      this->camera_fisheye.at(cam.first) = camera_fisheye.at(cam.first);
+      // Assert we are of size eight
+      assert(cam.second.rows() == 8);
+      // Camera matrix
+      cv::Matx33d tempK;
+      tempK(0, 0) = cam.second(0);
+      tempK(0, 1) = 0;
+      tempK(0, 2) = cam.second(2);
+      tempK(1, 0) = 0;
+      tempK(1, 1) = cam.second(1);
+      tempK(1, 2) = cam.second(3);
+      tempK(2, 0) = 0;
+      tempK(2, 1) = 0;
+      tempK(2, 2) = 1;
+      camera_k_OPENCV.at(cam.first) = tempK;
+      // Distortion parameters
+      cv::Vec4d tempD;
+      tempD(0) = cam.second(4);
+      tempD(1) = cam.second(5);
+      tempD(2) = cam.second(6);
+      tempD(3) = cam.second(7);
+      camera_d_OPENCV.at(cam.first) = tempD;
+    }
+
+    // If we are calibrating camera intrinsics our normalized coordinates will be stale
+    // This is because we appended them to the database with the current best guess *at that timestep*
+    // Thus here since we have a change in calibration, re-normalize all the features we have
+    if (correct_active) {
+
+      // Get all features in this database
+      std::unordered_map<size_t, std::shared_ptr<Feature>> features_idlookup = database->get_internal_data();
+
+      // Loop through and correct each one
+      for (const auto &pair_feat : features_idlookup) {
+        // Get our feature
+        std::shared_ptr<Feature> feat = pair_feat.second;
+        // Loop through each camera for this feature
+        for (auto const &meas_pair : feat->timestamps) {
+          size_t camid = meas_pair.first;
+          std::unique_lock<std::mutex> lck(mtx_feeds.at(camid));
+          for (size_t m = 0; m < feat->uvs.at(camid).size(); m++) {
+            cv::Point2f pt(feat->uvs.at(camid).at(m)(0), feat->uvs.at(camid).at(m)(1));
+            cv::Point2f pt_n = undistort_point(pt, camid);
+            feat->uvs_norm.at(camid).at(m)(0) = pt_n.x;
+            feat->uvs_norm.at(camid).at(m)(1) = pt_n.y;
+          }
+        }
+      }
+    }
+  }
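To make the expected inputs concrete, here is a hedged sketch of a set_calibration() call; the intrinsic numbers are illustrative (EuRoC-like) and not taken from this patch. Each camera id maps to an 8x1 Eigen vector laid out as [fx,fy,cx,cy,d1,d2,d3,d4], plus a flag selecting radtan (false) or fisheye (true):

    #include <map>

    #include <Eigen/Dense>

    void example_set_calibration(ov_core::TrackBase &tracker) {
      Eigen::VectorXd calib0(8);
      calib0 << 458.654, 457.296, 367.215, 248.375,            // fx, fy, cx, cy
          -0.28340811, 0.07395907, 0.00019359, 1.76187114e-05; // d1, d2, d3, d4 (radtan)
      std::map<size_t, Eigen::VectorXd> camera_calib = {{0, calib0}};
      std::map<size_t, bool> camera_fisheye = {{0, false}}; // false => pinhole-radtan
      tracker.set_calibration(camera_calib, camera_fisheye);
    }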
+
+  /**
+   * @brief Process a new monocular image
+   * @param timestamp timestamp the new image occurred at
+   * @param img new cv::Mat grayscale image
+   * @param cam_id the camera id that this new image corresponds to
+   */
+  virtual void feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) = 0;
+
+  /**
+   * @brief Process new stereo pair of images
+   * @param timestamp timestamp this pair occurred at (stereo is synchronised)
+   * @param img_left first grayscaled image
+   * @param img_right second grayscaled image
+   * @param cam_id_left first image camera id
+   * @param cam_id_right second image camera id
+   */
+  virtual void feed_stereo(double timestamp, cv::Mat &img_left, cv::Mat &img_right, size_t cam_id_left, size_t cam_id_right) = 0;
+
+  /**
+   * @brief Shows features extracted in the last image
+   * @param img_out image on which we will overlay the features
+   * @param r1,g1,b1 first color to draw in
+   * @param r2,g2,b2 second color to draw in
+   */
+  virtual void display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2);
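For instance (a sketch under the assumption that the caller owns a visualization loop; none of these names come from the patch), a render thread might periodically do the following, keeping the mutex caveat above in mind since this synchronizes with the feed threads:

    cv::Mat img_active;
    tracker.display_active(img_active, 255, 0, 0, 0, 0, 255); // two draw colors: red and blue
    cv::imshow("active tracks", img_active);
    cv::waitKey(1);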
+  /**
+   * @brief Shows a "trail" for each feature (i.e. its history)
+   * @param img_out image on which we will overlay the features
+   * @param r1,g1,b1 first color to draw in
+   * @param r2,g2,b2 second color to draw in
+   * @param highlighted unique ids which we wish to highlight
+   */
+  virtual void display_history(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2, std::vector<size_t> highlighted = {});
+
+  /**
+   * @brief Get the feature database with all the track information
+   * @return FeatureDatabase pointer that one can query for features
+   */
+  std::shared_ptr<FeatureDatabase> get_feature_database() { return database; }
+
+  /**
+   * @brief Changes the ID of an actively tracked feature to another one
+   * @param id_old Old id we want to change
+   * @param id_new Id we want to change the old id to
+   */
+  void change_feat_id(size_t id_old, size_t id_new) {
+
+    // If found in db then replace
+    if (database->get_internal_data().find(id_old) != database->get_internal_data().end()) {
+      std::shared_ptr<Feature> feat = database->get_internal_data().at(id_old);
+      database->get_internal_data().erase(id_old);
+      feat->featid = id_new;
+      database->get_internal_data().insert({id_new, feat});
+    }
+
+    // Update current track IDs
+    for (auto &cam_ids_pair : ids_last) {
+      for (size_t i = 0; i < cam_ids_pair.second.size(); i++) {
+        if (cam_ids_pair.second.at(i) == id_old) {
+          ids_last.at(cam_ids_pair.first).at(i) = id_new;
+        }
+      }
+    }
+  }
+
+  /**
+   * @brief Main function that will undistort/normalize a point.
+   * @param pt_in uv 2x1 point that we will undistort
+   * @param cam_id id of which camera this point is in
+   * @return undistorted 2x1 point
+   *
+   * Given a uv point, this will undistort it based on the camera matrices.
+   * This will call on the model needed, depending on what type of camera it is!
+   * So if the fisheye flag for camera_1 is true, we will undistort with the fisheye model.
+   * In Kalibr's terms, the non-fisheye is the `pinhole-radtan` model while the fisheye is the `pinhole-equi` model.
+   */
+  cv::Point2f undistort_point(cv::Point2f pt_in, size_t cam_id) {
+    // Determine what camera parameters we should use
+    cv::Matx33d camK = this->camera_k_OPENCV.at(cam_id);
+    cv::Vec4d camD = this->camera_d_OPENCV.at(cam_id);
+    // Call on the fisheye if we should!
+    if (this->camera_fisheye.at(cam_id)) {
+      return undistort_point_fisheye(pt_in, camK, camD);
+    }
+    return undistort_point_brown(pt_in, camK, camD);
+  }
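To pin down the convention (an editorial note with made-up pixel values): the input is a raw distorted pixel, and the return value is in normalized image coordinates on the z = 1 plane, roughly x_n = (u - cx) / fx with the distortion removed, because the underlying cv::undistortPoints / cv::fisheye::undistortPoints calls are given no new projection matrix:

    cv::Point2f uv_raw(340.2f, 251.7f); // distorted pixel in camera 0
    cv::Point2f xy_norm = tracker.undistort_point(uv_raw, 0);
    // Multiplying back through K (u = fx * xy_norm.x + cx, etc.) gives the *undistorted*
    // pixel location, not uv_raw; these normalized values are what uvs_norm stores.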
+
+protected:
+  /**
+   * @brief Undistort function RADTAN/BROWN.
+   *
+   * Given a uv point, this will undistort it based on the camera matrices.
+   * To equate this to Kalibr's models, this is what you would use for `pinhole-radtan`.
+   */
+  static cv::Point2f undistort_point_brown(cv::Point2f pt_in, cv::Matx33d &camK, cv::Vec4d &camD) {
+    // Convert to opencv format
+    cv::Mat mat(1, 2, CV_32F);
+    mat.at<float>(0, 0) = pt_in.x;
+    mat.at<float>(0, 1) = pt_in.y;
+    mat = mat.reshape(2); // Nx1, 2-channel
+    // Undistort it!
+    cv::undistortPoints(mat, mat, camK, camD);
+    // Construct our return vector
+    cv::Point2f pt_out;
+    mat = mat.reshape(1); // Nx2, 1-channel
+    pt_out.x = mat.at<float>(0, 0);
+    pt_out.y = mat.at<float>(0, 1);
+    return pt_out;
+  }
+
+  /**
+   * @brief Undistort function FISHEYE/EQUIDISTANT.
+   *
+   * Given a uv point, this will undistort it based on the camera matrices.
+   * To equate this to Kalibr's models, this is what you would use for `pinhole-equi`.
+   */
+  static cv::Point2f undistort_point_fisheye(cv::Point2f pt_in, cv::Matx33d &camK, cv::Vec4d &camD) {
+    // Convert point to opencv format
+    cv::Mat mat(1, 2, CV_32F);
+    mat.at<float>(0, 0) = pt_in.x;
+    mat.at<float>(0, 1) = pt_in.y;
+    mat = mat.reshape(2); // Nx1, 2-channel
+    // Undistort it!
+    cv::fisheye::undistortPoints(mat, mat, camK, camD);
+    // Construct our return vector
+    cv::Point2f pt_out;
+    mat = mat.reshape(1); // Nx2, 1-channel
+    pt_out.x = mat.at<float>(0, 0);
+    pt_out.y = mat.at<float>(0, 1);
+    return pt_out;
+  }
+
+  /// Database with all our current features
+  std::shared_ptr<FeatureDatabase> database;
+
+  /// If we are a fisheye model or not
+  std::map<size_t, bool> camera_fisheye;
+
+  /// Camera intrinsics in OpenCV format
+  std::map<size_t, cv::Matx33d> camera_k_OPENCV;
+
+  /// Camera distortion in OpenCV format
+  std::map<size_t, cv::Vec4d> camera_d_OPENCV;
+
+  /// Number of features we should try to track frame to frame
+  int num_features;
+
+  /// Mutexes for our last set of image storage (img_last, pts_last, and ids_last)
+  std::vector<std::mutex> mtx_feeds;
+
+  /// Last set of images (use map so all trackers render in the same order)
+  std::map<size_t, cv::Mat> img_last;
+
+  /// Last set of tracked points
+  std::unordered_map<size_t, std::vector<cv::KeyPoint>> pts_last;
+
+  /// Set of IDs of each current feature in the database
+  std::unordered_map<size_t, std::vector<size_t>> ids_last;
+
+  /// Master ID for this tracker (atomic to allow for multi-threading)
+  std::atomic<size_t> currid;
+};
+
+} // namespace ov_core

#endif /* OV_CORE_TRACK_BASE_H */
diff --git a/ov_core/src/track/TrackDescriptor.cpp b/ov_core/src/track/TrackDescriptor.cpp
index 98a36f523..81e18e028 100644
--- a/ov_core/src/track/TrackDescriptor.cpp
+++ b/ov_core/src/track/TrackDescriptor.cpp
@@ -22,471 +22,427 @@ using namespace ov_core;
-
 void TrackDescriptor::feed_monocular(double timestamp, cv::Mat &imgin, size_t cam_id) {
-    // Start timing
-    rT1 = boost::posix_time::microsec_clock::local_time();
-
-    // Lock this data feed for this camera
-    std::unique_lock<std::mutex> lck(mtx_feeds.at(cam_id));
+  // Start timing
+  rT1 = boost::posix_time::microsec_clock::local_time();
-    // Histogram equalize
-    cv::Mat img;
-    cv::equalizeHist(imgin, img);
+  // Lock this data feed for this camera
+  std::unique_lock<std::mutex> lck(mtx_feeds.at(cam_id));
-    // If we are the first frame (or have lost tracking), initialize our descriptors
-    if(pts_last.find(cam_id)==pts_last.end() || pts_last[cam_id].empty()) {
-        perform_detection_monocular(img, pts_last[cam_id], desc_last[cam_id], ids_last[cam_id]);
-        img_last[cam_id] = img.clone();
-        return;
-    }
-
-    // Our new keypoints and descriptor for the new image
-    std::vector<cv::KeyPoint> pts_new;
-    cv::Mat desc_new;
-    std::vector<size_t> ids_new;
-
-    // First, extract new descriptors for this new image
-    perform_detection_monocular(img, pts_new, desc_new, ids_new);
-    rT2 = boost::posix_time::microsec_clock::local_time();
-
-
//=================================================================================== - //=================================================================================== - - // Our matches temporally - std::vector matches_ll; - - // Lets match temporally - robust_match(pts_last[cam_id],pts_new,desc_last[cam_id],desc_new,cam_id,cam_id,matches_ll); - rT3 = boost::posix_time::microsec_clock::local_time(); - - - //=================================================================================== - //=================================================================================== - - // Get our "good tracks" - std::vector good_left; - std::vector good_ids_left; - cv::Mat good_desc_left; - - // Count how many we have tracked from the last time - int num_tracklast = 0; - - // Loop through all current left to right points - // We want to see if any of theses have matches to the previous frame - // If we have a match new->old then we want to use that ID instead of the new one - for(size_t i=0; i pts_new; + cv::Mat desc_new; + std::vector ids_new; + + // First, extract new descriptors for this new image + perform_detection_monocular(img, pts_new, desc_new, ids_new); + rT2 = boost::posix_time::microsec_clock::local_time(); + + //=================================================================================== + //=================================================================================== + + // Our matches temporally + std::vector matches_ll; + + // Lets match temporally + robust_match(pts_last[cam_id], pts_new, desc_last[cam_id], desc_new, cam_id, cam_id, matches_ll); + rT3 = boost::posix_time::microsec_clock::local_time(); + + //=================================================================================== + //=================================================================================== + + // Get our "good tracks" + std::vector good_left; + std::vector good_ids_left; + cv::Mat good_desc_left; + + // Count how many we have tracked from the last time + int num_tracklast = 0; + + // Loop through all current left to right points + // We want to see if any of theses have matches to the previous frame + // If we have a match new->old then we want to use that ID instead of the new one + for (size_t i = 0; i < pts_new.size(); i++) { + + // Loop through all left matches, and find the old "train" id + int idll = -1; + for (size_t j = 0; j < matches_ll.size(); j++) { + if (matches_ll[j].trainIdx == (int)i) { + idll = matches_ll[j].queryIdx; + } } - rT4 = boost::posix_time::microsec_clock::local_time(); - - //=================================================================================== - //=================================================================================== - - - // Update our feature database, with theses new observations - for(size_t i=0; iupdate_feature(good_ids_left.at(i), timestamp, cam_id, - good_left.at(i).pt.x, good_left.at(i).pt.y, - npt_l.x, npt_l.y); + // If we found a good stereo track from left to left, and right to right + // Then lets replace the current ID with the old ID + // We also check that we are linked to the same past ID value + if (idll != -1) { + good_left.push_back(pts_new[i]); + good_desc_left.push_back(desc_new.row((int)i)); + good_ids_left.push_back(ids_last[cam_id][idll]); + num_tracklast++; + } else { + // Else just append the current feature and its unique ID + good_left.push_back(pts_new[i]); + good_desc_left.push_back(desc_new.row((int)i)); + good_ids_left.push_back(ids_new[i]); } - - // Debug info - //printf("LtoL = %d | 
good = %d | fromlast = %d\n",(int)matches_ll.size(),(int)good_left.size(),num_tracklast); - - - // Move forward in time - img_last[cam_id] = img.clone(); - pts_last[cam_id] = good_left; - ids_last[cam_id] = good_ids_left; - desc_last[cam_id] = good_desc_left; - rT5 = boost::posix_time::microsec_clock::local_time(); - - // Our timing information - //printf("[TIME-DESC]: %.4f seconds for detection\n",(rT2-rT1).total_microseconds() * 1e-6); - //printf("[TIME-DESC]: %.4f seconds for matching\n",(rT3-rT2).total_microseconds() * 1e-6); - //printf("[TIME-DESC]: %.4f seconds for merging\n",(rT4-rT3).total_microseconds() * 1e-6); - //printf("[TIME-DESC]: %.4f seconds for feature DB update (%d features)\n",(rT5-rT4).total_microseconds() * 1e-6, (int)good_left.size()); - //printf("[TIME-DESC]: %.4f seconds for total\n",(rT5-rT1).total_microseconds() * 1e-6); - - + } + rT4 = boost::posix_time::microsec_clock::local_time(); + + //=================================================================================== + //=================================================================================== + + // Update our feature database, with theses new observations + for (size_t i = 0; i < good_left.size(); i++) { + cv::Point2f npt_l = undistort_point(good_left.at(i).pt, cam_id); + database->update_feature(good_ids_left.at(i), timestamp, cam_id, good_left.at(i).pt.x, good_left.at(i).pt.y, npt_l.x, npt_l.y); + } + + // Debug info + // printf("LtoL = %d | good = %d | fromlast = %d\n",(int)matches_ll.size(),(int)good_left.size(),num_tracklast); + + // Move forward in time + img_last[cam_id] = img.clone(); + pts_last[cam_id] = good_left; + ids_last[cam_id] = good_ids_left; + desc_last[cam_id] = good_desc_left; + rT5 = boost::posix_time::microsec_clock::local_time(); + + // Our timing information + // printf("[TIME-DESC]: %.4f seconds for detection\n",(rT2-rT1).total_microseconds() * 1e-6); + // printf("[TIME-DESC]: %.4f seconds for matching\n",(rT3-rT2).total_microseconds() * 1e-6); + // printf("[TIME-DESC]: %.4f seconds for merging\n",(rT4-rT3).total_microseconds() * 1e-6); + // printf("[TIME-DESC]: %.4f seconds for feature DB update (%d features)\n",(rT5-rT4).total_microseconds() * 1e-6, (int)good_left.size()); + // printf("[TIME-DESC]: %.4f seconds for total\n",(rT5-rT1).total_microseconds() * 1e-6); } void TrackDescriptor::feed_stereo(double timestamp, cv::Mat &img_leftin, cv::Mat &img_rightin, size_t cam_id_left, size_t cam_id_right) { - // Start timing - rT1 = boost::posix_time::microsec_clock::local_time(); - - // Lock this data feed for this camera - std::unique_lock lck1(mtx_feeds.at(cam_id_left)); - std::unique_lock lck2(mtx_feeds.at(cam_id_right)); - - // Histogram equalize - cv::Mat img_left, img_right; - cv::equalizeHist(img_leftin, img_left); - cv::equalizeHist(img_rightin, img_right); - - // If we are the first frame (or have lost tracking), initialize our descriptors - if(pts_last[cam_id_left].empty() || pts_last[cam_id_right].empty()) { - perform_detection_stereo(img_left, img_right, - pts_last[cam_id_left], pts_last[cam_id_right], - desc_last[cam_id_left], desc_last[cam_id_right], - cam_id_left, cam_id_right, - ids_last[cam_id_left], ids_last[cam_id_right]); - img_last[cam_id_left] = img_left.clone(); - img_last[cam_id_right] = img_right.clone(); - return; - } - - // Our new keypoints and descriptor for the new image - std::vector pts_left_new, pts_right_new; - cv::Mat desc_left_new, desc_right_new; - std::vector ids_left_new, ids_right_new; - - // First, extract new descriptors for this new 
image - perform_detection_stereo(img_left, img_right, pts_left_new, pts_right_new, - desc_left_new, desc_right_new, cam_id_left, cam_id_right, ids_left_new, ids_right_new); - rT2 = boost::posix_time::microsec_clock::local_time(); - - - //=================================================================================== - //=================================================================================== - - // Our matches temporally - std::vector matches_ll, matches_rr; - parallel_for_(cv::Range(0, 2), LambdaBody([&](const cv::Range& range){ - for (int i = range.start; i < range.end; i++) { - bool is_left = (i == 0); - robust_match( - pts_last[is_left ? cam_id_left : cam_id_right], - is_left ? pts_left_new : pts_right_new, - desc_last[is_left ? cam_id_left : cam_id_right], - is_left ? desc_left_new : desc_right_new, - is_left ? cam_id_left : cam_id_right, - is_left ? cam_id_left : cam_id_right, - is_left ? matches_ll : matches_rr - ); - } - })); - rT3 = boost::posix_time::microsec_clock::local_time(); - - //=================================================================================== - //=================================================================================== - - // Get our "good tracks" - std::vector good_left, good_right; - std::vector good_ids_left, good_ids_right; - cv::Mat good_desc_left, good_desc_right; - - // Points must be of equal size - assert(pts_last[cam_id_left].size() == pts_last[cam_id_right].size()); - assert(pts_left_new.size() == pts_right_new.size()); - - // Count how many we have tracked from the last time - int num_tracklast = 0; - - // Loop through all current left to right points - // We want to see if any of theses have matches to the previous frame - // If we have a match new->old then we want to use that ID instead of the new one - for(size_t i=0; iupdate_feature(good_ids_left.at(i), timestamp, cam_id_left, - good_left.at(i).pt.x, good_left.at(i).pt.y, - npt_l.x, npt_l.y); - database->update_feature(good_ids_left.at(i), timestamp, cam_id_right, - good_right.at(i).pt.x, good_right.at(i).pt.y, - npt_r.x, npt_r.y); - } + // Lock this data feed for this camera + std::unique_lock lck1(mtx_feeds.at(cam_id_left)); + std::unique_lock lck2(mtx_feeds.at(cam_id_right)); + // Histogram equalize + cv::Mat img_left, img_right; + cv::equalizeHist(img_leftin, img_left); + cv::equalizeHist(img_rightin, img_right); - // Debug info - //printf("LtoL = %d | RtoR = %d | LtoR = %d | good = %d | fromlast = %d\n", (int)matches_ll.size(), - // (int)matches_rr.size(),(int)ids_left_new.size(),(int)good_left.size(),num_tracklast); - - - // Move forward in time + // If we are the first frame (or have lost tracking), initialize our descriptors + if (pts_last[cam_id_left].empty() || pts_last[cam_id_right].empty()) { + perform_detection_stereo(img_left, img_right, pts_last[cam_id_left], pts_last[cam_id_right], desc_last[cam_id_left], + desc_last[cam_id_right], cam_id_left, cam_id_right, ids_last[cam_id_left], ids_last[cam_id_right]); img_last[cam_id_left] = img_left.clone(); img_last[cam_id_right] = img_right.clone(); - pts_last[cam_id_left] = good_left; - pts_last[cam_id_right] = good_right; - ids_last[cam_id_left] = good_ids_left; - ids_last[cam_id_right] = good_ids_right; - desc_last[cam_id_left] = good_desc_left; - desc_last[cam_id_right] = good_desc_right; - rT5 = boost::posix_time::microsec_clock::local_time(); - - // Our timing information - //printf("[TIME-DESC]: %.4f seconds for detection\n",(rT2-rT1).total_microseconds() * 1e-6); - //printf("[TIME-DESC]: %.4f 
seconds for matching\n",(rT3-rT2).total_microseconds() * 1e-6); - //printf("[TIME-DESC]: %.4f seconds for merging\n",(rT4-rT3).total_microseconds() * 1e-6); - //printf("[TIME-DESC]: %.4f seconds for feature DB update (%d features)\n",(rT5-rT4).total_microseconds() * 1e-6, (int)good_left.size()); - //printf("[TIME-DESC]: %.4f seconds for total\n",(rT5-rT1).total_microseconds() * 1e-6); - -} - -void TrackDescriptor::perform_detection_monocular(const cv::Mat& img0, std::vector& pts0, - cv::Mat& desc0, std::vector& ids0) { - - // Assert that we need features - assert(pts0.empty()); - - // Extract our features (use FAST with griding) - std::vector pts0_ext; - Grider_FAST::perform_griding(img0, pts0_ext, num_features, grid_x, grid_y, threshold, true); - - // For all new points, extract their descriptors - cv::Mat desc0_ext; - this->orb0->compute(img0, pts0_ext, desc0_ext); - - // For all good matches, lets append to our returned vectors - // NOTE: if we multi-thread this atomic can cause some randomness due to multiple thread detecting features - // NOTE: this is due to the fact that we select update features based on feat id - // NOTE: thus the order will matter since we try to select oldest (smallest id) to update with - // NOTE: not sure how to remove... maybe a better way? - for(size_t i=0; i &pts0, std::vector &pts1, - cv::Mat &desc0, cv::Mat &desc1, - size_t cam_id0, size_t cam_id1, - std::vector &ids0, std::vector &ids1) { - - // Assert that we need features - assert(pts0.empty()); - assert(pts1.empty()); - - // Extract our features (use FAST with griding), and their descriptors - std::vector pts0_ext, pts1_ext; - cv::Mat desc0_ext, desc1_ext; - parallel_for_(cv::Range(0, 2), LambdaBody([&](const cv::Range& range) { - for (int i = range.start; i < range.end; i++) { - bool is_left = (i == 0); - Grider_FAST::perform_griding( - is_left ? img0 : img1, - is_left ? pts0_ext : pts1_ext, - num_features, grid_x, grid_y, threshold, true - ); - (is_left ? orb0 : orb1)->compute( - is_left ? img0 : img1, - is_left ? pts0_ext : pts1_ext, - is_left ? desc0_ext : desc1_ext - ); - } - })); - - // Do matching from the left to the right image - std::vector matches; - robust_match(pts0_ext, pts1_ext, desc0_ext, desc1_ext, cam_id0, cam_id1, matches); - - // For all good matches, lets append to our returned vectors - for(size_t i=0; i pts_left_new, pts_right_new; + cv::Mat desc_left_new, desc_right_new; + std::vector ids_left_new, ids_right_new; + + // First, extract new descriptors for this new image + perform_detection_stereo(img_left, img_right, pts_left_new, pts_right_new, desc_left_new, desc_right_new, cam_id_left, cam_id_right, + ids_left_new, ids_right_new); + rT2 = boost::posix_time::microsec_clock::local_time(); + + //=================================================================================== + //=================================================================================== + + // Our matches temporally + std::vector matches_ll, matches_rr; + parallel_for_(cv::Range(0, 2), LambdaBody([&](const cv::Range &range) { + for (int i = range.start; i < range.end; i++) { + bool is_left = (i == 0); + robust_match(pts_last[is_left ? cam_id_left : cam_id_right], is_left ? pts_left_new : pts_right_new, + desc_last[is_left ? cam_id_left : cam_id_right], is_left ? desc_left_new : desc_right_new, + is_left ? cam_id_left : cam_id_right, is_left ? cam_id_left : cam_id_right, + is_left ? 
matches_ll : matches_rr); + } + })); + rT3 = boost::posix_time::microsec_clock::local_time(); + + //=================================================================================== + //=================================================================================== + + // Get our "good tracks" + std::vector good_left, good_right; + std::vector good_ids_left, good_ids_right; + cv::Mat good_desc_left, good_desc_right; + + // Points must be of equal size + assert(pts_last[cam_id_left].size() == pts_last[cam_id_right].size()); + assert(pts_left_new.size() == pts_right_new.size()); + + // Count how many we have tracked from the last time + int num_tracklast = 0; + + // Loop through all current left to right points + // We want to see if any of theses have matches to the previous frame + // If we have a match new->old then we want to use that ID instead of the new one + for (size_t i = 0; i < pts_left_new.size(); i++) { + + // Loop through all left matches, and find the old "train" id + int idll = -1; + for (size_t j = 0; j < matches_ll.size(); j++) { + if (matches_ll[j].trainIdx == (int)i) { + idll = matches_ll[j].queryIdx; + } } -} - -void TrackDescriptor::robust_match(std::vector& pts0, std::vector pts1, - cv::Mat& desc0, cv::Mat& desc1, size_t id0, size_t id1, std::vector& matches) { - - // Our 1to2 and 2to1 match vectors - std::vector > matches0to1, matches1to0; - - // Match descriptors (return 2 nearest neighbours) - matcher->knnMatch(desc0, desc1, matches0to1, 2); - matcher->knnMatch(desc1, desc0, matches1to0, 2); - - // Do a ratio test for both matches - robust_ratio_test(matches0to1); - robust_ratio_test(matches1to0); - - // Finally do a symmetry test - std::vector matches_good; - robust_symmetry_test(matches0to1, matches1to0, matches_good); - - // Convert points into points for RANSAC - std::vector pts0_rsc, pts1_rsc; - for(size_t i=0; i pts0_n, pts1_n; - for(size_t i=0; iupdate_feature(good_ids_left.at(i), timestamp, cam_id_left, good_left.at(i).pt.x, good_left.at(i).pt.y, npt_l.x, npt_l.y); + database->update_feature(good_ids_left.at(i), timestamp, cam_id_right, good_right.at(i).pt.x, good_right.at(i).pt.y, npt_r.x, npt_r.y); + } + + // Debug info + // printf("LtoL = %d | RtoR = %d | LtoR = %d | good = %d | fromlast = %d\n", (int)matches_ll.size(), + // (int)matches_rr.size(),(int)ids_left_new.size(),(int)good_left.size(),num_tracklast); + + // Move forward in time + img_last[cam_id_left] = img_left.clone(); + img_last[cam_id_right] = img_right.clone(); + pts_last[cam_id_left] = good_left; + pts_last[cam_id_right] = good_right; + ids_last[cam_id_left] = good_ids_left; + ids_last[cam_id_right] = good_ids_right; + desc_last[cam_id_left] = good_desc_left; + desc_last[cam_id_right] = good_desc_right; + rT5 = boost::posix_time::microsec_clock::local_time(); + + // Our timing information + // printf("[TIME-DESC]: %.4f seconds for detection\n",(rT2-rT1).total_microseconds() * 1e-6); + // printf("[TIME-DESC]: %.4f seconds for matching\n",(rT3-rT2).total_microseconds() * 1e-6); + // printf("[TIME-DESC]: %.4f seconds for merging\n",(rT4-rT3).total_microseconds() * 1e-6); + // printf("[TIME-DESC]: %.4f seconds for feature DB update (%d features)\n",(rT5-rT4).total_microseconds() * 1e-6, (int)good_left.size()); + // printf("[TIME-DESC]: %.4f seconds for total\n",(rT5-rT1).total_microseconds() * 1e-6); +} - // Do RANSAC outlier rejection (note since we normalized the max pixel error is now in the normalized cords) - std::vector mask_rsc; - double max_focallength_img0 = 
std::max(camera_k_OPENCV.at(id0)(0,0),camera_k_OPENCV.at(id0)(1,1)); - double max_focallength_img1 = std::max(camera_k_OPENCV.at(id1)(0,0),camera_k_OPENCV.at(id1)(1,1)); - double max_focallength = std::max(max_focallength_img0,max_focallength_img1); - cv::findFundamentalMat(pts0_n, pts1_n, cv::FM_RANSAC, 1/max_focallength, 0.999, mask_rsc); - - // Loop through all good matches, and only append ones that have passed RANSAC - for(size_t i=0; i &pts0, cv::Mat &desc0, + std::vector &ids0) { + + // Assert that we need features + assert(pts0.empty()); + + // Extract our features (use FAST with griding) + std::vector pts0_ext; + Grider_FAST::perform_griding(img0, pts0_ext, num_features, grid_x, grid_y, threshold, true); + + // For all new points, extract their descriptors + cv::Mat desc0_ext; + this->orb0->compute(img0, pts0_ext, desc0_ext); + + // For all good matches, lets append to our returned vectors + // NOTE: if we multi-thread this atomic can cause some randomness due to multiple thread detecting features + // NOTE: this is due to the fact that we select update features based on feat id + // NOTE: thus the order will matter since we try to select oldest (smallest id) to update with + // NOTE: not sure how to remove... maybe a better way? + for (size_t i = 0; i < pts0_ext.size(); i++) { + // Append our keypoints and descriptors + pts0.push_back(pts0_ext.at(i)); + desc0.push_back(desc0_ext.row((int)i)); + // Set our IDs to be unique IDs here, will later replace with corrected ones, after temporal matching + size_t temp = ++currid; + ids0.push_back(temp); + } +} +void TrackDescriptor::perform_detection_stereo(const cv::Mat &img0, const cv::Mat &img1, std::vector &pts0, + std::vector &pts1, cv::Mat &desc0, cv::Mat &desc1, size_t cam_id0, + size_t cam_id1, std::vector &ids0, std::vector &ids1) { + + // Assert that we need features + assert(pts0.empty()); + assert(pts1.empty()); + + // Extract our features (use FAST with griding), and their descriptors + std::vector pts0_ext, pts1_ext; + cv::Mat desc0_ext, desc1_ext; + parallel_for_(cv::Range(0, 2), LambdaBody([&](const cv::Range &range) { + for (int i = range.start; i < range.end; i++) { + bool is_left = (i == 0); + Grider_FAST::perform_griding(is_left ? img0 : img1, is_left ? pts0_ext : pts1_ext, num_features, grid_x, grid_y, + threshold, true); + (is_left ? orb0 : orb1)->compute(is_left ? img0 : img1, is_left ? pts0_ext : pts1_ext, is_left ? 
desc0_ext : desc1_ext);
+                 }
+               }));
+
+  // Do matching from the left to the right image
+  std::vector<cv::DMatch> matches;
+  robust_match(pts0_ext, pts1_ext, desc0_ext, desc1_ext, cam_id0, cam_id1, matches);
+
+  // For all good matches, lets append to our returned vectors
+  for (size_t i = 0; i < matches.size(); i++) {
+    // Get our ids
+    int index_pt0 = matches.at(i).queryIdx;
+    int index_pt1 = matches.at(i).trainIdx;
+    // Append our keypoints and descriptors
+    pts0.push_back(pts0_ext[index_pt0]);
+    pts1.push_back(pts1_ext[index_pt1]);
+    desc0.push_back(desc0_ext.row(index_pt0));
+    desc1.push_back(desc1_ext.row(index_pt1));
+    // Set our IDs to be unique IDs here, will later replace with corrected ones, after temporal matching
+    size_t temp = ++currid;
+    ids0.push_back(temp);
+    ids1.push_back(temp);
+  }
+}
+
+void TrackDescriptor::robust_match(std::vector<cv::KeyPoint> &pts0, std::vector<cv::KeyPoint> pts1, cv::Mat &desc0, cv::Mat &desc1,
+                                   size_t id0, size_t id1, std::vector<cv::DMatch> &matches) {
+
+  // Our 1to2 and 2to1 match vectors
+  std::vector<std::vector<cv::DMatch>> matches0to1, matches1to0;
+
+  // Match descriptors (return 2 nearest neighbours)
+  matcher->knnMatch(desc0, desc1, matches0to1, 2);
+  matcher->knnMatch(desc1, desc0, matches1to0, 2);
+
+  // Do a ratio test for both matches
+  robust_ratio_test(matches0to1);
+  robust_ratio_test(matches1to0);
+
+  // Finally do a symmetry test
+  std::vector<cv::DMatch> matches_good;
+  robust_symmetry_test(matches0to1, matches1to0, matches_good);
+
+  // Convert points into points for RANSAC
+  std::vector<cv::Point2f> pts0_rsc, pts1_rsc;
+  for (size_t i = 0; i < matches_good.size(); i++) {
+    // Get our ids
+    int index_pt0 = matches_good.at(i).queryIdx;
+    int index_pt1 = matches_good.at(i).trainIdx;
+    // Push back just the 2d point
+    pts0_rsc.push_back(pts0[index_pt0].pt);
+    pts1_rsc.push_back(pts1[index_pt1].pt);
+  }
+
+  // If we don't have enough points for ransac just return empty
+  if (pts0_rsc.size() < 10)
+    return;
+
+  // Normalize these points, so we can then do ransac
+  // We don't want to do ransac on distorted image uvs since the mapping is nonlinear
+  std::vector<cv::Point2f> pts0_n, pts1_n;
+  for (size_t i = 0; i < pts0_rsc.size(); i++) {
+    pts0_n.push_back(undistort_point(pts0_rsc.at(i), id0));
+    pts1_n.push_back(undistort_point(pts1_rsc.at(i), id1));
+  }
+
+  // Do RANSAC outlier rejection (note since we normalized the max pixel error is now in the normalized coords)
+  std::vector<uchar> mask_rsc;
+  double max_focallength_img0 = std::max(camera_k_OPENCV.at(id0)(0, 0), camera_k_OPENCV.at(id0)(1, 1));
+  double max_focallength_img1 = std::max(camera_k_OPENCV.at(id1)(0, 0), camera_k_OPENCV.at(id1)(1, 1));
+  double max_focallength = std::max(max_focallength_img0, max_focallength_img1);
+  cv::findFundamentalMat(pts0_n, pts1_n, cv::FM_RANSAC, 1 / max_focallength, 0.999, mask_rsc);
+
+  // Loop through all good matches, and only append ones that have passed RANSAC
+  for (size_t i = 0; i < matches_good.size(); i++) {
+    // Skip if bad ransac id
+    if (mask_rsc[i] != 1)
+      continue;
+    // Else, lets append this match to the return array!
+    matches.push_back(matches_good.at(i));
+  }
+}
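One step above is worth spelling out (editorial note, illustrative numbers): the 1 / max_focallength RANSAC threshold is a one-pixel tolerance converted into normalized coordinates. Since u = fx * x_n + cx, a pixel error delta_u corresponds to roughly delta_u / fx in normalized units:

    double fx = 458.654;         // illustrative focal length in pixels
    double err_px = 1.0;         // desired epipolar tolerance in pixels
    double thresh = err_px / fx; // ~0.00218, the scale of value handed to cv::findFundamentalMat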
-void TrackDescriptor::robust_ratio_test(std::vector<std::vector<cv::DMatch> >& matches) {
-
-    // Loop through all matches
-    for(auto matchIterator=matches.begin(); matchIterator!= matches.end(); ++matchIterator)
-    {
-        // If 2 NN has been identified, else remove this feature
-        if (matchIterator->size() > 1) {
-            // check distance ratio, remove it if the ratio is larger
-            if ((*matchIterator)[0].distance / (*matchIterator)[1].distance > knn_ratio ) {
-                matchIterator->clear();
-            }
-        } else {
-            // does not have 2 neighbours, so remove it
-            matchIterator->clear();
-        }
-    }
-}
-
-void TrackDescriptor::robust_symmetry_test(std::vector<std::vector<cv::DMatch> >& matches1,
-                                           std::vector<std::vector<cv::DMatch> >& matches2, std::vector<cv::DMatch>& good_matches) {
-
-    // for all matches image 1 -> image 2
-    for (auto matchIterator1 = matches1.begin(); matchIterator1 != matches1.end(); ++matchIterator1) {
-
-        // ignore deleted matches
-        if (matchIterator1->empty() || matchIterator1->size() < 2)
-            continue;
-
-        // for all matches image 2 -> image 1
-        for (auto matchIterator2 = matches2.begin(); matchIterator2 != matches2.end(); ++matchIterator2) {
-            // ignore deleted matches
-            if (matchIterator2->empty() || matchIterator2->size() < 2)
-                continue;
-
-            // Match symmetry test
-            if ((*matchIterator1)[0].queryIdx == (*matchIterator2)[0].trainIdx && (*matchIterator2)[0].queryIdx == (*matchIterator1)[0].trainIdx) {
-                // add symmetrical match
-                good_matches.emplace_back(cv::DMatch((*matchIterator1)[0].queryIdx,(*matchIterator1)[0].trainIdx,(*matchIterator1)[0].distance));
-                // next match in image 1 -> image 2
-                break;
-            }
-        }
-    }
-}
+
+void TrackDescriptor::robust_ratio_test(std::vector<std::vector<cv::DMatch>> &matches) {
+
+  // Loop through all matches
+  for (auto matchIterator = matches.begin(); matchIterator != matches.end(); ++matchIterator) {
+    // If 2 NN has been identified, else remove this feature
+    if (matchIterator->size() > 1) {
+      // check distance ratio, remove it if the ratio is larger
+      if ((*matchIterator)[0].distance / (*matchIterator)[1].distance > knn_ratio) {
+        matchIterator->clear();
+      }
+    } else {
+      // does not have 2 neighbours, so remove it
+      matchIterator->clear();
+    }
+  }
+}
+
+void TrackDescriptor::robust_symmetry_test(std::vector<std::vector<cv::DMatch>> &matches1, std::vector<std::vector<cv::DMatch>> &matches2,
+                                           std::vector<cv::DMatch> &good_matches) {
+
+  // for all matches image 1 -> image 2
+  for (auto matchIterator1 = matches1.begin(); matchIterator1 != matches1.end(); ++matchIterator1) {
+
+    // ignore deleted matches
+    if (matchIterator1->empty() || matchIterator1->size() < 2)
+      continue;
+
+    // for all matches image 2 -> image 1
+    for (auto matchIterator2 = matches2.begin(); matchIterator2 != matches2.end(); ++matchIterator2) {
+      // ignore deleted matches
+      if (matchIterator2->empty() || matchIterator2->size() < 2)
+        continue;
+
+      // Match symmetry test
+      if ((*matchIterator1)[0].queryIdx == (*matchIterator2)[0].trainIdx &&
+          (*matchIterator2)[0].queryIdx == (*matchIterator1)[0].trainIdx) {
+        // add symmetrical match
+        good_matches.emplace_back(cv::DMatch((*matchIterator1)[0].queryIdx, (*matchIterator1)[0].trainIdx, (*matchIterator1)[0].distance));
+        // next match in image 1 -> image 2
+        break;
+      }
+    }
+  }
+}
diff --git a/ov_core/src/track/TrackDescriptor.h b/ov_core/src/track/TrackDescriptor.h
index fb9a22459..f664134ec 100644
--- a/ov_core/src/track/TrackDescriptor.h
+++ b/ov_core/src/track/TrackDescriptor.h
@@ -27,148 +27,137 @@ namespace ov_core {
-
-    /**
-     * @brief Descriptor-based visual tracking
-     *
-     * Here we use descriptor matching to track features from one frame to the next.
- * We track both temporally, and across stereo pairs to get stereo constraints. - * Right now we use ORB descriptors as we have found it is the fastest when computing descriptors. - * Tracks are then rejected based on a ratio test and ransac. - */ - class TrackDescriptor : public TrackBase { - - public: - - /** - * @brief Public default constructor - */ - TrackDescriptor() : TrackBase(), threshold(10), grid_x(8), grid_y(5), knn_ratio(0.75) {} - - /** - * @brief Public constructor with configuration variables - * @param numfeats number of features we want want to track (i.e. track 200 points from frame to frame) - * @param numaruco the max id of the arucotags, so we ensure that we start our non-auroc features above this value - * @param fast_threshold FAST detection threshold - * @param gridx size of grid in the x-direction / u-direction - * @param gridy size of grid in the y-direction / v-direction - * @param knnratio matching ratio needed (smaller value forces top two descriptors during match to be more different) - */ - explicit TrackDescriptor(int numfeats, int numaruco, int fast_threshold, int gridx, int gridy, double knnratio) : - TrackBase(numfeats, numaruco), threshold(fast_threshold), grid_x(gridx), grid_y(gridy), knn_ratio(knnratio) {} - - /** - * @brief Process a new monocular image - * @param timestamp timestamp the new image occurred at - * @param img new cv:Mat grayscale image - * @param cam_id the camera id that this new image corresponds too - */ - void feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) override; - - /** - * @brief Process new stereo pair of images - * @param timestamp timestamp this pair occured at (stereo is synchronised) - * @param img_left first grayscaled image - * @param img_right second grayscaled image - * @param cam_id_left first image camera id - * @param cam_id_right second image camera id - */ - void feed_stereo(double timestamp, cv::Mat &img_left, cv::Mat &img_right, size_t cam_id_left, size_t cam_id_right) override; - - - protected: - - /** - * @brief Detects new features in the current image - * @param img0 image we will detect features on - * @param pts0 vector of extracted keypoints - * @param desc0 vector of the extracted descriptors - * @param ids0 vector of all new IDs - * - * Given a set of images, and their currently extracted features, this will try to add new features. - * We return all extracted descriptors here since we DO NOT need to do stereo tracking left to right. - * Our vector of IDs will be later overwritten when we match features temporally to the previous frame's features. - * See robust_match() for the matching. - */ - void perform_detection_monocular(const cv::Mat &img0, std::vector &pts0, cv::Mat &desc0, std::vector &ids0); - - /** - * @brief Detects new features in the current stereo pair - * @param img0 left image we will detect features on - * @param img1 right image we will detect features on - * @param pts0 left vector of new keypoints - * @param pts1 right vector of new keypoints - * @param desc0 left vector of extracted descriptors - * @param desc1 left vector of extracted descriptors - * @param cam_id0 id of the first camera - * @param cam_id1 id of the second camera - * @param ids0 left vector of all new IDs - * @param ids1 right vector of all new IDs - * - * This does the same logic as the perform_detection_monocular() function, but we also enforce stereo contraints. - * We also do STEREO matching from the left to right, and only return good matches that are found in both the left and right. 
- * Our vector of IDs will be later overwritten when we match features temporally to the previous frame's features. - * See robust_match() for the matching. - */ - void perform_detection_stereo(const cv::Mat &img0, const cv::Mat &img1, - std::vector &pts0, std::vector &pts1, - cv::Mat &desc0, cv::Mat &desc1, - size_t cam_id0, size_t cam_id1, - std::vector &ids0, std::vector &ids1); - - /** - * @brief Find matches between two keypoint+descriptor sets. - * @param pts0 first vector of keypoints - * @param pts1 second vector of keypoints - * @param desc0 first vector of descriptors - * @param desc1 second vector of decriptors - * @param id0 id of the first camera - * @param id1 id of the second camera - * @param matches vector of matches that we have found - * - * This will perform a "robust match" between the two sets of points (slow but has great results). - * First we do a simple KNN match from 1to2 and 2to1, which is followed by a ratio check and symmetry check. - * Original code is from the "RobustMatcher" in the opencv examples, and seems to give very good results in the matches. - * https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.cpp - */ - void robust_match(std::vector &pts0, std::vector pts1, - cv::Mat &desc0, cv::Mat &desc1, size_t id0, size_t id1, std::vector &matches); - - // Helper functions for the robust_match function - // Original code is from the "RobustMatcher" in the opencv examples - // https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.cpp - void robust_ratio_test(std::vector> &matches); - void robust_symmetry_test(std::vector> &matches1, - std::vector> &matches2, - std::vector &good_matches); - - // Timing variables - boost::posix_time::ptime rT1, rT2, rT3, rT4, rT5, rT6, rT7; - - // Our orb extractor - cv::Ptr orb0 = cv::ORB::create(); - cv::Ptr orb1 = cv::ORB::create(); - - // Our descriptor matcher - cv::Ptr matcher = cv::DescriptorMatcher::create("BruteForce-Hamming"); - - // Parameters for our FAST grid detector - int threshold; - int grid_x; - int grid_y; - - // The ratio between two kNN matches, if that ratio is larger then this threshold - // then the two features are too close, so should be considered ambiguous/bad match - double knn_ratio; - - // Descriptor matrices - std::unordered_map desc_last; - - - }; - - -} - +/** + * @brief Descriptor-based visual tracking + * + * Here we use descriptor matching to track features from one frame to the next. + * We track both temporally, and across stereo pairs to get stereo constraints. + * Right now we use ORB descriptors as we have found it is the fastest when computing descriptors. + * Tracks are then rejected based on a ratio test and ransac. + */ +class TrackDescriptor : public TrackBase { + +public: + /** + * @brief Public default constructor + */ + TrackDescriptor() : TrackBase(), threshold(10), grid_x(8), grid_y(5), knn_ratio(0.75) {} + + /** + * @brief Public constructor with configuration variables + * @param numfeats number of features we want want to track (i.e. 
track 200 points from frame to frame)
+   * @param numaruco the max id of the aruco tags, so we ensure that we start our non-aruco features above this value
+   * @param fast_threshold FAST detection threshold
+   * @param gridx size of grid in the x-direction / u-direction
+   * @param gridy size of grid in the y-direction / v-direction
+   * @param knnratio matching ratio needed (smaller value forces top two descriptors during match to be more different)
+   */
+  explicit TrackDescriptor(int numfeats, int numaruco, int fast_threshold, int gridx, int gridy, double knnratio)
+      : TrackBase(numfeats, numaruco), threshold(fast_threshold), grid_x(gridx), grid_y(gridy), knn_ratio(knnratio) {}
+
+  /**
+   * @brief Process a new monocular image
+   * @param timestamp timestamp the new image occurred at
+   * @param img new cv::Mat grayscale image
+   * @param cam_id the camera id that this new image corresponds to
+   */
+  void feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) override;
+
+  /**
+   * @brief Process new stereo pair of images
+   * @param timestamp timestamp this pair occurred at (stereo is synchronised)
+   * @param img_left first grayscaled image
+   * @param img_right second grayscaled image
+   * @param cam_id_left first image camera id
+   * @param cam_id_right second image camera id
+   */
+  void feed_stereo(double timestamp, cv::Mat &img_left, cv::Mat &img_right, size_t cam_id_left, size_t cam_id_right) override;
+
+protected:
+  /**
+   * @brief Detects new features in the current image
+   * @param img0 image we will detect features on
+   * @param pts0 vector of extracted keypoints
+   * @param desc0 vector of the extracted descriptors
+   * @param ids0 vector of all new IDs
+   *
+   * Given a set of images, and their currently extracted features, this will try to add new features.
+   * We return all extracted descriptors here since we DO NOT need to do stereo tracking left to right.
+   * Our vector of IDs will be later overwritten when we match features temporally to the previous frame's features.
+   * See robust_match() for the matching.
+   */
+  void perform_detection_monocular(const cv::Mat &img0, std::vector<cv::KeyPoint> &pts0, cv::Mat &desc0, std::vector<size_t> &ids0);
+
+  /**
+   * @brief Detects new features in the current stereo pair
+   * @param img0 left image we will detect features on
+   * @param img1 right image we will detect features on
+   * @param pts0 left vector of new keypoints
+   * @param pts1 right vector of new keypoints
+   * @param desc0 left vector of extracted descriptors
+   * @param desc1 right vector of extracted descriptors
+   * @param cam_id0 id of the first camera
+   * @param cam_id1 id of the second camera
+   * @param ids0 left vector of all new IDs
+   * @param ids1 right vector of all new IDs
+   *
+   * This does the same logic as the perform_detection_monocular() function, but we also enforce stereo constraints.
+   * We also do STEREO matching from the left to right, and only return good matches that are found in both the left and right.
+   * Our vector of IDs will be later overwritten when we match features temporally to the previous frame's features.
+   * See robust_match() for the matching.
+   */
+  void perform_detection_stereo(const cv::Mat &img0, const cv::Mat &img1, std::vector<cv::KeyPoint> &pts0, std::vector<cv::KeyPoint> &pts1,
+                                cv::Mat &desc0, cv::Mat &desc1, size_t cam_id0, size_t cam_id1, std::vector<size_t> &ids0,
+                                std::vector<size_t> &ids1);
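To illustrate what knn_ratio gates in the matcher declared below (a toy sketch with invented distances): given the two nearest-neighbour distances d1 <= d2 for a descriptor, robust_ratio_test() keeps the match only when d1 / d2 <= knn_ratio, so a smaller ratio demands a more distinctive best match:

    float d1 = 30.0f, d2 = 50.0f;       // Hamming distances of the top-2 candidates
    double knn_ratio = 0.75;
    bool keep = (d1 / d2 <= knn_ratio); // 0.6 <= 0.75, so this match survives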
+  /**
+   * @brief Find matches between two keypoint+descriptor sets.
+   * @param pts0 first vector of keypoints
+   * @param pts1 second vector of keypoints
+   * @param desc0 first vector of descriptors
+   * @param desc1 second vector of descriptors
+   * @param id0 id of the first camera
+   * @param id1 id of the second camera
+   * @param matches vector of matches that we have found
+   *
+   * This will perform a "robust match" between the two sets of points (slow but has great results).
+   * First we do a simple KNN match from 1to2 and 2to1, which is followed by a ratio check and symmetry check.
+   * Original code is from the "RobustMatcher" in the opencv examples, and seems to give very good results in the matches.
+   * https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.cpp
+   */
+  void robust_match(std::vector<cv::KeyPoint> &pts0, std::vector<cv::KeyPoint> pts1, cv::Mat &desc0, cv::Mat &desc1, size_t id0, size_t id1,
+                    std::vector<cv::DMatch> &matches);
+
+  // Helper functions for the robust_match function
+  // Original code is from the "RobustMatcher" in the opencv examples
+  // https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.cpp
+  void robust_ratio_test(std::vector<std::vector<cv::DMatch>> &matches);
+  void robust_symmetry_test(std::vector<std::vector<cv::DMatch>> &matches1, std::vector<std::vector<cv::DMatch>> &matches2,
+                            std::vector<cv::DMatch> &good_matches);
+
+  // Timing variables
+  boost::posix_time::ptime rT1, rT2, rT3, rT4, rT5, rT6, rT7;
+
+  // Our orb extractor
+  cv::Ptr<cv::ORB> orb0 = cv::ORB::create();
+  cv::Ptr<cv::ORB> orb1 = cv::ORB::create();
+
+  // Our descriptor matcher
+  cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");
+
+  // Parameters for our FAST grid detector
+  int threshold;
+  int grid_x;
+  int grid_y;
+
+  // The ratio between two kNN matches; if that ratio is larger than this threshold
+  // then the two features are too close, so the match should be considered ambiguous/bad
+  double knn_ratio;
+
+  // Descriptor matrices
+  std::unordered_map<size_t, cv::Mat> desc_last;
+};
+
+} // namespace ov_core

#endif /* OV_CORE_TRACK_DESC_H */
diff --git a/ov_core/src/track/TrackKLT.cpp b/ov_core/src/track/TrackKLT.cpp
index c2fe96569..3e0053ac2 100644
--- a/ov_core/src/track/TrackKLT.cpp
+++ b/ov_core/src/track/TrackKLT.cpp
@@ -22,598 +22,568 @@ using namespace ov_core;
-
 void TrackKLT::feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) {
-    // Start timing
-    rT1 = boost::posix_time::microsec_clock::local_time();
-
-    // Lock this data feed for this camera
-    std::unique_lock<std::mutex> lck(mtx_feeds.at(cam_id));
-
-    // Histogram equalize
-    cv::equalizeHist(img, img);
-    //cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(eq_clip_limit, eq_win_size);
-    //clahe->apply(img, img);
-
-    // Extract the new image pyramid
-    std::vector<cv::Mat> imgpyr;
-    cv::buildOpticalFlowPyramid(img, imgpyr, win_size, pyr_levels);
-    rT2 = boost::posix_time::microsec_clock::local_time();
-
-    // If we didn't have any successful tracks last time, just extract this time
-    // This also handles, the tracking initalization on the first call to this extractor
-    if(pts_last[cam_id].empty()) {
-        // Detect new features
-        perform_detection_monocular(imgpyr, pts_last[cam_id], ids_last[cam_id]);
-        // Save the current image and pyramid
-        img_last[cam_id] = img.clone();
-        img_pyramid_last[cam_id] = imgpyr;
-        return;
-    }
-
-    // First we should make that the last images have enough features so we can do KLT
-    // This will "top-off" our number of tracks so always have a constant number
-    perform_detection_monocular(img_pyramid_last[cam_id], pts_last[cam_id], ids_last[cam_id]);
-    rT3 = 
diff --git a/ov_core/src/track/TrackKLT.cpp b/ov_core/src/track/TrackKLT.cpp
index c2fe96569..3e0053ac2 100644
--- a/ov_core/src/track/TrackKLT.cpp
+++ b/ov_core/src/track/TrackKLT.cpp
@@ -22,598 +22,568 @@
 using namespace ov_core;

-void TrackKLT::feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) {
-
-    // Start timing
-    rT1 =  boost::posix_time::microsec_clock::local_time();
-
-    // Lock this data feed for this camera
-    std::unique_lock<std::mutex> lck(mtx_feeds.at(cam_id));
-
-    // Histogram equalize
-    cv::equalizeHist(img, img);
-    //cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(eq_clip_limit, eq_win_size);
-    //clahe->apply(img, img);
-
-    // Extract the new image pyramid
-    std::vector<cv::Mat> imgpyr;
-    cv::buildOpticalFlowPyramid(img, imgpyr, win_size, pyr_levels);
-    rT2 =  boost::posix_time::microsec_clock::local_time();
-
-    // If we didn't have any successful tracks last time, just extract this time
-    // This also handles, the tracking initalization on the first call to this extractor
-    if(pts_last[cam_id].empty()) {
-        // Detect new features
-        perform_detection_monocular(imgpyr, pts_last[cam_id], ids_last[cam_id]);
-        // Save the current image and pyramid
-        img_last[cam_id] = img.clone();
-        img_pyramid_last[cam_id] = imgpyr;
-        return;
-    }
-
-    // First we should make that the last images have enough features so we can do KLT
-    // This will "top-off" our number of tracks so always have a constant number
-    perform_detection_monocular(img_pyramid_last[cam_id], pts_last[cam_id], ids_last[cam_id]);
-    rT3 =  boost::posix_time::microsec_clock::local_time();
+  // Start timing
+  rT1 = boost::posix_time::microsec_clock::local_time();

-    //===================================================================================
-    //===================================================================================
+  // Lock this data feed for this camera
+  std::unique_lock<std::mutex> lck(mtx_feeds.at(cam_id));

-    // Debug
-    //printf("current points = %d,%d\n",(int)pts_left_last.size(),(int)pts_right_last.size());
+  // Histogram equalize
+  cv::equalizeHist(img, img);
+  // cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(eq_clip_limit, eq_win_size);
+  // clahe->apply(img, img);

-    // Our return success masks, and predicted new features
-    std::vector<uchar> mask_ll;
-    std::vector<cv::KeyPoint> pts_left_new = pts_last[cam_id];
+  // Extract the new image pyramid
+  std::vector<cv::Mat> imgpyr;
+  cv::buildOpticalFlowPyramid(img, imgpyr, win_size, pyr_levels);
+  rT2 = boost::posix_time::microsec_clock::local_time();

-    // Lets track temporally
-    perform_matching(img_pyramid_last[cam_id],imgpyr,pts_last[cam_id],pts_left_new,cam_id,cam_id,mask_ll);
-    rT4 =  boost::posix_time::microsec_clock::local_time();
-
-    //===================================================================================
-    //===================================================================================
+  // If we didn't have any successful tracks last time, just extract this time
+  // This also handles the tracking initialization on the first call to this extractor
+  if (pts_last[cam_id].empty()) {
+    // Detect new features
+    perform_detection_monocular(imgpyr, pts_last[cam_id], ids_last[cam_id]);
+    // Save the current image and pyramid
+    img_last[cam_id] = img.clone();
+    img_pyramid_last[cam_id] = imgpyr;
+    return;
+  }

-    // If any of our mask is empty, that means we didn't have enough to do ransac, so just return
-    if(mask_ll.empty()) {
-        img_last[cam_id] = img.clone();
-        img_pyramid_last[cam_id] = imgpyr;
-        pts_last[cam_id].clear();
-        ids_last[cam_id].clear();
-        printf(RED "[KLT-EXTRACTOR]: Failed to get enough points to do RANSAC, resetting.....\n" RESET);
-        return;
-    }
+  // First we should make sure that the last images have enough features so we can do KLT
+  // This will "top-off" our number of tracks so we always have a constant number
+  perform_detection_monocular(img_pyramid_last[cam_id], pts_last[cam_id], ids_last[cam_id]);
+  rT3 = boost::posix_time::microsec_clock::local_time();

-    // Get our "good tracks"
-    std::vector<cv::KeyPoint> good_left;
-    std::vector<size_t> good_ids_left;
-
-    // Loop through all left points
-    for(size_t i=0; i<pts_left_new.size(); i++) {
-        // Ensure we do not have any bad KLT tracks (i.e., points are negative)
-        if(pts_left_new.at(i).pt.x < 0 || pts_left_new.at(i).pt.y < 0 || (int)pts_left_new.at(i).pt.x > img.cols || (int)pts_left_new.at(i).pt.y > img.rows)
-            continue;
-        // If it is a good track, and also tracked from left to right
-        if(mask_ll[i]) {
-            good_left.push_back(pts_left_new[i]);
-            good_ids_left.push_back(ids_last[cam_id][i]);
-        }
-    }
+  //===================================================================================
+  //===================================================================================
+
+  // Debug
+  // printf("current points = %d,%d\n",(int)pts_left_last.size(),(int)pts_right_last.size());

-    //===================================================================================
-    //===================================================================================
+  // Our return success masks, and predicted new features
+  std::vector<uchar> mask_ll;
+  std::vector<cv::KeyPoint> pts_left_new = pts_last[cam_id];
+
+  // Lets track temporally
+  perform_matching(img_pyramid_last[cam_id], imgpyr, pts_last[cam_id], pts_left_new, cam_id, cam_id, mask_ll);
+  rT4 = boost::posix_time::microsec_clock::local_time();
-    //===================================================================================
-    //===================================================================================
-
-    // Update our feature database, with theses new observations
-    for(size_t i=0; i<good_left.size(); i++) {
-        cv::Point2f npt_l = undistort_point(good_left.at(i).pt, cam_id);
-        database->update_feature(good_ids_left.at(i), timestamp, cam_id,
-                                 good_left.at(i).pt.x, good_left.at(i).pt.y,
-                                 npt_l.x, npt_l.y);
-    }
-
-    // Move forward in time
+  //===================================================================================
+  //===================================================================================
+
+  // If our mask is empty, that means we didn't have enough to do ransac, so just return
+  if (mask_ll.empty()) {
+    img_last[cam_id] = img.clone();
+    img_pyramid_last[cam_id] = imgpyr;
-    pts_last[cam_id] = good_left;
-    ids_last[cam_id] = good_ids_left;
-    rT5 =  boost::posix_time::microsec_clock::local_time();
-
-    // Timing information
-    //printf("[TIME-KLT]: %.4f seconds for pyramid\n",(rT2-rT1).total_microseconds() * 1e-6);
-    //printf("[TIME-KLT]: %.4f seconds for detection\n",(rT3-rT2).total_microseconds() * 1e-6);
-    //printf("[TIME-KLT]: %.4f seconds for temporal klt\n",(rT4-rT3).total_microseconds() * 1e-6);
-    //printf("[TIME-KLT]: %.4f seconds for feature DB update (%d features)\n",(rT5-rT4).total_microseconds() * 1e-6, (int)good_left.size());
-    //printf("[TIME-KLT]: %.4f seconds for total\n",(rT5-rT1).total_microseconds() * 1e-6);
-
-
+    pts_last[cam_id].clear();
+    ids_last[cam_id].clear();
+    printf(RED "[KLT-EXTRACTOR]: Failed to get enough points to do RANSAC, resetting.....\n" RESET);
+    return;
+  }
+
+  // Get our "good tracks"
+  std::vector<cv::KeyPoint> good_left;
+  std::vector<size_t> good_ids_left;
+
+  // Loop through all left points
+  for (size_t i = 0; i < pts_left_new.size(); i++) {
+    // Ensure we do not have any bad KLT tracks (i.e., points are negative)
+    if (pts_left_new.at(i).pt.x < 0 || pts_left_new.at(i).pt.y < 0 || (int)pts_left_new.at(i).pt.x > img.cols ||
+        (int)pts_left_new.at(i).pt.y > img.rows)
+      continue;
+    // If it is a good track, and also tracked from left to right
+    if (mask_ll[i]) {
+      good_left.push_back(pts_left_new[i]);
+      good_ids_left.push_back(ids_last[cam_id][i]);
+    }
+  }
+
+  //===================================================================================
+  //===================================================================================
+
+  // Update our feature database, with these new observations
+  for (size_t i = 0; i < good_left.size(); i++) {
+    cv::Point2f npt_l = undistort_point(good_left.at(i).pt, cam_id);
+    database->update_feature(good_ids_left.at(i), timestamp, cam_id, good_left.at(i).pt.x, good_left.at(i).pt.y, npt_l.x, npt_l.y);
+  }
+
+  // Move forward in time
+  img_last[cam_id] = img.clone();
+  img_pyramid_last[cam_id] = imgpyr;
+  pts_last[cam_id] = good_left;
+  ids_last[cam_id] = good_ids_left;
+  rT5 = boost::posix_time::microsec_clock::local_time();
+
+  // Timing information
+  // printf("[TIME-KLT]: %.4f seconds for pyramid\n",(rT2-rT1).total_microseconds() * 1e-6);
+  // printf("[TIME-KLT]: %.4f seconds for detection\n",(rT3-rT2).total_microseconds() * 1e-6);
+  // printf("[TIME-KLT]: %.4f seconds for temporal klt\n",(rT4-rT3).total_microseconds() * 1e-6);
+  // printf("[TIME-KLT]: %.4f seconds for feature DB update (%d features)\n",(rT5-rT4).total_microseconds() * 1e-6, (int)good_left.size());
+  // printf("[TIME-KLT]: %.4f seconds for total\n",(rT5-rT1).total_microseconds() * 1e-6);
+}

-void TrackKLT::feed_stereo(double timestamp, cv::Mat &img_leftin, cv::Mat &img_rightin, size_t cam_id_left, size_t cam_id_right) {
-
-    // Start timing
-    rT1 =  boost::posix_time::microsec_clock::local_time();
-
-    //
Lock this data feed for this camera - std::unique_lock lck1(mtx_feeds.at(cam_id_left)); - std::unique_lock lck2(mtx_feeds.at(cam_id_right)); - - // Histogram equalize and then - // Extract image pyramids (boost seems to require us to put all the arguments even if there are defaults....) - cv::Mat img_left, img_right; - std::vector imgpyr_left, imgpyr_right; - //cv::Ptr clahe = cv::createCLAHE(eq_clip_limit, eq_win_size); - parallel_for_(cv::Range(0, 2), LambdaBody([&](const cv::Range& range) { - for (int i = range.start; i < range.end; i++) { - bool is_left = (i == 0); - // Histogram equalize - cv::equalizeHist( - is_left ? img_leftin : img_rightin, - is_left ? img_left : img_right - ); - //clahe->apply((is_left ? img_leftin : img_rightin), (is_left ? img_left : img_right)); - // Extract image pyramids (boost seems to require us to put all the arguments even if there are defaults....) - cv::buildOpticalFlowPyramid( - is_left ? img_left : img_right, - is_left ? imgpyr_left : imgpyr_right, - win_size, pyr_levels - ); - } - })); - rT2 = boost::posix_time::microsec_clock::local_time(); - - // If we didn't have any successful tracks last time, just extract this time - // This also handles, the tracking initalization on the first call to this extractor - if(pts_last[cam_id_left].empty() || pts_last[cam_id_right].empty()) { - // Track into the new image - perform_detection_stereo(imgpyr_left, imgpyr_right, pts_last[cam_id_left], pts_last[cam_id_right], ids_last[cam_id_left], ids_last[cam_id_right]); - // Save the current image and pyramid - img_last[cam_id_left] = img_left; - img_last[cam_id_right] = img_right; - img_pyramid_last[cam_id_left] = imgpyr_left; - img_pyramid_last[cam_id_right] = imgpyr_right; - return; - } - - // First we should make that the last images have enough features so we can do KLT - // This will "top-off" our number of tracks so always have a constant number - perform_detection_stereo(img_pyramid_last[cam_id_left], img_pyramid_last[cam_id_right], - pts_last[cam_id_left], pts_last[cam_id_right], - ids_last[cam_id_left], ids_last[cam_id_right]); - rT3 = boost::posix_time::microsec_clock::local_time(); - - - //=================================================================================== - //=================================================================================== - - // Our return success masks, and predicted new features - std::vector mask_ll, mask_rr; - std::vector pts_left_new = pts_last[cam_id_left]; - std::vector pts_right_new = pts_last[cam_id_right]; - - // Lets track temporally - parallel_for_(cv::Range(0, 2), LambdaBody([&](const cv::Range& range) { - for (int i = range.start; i < range.end; i++) { - bool is_left = (i == 0); - perform_matching( - img_pyramid_last[is_left ? cam_id_left : cam_id_right], - is_left ? imgpyr_left : imgpyr_right, - pts_last[is_left ? cam_id_left : cam_id_right], - is_left ? pts_left_new : pts_right_new, - is_left ? cam_id_left : cam_id_right, - is_left ? cam_id_left : cam_id_right, - is_left ? mask_ll : mask_rr - ); - } - })); - rT4 = boost::posix_time::microsec_clock::local_time(); - - //=================================================================================== - //=================================================================================== - - - // left to right matching - // TODO: we should probably still do this to reject outliers - // TODO: maybe we should collect all tracks that are in both frames and make they pass this? 
- //std::vector mask_lr; - //perform_matching(imgpyr_left, imgpyr_right, pts_left_new, pts_right_new, cam_id_left, cam_id_right, mask_lr); - rT5 = boost::posix_time::microsec_clock::local_time(); - - - //=================================================================================== - //=================================================================================== - - // If any of our masks are empty, that means we didn't have enough to do ransac, so just return - if(mask_ll.empty() || mask_rr.empty()) { - img_last[cam_id_left] = std::move(img_left); - img_last[cam_id_right] = std::move(img_right); - img_pyramid_last[cam_id_left] = std::move(imgpyr_left); - img_pyramid_last[cam_id_right] = std::move(imgpyr_right); - pts_last[cam_id_left].clear(); - pts_last[cam_id_right].clear(); - ids_last[cam_id_left].clear(); - ids_last[cam_id_right].clear(); - printf(RED "[KLT-EXTRACTOR]: Failed to get enough points to do RANSAC, resetting....." RESET); - return; - } - - // Get our "good tracks" - std::vector good_left, good_right; - std::vector good_ids_left, good_ids_right; - - // Loop through all left points - for(size_t i=0; i img_left.cols || (int)pts_left_new.at(i).pt.y > img_left.rows) - continue; - // See if we have the same feature in the right - bool found_right = false; - size_t index_right = 0; - for(size_t n=0; n img_right.cols || (int)pts_right_new.at(index_right).pt.y > img_right.rows) - continue; - good_left.push_back(pts_left_new.at(i)); - good_right.push_back(pts_right_new.at(index_right)); - good_ids_left.push_back(ids_last[cam_id_left].at(i)); - good_ids_right.push_back(ids_last[cam_id_right].at(index_right)); - //std::cout << "adding to stereo - " << ids_last[cam_id_left].at(i) << " , " << ids_last[cam_id_right].at(index_right) << std::endl; - } else if(mask_ll[i]) { - good_left.push_back(pts_left_new.at(i)); - good_ids_left.push_back(ids_last[cam_id_left].at(i)); - //std::cout << "adding to left - " << ids_last[cam_id_left].at(i) << std::endl; - } - } - - // Loop through all right points - for(size_t i=0; i img_right.cols || (int)pts_right_new.at(i).pt.y > img_right.rows) - continue; - // See if we have the same feature in the right - bool added_already = (std::find(good_ids_right.begin(),good_ids_right.end(),ids_last[cam_id_right].at(i))!=good_ids_right.end()); - // If it has not already been added as a good feature, add it as a mono track - if(mask_rr[i] && !added_already) { - good_right.push_back(pts_right_new.at(i)); - good_ids_right.push_back(ids_last[cam_id_right].at(i)); - //std::cout << "adding to right - " << ids_last[cam_id_right].at(i) << std::endl; - } - } - - //=================================================================================== - //=================================================================================== - - // Update our feature database, with theses new observations - for(size_t i=0; iupdate_feature(good_ids_left.at(i), timestamp, cam_id_left, - good_left.at(i).pt.x, good_left.at(i).pt.y, - npt_l.x, npt_l.y); - } - for(size_t i=0; iupdate_feature(good_ids_right.at(i), timestamp, cam_id_right, - good_right.at(i).pt.x, good_right.at(i).pt.y, - npt_r.x, npt_r.y); - } - - // Move forward in time + // Start timing + rT1 = boost::posix_time::microsec_clock::local_time(); + + // Lock this data feed for this camera + std::unique_lock lck1(mtx_feeds.at(cam_id_left)); + std::unique_lock lck2(mtx_feeds.at(cam_id_right)); + + // Histogram equalize and then + // Extract image pyramids (boost seems to require us to put all the arguments even if 
there are defaults....)
+  cv::Mat img_left, img_right;
+  std::vector<cv::Mat> imgpyr_left, imgpyr_right;
+  // cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(eq_clip_limit, eq_win_size);
+  parallel_for_(cv::Range(0, 2), LambdaBody([&](const cv::Range &range) {
+                  for (int i = range.start; i < range.end; i++) {
+                    bool is_left = (i == 0);
+                    // Histogram equalize
+                    cv::equalizeHist(is_left ? img_leftin : img_rightin, is_left ? img_left : img_right);
+                    // clahe->apply((is_left ? img_leftin : img_rightin), (is_left ? img_left : img_right));
+                    // Extract image pyramids (boost seems to require us to put all the arguments even if there are defaults....)
+                    cv::buildOpticalFlowPyramid(is_left ? img_left : img_right, is_left ? imgpyr_left : imgpyr_right, win_size, pyr_levels);
+                  }
+                }));
+  rT2 = boost::posix_time::microsec_clock::local_time();
+
+  // If we didn't have any successful tracks last time, just extract this time
+  // This also handles the tracking initialization on the first call to this extractor
+  if (pts_last[cam_id_left].empty() || pts_last[cam_id_right].empty()) {
+    // Track into the new image
+    perform_detection_stereo(imgpyr_left, imgpyr_right, pts_last[cam_id_left], pts_last[cam_id_right], ids_last[cam_id_left],
+                             ids_last[cam_id_right]);
+    // Save the current image and pyramid
+    img_last[cam_id_left] = img_left;
+    img_last[cam_id_right] = img_right;
+    img_pyramid_last[cam_id_left] = imgpyr_left;
+    img_pyramid_last[cam_id_right] = imgpyr_right;
+    return;
+  }
+
+  // First we should make sure that the last images have enough features so we can do KLT
+  // This will "top-off" our number of tracks so we always have a constant number
+  perform_detection_stereo(img_pyramid_last[cam_id_left], img_pyramid_last[cam_id_right], pts_last[cam_id_left], pts_last[cam_id_right],
+                           ids_last[cam_id_left], ids_last[cam_id_right]);
+  rT3 = boost::posix_time::microsec_clock::local_time();
+
+  //===================================================================================
+  //===================================================================================
+
+  // Our return success masks, and predicted new features
+  std::vector<uchar> mask_ll, mask_rr;
+  std::vector<cv::KeyPoint> pts_left_new = pts_last[cam_id_left];
+  std::vector<cv::KeyPoint> pts_right_new = pts_last[cam_id_right];
+
+  // Lets track temporally
+  parallel_for_(cv::Range(0, 2), LambdaBody([&](const cv::Range &range) {
+                  for (int i = range.start; i < range.end; i++) {
+                    bool is_left = (i == 0);
+                    perform_matching(img_pyramid_last[is_left ? cam_id_left : cam_id_right], is_left ? imgpyr_left : imgpyr_right,
+                                     pts_last[is_left ? cam_id_left : cam_id_right], is_left ? pts_left_new : pts_right_new,
+                                     is_left ? cam_id_left : cam_id_right, is_left ? cam_id_left : cam_id_right,
+                                     is_left ? mask_ll : mask_rr);
+                  }
+                }));
+  rT4 = boost::posix_time::microsec_clock::local_time();
+
+  //===================================================================================
+  //===================================================================================
+
+  // left to right matching
+  // TODO: we should probably still do this to reject outliers
+  // TODO: maybe we should collect all tracks that are in both frames and make sure they pass this?
+  // std::vector<uchar> mask_lr;
+  // perform_matching(imgpyr_left, imgpyr_right, pts_left_new, pts_right_new, cam_id_left, cam_id_right, mask_lr);
+  rT5 = boost::posix_time::microsec_clock::local_time();
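+  // Sketch of the above TODOs (hypothetical, not the authors' stated design):
+  // first gather the subset of features tracked in BOTH frames by id, so the
+  // vectors handed to perform_matching() have equal length, then reject ids
+  // whose left-right consistency check fails.
+  // std::vector<cv::KeyPoint> both_left, both_right;
+  // std::vector<size_t> both_ids;
+  // for (size_t i = 0; i < ids_last[cam_id_left].size(); i++) {
+  //   for (size_t n = 0; n < ids_last[cam_id_right].size(); n++) {
+  //     if (ids_last[cam_id_left].at(i) == ids_last[cam_id_right].at(n)) {
+  //       both_left.push_back(pts_left_new.at(i));
+  //       both_right.push_back(pts_right_new.at(n));
+  //       both_ids.push_back(ids_last[cam_id_left].at(i));
+  //     }
+  //   }
+  // }
+  // std::vector<uchar> mask_lr;
+  // perform_matching(imgpyr_left, imgpyr_right, both_left, both_right, cam_id_left, cam_id_right, mask_lr);
+  // (ids whose mask_lr entry is zero would then be dropped from the good tracks below)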
+  //===================================================================================
+  //===================================================================================
+
+  // If any of our masks are empty, that means we didn't have enough to do ransac, so just return
+  if (mask_ll.empty() || mask_rr.empty()) {
+    img_last[cam_id_left] = std::move(img_left);
+    img_last[cam_id_right] = std::move(img_right);
+    img_pyramid_last[cam_id_left] = std::move(imgpyr_left);
+    img_pyramid_last[cam_id_right] = std::move(imgpyr_right);
+    pts_last[cam_id_left].clear();
+    pts_last[cam_id_right].clear();
+    ids_last[cam_id_left].clear();
+    ids_last[cam_id_right].clear();
+    printf(RED "[KLT-EXTRACTOR]: Failed to get enough points to do RANSAC, resetting.....\n" RESET);
+    return;
+  }
+
+  // Get our "good tracks"
+  std::vector<cv::KeyPoint> good_left, good_right;
+  std::vector<size_t> good_ids_left, good_ids_right;
+
+  // Loop through all left points
+  for (size_t i = 0; i < pts_left_new.size(); i++) {
+    // Ensure we do not have any bad KLT tracks (i.e., points are negative)
+    if (pts_left_new.at(i).pt.x < 0 || pts_left_new.at(i).pt.y < 0 || (int)pts_left_new.at(i).pt.x > img_left.cols ||
+        (int)pts_left_new.at(i).pt.y > img_left.rows)
+      continue;
+    // See if we have the same feature in the right
+    bool found_right = false;
+    size_t index_right = 0;
+    for (size_t n = 0; n < ids_last[cam_id_right].size(); n++) {
+      if (ids_last[cam_id_left].at(i) == ids_last[cam_id_right].at(n)) {
+        found_right = true;
+        index_right = n;
+        break;
+      }
+    }
+    // If it is a good track, and also tracked from left to right
+    // Else track it as a mono feature in just the left image
+    if (mask_ll[i] && found_right && mask_rr[index_right]) {
+      // Ensure we do not have any bad KLT tracks (i.e., points are negative)
+      if (pts_right_new.at(index_right).pt.x < 0 || pts_right_new.at(index_right).pt.y < 0 ||
+          (int)pts_right_new.at(index_right).pt.x > img_right.cols || (int)pts_right_new.at(index_right).pt.y > img_right.rows)
+        continue;
+      good_left.push_back(pts_left_new.at(i));
+      good_right.push_back(pts_right_new.at(index_right));
+      good_ids_left.push_back(ids_last[cam_id_left].at(i));
+      good_ids_right.push_back(ids_last[cam_id_right].at(index_right));
+      // std::cout << "adding to stereo - " << ids_last[cam_id_left].at(i) << " , " << ids_last[cam_id_right].at(index_right) << std::endl;
+    } else if (mask_ll[i]) {
+      good_left.push_back(pts_left_new.at(i));
+      good_ids_left.push_back(ids_last[cam_id_left].at(i));
+      // std::cout << "adding to left - " << ids_last[cam_id_left].at(i) << std::endl;
+    }
+  }
+
+  // Loop through all right points
+  for (size_t i = 0; i < pts_right_new.size(); i++) {
+    // Ensure we do not have any bad KLT tracks (i.e., points are negative)
+    if (pts_right_new.at(i).pt.x < 0 || pts_right_new.at(i).pt.y < 0 || (int)pts_right_new.at(i).pt.x > img_right.cols ||
+        (int)pts_right_new.at(i).pt.y > img_right.rows)
+      continue;
+    // See if this feature was already added above as a stereo pair
+    bool added_already = (std::find(good_ids_right.begin(), good_ids_right.end(), ids_last[cam_id_right].at(i)) != good_ids_right.end());
+    // If it has not already been added as a good feature, add it as a mono track
+    if (mask_rr[i] && !added_already) {
+      good_right.push_back(pts_right_new.at(i));
+      good_ids_right.push_back(ids_last[cam_id_right].at(i));
+      // std::cout << "adding to right - " << ids_last[cam_id_right].at(i) << std::endl;
+    }
+  }
+
+  //===================================================================================
+  //===================================================================================
+
+  // Update our feature database, with these new observations
+  for (size_t i = 0; i < good_left.size(); i++) {
+    cv::Point2f npt_l = undistort_point(good_left.at(i).pt, cam_id_left);
+    database->update_feature(good_ids_left.at(i), timestamp, cam_id_left, good_left.at(i).pt.x, good_left.at(i).pt.y, npt_l.x, npt_l.y);
+  }
+  for (size_t i = 0; i < good_right.size(); i++) {
+    cv::Point2f npt_r = undistort_point(good_right.at(i).pt, cam_id_right);
+    database->update_feature(good_ids_right.at(i), timestamp, cam_id_right, good_right.at(i).pt.x, good_right.at(i).pt.y, npt_r.x, npt_r.y);
+  }
+
+  // Move forward in time
+  img_last[cam_id_left] = std::move(img_left);
+  img_last[cam_id_right] = std::move(img_right);
+  img_pyramid_last[cam_id_left] = std::move(imgpyr_left);
+  img_pyramid_last[cam_id_right] = std::move(imgpyr_right);
+  pts_last[cam_id_left] = std::move(good_left);
+  pts_last[cam_id_right] = std::move(good_right);
+  ids_last[cam_id_left] = std::move(good_ids_left);
+  ids_last[cam_id_right] = std::move(good_ids_right);
+  rT6 = boost::posix_time::microsec_clock::local_time();
+
+  // Timing information
+  // printf("[TIME-KLT]: %.4f seconds for pyramid\n",(rT2-rT1).total_microseconds() * 1e-6);
+  // printf("[TIME-KLT]: %.4f seconds for detection\n",(rT3-rT2).total_microseconds() * 1e-6);
+  // printf("[TIME-KLT]: %.4f seconds for temporal klt\n",(rT4-rT3).total_microseconds() * 1e-6);
+  // printf("[TIME-KLT]: %.4f seconds for stereo klt\n",(rT5-rT4).total_microseconds() * 1e-6);
+  // printf("[TIME-KLT]: %.4f seconds for feature DB update (%d features)\n",(rT6-rT5).total_microseconds() * 1e-6, (int)good_left.size());
+  // printf("[TIME-KLT]: %.4f seconds for total\n",(rT6-rT1).total_microseconds() * 1e-6);
+}
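+// Sketch (illustrative only, not part of this patch): the occupancy-grid
+// suppression used by both perform_detection_* functions below boils down to
+// letting each kept feature claim one min_px_dist-sized cell, and rejecting
+// any later candidate whose cell is already claimed:
+//
+//   Eigen::MatrixXi grid = Eigen::MatrixXi::Zero(rows / min_px_dist + 1, cols / min_px_dist + 1);
+//   std::vector<cv::KeyPoint> kept;
+//   for (const auto &kpt : candidates) {
+//     int x = (int)(kpt.pt.x / min_px_dist), y = (int)(kpt.pt.y / min_px_dist);
+//     if (x < 0 || y < 0 || y >= grid.rows() || x >= grid.cols() || grid(y, x) == 1)
+//       continue; // out of bounds, or too close to an already-kept feature
+//     grid(y, x) = 1;
+//     kept.push_back(kpt);
+//   }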
+void TrackKLT::perform_detection_monocular(const std::vector<cv::Mat> &img0pyr, std::vector<cv::KeyPoint> &pts0,
+                                           std::vector<size_t> &ids0) {
+
+  // Create a 2D occupancy grid for this current image
+  // Note that we scale this down, so that each grid cell is equal to a set of pixels
+  // This means that we will reject candidate points that are closer than min_px_dist pixels to an existing feature
+  // TODO: figure out why I need to add the windowsize of the klt to handle features that are outside the image bound
+  // TODO: I assume this is because klt of features at the corners is not really well defined, thus if it doesn't get a match it will be
+  // out-of-bounds
+  Eigen::MatrixXi grid_2d =
+      Eigen::MatrixXi::Zero((int)(img0pyr.at(0).rows / min_px_dist) + 15, (int)(img0pyr.at(0).cols / min_px_dist) + 15);
+  auto it0 = pts0.begin();
+  auto it2 = ids0.begin();
+  while (it0 != pts0.end()) {
+    // Get current left keypoint
+    cv::KeyPoint kpt = *it0;
+    // Check if this keypoint is near another point
+    if (grid_2d((int)(kpt.pt.y / min_px_dist), (int)(kpt.pt.x / min_px_dist)) == 1) {
+      it0 = pts0.erase(it0);
+      it2 = ids0.erase(it2);
+      continue;
+    }
+    // Else we are good, move forward to the next point
+    grid_2d((int)(kpt.pt.y / min_px_dist), (int)(kpt.pt.x / min_px_dist)) = 1;
+    it0++;
+    it2++;
+  }
+
+  // First compute how many more features we need to extract from this image
+  int num_featsneeded = num_features - (int)pts0.size();
+
+  // If we don't need any features, just return
+  if (num_featsneeded < 0.2 * num_features)
+    return;
+
+  // Extract our features (use fast with griding)
+  std::vector<cv::KeyPoint> pts0_ext;
+  Grider_FAST::perform_griding(img0pyr.at(0), pts0_ext, num_featsneeded, grid_x, grid_y, threshold, true);
+
+  // Now, reject features that are too close to a current feature
+  std::vector<cv::KeyPoint> kpts0_new;
+  std::vector<cv::Point2f> pts0_new;
+  for (auto &kpt : pts0_ext) {
+    // Check that it is in bounds
+    bool outof_bounds_0 = (kpt.pt.x < 0 || kpt.pt.y < 0 || kpt.pt.x >= img0pyr.at(0).cols || kpt.pt.y >= img0pyr.at(0).rows);
+    if (outof_bounds_0)
+      continue;
+    // See if there is a point at this location
+    if (grid_2d((int)(kpt.pt.y / min_px_dist), (int)(kpt.pt.x / min_px_dist)) == 1)
+      continue;
+    // Else lets add it!
+    kpts0_new.push_back(kpt);
+    pts0_new.push_back(kpt.pt);
+    grid_2d((int)(kpt.pt.y / min_px_dist), (int)(kpt.pt.x / min_px_dist)) = 1;
+  }
+
+  // Loop through and record only ones that are valid
+  // NOTE: if we multi-thread this, the atomic id counter can cause some randomness due to multiple threads detecting features
+  // NOTE: this is due to the fact that we select update features based on feat id
+  // NOTE: thus the order will matter since we try to select oldest (smallest id) to update with
+  // NOTE: not sure how to remove... maybe a better way?
+  for (size_t i = 0; i < pts0_new.size(); i++) {
+    // update the uv coordinates
+    kpts0_new.at(i).pt = pts0_new.at(i);
+    // append the new uv coordinate
+    pts0.push_back(kpts0_new.at(i));
+    // move id forward and append this new point
+    size_t temp = ++currid;
+    ids0.push_back(temp);
+  }
+}
+
+void TrackKLT::perform_detection_stereo(const std::vector<cv::Mat> &img0pyr, const std::vector<cv::Mat> &img1pyr,
+                                        std::vector<cv::KeyPoint> &pts0, std::vector<cv::KeyPoint> &pts1, std::vector<size_t> &ids0,
+                                        std::vector<size_t> &ids1) {
+
+  // Create a 2D occupancy grid for this current image
+  // Note that we scale this down, so that each grid cell is equal to a set of pixels
+  // This means that we will reject candidate points that are closer than min_px_dist pixels to an existing feature
+  // TODO: figure out why I need to add the windowsize of the klt to handle features that are outside the image bound
+  // TODO: I assume this is because klt of features at the corners is not really well defined, thus if it doesn't get a match it will be
+  // out-of-bounds
+  Eigen::MatrixXi grid_2d_0 =
+      Eigen::MatrixXi::Zero((int)(img0pyr.at(0).rows / min_px_dist) + 15, (int)(img0pyr.at(0).cols / min_px_dist) + 15);
+  Eigen::MatrixXi grid_2d_1 =
+      Eigen::MatrixXi::Zero((int)(img1pyr.at(0).rows / min_px_dist) + 15, (int)(img1pyr.at(0).cols / min_px_dist) + 15);
+  auto it0 = pts0.begin();
+  auto it1 = ids0.begin();
+  while (it0 != pts0.end()) {
+    // Get current left keypoint
+    cv::KeyPoint kpt = *it0;
+    // Check if this keypoint is near another point
+    if (grid_2d_0((int)(kpt.pt.y / min_px_dist), (int)(kpt.pt.x / min_px_dist)) == 1) {
+      it0 = pts0.erase(it0);
+      it1 = ids0.erase(it1);
+      continue;
+    }
+    // Else we are good, move forward to the next point
+    grid_2d_0((int)(kpt.pt.y / min_px_dist),
(int)(kpt.pt.x / min_px_dist)) = 1; + it0++; + it1++; + } + it0 = pts1.begin(); + it1 = ids1.begin(); + while (it0 != pts1.end()) { + // Get current right keypoint + cv::KeyPoint kpt = *it0; + // Check if this keypoint is near another point + if (grid_2d_1((int)(kpt.pt.y / min_px_dist), (int)(kpt.pt.x / min_px_dist)) == 1) { + it0 = pts1.erase(it0); + it1 = ids1.erase(it1); + continue; } + // Else we are good, move forward to the next point + grid_2d_1((int)(kpt.pt.y / min_px_dist), (int)(kpt.pt.x / min_px_dist)) = 1; + it0++; + it1++; + } - // First compute how many more features we need to extract from this image - int num_featsneeded = num_features - (int)pts0.size(); + // First compute how many more features we need to extract from this image + int num_featsneeded_0 = num_features - (int)pts0.size(); - // If we don't need any features, just return - if(num_featsneeded < 0.2*num_features) - return; + // LEFT: if we need features we should extract them in the current frame + // LEFT: we will also try to track them from this frame over to the right frame + // LEFT: in the case that we have two features that are the same, then we should merge them + if (num_featsneeded_0 > 0.2 * num_features) { // Extract our features (use fast with griding) std::vector pts0_ext; - Grider_FAST::perform_griding(img0pyr.at(0), pts0_ext, num_featsneeded, grid_x, grid_y, threshold, true); + Grider_FAST::perform_griding(img0pyr.at(0), pts0_ext, num_featsneeded_0, grid_x, grid_y, threshold, true); // Now, reject features that are close a current feature std::vector kpts0_new; std::vector pts0_new; - for(auto& kpt : pts0_ext) { - // Check that it is in bounds - bool outof_bounds_0 = (kpt.pt.x < 0 || kpt.pt.y < 0 || kpt.pt.x >= img0pyr.at(0).cols || kpt.pt.y >= img0pyr.at(0).rows); - if(outof_bounds_0) - continue; - // See if there is a point at this location - if(grid_2d((int)(kpt.pt.y/min_px_dist),(int)(kpt.pt.x/min_px_dist)) == 1) - continue; - // Else lets add it! - kpts0_new.push_back(kpt); - pts0_new.push_back(kpt.pt); - grid_2d((int)(kpt.pt.y/min_px_dist),(int)(kpt.pt.x/min_px_dist)) = 1; - } - - // Loop through and record only ones that are valid - // NOTE: if we multi-thread this atomic can cause some randomness due to multiple thread detecting features - // NOTE: this is due to the fact that we select update features based on feat id - // NOTE: thus the order will matter since we try to select oldest (smallest id) to update with - // NOTE: not sure how to remove... maybe a better way? 
- for(size_t i=0; i &img0pyr, const std::vector &img1pyr, - std::vector &pts0, std::vector &pts1, - std::vector &ids0, std::vector &ids1) { - - // Create a 2D occupancy grid for this current image - // Note that we scale this down, so that each grid point is equal to a set of pixels - // This means that we will reject points that less then grid_px_size points away then existing features - // TODO: figure out why I need to add the windowsize of the klt to handle features that are outside the image bound - // TODO: I assume this is because klt of features at the corners is not really well defined, thus if it doesn't get a match it will be out-of-bounds - Eigen::MatrixXi grid_2d_0 = Eigen::MatrixXi::Zero((int)(img0pyr.at(0).rows/min_px_dist)+15, (int)(img0pyr.at(0).cols/min_px_dist)+15); - Eigen::MatrixXi grid_2d_1 = Eigen::MatrixXi::Zero((int)(img1pyr.at(0).rows/min_px_dist)+15, (int)(img1pyr.at(0).cols/min_px_dist)+15); - auto it0 = pts0.begin(); - auto it1 = ids0.begin(); - while(it0 != pts0.end()) { - // Get current left keypoint - cv::KeyPoint kpt = *it0; - // Check if this keypoint is near another point - if(grid_2d_0((int)(kpt.pt.y/min_px_dist),(int)(kpt.pt.x/min_px_dist)) == 1) { - it0 = pts0.erase(it0); - it1 = ids0.erase(it1); - continue; + // TODO: Project points from the left frame into the right frame + // TODO: This will not work for large baseline systems..... + std::vector kpts1_new; + std::vector pts1_new; + kpts1_new = kpts0_new; + pts1_new = pts0_new; + + // If we have points, do KLT tracking to get the valid projections + if (!pts0_new.empty()) { + + // Do our KLT tracking from the left to the right frame of reference + // Note: we have a pretty big window size here since our projection might be bad + // Note: but this might cause failure in cases of repeated textures (eg. 
checkerboard) + std::vector mask; + std::vector error; + cv::TermCriteria term_crit = cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 10, 0.01); + cv::calcOpticalFlowPyrLK(img0pyr, img1pyr, pts0_new, pts1_new, mask, error, win_size, pyr_levels, term_crit, + cv::OPTFLOW_USE_INITIAL_FLOW); + + // Loop through and record only ones that are valid + for (size_t i = 0; i < pts0_new.size(); i++) { + + // Check that our tracks are in bounds + bool outof_bounds_0 = (pts0_new.at(i).x < 0 || pts0_new.at(i).y < 0 || pts0_new.at(i).x >= img0pyr.at(0).cols || + pts0_new.at(i).y >= img0pyr.at(0).rows); + bool outof_bounds_1 = (pts1_new.at(i).x < 0 || pts1_new.at(i).y < 0 || pts1_new.at(i).x >= img1pyr.at(0).cols || + pts1_new.at(i).y >= img1pyr.at(0).rows); + + // If this is not already in the right image, then we should treat it as a stereo + // Otherwise we will treat this as just a monocular track of the feature + // TODO: we should check to see if we can combine this new feature and the one in the right + if (!outof_bounds_0 && mask[i] == 1 && !outof_bounds_1 && + !(grid_2d_1((int)(pts1_new.at(i).y / min_px_dist), (int)(pts1_new.at(i).x / min_px_dist)) == 1)) { + // update the uv coordinates + kpts0_new.at(i).pt = pts0_new.at(i); + kpts1_new.at(i).pt = pts1_new.at(i); + // append the new uv coordinate + pts0.push_back(kpts0_new.at(i)); + pts1.push_back(kpts1_new.at(i)); + // move id forward and append this new point + size_t temp = ++currid; + ids0.push_back(temp); + ids1.push_back(temp); + } else if (!outof_bounds_0) { + // update the uv coordinates + kpts0_new.at(i).pt = pts0_new.at(i); + // append the new uv coordinate + pts0.push_back(kpts0_new.at(i)); + // move id forward and append this new point + size_t temp = ++currid; + ids0.push_back(temp); } - // Else we are good, move forward to the next point - grid_2d_0((int)(kpt.pt.y/min_px_dist),(int)(kpt.pt.x/min_px_dist)) = 1; - it0++; - it1++; + } } - it0 = pts1.begin(); - it1 = ids1.begin(); - while(it0 != pts1.end()) { - // Get current right keypoint - cv::KeyPoint kpt = *it0; - // Check if this keypoint is near another point - if(grid_2d_1((int)(kpt.pt.y/min_px_dist),(int)(kpt.pt.x/min_px_dist)) == 1) { - it0 = pts1.erase(it0); - it1 = ids1.erase(it1); - continue; - } - // Else we are good, move forward to the next point - grid_2d_1((int)(kpt.pt.y/min_px_dist),(int)(kpt.pt.x/min_px_dist)) = 1; - it0++; - it1++; - } - - // First compute how many more features we need to extract from this image - int num_featsneeded_0 = num_features - (int)pts0.size(); - - // LEFT: if we need features we should extract them in the current frame - // LEFT: we will also try to track them from this frame over to the right frame - // LEFT: in the case that we have two features that are the same, then we should merge them - if(num_featsneeded_0 > 0.2*num_features) { - - // Extract our features (use fast with griding) - std::vector pts0_ext; - Grider_FAST::perform_griding(img0pyr.at(0), pts0_ext, num_featsneeded_0, grid_x, grid_y, threshold, true); - - // Now, reject features that are close a current feature - std::vector kpts0_new; - std::vector pts0_new; - for(auto& kpt : pts0_ext) { - // See if there is a point at this location - if(grid_2d_0((int)(kpt.pt.y/min_px_dist),(int)(kpt.pt.x/min_px_dist)) == 1) - continue; - // Else lets add it! 
- kpts0_new.push_back(kpt); - pts0_new.push_back(kpt.pt); - grid_2d_0((int)(kpt.pt.y/min_px_dist),(int)(kpt.pt.x/min_px_dist)) = 1; - } + } - // TODO: Project points from the left frame into the right frame - // TODO: This will not work for large baseline systems..... - std::vector kpts1_new; - std::vector pts1_new; - kpts1_new = kpts0_new; - pts1_new = pts0_new; - - // If we have points, do KLT tracking to get the valid projections - if(!pts0_new.empty()) { - - // Do our KLT tracking from the left to the right frame of reference - // Note: we have a pretty big window size here since our projection might be bad - // Note: but this might cause failure in cases of repeated textures (eg. checkerboard) - std::vector mask; - std::vector error; - cv::TermCriteria term_crit = cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 10, 0.01); - cv::calcOpticalFlowPyrLK(img0pyr, img1pyr, pts0_new, pts1_new, mask, error, win_size, pyr_levels, term_crit, cv::OPTFLOW_USE_INITIAL_FLOW); - - // Loop through and record only ones that are valid - for(size_t i=0; i= img0pyr.at(0).cols || pts0_new.at(i).y >= img0pyr.at(0).rows); - bool outof_bounds_1 = (pts1_new.at(i).x < 0 || pts1_new.at(i).y < 0 || pts1_new.at(i).x >= img1pyr.at(0).cols || pts1_new.at(i).y >= img1pyr.at(0).rows); - - // If this is not already in the right image, then we should treat it as a stereo - // Otherwise we will treat this as just a monocular track of the feature - // TODO: we should check to see if we can combine this new feature and the one in the right - if(!outof_bounds_0 && mask[i] == 1 && !outof_bounds_1 && !(grid_2d_1((int)(pts1_new.at(i).y/min_px_dist),(int)(pts1_new.at(i).x/min_px_dist)) == 1)) { - // update the uv coordinates - kpts0_new.at(i).pt = pts0_new.at(i); - kpts1_new.at(i).pt = pts1_new.at(i); - // append the new uv coordinate - pts0.push_back(kpts0_new.at(i)); - pts1.push_back(kpts1_new.at(i)); - // move id forward and append this new point - size_t temp = ++currid; - ids0.push_back(temp); - ids1.push_back(temp); - } else if(!outof_bounds_0) { - // update the uv coordinates - kpts0_new.at(i).pt = pts0_new.at(i); - // append the new uv coordinate - pts0.push_back(kpts0_new.at(i)); - // move id forward and append this new point - size_t temp = ++currid; - ids0.push_back(temp); - } - } + // RIGHT: if we need features we should extract them in the current frame + // RIGHT: note that we don't track them to the left as we already did left->right tracking above + int num_featsneeded_1 = num_features - (int)pts1.size(); + if (num_featsneeded_1 > 0.2 * num_features) { - } - } - - // RIGHT: if we need features we should extract them in the current frame - // RIGHT: note that we don't track them to the left as we already did left->right tracking above - int num_featsneeded_1 = num_features - (int)pts1.size(); - if(num_featsneeded_1 > 0.2*num_features) { - - // Extract our features (use fast with griding) - std::vector pts1_ext; - Grider_FAST::perform_griding(img1pyr.at(0), pts1_ext, num_featsneeded_1, grid_x, grid_y, threshold, true); - - // Now, reject features that are close a current feature - for(auto& kpt : pts1_ext) { - // See if there is a point at this location - if(grid_2d_1((int)(kpt.pt.y/min_px_dist),(int)(kpt.pt.x/min_px_dist)) == 1) - continue; - // Else lets add it! 
-                pts1.push_back(kpt);
-                size_t temp = ++currid;
-                ids1.push_back(temp);
-                grid_2d_1((int)(kpt.pt.y/min_px_dist),(int)(kpt.pt.x/min_px_dist)) = 1;
-            }
+    // Extract our features (use fast with griding)
+    std::vector<cv::KeyPoint> pts1_ext;
+    Grider_FAST::perform_griding(img1pyr.at(0), pts1_ext, num_featsneeded_1, grid_x, grid_y, threshold, true);
+
+    // Now, reject features that are too close to a current feature
+    for (auto &kpt : pts1_ext) {
+      // See if there is a point at this location
+      if (grid_2d_1((int)(kpt.pt.y / min_px_dist), (int)(kpt.pt.x / min_px_dist)) == 1)
+        continue;
+      // Else lets add it!
+      pts1.push_back(kpt);
+      size_t temp = ++currid;
+      ids1.push_back(temp);
+      grid_2d_1((int)(kpt.pt.y / min_px_dist), (int)(kpt.pt.x / min_px_dist)) = 1;
+    }
+  }
+}
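+// Sketch (hypothetical helper, not part of this patch): perform_matching()
+// below runs its RANSAC on undistorted, normalized coordinates, so the usual
+// one-pixel inlier threshold has to be divided by the focal length:
+//
+//   double normalized_ransac_threshold(const cv::Matx33d &K0, const cv::Matx33d &K1, double px_error = 1.0) {
+//     double f = std::max(std::max(K0(0, 0), K0(1, 1)), std::max(K1(0, 0), K1(1, 1)));
+//     return px_error / f;
+//   }
+//
+// This mirrors the 1 / max_focallength argument handed to cv::findFundamentalMat().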
-void TrackKLT::perform_matching(const std::vector<cv::Mat>& img0pyr, const std::vector<cv::Mat>& img1pyr,
-                                std::vector<cv::KeyPoint>& kpts0, std::vector<cv::KeyPoint>& kpts1,
-                                size_t id0, size_t id1,
-                                std::vector<uchar>& mask_out) {
-
-    // We must have equal vectors
-    assert(kpts0.size() == kpts1.size());
-
-    // Return if we don't have any points
-    if(kpts0.empty() || kpts1.empty())
-        return;
-
-    // Convert keypoints into points (stupid opencv stuff)
-    std::vector<cv::Point2f> pts0, pts1;
-    for(size_t i=0; i<kpts0.size(); i++) {
-        pts0.push_back(kpts0.at(i).pt);
-        pts1.push_back(kpts1.at(i).pt);
-    }
-
-    // If we don't have enough points for ransac just return empty
-    // We set the mask to be all zeros since all points failed RANSAC
-    if(pts0.size() < 10) {
-        for(size_t i=0; i<pts0.size(); i++)
-            mask_out.push_back((uchar)0);
-        return;
-    }
-
-    // Now do KLT tracking to get the valid new points
-    std::vector<uchar> mask_klt;
-    std::vector<float> error;
-    cv::TermCriteria term_crit = cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 10, 0.01);
-    cv::calcOpticalFlowPyrLK(img0pyr, img1pyr, pts0, pts1, mask_klt, error, win_size, pyr_levels, term_crit, cv::OPTFLOW_USE_INITIAL_FLOW);
-
-    // Normalize these points, so we can then do ransac
-    // We don't want to do ransac on distorted image uvs since the mapping is nonlinear
-    std::vector<cv::Point2f> pts0_n, pts1_n;
-    for(size_t i=0; i<pts0.size(); i++) {
-        pts0_n.push_back(undistort_point(pts0.at(i),id0));
-        pts1_n.push_back(undistort_point(pts1.at(i),id1));
-    }
-
-    // Do RANSAC outlier rejection (note since we normalized the max pixel error is now in the normalized cords)
-    std::vector<uchar> mask_rsc;
-    double max_focallength_img0 = std::max(camera_k_OPENCV.at(id0)(0,0),camera_k_OPENCV.at(id0)(1,1));
-    double max_focallength_img1 = std::max(camera_k_OPENCV.at(id1)(0,0),camera_k_OPENCV.at(id1)(1,1));
-    double max_focallength = std::max(max_focallength_img0,max_focallength_img1);
-    cv::findFundamentalMat(pts0_n, pts1_n, cv::FM_RANSAC, 1/max_focallength, 0.999, mask_rsc);
-
-    // Loop through and record only ones that are valid
-    for(size_t i=0; i<mask_klt.size(); i++) {
-        auto mask = (uchar)((i < mask_klt.size() && mask_klt[i] && i < mask_rsc.size() && mask_rsc[i])? 1 : 0);
-        mask_out.push_back(mask);
-    }
-
-    // Copy back the updated positions
-    for(size_t i=0; i<pts0.size(); i++) {
-        kpts0.at(i).pt = pts0.at(i);
-        kpts1.at(i).pt = pts1.at(i);
-    }
-}

+void TrackKLT::perform_matching(const std::vector<cv::Mat> &img0pyr, const std::vector<cv::Mat> &img1pyr, std::vector<cv::KeyPoint> &kpts0,
+                                std::vector<cv::KeyPoint> &kpts1, size_t id0, size_t id1, std::vector<uchar> &mask_out) {
+
+  // We must have equal vectors
+  assert(kpts0.size() == kpts1.size());
+
+  // Return if we don't have any points
+  if (kpts0.empty() || kpts1.empty())
+    return;
+
+  // Convert keypoints into points (stupid opencv stuff)
+  std::vector<cv::Point2f> pts0, pts1;
+  for (size_t i = 0; i < kpts0.size(); i++) {
+    pts0.push_back(kpts0.at(i).pt);
+    pts1.push_back(kpts1.at(i).pt);
+  }
+
+  // If we don't have enough points for ransac just return empty
+  // We set the mask to be all zeros since all points failed RANSAC
+  if (pts0.size() < 10) {
+    for (size_t i = 0; i < pts0.size(); i++)
+      mask_out.push_back((uchar)0);
+    return;
+  }
+
+  // Now do KLT tracking to get the valid new points
+  std::vector<uchar> mask_klt;
+  std::vector<float> error;
+  cv::TermCriteria term_crit = cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 10, 0.01);
+  cv::calcOpticalFlowPyrLK(img0pyr, img1pyr, pts0, pts1, mask_klt, error, win_size, pyr_levels, term_crit, cv::OPTFLOW_USE_INITIAL_FLOW);
+
+  // Normalize these points, so we can then do ransac
+  // We don't want to do ransac on distorted image uvs since the mapping is nonlinear
+  std::vector<cv::Point2f> pts0_n, pts1_n;
+  for (size_t i = 0; i < pts0.size(); i++) {
+    pts0_n.push_back(undistort_point(pts0.at(i), id0));
+    pts1_n.push_back(undistort_point(pts1.at(i), id1));
+  }
+
+  // Do RANSAC outlier rejection (note since we normalized the max pixel error is now in the normalized cords)
+  std::vector<uchar> mask_rsc;
+  double max_focallength_img0 = std::max(camera_k_OPENCV.at(id0)(0, 0), camera_k_OPENCV.at(id0)(1, 1));
+  double max_focallength_img1 = std::max(camera_k_OPENCV.at(id1)(0, 0), camera_k_OPENCV.at(id1)(1, 1));
+  double max_focallength = std::max(max_focallength_img0, max_focallength_img1);
+  cv::findFundamentalMat(pts0_n, pts1_n, cv::FM_RANSAC, 1 / max_focallength, 0.999, mask_rsc);
+
+  // Loop through and record only ones that are valid
+  for (size_t i = 0; i < mask_klt.size(); i++) {
+    auto mask = (uchar)((i < mask_klt.size() && mask_klt[i] && i < mask_rsc.size() && mask_rsc[i]) ? 1 : 0);
+    mask_out.push_back(mask);
+  }
+
+  // Copy back the updated positions
+  for (size_t i = 0; i < pts0.size(); i++) {
+    kpts0.at(i).pt = pts0.at(i);
+    kpts1.at(i).pt = pts1.at(i);
+  }
+}

diff --git a/ov_core/src/track/TrackKLT.h b/ov_core/src/track/TrackKLT.h
index 7510a3aa2..de8d3ccc2 100644
--- a/ov_core/src/track/TrackKLT.h
+++ b/ov_core/src/track/TrackKLT.h
@@ -21,137 +21,128 @@
 #ifndef OV_CORE_TRACK_KLT_H
 #define OV_CORE_TRACK_KLT_H
-
 #include "TrackBase.h"
-
 namespace ov_core {
-
-    /**
-     * @brief KLT tracking of features.
-     *
-     * This is the implementation of a KLT visual frontend for tracking sparse features.
-     * We can track either monocular cameras across time (temporally) along with
-     * stereo cameras which we also track across time (temporally) but track from left to right
-     * to find the stereo correspondence information also.
-     * This uses the [calcOpticalFlowPyrLK](https://github.com/opencv/opencv/blob/master/modules/video/src/lkpyramid.cpp) OpenCV function to do the KLT tracking.
-     */
-    class TrackKLT : public TrackBase {
-
-    public:
-
-        /**
-         * @brief Public default constructor
-         */
-        TrackKLT() : TrackBase(), threshold(10), grid_x(8), grid_y(5), min_px_dist(30) {}
-
-        /**
-         * @brief Public constructor with configuration variables
-         * @param numfeats number of features we want want to track (i.e.
track 200 points from frame to frame) - * @param numaruco the max id of the arucotags, so we ensure that we start our non-auroc features above this value - * @param fast_threshold FAST detection threshold - * @param gridx size of grid in the x-direction / u-direction - * @param gridy size of grid in the y-direction / v-direction - * @param minpxdist features need to be at least this number pixels away from each other - */ - explicit TrackKLT(int numfeats, int numaruco, int fast_threshold, int gridx, int gridy, int minpxdist) : - TrackBase(numfeats, numaruco), threshold(fast_threshold), grid_x(gridx), grid_y(gridy), min_px_dist(minpxdist) {} - - - /** - * @brief Process a new monocular image - * @param timestamp timestamp the new image occurred at - * @param img new cv:Mat grayscale image - * @param cam_id the camera id that this new image corresponds too - */ - void feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) override; - - /** - * @brief Process new stereo pair of images - * @param timestamp timestamp this pair occured at (stereo is synchronised) - * @param img_left first grayscaled image - * @param img_right second grayscaled image - * @param cam_id_left first image camera id - * @param cam_id_right second image camera id - */ - void feed_stereo(double timestamp, cv::Mat &img_left, cv::Mat &img_right, size_t cam_id_left, size_t cam_id_right) override; - - - protected: - - /** - * @brief Detects new features in the current image - * @param img0pyr image we will detect features on (first level of pyramid) - * @param pts0 vector of currently extracted keypoints in this image - * @param ids0 vector of feature ids for each currently extracted keypoint - * - * Given an image and its currently extracted features, this will try to add new features if needed. - * Will try to always have the "max_features" being tracked through KLT at each timestep. - * Passed images should already be grayscaled. - */ - void perform_detection_monocular(const std::vector &img0pyr, std::vector &pts0, std::vector &ids0); - - /** - * @brief Detects new features in the current stereo pair - * @param img0pyr left image we will detect features on (first level of pyramid) - * @param img1pyr right image we will detect features on (first level of pyramid) - * @param pts0 left vector of currently extracted keypoints - * @param pts1 right vector of currently extracted keypoints - * @param ids0 left vector of feature ids for each currently extracted keypoint - * @param ids1 right vector of feature ids for each currently extracted keypoint - * - * This does the same logic as the perform_detection_monocular() function, but we also enforce stereo contraints. - * So we detect features in the left image, and then KLT track them onto the right image. - * If we have valid tracks, then we have both the keypoint on the left and its matching point in the right image. - * Will try to always have the "max_features" being tracked through KLT at each timestep. 
-         */
-        void perform_detection_stereo(const std::vector<cv::Mat> &img0pyr, const std::vector<cv::Mat> &img1pyr, std::vector<cv::KeyPoint> &pts0,
-                                      std::vector<cv::KeyPoint> &pts1, std::vector<size_t> &ids0, std::vector<size_t> &ids1);
-
-        /**
-         * @brief KLT track between two images, and do RANSAC afterwards
-         * @param img0pyr starting image pyramid
-         * @param img1pyr image pyramid we want to track too
-         * @param pts0 starting points
-         * @param pts1 points we have tracked
-         * @param id0 id of the first camera
-         * @param id1 id of the second camera
-         * @param mask_out what points had valid tracks
-         *
-         * This will track features from the first image into the second image.
-         * The two point vectors will be of equal size, but the mask_out variable will specify which points are good or bad.
-         * If the second vector is non-empty, it will be used as an initial guess of where the keypoints are in the second image.
-         */
-        void perform_matching(const std::vector<cv::Mat> &img0pyr, const std::vector<cv::Mat> &img1pyr, std::vector<cv::KeyPoint> &pts0,
-                              std::vector<cv::KeyPoint> &pts1, size_t id0, size_t id1, std::vector<uchar> &mask_out);
-
-        // Timing variables
-        boost::posix_time::ptime rT1, rT2, rT3, rT4, rT5, rT6, rT7;
-
-        // Parameters for our FAST grid detector
-        int threshold;
-        int grid_x;
-        int grid_y;
-
-        // Minimum pixel distance to be "far away enough" to be a different extracted feature
-        int min_px_dist;
-
-        // How many pyramid levels to track on and the window size to reduce by
-        int pyr_levels = 3;
-        cv::Size win_size = cv::Size(15, 15);
-
-        // Alternative histagram equalization
-        double eq_clip_limit = 10.0;
-        cv::Size eq_win_size = cv::Size(8,8);
-
-        // Last set of image pyramids
-        std::map<size_t, std::vector<cv::Mat>> img_pyramid_last;
-
-    };
-
-}
-
+/**
+ * @brief KLT tracking of features.
+ *
+ * This is the implementation of a KLT visual frontend for tracking sparse features.
+ * We can track either monocular cameras across time (temporally) along with
+ * stereo cameras which we also track across time (temporally) but track from left to right
+ * to find the stereo correspondence information also.
+ * This uses the [calcOpticalFlowPyrLK](https://github.com/opencv/opencv/blob/master/modules/video/src/lkpyramid.cpp) OpenCV function to do
+ * the KLT tracking.
+ */
+class TrackKLT : public TrackBase {
+
+public:
+  /**
+   * @brief Public default constructor
+   */
+  TrackKLT() : TrackBase(), threshold(10), grid_x(8), grid_y(5), min_px_dist(30) {}
+
+  /**
+   * @brief Public constructor with configuration variables
+   * @param numfeats number of features we want to track (i.e.
track 200 points from frame to frame)
+   * @param numaruco the max id of the aruco tags, so we ensure that we start our non-aruco features above this value
+   * @param fast_threshold FAST detection threshold
+   * @param gridx size of grid in the x-direction / u-direction
+   * @param gridy size of grid in the y-direction / v-direction
+   * @param minpxdist features need to be at least this many pixels away from each other
+   */
+  explicit TrackKLT(int numfeats, int numaruco, int fast_threshold, int gridx, int gridy, int minpxdist)
+      : TrackBase(numfeats, numaruco), threshold(fast_threshold), grid_x(gridx), grid_y(gridy), min_px_dist(minpxdist) {}
+
+  /**
+   * @brief Process a new monocular image
+   * @param timestamp timestamp the new image occurred at
+   * @param img new cv::Mat grayscale image
+   * @param cam_id the camera id that this new image corresponds to
+   */
+  void feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) override;
+
+  /**
+   * @brief Process new stereo pair of images
+   * @param timestamp timestamp this pair occurred at (stereo is synchronised)
+   * @param img_left first grayscaled image
+   * @param img_right second grayscaled image
+   * @param cam_id_left first image camera id
+   * @param cam_id_right second image camera id
+   */
+  void feed_stereo(double timestamp, cv::Mat &img_left, cv::Mat &img_right, size_t cam_id_left, size_t cam_id_right) override;
+
+protected:
+  /**
+   * @brief Detects new features in the current image
+   * @param img0pyr image we will detect features on (first level of pyramid)
+   * @param pts0 vector of currently extracted keypoints in this image
+   * @param ids0 vector of feature ids for each currently extracted keypoint
+   *
+   * Given an image and its currently extracted features, this will try to add new features if needed.
+   * Will try to always have the "max_features" being tracked through KLT at each timestep.
+   * Passed images should already be grayscaled.
+   */
+  void perform_detection_monocular(const std::vector<cv::Mat> &img0pyr, std::vector<cv::KeyPoint> &pts0, std::vector<size_t> &ids0);
+
+  /**
+   * @brief Detects new features in the current stereo pair
+   * @param img0pyr left image we will detect features on (first level of pyramid)
+   * @param img1pyr right image we will detect features on (first level of pyramid)
+   * @param pts0 left vector of currently extracted keypoints
+   * @param pts1 right vector of currently extracted keypoints
+   * @param ids0 left vector of feature ids for each currently extracted keypoint
+   * @param ids1 right vector of feature ids for each currently extracted keypoint
+   *
+   * This does the same logic as the perform_detection_monocular() function, but we also enforce stereo constraints.
+   * So we detect features in the left image, and then KLT track them onto the right image.
+   * If we have valid tracks, then we have both the keypoint on the left and its matching point in the right image.
+   * Will try to always have the "max_features" being tracked through KLT at each timestep.
+   */
+  void perform_detection_stereo(const std::vector<cv::Mat> &img0pyr, const std::vector<cv::Mat> &img1pyr, std::vector<cv::KeyPoint> &pts0,
+                                std::vector<cv::KeyPoint> &pts1, std::vector<size_t> &ids0, std::vector<size_t> &ids1);
+
+  /**
+   * @brief KLT track between two images, and do RANSAC afterwards
+   * @param img0pyr starting image pyramid
+   * @param img1pyr image pyramid we want to track too
+   * @param pts0 starting points
+   * @param pts1 points we have tracked
+   * @param id0 id of the first camera
+   * @param id1 id of the second camera
+   * @param mask_out what points had valid tracks
+   *
+   * This will track features from the first image into the second image.
+   * The two point vectors will be of equal size, but the mask_out variable will specify which points are good or bad.
+   * If the second vector is non-empty, it will be used as an initial guess of where the keypoints are in the second image.
+   */
+  void perform_matching(const std::vector<cv::Mat> &img0pyr, const std::vector<cv::Mat> &img1pyr, std::vector<cv::KeyPoint> &pts0,
+                        std::vector<cv::KeyPoint> &pts1, size_t id0, size_t id1, std::vector<uchar> &mask_out);
+
+  // Timing variables
+  boost::posix_time::ptime rT1, rT2, rT3, rT4, rT5, rT6, rT7;
+
+  // Parameters for our FAST grid detector
+  int threshold;
+  int grid_x;
+  int grid_y;
+
+  // Minimum pixel distance to be "far away enough" to be a different extracted feature
+  int min_px_dist;
+
+  // How many pyramid levels to track on and the window size to reduce by
+  int pyr_levels = 3;
+  cv::Size win_size = cv::Size(15, 15);
+
+  // Alternative histogram equalization
+  double eq_clip_limit = 10.0;
+  cv::Size eq_win_size = cv::Size(8, 8);
+
+  // Last set of image pyramids
+  std::map<size_t, std::vector<cv::Mat>> img_pyramid_last;
+};
+
+} // namespace ov_core

 #endif /* OV_CORE_TRACK_KLT_H */
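As a quick orientation to the reformatted interface above, a hedged usage sketch follows; the parameter values are illustrative only, and a real setup would also need the camera intrinsics loaded into the tracker (via its TrackBase calibration setter) before undistortion is meaningful:

#include <opencv2/core.hpp>
#include "track/TrackKLT.h"

// Hypothetical configuration: track up to 200 features, no aruco tags, FAST
// threshold of 10, a 5x3 extraction grid, and 10 px minimum feature spacing.
ov_core::TrackKLT tracker(200, 0, 10, 5, 3, 10);

// Feed a grayscale frame for camera 0; accepted tracks land in the database.
double timestamp = 0.05;
cv::Mat frame = cv::Mat::zeros(480, 752, CV_8UC1);
tracker.feed_monocular(timestamp, frame, 0);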
-
-        // Move forward in time
-        img_last[cam_id] = cv::Mat::zeros(cv::Size(wh.first,wh.second), CV_8UC1);
-        pts_last[cam_id] = good_left;
-        ids_last[cam_id] = good_ids_left;
-    }
-
-}
-
+void TrackSIM::feed_measurement_simulation(double timestamp, const std::vector<int> &camids,
+                                           const std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> &feats) {
+
+  // Assert our two vectors are equal
+  assert(camids.size() == feats.size());
+
+  // Loop through each camera
+  for (size_t i = 0; i < camids.size(); i++) {
+
+    // Get the camera for this measurement
+    size_t cam_id = camids.at(i);
+
+    // Our good ids and points
+    std::vector<cv::KeyPoint> good_left;
+    std::vector<size_t> good_ids_left;
+
+    // Update our feature database, with these new observations
+    // NOTE: we add the "currid" since we need to offset the simulator
+    // NOTE: ids by the number of aruco tags we have specified as tracking
+    for (const auto &feat : feats.at(i)) {
+
+      // Get our id value
+      size_t id = feat.first + currid;
+
+      // Create the keypoint
+      cv::KeyPoint kpt;
+      kpt.pt.x = feat.second(0);
+      kpt.pt.y = feat.second(1);
+      good_left.push_back(kpt);
+      good_ids_left.push_back(id);
+
+      // Append to the database
+      cv::Point2f npt_l = undistort_point(kpt.pt, cam_id);
+      database->update_feature(id, timestamp, cam_id, kpt.pt.x, kpt.pt.y, npt_l.x, npt_l.y);
+    }
+
+    // Get our width and height
+    auto wh = camera_wh.at(cam_id);
+
+    // Move forward in time
+    img_last[cam_id] = cv::Mat::zeros(cv::Size(wh.first, wh.second), CV_8UC1);
+    pts_last[cam_id] = good_left;
+    ids_last[cam_id] = good_ids_left;
+  }
+}
diff --git a/ov_core/src/track/TrackSIM.h b/ov_core/src/track/TrackSIM.h
index 80b2655b5..5588e6b89 100644
--- a/ov_core/src/track/TrackSIM.h
+++ b/ov_core/src/track/TrackSIM.h
@@ -21,70 +21,59 @@
 #ifndef OV_CORE_TRACK_SIM_H
 #define OV_CORE_TRACK_SIM_H

-
 #include "TrackBase.h"

-
 namespace ov_core {

-
-    /**
-     * @brief Simulated tracker for when we already have uv measurements!
-     *
-     * This class should be used when we are using the @ref ov_msckf::Simulator class to generate measurements.
-     * It simply takes the resulting simulation data and appends it to the internal feature database.
-     */
-    class TrackSIM : public TrackBase {
-
-    public:
-
-        /**
-         * @brief Public constructor with configuration variables
-         * @param numaruco the max id of the arucotags, so we ensure that we start our non-auroc features above this value
-         */
-        TrackSIM(int numaruco) : TrackBase(0, numaruco) {}
-
-        /**
-         * @brief Set the width and height for the cameras
-         * @param _camera_wh Width and height for each camera
-         */
-        void set_width_height(std::map<size_t, std::pair<int, int>> _camera_wh) {
-            this->camera_wh = _camera_wh;
-        }
-
-        /// @warning This function should not be used!! Use @ref feed_measurement_simulation() instead.
-        void feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) override {
-            printf(RED "[SIM]: SIM TRACKER FEED MONOCULAR CALLED!!!\n" RESET);
-            printf(RED "[SIM]: THIS SHOULD NEVER HAPPEN!\n" RESET);
-            std::exit(EXIT_FAILURE);
-        }
-
-        /// @warning This function should not be used!! Use @ref feed_measurement_simulation() instead.
-        void feed_stereo(double timestamp, cv::Mat &img_left, cv::Mat &img_right, size_t cam_id_left, size_t cam_id_right) override {
-            printf(RED "[SIM]: SIM TRACKER FEED STEREO CALLED!!!\n" RESET);
-            printf(RED "[SIM]: THIS SHOULD NEVER HAPPEN!\n" RESET);
-            std::exit(EXIT_FAILURE);
-        }
-
-        /**
-         * @brief Feed function for a synchronized simulated cameras
-         * @param timestamp Time that this image was collected
-         * @param camids Camera ids that we have simulated measurements for
-         * @param feats Raw uv simulated measurements
-         */
-        void feed_measurement_simulation(double timestamp, const std::vector<int> &camids, const std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> &feats);
-
-
-    protected:
-
-        /// Width and height of our cameras
-        std::map<size_t, std::pair<int, int>> camera_wh;
-
-
-    };
-
-
-}
-
+/**
+ * @brief Simulated tracker for when we already have uv measurements!
+ *
+ * This class should be used when we are using the @ref ov_msckf::Simulator class to generate measurements.
+ * It simply takes the resulting simulation data and appends it to the internal feature database.
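+ *
+ * A minimal usage sketch (illustrative only; `max_aruco_id`, `camera_wh`, and the
+ * per-timestep `camids`/`feats` filled from the simulator are assumptions here):
+ * @code
+ * TrackSIM tracker(max_aruco_id);
+ * tracker.set_width_height(camera_wh); // std::map<size_t, std::pair<int, int>>
+ * std::vector<int> camids;
+ * std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> feats;
+ * // ... fill camids and feats with this timestep's simulated uv measurements ...
+ * tracker.feed_measurement_simulation(timestamp, camids, feats);
+ * @endcode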
+ */
+class TrackSIM : public TrackBase {
+
+public:
+  /**
+   * @brief Public constructor with configuration variables
+   * @param numaruco the max id of the aruco tags, so we ensure that we start our non-aruco features above this value
+   */
+  TrackSIM(int numaruco) : TrackBase(0, numaruco) {}
+
+  /**
+   * @brief Set the width and height for the cameras
+   * @param _camera_wh Width and height for each camera
+   */
+  void set_width_height(std::map<size_t, std::pair<int, int>> _camera_wh) { this->camera_wh = _camera_wh; }
+
+  /// @warning This function should not be used!! Use @ref feed_measurement_simulation() instead.
+  void feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) override {
+    printf(RED "[SIM]: SIM TRACKER FEED MONOCULAR CALLED!!!\n" RESET);
+    printf(RED "[SIM]: THIS SHOULD NEVER HAPPEN!\n" RESET);
+    std::exit(EXIT_FAILURE);
+  }
+
+  /// @warning This function should not be used!! Use @ref feed_measurement_simulation() instead.
+  void feed_stereo(double timestamp, cv::Mat &img_left, cv::Mat &img_right, size_t cam_id_left, size_t cam_id_right) override {
+    printf(RED "[SIM]: SIM TRACKER FEED STEREO CALLED!!!\n" RESET);
+    printf(RED "[SIM]: THIS SHOULD NEVER HAPPEN!\n" RESET);
+    std::exit(EXIT_FAILURE);
+  }
+
+  /**
+   * @brief Feed function for synchronized simulated cameras
+   * @param timestamp Time that this image was collected
+   * @param camids Camera ids that we have simulated measurements for
+   * @param feats Raw uv simulated measurements
+   */
+  void feed_measurement_simulation(double timestamp, const std::vector<int> &camids,
+                                   const std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> &feats);
+
+protected:
+  /// Width and height of our cameras
+  std::map<size_t, std::pair<int, int>> camera_wh;
+};
+
+} // namespace ov_core

 #endif /* OV_CORE_TRACK_SIM_H */
diff --git a/ov_core/src/types/IMU.h b/ov_core/src/types/IMU.h
index f8e7e741c..f0ea231f4 100644
--- a/ov_core/src/types/IMU.h
+++ b/ov_core/src/types/IMU.h
@@ -21,265 +21,215 @@
 #ifndef OV_TYPE_TYPE_IMU_H
 #define OV_TYPE_TYPE_IMU_H

-
 #include "PoseJPL.h"
 #include "utils/quat_ops.h"

-
 namespace ov_type {

+/**
+ * @brief Derived Type class that implements an IMU state
+ *
+ * Contains a PoseJPL, Vec velocity, Vec gyro bias, and Vec accel bias.
+ * This should be similar to that of the standard MSCKF state besides the ordering.
+ * The pose is first, followed by velocity, etc.
+ */
+class IMU : public Type {
+
+public:
+  IMU() : Type(15) {
+
+    // Create all the sub-variables
+    _pose = std::shared_ptr<PoseJPL>(new PoseJPL());
+    _v = std::shared_ptr<Vec>(new Vec(3));
+    _bg = std::shared_ptr<Vec>(new Vec(3));
+    _ba = std::shared_ptr<Vec>(new Vec(3));
+
+    // Set our default state value
+    Eigen::VectorXd imu0 = Eigen::VectorXd::Zero(16, 1);
+    imu0(3) = 1.0;
+    set_value_internal(imu0);
+    set_fej_internal(imu0);
+  }
+
+  ~IMU() {}
+
+  /**
+   * @brief Sets id used to track location of variable in the filter covariance
+   *
+   * Note that we update the sub-variables also.
+   *
+   * @param new_id entry in filter covariance corresponding to this variable
+   */
+  void set_local_id(int new_id) override {
+    _id = new_id;
+    _pose->set_local_id(new_id);
+    _v->set_local_id(_pose->id() + ((new_id != -1) ? _pose->size() : 0));
+    _bg->set_local_id(_v->id() + ((new_id != -1) ? _v->size() : 0));
+    _ba->set_local_id(_bg->id() + ((new_id != -1) ? _bg->size() : 0));
+  }
+
+  /**
+   * @brief Performs update operation using JPLQuat update for orientation, then vector updates for
+   * position, velocity, gyro bias, and accel bias (in that order).
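+   *
+   * A sketch of the expected ordering (illustrative; `imu` is an assumed
+   * std::shared_ptr<IMU> and `dtheta` an assumed small 3x1 axis-angle correction):
+   * @code
+   * Eigen::VectorXd dx = Eigen::VectorXd::Zero(15);
+   * dx.block(0, 0, 3, 1) = dtheta;  // entries 0-2: orientation (q)
+   * // entries 3-5: position (p), 6-8: velocity (v),
+   * // entries 9-11: gyro bias (bg), 12-14: accel bias (ba)
+   * imu->update(dx);
+   * @endcode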
+   *
+   * @param dx 15 DOF vector encoding update using the following order (q, p, v, bg, ba)
+   */
+  void update(const Eigen::VectorXd &dx) override {
+
+    assert(dx.rows() == _size);
+
+    Eigen::Matrix<double, 16, 1> newX = _value;
+
+    Eigen::Matrix<double, 4, 1> dq;
+    dq << .5 * dx.block(0, 0, 3, 1), 1.0;
+    dq = ov_core::quatnorm(dq);
+
+    newX.block(0, 0, 4, 1) = ov_core::quat_multiply(dq, quat());
+    newX.block(4, 0, 3, 1) += dx.block(3, 0, 3, 1);
+
+    newX.block(7, 0, 3, 1) += dx.block(6, 0, 3, 1);
+    newX.block(10, 0, 3, 1) += dx.block(9, 0, 3, 1);
+    newX.block(13, 0, 3, 1) += dx.block(12, 0, 3, 1);
+
+    set_value(newX);
+  }
+
+  /**
+   * @brief Sets the value of the estimate
+   * @param new_value New value we should set
+   */
+  void set_value(const Eigen::MatrixXd &new_value) override { set_value_internal(new_value); }
+
+  /**
+   * @brief Sets the value of the first estimate
+   * @param new_value New value we should set
+   */
+  void set_fej(const Eigen::MatrixXd &new_value) override { set_fej_internal(new_value); }
+
+  std::shared_ptr<Type> clone() override {
+    auto Clone = std::shared_ptr<IMU>(new IMU());
+    Clone->set_value(value());
+    Clone->set_fej(fej());
+    return Clone;
+  }
+
+  std::shared_ptr<Type> check_if_subvariable(const std::shared_ptr<Type> check) override {
+    if (check == _pose) {
+      return _pose;
+    } else if (check == _pose->check_if_subvariable(check)) {
+      return _pose->check_if_subvariable(check);
+    } else if (check == _v) {
+      return _v;
+    } else if (check == _bg) {
+      return _bg;
+    } else if (check == _ba) {
+      return _ba;
+    }
+    return nullptr;
+  }
+
+  /// Rotation access
+  Eigen::Matrix<double, 3, 3> Rot() const { return _pose->Rot(); }
+
+  /// FEJ Rotation access
+  Eigen::Matrix<double, 3, 3> Rot_fej() const { return _pose->Rot_fej(); }
+
+  /// Rotation access quaternion
+  Eigen::Matrix<double, 4, 1> quat() const { return _pose->quat(); }
+
+  /// FEJ Rotation access quaternion
+  Eigen::Matrix<double, 4, 1> quat_fej() const { return _pose->quat_fej(); }
+
+  /// Position access
+  Eigen::Matrix<double, 3, 1> pos() const { return _pose->pos(); }
+
+  /// FEJ position access
+  Eigen::Matrix<double, 3, 1> pos_fej() const { return _pose->pos_fej(); }
+
+  /// Velocity access
+  Eigen::Matrix<double, 3, 1> vel() const { return _v->value(); }
+
+  /// FEJ velocity access
+  Eigen::Matrix<double, 3, 1> vel_fej() const { return _v->fej(); }
+
+  /// Gyro bias access
+  Eigen::Matrix<double, 3, 1> bias_g() const { return _bg->value(); }
+
+  /// FEJ gyro bias access
+  Eigen::Matrix<double, 3, 1> bias_g_fej() const { return _bg->fej(); }
+
+  /// Accel bias access
+  Eigen::Matrix<double, 3, 1> bias_a() const { return _ba->value(); }
+
+  /// FEJ accel bias access
+  Eigen::Matrix<double, 3, 1> bias_a_fej() const { return _ba->fej(); }
+
+  /// Pose type access
+  std::shared_ptr<PoseJPL> pose() { return _pose; }
+
+  /// Quaternion type access
+  std::shared_ptr<JPLQuat> q() { return _pose->q(); }
+
+  /// Position type access
+  std::shared_ptr<Vec> p() { return _pose->p(); }
+
+  /// Velocity type access
+  std::shared_ptr<Vec> v() { return _v; }
+
+  /// Gyroscope bias access
+  std::shared_ptr<Vec> bg() { return _bg; }
+
+  /// Acceleration bias access
+  std::shared_ptr<Vec> ba() { return _ba; }
+
+protected:
+  /// Pose subvariable
+  std::shared_ptr<PoseJPL> _pose;
+
+  /// Velocity subvariable
+  std::shared_ptr<Vec> _v;
+
+  /// Gyroscope bias subvariable
+  std::shared_ptr<Vec> _bg;
+
+  /// Acceleration bias subvariable
+  std::shared_ptr<Vec> _ba;
+
-    /**
-     * @brief Derived Type class that implements an IMU state
-     *
-     * Contains a PoseJPL, Vec velocity, Vec gyro bias, and Vec accel bias.
-     * This should be similar to that of the standard MSCKF state besides the ordering.
-     * The pose is first, followed by velocity, etc.
- */ - class IMU : public Type { - - public: - - IMU() : Type(15) { - - // Create all the sub-variables - _pose = std::shared_ptr(new PoseJPL()); - _v = std::shared_ptr(new Vec(3)); - _bg = std::shared_ptr(new Vec(3)); - _ba = std::shared_ptr(new Vec(3)); - - // Set our default state value - Eigen::VectorXd imu0 = Eigen::VectorXd::Zero(16,1); - imu0(3) = 1.0; - set_value_internal(imu0); - set_fej_internal(imu0); - - } - - ~IMU() {} - - /** - * @brief Sets id used to track location of variable in the filter covariance - * - * Note that we update the sub-variables also. - * - * @param new_id entry in filter covariance corresponding to this variable - */ - void set_local_id(int new_id) override { - _id = new_id; - _pose->set_local_id(new_id); - _v->set_local_id(_pose->id()+((new_id!=-1)? _pose->size() : 0)); - _bg->set_local_id(_v->id()+((new_id!=-1)? _v->size() : 0)); - _ba->set_local_id(_bg->id()+((new_id!=-1)? _bg->size() : 0)); - } - - /** - * @brief Performs update operation using JPLQuat update for orientation, then vector updates for - * position, velocity, gyro bias, and accel bias (in that order). - * - * @param dx 15 DOF vector encoding update using the following order (q, p, v, bg, ba) - */ - void update(const Eigen::VectorXd& dx) override { - - assert(dx.rows() == _size); - - Eigen::Matrix newX = _value; - - Eigen::Matrix dq; - dq << .5 * dx.block(0, 0, 3, 1), 1.0; - dq = ov_core::quatnorm(dq); - - newX.block(0, 0, 4, 1) = ov_core::quat_multiply(dq, quat()); - newX.block(4, 0, 3, 1) += dx.block(3, 0, 3, 1); - - newX.block(7, 0, 3, 1) += dx.block(6, 0, 3, 1); - newX.block(10, 0, 3, 1) += dx.block(9, 0, 3, 1); - newX.block(13, 0, 3, 1) += dx.block(12, 0, 3, 1); - - set_value(newX); - - } - - - /** - * @brief Sets the value of the estimate - * @param new_value New value we should set - */ - void set_value(const Eigen::MatrixXd& new_value) override { - set_value_internal(new_value); - } - - /** - * @brief Sets the value of the first estimate - * @param new_value New value we should set - */ - void set_fej(const Eigen::MatrixXd& new_value) override { - set_fej_internal(new_value); - } - - std::shared_ptr clone() override { - auto Clone = std::shared_ptr(new IMU()); - Clone->set_value(value()); - Clone->set_fej(fej()); - return Clone; - } - - std::shared_ptr check_if_subvariable(const std::shared_ptr check) override { - if (check == _pose) { - return _pose; - } else if (check == _pose->check_if_subvariable(check)) { - return _pose->check_if_subvariable(check); - } else if (check == _v) { - return _v; - } else if (check == _bg) { - return _bg; - } else if (check == _ba) { - return _ba; - } - return nullptr; - } - - /// Rotation access - Eigen::Matrix Rot() const { - return _pose->Rot(); - } - - /// FEJ Rotation access - Eigen::Matrix Rot_fej() const { - return _pose->Rot_fej(); - } - - /// Rotation access quaternion - Eigen::Matrix quat() const { - return _pose->quat(); - } - - /// FEJ Rotation access quaternion - Eigen::Matrix quat_fej() const { - return _pose->quat_fej(); - } - - /// Position access - Eigen::Matrix pos() const { - return _pose->pos(); - } - - /// FEJ position access - Eigen::Matrix pos_fej() const { - return _pose->pos_fej(); - } - - /// Velocity access - Eigen::Matrix vel() const { - return _v->value(); - } - - // FEJ velocity access - Eigen::Matrix vel_fej() const { - return _v->fej(); - } - - /// Gyro bias access - Eigen::Matrix bias_g() const { - return _bg->value(); - } - - /// FEJ gyro bias access - Eigen::Matrix bias_g_fej() const { - return _bg->fej(); - } - - 
/// Accel bias access - Eigen::Matrix bias_a() const { - return _ba->value(); - } - - // FEJ accel bias access - Eigen::Matrix bias_a_fej() const { - return _ba->fej(); - } - - /// Pose type access - std::shared_ptr pose() { - return _pose; - } - - /// Quaternion type access - std::shared_ptr q() { - return _pose->q(); - } - - /// Position type access - std::shared_ptr p() { - return _pose->p(); - } - - /// Velocity type access - std::shared_ptr v() { - return _v; - } - - /// Gyroscope bias access - std::shared_ptr bg() { - return _bg; - } - - /// Acceleration bias access - std::shared_ptr ba() { - return _ba; - } - - protected: - - /// Pose subvariable - std::shared_ptr _pose; - - /// Velocity subvariable - std::shared_ptr _v; - - /// Gyroscope bias subvariable - std::shared_ptr _bg; - - /// Acceleration bias subvariable - std::shared_ptr _ba; - - /** - * @brief Sets the value of the estimate - * @param new_value New value we should set - */ - void set_value_internal(const Eigen::MatrixXd& new_value) { + /** + * @brief Sets the value of the estimate + * @param new_value New value we should set + */ + void set_value_internal(const Eigen::MatrixXd &new_value) { - assert(new_value.rows() == 16); - assert(new_value.cols() == 1); - - _pose->set_value(new_value.block(0, 0, 7, 1)); - _v->set_value(new_value.block(7, 0, 3, 1)); - _bg->set_value(new_value.block(10, 0, 3, 1)); - _ba->set_value(new_value.block(13, 0, 3, 1)); + assert(new_value.rows() == 16); + assert(new_value.cols() == 1); - _value = new_value; - } - - /** - * @brief Sets the value of the first estimate - * @param new_value New value we should set - */ - void set_fej_internal(const Eigen::MatrixXd& new_value) { + _pose->set_value(new_value.block(0, 0, 7, 1)); + _v->set_value(new_value.block(7, 0, 3, 1)); + _bg->set_value(new_value.block(10, 0, 3, 1)); + _ba->set_value(new_value.block(13, 0, 3, 1)); - assert(new_value.rows() == 16); - assert(new_value.cols() == 1); + _value = new_value; + } - _pose->set_fej(new_value.block(0, 0, 7, 1)); - _v->set_fej(new_value.block(7, 0, 3, 1)); - _bg->set_fej(new_value.block(10, 0, 3, 1)); - _ba->set_fej(new_value.block(13, 0, 3, 1)); + /** + * @brief Sets the value of the first estimate + * @param new_value New value we should set + */ + void set_fej_internal(const Eigen::MatrixXd &new_value) { - _fej = new_value; - } + assert(new_value.rows() == 16); + assert(new_value.cols() == 1); - }; + _pose->set_fej(new_value.block(0, 0, 7, 1)); + _v->set_fej(new_value.block(7, 0, 3, 1)); + _bg->set_fej(new_value.block(10, 0, 3, 1)); + _ba->set_fej(new_value.block(13, 0, 3, 1)); -} + _fej = new_value; + } +}; +} // namespace ov_type -#endif //OV_TYPE_TYPE_IMU_H +#endif // OV_TYPE_TYPE_IMU_H diff --git a/ov_core/src/types/JPLQuat.h b/ov_core/src/types/JPLQuat.h index 7cfb4a99c..0dd7f2b27 100644 --- a/ov_core/src/types/JPLQuat.h +++ b/ov_core/src/types/JPLQuat.h @@ -21,134 +21,118 @@ #ifndef OV_TYPE_TYPE_JPLQUAT_H #define OV_TYPE_TYPE_JPLQUAT_H - #include "Type.h" #include "utils/quat_ops.h" - namespace ov_type { - - /** - * @brief Derived Type class that implements JPL quaternion - * - * This quaternion uses a left-multiplicative error state and follows the "JPL convention". - * Please checkout our utility functions in the quat_ops.h file. 
- * We recommend that people new quaternions check out the following resources: - * - http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf - * - ftp://naif.jpl.nasa.gov/pub/naif/misc/Quaternion_White_Paper/Quaternions_White_Paper.pdf - */ - class JPLQuat : public Type { - - public: - - JPLQuat() : Type(3) { - Eigen::Vector4d q0 = Eigen::Vector4d::Zero(); - q0(3) = 1.0; - set_value_internal(q0); - set_fej_internal(q0); - } - - ~JPLQuat() {} - - /** - * @brief Implements update operation by left-multiplying the current - * quaternion with a quaternion built from a small axis-angle perturbation. - * - * @f[ - * \bar{q}=norm\Big(\begin{bmatrix} 0.5*\mathbf{\theta_{dx}} \\ 1 \end{bmatrix}\Big) \hat{\bar{q}} - * @f] - * - * @param dx Axis-angle representation of the perturbing quaternion - */ - void update(const Eigen::VectorXd& dx) override { - - assert(dx.rows() == _size); - - //Build perturbing quaternion - Eigen::Matrix dq; - dq << .5 * dx, 1.0; - dq = ov_core::quatnorm(dq); - - //Update estimate and recompute R - set_value(ov_core::quat_multiply(dq, _value)); - - } - - /** - * @brief Sets the value of the estimate and recomputes the internal rotation matrix - * @param new_value New value for the quaternion estimate - */ - void set_value(const Eigen::MatrixXd& new_value) override { - set_value_internal(new_value); - } - - /** - * @brief Sets the fej value and recomputes the fej rotation matrix - * @param new_value New value for the quaternion estimate - */ - void set_fej(const Eigen::MatrixXd& new_value) override { - set_fej_internal(new_value); - } - - std::shared_ptr clone() override { - auto Clone = std::shared_ptr(new JPLQuat()); - Clone->set_value(value()); - Clone->set_fej(fej()); - return Clone; - } - - /// Rotation access - Eigen::Matrix Rot() const { - return _R; - } - - /// FEJ Rotation access - Eigen::Matrix Rot_fej() const { - return _Rfej; - } - - protected: - - // Stores the rotation - Eigen::Matrix _R; - - // Stores the first-estimate rotation - Eigen::Matrix _Rfej; - - /** - * @brief Sets the value of the estimate and recomputes the internal rotation matrix - * @param new_value New value for the quaternion estimate - */ - void set_value_internal(const Eigen::MatrixXd& new_value) { - - assert(new_value.rows() == 4); - assert(new_value.cols() == 1); - - _value = new_value; - - //compute associated rotation - _R = ov_core::quat_2_Rot(new_value); - } - - /** - * @brief Sets the fej value and recomputes the fej rotation matrix - * @param new_value New value for the quaternion estimate - */ - void set_fej_internal(const Eigen::MatrixXd& new_value) { - - assert(new_value.rows() == 4); - assert(new_value.cols() == 1); - - _fej = new_value; - - //compute associated rotation - _Rfej = ov_core::quat_2_Rot(new_value); - } - - }; - - -} - -#endif //OV_TYPE_TYPE_JPLQUAT_H +/** + * @brief Derived Type class that implements JPL quaternion + * + * This quaternion uses a left-multiplicative error state and follows the "JPL convention". + * Please checkout our utility functions in the quat_ops.h file. 
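+ *
+ * A small usage sketch (illustrative; `dtheta` is an assumed 3x1 axis-angle error):
+ * @code
+ * JPLQuat q;                    // initialized to the identity quaternion (0, 0, 0, 1)
+ * Eigen::VectorXd dtheta = Eigen::VectorXd::Zero(3);
+ * dtheta(0) = 0.01;             // small rotation about the x-axis
+ * q.update(dtheta);             // left-multiplies the perturbation onto the estimate
+ * Eigen::Matrix<double, 3, 3> R = q.Rot(); // rotation matrix is recomputed automatically
+ * @endcode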
+ * We recommend that people new to quaternions check out the following resources:
+ * - http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf
+ * - ftp://naif.jpl.nasa.gov/pub/naif/misc/Quaternion_White_Paper/Quaternions_White_Paper.pdf
+ */
+class JPLQuat : public Type {
+
+public:
+  JPLQuat() : Type(3) {
+    Eigen::Vector4d q0 = Eigen::Vector4d::Zero();
+    q0(3) = 1.0;
+    set_value_internal(q0);
+    set_fej_internal(q0);
+  }
+
+  ~JPLQuat() {}
+
+  /**
+   * @brief Implements update operation by left-multiplying the current
+   * quaternion with a quaternion built from a small axis-angle perturbation.
+   *
+   * @f[
+   * \bar{q}=norm\Big(\begin{bmatrix} 0.5*\mathbf{\theta_{dx}} \\ 1 \end{bmatrix}\Big) \hat{\bar{q}}
+   * @f]
+   *
+   * @param dx Axis-angle representation of the perturbing quaternion
+   */
+  void update(const Eigen::VectorXd &dx) override {
+
+    assert(dx.rows() == _size);
+
+    // Build perturbing quaternion
+    Eigen::Matrix<double, 4, 1> dq;
+    dq << .5 * dx, 1.0;
+    dq = ov_core::quatnorm(dq);
+
+    // Update estimate and recompute R
+    set_value(ov_core::quat_multiply(dq, _value));
+  }
+
+  /**
+   * @brief Sets the value of the estimate and recomputes the internal rotation matrix
+   * @param new_value New value for the quaternion estimate
+   */
+  void set_value(const Eigen::MatrixXd &new_value) override { set_value_internal(new_value); }
+
+  /**
+   * @brief Sets the fej value and recomputes the fej rotation matrix
+   * @param new_value New value for the quaternion estimate
+   */
+  void set_fej(const Eigen::MatrixXd &new_value) override { set_fej_internal(new_value); }
+
+  std::shared_ptr<Type> clone() override {
+    auto Clone = std::shared_ptr<JPLQuat>(new JPLQuat());
+    Clone->set_value(value());
+    Clone->set_fej(fej());
+    return Clone;
+  }
+
+  /// Rotation access
+  Eigen::Matrix<double, 3, 3> Rot() const { return _R; }
+
+  /// FEJ Rotation access
+  Eigen::Matrix<double, 3, 3> Rot_fej() const { return _Rfej; }
+
+protected:
+  // Stores the rotation
+  Eigen::Matrix<double, 3, 3> _R;
+
+  // Stores the first-estimate rotation
+  Eigen::Matrix<double, 3, 3> _Rfej;
+
+  /**
+   * @brief Sets the value of the estimate and recomputes the internal rotation matrix
+   * @param new_value New value for the quaternion estimate
+   */
+  void set_value_internal(const Eigen::MatrixXd &new_value) {
+
+    assert(new_value.rows() == 4);
+    assert(new_value.cols() == 1);
+
+    _value = new_value;
+
+    // compute associated rotation
+    _R = ov_core::quat_2_Rot(new_value);
+  }
+
+  /**
+   * @brief Sets the fej value and recomputes the fej rotation matrix
+   * @param new_value New value for the quaternion estimate
+   */
+  void set_fej_internal(const Eigen::MatrixXd &new_value) {
+
+    assert(new_value.rows() == 4);
+    assert(new_value.cols() == 1);
+
+    _fej = new_value;
+
+    // compute associated rotation
+    _Rfej = ov_core::quat_2_Rot(new_value);
+  }
+};
+
+} // namespace ov_type
+
+#endif // OV_TYPE_TYPE_JPLQUAT_H
diff --git a/ov_core/src/types/Landmark.cpp b/ov_core/src/types/Landmark.cpp
index 661c90ac8..1e0a82043 100644
--- a/ov_core/src/types/Landmark.cpp
+++ b/ov_core/src/types/Landmark.cpp
@@ -20,126 +20,124 @@
  */
 #include "Landmark.h"

-
 using namespace ov_type;

-
-Eigen::Matrix<double, 3, 1> Landmark::get_xyz(bool getfej) const {
-
-    /// CASE: Global 3d feature representation
-    /// CASE: Anchored 3D feature representation
-    if (_feat_representation == LandmarkRepresentation::Representation::GLOBAL_3D
-        || _feat_representation == LandmarkRepresentation::Representation::ANCHORED_3D) {
-        return (getfej) ?
fej() : value(); - } - - /// CASE: Global inverse depth feature representation - /// CASE: Anchored full inverse depth feature representation - if (_feat_representation == LandmarkRepresentation::Representation::GLOBAL_FULL_INVERSE_DEPTH - || _feat_representation == LandmarkRepresentation::Representation::ANCHORED_FULL_INVERSE_DEPTH) { - Eigen::Matrix p_invFinG = (getfej) ? fej() : value(); - Eigen::Matrix p_FinG; - p_FinG << (1 / p_invFinG(2)) * std::cos(p_invFinG(0)) * std::sin(p_invFinG(1)), - (1 / p_invFinG(2)) * std::sin(p_invFinG(0)) * std::sin(p_invFinG(1)), - (1 / p_invFinG(2)) * std::cos(p_invFinG(1)); - return p_FinG; - } - - /// CASE: Anchored MSCKF inverse depth feature representation - if (_feat_representation == LandmarkRepresentation::Representation::ANCHORED_MSCKF_INVERSE_DEPTH) { - Eigen::Matrix p_FinA; - Eigen::Matrix p_invFinA = value(); - p_FinA << (1 / p_invFinA(2)) * p_invFinA(0), - (1 / p_invFinA(2)) * p_invFinA(1), - 1 / p_invFinA(2); - return p_FinA; - } - - /// CASE: Estimate single depth of the feature using the initial bearing - if (_feat_representation == LandmarkRepresentation::Representation::ANCHORED_INVERSE_DEPTH_SINGLE) { - //if(getfej) return 1.0/fej()(0)*uv_norm_zero_fej; - return 1.0/value()(0)*uv_norm_zero; - } - - // Failure - assert(false); - return Eigen::Vector3d::Zero(); +Eigen::Matrix Landmark::get_xyz(bool getfej) const { + + /// CASE: Global 3d feature representation + /// CASE: Anchored 3D feature representation + if (_feat_representation == LandmarkRepresentation::Representation::GLOBAL_3D || + _feat_representation == LandmarkRepresentation::Representation::ANCHORED_3D) { + return (getfej) ? fej() : value(); + } + + /// CASE: Global inverse depth feature representation + /// CASE: Anchored full inverse depth feature representation + if (_feat_representation == LandmarkRepresentation::Representation::GLOBAL_FULL_INVERSE_DEPTH || + _feat_representation == LandmarkRepresentation::Representation::ANCHORED_FULL_INVERSE_DEPTH) { + Eigen::Matrix p_invFinG = (getfej) ? 
fej() : value();
+    Eigen::Matrix<double, 3, 1> p_FinG;
+    p_FinG << (1 / p_invFinG(2)) * std::cos(p_invFinG(0)) * std::sin(p_invFinG(1)),
+        (1 / p_invFinG(2)) * std::sin(p_invFinG(0)) * std::sin(p_invFinG(1)), (1 / p_invFinG(2)) * std::cos(p_invFinG(1));
+    return p_FinG;
+  }
+
+  /// CASE: Anchored MSCKF inverse depth feature representation
+  if (_feat_representation == LandmarkRepresentation::Representation::ANCHORED_MSCKF_INVERSE_DEPTH) {
+    Eigen::Matrix<double, 3, 1> p_FinA;
+    Eigen::Matrix<double, 3, 1> p_invFinA = value();
+    p_FinA << (1 / p_invFinA(2)) * p_invFinA(0), (1 / p_invFinA(2)) * p_invFinA(1), 1 / p_invFinA(2);
+    return p_FinA;
+  }
+
+  /// CASE: Estimate single depth of the feature using the initial bearing
+  if (_feat_representation == LandmarkRepresentation::Representation::ANCHORED_INVERSE_DEPTH_SINGLE) {
+    // if(getfej) return 1.0/fej()(0)*uv_norm_zero_fej;
+    return 1.0 / value()(0) * uv_norm_zero;
+  }
+
+  // Failure
+  assert(false);
+  return Eigen::Vector3d::Zero();
+}
+
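+// Worked example of the full-inverse-depth conversion used in get_xyz() above and
+// set_from_xyz() below (numbers only, derived directly from the code): for
+// p_FinG = (10, 0, 0) we get rho = 1/10, phi = acos(rho * 0) = pi/2, and
+// theta = atan2(0, 10) = 0, so get_xyz() recovers
+//   (1/rho) * (cos(theta)sin(phi), sin(theta)sin(phi), cos(phi)) = (10, 0, 0),
+// i.e. the two conversions are inverses of each other.
+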
+void Landmark::set_from_xyz(Eigen::Matrix<double, 3, 1> p_FinG, bool isfej) {
+
+  /// CASE: Global 3d feature representation
+  /// CASE: Anchored 3d feature representation
+  if (_feat_representation == LandmarkRepresentation::Representation::GLOBAL_3D ||
+      _feat_representation == LandmarkRepresentation::Representation::ANCHORED_3D) {
+    if (isfej)
+      set_fej(p_FinG);
+    else
+      set_value(p_FinG);
+    return;
+  }
+
+  /// CASE: Global inverse depth feature representation
+  /// CASE: Anchored inverse depth feature representation
+  if (_feat_representation == LandmarkRepresentation::Representation::GLOBAL_FULL_INVERSE_DEPTH ||
+      _feat_representation == LandmarkRepresentation::Representation::ANCHORED_FULL_INVERSE_DEPTH) {
+
+    // Feature inverse representation
+    // NOTE: This is not the MSCKF inverse form, but the standard form
+    // NOTE: Thus we go from p_FinG and convert it to this form
+    double g_rho = 1 / p_FinG.norm();
+    double g_phi = std::acos(g_rho * p_FinG(2));
+    // double g_theta = std::asin(g_rho*p_FinG(1)/std::sin(g_phi));
+    double g_theta = std::atan2(p_FinG(1), p_FinG(0));
+    Eigen::Matrix<double, 3, 1> p_invFinG;
+    p_invFinG(0) = g_theta;
+    p_invFinG(1) = g_phi;
+    p_invFinG(2) = g_rho;
+
+    // Set our feature value
+    if (isfej)
+      set_fej(p_invFinG);
+    else
+      set_value(p_invFinG);
+    return;
+  }
+
+  /// CASE: MSCKF anchored inverse depth representation
+  if (_feat_representation == LandmarkRepresentation::Representation::ANCHORED_MSCKF_INVERSE_DEPTH) {
+
+    // MSCKF representation
+    Eigen::Matrix<double, 3, 1> p_invFinA_MSCKF;
+    p_invFinA_MSCKF(0) = p_FinG(0) / p_FinG(2);
+    p_invFinA_MSCKF(1) = p_FinG(1) / p_FinG(2);
+    p_invFinA_MSCKF(2) = 1 / p_FinG(2);
+
+    // Set our feature value
+    if (isfej)
+      set_fej(p_invFinA_MSCKF);
+    else
+      set_value(p_invFinA_MSCKF);
+    return;
+  }
+
+  /// CASE: Estimate single depth of the feature using the initial bearing
+  if (_feat_representation == LandmarkRepresentation::Representation::ANCHORED_INVERSE_DEPTH_SINGLE) {
+
+    // Get the inverse depth
+    Eigen::VectorXd temp;
+    temp.resize(1, 1);
+    temp(0) = 1.0 / p_FinG(2);
+
+    // Set our bearing vector
+    if (!isfej)
+      uv_norm_zero = 1.0 / p_FinG(2) * p_FinG;
+    else
+      uv_norm_zero_fej = 1.0 / p_FinG(2) * p_FinG;
+
+    // Set our feature value
+    if (isfej)
+      set_fej(temp);
+    else
+      set_value(temp);
+    return;
+  }
+
+  // Failure
+  assert(false);
+}
diff --git a/ov_core/src/types/Landmark.h b/ov_core/src/types/Landmark.h
index 2c0bc02e5..7d625f457 100644
--- a/ov_core/src/types/Landmark.h
+++ b/ov_core/src/types/Landmark.h
@@ -21,91 +21,82 @@
 #ifndef OV_TYPE_TYPE_LANDMARK_H
 #define OV_TYPE_TYPE_LANDMARK_H

-
-#include "Vec.h"
 #include "LandmarkRepresentation.h"
+#include "Vec.h"
 #include "utils/colors.h"

-
 namespace ov_type {

-
-    /**
-     * @brief Type that implements a persistent SLAM feature.
-     *
-     * We store the feature ID that should match the IDs in the trackers.
-     * Additionally if this is an anchored representation we store what clone timestamp this is anchored from and what camera.
-     * If this features should be marginalized its flag can be set and during cleanup it will be removed.
-     */
-    class Landmark : public Vec {
-
-    public:
-
-        /// Default constructor (feature is a Vec of size 3 or Vec of size 1)
-        Landmark(int dim) : Vec(dim) {}
-
-        /// Feature ID of this landmark (corresponds to frontend id)
-        size_t _featid;
-
-        /// What unique camera stream this slam feature was observed from
-        int _unique_camera_id = -1;
-
-        /// What camera ID our pose is anchored in!! By default the first measurement is the anchor.
- int _anchor_cam_id = -1; - - /// Timestamp of anchor clone - double _anchor_clone_timestamp = -1; - - /// Boolean if this landmark has had at least one anchor change - bool has_had_anchor_change = false; - - /// Boolean if this landmark should be marginalized out - bool should_marg = false; - - /// First normalized uv coordinate bearing of this measurement (used for single depth representation) - Eigen::Vector3d uv_norm_zero; - - /// First estimate normalized uv coordinate bearing of this measurement (used for single depth representation) - Eigen::Vector3d uv_norm_zero_fej; - - /// What feature representation this feature currently has - LandmarkRepresentation::Representation _feat_representation; - - /** - * @brief Overrides the default vector update rule - * We want to selectively update the FEJ value if we are using an anchored representation. - * @param dx Additive error state correction - */ - void update(const Eigen::VectorXd& dx) override { - // Update estimate - assert(dx.rows() == _size); - set_value(_value+dx); - // Ensure we are not near zero in the z-direction - if (LandmarkRepresentation::is_relative_representation(_feat_representation) && _value(_value.rows()-1) < 1e-8) { - printf(YELLOW "WARNING DEPTH %.8f BECAME CLOSE TO ZERO IN UPDATE!!!\n" RESET, _value(_value.rows()-1)); - should_marg = true; - } - } - - /** - * @brief Will return the position of the feature in the global frame of reference. - * @param getfej Set to true to get the landmark FEJ value - * @return Position of feature either in global or anchor frame - */ - Eigen::Matrix get_xyz(bool getfej) const; - - - /** - * @brief Will set the current value based on the representation. - * @param p_FinG Position of the feature either in global or anchor frame - * @param isfej Set to true to set the landmark FEJ value - */ - void set_from_xyz(Eigen::Matrix p_FinG, bool isfej); - - - }; -} - - - -#endif //OV_TYPE_TYPE_LANDMARK_H +/** + * @brief Type that implements a persistent SLAM feature. + * + * We store the feature ID that should match the IDs in the trackers. + * Additionally if this is an anchored representation we store what clone timestamp this is anchored from and what camera. + * If this features should be marginalized its flag can be set and during cleanup it will be removed. + */ +class Landmark : public Vec { + +public: + /// Default constructor (feature is a Vec of size 3 or Vec of size 1) + Landmark(int dim) : Vec(dim) {} + + /// Feature ID of this landmark (corresponds to frontend id) + size_t _featid; + + /// What unique camera stream this slam feature was observed from + int _unique_camera_id = -1; + + /// What camera ID our pose is anchored in!! By default the first measurement is the anchor. 
+ int _anchor_cam_id = -1; + + /// Timestamp of anchor clone + double _anchor_clone_timestamp = -1; + + /// Boolean if this landmark has had at least one anchor change + bool has_had_anchor_change = false; + + /// Boolean if this landmark should be marginalized out + bool should_marg = false; + + /// First normalized uv coordinate bearing of this measurement (used for single depth representation) + Eigen::Vector3d uv_norm_zero; + + /// First estimate normalized uv coordinate bearing of this measurement (used for single depth representation) + Eigen::Vector3d uv_norm_zero_fej; + + /// What feature representation this feature currently has + LandmarkRepresentation::Representation _feat_representation; + + /** + * @brief Overrides the default vector update rule + * We want to selectively update the FEJ value if we are using an anchored representation. + * @param dx Additive error state correction + */ + void update(const Eigen::VectorXd &dx) override { + // Update estimate + assert(dx.rows() == _size); + set_value(_value + dx); + // Ensure we are not near zero in the z-direction + if (LandmarkRepresentation::is_relative_representation(_feat_representation) && _value(_value.rows() - 1) < 1e-8) { + printf(YELLOW "WARNING DEPTH %.8f BECAME CLOSE TO ZERO IN UPDATE!!!\n" RESET, _value(_value.rows() - 1)); + should_marg = true; + } + } + + /** + * @brief Will return the position of the feature in the global frame of reference. + * @param getfej Set to true to get the landmark FEJ value + * @return Position of feature either in global or anchor frame + */ + Eigen::Matrix get_xyz(bool getfej) const; + + /** + * @brief Will set the current value based on the representation. + * @param p_FinG Position of the feature either in global or anchor frame + * @param isfej Set to true to set the landmark FEJ value + */ + void set_from_xyz(Eigen::Matrix p_FinG, bool isfej); +}; +} // namespace ov_type + +#endif // OV_TYPE_TYPE_LANDMARK_H diff --git a/ov_core/src/types/LandmarkRepresentation.h b/ov_core/src/types/LandmarkRepresentation.h index d42c1d739..7d6dc2820 100644 --- a/ov_core/src/types/LandmarkRepresentation.h +++ b/ov_core/src/types/LandmarkRepresentation.h @@ -21,89 +21,90 @@ #ifndef OV_TYPE_LANDMARKREPRESENTATION_H #define OV_TYPE_LANDMARKREPRESENTATION_H - - namespace ov_type { - /** - * @brief Class has useful feature representation types - */ - class LandmarkRepresentation - { - - public: - - /** - * @brief What feature representation our state can use - */ - enum Representation { - GLOBAL_3D, - GLOBAL_FULL_INVERSE_DEPTH, - ANCHORED_3D, - ANCHORED_FULL_INVERSE_DEPTH, - ANCHORED_MSCKF_INVERSE_DEPTH, - ANCHORED_INVERSE_DEPTH_SINGLE, - UNKNOWN - }; - - - /** - * @brief Returns a string representation of this enum value. - * Used to debug print out what the user has selected as the representation. 
- * @param feat_representation Representation we want to check - * @return String version of the passed enum - */ - static inline std::string as_string(Representation feat_representation) { - if(feat_representation==GLOBAL_3D) return "GLOBAL_3D"; - if(feat_representation==GLOBAL_FULL_INVERSE_DEPTH) return "GLOBAL_FULL_INVERSE_DEPTH"; - if(feat_representation==ANCHORED_3D) return "ANCHORED_3D"; - if(feat_representation==ANCHORED_FULL_INVERSE_DEPTH) return "ANCHORED_FULL_INVERSE_DEPTH"; - if(feat_representation==ANCHORED_MSCKF_INVERSE_DEPTH) return "ANCHORED_MSCKF_INVERSE_DEPTH"; - if(feat_representation==ANCHORED_INVERSE_DEPTH_SINGLE) return "ANCHORED_INVERSE_DEPTH_SINGLE"; - return "UNKNOWN"; - } - - /** - * @brief Returns a string representation of this enum value. - * Used to debug print out what the user has selected as the representation. - * @param feat_representation String we want to find the enum of - * @return Representation, will be "unknown" if we coun't parse it - */ - static inline Representation from_string(const std::string& feat_representation) { - if(feat_representation=="GLOBAL_3D") return GLOBAL_3D; - if(feat_representation=="GLOBAL_FULL_INVERSE_DEPTH") return GLOBAL_FULL_INVERSE_DEPTH; - if(feat_representation=="ANCHORED_3D") return ANCHORED_3D; - if(feat_representation=="ANCHORED_FULL_INVERSE_DEPTH") return ANCHORED_FULL_INVERSE_DEPTH; - if(feat_representation=="ANCHORED_MSCKF_INVERSE_DEPTH") return ANCHORED_MSCKF_INVERSE_DEPTH; - if(feat_representation=="ANCHORED_INVERSE_DEPTH_SINGLE") return ANCHORED_INVERSE_DEPTH_SINGLE; - return UNKNOWN; - } - - /** - * @brief Helper function that checks if the passed feature representation is a relative or global - * @param feat_representation Representation we want to check - * @return True if it is a relative representation - */ - static inline bool is_relative_representation(Representation feat_representation) { - return (feat_representation == Representation::ANCHORED_3D || - feat_representation == Representation::ANCHORED_FULL_INVERSE_DEPTH || - feat_representation == Representation::ANCHORED_MSCKF_INVERSE_DEPTH || - feat_representation == Representation::ANCHORED_INVERSE_DEPTH_SINGLE); - } - - private: - - /** - * All function in this class should be static. - * Thus an instance of this class cannot be created. - */ - LandmarkRepresentation() {}; - - }; - - - -} - - -#endif //OV_TYPE_LANDMARKREPRESENTATION_H \ No newline at end of file +/** + * @brief Class has useful feature representation types + */ +class LandmarkRepresentation { + +public: + /** + * @brief What feature representation our state can use + */ + enum Representation { + GLOBAL_3D, + GLOBAL_FULL_INVERSE_DEPTH, + ANCHORED_3D, + ANCHORED_FULL_INVERSE_DEPTH, + ANCHORED_MSCKF_INVERSE_DEPTH, + ANCHORED_INVERSE_DEPTH_SINGLE, + UNKNOWN + }; + + /** + * @brief Returns a string representation of this enum value. + * Used to debug print out what the user has selected as the representation. 
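+   *
+   * For example (illustrative):
+   * @code
+   * auto rep = LandmarkRepresentation::from_string("ANCHORED_MSCKF_INVERSE_DEPTH");
+   * printf("using representation %s\n", LandmarkRepresentation::as_string(rep).c_str());
+   * @endcode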
+   * @param feat_representation Representation we want to check
+   * @return String version of the passed enum
+   */
+  static inline std::string as_string(Representation feat_representation) {
+    if (feat_representation == GLOBAL_3D)
+      return "GLOBAL_3D";
+    if (feat_representation == GLOBAL_FULL_INVERSE_DEPTH)
+      return "GLOBAL_FULL_INVERSE_DEPTH";
+    if (feat_representation == ANCHORED_3D)
+      return "ANCHORED_3D";
+    if (feat_representation == ANCHORED_FULL_INVERSE_DEPTH)
+      return "ANCHORED_FULL_INVERSE_DEPTH";
+    if (feat_representation == ANCHORED_MSCKF_INVERSE_DEPTH)
+      return "ANCHORED_MSCKF_INVERSE_DEPTH";
+    if (feat_representation == ANCHORED_INVERSE_DEPTH_SINGLE)
+      return "ANCHORED_INVERSE_DEPTH_SINGLE";
+    return "UNKNOWN";
+  }
+
+  /**
+   * @brief Returns the enum value that corresponds to the passed string.
+   * Used to parse what the user has selected as the representation.
+   * @param feat_representation String we want to find the enum of
+   * @return Representation, will be UNKNOWN if we couldn't parse it
+   */
+  static inline Representation from_string(const std::string &feat_representation) {
+    if (feat_representation == "GLOBAL_3D")
+      return GLOBAL_3D;
+    if (feat_representation == "GLOBAL_FULL_INVERSE_DEPTH")
+      return GLOBAL_FULL_INVERSE_DEPTH;
+    if (feat_representation == "ANCHORED_3D")
+      return ANCHORED_3D;
+    if (feat_representation == "ANCHORED_FULL_INVERSE_DEPTH")
+      return ANCHORED_FULL_INVERSE_DEPTH;
+    if (feat_representation == "ANCHORED_MSCKF_INVERSE_DEPTH")
+      return ANCHORED_MSCKF_INVERSE_DEPTH;
+    if (feat_representation == "ANCHORED_INVERSE_DEPTH_SINGLE")
+      return ANCHORED_INVERSE_DEPTH_SINGLE;
+    return UNKNOWN;
+  }
+
+  /**
+   * @brief Helper function that checks if the passed feature representation is a relative or global
+   * @param feat_representation Representation we want to check
+   * @return True if it is a relative representation
+   */
+  static inline bool is_relative_representation(Representation feat_representation) {
+    return (feat_representation == Representation::ANCHORED_3D || feat_representation == Representation::ANCHORED_FULL_INVERSE_DEPTH ||
+            feat_representation == Representation::ANCHORED_MSCKF_INVERSE_DEPTH ||
+            feat_representation == Representation::ANCHORED_INVERSE_DEPTH_SINGLE);
+  }
+
+private:
+  /**
+   * All functions in this class should be static.
+   * Thus an instance of this class cannot be created.
+   */
+  LandmarkRepresentation(){};
+};
+
+} // namespace ov_type
+
+#endif // OV_TYPE_LANDMARKREPRESENTATION_H
\ No newline at end of file
diff --git a/ov_core/src/types/PoseJPL.h b/ov_core/src/types/PoseJPL.h
index 21e4ea274..37f266335 100644
--- a/ov_core/src/types/PoseJPL.h
+++ b/ov_core/src/types/PoseJPL.h
@@ -21,197 +21,173 @@
 #ifndef OV_TYPE_TYPE_POSEJPL_H
 #define OV_TYPE_TYPE_POSEJPL_H

-#include "Vec.h"
 #include "JPLQuat.h"
+#include "Vec.h"
 #include "utils/quat_ops.h"

-
 namespace ov_type {

+/**
+ * @brief Derived Type class that implements a 6 d.o.f pose
+ *
+ * Internally we use a JPLQuat quaternion representation for the orientation and 3D Vec position.
+ * Please see JPLQuat for details on its update procedure and its left multiplicative error.
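+ *
+ * A short usage sketch (illustrative; the 6x1 `dx` is an orientation correction
+ * followed by a position correction):
+ * @code
+ * PoseJPL pose;                    // identity orientation, zero position
+ * Eigen::VectorXd dx = Eigen::VectorXd::Zero(6);
+ * dx(5) = 0.1;                     // move 0.1m along the z-axis
+ * pose.update(dx);
+ * Eigen::Matrix<double, 3, 1> p = pose.pos(); // (0, 0, 0.1)
+ * @endcode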
+ */
+class PoseJPL : public Type {
+
+public:
+  PoseJPL() : Type(6) {
+
+    // Initialize subvariables
+    _q = std::shared_ptr<JPLQuat>(new JPLQuat());
+    _p = std::shared_ptr<Vec>(new Vec(3));
+
+    // Set our default state value
+    Eigen::Matrix<double, 7, 1> pose0;
+    pose0.setZero();
+    pose0(3) = 1.0;
+    set_value_internal(pose0);
+    set_fej_internal(pose0);
+  }
+
+  ~PoseJPL() {}
+
+  /**
+   * @brief Sets id used to track location of variable in the filter covariance
+   *
+   * Note that we update the sub-variables also.
+   *
+   * @param new_id entry in filter covariance corresponding to this variable
+   */
+  void set_local_id(int new_id) override {
+    _id = new_id;
+    _q->set_local_id(new_id);
+    _p->set_local_id(new_id + ((new_id != -1) ? _q->size() : 0));
+  }
+
+  /**
+   * @brief Update q and p using the JPLQuat update for orientation and vector update for position
+   *
+   * @param dx Correction vector (orientation then position)
+   */
+  void update(const Eigen::VectorXd &dx) override {
+
+    assert(dx.rows() == _size);
+
+    Eigen::Matrix<double, 7, 1> newX = _value;
+
+    Eigen::Matrix<double, 4, 1> dq;
+    dq << .5 * dx.block(0, 0, 3, 1), 1.0;
+    dq = ov_core::quatnorm(dq);
+
+    // Update orientation
+    newX.block(0, 0, 4, 1) = ov_core::quat_multiply(dq, quat());
+
+    // Update position
+    newX.block(4, 0, 3, 1) += dx.block(3, 0, 3, 1);
+
+    set_value(newX);
+  }
+
+  /**
+   * @brief Sets the value of the estimate
+   * @param new_value New value we should set
+   */
+  void set_value(const Eigen::MatrixXd &new_value) override { set_value_internal(new_value); }
+
+  /**
+   * @brief Sets the value of the first estimate
+   * @param new_value New value we should set
+   */
+  void set_fej(const Eigen::MatrixXd &new_value) override { set_fej_internal(new_value); }
+
+  std::shared_ptr<Type> clone() override {
+    auto Clone = std::shared_ptr<PoseJPL>(new PoseJPL());
+    Clone->set_value(value());
+    Clone->set_fej(fej());
+    return Clone;
+  }
+
+  std::shared_ptr<Type> check_if_subvariable(const std::shared_ptr<Type> check) override {
+    if (check == _q) {
+      return _q;
+    } else if (check == _p) {
+      return _p;
+    }
+    return nullptr;
+  }
+
+  /// Rotation access
+  Eigen::Matrix<double, 3, 3> Rot() const { return _q->Rot(); }
+
+  /// FEJ Rotation access
+  Eigen::Matrix<double, 3, 3> Rot_fej() const { return _q->Rot_fej(); }
+
+  /// Rotation access as quaternion
+  Eigen::Matrix<double, 4, 1> quat() const { return _q->value(); }
+
+  /// FEJ Rotation access as quaternion
+  Eigen::Matrix<double, 4, 1> quat_fej() const { return _q->fej(); }
+
+  /// Position access
+  Eigen::Matrix<double, 3, 1> pos() const { return _p->value(); }
+
+  /// FEJ position access
+  Eigen::Matrix<double, 3, 1> pos_fej() const { return _p->fej(); }
+
+  /// Quaternion type access
+  std::shared_ptr<JPLQuat> q() { return _q; }
+
+  /// Position type access
+  std::shared_ptr<Vec> p() { return _p; }
+
+protected:
+  /// Subvariable containing orientation
+  std::shared_ptr<JPLQuat> _q;
+
+  /// Subvariable containing position
+  std::shared_ptr<Vec> _p;
+
+  /**
+   * @brief Sets the value of the estimate
+   * @param new_value New value we should set
+   */
+  void set_value_internal(const Eigen::MatrixXd &new_value) {
+
+    assert(new_value.rows() == 7);
+    assert(new_value.cols() == 1);
+
+    // Set orientation value
+    _q->set_value(new_value.block(0, 0, 4, 1));
+
+    // Set position value
+    _p->set_value(new_value.block(4, 0, 3, 1));
+
+    _value = new_value;
+  }
+
+  /**
+   * @brief Sets the value of the first estimate
+   * @param new_value New value we should set
+   */
+  void set_fej_internal(const Eigen::MatrixXd &new_value) {
+
+    assert(new_value.rows() == 7);
+    assert(new_value.cols() == 1);
+
+    // Set orientation fej value
+    _q->set_fej(new_value.block(0, 0, 4, 1));
+
-    /**
-     * @brief
Derived Type class that implements a 6 d.o.f pose - * - * Internally we use a JPLQuat quaternion representation for the orientation and 3D Vec position. - * Please see JPLQuat for details on its update procedure and its left multiplicative error. - */ - class PoseJPL : public Type { - - public: - - PoseJPL() : Type(6) { - - // Initialize subvariables - _q = std::shared_ptr(new JPLQuat()); - _p = std::shared_ptr(new Vec(3)); - - // Set our default state value - Eigen::Matrix pose0; - pose0.setZero(); - pose0(3) = 1.0; - set_value_internal(pose0); - set_fej_internal(pose0); - - } - - ~PoseJPL() { } - - /** - * @brief Sets id used to track location of variable in the filter covariance - * - * Note that we update the sub-variables also. - * - * @param new_id entry in filter covariance corresponding to this variable - */ - void set_local_id(int new_id) override { - _id = new_id; - _q->set_local_id(new_id); - _p->set_local_id(new_id+((new_id!=-1)? _q->size() : 0)); - } - - /** - * @brief Update q and p using a the JPLQuat update for orientation and vector update for position - * - * @param dx Correction vector (orientation then position) - */ - void update(const Eigen::VectorXd& dx) override { - - assert(dx.rows() == _size); - - Eigen::Matrix newX = _value; - - Eigen::Matrix dq; - dq << .5 * dx.block(0, 0, 3, 1), 1.0; - dq = ov_core::quatnorm(dq); - - //Update orientation - newX.block(0, 0, 4, 1) = ov_core::quat_multiply(dq, quat()); - - //Update position - newX.block(4, 0, 3, 1) += dx.block(3, 0, 3, 1); - - set_value(newX); - - } - - /** - * @brief Sets the value of the estimate - * @param new_value New value we should set - */ - void set_value(const Eigen::MatrixXd& new_value) override { - set_value_internal(new_value); - } - - /** - * @brief Sets the value of the first estimate - * @param new_value New value we should set - */ - void set_fej(const Eigen::MatrixXd& new_value) override { - set_fej_internal(new_value); - } - - std::shared_ptr clone() override { - auto Clone = std::shared_ptr(new PoseJPL()); - Clone->set_value(value()); - Clone->set_fej(fej()); - return Clone; - } - - std::shared_ptr check_if_subvariable(const std::shared_ptr check) override { - if (check == _q) { - return _q; - } else if (check == _p) { - return _p; - } - return nullptr; - } - - /// Rotation access - Eigen::Matrix Rot() const { - return _q->Rot(); - } - - /// FEJ Rotation access - Eigen::Matrix Rot_fej() const { - return _q->Rot_fej();; - } - - /// Rotation access as quaternion - Eigen::Matrix quat() const { - return _q->value(); - } - - /// FEJ Rotation access as quaternion - Eigen::Matrix quat_fej() const { - return _q->fej(); - } - - /// Position access - Eigen::Matrix pos() const { - return _p->value(); - } - - // FEJ position access - Eigen::Matrix pos_fej() const { - return _p->fej(); - } - - // Quaternion type access - std::shared_ptr q() { - return _q; - } - - // Position type access - std::shared_ptr p() { - return _p; - } - - protected: - - /// Subvariable containing orientation - std::shared_ptr _q; - - /// Subvariable containing position - std::shared_ptr _p; + // Set position fej value + _p->set_fej(new_value.block(4, 0, 3, 1)); - /** - * @brief Sets the value of the estimate - * @param new_value New value we should set - */ - void set_value_internal(const Eigen::MatrixXd& new_value) { - - assert(new_value.rows() == 7); - assert(new_value.cols() == 1); - - //Set orientation value - _q->set_value(new_value.block(0, 0, 4, 1)); - - //Set position value - _p->set_value(new_value.block(4, 0, 3, 1)); - - 
_value = new_value; - } - - /** - * @brief Sets the value of the first estimate - * @param new_value New value we should set - */ - void set_fej_internal(const Eigen::MatrixXd& new_value) { - - assert(new_value.rows() == 7); - assert(new_value.cols() == 1); - - //Set orientation fej value - _q->set_fej(new_value.block(0, 0, 4, 1)); - - //Set position fej value - _p->set_fej(new_value.block(4, 0, 3, 1)); - - _fej = new_value; - } + _fej = new_value; + } +}; - }; - -} +} // namespace ov_type -#endif //OV_TYPE_TYPE_POSEJPL_H \ No newline at end of file +#endif // OV_TYPE_TYPE_POSEJPL_H \ No newline at end of file diff --git a/ov_core/src/types/Type.h b/ov_core/src/types/Type.h index 102175211..432fed527 100644 --- a/ov_core/src/types/Type.h +++ b/ov_core/src/types/Type.h @@ -21,138 +21,116 @@ #ifndef OV_TYPE_TYPE_BASE_H #define OV_TYPE_TYPE_BASE_H - -#include #include - +#include namespace ov_type { - - /** - * @brief Base class for estimated variables. - * - * This class is used how variables are represented or updated (e.g., vectors or quaternions). - * Each variable is defined by its error state size and its location in the covariance matrix. - * We additionally require all sub-types to have a update procedure. - */ - class Type { - - public: - - /** - * @brief Default constructor for our Type - * - * @param size_ degrees of freedom of variable (i.e., the size of the error state) - */ - Type(int size_) { - _size = size_; - } - - virtual ~Type() {}; - - /** - * @brief Sets id used to track location of variable in the filter covariance - * - * Note that the minimum ID is -1 which says that the state is not in our covariance. - * If the ID is larger than -1 then this is the index location in the covariance matrix. - * - * @param new_id entry in filter covariance corresponding to this variable - */ - virtual void set_local_id(int new_id) { - _id = new_id; - } - - /** - * @brief Access to variable id (i.e. its location in the covariance) - */ - int id() { - return _id; - } - - /** - * @brief Access to variable size (i.e. its error state size) - */ - int size() { - return _size; - } - - /** - * @brief Update variable due to perturbation of error state - * - * @param dx Perturbation used to update the variable through a defined "boxplus" operation - */ - virtual void update(const Eigen::VectorXd& dx) = 0; - - /** - * @brief Access variable's estimate - */ - virtual const Eigen::MatrixXd& value() const { - return _value; - } - - /** - * @brief Access variable's first-estimate - */ - virtual const Eigen::MatrixXd& fej() const { - return _fej; - } - - /** - * @brief Overwrite value of state's estimate - * @param new_value New value that will overwrite state's value - */ - virtual void set_value(const Eigen::MatrixXd& new_value) { - assert(_value.rows()==new_value.rows()); - assert(_value.cols()==new_value.cols()); - _value = new_value; - } - - /** - * @brief Overwrite value of first-estimate - * @param new_value New value that will overwrite state's fej - */ - virtual void set_fej(const Eigen::MatrixXd& new_value) { - assert(_fej.rows()==new_value.rows()); - assert(_fej.cols()==new_value.cols()); - _fej = new_value; - } - - /** - * @brief Create a clone of this variable - */ - virtual std::shared_ptr clone() = 0; - - /** - * @brief Determine if pass variable is a sub-variable - * - * If the passed variable is a sub-variable or the current variable this will return it. - * Otherwise it will return a nullptr, meaning that it was unable to be found. 
-     *
-     * @param check Type pointer to compare our subvariables to
-     */
-    virtual std::shared_ptr<Type> check_if_subvariable(const std::shared_ptr<Type> check) {
-        return nullptr;
-    }
-
-
-    protected:
-
-    /// First-estimate
-    Eigen::MatrixXd _fej;
-
-    /// Current best estimate
-    Eigen::MatrixXd _value;
-
-    /// Location of error state in covariance
-    int _id = -1;
-
-    /// Dimension of error state
-    int _size = -1;
-
-
-    };
-
-}
-
-#endif //OV_TYPE_TYPE_BASE_H
\ No newline at end of file
+/**
+ * @brief Base class for estimated variables.
+ *
+ * This class defines how variables are represented and updated (e.g., as vectors or quaternions).
+ * Each variable is defined by its error state size and its location in the covariance matrix.
+ * We additionally require all sub-types to have an update procedure.
+ */
+class Type {
+
+public:
+  /**
+   * @brief Default constructor for our Type
+   *
+   * @param size_ degrees of freedom of variable (i.e., the size of the error state)
+   */
+  Type(int size_) { _size = size_; }
+
+  virtual ~Type(){};
+
+  /**
+   * @brief Sets id used to track location of variable in the filter covariance
+   *
+   * Note that the minimum ID is -1 which says that the state is not in our covariance.
+   * If the ID is larger than -1 then this is the index location in the covariance matrix.
+   *
+   * @param new_id entry in filter covariance corresponding to this variable
+   */
+  virtual void set_local_id(int new_id) { _id = new_id; }
+
+  /**
+   * @brief Access to variable id (i.e. its location in the covariance)
+   */
+  int id() { return _id; }
+
+  /**
+   * @brief Access to variable size (i.e. its error state size)
+   */
+  int size() { return _size; }
+
+  /**
+   * @brief Update variable due to perturbation of error state
+   *
+   * @param dx Perturbation used to update the variable through a defined "boxplus" operation
+   */
+  virtual void update(const Eigen::VectorXd &dx) = 0;
+
+  /**
+   * @brief Access variable's estimate
+   */
+  virtual const Eigen::MatrixXd &value() const { return _value; }
+
+  /**
+   * @brief Access variable's first-estimate
+   */
+  virtual const Eigen::MatrixXd &fej() const { return _fej; }
+
+  /**
+   * @brief Overwrite value of state's estimate
+   * @param new_value New value that will overwrite state's value
+   */
+  virtual void set_value(const Eigen::MatrixXd &new_value) {
+    assert(_value.rows() == new_value.rows());
+    assert(_value.cols() == new_value.cols());
+    _value = new_value;
+  }
+
+  /**
+   * @brief Overwrite value of first-estimate
+   * @param new_value New value that will overwrite state's fej
+   */
+  virtual void set_fej(const Eigen::MatrixXd &new_value) {
+    assert(_fej.rows() == new_value.rows());
+    assert(_fej.cols() == new_value.cols());
+    _fej = new_value;
+  }
+
+  /**
+   * @brief Create a clone of this variable
+   */
+  virtual std::shared_ptr<Type> clone() = 0;
+
+  /**
+   * @brief Determine if the passed variable is a sub-variable
+   *
+   * If the passed variable is a sub-variable or the current variable this will return it.
+   * Otherwise it will return a nullptr, meaning that it was unable to be found.
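+   *
+   * For example (illustrative, using the pose type defined earlier):
+   * @code
+   * auto pose = std::make_shared<PoseJPL>();
+   * auto sub = pose->check_if_subvariable(pose->q()); // returns the orientation sub-type
+   * @endcode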
diff --git a/ov_core/src/types/Vec.h b/ov_core/src/types/Vec.h
index a5f080d7a..0528461fa 100644
--- a/ov_core/src/types/Vec.h
+++ b/ov_core/src/types/Vec.h
@@ -21,52 +21,47 @@
 #ifndef OV_TYPE_TYPE_VEC_H
 #define OV_TYPE_TYPE_VEC_H
-
 #include "Type.h"
-
 namespace ov_type {
-    /**
-     * @brief Derived Type class that implements vector variables
-     */
-    class Vec : public Type {
-
-    public:
-
-        /**
-         * @brief Default constructor for Vec
-         * @param dim Size of the vector (will be same as error state)
-         */
-        Vec(int dim) : Type(dim) {
-            _value = Eigen::VectorXd::Zero(dim);
-            _fej = Eigen::VectorXd::Zero(dim);
-        }
-
-        ~Vec() {}
+/**
+ * @brief Derived Type class that implements vector variables
+ */
+class Vec : public Type {
 
-        /**
-         * @brief Implements the update operation through standard vector addition
-         * @param dx Additive error state correction
-         */
-        void update(const Eigen::VectorXd& dx) override {
-            assert(dx.rows() == _size);
-            set_value(_value + dx);
-        }
+public:
+  /**
+   * @brief Default constructor for Vec
+   * @param dim Size of the vector (will be same as error state)
+   */
+  Vec(int dim) : Type(dim) {
+    _value = Eigen::VectorXd::Zero(dim);
+    _fej = Eigen::VectorXd::Zero(dim);
+  }
 
-        /**
-         * @brief Performs all the cloning
-         */
-        std::shared_ptr<Type> clone() override {
-            auto Clone = std::shared_ptr<Type>(new Vec(_size));
-            Clone->set_value(value());
-            Clone->set_fej(fej());
-            return Clone;
-        }
+  ~Vec() {}
 
+  /**
+   * @brief Implements the update operation through standard vector addition
+   * @param dx Additive error state correction
+   */
+  void update(const Eigen::VectorXd &dx) override {
+    assert(dx.rows() == _size);
+    set_value(_value + dx);
+  }
 
-    };
+  /**
+   * @brief Performs all the cloning
+   */
+  std::shared_ptr<Type> clone() override {
+    auto Clone = std::shared_ptr<Type>(new Vec(_size));
+    Clone->set_value(value());
+    Clone->set_fej(fej());
+    return Clone;
+  }
+};
 
-}
+} // namespace ov_type
 
-#endif //OV_TYPE_TYPE_VEC_H
\ No newline at end of file
+#endif // OV_TYPE_TYPE_VEC_H
\ No newline at end of file
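Vec is the simplest concrete Type: its boxplus is plain vector addition, which is why it serves biases and other additive states. A short usage sketch (hypothetical driver code, using only the interface shown in the two files above):

#include <iostream>
#include <memory>
#include <Eigen/Eigen>
#include "Vec.h"

int main() {
  // A 3-dof vector variable (e.g., a gyro bias); both value and fej start at zero
  auto bias = std::make_shared<ov_type::Vec>(3);

  // Apply an error-state correction; for Vec, update() is simply value += dx
  Eigen::VectorXd dx(3);
  dx << 0.01, -0.02, 0.005;
  bias->update(dx);

  // clone() copies both the current estimate and the first-estimate
  std::shared_ptr<ov_type::Type> copy = bias->clone();
  std::cout << copy->value().transpose() << std::endl;
  return 0;
}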
diff --git a/ov_core/src/utils/CLI11.hpp b/ov_core/src/utils/CLI11.hpp index 8f958076a..caedc700f 100644 --- a/ov_core/src/utils/CLI11.hpp +++ b/ov_core/src/utils/CLI11.hpp
@@ -11,10 +11,10 @@ // // CLI11 1.8 Copyright (c) 2017-2019 University of Cincinnati, developed by Henry // Schreiner under NSF AWARD 1414736. All rights reserved. -// +// // Redistribution and use in source and binary forms of CLI11, with or without // modification, are permitted provided that the following conditions are met: -// +// // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice,
@@ -23,7 +23,7 @@ // 3. Neither the name of the copyright holder nor the names of its contributors // may be used to endorse or promote products derived from this software without // specific prior written permission. -// +// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@@ -35,7 +35,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Standard combined includes: #include
@@ -61,21 +60,15 @@ #include #include - // Verbatim copy from CLI/Version.hpp: - #define CLI11_VERSION_MAJOR 1 #define CLI11_VERSION_MINOR 9 #define CLI11_VERSION_PATCH 0 #define CLI11_VERSION "1.9.0" - - - // Verbatim copy from CLI/Macros.hpp: - // The following version macro is very similar to the one in PyBind11 #if !(defined(_MSC_VER) && __cplusplus == 199711L) && !defined(__INTEL_COMPILER) #if __cplusplus >= 201402L
@@ -109,12 +102,8 @@ #define CLI11_DEPRECATED(reason) __attribute__((deprecated(reason))) #endif - - - // Verbatim copy from CLI/Validators.hpp: - // C standard library // Only needed for existence checking #if defined CLI11_CPP17 && defined __has_include && !defined CLI11_HAS_FILESYSTEM #include
@@ -140,16 +129,10 @@ #include #endif - - // From CLI/Version.hpp: - - // From CLI/Macros.hpp: - - // From CLI/StringTools.hpp: namespace CLI {
@@ -161,8 +144,8 @@ namespace enums { /// output streaming for enumerations template <typename T, typename = typename std::enable_if<std::is_enum<T>::value>::type> std::ostream &operator<<(std::ostream &in, const T &item) { - // make sure this is out of the detail namespace otherwise it won't be found when needed - return in << static_cast<typename std::underlying_type<T>::type>(item); + // make sure this is out of the detail namespace otherwise it won't be found when needed + return in << static_cast<typename std::underlying_type<T>::type>(item); } } // namespace enums
@@ -177,90 +160,87 @@ constexpr int expected_max_vector_size{1 << 29}; // Based on http://stackoverflow.com/questions/236129/split-a-string-in-c /// Split a string by a delim inline std::vector<std::string> split(const std::string &s, char delim) { - std::vector<std::string> elems; - // Check to see if empty string, give consistent result - if(s.empty()) - elems.emplace_back(); - else { - std::stringstream ss; - ss.str(s); - std::string item; - while(std::getline(ss, item, delim)) { - elems.push_back(item); - } + std::vector<std::string> elems; + // Check to see if empty string, give consistent result + if (s.empty()) + elems.emplace_back(); + else { + std::stringstream ss; + ss.str(s); + std::string item; + while (std::getline(ss, item, delim)) { + elems.push_back(item); } + } + return elems; } /// Simple function to join a string template <typename T> std::string join(const T &v, std::string delim = ",") { - std::ostringstream s; - auto beg = std::begin(v); - auto end = std::end(v); - if(beg != end) - s << *beg++; - while(beg != end) { - s << delim << *beg++; - } - return s.str(); + std::ostringstream s; + auto beg = std::begin(v); + auto end = std::end(v); + if (beg != end) + s << *beg++; + while (beg != end) { + s << delim << *beg++; + } + return s.str(); } /// Simple function to join a string from processed elements -template <typename T, typename Callable, typename = typename std::enable_if<!std::is_constructible<std::string, Callable>::value>::type> +template <typename T, typename Callable, typename = typename std::enable_if<!std::is_constructible<std::string, Callable>::value>::type> std::string join(const T &v, Callable func, std::string delim = ",") { - std::ostringstream s; - auto beg = std::begin(v); - auto end = std::end(v); - if(beg != end) - s << func(*beg++); - while(beg != end) { - s << delim << func(*beg++); - } - return s.str(); + std::ostringstream s; + auto beg = std::begin(v); + auto end = std::end(v); + if (beg != end) + s << func(*beg++); + while (beg != end) { + s << delim << func(*beg++); + } + return s.str(); }
/// Join a string in reverse order template <typename T> std::string rjoin(const T &v, std::string delim = ",") { - std::ostringstream s; - for(std::size_t start = 0; start < v.size(); start++) { - if(start > 0) - s << delim; - s << v[v.size() - start - 1]; - } - return s.str(); + std::ostringstream s; + for (std::size_t start = 0; start < v.size(); start++) { + if (start > 0) + s << delim; + s << v[v.size() - start - 1]; + } + return s.str(); } // Based roughly on http://stackoverflow.com/questions/25829143/c-trim-whitespace-from-a-string /// Trim whitespace from left of string inline std::string &ltrim(std::string &str) { - auto it = std::find_if(str.begin(), str.end(), [](char ch) { return !std::isspace(ch, std::locale()); }); - str.erase(str.begin(), it); - return str; + auto it = std::find_if(str.begin(), str.end(), [](char ch) { return !std::isspace(ch, std::locale()); }); + str.erase(str.begin(), it); + return str; } /// Trim anything from left of string inline std::string &ltrim(std::string &str, const std::string &filter) { - auto it = std::find_if(str.begin(), str.end(), [&filter](char ch) { return filter.find(ch) == std::string::npos; }); - str.erase(str.begin(), it); - return str; + auto it = std::find_if(str.begin(), str.end(), [&filter](char ch) { return filter.find(ch) == std::string::npos; }); + str.erase(str.begin(), it); + return str; } /// Trim whitespace from right of string inline std::string &rtrim(std::string &str) { - auto it = std::find_if(str.rbegin(), str.rend(), [](char ch) { return !std::isspace(ch, std::locale()); }); - str.erase(it.base(), str.end()); - return str; + auto it = std::find_if(str.rbegin(), str.rend(), [](char ch) { return !std::isspace(ch, std::locale()); }); + str.erase(it.base(), str.end()); + return str; } /// Trim anything from right of string inline std::string &rtrim(std::string &str, const std::string &filter) { - auto it = - std::find_if(str.rbegin(), str.rend(), [&filter](char ch) { return filter.find(ch) == std::string::npos; }); - str.erase(it.base(), str.end()); - return str; + auto it = std::find_if(str.rbegin(), str.rend(), [&filter](char ch) { return filter.find(ch) == std::string::npos; }); + str.erase(it.base(), str.end()); + return str; } /// Trim whitespace from string
@@ -271,199 +251,186 @@ inline std::string &trim(std::string &str, const std::string filter) { return lt /// Make a copy of the string and then trim it inline std::string trim_copy(const std::string &str) { - std::string s = str; - return trim(s); + std::string s = str; + return trim(s); } /// remove quotes at the front and back of a string either '"' or '\'' inline std::string &remove_quotes(std::string &str) { - if(str.length() > 1 && (str.front() == '"' || str.front() == '\'')) { - if(str.front() == str.back()) { - str.pop_back(); - str.erase(str.begin(), str.begin() + 1); - } + if (str.length() > 1 && (str.front() == '"' || str.front() == '\'')) { + if (str.front() == str.back()) { + str.pop_back(); + str.erase(str.begin(), str.begin() + 1); } - return str; + } + return str; } /// Make a copy of the string and then trim it, any filter string can be used (any char in string is filtered) inline std::string trim_copy(const std::string &str, const std::string &filter) { - std::string s = str; - return trim(s, filter); + std::string s = str; + return trim(s, filter); } /// Print a two part "help" string inline std::ostream &format_help(std::ostream &out, std::string name, std::string description, std::size_t wid) { - name = " " + name; - out <<
std::setw(static_cast(wid)) << std::left << name; - if(!description.empty()) { - if(name.length() >= wid) - out << "\n" << std::setw(static_cast(wid)) << ""; - for(const char c : description) { - out.put(c); - if(c == '\n') { - out << std::setw(static_cast(wid)) << ""; - } - } - } - out << "\n"; - return out; + name = " " + name; + out << std::setw(static_cast(wid)) << std::left << name; + if (!description.empty()) { + if (name.length() >= wid) + out << "\n" << std::setw(static_cast(wid)) << ""; + for (const char c : description) { + out.put(c); + if (c == '\n') { + out << std::setw(static_cast(wid)) << ""; + } + } + } + out << "\n"; + return out; } /// Verify the first character of an option -template bool valid_first_char(T c) { - return std::isalnum(c, std::locale()) || c == '_' || c == '?' || c == '@'; -} +template bool valid_first_char(T c) { return std::isalnum(c, std::locale()) || c == '_' || c == '?' || c == '@'; } /// Verify following characters of an option template bool valid_later_char(T c) { return valid_first_char(c) || c == '.' || c == '-'; } /// Verify an option name inline bool valid_name_string(const std::string &str) { - if(str.empty() || !valid_first_char(str[0])) - return false; - for(auto c : str.substr(1)) - if(!valid_later_char(c)) - return false; - return true; + if (str.empty() || !valid_first_char(str[0])) + return false; + for (auto c : str.substr(1)) + if (!valid_later_char(c)) + return false; + return true; } /// Verify that str consists of letters only inline bool isalpha(const std::string &str) { - return std::all_of(str.begin(), str.end(), [](char c) { return std::isalpha(c, std::locale()); }); + return std::all_of(str.begin(), str.end(), [](char c) { return std::isalpha(c, std::locale()); }); } /// Return a lower case version of a string inline std::string to_lower(std::string str) { - std::transform(std::begin(str), std::end(str), std::begin(str), [](const std::string::value_type &x) { - return std::tolower(x, std::locale()); - }); - return str; + std::transform(std::begin(str), std::end(str), std::begin(str), + [](const std::string::value_type &x) { return std::tolower(x, std::locale()); }); + return str; } /// remove underscores from a string inline std::string remove_underscore(std::string str) { - str.erase(std::remove(std::begin(str), std::end(str), '_'), std::end(str)); - return str; + str.erase(std::remove(std::begin(str), std::end(str), '_'), std::end(str)); + return str; } /// Find and replace a substring with another substring inline std::string find_and_replace(std::string str, std::string from, std::string to) { - std::size_t start_pos = 0; + std::size_t start_pos = 0; - while((start_pos = str.find(from, start_pos)) != std::string::npos) { - str.replace(start_pos, from.length(), to); - start_pos += to.length(); - } + while ((start_pos = str.find(from, start_pos)) != std::string::npos) { + str.replace(start_pos, from.length(), to); + start_pos += to.length(); + } - return str; + return str; } /// check if the flag definitions has possible false flags -inline bool has_default_flag_values(const std::string &flags) { - return (flags.find_first_of("{!") != std::string::npos); -} +inline bool has_default_flag_values(const std::string &flags) { return (flags.find_first_of("{!") != std::string::npos); } inline void remove_default_flag_values(std::string &flags) { - auto loc = flags.find_first_of('{'); - while(loc != std::string::npos) { - auto finish = flags.find_first_of("},", loc + 1); - if((finish != std::string::npos) && (flags[finish] == '}')) { 
- flags.erase(flags.begin() + static_cast(loc), - flags.begin() + static_cast(finish) + 1); - } - loc = flags.find_first_of('{', loc + 1); + auto loc = flags.find_first_of('{'); + while (loc != std::string::npos) { + auto finish = flags.find_first_of("},", loc + 1); + if ((finish != std::string::npos) && (flags[finish] == '}')) { + flags.erase(flags.begin() + static_cast(loc), flags.begin() + static_cast(finish) + 1); } - flags.erase(std::remove(flags.begin(), flags.end(), '!'), flags.end()); + loc = flags.find_first_of('{', loc + 1); + } + flags.erase(std::remove(flags.begin(), flags.end(), '!'), flags.end()); } /// Check if a string is a member of a list of strings and optionally ignore case or ignore underscores -inline std::ptrdiff_t find_member(std::string name, - const std::vector names, - bool ignore_case = false, +inline std::ptrdiff_t find_member(std::string name, const std::vector names, bool ignore_case = false, bool ignore_underscore = false) { - auto it = std::end(names); - if(ignore_case) { - if(ignore_underscore) { - name = detail::to_lower(detail::remove_underscore(name)); - it = std::find_if(std::begin(names), std::end(names), [&name](std::string local_name) { - return detail::to_lower(detail::remove_underscore(local_name)) == name; - }); - } else { - name = detail::to_lower(name); - it = std::find_if(std::begin(names), std::end(names), [&name](std::string local_name) { - return detail::to_lower(local_name) == name; - }); - } + auto it = std::end(names); + if (ignore_case) { + if (ignore_underscore) { + name = detail::to_lower(detail::remove_underscore(name)); + it = std::find_if(std::begin(names), std::end(names), + [&name](std::string local_name) { return detail::to_lower(detail::remove_underscore(local_name)) == name; }); + } else { + name = detail::to_lower(name); + it = std::find_if(std::begin(names), std::end(names), + [&name](std::string local_name) { return detail::to_lower(local_name) == name; }); + } - } else if(ignore_underscore) { - name = detail::remove_underscore(name); - it = std::find_if(std::begin(names), std::end(names), [&name](std::string local_name) { - return detail::remove_underscore(local_name) == name; - }); - } else - it = std::find(std::begin(names), std::end(names), name); + } else if (ignore_underscore) { + name = detail::remove_underscore(name); + it = std::find_if(std::begin(names), std::end(names), + [&name](std::string local_name) { return detail::remove_underscore(local_name) == name; }); + } else + it = std::find(std::begin(names), std::end(names), name); - return (it != std::end(names)) ? (it - std::begin(names)) : (-1); + return (it != std::end(names)) ? 
(it - std::begin(names)) : (-1); } /// Find a trigger string and call a modify callable function that takes the current string and starting position of the /// trigger and returns the position in the string to search for the next trigger string template inline std::string find_and_modify(std::string str, std::string trigger, Callable modify) { - std::size_t start_pos = 0; - while((start_pos = str.find(trigger, start_pos)) != std::string::npos) { - start_pos = modify(str, start_pos); - } - return str; + std::size_t start_pos = 0; + while ((start_pos = str.find(trigger, start_pos)) != std::string::npos) { + start_pos = modify(str, start_pos); + } + return str; } /// Split a string '"one two" "three"' into 'one two', 'three' /// Quote characters can be ` ' or " inline std::vector split_up(std::string str, char delimiter = '\0') { - const std::string delims("\'\"`"); - auto find_ws = [delimiter](char ch) { - return (delimiter == '\0') ? (std::isspace(ch, std::locale()) != 0) : (ch == delimiter); - }; - trim(str); - - std::vector output; - bool embeddedQuote = false; - char keyChar = ' '; - while(!str.empty()) { - if(delims.find_first_of(str[0]) != std::string::npos) { - keyChar = str[0]; - auto end = str.find_first_of(keyChar, 1); - while((end != std::string::npos) && (str[end - 1] == '\\')) { // deal with escaped quotes - end = str.find_first_of(keyChar, end + 1); - embeddedQuote = true; - } - if(end != std::string::npos) { - output.push_back(str.substr(1, end - 1)); - str = str.substr(end + 1); - } else { - output.push_back(str.substr(1)); - str = ""; - } - } else { - auto it = std::find_if(std::begin(str), std::end(str), find_ws); - if(it != std::end(str)) { - std::string value = std::string(str.begin(), it); - output.push_back(value); - str = std::string(it + 1, str.end()); - } else { - output.push_back(str); - str = ""; - } - } - // transform any embedded quotes into the regular character - if(embeddedQuote) { - output.back() = find_and_replace(output.back(), std::string("\\") + keyChar, std::string(1, keyChar)); - embeddedQuote = false; - } - trim(str); + const std::string delims("\'\"`"); + auto find_ws = [delimiter](char ch) { return (delimiter == '\0') ? 
(std::isspace(ch, std::locale()) != 0) : (ch == delimiter); }; + trim(str); + + std::vector output; + bool embeddedQuote = false; + char keyChar = ' '; + while (!str.empty()) { + if (delims.find_first_of(str[0]) != std::string::npos) { + keyChar = str[0]; + auto end = str.find_first_of(keyChar, 1); + while ((end != std::string::npos) && (str[end - 1] == '\\')) { // deal with escaped quotes + end = str.find_first_of(keyChar, end + 1); + embeddedQuote = true; + } + if (end != std::string::npos) { + output.push_back(str.substr(1, end - 1)); + str = str.substr(end + 1); + } else { + output.push_back(str.substr(1)); + str = ""; + } + } else { + auto it = std::find_if(std::begin(str), std::end(str), find_ws); + if (it != std::end(str)) { + std::string value = std::string(str.begin(), it); + output.push_back(value); + str = std::string(it + 1, str.end()); + } else { + output.push_back(str); + str = ""; + } + } + // transform any embedded quotes into the regular character + if (embeddedQuote) { + output.back() = find_and_replace(output.back(), std::string("\\") + keyChar, std::string(1, keyChar)); + embeddedQuote = false; } - return output; + trim(str); + } + return output; } /// Add a leader to the beginning of all new lines (nothing is added @@ -471,15 +438,15 @@ inline std::vector split_up(std::string str, char delimiter = '\0') /// /// Can't use Regex, or this would be a subs. inline std::string fix_newlines(const std::string &leader, std::string input) { - std::string::size_type n = 0; - while(n != std::string::npos && n < input.size()) { - n = input.find('\n', n); - if(n != std::string::npos) { - input = input.substr(0, n + 1) + leader + input.substr(n + 1); - n += leader.size(); - } + std::string::size_type n = 0; + while (n != std::string::npos && n < input.size()) { + n = input.find('\n', n); + if (n != std::string::npos) { + input = input.substr(0, n + 1) + leader + input.substr(n + 1); + n += leader.size(); } - return input; + } + return input; } /// This function detects an equal or colon followed by an escaped quote after an argument @@ -487,27 +454,27 @@ inline std::string fix_newlines(const std::string &leader, std::string input) { /// to allow the split up function to work properly and is intended to be used with the find_and_modify function /// the return value is the offset+1 which is required by the find_and_modify function. inline std::size_t escape_detect(std::string &str, std::size_t offset) { - auto next = str[offset + 1]; - if((next == '\"') || (next == '\'') || (next == '`')) { - auto astart = str.find_last_of("-/ \"\'`", offset - 1); - if(astart != std::string::npos) { - if(str[astart] == ((str[offset] == '=') ? '-' : '/')) - str[offset] = ' '; // interpret this as a space so the split_up works properly - } + auto next = str[offset + 1]; + if ((next == '\"') || (next == '\'') || (next == '`')) { + auto astart = str.find_last_of("-/ \"\'`", offset - 1); + if (astart != std::string::npos) { + if (str[astart] == ((str[offset] == '=') ? '-' : '/')) + str[offset] = ' '; // interpret this as a space so the split_up works properly } - return offset + 1; + } + return offset + 1; } /// Add quotes if the string contains spaces inline std::string &add_quotes_if_needed(std::string &str) { - if((str.front() != '"' && str.front() != '\'') || str.front() != str.back()) { - char quote = str.find('"') < str.find('\'') ? 
'\'' : '"'; - if(str.find(' ') != std::string::npos) { - str.insert(0, 1, quote); - str.append(1, quote); - } + if ((str.front() != '"' && str.front() != '\'') || str.front() != str.back()) { + char quote = str.find('"') < str.find('\'') ? '\'' : '"'; + if (str.find(' ') != std::string::npos) { + str.insert(0, 1, quote); + str.append(1, quote); } - return str; + } + return str; } } // namespace detail @@ -520,40 +487,39 @@ namespace CLI { // Use one of these on all error classes. // These are temporary and are undef'd at the end of this file. -#define CLI11_ERROR_DEF(parent, name) \ - protected: \ - name(std::string ename, std::string msg, int exit_code) : parent(std::move(ename), std::move(msg), exit_code) {} \ - name(std::string ename, std::string msg, ExitCodes exit_code) \ - : parent(std::move(ename), std::move(msg), exit_code) {} \ - \ - public: \ - name(std::string msg, ExitCodes exit_code) : parent(#name, std::move(msg), exit_code) {} \ - name(std::string msg, int exit_code) : parent(#name, std::move(msg), exit_code) {} +#define CLI11_ERROR_DEF(parent, name) \ +protected: \ + name(std::string ename, std::string msg, int exit_code) : parent(std::move(ename), std::move(msg), exit_code) {} \ + name(std::string ename, std::string msg, ExitCodes exit_code) : parent(std::move(ename), std::move(msg), exit_code) {} \ + \ +public: \ + name(std::string msg, ExitCodes exit_code) : parent(#name, std::move(msg), exit_code) {} \ + name(std::string msg, int exit_code) : parent(#name, std::move(msg), exit_code) {} // This is added after the one above if a class is used directly and builds its own message -#define CLI11_ERROR_SIMPLE(name) \ - explicit name(std::string msg) : name(#name, msg, ExitCodes::name) {} +#define CLI11_ERROR_SIMPLE(name) \ + explicit name(std::string msg) : name(#name, msg, ExitCodes::name) {} /// These codes are part of every error in CLI. They can be obtained from e using e.exit_code or as a quick shortcut, /// int values from e.get_error_code(). 
enum class ExitCodes { - Success = 0, - IncorrectConstruction = 100, - BadNameString, - OptionAlreadyAdded, - FileError, - ConversionError, - ValidationError, - RequiredError, - RequiresError, - ExcludesError, - ExtrasError, - ConfigError, - InvalidError, - HorribleError, - OptionNotFound, - ArgumentMismatch, - BaseClass = 127 + Success = 0, + IncorrectConstruction = 100, + BadNameString, + OptionAlreadyAdded, + FileError, + ConversionError, + ValidationError, + RequiredError, + RequiresError, + ExcludesError, + ExtrasError, + ConfigError, + InvalidError, + HorribleError, + OptionNotFound, + ArgumentMismatch, + BaseClass = 127 }; // Error definitions @@ -566,271 +532,242 @@ enum class ExitCodes { /// All errors derive from this one class Error : public std::runtime_error { - int actual_exit_code; - std::string error_name{"Error"}; + int actual_exit_code; + std::string error_name{"Error"}; - public: - int get_exit_code() const { return actual_exit_code; } +public: + int get_exit_code() const { return actual_exit_code; } - std::string get_name() const { return error_name; } + std::string get_name() const { return error_name; } - Error(std::string name, std::string msg, int exit_code = static_cast(ExitCodes::BaseClass)) - : runtime_error(msg), actual_exit_code(exit_code), error_name(std::move(name)) {} + Error(std::string name, std::string msg, int exit_code = static_cast(ExitCodes::BaseClass)) + : runtime_error(msg), actual_exit_code(exit_code), error_name(std::move(name)) {} - Error(std::string name, std::string msg, ExitCodes exit_code) : Error(name, msg, static_cast(exit_code)) {} + Error(std::string name, std::string msg, ExitCodes exit_code) : Error(name, msg, static_cast(exit_code)) {} }; // Note: Using Error::Error constructors does not work on GCC 4.7 /// Construction errors (not in parsing) class ConstructionError : public Error { - CLI11_ERROR_DEF(Error, ConstructionError) + CLI11_ERROR_DEF(Error, ConstructionError) }; /// Thrown when an option is set to conflicting values (non-vector and multi args, for example) class IncorrectConstruction : public ConstructionError { - CLI11_ERROR_DEF(ConstructionError, IncorrectConstruction) - CLI11_ERROR_SIMPLE(IncorrectConstruction) - static IncorrectConstruction PositionalFlag(std::string name) { - return IncorrectConstruction(name + ": Flags cannot be positional"); - } - static IncorrectConstruction Set0Opt(std::string name) { - return IncorrectConstruction(name + ": Cannot set 0 expected, use a flag instead"); - } - static IncorrectConstruction SetFlag(std::string name) { - return IncorrectConstruction(name + ": Cannot set an expected number for flags"); - } - static IncorrectConstruction ChangeNotVector(std::string name) { - return IncorrectConstruction(name + ": You can only change the expected arguments for vectors"); - } - static IncorrectConstruction AfterMultiOpt(std::string name) { - return IncorrectConstruction( - name + ": You can't change expected arguments after you've changed the multi option policy!"); - } - static IncorrectConstruction MissingOption(std::string name) { - return IncorrectConstruction("Option " + name + " is not defined"); - } - static IncorrectConstruction MultiOptionPolicy(std::string name) { - return IncorrectConstruction(name + ": multi_option_policy only works for flags and exact value options"); - } + CLI11_ERROR_DEF(ConstructionError, IncorrectConstruction) + CLI11_ERROR_SIMPLE(IncorrectConstruction) + static IncorrectConstruction PositionalFlag(std::string name) { return IncorrectConstruction(name + ": 
Flags cannot be positional"); } + static IncorrectConstruction Set0Opt(std::string name) { + return IncorrectConstruction(name + ": Cannot set 0 expected, use a flag instead"); + } + static IncorrectConstruction SetFlag(std::string name) { + return IncorrectConstruction(name + ": Cannot set an expected number for flags"); + } + static IncorrectConstruction ChangeNotVector(std::string name) { + return IncorrectConstruction(name + ": You can only change the expected arguments for vectors"); + } + static IncorrectConstruction AfterMultiOpt(std::string name) { + return IncorrectConstruction(name + ": You can't change expected arguments after you've changed the multi option policy!"); + } + static IncorrectConstruction MissingOption(std::string name) { return IncorrectConstruction("Option " + name + " is not defined"); } + static IncorrectConstruction MultiOptionPolicy(std::string name) { + return IncorrectConstruction(name + ": multi_option_policy only works for flags and exact value options"); + } }; /// Thrown on construction of a bad name class BadNameString : public ConstructionError { - CLI11_ERROR_DEF(ConstructionError, BadNameString) - CLI11_ERROR_SIMPLE(BadNameString) - static BadNameString OneCharName(std::string name) { return BadNameString("Invalid one char name: " + name); } - static BadNameString BadLongName(std::string name) { return BadNameString("Bad long name: " + name); } - static BadNameString DashesOnly(std::string name) { - return BadNameString("Must have a name, not just dashes: " + name); - } - static BadNameString MultiPositionalNames(std::string name) { - return BadNameString("Only one positional name allowed, remove: " + name); - } + CLI11_ERROR_DEF(ConstructionError, BadNameString) + CLI11_ERROR_SIMPLE(BadNameString) + static BadNameString OneCharName(std::string name) { return BadNameString("Invalid one char name: " + name); } + static BadNameString BadLongName(std::string name) { return BadNameString("Bad long name: " + name); } + static BadNameString DashesOnly(std::string name) { return BadNameString("Must have a name, not just dashes: " + name); } + static BadNameString MultiPositionalNames(std::string name) { return BadNameString("Only one positional name allowed, remove: " + name); } }; /// Thrown when an option already exists class OptionAlreadyAdded : public ConstructionError { - CLI11_ERROR_DEF(ConstructionError, OptionAlreadyAdded) - explicit OptionAlreadyAdded(std::string name) - : OptionAlreadyAdded(name + " is already added", ExitCodes::OptionAlreadyAdded) {} - static OptionAlreadyAdded Requires(std::string name, std::string other) { - return OptionAlreadyAdded(name + " requires " + other, ExitCodes::OptionAlreadyAdded); - } - static OptionAlreadyAdded Excludes(std::string name, std::string other) { - return OptionAlreadyAdded(name + " excludes " + other, ExitCodes::OptionAlreadyAdded); - } + CLI11_ERROR_DEF(ConstructionError, OptionAlreadyAdded) + explicit OptionAlreadyAdded(std::string name) : OptionAlreadyAdded(name + " is already added", ExitCodes::OptionAlreadyAdded) {} + static OptionAlreadyAdded Requires(std::string name, std::string other) { + return OptionAlreadyAdded(name + " requires " + other, ExitCodes::OptionAlreadyAdded); + } + static OptionAlreadyAdded Excludes(std::string name, std::string other) { + return OptionAlreadyAdded(name + " excludes " + other, ExitCodes::OptionAlreadyAdded); + } }; // Parsing errors /// Anything that can error in Parse class ParseError : public Error { - CLI11_ERROR_DEF(Error, ParseError) + 
CLI11_ERROR_DEF(Error, ParseError) }; // Not really "errors" /// This is a successful completion on parsing, supposed to exit class Success : public ParseError { - CLI11_ERROR_DEF(ParseError, Success) - Success() : Success("Successfully completed, should be caught and quit", ExitCodes::Success) {} + CLI11_ERROR_DEF(ParseError, Success) + Success() : Success("Successfully completed, should be caught and quit", ExitCodes::Success) {} }; /// -h or --help on command line class CallForHelp : public ParseError { - CLI11_ERROR_DEF(ParseError, CallForHelp) - CallForHelp() : CallForHelp("This should be caught in your main function, see examples", ExitCodes::Success) {} + CLI11_ERROR_DEF(ParseError, CallForHelp) + CallForHelp() : CallForHelp("This should be caught in your main function, see examples", ExitCodes::Success) {} }; /// Usually something like --help-all on command line class CallForAllHelp : public ParseError { - CLI11_ERROR_DEF(ParseError, CallForAllHelp) - CallForAllHelp() - : CallForAllHelp("This should be caught in your main function, see examples", ExitCodes::Success) {} + CLI11_ERROR_DEF(ParseError, CallForAllHelp) + CallForAllHelp() : CallForAllHelp("This should be caught in your main function, see examples", ExitCodes::Success) {} }; /// Does not output a diagnostic in CLI11_PARSE, but allows to return from main() with a specific error code. class RuntimeError : public ParseError { - CLI11_ERROR_DEF(ParseError, RuntimeError) - explicit RuntimeError(int exit_code = 1) : RuntimeError("Runtime error", exit_code) {} + CLI11_ERROR_DEF(ParseError, RuntimeError) + explicit RuntimeError(int exit_code = 1) : RuntimeError("Runtime error", exit_code) {} }; /// Thrown when parsing an INI file and it is missing class FileError : public ParseError { - CLI11_ERROR_DEF(ParseError, FileError) - CLI11_ERROR_SIMPLE(FileError) - static FileError Missing(std::string name) { return FileError(name + " was not readable (missing?)"); } + CLI11_ERROR_DEF(ParseError, FileError) + CLI11_ERROR_SIMPLE(FileError) + static FileError Missing(std::string name) { return FileError(name + " was not readable (missing?)"); } }; /// Thrown when conversion call back fails, such as when an int fails to coerce to a string class ConversionError : public ParseError { - CLI11_ERROR_DEF(ParseError, ConversionError) - CLI11_ERROR_SIMPLE(ConversionError) - ConversionError(std::string member, std::string name) - : ConversionError("The value " + member + " is not an allowed value for " + name) {} - ConversionError(std::string name, std::vector results) - : ConversionError("Could not convert: " + name + " = " + detail::join(results)) {} - static ConversionError TooManyInputsFlag(std::string name) { - return ConversionError(name + ": too many inputs for a flag"); - } - static ConversionError TrueFalse(std::string name) { - return ConversionError(name + ": Should be true/false or a number"); - } + CLI11_ERROR_DEF(ParseError, ConversionError) + CLI11_ERROR_SIMPLE(ConversionError) + ConversionError(std::string member, std::string name) : ConversionError("The value " + member + " is not an allowed value for " + name) {} + ConversionError(std::string name, std::vector results) + : ConversionError("Could not convert: " + name + " = " + detail::join(results)) {} + static ConversionError TooManyInputsFlag(std::string name) { return ConversionError(name + ": too many inputs for a flag"); } + static ConversionError TrueFalse(std::string name) { return ConversionError(name + ": Should be true/false or a number"); } }; /// Thrown when 
validation of results fails class ValidationError : public ParseError { - CLI11_ERROR_DEF(ParseError, ValidationError) - CLI11_ERROR_SIMPLE(ValidationError) - explicit ValidationError(std::string name, std::string msg) : ValidationError(name + ": " + msg) {} + CLI11_ERROR_DEF(ParseError, ValidationError) + CLI11_ERROR_SIMPLE(ValidationError) + explicit ValidationError(std::string name, std::string msg) : ValidationError(name + ": " + msg) {} }; /// Thrown when a required option is missing class RequiredError : public ParseError { - CLI11_ERROR_DEF(ParseError, RequiredError) - explicit RequiredError(std::string name) : RequiredError(name + " is required", ExitCodes::RequiredError) {} - static RequiredError Subcommand(std::size_t min_subcom) { - if(min_subcom == 1) { - return RequiredError("A subcommand"); - } - return RequiredError("Requires at least " + std::to_string(min_subcom) + " subcommands", - ExitCodes::RequiredError); - } - static RequiredError - Option(std::size_t min_option, std::size_t max_option, std::size_t used, const std::string &option_list) { - if((min_option == 1) && (max_option == 1) && (used == 0)) - return RequiredError("Exactly 1 option from [" + option_list + "]"); - if((min_option == 1) && (max_option == 1) && (used > 1)) { - return RequiredError("Exactly 1 option from [" + option_list + "] is required and " + std::to_string(used) + - " were given", - ExitCodes::RequiredError); - } - if((min_option == 1) && (used == 0)) - return RequiredError("At least 1 option from [" + option_list + "]"); - if(used < min_option) { - return RequiredError("Requires at least " + std::to_string(min_option) + " options used and only " + - std::to_string(used) + "were given from [" + option_list + "]", - ExitCodes::RequiredError); - } - if(max_option == 1) - return RequiredError("Requires at most 1 options be given from [" + option_list + "]", - ExitCodes::RequiredError); - - return RequiredError("Requires at most " + std::to_string(max_option) + " options be used and " + - std::to_string(used) + "were given from [" + option_list + "]", - ExitCodes::RequiredError); - } + CLI11_ERROR_DEF(ParseError, RequiredError) + explicit RequiredError(std::string name) : RequiredError(name + " is required", ExitCodes::RequiredError) {} + static RequiredError Subcommand(std::size_t min_subcom) { + if (min_subcom == 1) { + return RequiredError("A subcommand"); + } + return RequiredError("Requires at least " + std::to_string(min_subcom) + " subcommands", ExitCodes::RequiredError); + } + static RequiredError Option(std::size_t min_option, std::size_t max_option, std::size_t used, const std::string &option_list) { + if ((min_option == 1) && (max_option == 1) && (used == 0)) + return RequiredError("Exactly 1 option from [" + option_list + "]"); + if ((min_option == 1) && (max_option == 1) && (used > 1)) { + return RequiredError("Exactly 1 option from [" + option_list + "] is required and " + std::to_string(used) + " were given", + ExitCodes::RequiredError); + } + if ((min_option == 1) && (used == 0)) + return RequiredError("At least 1 option from [" + option_list + "]"); + if (used < min_option) { + return RequiredError("Requires at least " + std::to_string(min_option) + " options used and only " + std::to_string(used) + + "were given from [" + option_list + "]", + ExitCodes::RequiredError); + } + if (max_option == 1) + return RequiredError("Requires at most 1 options be given from [" + option_list + "]", ExitCodes::RequiredError); + + return RequiredError("Requires at most " + std::to_string(max_option) + 
" options be used and " + std::to_string(used) + + "were given from [" + option_list + "]", + ExitCodes::RequiredError); + } }; /// Thrown when the wrong number of arguments has been received class ArgumentMismatch : public ParseError { - CLI11_ERROR_DEF(ParseError, ArgumentMismatch) - CLI11_ERROR_SIMPLE(ArgumentMismatch) - ArgumentMismatch(std::string name, int expected, std::size_t recieved) - : ArgumentMismatch(expected > 0 ? ("Expected exactly " + std::to_string(expected) + " arguments to " + name + - ", got " + std::to_string(recieved)) - : ("Expected at least " + std::to_string(-expected) + " arguments to " + name + - ", got " + std::to_string(recieved)), - ExitCodes::ArgumentMismatch) {} - - static ArgumentMismatch AtLeast(std::string name, int num, std::size_t received) { - return ArgumentMismatch(name + ": At least " + std::to_string(num) + " required but received " + - std::to_string(received)); - } - static ArgumentMismatch AtMost(std::string name, int num, std::size_t received) { - return ArgumentMismatch(name + ": At Most " + std::to_string(num) + " required but received " + - std::to_string(received)); - } - static ArgumentMismatch TypedAtLeast(std::string name, int num, std::string type) { - return ArgumentMismatch(name + ": " + std::to_string(num) + " required " + type + " missing"); - } - static ArgumentMismatch FlagOverride(std::string name) { - return ArgumentMismatch(name + " was given a disallowed flag override"); - } + CLI11_ERROR_DEF(ParseError, ArgumentMismatch) + CLI11_ERROR_SIMPLE(ArgumentMismatch) + ArgumentMismatch(std::string name, int expected, std::size_t recieved) + : ArgumentMismatch( + expected > 0 + ? ("Expected exactly " + std::to_string(expected) + " arguments to " + name + ", got " + std::to_string(recieved)) + : ("Expected at least " + std::to_string(-expected) + " arguments to " + name + ", got " + std::to_string(recieved)), + ExitCodes::ArgumentMismatch) {} + + static ArgumentMismatch AtLeast(std::string name, int num, std::size_t received) { + return ArgumentMismatch(name + ": At least " + std::to_string(num) + " required but received " + std::to_string(received)); + } + static ArgumentMismatch AtMost(std::string name, int num, std::size_t received) { + return ArgumentMismatch(name + ": At Most " + std::to_string(num) + " required but received " + std::to_string(received)); + } + static ArgumentMismatch TypedAtLeast(std::string name, int num, std::string type) { + return ArgumentMismatch(name + ": " + std::to_string(num) + " required " + type + " missing"); + } + static ArgumentMismatch FlagOverride(std::string name) { return ArgumentMismatch(name + " was given a disallowed flag override"); } }; /// Thrown when a requires option is missing class RequiresError : public ParseError { - CLI11_ERROR_DEF(ParseError, RequiresError) - RequiresError(std::string curname, std::string subname) - : RequiresError(curname + " requires " + subname, ExitCodes::RequiresError) {} + CLI11_ERROR_DEF(ParseError, RequiresError) + RequiresError(std::string curname, std::string subname) : RequiresError(curname + " requires " + subname, ExitCodes::RequiresError) {} }; /// Thrown when an excludes option is present class ExcludesError : public ParseError { - CLI11_ERROR_DEF(ParseError, ExcludesError) - ExcludesError(std::string curname, std::string subname) - : ExcludesError(curname + " excludes " + subname, ExitCodes::ExcludesError) {} + CLI11_ERROR_DEF(ParseError, ExcludesError) + ExcludesError(std::string curname, std::string subname) : ExcludesError(curname + " excludes " 
+ subname, ExitCodes::ExcludesError) {} }; /// Thrown when too many positionals or options are found class ExtrasError : public ParseError { - CLI11_ERROR_DEF(ParseError, ExtrasError) - explicit ExtrasError(std::vector args) - : ExtrasError((args.size() > 1 ? "The following arguments were not expected: " - : "The following argument was not expected: ") + - detail::rjoin(args, " "), - ExitCodes::ExtrasError) {} - ExtrasError(const std::string &name, std::vector args) - : ExtrasError(name, - (args.size() > 1 ? "The following arguments were not expected: " - : "The following argument was not expected: ") + - detail::rjoin(args, " "), - ExitCodes::ExtrasError) {} + CLI11_ERROR_DEF(ParseError, ExtrasError) + explicit ExtrasError(std::vector args) + : ExtrasError((args.size() > 1 ? "The following arguments were not expected: " : "The following argument was not expected: ") + + detail::rjoin(args, " "), + ExitCodes::ExtrasError) {} + ExtrasError(const std::string &name, std::vector args) + : ExtrasError(name, + (args.size() > 1 ? "The following arguments were not expected: " : "The following argument was not expected: ") + + detail::rjoin(args, " "), + ExitCodes::ExtrasError) {} }; /// Thrown when extra values are found in an INI file class ConfigError : public ParseError { - CLI11_ERROR_DEF(ParseError, ConfigError) - CLI11_ERROR_SIMPLE(ConfigError) - static ConfigError Extras(std::string item) { return ConfigError("INI was not able to parse " + item); } - static ConfigError NotConfigurable(std::string item) { - return ConfigError(item + ": This option is not allowed in a configuration file"); - } + CLI11_ERROR_DEF(ParseError, ConfigError) + CLI11_ERROR_SIMPLE(ConfigError) + static ConfigError Extras(std::string item) { return ConfigError("INI was not able to parse " + item); } + static ConfigError NotConfigurable(std::string item) { + return ConfigError(item + ": This option is not allowed in a configuration file"); + } }; /// Thrown when validation fails before parsing class InvalidError : public ParseError { - CLI11_ERROR_DEF(ParseError, InvalidError) - explicit InvalidError(std::string name) - : InvalidError(name + ": Too many positional arguments with unlimited expected args", ExitCodes::InvalidError) { - } + CLI11_ERROR_DEF(ParseError, InvalidError) + explicit InvalidError(std::string name) + : InvalidError(name + ": Too many positional arguments with unlimited expected args", ExitCodes::InvalidError) {} }; /// This is just a safety check to verify selection and parsing match - you should not ever see it /// Strings are directly added to this error, but again, it should never be seen. 
class HorribleError : public ParseError { - CLI11_ERROR_DEF(ParseError, HorribleError) - CLI11_ERROR_SIMPLE(HorribleError) + CLI11_ERROR_DEF(ParseError, HorribleError) + CLI11_ERROR_SIMPLE(HorribleError) }; // After parsing /// Thrown when counting a non-existent option class OptionNotFound : public Error { - CLI11_ERROR_DEF(Error, OptionNotFound) - explicit OptionNotFound(std::string name) : OptionNotFound(name + " not found", ExitCodes::OptionNotFound) {} + CLI11_ERROR_DEF(Error, OptionNotFound) + explicit OptionNotFound(std::string name) : OptionNotFound(name + " not found", ExitCodes::OptionNotFound) {} }; #undef CLI11_ERROR_DEF @@ -897,9 +834,7 @@ template struct is_shared_ptr> : std::true_type template struct is_shared_ptr> : std::true_type {}; /// Check to see if something is copyable pointer -template struct is_copyable_ptr { - static bool const value = is_shared_ptr::value || std::is_pointer::value; -}; +template struct is_copyable_ptr { static bool const value = is_shared_ptr::value || std::is_pointer::value; }; /// This can be specialized to override the type deduction for IsMember. template struct IsMemberType { using type = T; }; @@ -918,7 +853,7 @@ namespace detail { template struct element_type { using type = T; }; template struct element_type::value>::type> { - using type = typename std::pointer_traits::element_type; + using type = typename std::pointer_traits::element_type; }; /// Combination of the element type and value type - remove pointer (including smart pointers) and get the value_type of @@ -927,39 +862,33 @@ template struct element_value_type { using type = typename element_ /// Adaptor for set-like structure: This just wraps a normal container in a few utilities that do almost nothing. template struct pair_adaptor : std::false_type { - using value_type = typename T::value_type; - using first_type = typename std::remove_const::type; - using second_type = typename std::remove_const::type; - - /// Get the first value (really just the underlying value) - template static auto first(Q &&pair_value) -> decltype(std::forward(pair_value)) { - return std::forward(pair_value); - } - /// Get the second value (really just the underlying value) - template static auto second(Q &&pair_value) -> decltype(std::forward(pair_value)) { - return std::forward(pair_value); - } + using value_type = typename T::value_type; + using first_type = typename std::remove_const::type; + using second_type = typename std::remove_const::type; + + /// Get the first value (really just the underlying value) + template static auto first(Q &&pair_value) -> decltype(std::forward(pair_value)) { return std::forward(pair_value); } + /// Get the second value (really just the underlying value) + template static auto second(Q &&pair_value) -> decltype(std::forward(pair_value)) { return std::forward(pair_value); } }; /// Adaptor for map-like structure (true version, must have key_type and mapped_type). /// This wraps a mapped container in a few utilities access it in a general way. 
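// An illustrative sketch (hypothetical values, not from this patch): for a
// map-like T such as std::map<std::string, int>, value_type is
// std::pair<const std::string, int>, so given such a pair p:
//   pair_adaptor<std::map<std::string, int>>::first(p)  // -> p.first (the key)
//   pair_adaptor<std::map<std::string, int>>::second(p) // -> p.second (the value)
// For a plain container such as std::vector<int>, the false_type version above
// applies and first()/second() both forward the element itself.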
template -struct pair_adaptor< - T, - conditional_t, void>> +struct pair_adaptor, void>> : std::true_type { - using value_type = typename T::value_type; - using first_type = typename std::remove_const::type; - using second_type = typename std::remove_const::type; - - /// Get the first value (really just the underlying value) - template static auto first(Q &&pair_value) -> decltype(std::get<0>(std::forward(pair_value))) { - return std::get<0>(std::forward(pair_value)); - } - /// Get the second value (really just the underlying value) - template static auto second(Q &&pair_value) -> decltype(std::get<1>(std::forward(pair_value))) { - return std::get<1>(std::forward(pair_value)); - } + using value_type = typename T::value_type; + using first_type = typename std::remove_const::type; + using second_type = typename std::remove_const::type; + + /// Get the first value (really just the underlying value) + template static auto first(Q &&pair_value) -> decltype(std::get<0>(std::forward(pair_value))) { + return std::get<0>(std::forward(pair_value)); + } + /// Get the second value (really just the underlying value) + template static auto second(Q &&pair_value) -> decltype(std::get<1>(std::forward(pair_value))) { + return std::get<1>(std::forward(pair_value)); + } }; // Warning is suppressed due to "bug" in gcc<5.0 and gcc 7.0 with c++17 enabled that generates a Wnarrowing warning @@ -974,25 +903,25 @@ struct pair_adaptor< #endif // check for constructibility from a specific type and copy assignable used in the parse detection template class is_direct_constructible { - template - static auto test(int, std::true_type) -> decltype( + template + static auto test(int, std::true_type) -> decltype( // NVCC warns about narrowing conversions here #ifdef __CUDACC__ #pragma diag_suppress 2361 #endif - TT { std::declval() } + TT { std::declval() } #ifdef __CUDACC__ #pragma diag_default 2361 #endif - , - std::is_move_assignable()); + , + std::is_move_assignable()); - template static auto test(int, std::false_type) -> std::false_type; + template static auto test(int, std::false_type) -> std::false_type; - template static auto test(...) -> std::false_type; + template static auto test(...) -> std::false_type; - public: - static constexpr bool value = decltype(test(0, typename std::is_constructible::type()))::value; +public: + static constexpr bool value = decltype(test(0, typename std::is_constructible::type()))::value; }; #ifdef __GNUC__ #pragma GCC diagnostic pop @@ -1002,126 +931,112 @@ template class is_direct_constructible { // Based on https://stackoverflow.com/questions/22758291/how-can-i-detect-if-a-type-can-be-streamed-to-an-stdostream template class is_ostreamable { - template - static auto test(int) -> decltype(std::declval() << std::declval(), std::true_type()); + template static auto test(int) -> decltype(std::declval() << std::declval(), std::true_type()); - template static auto test(...) -> std::false_type; + template static auto test(...) -> std::false_type; - public: - static constexpr bool value = decltype(test(0))::value; +public: + static constexpr bool value = decltype(test(0))::value; }; /// Check for input streamability template class is_istreamable { - template - static auto test(int) -> decltype(std::declval() >> std::declval(), std::true_type()); + template static auto test(int) -> decltype(std::declval() >> std::declval(), std::true_type()); - template static auto test(...) -> std::false_type; + template static auto test(...) 
-> std::false_type; - public: - static constexpr bool value = decltype(test(0))::value; +public: + static constexpr bool value = decltype(test(0))::value; }; /// Templated operation to get a value from a stream template ::value, detail::enabler> = detail::dummy> bool from_stream(const std::string &istring, T &obj) { - std::istringstream is; - is.str(istring); - is >> obj; - return !is.fail() && !is.rdbuf()->in_avail(); + std::istringstream is; + is.str(istring); + is >> obj; + return !is.fail() && !is.rdbuf()->in_avail(); } template ::value, detail::enabler> = detail::dummy> bool from_stream(const std::string & /*istring*/, T & /*obj*/) { - return false; + return false; } // Check for tuple like types, as in classes with a tuple_size type trait template class is_tuple_like { - template - // static auto test(int) - // -> decltype(std::conditional<(std::tuple_size::value > 0), std::true_type, std::false_type>::type()); - static auto test(int) -> decltype(std::tuple_size::value, std::true_type{}); - template static auto test(...) -> std::false_type; - - public: - static constexpr bool value = decltype(test(0))::value; + template + // static auto test(int) + // -> decltype(std::conditional<(std::tuple_size::value > 0), std::true_type, std::false_type>::type()); + static auto test(int) -> decltype(std::tuple_size::value, std::true_type{}); + template static auto test(...) -> std::false_type; + +public: + static constexpr bool value = decltype(test(0))::value; }; /// Convert an object to a string (directly forward if this can become a string) template ::value, detail::enabler> = detail::dummy> auto to_string(T &&value) -> decltype(std::forward(value)) { - return std::forward(value); + return std::forward(value); } /// Convert an object to a string (streaming must be supported for that type) template ::value && is_ostreamable::value, detail::enabler> = - detail::dummy> + enable_if_t::value && is_ostreamable::value, detail::enabler> = detail::dummy> std::string to_string(T &&value) { - std::stringstream stream; - stream << value; - return stream.str(); + std::stringstream stream; + stream << value; + return stream.str(); } /// If conversion is not supported, return an empty string (streaming is not supported for that type) -template ::value && !is_ostreamable::value && - !is_vector::type>::type>::value, - detail::enabler> = detail::dummy> +template ::value && !is_ostreamable::value && + !is_vector::type>::type>::value, + detail::enabler> = detail::dummy> std::string to_string(T &&) { - return std::string{}; + return std::string{}; } /// convert a vector to a string -template ::value && !is_ostreamable::value && - is_vector::type>::type>::value, - detail::enabler> = detail::dummy> +template ::value && !is_ostreamable::value && + is_vector::type>::type>::value, + detail::enabler> = detail::dummy> std::string to_string(T &&variable) { - std::vector defaults; - defaults.reserve(variable.size()); - auto cval = variable.begin(); - auto end = variable.end(); - while(cval != end) { - defaults.emplace_back(CLI::detail::to_string(*cval)); - ++cval; - } - return std::string("[" + detail::join(defaults) + "]"); + std::vector defaults; + defaults.reserve(variable.size()); + auto cval = variable.begin(); + auto end = variable.end(); + while (cval != end) { + defaults.emplace_back(CLI::detail::to_string(*cval)); + ++cval; + } + return std::string("[" + detail::join(defaults) + "]"); } /// special template overload -template ::value, detail::enabler> = detail::dummy> +template ::value, detail::enabler> = 
detail::dummy> auto checked_to_string(T &&value) -> decltype(to_string(std::forward(value))) { - return to_string(std::forward(value)); + return to_string(std::forward(value)); } /// special template overload -template ::value, detail::enabler> = detail::dummy> +template ::value, detail::enabler> = detail::dummy> std::string checked_to_string(T &&) { - return std::string{}; + return std::string{}; } /// get a string as a convertible value for arithmetic types -template ::value, detail::enabler> = detail::dummy> -std::string value_string(const T &value) { - return std::to_string(value); +template ::value, detail::enabler> = detail::dummy> std::string value_string(const T &value) { + return std::to_string(value); } /// get a string as a convertible value for enumerations -template ::value, detail::enabler> = detail::dummy> -std::string value_string(const T &value) { - return std::to_string(static_cast::type>(value)); +template ::value, detail::enabler> = detail::dummy> std::string value_string(const T &value) { + return std::to_string(static_cast::type>(value)); } /// for other types just use the regular to_string function -template ::value && !std::is_arithmetic::value, detail::enabler> = detail::dummy> +template ::value && !std::is_arithmetic::value, detail::enabler> = detail::dummy> auto value_string(const T &value) -> decltype(to_string(value)) { - return to_string(value); + return to_string(value); } /// This will only trigger for actual void type @@ -1129,161 +1044,144 @@ template struct type_count { static const i /// Set of overloads to get the type size of an object template struct type_count::value>::type> { - static constexpr int value{std::tuple_size::value}; + static constexpr int value{std::tuple_size::value}; }; /// Type size for regular object types that do not look like a tuple template -struct type_count< - T, - typename std::enable_if::value && !is_tuple_like::value && !std::is_void::value>::type> { - static constexpr int value{1}; +struct type_count::value && !is_tuple_like::value && !std::is_void::value>::type> { + static constexpr int value{1}; }; /// Type size of types that look like a vector template struct type_count::value>::type> { - static constexpr int value{is_vector::value ? expected_max_vector_size - : type_count::value}; + static constexpr int value{is_vector::value ? 
expected_max_vector_size + : type_count::value}; }; /// This will only trigger for actual void type template struct expected_count { static const int value{0}; }; /// For most types the number of expected items is 1 -template -struct expected_count::value && !std::is_void::value>::type> { - static constexpr int value{1}; +template struct expected_count::value && !std::is_void::value>::type> { + static constexpr int value{1}; }; /// number of expected items in a vector template struct expected_count::value>::type> { - static constexpr int value{expected_max_vector_size}; + static constexpr int value{expected_max_vector_size}; }; // Enumeration of the different supported categorizations of objects enum class object_category : int { - integral_value = 2, - unsigned_integral = 4, - enumeration = 6, - boolean_value = 8, - floating_point = 10, - number_constructible = 12, - double_constructible = 14, - integer_constructible = 16, - vector_value = 30, - tuple_value = 35, - // string assignable or greater used in a condition so anything string like must come last - string_assignable = 50, - string_constructible = 60, - other = 200, + integral_value = 2, + unsigned_integral = 4, + enumeration = 6, + boolean_value = 8, + floating_point = 10, + number_constructible = 12, + double_constructible = 14, + integer_constructible = 16, + vector_value = 30, + tuple_value = 35, + // string assignable or greater used in a condition so anything string like must come last + string_assignable = 50, + string_constructible = 60, + other = 200, }; /// some type that is not otherwise recognized -template struct classify_object { - static constexpr object_category value{object_category::other}; -}; +template struct classify_object { static constexpr object_category value{object_category::other}; }; /// Set of overloads to classify an object according to type template -struct classify_object::value && std::is_signed::value && - !is_bool::value && !std::is_enum::value>::type> { - static constexpr object_category value{object_category::integral_value}; +struct classify_object::value && std::is_signed::value && !is_bool::value && + !std::is_enum::value>::type> { + static constexpr object_category value{object_category::integral_value}; }; /// Unsigned integers template -struct classify_object< - T, - typename std::enable_if::value && std::is_unsigned::value && !is_bool::value>::type> { - static constexpr object_category value{object_category::unsigned_integral}; +struct classify_object::value && std::is_unsigned::value && !is_bool::value>::type> { + static constexpr object_category value{object_category::unsigned_integral}; }; /// Boolean values template struct classify_object::value>::type> { - static constexpr object_category value{object_category::boolean_value}; + static constexpr object_category value{object_category::boolean_value}; }; /// Floats template struct classify_object::value>::type> { - static constexpr object_category value{object_category::floating_point}; + static constexpr object_category value{object_category::floating_point}; }; /// String and similar direct assignment template -struct classify_object< - T, - typename std::enable_if::value && !std::is_integral::value && - std::is_assignable::value && !is_vector::value>::type> { - static constexpr object_category value{object_category::string_assignable}; +struct classify_object::value && !std::is_integral::value && + std::is_assignable::value && !is_vector::value>::type> { + static constexpr object_category value{object_category::string_assignable}; 
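
// The object_category enum is what the rest of the converter dispatches on:
// every target type funnels into exactly one category, and the string-like
// categories deliberately get the largest values so that a single
// `>= string_assignable` comparison means "anything string-like". A quick
// probe of how the classification resolves for everyday types; the include
// path is an assumption (adjust to wherever this header lives in your tree).
#include <string>
#include <vector>
#include "utils/CLI11.hpp" // assumed include path

using CLI::detail::classify_object;
using CLI::detail::object_category;

static_assert(classify_object<int>::value == object_category::integral_value, "");
static_assert(classify_object<unsigned>::value == object_category::unsigned_integral, "");
static_assert(classify_object<bool>::value == object_category::boolean_value, "");
static_assert(classify_object<double>::value == object_category::floating_point, "");
static_assert(classify_object<std::string>::value == object_category::string_assignable, "");
static_assert(classify_object<std::vector<int>>::value == object_category::vector_value, "");
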
}; /// String and similar constructible and copy assignment template -struct classify_object< - T, - typename std::enable_if::value && !std::is_integral::value && - !std::is_assignable::value && - std::is_constructible::value && !is_vector::value>::type> { - static constexpr object_category value{object_category::string_constructible}; +struct classify_object::value && !std::is_integral::value && + !std::is_assignable::value && + std::is_constructible::value && !is_vector::value>::type> { + static constexpr object_category value{object_category::string_constructible}; }; /// Enumerations template struct classify_object::value>::type> { - static constexpr object_category value{object_category::enumeration}; + static constexpr object_category value{object_category::enumeration}; }; /// Handy helper to contain a bunch of checks that rule out many common types (integers, string like, floating point, /// vectors, and enumerations template struct uncommon_type { - using type = typename std::conditional::value && !std::is_integral::value && - !std::is_assignable::value && - !std::is_constructible::value && !is_vector::value && - !std::is_enum::value, - std::true_type, - std::false_type>::type; - static constexpr bool value = type::value; + using type = + typename std::conditional::value && !std::is_integral::value && + !std::is_assignable::value && !std::is_constructible::value && + !is_vector::value && !std::is_enum::value, + std::true_type, std::false_type>::type; + static constexpr bool value = type::value; }; /// Assignable from double or int template struct classify_object::value && type_count::value == 1 && - is_direct_constructible::value && - is_direct_constructible::value>::type> { - static constexpr object_category value{object_category::number_constructible}; + is_direct_constructible::value && is_direct_constructible::value>::type> { + static constexpr object_category value{object_category::number_constructible}; }; /// Assignable from int template -struct classify_object::value && type_count::value == 1 && - !is_direct_constructible::value && - is_direct_constructible::value>::type> { - static constexpr object_category value{object_category::integer_constructible}; +struct classify_object< + T, typename std::enable_if::value && type_count::value == 1 && !is_direct_constructible::value && + is_direct_constructible::value>::type> { + static constexpr object_category value{object_category::integer_constructible}; }; /// Assignable from double template -struct classify_object::value && type_count::value == 1 && - is_direct_constructible::value && - !is_direct_constructible::value>::type> { - static constexpr object_category value{object_category::double_constructible}; +struct classify_object< + T, typename std::enable_if::value && type_count::value == 1 && is_direct_constructible::value && + !is_direct_constructible::value>::type> { + static constexpr object_category value{object_category::double_constructible}; }; /// Tuple type template -struct classify_object::value >= 2 && !is_vector::value) || - (is_tuple_like::value && uncommon_type::value && - !is_direct_constructible::value && - !is_direct_constructible::value)>::type> { - static constexpr object_category value{object_category::tuple_value}; +struct classify_object< + T, typename std::enable_if<(type_count::value >= 2 && !is_vector::value) || + (is_tuple_like::value && uncommon_type::value && !is_direct_constructible::value && + !is_direct_constructible::value)>::type> { + static constexpr object_category 
value{object_category::tuple_value}; }; /// Vector type template struct classify_object::value>::type> { - static constexpr object_category value{object_category::vector_value}; + static constexpr object_category value{object_category::vector_value}; }; // Type name print @@ -1292,522 +1190,477 @@ template struct classify_object::value == object_category::integral_value || - classify_object::value == object_category::integer_constructible, - detail::enabler> = detail::dummy> +template ::value == object_category::integral_value || + classify_object::value == object_category::integer_constructible, + detail::enabler> = detail::dummy> constexpr const char *type_name() { - return "INT"; + return "INT"; } -template ::value == object_category::unsigned_integral, detail::enabler> = detail::dummy> +template ::value == object_category::unsigned_integral, detail::enabler> = detail::dummy> constexpr const char *type_name() { - return "UINT"; + return "UINT"; } -template ::value == object_category::floating_point || - classify_object::value == object_category::number_constructible || - classify_object::value == object_category::double_constructible, - detail::enabler> = detail::dummy> +template ::value == object_category::floating_point || + classify_object::value == object_category::number_constructible || + classify_object::value == object_category::double_constructible, + detail::enabler> = detail::dummy> constexpr const char *type_name() { - return "FLOAT"; + return "FLOAT"; } /// Print name for enumeration types -template ::value == object_category::enumeration, detail::enabler> = detail::dummy> +template ::value == object_category::enumeration, detail::enabler> = detail::dummy> constexpr const char *type_name() { - return "ENUM"; + return "ENUM"; } /// Print name for enumeration types -template ::value == object_category::boolean_value, detail::enabler> = detail::dummy> +template ::value == object_category::boolean_value, detail::enabler> = detail::dummy> constexpr const char *type_name() { - return "BOOLEAN"; + return "BOOLEAN"; } /// Print for all other types -template ::value >= object_category::string_assignable, detail::enabler> = detail::dummy> +template ::value >= object_category::string_assignable, detail::enabler> = detail::dummy> constexpr const char *type_name() { - return "TEXT"; + return "TEXT"; } /// Print name for single element tuple types -template ::value == object_category::tuple_value && type_count::value == 1, - detail::enabler> = detail::dummy> +template ::value == object_category::tuple_value && type_count::value == 1, detail::enabler> = + detail::dummy> inline std::string type_name() { - return type_name::type>(); + return type_name::type>(); } /// Empty string if the index > tuple size -template -inline typename std::enable_if::value, std::string>::type tuple_name() { - return std::string{}; +template inline typename std::enable_if::value, std::string>::type tuple_name() { + return std::string{}; } /// Recursively generate the tuple type name -template - inline typename std::enable_if < I::value, std::string>::type tuple_name() { - std::string str = std::string(type_name::type>()) + ',' + tuple_name(); - if(str.back() == ',') - str.pop_back(); - return str; +template inline typename std::enable_if < I::value, std::string>::type tuple_name() { + std::string str = std::string(type_name::type>()) + ',' + tuple_name(); + if (str.back() == ',') + str.pop_back(); + return str; } /// Print type name for tuples with 2 or more elements -template ::value == 
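
// These type_name overloads supply the value placeholder CLI11 prints in help
// text (for example "-n INT"). A small probe, with the include path assumed
// as in the sketch above:
#include <iostream>
#include <string>
#include "utils/CLI11.hpp" // assumed include path

int main() {
  // Each object_category maps to a fixed placeholder string.
  std::cout << CLI::detail::type_name<int>() << '\n';         // INT
  std::cout << CLI::detail::type_name<unsigned>() << '\n';    // UINT
  std::cout << CLI::detail::type_name<double>() << '\n';      // FLOAT
  std::cout << CLI::detail::type_name<std::string>() << '\n'; // TEXT
  return 0;
}
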
object_category::tuple_value && type_count::value >= 2, - detail::enabler> = detail::dummy> +template ::value == object_category::tuple_value && type_count::value >= 2, detail::enabler> = + detail::dummy> std::string type_name() { - auto tname = std::string(1, '[') + tuple_name(); - tname.push_back(']'); - return tname; + auto tname = std::string(1, '[') + tuple_name(); + tname.push_back(']'); + return tname; } /// This one should not be used normally, since vector types print the internal type -template ::value == object_category::vector_value, detail::enabler> = detail::dummy> +template ::value == object_category::vector_value, detail::enabler> = detail::dummy> inline std::string type_name() { - return type_name(); + return type_name(); } // Lexical cast /// Convert a flag into an integer value typically binary flags inline int64_t to_flag_value(std::string val) { - static const std::string trueString("true"); - static const std::string falseString("false"); - if(val == trueString) { - return 1; - } - if(val == falseString) { - return -1; - } - val = detail::to_lower(val); - int64_t ret; - if(val.size() == 1) { - if(val[0] >= '1' && val[0] <= '9') { - return (static_cast(val[0]) - '0'); - } - switch(val[0]) { - case '0': - case 'f': - case 'n': - case '-': - ret = -1; - break; - case 't': - case 'y': - case '+': - ret = 1; - break; - default: - throw std::invalid_argument("unrecognized character"); - } - return ret; - } - if(val == trueString || val == "on" || val == "yes" || val == "enable") { - ret = 1; - } else if(val == falseString || val == "off" || val == "no" || val == "disable") { - ret = -1; - } else { - ret = std::stoll(val); + static const std::string trueString("true"); + static const std::string falseString("false"); + if (val == trueString) { + return 1; + } + if (val == falseString) { + return -1; + } + val = detail::to_lower(val); + int64_t ret; + if (val.size() == 1) { + if (val[0] >= '1' && val[0] <= '9') { + return (static_cast(val[0]) - '0'); + } + switch (val[0]) { + case '0': + case 'f': + case 'n': + case '-': + ret = -1; + break; + case 't': + case 'y': + case '+': + ret = 1; + break; + default: + throw std::invalid_argument("unrecognized character"); } return ret; + } + if (val == trueString || val == "on" || val == "yes" || val == "enable") { + ret = 1; + } else if (val == falseString || val == "off" || val == "no" || val == "disable") { + ret = -1; + } else { + ret = std::stoll(val); + } + return ret; } /// Signed integers -template ::value == object_category::integral_value, detail::enabler> = detail::dummy> +template ::value == object_category::integral_value, detail::enabler> = detail::dummy> bool lexical_cast(const std::string &input, T &output) { - try { - std::size_t n = 0; - std::int64_t output_ll = std::stoll(input, &n, 0); - output = static_cast(output_ll); - return n == input.size() && static_cast(output) == output_ll; - } catch(const std::invalid_argument &) { - return false; - } catch(const std::out_of_range &) { - return false; - } + try { + std::size_t n = 0; + std::int64_t output_ll = std::stoll(input, &n, 0); + output = static_cast(output_ll); + return n == input.size() && static_cast(output) == output_ll; + } catch (const std::invalid_argument &) { + return false; + } catch (const std::out_of_range &) { + return false; + } } /// Unsigned integers -template ::value == object_category::unsigned_integral, detail::enabler> = detail::dummy> +template ::value == object_category::unsigned_integral, detail::enabler> = detail::dummy> bool 
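
// to_flag_value condenses the accepted spellings of a boolean flag into a
// signed count: truthy spellings map to 1, falsy ones to -1, a single digit
// maps to itself, and anything else is handed to std::stoll (so garbage
// throws std::invalid_argument). A usage sketch, include path assumed:
#include <cassert>
#include "utils/CLI11.hpp" // assumed include path

int main() {
  assert(CLI::detail::to_flag_value("true") == 1);
  assert(CLI::detail::to_flag_value("OFF") == -1); // lowered before matching
  assert(CLI::detail::to_flag_value("7") == 7);    // single digit passes through
  assert(CLI::detail::to_flag_value("-3") == -3);  // falls back to std::stoll
  return 0;
}
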
lexical_cast(const std::string &input, T &output) { - if(!input.empty() && input.front() == '-') - return false; // std::stoull happily converts negative values to junk without any errors. - - try { - std::size_t n = 0; - std::uint64_t output_ll = std::stoull(input, &n, 0); - output = static_cast(output_ll); - return n == input.size() && static_cast(output) == output_ll; - } catch(const std::invalid_argument &) { - return false; - } catch(const std::out_of_range &) { - return false; - } + if (!input.empty() && input.front() == '-') + return false; // std::stoull happily converts negative values to junk without any errors. + + try { + std::size_t n = 0; + std::uint64_t output_ll = std::stoull(input, &n, 0); + output = static_cast(output_ll); + return n == input.size() && static_cast(output) == output_ll; + } catch (const std::invalid_argument &) { + return false; + } catch (const std::out_of_range &) { + return false; + } } /// Boolean values -template ::value == object_category::boolean_value, detail::enabler> = detail::dummy> +template ::value == object_category::boolean_value, detail::enabler> = detail::dummy> bool lexical_cast(const std::string &input, T &output) { - try { - auto out = to_flag_value(input); - output = (out > 0); - return true; - } catch(const std::invalid_argument &) { - return false; - } catch(const std::out_of_range &) { - // if the number is out of the range of a 64 bit value then it is still a number and for this purpose is still - // valid all we care about the sign - output = (input[0] != '-'); - return true; - } + try { + auto out = to_flag_value(input); + output = (out > 0); + return true; + } catch (const std::invalid_argument &) { + return false; + } catch (const std::out_of_range &) { + // if the number is out of the range of a 64 bit value then it is still a number and for this purpose is still + // valid all we care about the sign + output = (input[0] != '-'); + return true; + } } /// Floats -template ::value == object_category::floating_point, detail::enabler> = detail::dummy> +template ::value == object_category::floating_point, detail::enabler> = detail::dummy> bool lexical_cast(const std::string &input, T &output) { - try { - std::size_t n = 0; - output = static_cast(std::stold(input, &n)); - return n == input.size(); - } catch(const std::invalid_argument &) { - return false; - } catch(const std::out_of_range &) { - return false; - } + try { + std::size_t n = 0; + output = static_cast(std::stold(input, &n)); + return n == input.size(); + } catch (const std::invalid_argument &) { + return false; + } catch (const std::out_of_range &) { + return false; + } } /// String and similar direct assignment -template ::value == object_category::string_assignable, detail::enabler> = detail::dummy> +template ::value == object_category::string_assignable, detail::enabler> = detail::dummy> bool lexical_cast(const std::string &input, T &output) { - output = input; - return true; + output = input; + return true; } /// String and similar constructible and copy assignment -template < - typename T, - enable_if_t::value == object_category::string_constructible, detail::enabler> = detail::dummy> +template ::value == object_category::string_constructible, detail::enabler> = detail::dummy> bool lexical_cast(const std::string &input, T &output) { - output = T(input); - return true; + output = T(input); + return true; } /// Enumerations -template ::value == object_category::enumeration, detail::enabler> = detail::dummy> +template ::value == object_category::enumeration, 
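
// The lexical_cast overloads above share one contract: parse the whole string
// or report failure, without throwing. Note the unsigned overload rejects a
// leading '-' up front, because std::stoull would otherwise wrap negative
// input silently. A usage sketch, include path assumed:
#include <cassert>
#include <cstdint>
#include "utils/CLI11.hpp" // assumed include path

int main() {
  int i{};
  assert(CLI::detail::lexical_cast("0x10", i) && i == 16); // base 0: hex accepted
  std::uint8_t u{};
  assert(!CLI::detail::lexical_cast("-1", u));  // leading '-' rejected
  assert(!CLI::detail::lexical_cast("300", u)); // does not fit in 8 bits
  double d{};
  assert(CLI::detail::lexical_cast("2.5e3", d) && d == 2500.0);
  return 0;
}
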
detail::enabler> = detail::dummy> bool lexical_cast(const std::string &input, T &output) { - typename std::underlying_type::type val; - bool retval = detail::lexical_cast(input, val); - if(!retval) { - return false; - } - output = static_cast(val); - return true; + typename std::underlying_type::type val; + bool retval = detail::lexical_cast(input, val); + if (!retval) { + return false; + } + output = static_cast(val); + return true; } /// Assignable from double or int -template < - typename T, - enable_if_t::value == object_category::number_constructible, detail::enabler> = detail::dummy> +template ::value == object_category::number_constructible, detail::enabler> = detail::dummy> bool lexical_cast(const std::string &input, T &output) { - int val; - if(lexical_cast(input, val)) { - output = T(val); - return true; - } else { - double dval; - if(lexical_cast(input, dval)) { - output = T{dval}; - return true; - } + int val; + if (lexical_cast(input, val)) { + output = T(val); + return true; + } else { + double dval; + if (lexical_cast(input, dval)) { + output = T{dval}; + return true; } - return from_stream(input, output); + } + return from_stream(input, output); } /// Assignable from int -template < - typename T, - enable_if_t::value == object_category::integer_constructible, detail::enabler> = detail::dummy> +template ::value == object_category::integer_constructible, detail::enabler> = detail::dummy> bool lexical_cast(const std::string &input, T &output) { - int val; - if(lexical_cast(input, val)) { - output = T(val); - return true; - } - return from_stream(input, output); + int val; + if (lexical_cast(input, val)) { + output = T(val); + return true; + } + return from_stream(input, output); } /// Assignable from double -template < - typename T, - enable_if_t::value == object_category::double_constructible, detail::enabler> = detail::dummy> +template ::value == object_category::double_constructible, detail::enabler> = detail::dummy> bool lexical_cast(const std::string &input, T &output) { - double val; - if(lexical_cast(input, val)) { - output = T{val}; - return true; - } - return from_stream(input, output); + double val; + if (lexical_cast(input, val)) { + output = T{val}; + return true; + } + return from_stream(input, output); } /// Non-string parsable by a stream template ::value == object_category::other, detail::enabler> = detail::dummy> bool lexical_cast(const std::string &input, T &output) { - static_assert(is_istreamable::value, - "option object type must have a lexical cast overload or streaming input operator(>>) defined, if it " - "is convertible from another type use the add_option(...) with XC being the known type"); - return from_stream(input, output); + static_assert(is_istreamable::value, + "option object type must have a lexical cast overload or streaming input operator(>>) defined, if it " + "is convertible from another type use the add_option(...) 
with XC being the known type"); + return from_stream(input, output); } /// Assign a value through lexical cast operations -template < - typename T, - typename XC, - enable_if_t::value && (classify_object::value == object_category::string_assignable || - classify_object::value == object_category::string_constructible), - detail::enabler> = detail::dummy> +template ::value && (classify_object::value == object_category::string_assignable || + classify_object::value == object_category::string_constructible), + detail::enabler> = detail::dummy> bool lexical_assign(const std::string &input, T &output) { - return lexical_cast(input, output); + return lexical_cast(input, output); } /// Assign a value through lexical cast operations -template ::value && classify_object::value != object_category::string_assignable && classify_object::value != object_category::string_constructible, detail::enabler> = detail::dummy> bool lexical_assign(const std::string &input, T &output) { - if(input.empty()) { - output = T{}; - return true; - } - return lexical_cast(input, output); + if (input.empty()) { + output = T{}; + return true; + } + return lexical_cast(input, output); } /// Assign a value converted from a string in lexical cast to the output value directly -template < - typename T, - typename XC, - enable_if_t::value && std::is_assignable::value, detail::enabler> = detail::dummy> +template ::value && std::is_assignable::value, detail::enabler> = detail::dummy> bool lexical_assign(const std::string &input, T &output) { - XC val{}; - bool parse_result = (!input.empty()) ? lexical_cast(input, val) : true; - if(parse_result) { - output = val; - } - return parse_result; + XC val{}; + bool parse_result = (!input.empty()) ? lexical_cast(input, val) : true; + if (parse_result) { + output = val; + } + return parse_result; } /// Assign a value from a lexical cast through constructing a value and move assigning it -template ::value && !std::is_assignable::value && - std::is_move_assignable::value, +template ::value && !std::is_assignable::value && std::is_move_assignable::value, detail::enabler> = detail::dummy> bool lexical_assign(const std::string &input, T &output) { - XC val{}; - bool parse_result = input.empty() ? true : lexical_cast(input, val); - if(parse_result) { - output = T(val); // use () form of constructor to allow some implicit conversions - } - return parse_result; + XC val{}; + bool parse_result = input.empty() ? 
true : lexical_cast(input, val); + if (parse_result) { + output = T(val); // use () form of constructor to allow some implicit conversions + } + return parse_result; } /// Lexical conversion if there is only one element -template < - typename T, - typename XC, - enable_if_t::value && !is_tuple_like::value && !is_vector::value && !is_vector::value, - detail::enabler> = detail::dummy> +template ::value && !is_tuple_like::value && !is_vector::value && !is_vector::value, + detail::enabler> = detail::dummy> bool lexical_conversion(const std::vector &strings, T &output) { - return lexical_assign(strings[0], output); + return lexical_assign(strings[0], output); } /// Lexical conversion if there is only one element but the conversion type is for two call a two element constructor -template ::value == 1 && type_count::value == 2, detail::enabler> = detail::dummy> +template ::value == 1 && type_count::value == 2, detail::enabler> = detail::dummy> bool lexical_conversion(const std::vector &strings, T &output) { - typename std::tuple_element<0, XC>::type v1; - typename std::tuple_element<1, XC>::type v2; - bool retval = lexical_assign(strings[0], v1); - if(strings.size() > 1) { - retval = retval && lexical_assign(strings[1], v2); - } - if(retval) { - output = T{v1, v2}; - } - return retval; + typename std::tuple_element<0, XC>::type v1; + typename std::tuple_element<1, XC>::type v2; + bool retval = lexical_assign(strings[0], v1); + if (strings.size() > 1) { + retval = retval && lexical_assign(strings[1], v2); + } + if (retval) { + output = T{v1, v2}; + } + return retval; } /// Lexical conversion of a vector types -template ::value == expected_max_vector_size && - expected_count::value == expected_max_vector_size && type_count::value == 1, +template ::value == expected_max_vector_size && expected_count::value == expected_max_vector_size && + type_count::value == 1, detail::enabler> = detail::dummy> bool lexical_conversion(const std::vector &strings, T &output) { - output.clear(); - output.reserve(strings.size()); - for(const auto &elem : strings) { + output.clear(); + output.reserve(strings.size()); + for (const auto &elem : strings) { - output.emplace_back(); - bool retval = lexical_assign(elem, output.back()); - if(!retval) { - return false; - } + output.emplace_back(); + bool retval = lexical_assign(elem, output.back()); + if (!retval) { + return false; } - return (!output.empty()); + } + return (!output.empty()); } /// Lexical conversion of a vector types with type size of two -template ::value == expected_max_vector_size && - expected_count::value == expected_max_vector_size && type_count::value == 2, +template ::value == expected_max_vector_size && expected_count::value == expected_max_vector_size && + type_count::value == 2, detail::enabler> = detail::dummy> bool lexical_conversion(const std::vector &strings, T &output) { - output.clear(); - for(std::size_t ii = 0; ii < strings.size(); ii += 2) { - - typename std::tuple_element<0, typename XC::value_type>::type v1; - typename std::tuple_element<1, typename XC::value_type>::type v2; - bool retval = lexical_assign(strings[ii], v1); - if(strings.size() > ii + 1) { - retval = retval && lexical_assign(strings[ii + 1], v2); - } - if(retval) { - output.emplace_back(v1, v2); - } else { - return false; - } + output.clear(); + for (std::size_t ii = 0; ii < strings.size(); ii += 2) { + + typename std::tuple_element<0, typename XC::value_type>::type v1; + typename std::tuple_element<1, typename XC::value_type>::type v2; + bool retval = 
lexical_assign(strings[ii], v1); + if (strings.size() > ii + 1) { + retval = retval && lexical_assign(strings[ii + 1], v2); + } + if (retval) { + output.emplace_back(v1, v2); + } else { + return false; } - return (!output.empty()); + } + return (!output.empty()); } /// Conversion to a vector type using a particular single type as the conversion type -template ::value == expected_max_vector_size) && (expected_count::value == 1) && - (type_count::value == 1), - detail::enabler> = detail::dummy> +template < + class T, class XC, + enable_if_t<(expected_count::value == expected_max_vector_size) && (expected_count::value == 1) && (type_count::value == 1), + detail::enabler> = detail::dummy> bool lexical_conversion(const std::vector &strings, T &output) { - bool retval = true; - output.clear(); - output.reserve(strings.size()); - for(const auto &elem : strings) { + bool retval = true; + output.clear(); + output.reserve(strings.size()); + for (const auto &elem : strings) { - output.emplace_back(); - retval = retval && lexical_assign(elem, output.back()); - } - return (!output.empty()) && retval; + output.emplace_back(); + retval = retval && lexical_assign(elem, output.back()); + } + return (!output.empty()) && retval; } // This one is last since it can call other lexical_conversion functions /// Lexical conversion if there is only one element but the conversion type is a vector -template ::value && !is_vector::value && is_vector::value, detail::enabler> = - detail::dummy> +template ::value && !is_vector::value && is_vector::value, detail::enabler> = detail::dummy> bool lexical_conversion(const std::vector &strings, T &output) { - if(strings.size() > 1 || (!strings.empty() && !(strings.front().empty()))) { - XC val; - auto retval = lexical_conversion(strings, val); - output = T{val}; - return retval; - } - output = T{}; - return true; + if (strings.size() > 1 || (!strings.empty() && !(strings.front().empty()))) { + XC val; + auto retval = lexical_conversion(strings, val); + output = T{val}; + return retval; + } + output = T{}; + return true; } /// function template for converting tuples if the static Index is greater than the tuple size template -inline typename std::enable_if= type_count::value, bool>::type tuple_conversion(const std::vector &, - T &) { - return true; +inline typename std::enable_if= type_count::value, bool>::type tuple_conversion(const std::vector &, T &) { + return true; } /// Tuple conversion operation template inline typename std::enable_if < I::value, bool>::type tuple_conversion(const std::vector &strings, T &output) { - bool retval = true; - if(strings.size() > I) { - retval = retval && lexical_assign::type, - typename std::conditional::value, - typename std::tuple_element::type, - XC>::type>(strings[I], std::get(output)); - } - retval = retval && tuple_conversion(strings, output); - return retval; + bool retval = true; + if (strings.size() > I) { + retval = + retval && lexical_assign::type, + typename std::conditional::value, typename std::tuple_element::type, XC>::type>( + strings[I], std::get(output)); + } + retval = retval && tuple_conversion(strings, output); + return retval; } /// Conversion for tuples template ::value, detail::enabler> = detail::dummy> bool lexical_conversion(const std::vector &strings, T &output) { - static_assert( - !is_tuple_like::value || type_count::value == type_count::value, - "if the conversion type is defined as a tuple it must be the same size as the type you are converting to"); - return tuple_conversion(strings, output); + 
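
// At the public API level, these conversions are what let an option target be
// a container: a std::vector<int> slurps each appearance of the option, and a
// vector of pair-like elements is filled two strings at a time by the
// type-size-two overload above. A hedged sketch; the option names are
// illustrative, the include path is assumed, and pair-valued vectors are
// assumed supported here (as the two-string overload suggests).
#include <string>
#include <utility>
#include <vector>
#include "utils/CLI11.hpp" // assumed include path

int main(int argc, char **argv) {
  CLI::App app{"conversion demo"};
  std::vector<int> ids;
  app.add_option("--id", ids, "repeatable integer option"); // --id 1 --id 2 ...
  std::vector<std::pair<std::string, int>> kv;
  app.add_option("--kv", kv, "consumed as key/value pairs"); // --kv rate 10
  CLI11_PARSE(app, argc, argv);
  return 0;
}
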
static_assert(!is_tuple_like::value || type_count::value == type_count::value, + "if the conversion type is defined as a tuple it must be the same size as the type you are converting to"); + return tuple_conversion(strings, output); } /// Lexical conversion of a vector types with type_size >2 -template ::value == expected_max_vector_size && - expected_count::value == expected_max_vector_size && (type_count::value > 2), +template ::value == expected_max_vector_size && expected_count::value == expected_max_vector_size && + (type_count::value > 2), detail::enabler> = detail::dummy> bool lexical_conversion(const std::vector &strings, T &output) { - bool retval = true; - output.clear(); - std::vector temp; - std::size_t ii = 0; - std::size_t icount = 0; - std::size_t xcm = type_count::value; - while(ii < strings.size()) { - temp.push_back(strings[ii]); - ++ii; - ++icount; - if(icount == xcm || temp.back().empty()) { - if(static_cast(xcm) == expected_max_vector_size) { - temp.pop_back(); - } - output.emplace_back(); - retval = retval && lexical_conversion(temp, output.back()); - temp.clear(); - if(!retval) { - return false; - } - icount = 0; - } + bool retval = true; + output.clear(); + std::vector temp; + std::size_t ii = 0; + std::size_t icount = 0; + std::size_t xcm = type_count::value; + while (ii < strings.size()) { + temp.push_back(strings[ii]); + ++ii; + ++icount; + if (icount == xcm || temp.back().empty()) { + if (static_cast(xcm) == expected_max_vector_size) { + temp.pop_back(); + } + output.emplace_back(); + retval = retval && lexical_conversion(temp, output.back()); + temp.clear(); + if (!retval) { + return false; + } + icount = 0; } - return retval; + } + return retval; } /// Sum a vector of flag representations /// The flag vector produces a series of strings in a vector, simple true is represented by a "1", simple false is /// by /// "-1" an if numbers are passed by some fashion they are captured as well so the function just checks for the most /// common true and false strings then uses stoll to convert the rest for summing -template ::value && std::is_unsigned::value, detail::enabler> = detail::dummy> +template ::value && std::is_unsigned::value, detail::enabler> = detail::dummy> void sum_flag_vector(const std::vector &flags, T &output) { - int64_t count{0}; - for(auto &flag : flags) { - count += detail::to_flag_value(flag); - } - output = (count > 0) ? static_cast(count) : T{0}; + int64_t count{0}; + for (auto &flag : flags) { + count += detail::to_flag_value(flag); + } + output = (count > 0) ? static_cast(count) : T{0}; } /// Sum a vector of flag representations @@ -1815,14 +1668,13 @@ void sum_flag_vector(const std::vector &flags, T &output) { /// by /// "-1" an if numbers are passed by some fashion they are captured as well so the function just checks for the most /// common true and false strings then uses stoll to convert the rest for summing -template ::value && std::is_signed::value, detail::enabler> = detail::dummy> +template ::value && std::is_signed::value, detail::enabler> = detail::dummy> void sum_flag_vector(const std::vector &flags, T &output) { - int64_t count{0}; - for(auto &flag : flags) { - count += detail::to_flag_value(flag); - } - output = static_cast(count); + int64_t count{0}; + for (auto &flag : flags) { + count += detail::to_flag_value(flag); + } + output = static_cast(count); } } // namespace detail @@ -1835,119 +1687,115 @@ namespace detail { // Returns false if not a short option. 
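
// sum_flag_vector is the machinery behind repeatable counting flags: each
// occurrence contributes +1 (or -1 for a negated spelling) and the sum lands
// in the user's integer. From the public API side (a sketch, include path
// assumed):
#include <iostream>
#include "utils/CLI11.hpp" // assumed include path

int main(int argc, char **argv) {
  CLI::App app{"flag demo"};
  int verbosity{0};
  app.add_flag("-v,--verbose", verbosity, "repeat to increase verbosity");
  CLI11_PARSE(app, argc, argv);
  std::cout << "verbosity = " << verbosity << '\n'; // "-vvv" prints 3
  return 0;
}
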
Otherwise, sets opt name and rest and returns true inline bool split_short(const std::string ¤t, std::string &name, std::string &rest) { - if(current.size() > 1 && current[0] == '-' && valid_first_char(current[1])) { - name = current.substr(1, 1); - rest = current.substr(2); - return true; - } - return false; + if (current.size() > 1 && current[0] == '-' && valid_first_char(current[1])) { + name = current.substr(1, 1); + rest = current.substr(2); + return true; + } + return false; } // Returns false if not a long option. Otherwise, sets opt name and other side of = and returns true inline bool split_long(const std::string ¤t, std::string &name, std::string &value) { - if(current.size() > 2 && current.substr(0, 2) == "--" && valid_first_char(current[2])) { - auto loc = current.find_first_of('='); - if(loc != std::string::npos) { - name = current.substr(2, loc - 2); - value = current.substr(loc + 1); - } else { - name = current.substr(2); - value = ""; - } - return true; + if (current.size() > 2 && current.substr(0, 2) == "--" && valid_first_char(current[2])) { + auto loc = current.find_first_of('='); + if (loc != std::string::npos) { + name = current.substr(2, loc - 2); + value = current.substr(loc + 1); + } else { + name = current.substr(2); + value = ""; } - return false; + return true; + } + return false; } // Returns false if not a windows style option. Otherwise, sets opt name and value and returns true inline bool split_windows_style(const std::string ¤t, std::string &name, std::string &value) { - if(current.size() > 1 && current[0] == '/' && valid_first_char(current[1])) { - auto loc = current.find_first_of(':'); - if(loc != std::string::npos) { - name = current.substr(1, loc - 1); - value = current.substr(loc + 1); - } else { - name = current.substr(1); - value = ""; - } - return true; + if (current.size() > 1 && current[0] == '/' && valid_first_char(current[1])) { + auto loc = current.find_first_of(':'); + if (loc != std::string::npos) { + name = current.substr(1, loc - 1); + value = current.substr(loc + 1); + } else { + name = current.substr(1); + value = ""; } - return false; + return true; + } + return false; } // Splits a string into multiple long and short names inline std::vector split_names(std::string current) { - std::vector output; - std::size_t val; - while((val = current.find(",")) != std::string::npos) { - output.push_back(trim_copy(current.substr(0, val))); - current = current.substr(val + 1); - } - output.push_back(trim_copy(current)); - return output; + std::vector output; + std::size_t val; + while ((val = current.find(",")) != std::string::npos) { + output.push_back(trim_copy(current.substr(0, val))); + current = current.substr(val + 1); + } + output.push_back(trim_copy(current)); + return output; } /// extract default flag values either {def} or starting with a ! 
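
// A quick probe of the three splitters above; they only report whether the
// token has the right shape, and leave the extracted pieces in the out
// parameters (usage sketch, include path assumed):
#include <cassert>
#include <string>
#include "utils/CLI11.hpp" // assumed include path

int main() {
  std::string name, value;
  assert(CLI::detail::split_long("--file=out.txt", name, value));
  assert(name == "file" && value == "out.txt");
  assert(CLI::detail::split_short("-v", name, value));
  assert(name == "v" && value.empty()); // for "-vxyz" the rest "xyz" lands here
  assert(CLI::detail::split_windows_style("/out:log.txt", name, value));
  assert(name == "out" && value == "log.txt");
  return 0;
}
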
inline std::vector> get_default_flag_values(const std::string &str) { - std::vector flags = split_names(str); - flags.erase(std::remove_if(flags.begin(), - flags.end(), - [](const std::string &name) { - return ((name.empty()) || (!(((name.find_first_of('{') != std::string::npos) && - (name.back() == '}')) || - (name[0] == '!')))); - }), - flags.end()); - std::vector> output; - output.reserve(flags.size()); - for(auto &flag : flags) { - auto def_start = flag.find_first_of('{'); - std::string defval = "false"; - if((def_start != std::string::npos) && (flag.back() == '}')) { - defval = flag.substr(def_start + 1); - defval.pop_back(); - flag.erase(def_start, std::string::npos); - } - flag.erase(0, flag.find_first_not_of("-!")); - output.emplace_back(flag, defval); - } - return output; + std::vector flags = split_names(str); + flags.erase(std::remove_if(flags.begin(), flags.end(), + [](const std::string &name) { + return ((name.empty()) || + (!(((name.find_first_of('{') != std::string::npos) && (name.back() == '}')) || (name[0] == '!')))); + }), + flags.end()); + std::vector> output; + output.reserve(flags.size()); + for (auto &flag : flags) { + auto def_start = flag.find_first_of('{'); + std::string defval = "false"; + if ((def_start != std::string::npos) && (flag.back() == '}')) { + defval = flag.substr(def_start + 1); + defval.pop_back(); + flag.erase(def_start, std::string::npos); + } + flag.erase(0, flag.find_first_not_of("-!")); + output.emplace_back(flag, defval); + } + return output; } /// Get a vector of short names, one of long names, and a single name -inline std::tuple, std::vector, std::string> -get_names(const std::vector &input) { - - std::vector short_names; - std::vector long_names; - std::string pos_name; - - for(std::string name : input) { - if(name.length() == 0) { - continue; - } - if(name.length() > 1 && name[0] == '-' && name[1] != '-') { - if(name.length() == 2 && valid_first_char(name[1])) - short_names.emplace_back(1, name[1]); - else - throw BadNameString::OneCharName(name); - } else if(name.length() > 2 && name.substr(0, 2) == "--") { - name = name.substr(2); - if(valid_name_string(name)) - long_names.push_back(name); - else - throw BadNameString::BadLongName(name); - } else if(name == "-" || name == "--") { - throw BadNameString::DashesOnly(name); - } else { - if(pos_name.length() > 0) - throw BadNameString::MultiPositionalNames(name); - pos_name = name; - } +inline std::tuple, std::vector, std::string> get_names(const std::vector &input) { + + std::vector short_names; + std::vector long_names; + std::string pos_name; + + for (std::string name : input) { + if (name.length() == 0) { + continue; + } + if (name.length() > 1 && name[0] == '-' && name[1] != '-') { + if (name.length() == 2 && valid_first_char(name[1])) + short_names.emplace_back(1, name[1]); + else + throw BadNameString::OneCharName(name); + } else if (name.length() > 2 && name.substr(0, 2) == "--") { + name = name.substr(2); + if (valid_name_string(name)) + long_names.push_back(name); + else + throw BadNameString::BadLongName(name); + } else if (name == "-" || name == "--") { + throw BadNameString::DashesOnly(name); + } else { + if (pos_name.length() > 0) + throw BadNameString::MultiPositionalNames(name); + pos_name = name; } + } - return std::tuple, std::vector, std::string>( - short_names, long_names, pos_name); + return std::tuple, std::vector, std::string>(short_names, long_names, pos_name); } } // namespace detail @@ -1961,96 +1809,95 @@ class App; /// Holds values to load into Options struct 
ConfigItem { - /// This is the list of parents - std::vector parents{}; + /// This is the list of parents + std::vector parents{}; - /// This is the name - std::string name{}; + /// This is the name + std::string name{}; - /// Listing of inputs - std::vector inputs{}; + /// Listing of inputs + std::vector inputs{}; - /// The list of parents and name joined by "." - std::string fullname() const { - std::vector tmp = parents; - tmp.emplace_back(name); - return detail::join(tmp, "."); - } + /// The list of parents and name joined by "." + std::string fullname() const { + std::vector tmp = parents; + tmp.emplace_back(name); + return detail::join(tmp, "."); + } }; /// This class provides a converter for configuration files. class Config { - protected: - std::vector items{}; +protected: + std::vector items{}; - public: - /// Convert an app into a configuration - virtual std::string to_config(const App *, bool, bool, std::string) const = 0; +public: + /// Convert an app into a configuration + virtual std::string to_config(const App *, bool, bool, std::string) const = 0; - /// Convert a configuration into an app - virtual std::vector from_config(std::istream &) const = 0; + /// Convert a configuration into an app + virtual std::vector from_config(std::istream &) const = 0; - /// Get a flag value - virtual std::string to_flag(const ConfigItem &item) const { - if(item.inputs.size() == 1) { - return item.inputs.at(0); - } - throw ConversionError::TooManyInputsFlag(item.fullname()); + /// Get a flag value + virtual std::string to_flag(const ConfigItem &item) const { + if (item.inputs.size() == 1) { + return item.inputs.at(0); } + throw ConversionError::TooManyInputsFlag(item.fullname()); + } - /// Parse a config file, throw an error (ParseError:ConfigParseError or FileError) on failure - std::vector from_file(const std::string &name) { - std::ifstream input{name}; - if(!input.good()) - throw FileError::Missing(name); + /// Parse a config file, throw an error (ParseError:ConfigParseError or FileError) on failure + std::vector from_file(const std::string &name) { + std::ifstream input{name}; + if (!input.good()) + throw FileError::Missing(name); - return from_config(input); - } + return from_config(input); + } - /// Virtual destructor - virtual ~Config() = default; + /// Virtual destructor + virtual ~Config() = default; }; /// This converter works with INI/TOML files; to write proper TOML files use ConfigTOML class ConfigBase : public Config { - protected: - /// the character used for comments - char commentChar = ';'; - /// the character used to start an array '\0' is a default to not use - char arrayStart = '\0'; - /// the character used to end an array '\0' is a default to not use - char arrayEnd = '\0'; - /// the character used to separate elements in an array - char arraySeparator = ' '; - /// the character used separate the name from the value - char valueDelimiter = '='; - - public: - std::string - to_config(const App * /*app*/, bool default_also, bool write_description, std::string prefix) const override; - - std::vector from_config(std::istream &input) const override; - /// Specify the configuration for comment characters - ConfigBase *comment(char cchar) { - commentChar = cchar; - return this; - } - /// Specify the start and end characters for an array - ConfigBase *arrayBounds(char aStart, char aEnd) { - arrayStart = aStart; - arrayEnd = aEnd; - return this; - } - /// Specify the delimiter character for an array - ConfigBase *arrayDelimiter(char aSep) { - arraySeparator = aSep; - return this; 
- }
- /// Specify the delimiter between a name and value
- ConfigBase *valueSeparator(char vSep) {
- valueDelimiter = vSep;
- return this;
- }
+protected:
+ /// the character used for comments
+ char commentChar = ';';
+ /// the character used to start an array '\0' is a default to not use
+ char arrayStart = '\0';
+ /// the character used to end an array '\0' is a default to not use
+ char arrayEnd = '\0';
+ /// the character used to separate elements in an array
+ char arraySeparator = ' ';
+ /// the character used to separate the name from the value
+ char valueDelimiter = '=';
+
+public:
+ std::string to_config(const App * /*app*/, bool default_also, bool write_description, std::string prefix) const override;
+
+ std::vector<ConfigItem> from_config(std::istream &input) const override;
+ /// Specify the configuration for comment characters
+ ConfigBase *comment(char cchar) {
+ commentChar = cchar;
+ return this;
+ }
+ /// Specify the start and end characters for an array
+ ConfigBase *arrayBounds(char aStart, char aEnd) {
+ arrayStart = aStart;
+ arrayEnd = aEnd;
+ return this;
+ }
+ /// Specify the delimiter character for an array
+ ConfigBase *arrayDelimiter(char aSep) {
+ arraySeparator = aSep;
+ return this;
+ }
+ /// Specify the delimiter between a name and value
+ ConfigBase *valueSeparator(char vSep) {
+ valueDelimiter = vSep;
+ return this;
+ }
};

/// the default Config is the INI file format
@@ -2059,14 +1906,14 @@ using ConfigINI = ConfigBase;

/// ConfigTOML generates a TOML compliant output
class ConfigTOML : public ConfigINI {
- public:
- ConfigTOML() {
- commentChar = '#';
- arrayStart = '[';
- arrayEnd = ']';
- arraySeparator = ',';
- valueDelimiter = '=';
- }
+public:
+ ConfigTOML() {
+ commentChar = '#';
+ arrayStart = '[';
+ arrayEnd = ']';
+ arraySeparator = ',';
+ valueDelimiter = '=';
+ }
};

} // namespace CLI
@@ -2087,216 +1934,215 @@ class Option;
///
class Validator {
- protected:
- /// This is the description function, if empty the description_ will be used
- std::function<std::string()> desc_function_{[]() { return std::string{}; }};
-
- /// This is the base function that is to be called.
- /// Returns a string error message if validation fails.
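
// ConfigBase/ConfigTOML are wired into an application through App's config
// hooks. A hedged sketch of typical use; the file name and option names are
// illustrative, and the include path is assumed:
#include <iostream>
#include <memory>
#include "utils/CLI11.hpp" // assumed include path

int main(int argc, char **argv) {
  CLI::App app{"config demo"};
  int rate{10};
  app.add_option("--rate", rate, "update rate");
  // Accept a config file and read/emit it in TOML form via ConfigTOML above.
  app.set_config("--config", "settings.toml", "Read a TOML config file");
  app.config_formatter(std::make_shared<CLI::ConfigTOML>());
  CLI11_PARSE(app, argc, argv);
  std::cout << app.config_to_str(true, true); // defaults plus descriptions
  return 0;
}
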
- std::function func_{[](std::string &) { return std::string{}; }}; - /// The name for search purposes of the Validator - std::string name_{}; - /// A Validator will only apply to an indexed value (-1 is all elements) - int application_index_ = -1; - /// Enable for Validator to allow it to be disabled if need be - bool active_{true}; - /// specify that a validator should not modify the input - bool non_modifying_{false}; - - public: - Validator() = default; - /// Construct a Validator with just the description string - explicit Validator(std::string validator_desc) : desc_function_([validator_desc]() { return validator_desc; }) {} - /// Construct Validator from basic information - Validator(std::function op, std::string validator_desc, std::string validator_name = "") - : desc_function_([validator_desc]() { return validator_desc; }), func_(std::move(op)), - name_(std::move(validator_name)) {} - /// Set the Validator operation function - Validator &operation(std::function op) { - func_ = std::move(op); - return *this; - } - /// This is the required operator for a Validator - provided to help - /// users (CLI11 uses the member `func` directly) - std::string operator()(std::string &str) const { - std::string retstring; - if(active_) { - if(non_modifying_) { - std::string value = str; - retstring = func_(value); - } else { - retstring = func_(str); - } - } - return retstring; - }; - - /// This is the required operator for a Validator - provided to help - /// users (CLI11 uses the member `func` directly) - std::string operator()(const std::string &str) const { +protected: + /// This is the description function, if empty the description_ will be used + std::function desc_function_{[]() { return std::string{}; }}; + + /// This is the base function that is to be called. + /// Returns a string error message if validation fails. + std::function func_{[](std::string &) { return std::string{}; }}; + /// The name for search purposes of the Validator + std::string name_{}; + /// A Validator will only apply to an indexed value (-1 is all elements) + int application_index_ = -1; + /// Enable for Validator to allow it to be disabled if need be + bool active_{true}; + /// specify that a validator should not modify the input + bool non_modifying_{false}; + +public: + Validator() = default; + /// Construct a Validator with just the description string + explicit Validator(std::string validator_desc) : desc_function_([validator_desc]() { return validator_desc; }) {} + /// Construct Validator from basic information + Validator(std::function op, std::string validator_desc, std::string validator_name = "") + : desc_function_([validator_desc]() { return validator_desc; }), func_(std::move(op)), name_(std::move(validator_name)) {} + /// Set the Validator operation function + Validator &operation(std::function op) { + func_ = std::move(op); + return *this; + } + /// This is the required operator for a Validator - provided to help + /// users (CLI11 uses the member `func` directly) + std::string operator()(std::string &str) const { + std::string retstring; + if (active_) { + if (non_modifying_) { std::string value = str; - return (active_) ? 
func_(value) : std::string{}; - }; - - /// Specify the type string - Validator &description(std::string validator_desc) { - desc_function_ = [validator_desc]() { return validator_desc; }; - return *this; - } - /// Specify the type string - Validator description(std::string validator_desc) const { - Validator newval(*this); - newval.desc_function_ = [validator_desc]() { return validator_desc; }; - return newval; - } - /// Generate type description information for the Validator - std::string get_description() const { - if(active_) { - return desc_function_(); - } - return std::string{}; + retstring = func_(value); + } else { + retstring = func_(str); + } + } + return retstring; + }; + + /// This is the required operator for a Validator - provided to help + /// users (CLI11 uses the member `func` directly) + std::string operator()(const std::string &str) const { + std::string value = str; + return (active_) ? func_(value) : std::string{}; + }; + + /// Specify the type string + Validator &description(std::string validator_desc) { + desc_function_ = [validator_desc]() { return validator_desc; }; + return *this; + } + /// Specify the type string + Validator description(std::string validator_desc) const { + Validator newval(*this); + newval.desc_function_ = [validator_desc]() { return validator_desc; }; + return newval; + } + /// Generate type description information for the Validator + std::string get_description() const { + if (active_) { + return desc_function_(); } - /// Specify the type string - Validator &name(std::string validator_name) { - name_ = std::move(validator_name); - return *this; - } - /// Specify the type string - Validator name(std::string validator_name) const { - Validator newval(*this); - newval.name_ = std::move(validator_name); - return newval; - } - /// Get the name of the Validator - const std::string &get_name() const { return name_; } - /// Specify whether the Validator is active or not - Validator &active(bool active_val = true) { - active_ = active_val; - return *this; - } - /// Specify whether the Validator is active or not - Validator active(bool active_val = true) const { - Validator newval(*this); - newval.active_ = active_val; - return newval; - } - - /// Specify whether the Validator can be modifying or not - Validator &non_modifying(bool no_modify = true) { - non_modifying_ = no_modify; - return *this; - } - /// Specify the application index of a validator - Validator &application_index(int app_index) { - application_index_ = app_index; - return *this; - }; - /// Specify the application index of a validator - Validator application_index(int app_index) const { - Validator newval(*this); - newval.application_index_ = app_index; - return newval; + return std::string{}; + } + /// Specify the type string + Validator &name(std::string validator_name) { + name_ = std::move(validator_name); + return *this; + } + /// Specify the type string + Validator name(std::string validator_name) const { + Validator newval(*this); + newval.name_ = std::move(validator_name); + return newval; + } + /// Get the name of the Validator + const std::string &get_name() const { return name_; } + /// Specify whether the Validator is active or not + Validator &active(bool active_val = true) { + active_ = active_val; + return *this; + } + /// Specify whether the Validator is active or not + Validator active(bool active_val = true) const { + Validator newval(*this); + newval.active_ = active_val; + return newval; + } + + /// Specify whether the Validator can be modifying or not + Validator 
&non_modifying(bool no_modify = true) { + non_modifying_ = no_modify; + return *this; + } + /// Specify the application index of a validator + Validator &application_index(int app_index) { + application_index_ = app_index; + return *this; + }; + /// Specify the application index of a validator + Validator application_index(int app_index) const { + Validator newval(*this); + newval.application_index_ = app_index; + return newval; + }; + /// Get the current value of the application index + int get_application_index() const { return application_index_; } + /// Get a boolean if the validator is active + bool get_active() const { return active_; } + + /// Get a boolean if the validator is allowed to modify the input returns true if it can modify the input + bool get_modifying() const { return !non_modifying_; } + + /// Combining validators is a new validator. Type comes from left validator if function, otherwise only set if the + /// same. + Validator operator&(const Validator &other) const { + Validator newval; + + newval._merge_description(*this, other, " AND "); + + // Give references (will make a copy in lambda function) + const std::function &f1 = func_; + const std::function &f2 = other.func_; + + newval.func_ = [f1, f2](std::string &input) { + std::string s1 = f1(input); + std::string s2 = f2(input); + if (!s1.empty() && !s2.empty()) + return std::string("(") + s1 + ") AND (" + s2 + ")"; + else + return s1 + s2; }; - /// Get the current value of the application index - int get_application_index() const { return application_index_; } - /// Get a boolean if the validator is active - bool get_active() const { return active_; } - - /// Get a boolean if the validator is allowed to modify the input returns true if it can modify the input - bool get_modifying() const { return !non_modifying_; } - - /// Combining validators is a new validator. Type comes from left validator if function, otherwise only set if the - /// same. - Validator operator&(const Validator &other) const { - Validator newval; - - newval._merge_description(*this, other, " AND "); - - // Give references (will make a copy in lambda function) - const std::function &f1 = func_; - const std::function &f2 = other.func_; - - newval.func_ = [f1, f2](std::string &input) { - std::string s1 = f1(input); - std::string s2 = f2(input); - if(!s1.empty() && !s2.empty()) - return std::string("(") + s1 + ") AND (" + s2 + ")"; - else - return s1 + s2; - }; - - newval.active_ = (active_ & other.active_); - newval.application_index_ = application_index_; - return newval; - } - - /// Combining validators is a new validator. Type comes from left validator if function, otherwise only set if the - /// same. 
- Validator operator|(const Validator &other) const { - Validator newval; - - newval._merge_description(*this, other, " OR "); - - // Give references (will make a copy in lambda function) - const std::function &f1 = func_; - const std::function &f2 = other.func_; - - newval.func_ = [f1, f2](std::string &input) { - std::string s1 = f1(input); - std::string s2 = f2(input); - if(s1.empty() || s2.empty()) - return std::string(); - - return std::string("(") + s1 + ") OR (" + s2 + ")"; - }; - newval.active_ = (active_ & other.active_); - newval.application_index_ = application_index_; - return newval; - } - - /// Create a validator that fails when a given validator succeeds - Validator operator!() const { - Validator newval; - const std::function &dfunc1 = desc_function_; - newval.desc_function_ = [dfunc1]() { - auto str = dfunc1(); - return (!str.empty()) ? std::string("NOT ") + str : std::string{}; - }; - // Give references (will make a copy in lambda function) - const std::function &f1 = func_; - - newval.func_ = [f1, dfunc1](std::string &test) -> std::string { - std::string s1 = f1(test); - if(s1.empty()) { - return std::string("check ") + dfunc1() + " succeeded improperly"; - } - return std::string{}; - }; - newval.active_ = active_; - newval.application_index_ = application_index_; - return newval; - } - private: - void _merge_description(const Validator &val1, const Validator &val2, const std::string &merger) { + newval.active_ = (active_ & other.active_); + newval.application_index_ = application_index_; + return newval; + } - const std::function &dfunc1 = val1.desc_function_; - const std::function &dfunc2 = val2.desc_function_; + /// Combining validators is a new validator. Type comes from left validator if function, otherwise only set if the + /// same. + Validator operator|(const Validator &other) const { + Validator newval; - desc_function_ = [=]() { - std::string f1 = dfunc1(); - std::string f2 = dfunc2(); - if((f1.empty()) || (f2.empty())) { - return f1 + f2; - } - return std::string(1, '(') + f1 + ')' + merger + '(' + f2 + ')'; - }; - } + newval._merge_description(*this, other, " OR "); + + // Give references (will make a copy in lambda function) + const std::function &f1 = func_; + const std::function &f2 = other.func_; + + newval.func_ = [f1, f2](std::string &input) { + std::string s1 = f1(input); + std::string s2 = f2(input); + if (s1.empty() || s2.empty()) + return std::string(); + + return std::string("(") + s1 + ") OR (" + s2 + ")"; + }; + newval.active_ = (active_ & other.active_); + newval.application_index_ = application_index_; + return newval; + } + + /// Create a validator that fails when a given validator succeeds + Validator operator!() const { + Validator newval; + const std::function &dfunc1 = desc_function_; + newval.desc_function_ = [dfunc1]() { + auto str = dfunc1(); + return (!str.empty()) ? 
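
// The operator&, operator| and operator! overloads make validators composable
// values: AND concatenates both error messages, OR succeeds when either side
// is silent, and ! inverts a check. A usage sketch (option names are
// illustrative, include path assumed):
#include <string>
#include "utils/CLI11.hpp" // assumed include path

int main(int argc, char **argv) {
  CLI::App app{"validator demo"};
  int port{0};
  // AND of a range check and a negated range check.
  app.add_option("--port", port)->check(CLI::Range(1024, 65535) & !CLI::Range(8080, 8081));
  std::string path;
  // OR: accept either an existing file or an existing directory.
  app.add_option("--path", path)->check(CLI::ExistingFile | CLI::ExistingDirectory);
  CLI11_PARSE(app, argc, argv);
  return 0;
}
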
std::string("NOT ") + str : std::string{}; + }; + // Give references (will make a copy in lambda function) + const std::function &f1 = func_; + + newval.func_ = [f1, dfunc1](std::string &test) -> std::string { + std::string s1 = f1(test); + if (s1.empty()) { + return std::string("check ") + dfunc1() + " succeeded improperly"; + } + return std::string{}; + }; + newval.active_ = active_; + newval.application_index_ = application_index_; + return newval; + } + +private: + void _merge_description(const Validator &val1, const Validator &val2, const std::string &merger) { + + const std::function &dfunc1 = val1.desc_function_; + const std::function &dfunc2 = val2.desc_function_; + + desc_function_ = [=]() { + std::string f1 = dfunc1(); + std::string f2 = dfunc2(); + if ((f1.empty()) || (f2.empty())) { + return f1 + f2; + } + return std::string(1, '(') + f1 + ')' + merger + '(' + f2 + ')'; + }; + } }; // namespace CLI /// Class wrapping some of the accessors of Validator class CustomValidator : public Validator { - public: +public: }; // The implementation of the built in validators is using the Validator class; // the user is only expected to use the const (static) versions (since there's no setup). @@ -2309,176 +2155,176 @@ enum class path_type { nonexistant, file, directory }; #if defined CLI11_HAS_FILESYSTEM && CLI11_HAS_FILESYSTEM > 0 /// get the type of the path from a file name inline path_type check_path(const char *file) noexcept { - std::error_code ec; - auto stat = std::filesystem::status(file, ec); - if(ec) { - return path_type::nonexistant; - } - switch(stat.type()) { - case std::filesystem::file_type::none: - case std::filesystem::file_type::not_found: - return path_type::nonexistant; - case std::filesystem::file_type::directory: - return path_type::directory; - case std::filesystem::file_type::symlink: - case std::filesystem::file_type::block: - case std::filesystem::file_type::character: - case std::filesystem::file_type::fifo: - case std::filesystem::file_type::socket: - case std::filesystem::file_type::regular: - case std::filesystem::file_type::unknown: - default: - return path_type::file; - } + std::error_code ec; + auto stat = std::filesystem::status(file, ec); + if (ec) { + return path_type::nonexistant; + } + switch (stat.type()) { + case std::filesystem::file_type::none: + case std::filesystem::file_type::not_found: + return path_type::nonexistant; + case std::filesystem::file_type::directory: + return path_type::directory; + case std::filesystem::file_type::symlink: + case std::filesystem::file_type::block: + case std::filesystem::file_type::character: + case std::filesystem::file_type::fifo: + case std::filesystem::file_type::socket: + case std::filesystem::file_type::regular: + case std::filesystem::file_type::unknown: + default: + return path_type::file; + } } #else /// get the type of the path from a file name inline path_type check_path(const char *file) noexcept { #if defined(_MSC_VER) - struct __stat64 buffer; - if(_stat64(file, &buffer) == 0) { - return ((buffer.st_mode & S_IFDIR) != 0) ? path_type::directory : path_type::file; - } + struct __stat64 buffer; + if (_stat64(file, &buffer) == 0) { + return ((buffer.st_mode & S_IFDIR) != 0) ? path_type::directory : path_type::file; + } #else - struct stat buffer; - if(stat(file, &buffer) == 0) { - return ((buffer.st_mode & S_IFDIR) != 0) ? path_type::directory : path_type::file; - } + struct stat buffer; + if (stat(file, &buffer) == 0) { + return ((buffer.st_mode & S_IFDIR) != 0) ? 
path_type::directory : path_type::file; + } #endif - return path_type::nonexistant; + return path_type::nonexistant; } #endif /// Check for an existing file (returns error message if check fails) class ExistingFileValidator : public Validator { - public: - ExistingFileValidator() : Validator("FILE") { - func_ = [](std::string &filename) { - auto path_result = check_path(filename.c_str()); - if(path_result == path_type::nonexistant) { - return "File does not exist: " + filename; - } - if(path_result == path_type::directory) { - return "File is actually a directory: " + filename; - } - return std::string(); - }; - } +public: + ExistingFileValidator() : Validator("FILE") { + func_ = [](std::string &filename) { + auto path_result = check_path(filename.c_str()); + if (path_result == path_type::nonexistant) { + return "File does not exist: " + filename; + } + if (path_result == path_type::directory) { + return "File is actually a directory: " + filename; + } + return std::string(); + }; + } }; /// Check for an existing directory (returns error message if check fails) class ExistingDirectoryValidator : public Validator { - public: - ExistingDirectoryValidator() : Validator("DIR") { - func_ = [](std::string &filename) { - auto path_result = check_path(filename.c_str()); - if(path_result == path_type::nonexistant) { - return "Directory does not exist: " + filename; - } - if(path_result == path_type::file) { - return "Directory is actually a file: " + filename; - } - return std::string(); - }; - } +public: + ExistingDirectoryValidator() : Validator("DIR") { + func_ = [](std::string &filename) { + auto path_result = check_path(filename.c_str()); + if (path_result == path_type::nonexistant) { + return "Directory does not exist: " + filename; + } + if (path_result == path_type::file) { + return "Directory is actually a file: " + filename; + } + return std::string(); + }; + } }; /// Check for an existing path class ExistingPathValidator : public Validator { - public: - ExistingPathValidator() : Validator("PATH(existing)") { - func_ = [](std::string &filename) { - auto path_result = check_path(filename.c_str()); - if(path_result == path_type::nonexistant) { - return "Path does not exist: " + filename; - } - return std::string(); - }; - } +public: + ExistingPathValidator() : Validator("PATH(existing)") { + func_ = [](std::string &filename) { + auto path_result = check_path(filename.c_str()); + if (path_result == path_type::nonexistant) { + return "Path does not exist: " + filename; + } + return std::string(); + }; + } }; /// Check for an non-existing path class NonexistentPathValidator : public Validator { - public: - NonexistentPathValidator() : Validator("PATH(non-existing)") { - func_ = [](std::string &filename) { - auto path_result = check_path(filename.c_str()); - if(path_result != path_type::nonexistant) { - return "Path already exists: " + filename; - } - return std::string(); - }; - } +public: + NonexistentPathValidator() : Validator("PATH(non-existing)") { + func_ = [](std::string &filename) { + auto path_result = check_path(filename.c_str()); + if (path_result != path_type::nonexistant) { + return "Path already exists: " + filename; + } + return std::string(); + }; + } }; /// Validate the given string is a legal ipv4 address class IPV4Validator : public Validator { - public: - IPV4Validator() : Validator("IPV4") { - func_ = [](std::string &ip_addr) { - auto result = CLI::detail::split(ip_addr, '.'); - if(result.size() != 4) { - return std::string("Invalid IPV4 address must have four parts (") + 
ip_addr + ')'; - } - int num; - for(const auto &var : result) { - bool retval = detail::lexical_cast(var, num); - if(!retval) { - return std::string("Failed parsing number (") + var + ')'; - } - if(num < 0 || num > 255) { - return std::string("Each IP number must be between 0 and 255 ") + var; - } - } - return std::string(); - }; - } +public: + IPV4Validator() : Validator("IPV4") { + func_ = [](std::string &ip_addr) { + auto result = CLI::detail::split(ip_addr, '.'); + if (result.size() != 4) { + return std::string("Invalid IPV4 address must have four parts (") + ip_addr + ')'; + } + int num; + for (const auto &var : result) { + bool retval = detail::lexical_cast(var, num); + if (!retval) { + return std::string("Failed parsing number (") + var + ')'; + } + if (num < 0 || num > 255) { + return std::string("Each IP number must be between 0 and 255 ") + var; + } + } + return std::string(); + }; + } }; /// Validate the argument is a number and greater than 0 class PositiveNumber : public Validator { - public: - PositiveNumber() : Validator("POSITIVE") { - func_ = [](std::string &number_str) { - double number; - if(!detail::lexical_cast(number_str, number)) { - return std::string("Failed parsing number: (") + number_str + ')'; - } - if(number <= 0) { - return std::string("Number less or equal to 0: (") + number_str + ')'; - } - return std::string(); - }; - } +public: + PositiveNumber() : Validator("POSITIVE") { + func_ = [](std::string &number_str) { + double number; + if (!detail::lexical_cast(number_str, number)) { + return std::string("Failed parsing number: (") + number_str + ')'; + } + if (number <= 0) { + return std::string("Number less or equal to 0: (") + number_str + ')'; + } + return std::string(); + }; + } }; /// Validate the argument is a number and greater than or equal to 0 class NonNegativeNumber : public Validator { - public: - NonNegativeNumber() : Validator("NONNEGATIVE") { - func_ = [](std::string &number_str) { - double number; - if(!detail::lexical_cast(number_str, number)) { - return std::string("Failed parsing number: (") + number_str + ')'; - } - if(number < 0) { - return std::string("Number less than 0: (") + number_str + ')'; - } - return std::string(); - }; - } +public: + NonNegativeNumber() : Validator("NONNEGATIVE") { + func_ = [](std::string &number_str) { + double number; + if (!detail::lexical_cast(number_str, number)) { + return std::string("Failed parsing number: (") + number_str + ')'; + } + if (number < 0) { + return std::string("Number less than 0: (") + number_str + ')'; + } + return std::string(); + }; + } }; /// Validate the argument is a number class Number : public Validator { - public: - Number() : Validator("NUMBER") { - func_ = [](std::string &number_str) { - double number; - if(!detail::lexical_cast(number_str, number)) { - return std::string("Failed parsing as a number (") + number_str + ')'; - } - return std::string(); - }; - } +public: + Number() : Validator("NUMBER") { + func_ = [](std::string &number_str) { + double number; + if (!detail::lexical_cast(number_str, number)) { + return std::string("Failed parsing as a number (") + number_str + ')'; + } + return std::string(); + }; + } }; } // namespace detail @@ -2511,269 +2357,255 @@ const detail::Number Number; /// Produce a range (factory). Min and max are inclusive. class Range : public Validator { - public: - /// This produces a range with min and max inclusive. 
- /// - /// Note that the constructor is templated, but the struct is not, so C++17 is not - /// needed to provide nice syntax for Range(a,b). - template Range(T min, T max) { - std::stringstream out; - out << detail::type_name() << " in [" << min << " - " << max << "]"; - description(out.str()); - - func_ = [min, max](std::string &input) { - T val; - bool converted = detail::lexical_cast(input, val); - if((!converted) || (val < min || val > max)) - return std::string("Value ") + input + " not in range " + std::to_string(min) + " to " + - std::to_string(max); - - return std::string(); - }; - } - - /// Range of one value is 0 to value - template explicit Range(T max) : Range(static_cast(0), max) {} +public: + /// This produces a range with min and max inclusive. + /// + /// Note that the constructor is templated, but the struct is not, so C++17 is not + /// needed to provide nice syntax for Range(a,b). + template Range(T min, T max) { + std::stringstream out; + out << detail::type_name() << " in [" << min << " - " << max << "]"; + description(out.str()); + + func_ = [min, max](std::string &input) { + T val; + bool converted = detail::lexical_cast(input, val); + if ((!converted) || (val < min || val > max)) + return std::string("Value ") + input + " not in range " + std::to_string(min) + " to " + std::to_string(max); + + return std::string(); + }; + } + + /// Range of one value is 0 to value + template explicit Range(T max) : Range(static_cast(0), max) {} }; /// Produce a bounded range (factory). Min and max are inclusive. class Bound : public Validator { - public: - /// This bounds a value with min and max inclusive. - /// - /// Note that the constructor is templated, but the struct is not, so C++17 is not - /// needed to provide nice syntax for Range(a,b). - template Bound(T min, T max) { - std::stringstream out; - out << detail::type_name() << " bounded to [" << min << " - " << max << "]"; - description(out.str()); - - func_ = [min, max](std::string &input) { - T val; - bool converted = detail::lexical_cast(input, val); - if(!converted) { - return std::string("Value ") + input + " could not be converted"; - } - if(val < min) - input = detail::to_string(min); - else if(val > max) - input = detail::to_string(max); - - return std::string{}; - }; - } +public: + /// This bounds a value with min and max inclusive. + /// + /// Note that the constructor is templated, but the struct is not, so C++17 is not + /// needed to provide nice syntax for Range(a,b). 
+ template Bound(T min, T max) { + std::stringstream out; + out << detail::type_name() << " bounded to [" << min << " - " << max << "]"; + description(out.str()); + + func_ = [min, max](std::string &input) { + T val; + bool converted = detail::lexical_cast(input, val); + if (!converted) { + return std::string("Value ") + input + " could not be converted"; + } + if (val < min) + input = detail::to_string(min); + else if (val > max) + input = detail::to_string(max); + + return std::string{}; + }; + } - /// Range of one value is 0 to value - template explicit Bound(T max) : Bound(static_cast(0), max) {} + /// Range of one value is 0 to value + template explicit Bound(T max) : Bound(static_cast(0), max) {} }; namespace detail { -template ::type>::value, detail::enabler> = detail::dummy> +template ::type>::value, detail::enabler> = detail::dummy> auto smart_deref(T value) -> decltype(*value) { - return *value; + return *value; } -template < - typename T, - enable_if_t::type>::value, detail::enabler> = detail::dummy> +template ::type>::value, detail::enabler> = detail::dummy> typename std::remove_reference::type &smart_deref(T &value) { - return value; + return value; } /// Generate a string representation of a set template std::string generate_set(const T &set) { - using element_t = typename detail::element_type::type; - using iteration_type_t = typename detail::pair_adaptor::value_type; // the type of the object pair - std::string out(1, '{'); - out.append(detail::join( - detail::smart_deref(set), - [](const iteration_type_t &v) { return detail::pair_adaptor::first(v); }, - ",")); - out.push_back('}'); - return out; + using element_t = typename detail::element_type::type; + using iteration_type_t = typename detail::pair_adaptor::value_type; // the type of the object pair + std::string out(1, '{'); + out.append(detail::join( + detail::smart_deref(set), [](const iteration_type_t &v) { return detail::pair_adaptor::first(v); }, ",")); + out.push_back('}'); + return out; } /// Generate a string representation of a map template std::string generate_map(const T &map, bool key_only = false) { - using element_t = typename detail::element_type::type; - using iteration_type_t = typename detail::pair_adaptor::value_type; // the type of the object pair - std::string out(1, '{'); - out.append(detail::join( - detail::smart_deref(map), - [key_only](const iteration_type_t &v) { - std::string res{detail::to_string(detail::pair_adaptor::first(v))}; - - if(!key_only) { - res.append("->"); - res += detail::to_string(detail::pair_adaptor::second(v)); - } - return res; - }, - ",")); - out.push_back('}'); - return out; + using element_t = typename detail::element_type::type; + using iteration_type_t = typename detail::pair_adaptor::value_type; // the type of the object pair + std::string out(1, '{'); + out.append(detail::join( + detail::smart_deref(map), + [key_only](const iteration_type_t &v) { + std::string res{detail::to_string(detail::pair_adaptor::first(v))}; + + if (!key_only) { + res.append("->"); + res += detail::to_string(detail::pair_adaptor::second(v)); + } + return res; + }, + ",")); + out.push_back('}'); + return out; } template struct has_find { - template - static auto test(int) -> decltype(std::declval().find(std::declval()), std::true_type()); - template static auto test(...) -> decltype(std::false_type()); + template static auto test(int) -> decltype(std::declval().find(std::declval()), std::true_type()); + template static auto test(...) 
-> decltype(std::false_type()); - static const auto value = decltype(test(0))::value; - using type = std::integral_constant; + static const auto value = decltype(test(0))::value; + using type = std::integral_constant; }; /// A search function template ::value, detail::enabler> = detail::dummy> auto search(const T &set, const V &val) -> std::pair { - using element_t = typename detail::element_type::type; - auto &setref = detail::smart_deref(set); - auto it = std::find_if(std::begin(setref), std::end(setref), [&val](decltype(*std::begin(setref)) v) { - return (detail::pair_adaptor::first(v) == val); - }); - return {(it != std::end(setref)), it}; + using element_t = typename detail::element_type::type; + auto &setref = detail::smart_deref(set); + auto it = std::find_if(std::begin(setref), std::end(setref), + [&val](decltype(*std::begin(setref)) v) { return (detail::pair_adaptor::first(v) == val); }); + return {(it != std::end(setref)), it}; } /// A search function that uses the built in find function template ::value, detail::enabler> = detail::dummy> auto search(const T &set, const V &val) -> std::pair { - auto &setref = detail::smart_deref(set); - auto it = setref.find(val); - return {(it != std::end(setref)), it}; + auto &setref = detail::smart_deref(set); + auto it = setref.find(val); + return {(it != std::end(setref)), it}; } /// A search function with a filter function template auto search(const T &set, const V &val, const std::function &filter_function) -> std::pair { - using element_t = typename detail::element_type::type; - // do the potentially faster first search - auto res = search(set, val); - if((res.first) || (!(filter_function))) { - return res; - } - // if we haven't found it do the longer linear search with all the element translations - auto &setref = detail::smart_deref(set); - auto it = std::find_if(std::begin(setref), std::end(setref), [&](decltype(*std::begin(setref)) v) { - V a{detail::pair_adaptor::first(v)}; - a = filter_function(a); - return (a == val); - }); - return {(it != std::end(setref)), it}; + using element_t = typename detail::element_type::type; + // do the potentially faster first search + auto res = search(set, val); + if ((res.first) || (!(filter_function))) { + return res; + } + // if we haven't found it do the longer linear search with all the element translations + auto &setref = detail::smart_deref(set); + auto it = std::find_if(std::begin(setref), std::end(setref), [&](decltype(*std::begin(setref)) v) { + V a{detail::pair_adaptor::first(v)}; + a = filter_function(a); + return (a == val); + }); + return {(it != std::end(setref)), it}; } // the following suggestion was made by Nikita Ofitserov(@himikof) // done in templates to prevent compiler warnings on negation of unsigned numbers /// Do a check for overflow on signed numbers -template -inline typename std::enable_if::value, T>::type overflowCheck(const T &a, const T &b) { - if((a > 0) == (b > 0)) { - return ((std::numeric_limits::max)() / (std::abs)(a) < (std::abs)(b)); - } else { - return ((std::numeric_limits::min)() / (std::abs)(a) > -(std::abs)(b)); - } +template inline typename std::enable_if::value, T>::type overflowCheck(const T &a, const T &b) { + if ((a > 0) == (b > 0)) { + return ((std::numeric_limits::max)() / (std::abs)(a) < (std::abs)(b)); + } else { + return ((std::numeric_limits::min)() / (std::abs)(a) > -(std::abs)(b)); + } } /// Do a check for overflow on unsigned numbers -template -inline typename std::enable_if::value, T>::type overflowCheck(const T &a, const T &b) { - return 
((std::numeric_limits::max)() / a < b);
+template inline typename std::enable_if::value, T>::type overflowCheck(const T &a, const T &b) {
+ return ((std::numeric_limits::max)() / a < b);
}

/// Performs a *= b; if it doesn't cause integer overflow. Returns false otherwise.
template typename std::enable_if::value, bool>::type checked_multiply(T &a, T b) {
- if(a == 0 || b == 0 || a == 1 || b == 1) {
- a *= b;
- return true;
- }
- if(a == (std::numeric_limits::min)() || b == (std::numeric_limits::min)()) {
- return false;
- }
- if(overflowCheck(a, b)) {
- return false;
- }
- a *= b;
- return true;
+ if (a == 0 || b == 0 || a == 1 || b == 1) {
+ a *= b;
+ return true;
+ }
+ if (a == (std::numeric_limits::min)() || b == (std::numeric_limits::min)()) {
+ return false;
+ }
+ if (overflowCheck(a, b)) {
+ return false;
+ }
+ a *= b;
+ return true;
}

/// Performs a *= b; if it doesn't equal infinity. Returns false otherwise.
-template
-typename std::enable_if::value, bool>::type checked_multiply(T &a, T b) {
- T c = a * b;
- if(std::isinf(c) && !std::isinf(a) && !std::isinf(b)) {
- return false;
- }
- a = c;
- return true;
+template typename std::enable_if::value, bool>::type checked_multiply(T &a, T b) {
+ T c = a * b;
+ if (std::isinf(c) && !std::isinf(a) && !std::isinf(b)) {
+ return false;
+ }
+ a = c;
+ return true;
}
} // namespace detail

/// Verify items are in a set
class IsMember : public Validator {
- public:
- using filter_fn_t = std::function;
-
- /// This allows in-place construction using an initializer list
- template
- IsMember(std::initializer_list values, Args &&... args)
- : IsMember(std::vector(values), std::forward(args)...) {}
-
- /// This checks to see if an item is in a set (empty function)
- template explicit IsMember(T &&set) : IsMember(std::forward(set), nullptr) {}
-
- /// This checks to see if an item is in a set: pointer or copy version. You can pass in a function that will filter
- /// both sides of the comparison before computing the comparison. 
- template explicit IsMember(T set, F filter_function) { - - // Get the type of the contained item - requires a container have ::value_type - // if the type does not have first_type and second_type, these are both value_type - using element_t = typename detail::element_type::type; // Removes (smart) pointers if needed - using item_t = typename detail::pair_adaptor::first_type; // Is value_type if not a map - - using local_item_t = typename IsMemberType::type; // This will convert bad types to good ones - // (const char * to std::string) - - // Make a local copy of the filter function, using a std::function if not one already - std::function filter_fn = filter_function; - - // This is the type name for help, it will take the current version of the set contents - desc_function_ = [set]() { return detail::generate_set(detail::smart_deref(set)); }; - - // This is the function that validates - // It stores a copy of the set pointer-like, so shared_ptr will stay alive - func_ = [set, filter_fn](std::string &input) { - local_item_t b; - if(!detail::lexical_cast(input, b)) { - throw ValidationError(input); // name is added later - } - if(filter_fn) { - b = filter_fn(b); - } - auto res = detail::search(set, b, filter_fn); - if(res.first) { - // Make sure the version in the input string is identical to the one in the set - if(filter_fn) { - input = detail::value_string(detail::pair_adaptor::first(*(res.second))); - } - - // Return empty error string (success) - return std::string{}; - } +public: + using filter_fn_t = std::function; + + /// This allows in-place construction using an initializer list + template + IsMember(std::initializer_list values, Args &&... args) : IsMember(std::vector(values), std::forward(args)...) {} + + /// This checks to see if an item is in a set (empty function) + template explicit IsMember(T &&set) : IsMember(std::forward(set), nullptr) {} + + /// This checks to see if an item is in a set: pointer or copy version. You can pass in a function that will filter + /// both sides of the comparison before computing the comparison. 
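// Illustrative usage sketch (hypothetical, not part of this patch): IsMember
// rejects any value outside the given set; extra arguments such as
// CLI::ignore_case are filter functions applied to both sides of the
// comparison. The option and variable names below are made up.
#include "CLI11.hpp"

int main(int argc, char **argv) {
  CLI::App app{"IsMember demo"};
  std::string mode;
  // "--mode FAST" passes and is rewritten to the set's spelling "fast"
  app.add_option("--mode", mode)->check(CLI::IsMember({"fast", "slow"}, CLI::ignore_case));
  CLI11_PARSE(app, argc, argv);
  return 0;
}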
+ template explicit IsMember(T set, F filter_function) { + + // Get the type of the contained item - requires a container have ::value_type + // if the type does not have first_type and second_type, these are both value_type + using element_t = typename detail::element_type::type; // Removes (smart) pointers if needed + using item_t = typename detail::pair_adaptor::first_type; // Is value_type if not a map + + using local_item_t = typename IsMemberType::type; // This will convert bad types to good ones + // (const char * to std::string) + + // Make a local copy of the filter function, using a std::function if not one already + std::function filter_fn = filter_function; + + // This is the type name for help, it will take the current version of the set contents + desc_function_ = [set]() { return detail::generate_set(detail::smart_deref(set)); }; + + // This is the function that validates + // It stores a copy of the set pointer-like, so shared_ptr will stay alive + func_ = [set, filter_fn](std::string &input) { + local_item_t b; + if (!detail::lexical_cast(input, b)) { + throw ValidationError(input); // name is added later + } + if (filter_fn) { + b = filter_fn(b); + } + auto res = detail::search(set, b, filter_fn); + if (res.first) { + // Make sure the version in the input string is identical to the one in the set + if (filter_fn) { + input = detail::value_string(detail::pair_adaptor::first(*(res.second))); + } + + // Return empty error string (success) + return std::string{}; + } - // If you reach this point, the result was not found - std::string out(" not in "); - out += detail::generate_set(detail::smart_deref(set)); - return out; - }; - } + // If you reach this point, the result was not found + std::string out(" not in "); + out += detail::generate_set(detail::smart_deref(set)); + return out; + }; + } - /// You can pass in as many filter functions as you like, they nest (string only currently) - template - IsMember(T &&set, filter_fn_t filter_fn_1, filter_fn_t filter_fn_2, Args &&... other) - : IsMember( - std::forward(set), - [filter_fn_1, filter_fn_2](std::string a) { return filter_fn_2(filter_fn_1(a)); }, - other...) {} + /// You can pass in as many filter functions as you like, they nest (string only currently) + template + IsMember(T &&set, filter_fn_t filter_fn_1, filter_fn_t filter_fn_2, Args &&... other) + : IsMember( + std::forward(set), [filter_fn_1, filter_fn_2](std::string a) { return filter_fn_2(filter_fn_1(a)); }, other...) {} }; /// definition of the default transformation object @@ -2781,137 +2613,130 @@ template using TransformPairs = std::vector; - - /// This allows in-place construction - template - Transformer(std::initializer_list> values, Args &&... args) - : Transformer(TransformPairs(values), std::forward(args)...) {} - - /// direct map of std::string to std::string - template explicit Transformer(T &&mapping) : Transformer(std::forward(mapping), nullptr) {} - - /// This checks to see if an item is in a set: pointer or copy version. You can pass in a function that will filter - /// both sides of the comparison before computing the comparison. 
- template explicit Transformer(T mapping, F filter_function) { - - static_assert(detail::pair_adaptor::type>::value, - "mapping must produce value pairs"); - // Get the type of the contained item - requires a container have ::value_type - // if the type does not have first_type and second_type, these are both value_type - using element_t = typename detail::element_type::type; // Removes (smart) pointers if needed - using item_t = typename detail::pair_adaptor::first_type; // Is value_type if not a map - using local_item_t = typename IsMemberType::type; // This will convert bad types to good ones - // (const char * to std::string) - - // Make a local copy of the filter function, using a std::function if not one already - std::function filter_fn = filter_function; - - // This is the type name for help, it will take the current version of the set contents - desc_function_ = [mapping]() { return detail::generate_map(detail::smart_deref(mapping)); }; - - func_ = [mapping, filter_fn](std::string &input) { - local_item_t b; - if(!detail::lexical_cast(input, b)) { - return std::string(); - // there is no possible way we can match anything in the mapping if we can't convert so just return - } - if(filter_fn) { - b = filter_fn(b); - } - auto res = detail::search(mapping, b, filter_fn); - if(res.first) { - input = detail::value_string(detail::pair_adaptor::second(*res.second)); - } - return std::string{}; - }; - } +public: + using filter_fn_t = std::function; + + /// This allows in-place construction + template + Transformer(std::initializer_list> values, Args &&... args) + : Transformer(TransformPairs(values), std::forward(args)...) {} + + /// direct map of std::string to std::string + template explicit Transformer(T &&mapping) : Transformer(std::forward(mapping), nullptr) {} + + /// This checks to see if an item is in a set: pointer or copy version. You can pass in a function that will filter + /// both sides of the comparison before computing the comparison. 
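// Illustrative usage sketch (hypothetical, not part of this patch):
// Transformer replaces an input that matches a mapping key with the mapped
// value and lets everything else through unchanged. The mapping is made up.
#include "CLI11.hpp"
#include <map>

int main(int argc, char **argv) {
  CLI::App app{"Transformer demo"};
  int level{0};
  // "--level low" stores 0, "--level high" stores 2, "--level 7" stores 7
  app.add_option("--level", level)
      ->transform(CLI::Transformer(std::map<std::string, int>{{"low", 0}, {"high", 2}}));
  CLI11_PARSE(app, argc, argv);
  return 0;
}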
+ template explicit Transformer(T mapping, F filter_function) { + + static_assert(detail::pair_adaptor::type>::value, "mapping must produce value pairs"); + // Get the type of the contained item - requires a container have ::value_type + // if the type does not have first_type and second_type, these are both value_type + using element_t = typename detail::element_type::type; // Removes (smart) pointers if needed + using item_t = typename detail::pair_adaptor::first_type; // Is value_type if not a map + using local_item_t = typename IsMemberType::type; // This will convert bad types to good ones + // (const char * to std::string) + + // Make a local copy of the filter function, using a std::function if not one already + std::function filter_fn = filter_function; + + // This is the type name for help, it will take the current version of the set contents + desc_function_ = [mapping]() { return detail::generate_map(detail::smart_deref(mapping)); }; + + func_ = [mapping, filter_fn](std::string &input) { + local_item_t b; + if (!detail::lexical_cast(input, b)) { + return std::string(); + // there is no possible way we can match anything in the mapping if we can't convert so just return + } + if (filter_fn) { + b = filter_fn(b); + } + auto res = detail::search(mapping, b, filter_fn); + if (res.first) { + input = detail::value_string(detail::pair_adaptor::second(*res.second)); + } + return std::string{}; + }; + } - /// You can pass in as many filter functions as you like, they nest - template - Transformer(T &&mapping, filter_fn_t filter_fn_1, filter_fn_t filter_fn_2, Args &&... other) - : Transformer( - std::forward(mapping), - [filter_fn_1, filter_fn_2](std::string a) { return filter_fn_2(filter_fn_1(a)); }, - other...) {} + /// You can pass in as many filter functions as you like, they nest + template + Transformer(T &&mapping, filter_fn_t filter_fn_1, filter_fn_t filter_fn_2, Args &&... other) + : Transformer( + std::forward(mapping), [filter_fn_1, filter_fn_2](std::string a) { return filter_fn_2(filter_fn_1(a)); }, other...) {} }; /// translate named items to other or a value set class CheckedTransformer : public Validator { - public: - using filter_fn_t = std::function; - - /// This allows in-place construction - template - CheckedTransformer(std::initializer_list> values, Args &&... args) - : CheckedTransformer(TransformPairs(values), std::forward(args)...) {} - - /// direct map of std::string to std::string - template explicit CheckedTransformer(T mapping) : CheckedTransformer(std::move(mapping), nullptr) {} - - /// This checks to see if an item is in a set: pointer or copy version. You can pass in a function that will filter - /// both sides of the comparison before computing the comparison. 
- template explicit CheckedTransformer(T mapping, F filter_function) { - - static_assert(detail::pair_adaptor::type>::value, - "mapping must produce value pairs"); - // Get the type of the contained item - requires a container have ::value_type - // if the type does not have first_type and second_type, these are both value_type - using element_t = typename detail::element_type::type; // Removes (smart) pointers if needed - using item_t = typename detail::pair_adaptor::first_type; // Is value_type if not a map - using local_item_t = typename IsMemberType::type; // This will convert bad types to good ones - // (const char * to std::string) - using iteration_type_t = typename detail::pair_adaptor::value_type; // the type of the object pair // - // the type of the object pair - - // Make a local copy of the filter function, using a std::function if not one already - std::function filter_fn = filter_function; - - auto tfunc = [mapping]() { - std::string out("value in "); - out += detail::generate_map(detail::smart_deref(mapping)) + " OR {"; - out += detail::join( - detail::smart_deref(mapping), - [](const iteration_type_t &v) { return detail::to_string(detail::pair_adaptor::second(v)); }, - ","); - out.push_back('}'); - return out; - }; - - desc_function_ = tfunc; - - func_ = [mapping, tfunc, filter_fn](std::string &input) { - local_item_t b; - bool converted = detail::lexical_cast(input, b); - if(converted) { - if(filter_fn) { - b = filter_fn(b); - } - auto res = detail::search(mapping, b, filter_fn); - if(res.first) { - input = detail::value_string(detail::pair_adaptor::second(*res.second)); - return std::string{}; - } - } - for(const auto &v : detail::smart_deref(mapping)) { - auto output_string = detail::value_string(detail::pair_adaptor::second(v)); - if(output_string == input) { - return std::string(); - } - } +public: + using filter_fn_t = std::function; + + /// This allows in-place construction + template + CheckedTransformer(std::initializer_list> values, Args &&... args) + : CheckedTransformer(TransformPairs(values), std::forward(args)...) {} + + /// direct map of std::string to std::string + template explicit CheckedTransformer(T mapping) : CheckedTransformer(std::move(mapping), nullptr) {} + + /// This checks to see if an item is in a set: pointer or copy version. You can pass in a function that will filter + /// both sides of the comparison before computing the comparison. 
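// Illustrative usage sketch (hypothetical, not part of this patch): unlike
// Transformer, CheckedTransformer fails when the input is neither a key of
// the mapping nor one of the already-mapped output values. Names are made up.
#include "CLI11.hpp"
#include <map>

int main(int argc, char **argv) {
  CLI::App app{"CheckedTransformer demo"};
  int verbosity{0};
  // Accepts "quiet"/"loud" (case-insensitively) or the outputs "0"/"2";
  // any other value fails with a "Check <value> ... FAILED" error
  app.add_option("--verbosity", verbosity)
      ->transform(CLI::CheckedTransformer(std::map<std::string, int>{{"quiet", 0}, {"loud", 2}}, CLI::ignore_case));
  CLI11_PARSE(app, argc, argv);
  return 0;
}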
+ template explicit CheckedTransformer(T mapping, F filter_function) { + + static_assert(detail::pair_adaptor::type>::value, "mapping must produce value pairs"); + // Get the type of the contained item - requires a container have ::value_type + // if the type does not have first_type and second_type, these are both value_type + using element_t = typename detail::element_type::type; // Removes (smart) pointers if needed + using item_t = typename detail::pair_adaptor::first_type; // Is value_type if not a map + using local_item_t = typename IsMemberType::type; // This will convert bad types to good ones + // (const char * to std::string) + using iteration_type_t = typename detail::pair_adaptor::value_type; // the type of the object pair // + // the type of the object pair + + // Make a local copy of the filter function, using a std::function if not one already + std::function filter_fn = filter_function; + + auto tfunc = [mapping]() { + std::string out("value in "); + out += detail::generate_map(detail::smart_deref(mapping)) + " OR {"; + out += detail::join( + detail::smart_deref(mapping), + [](const iteration_type_t &v) { return detail::to_string(detail::pair_adaptor::second(v)); }, ","); + out.push_back('}'); + return out; + }; - return "Check " + input + " " + tfunc() + " FAILED"; - }; - } + desc_function_ = tfunc; + + func_ = [mapping, tfunc, filter_fn](std::string &input) { + local_item_t b; + bool converted = detail::lexical_cast(input, b); + if (converted) { + if (filter_fn) { + b = filter_fn(b); + } + auto res = detail::search(mapping, b, filter_fn); + if (res.first) { + input = detail::value_string(detail::pair_adaptor::second(*res.second)); + return std::string{}; + } + } + for (const auto &v : detail::smart_deref(mapping)) { + auto output_string = detail::value_string(detail::pair_adaptor::second(v)); + if (output_string == input) { + return std::string(); + } + } - /// You can pass in as many filter functions as you like, they nest - template - CheckedTransformer(T &&mapping, filter_fn_t filter_fn_1, filter_fn_t filter_fn_2, Args &&... other) - : CheckedTransformer( - std::forward(mapping), - [filter_fn_1, filter_fn_2](std::string a) { return filter_fn_2(filter_fn_1(a)); }, - other...) {} + return "Check " + input + " " + tfunc() + " FAILED"; + }; + } + + /// You can pass in as many filter functions as you like, they nest + template + CheckedTransformer(T &&mapping, filter_fn_t filter_fn_1, filter_fn_t filter_fn_2, Args &&... other) + : CheckedTransformer( + std::forward(mapping), [filter_fn_1, filter_fn_2](std::string a) { return filter_fn_2(filter_fn_1(a)); }, other...) {} }; /// Helper function to allow ignore_case to be passed to IsMember or Transform @@ -2922,9 +2747,9 @@ inline std::string ignore_underscore(std::string item) { return detail::remove_u /// Helper function to allow checks to ignore spaces to be passed to IsMember or Transform inline std::string ignore_space(std::string item) { - item.erase(std::remove(std::begin(item), std::end(item), ' '), std::end(item)); - item.erase(std::remove(std::begin(item), std::end(item), '\t'), std::end(item)); - return item; + item.erase(std::remove(std::begin(item), std::end(item), ' '), std::end(item)); + item.erase(std::remove(std::begin(item), std::end(item), '\t'), std::end(item)); + return item; } /// Multiply a number by a factor using given mapping. 
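// Illustrative usage sketch (hypothetical, not part of this patch): the
// ignore_case / ignore_underscore / ignore_space helpers are plain string
// filters; IsMember and the transformers accept several of them and apply
// them in the order given. The names below are made up.
#include "CLI11.hpp"

int main(int argc, char **argv) {
  CLI::App app{"filter chaining demo"};
  std::string algo;
  // "Fast_Mode", "FASTMODE", and "fast_mode" all match the element "fastmode"
  app.add_option("--algo", algo)
      ->check(CLI::IsMember({"fastmode", "slowmode"}, CLI::ignore_case, CLI::ignore_underscore));
  CLI11_PARSE(app, argc, argv);
  return 0;
}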
@@ -2939,123 +2764,119 @@ inline std::string ignore_space(std::string item) { /// Therefore, if it is required to interpret real inputs like "0.42 s", /// the mapping should be of a type or . class AsNumberWithUnit : public Validator { - public: - /// Adjust AsNumberWithUnit behavior. - /// CASE_SENSITIVE/CASE_INSENSITIVE controls how units are matched. - /// UNIT_OPTIONAL/UNIT_REQUIRED throws ValidationError - /// if UNIT_REQUIRED is set and unit literal is not found. - enum Options { - CASE_SENSITIVE = 0, - CASE_INSENSITIVE = 1, - UNIT_OPTIONAL = 0, - UNIT_REQUIRED = 2, - DEFAULT = CASE_INSENSITIVE | UNIT_OPTIONAL +public: + /// Adjust AsNumberWithUnit behavior. + /// CASE_SENSITIVE/CASE_INSENSITIVE controls how units are matched. + /// UNIT_OPTIONAL/UNIT_REQUIRED throws ValidationError + /// if UNIT_REQUIRED is set and unit literal is not found. + enum Options { + CASE_SENSITIVE = 0, + CASE_INSENSITIVE = 1, + UNIT_OPTIONAL = 0, + UNIT_REQUIRED = 2, + DEFAULT = CASE_INSENSITIVE | UNIT_OPTIONAL + }; + + template + explicit AsNumberWithUnit(std::map mapping, Options opts = DEFAULT, const std::string &unit_name = "UNIT") { + description(generate_description(unit_name, opts)); + validate_mapping(mapping, opts); + + // transform function + func_ = [mapping, opts](std::string &input) -> std::string { + Number num; + + detail::rtrim(input); + if (input.empty()) { + throw ValidationError("Input is empty"); + } + + // Find split position between number and prefix + auto unit_begin = input.end(); + while (unit_begin > input.begin() && std::isalpha(*(unit_begin - 1), std::locale())) { + --unit_begin; + } + + std::string unit{unit_begin, input.end()}; + input.resize(static_cast(std::distance(input.begin(), unit_begin))); + detail::trim(input); + + if (opts & UNIT_REQUIRED && unit.empty()) { + throw ValidationError("Missing mandatory unit"); + } + if (opts & CASE_INSENSITIVE) { + unit = detail::to_lower(unit); + } + + bool converted = detail::lexical_cast(input, num); + if (!converted) { + throw ValidationError(std::string("Value ") + input + " could not be converted to " + detail::type_name()); + } + + if (unit.empty()) { + // No need to modify input if no unit passed + return {}; + } + + // find corresponding factor + auto it = mapping.find(unit); + if (it == mapping.end()) { + throw ValidationError(unit + + " unit not recognized. " + "Allowed values: " + + detail::generate_map(mapping, true)); + } + + // perform safe multiplication + bool ok = detail::checked_multiply(num, it->second); + if (!ok) { + throw ValidationError(detail::to_string(num) + " multiplied by " + unit + + " factor would cause number overflow. 
Use smaller value."); + } + input = detail::to_string(num); + + return {}; }; - - template - explicit AsNumberWithUnit(std::map mapping, - Options opts = DEFAULT, - const std::string &unit_name = "UNIT") { - description(generate_description(unit_name, opts)); - validate_mapping(mapping, opts); - - // transform function - func_ = [mapping, opts](std::string &input) -> std::string { - Number num; - - detail::rtrim(input); - if(input.empty()) { - throw ValidationError("Input is empty"); - } - - // Find split position between number and prefix - auto unit_begin = input.end(); - while(unit_begin > input.begin() && std::isalpha(*(unit_begin - 1), std::locale())) { - --unit_begin; - } - - std::string unit{unit_begin, input.end()}; - input.resize(static_cast(std::distance(input.begin(), unit_begin))); - detail::trim(input); - - if(opts & UNIT_REQUIRED && unit.empty()) { - throw ValidationError("Missing mandatory unit"); - } - if(opts & CASE_INSENSITIVE) { - unit = detail::to_lower(unit); - } - - bool converted = detail::lexical_cast(input, num); - if(!converted) { - throw ValidationError(std::string("Value ") + input + " could not be converted to " + - detail::type_name()); - } - - if(unit.empty()) { - // No need to modify input if no unit passed - return {}; - } - - // find corresponding factor - auto it = mapping.find(unit); - if(it == mapping.end()) { - throw ValidationError(unit + - " unit not recognized. " - "Allowed values: " + - detail::generate_map(mapping, true)); - } - - // perform safe multiplication - bool ok = detail::checked_multiply(num, it->second); - if(!ok) { - throw ValidationError(detail::to_string(num) + " multiplied by " + unit + - " factor would cause number overflow. Use smaller value."); - } - input = detail::to_string(num); - - return {}; - }; - } - - private: - /// Check that mapping contains valid units. - /// Update mapping for CASE_INSENSITIVE mode. - template static void validate_mapping(std::map &mapping, Options opts) { - for(auto &kv : mapping) { - if(kv.first.empty()) { - throw ValidationError("Unit must not be empty."); - } - if(!detail::isalpha(kv.first)) { - throw ValidationError("Unit must contain only letters."); - } - } - - // make all units lowercase if CASE_INSENSITIVE - if(opts & CASE_INSENSITIVE) { - std::map lower_mapping; - for(auto &kv : mapping) { - auto s = detail::to_lower(kv.first); - if(lower_mapping.count(s)) { - throw ValidationError(std::string("Several matching lowercase unit representations are found: ") + - s); - } - lower_mapping[detail::to_lower(kv.first)] = kv.second; - } - mapping = std::move(lower_mapping); - } - } - - /// Generate description like this: NUMBER [UNIT] - template static std::string generate_description(const std::string &name, Options opts) { - std::stringstream out; - out << detail::type_name() << ' '; - if(opts & UNIT_REQUIRED) { - out << name; - } else { - out << '[' << name << ']'; - } - return out.str(); + } + +private: + /// Check that mapping contains valid units. + /// Update mapping for CASE_INSENSITIVE mode. 
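// Illustrative usage sketch (hypothetical, not part of this patch):
// AsNumberWithUnit splits "<number><unit>" on the trailing letters, looks the
// unit up in the mapping, and multiplies via detail::checked_multiply. The
// duration mapping below is made up; fractional factors require a
// floating-point mapping, as the class comment above notes.
#include "CLI11.hpp"
#include <map>

int main(int argc, char **argv) {
  CLI::App app{"unit demo"};
  double seconds{0.0};
  std::map<std::string, double> to_seconds{{"ms", 0.001}, {"s", 1.0}, {"min", 60.0}};
  // "--timeout 500ms" stores 0.5; "--timeout 2min" stores 120; a bare
  // "--timeout 3" stores 3, since the unit is optional by default
  app.add_option("--timeout", seconds)->transform(CLI::AsNumberWithUnit(to_seconds));
  CLI11_PARSE(app, argc, argv);
  return 0;
}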
+ template static void validate_mapping(std::map &mapping, Options opts) { + for (auto &kv : mapping) { + if (kv.first.empty()) { + throw ValidationError("Unit must not be empty."); + } + if (!detail::isalpha(kv.first)) { + throw ValidationError("Unit must contain only letters."); + } + } + + // make all units lowercase if CASE_INSENSITIVE + if (opts & CASE_INSENSITIVE) { + std::map lower_mapping; + for (auto &kv : mapping) { + auto s = detail::to_lower(kv.first); + if (lower_mapping.count(s)) { + throw ValidationError(std::string("Several matching lowercase unit representations are found: ") + s); + } + lower_mapping[detail::to_lower(kv.first)] = kv.second; + } + mapping = std::move(lower_mapping); + } + } + + /// Generate description like this: NUMBER [UNIT] + template static std::string generate_description(const std::string &name, Options opts) { + std::stringstream out; + out << detail::type_name() << ' '; + if (opts & UNIT_REQUIRED) { + out << name; + } else { + out << '[' << name << ']'; } + return out.str(); + } }; /// Converts a human-readable size string (with unit literal) to uin64_t size. @@ -3070,54 +2891,54 @@ class AsNumberWithUnit : public Validator { /// "2 MB" => 2097152 /// "2 EiB" => 2^61 // Units up to exibyte are supported class AsSizeValue : public AsNumberWithUnit { - public: - using result_t = uint64_t; - - /// If kb_is_1000 is true, - /// interpret 'kb', 'k' as 1000 and 'kib', 'ki' as 1024 - /// (same applies to higher order units as well). - /// Otherwise, interpret all literals as factors of 1024. - /// The first option is formally correct, but - /// the second interpretation is more wide-spread - /// (see https://en.wikipedia.org/wiki/Binary_prefix). - explicit AsSizeValue(bool kb_is_1000) : AsNumberWithUnit(get_mapping(kb_is_1000)) { - if(kb_is_1000) { - description("SIZE [b, kb(=1000b), kib(=1024b), ...]"); - } else { - description("SIZE [b, kb(=1024b), ...]"); - } - } - - private: - /// Get mapping - static std::map init_mapping(bool kb_is_1000) { - std::map m; - result_t k_factor = kb_is_1000 ? 1000 : 1024; - result_t ki_factor = 1024; - result_t k = 1; - result_t ki = 1; - m["b"] = 1; - for(std::string p : {"k", "m", "g", "t", "p", "e"}) { - k *= k_factor; - ki *= ki_factor; - m[p] = k; - m[p + "b"] = k; - m[p + "i"] = ki; - m[p + "ib"] = ki; - } - return m; - } - - /// Cache calculated mapping - static std::map get_mapping(bool kb_is_1000) { - if(kb_is_1000) { - static auto m = init_mapping(true); - return m; - } else { - static auto m = init_mapping(false); - return m; - } +public: + using result_t = uint64_t; + + /// If kb_is_1000 is true, + /// interpret 'kb', 'k' as 1000 and 'kib', 'ki' as 1024 + /// (same applies to higher order units as well). + /// Otherwise, interpret all literals as factors of 1024. + /// The first option is formally correct, but + /// the second interpretation is more wide-spread + /// (see https://en.wikipedia.org/wiki/Binary_prefix). + explicit AsSizeValue(bool kb_is_1000) : AsNumberWithUnit(get_mapping(kb_is_1000)) { + if (kb_is_1000) { + description("SIZE [b, kb(=1000b), kib(=1024b), ...]"); + } else { + description("SIZE [b, kb(=1024b), ...]"); + } + } + +private: + /// Get mapping + static std::map init_mapping(bool kb_is_1000) { + std::map m; + result_t k_factor = kb_is_1000 ? 
1000 : 1024; + result_t ki_factor = 1024; + result_t k = 1; + result_t ki = 1; + m["b"] = 1; + for (std::string p : {"k", "m", "g", "t", "p", "e"}) { + k *= k_factor; + ki *= ki_factor; + m[p] = k; + m[p + "b"] = k; + m[p + "i"] = ki; + m[p + "ib"] = ki; + } + return m; + } + + /// Cache calculated mapping + static std::map get_mapping(bool kb_is_1000) { + if (kb_is_1000) { + static auto m = init_mapping(true); + return m; + } else { + static auto m = init_mapping(false); + return m; } + } }; namespace detail { @@ -3126,25 +2947,25 @@ namespace detail { /// the return value contains is a pair with the first argument containing the program name and the second /// everything else. inline std::pair split_program_name(std::string commandline) { - // try to determine the programName - std::pair vals; - trim(commandline); - auto esp = commandline.find_first_of(' ', 1); - while(detail::check_path(commandline.substr(0, esp).c_str()) != path_type::file) { - esp = commandline.find_first_of(' ', esp + 1); - if(esp == std::string::npos) { - // if we have reached the end and haven't found a valid file just assume the first argument is the - // program name - esp = commandline.find_first_of(' ', 1); - break; - } - } - vals.first = commandline.substr(0, esp); - rtrim(vals.first); - // strip the program name - vals.second = (esp != std::string::npos) ? commandline.substr(esp + 1) : std::string{}; - ltrim(vals.second); - return vals; + // try to determine the programName + std::pair vals; + trim(commandline); + auto esp = commandline.find_first_of(' ', 1); + while (detail::check_path(commandline.substr(0, esp).c_str()) != path_type::file) { + esp = commandline.find_first_of(' ', esp + 1); + if (esp == std::string::npos) { + // if we have reached the end and haven't found a valid file just assume the first argument is the + // program name + esp = commandline.find_first_of(' ', 1); + break; + } + } + vals.first = commandline.substr(0, esp); + rtrim(vals.first); + // strip the program name + vals.second = (esp != std::string::npos) ? commandline.substr(esp + 1) : std::string{}; + ltrim(vals.second); + return vals; } } // namespace detail @@ -3165,9 +2986,9 @@ class App; /// the second argument. enum class AppFormatMode { - Normal, //< The normal, detailed help - All, //< A fully expanded help - Sub, //< Used when printed as part of expanded subcommand + Normal, //< The normal, detailed help + All, //< A fully expanded help + Sub, //< Used when printed as part of expanded subcommand }; /// This is the minimum requirements to run a formatter. @@ -3175,147 +2996,144 @@ enum class AppFormatMode { /// A user can subclass this is if they do not care at all /// about the structure in CLI::Formatter. class FormatterBase { - protected: - /// @name Options - ///@{ +protected: + /// @name Options + ///@{ - /// The width of the first column - std::size_t column_width_{30}; + /// The width of the first column + std::size_t column_width_{30}; - /// @brief The required help printout labels (user changeable) - /// Values are Needs, Excludes, etc. - std::map labels_{}; + /// @brief The required help printout labels (user changeable) + /// Values are Needs, Excludes, etc. 
+ std::map labels_{}; - ///@} - /// @name Basic - ///@{ + ///@} + /// @name Basic + ///@{ - public: - FormatterBase() = default; - FormatterBase(const FormatterBase &) = default; - FormatterBase(FormatterBase &&) = default; +public: + FormatterBase() = default; + FormatterBase(const FormatterBase &) = default; + FormatterBase(FormatterBase &&) = default; - /// Adding a destructor in this form to work around bug in GCC 4.7 - virtual ~FormatterBase() noexcept {} // NOLINT(modernize-use-equals-default) + /// Adding a destructor in this form to work around bug in GCC 4.7 + virtual ~FormatterBase() noexcept {} // NOLINT(modernize-use-equals-default) - /// This is the key method that puts together help - virtual std::string make_help(const App *, std::string, AppFormatMode) const = 0; + /// This is the key method that puts together help + virtual std::string make_help(const App *, std::string, AppFormatMode) const = 0; - ///@} - /// @name Setters - ///@{ + ///@} + /// @name Setters + ///@{ - /// Set the "REQUIRED" label - void label(std::string key, std::string val) { labels_[key] = val; } + /// Set the "REQUIRED" label + void label(std::string key, std::string val) { labels_[key] = val; } - /// Set the column width - void column_width(std::size_t val) { column_width_ = val; } + /// Set the column width + void column_width(std::size_t val) { column_width_ = val; } - ///@} - /// @name Getters - ///@{ + ///@} + /// @name Getters + ///@{ - /// Get the current value of a name (REQUIRED, etc.) - std::string get_label(std::string key) const { - if(labels_.find(key) == labels_.end()) - return key; - else - return labels_.at(key); - } + /// Get the current value of a name (REQUIRED, etc.) + std::string get_label(std::string key) const { + if (labels_.find(key) == labels_.end()) + return key; + else + return labels_.at(key); + } - /// Get the current column width - std::size_t get_column_width() const { return column_width_; } + /// Get the current column width + std::size_t get_column_width() const { return column_width_; } - ///@} + ///@} }; /// This is a specialty override for lambda functions class FormatterLambda final : public FormatterBase { - using funct_t = std::function; + using funct_t = std::function; - /// The lambda to hold and run - funct_t lambda_; + /// The lambda to hold and run + funct_t lambda_; - public: - /// Create a FormatterLambda with a lambda function - explicit FormatterLambda(funct_t funct) : lambda_(std::move(funct)) {} +public: + /// Create a FormatterLambda with a lambda function + explicit FormatterLambda(funct_t funct) : lambda_(std::move(funct)) {} - /// Adding a destructor (mostly to make GCC 4.7 happy) - ~FormatterLambda() noexcept override {} // NOLINT(modernize-use-equals-default) + /// Adding a destructor (mostly to make GCC 4.7 happy) + ~FormatterLambda() noexcept override {} // NOLINT(modernize-use-equals-default) - /// This will simply call the lambda function - std::string make_help(const App *app, std::string name, AppFormatMode mode) const override { - return lambda_(app, name, mode); - } + /// This will simply call the lambda function + std::string make_help(const App *app, std::string name, AppFormatMode mode) const override { return lambda_(app, name, mode); } }; /// This is the default Formatter for CLI11. It pretty prints help output, and is broken into quite a few /// overridable methods, to be highly customizable with minimal effort. 
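// Illustrative usage sketch (hypothetical, not part of this patch): the
// FormatterBase setters above tweak help output in place, while
// FormatterLambda lets a plain function take over help generation entirely.
// Assumes the formatter is reachable via App::get_formatter().
#include "CLI11.hpp"

int main(int argc, char **argv) {
  CLI::App app{"formatter demo"};
  app.get_formatter()->column_width(40);                // widen the left help column
  app.get_formatter()->label("REQUIRED", "(required)"); // rename the REQUIRED tag
  CLI11_PARSE(app, argc, argv);
  return 0;
}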
class Formatter : public FormatterBase { - public: - Formatter() = default; - Formatter(const Formatter &) = default; - Formatter(Formatter &&) = default; +public: + Formatter() = default; + Formatter(const Formatter &) = default; + Formatter(Formatter &&) = default; - /// @name Overridables - ///@{ + /// @name Overridables + ///@{ - /// This prints out a group of options with title - /// - virtual std::string make_group(std::string group, bool is_positional, std::vector opts) const; + /// This prints out a group of options with title + /// + virtual std::string make_group(std::string group, bool is_positional, std::vector opts) const; - /// This prints out just the positionals "group" - virtual std::string make_positionals(const App *app) const; + /// This prints out just the positionals "group" + virtual std::string make_positionals(const App *app) const; - /// This prints out all the groups of options - std::string make_groups(const App *app, AppFormatMode mode) const; + /// This prints out all the groups of options + std::string make_groups(const App *app, AppFormatMode mode) const; - /// This prints out all the subcommands - virtual std::string make_subcommands(const App *app, AppFormatMode mode) const; + /// This prints out all the subcommands + virtual std::string make_subcommands(const App *app, AppFormatMode mode) const; - /// This prints out a subcommand - virtual std::string make_subcommand(const App *sub) const; + /// This prints out a subcommand + virtual std::string make_subcommand(const App *sub) const; - /// This prints out a subcommand in help-all - virtual std::string make_expanded(const App *sub) const; + /// This prints out a subcommand in help-all + virtual std::string make_expanded(const App *sub) const; - /// This prints out all the groups of options - virtual std::string make_footer(const App *app) const; + /// This prints out all the groups of options + virtual std::string make_footer(const App *app) const; - /// This displays the description line - virtual std::string make_description(const App *app) const; + /// This displays the description line + virtual std::string make_description(const App *app) const; - /// This displays the usage line - virtual std::string make_usage(const App *app, std::string name) const; + /// This displays the usage line + virtual std::string make_usage(const App *app, std::string name) const; - /// This puts everything together - std::string make_help(const App * /*app*/, std::string, AppFormatMode) const override; + /// This puts everything together + std::string make_help(const App * /*app*/, std::string, AppFormatMode) const override; - ///@} - /// @name Options - ///@{ + ///@} + /// @name Options + ///@{ - /// This prints out an option help line, either positional or optional form - virtual std::string make_option(const Option *opt, bool is_positional) const { - std::stringstream out; - detail::format_help( - out, make_option_name(opt, is_positional) + make_option_opts(opt), make_option_desc(opt), column_width_); - return out.str(); - } + /// This prints out an option help line, either positional or optional form + virtual std::string make_option(const Option *opt, bool is_positional) const { + std::stringstream out; + detail::format_help(out, make_option_name(opt, is_positional) + make_option_opts(opt), make_option_desc(opt), column_width_); + return out.str(); + } - /// @brief This is the name part of an option, Default: left column - virtual std::string make_option_name(const Option *, bool) const; + /// @brief This is the name 
part of an option, Default: left column + virtual std::string make_option_name(const Option *, bool) const; - /// @brief This is the options part of the name, Default: combined into left column - virtual std::string make_option_opts(const Option *) const; + /// @brief This is the options part of the name, Default: combined into left column + virtual std::string make_option_opts(const Option *) const; - /// @brief This is the description. Default: Right column, on new line if left column too large - virtual std::string make_option_desc(const Option *) const; + /// @brief This is the description. Default: Right column, on new line if left column too large + virtual std::string make_option_desc(const Option *) const; - /// @brief This is used to print the name on the USAGE line - virtual std::string make_option_usage(const Option *opt) const; + /// @brief This is used to print the name on the USAGE line + virtual std::string make_option_usage(const Option *opt) const; - ///@} + ///@} }; } // namespace CLI @@ -3334,4211 +3152,4138 @@ class App; using Option_p = std::unique_ptr