// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2023 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Authors: sameeragarwal@google.com (Sameer Agarwal)
//          sergiu.deitsch@gmail.com (Sergiu Deitsch)
//
#ifndef CERES_PUBLIC_PRODUCT_MANIFOLD_H_
#define CERES_PUBLIC_PRODUCT_MANIFOLD_H_

#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <numeric>
#include <tuple>
#include <type_traits>
#include <utility>

#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/port.h"
#include "ceres/manifold.h"
  46. namespace ceres {
  47. // Construct a manifold by taking the Cartesian product of a number of other
  48. // manifolds. This is useful, when a parameter block is the Cartesian product
  49. // of two or more manifolds. For example the parameters of a camera consist of
  50. // a rotation and a translation, i.e., SO(3) x R^3.
  51. //
  52. // Example usage:
  53. //
  54. // ProductManifold<QuaternionManifold, EuclideanManifold<3>> se3;
  55. //
  56. // is the manifold for a rigid transformation, where the rotation is
  57. // represented using a quaternion.
  58. //
  59. // Manifolds can be copied and moved to ProductManifold:
  60. //
  61. // SubsetManifold manifold1(5, {2});
  62. // SubsetManifold manifold2(3, {0, 1});
  63. // ProductManifold<SubsetManifold, SubsetManifold> manifold(manifold1,
  64. // manifold2);
  65. //
  66. // In advanced use cases, manifolds can be dynamically allocated and passed as
  67. // (smart) pointers:
  68. //
  69. // ProductManifold<std::unique_ptr<QuaternionManifold>, EuclideanManifold<3>>
  70. // se3{std::make_unique<QuaternionManifold>(), EuclideanManifold<3>{}};
  71. //
  72. // In C++17, the template parameters can be left out as they are automatically
  73. // deduced making the initialization much simpler:
  74. //
  75. // ProductManifold se3{QuaternionManifold{}, EuclideanManifold<3>{}};
  76. //
  77. // The manifold implementations must be either default constructible, copyable
  78. // or moveable to be usable in a ProductManifold.
  79. template <typename Manifold0, typename Manifold1, typename... ManifoldN>
  80. class ProductManifold final : public Manifold {
  81. public:
  82. // ProductManifold constructor perfect forwards arguments to store manifolds.
  83. //
  84. // Either use default construction or if you need to copy or move-construct a
  85. // manifold instance, you need to pass an instance as an argument for all
  86. // types given as class template parameters.
  87. template <typename... Args,
  88. std::enable_if_t<std::is_constructible<
  89. std::tuple<Manifold0, Manifold1, ManifoldN...>,
  90. Args...>::value>* = nullptr>
  91. explicit ProductManifold(Args&&... manifolds)
  92. : ProductManifold{std::make_index_sequence<kNumManifolds>{},
  93. std::forward<Args>(manifolds)...} {}
  94. int AmbientSize() const override { return ambient_size_; }
  95. int TangentSize() const override { return tangent_size_; }
  96. bool Plus(const double* x,
  97. const double* delta,
  98. double* x_plus_delta) const override {
  99. return PlusImpl(
  100. x, delta, x_plus_delta, std::make_index_sequence<kNumManifolds>{});
  101. }
  102. bool Minus(const double* y,
  103. const double* x,
  104. double* y_minus_x) const override {
  105. return MinusImpl(
  106. y, x, y_minus_x, std::make_index_sequence<kNumManifolds>{});
  107. }
  108. bool PlusJacobian(const double* x, double* jacobian_ptr) const override {
  109. MatrixRef jacobian(jacobian_ptr, AmbientSize(), TangentSize());
  110. jacobian.setZero();
  111. internal::FixedArray<double> buffer(buffer_size_);
  112. return PlusJacobianImpl(
  113. x, jacobian, buffer, std::make_index_sequence<kNumManifolds>{});
  114. }
  115. bool MinusJacobian(const double* x, double* jacobian_ptr) const override {
  116. MatrixRef jacobian(jacobian_ptr, TangentSize(), AmbientSize());
  117. jacobian.setZero();
  118. internal::FixedArray<double> buffer(buffer_size_);
  119. return MinusJacobianImpl(
  120. x, jacobian, buffer, std::make_index_sequence<kNumManifolds>{});
  121. }
  122. private:
  123. static constexpr std::size_t kNumManifolds = 2 + sizeof...(ManifoldN);
  124. template <std::size_t... Indices, typename... Args>
  125. explicit ProductManifold(std::index_sequence<Indices...>, Args&&... manifolds)
  126. : manifolds_{std::forward<Args>(manifolds)...},
  127. buffer_size_{(std::max)(
  128. {(Dereference(std::get<Indices>(manifolds_)).TangentSize() *
  129. Dereference(std::get<Indices>(manifolds_)).AmbientSize())...})},
  130. ambient_sizes_{
  131. Dereference(std::get<Indices>(manifolds_)).AmbientSize()...},
  132. tangent_sizes_{
  133. Dereference(std::get<Indices>(manifolds_)).TangentSize()...},
  134. ambient_offsets_{ExclusiveScan(ambient_sizes_)},
  135. tangent_offsets_{ExclusiveScan(tangent_sizes_)},
  136. ambient_size_{
  137. std::accumulate(ambient_sizes_.begin(), ambient_sizes_.end(), 0)},
  138. tangent_size_{
  139. std::accumulate(tangent_sizes_.begin(), tangent_sizes_.end(), 0)} {}
  140. template <std::size_t Index0, std::size_t... Indices>
  141. bool PlusImpl(const double* x,
  142. const double* delta,
  143. double* x_plus_delta,
  144. std::index_sequence<Index0, Indices...>) const {
  145. if (!Dereference(std::get<Index0>(manifolds_))
  146. .Plus(x + ambient_offsets_[Index0],
  147. delta + tangent_offsets_[Index0],
  148. x_plus_delta + ambient_offsets_[Index0])) {
  149. return false;
  150. }
  151. return PlusImpl(x, delta, x_plus_delta, std::index_sequence<Indices...>{});
  152. }
  153. static constexpr bool PlusImpl(const double* /*x*/,
  154. const double* /*delta*/,
  155. double* /*x_plus_delta*/,
  156. std::index_sequence<>) noexcept {
  157. return true;
  158. }
  159. template <std::size_t Index0, std::size_t... Indices>
  160. bool MinusImpl(const double* y,
  161. const double* x,
  162. double* y_minus_x,
  163. std::index_sequence<Index0, Indices...>) const {
  164. if (!Dereference(std::get<Index0>(manifolds_))
  165. .Minus(y + ambient_offsets_[Index0],
  166. x + ambient_offsets_[Index0],
  167. y_minus_x + tangent_offsets_[Index0])) {
  168. return false;
  169. }
  170. return MinusImpl(y, x, y_minus_x, std::index_sequence<Indices...>{});
  171. }
  172. static constexpr bool MinusImpl(const double* /*y*/,
  173. const double* /*x*/,
  174. double* /*y_minus_x*/,
  175. std::index_sequence<>) noexcept {
  176. return true;
  177. }
  178. template <std::size_t Index0, std::size_t... Indices>
  179. bool PlusJacobianImpl(const double* x,
  180. MatrixRef& jacobian,
  181. internal::FixedArray<double>& buffer,
  182. std::index_sequence<Index0, Indices...>) const {
  183. if (!Dereference(std::get<Index0>(manifolds_))
  184. .PlusJacobian(x + ambient_offsets_[Index0], buffer.data())) {
  185. return false;
  186. }
  187. jacobian.block(ambient_offsets_[Index0],
  188. tangent_offsets_[Index0],
  189. ambient_sizes_[Index0],
  190. tangent_sizes_[Index0]) =
  191. MatrixRef(
  192. buffer.data(), ambient_sizes_[Index0], tangent_sizes_[Index0]);
  193. return PlusJacobianImpl(
  194. x, jacobian, buffer, std::index_sequence<Indices...>{});
  195. }
  196. static constexpr bool PlusJacobianImpl(
  197. const double* /*x*/,
  198. MatrixRef& /*jacobian*/,
  199. internal::FixedArray<double>& /*buffer*/,
  200. std::index_sequence<>) noexcept {
  201. return true;
  202. }
  203. template <std::size_t Index0, std::size_t... Indices>
  204. bool MinusJacobianImpl(const double* x,
  205. MatrixRef& jacobian,
  206. internal::FixedArray<double>& buffer,
  207. std::index_sequence<Index0, Indices...>) const {
  208. if (!Dereference(std::get<Index0>(manifolds_))
  209. .MinusJacobian(x + ambient_offsets_[Index0], buffer.data())) {
  210. return false;
  211. }
  212. jacobian.block(tangent_offsets_[Index0],
  213. ambient_offsets_[Index0],
  214. tangent_sizes_[Index0],
  215. ambient_sizes_[Index0]) =
  216. MatrixRef(
  217. buffer.data(), tangent_sizes_[Index0], ambient_sizes_[Index0]);
  218. return MinusJacobianImpl(
  219. x, jacobian, buffer, std::index_sequence<Indices...>{});
  220. }
  221. static constexpr bool MinusJacobianImpl(
  222. const double* /*x*/,
  223. MatrixRef& /*jacobian*/,
  224. internal::FixedArray<double>& /*buffer*/,
  225. std::index_sequence<>) noexcept {
  226. return true;
  227. }
  228. template <typename T, std::size_t N>
  229. static std::array<T, N> ExclusiveScan(const std::array<T, N>& values) {
  230. std::array<T, N> result;
  231. // TODO Replace with std::exclusive_scan once all platforms have full C++17
  232. // STL support.
  233. T init = 0;
  234. for (std::size_t i = 0; i != N; ++i) {
  235. result[i] = init;
  236. init += values[i];
  237. }
  238. return result;
  239. }
  240. template <typename T, typename E = void>
  241. struct IsDereferenceable : std::false_type {};
  242. template <typename T>
  243. struct IsDereferenceable<T, std::void_t<decltype(*std::declval<T>())>>
  244. : std::true_type {};
  245. template <typename T,
  246. std::enable_if_t<!IsDereferenceable<T>::value>* = nullptr>
  247. static constexpr decltype(auto) Dereference(T& value) {
  248. return value;
  249. }
  250. // Support dereferenceable types such as std::unique_ptr, std::shared_ptr, raw
  251. // pointers etc.
  252. template <typename T,
  253. std::enable_if_t<IsDereferenceable<T>::value>* = nullptr>
  254. static constexpr decltype(auto) Dereference(T& value) {
  255. return *value;
  256. }
  257. template <typename T>
  258. static constexpr decltype(auto) Dereference(T* p) {
  259. assert(p != nullptr);
  260. return *p;
  261. }
  262. std::tuple<Manifold0, Manifold1, ManifoldN...> manifolds_;
  263. int buffer_size_;
  264. std::array<int, kNumManifolds> ambient_sizes_;
  265. std::array<int, kNumManifolds> tangent_sizes_;
  266. std::array<int, kNumManifolds> ambient_offsets_;
  267. std::array<int, kNumManifolds> tangent_offsets_;
  268. int ambient_size_;
  269. int tangent_size_;
  270. };
  271. // C++17 deduction guide that allows the user to avoid explicitly specifying
  272. // the template parameters of ProductManifold. The class can instead be
  273. // instantiated as follows:
  274. //
  275. // ProductManifold manifold{QuaternionManifold{}, EuclideanManifold<3>{}};
  276. //
  277. template <typename Manifold0, typename Manifold1, typename... Manifolds>
  278. ProductManifold(Manifold0&&, Manifold1&&, Manifolds&&...)
  279. -> ProductManifold<Manifold0, Manifold1, Manifolds...>;
  280. } // namespace ceres
  281. #endif // CERES_PUBLIC_PRODUCT_MANIFOLD_H_