// cuda_kernels_vector_ops.cu.cc
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2023 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: joydeepb@cs.utexas.edu (Joydeep Biswas)
#include "ceres/cuda_kernels_vector_ops.h"

#include <cuda_runtime.h>

#include "ceres/cuda_kernels_utils.h"

namespace ceres {
namespace internal {
  35. template <typename SrcType, typename DstType>
  36. __global__ void TypeConversionKernel(const SrcType* __restrict__ input,
  37. DstType* __restrict__ output,
  38. const int size) {
  39. const int i = blockIdx.x * blockDim.x + threadIdx.x;
  40. if (i < size) {
  41. output[i] = static_cast<DstType>(input[i]);
  42. }
  43. }
  44. void CudaFP64ToFP32(const double* input,
  45. float* output,
  46. const int size,
  47. cudaStream_t stream) {
  48. const int num_blocks = NumBlocksInGrid(size);
  49. TypeConversionKernel<double, float>
  50. <<<num_blocks, kCudaBlockSize, 0, stream>>>(input, output, size);
  51. }
  52. void CudaFP32ToFP64(const float* input,
  53. double* output,
  54. const int size,
  55. cudaStream_t stream) {
  56. const int num_blocks = NumBlocksInGrid(size);
  57. TypeConversionKernel<float, double>
  58. <<<num_blocks, kCudaBlockSize, 0, stream>>>(input, output, size);
  59. }
  60. template <typename T>
  61. __global__ void SetZeroKernel(T* __restrict__ output, const int size) {
  62. const int i = blockIdx.x * blockDim.x + threadIdx.x;
  63. if (i < size) {
  64. output[i] = T(0.0);
  65. }
  66. }
  67. void CudaSetZeroFP32(float* output, const int size, cudaStream_t stream) {
  68. const int num_blocks = NumBlocksInGrid(size);
  69. SetZeroKernel<float><<<num_blocks, kCudaBlockSize, 0, stream>>>(output, size);
  70. }
  71. void CudaSetZeroFP64(double* output, const int size, cudaStream_t stream) {
  72. const int num_blocks = NumBlocksInGrid(size);
  73. SetZeroKernel<double>
  74. <<<num_blocks, kCudaBlockSize, 0, stream>>>(output, size);
  75. }
  76. template <typename SrcType, typename DstType>
  77. __global__ void XPlusEqualsYKernel(DstType* __restrict__ x,
  78. const SrcType* __restrict__ y,
  79. const int size) {
  80. const int i = blockIdx.x * blockDim.x + threadIdx.x;
  81. if (i < size) {
  82. x[i] = x[i] + DstType(y[i]);
  83. }
  84. }
  85. void CudaDsxpy(double* x, float* y, const int size, cudaStream_t stream) {
  86. const int num_blocks = NumBlocksInGrid(size);
  87. XPlusEqualsYKernel<float, double>
  88. <<<num_blocks, kCudaBlockSize, 0, stream>>>(x, y, size);
  89. }
  90. __global__ void CudaDtDxpyKernel(double* __restrict__ y,
  91. const double* D,
  92. const double* __restrict__ x,
  93. const int size) {
  94. const int i = blockIdx.x * blockDim.x + threadIdx.x;
  95. if (i < size) {
  96. y[i] = y[i] + D[i] * D[i] * x[i];
  97. }
  98. }
  99. void CudaDtDxpy(double* y,
  100. const double* D,
  101. const double* x,
  102. const int size,
  103. cudaStream_t stream) {
  104. const int num_blocks = NumBlocksInGrid(size);
  105. CudaDtDxpyKernel<<<num_blocks, kCudaBlockSize, 0, stream>>>(y, D, x, size);
  106. }
}  // namespace internal
}  // namespace ceres