cxx11_tensor_device.cu

// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU

#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
#include <unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h>

using Eigen::Tensor;
using Eigen::RowMajor;

// Context for evaluation on the CPU
struct CPUContext {
  CPUContext(const Eigen::Tensor<float, 3>& in1, Eigen::Tensor<float, 3>& in2, Eigen::Tensor<float, 3>& out)
      : in1_(in1), in2_(in2), out_(out), kernel_1d_(2), kernel_2d_(2,2), kernel_3d_(2,2,2) {
    kernel_1d_(0) = 3.14f;
    kernel_1d_(1) = 2.7f;

    kernel_2d_(0,0) = 3.14f;
    kernel_2d_(1,0) = 2.7f;
    kernel_2d_(0,1) = 0.2f;
    kernel_2d_(1,1) = 7.0f;

    kernel_3d_(0,0,0) = 3.14f;
    kernel_3d_(0,1,0) = 2.7f;
    kernel_3d_(0,0,1) = 0.2f;
    kernel_3d_(0,1,1) = 7.0f;
    kernel_3d_(1,0,0) = -1.0f;
    kernel_3d_(1,1,0) = -0.3f;
    kernel_3d_(1,0,1) = -0.7f;
    kernel_3d_(1,1,1) = -0.5f;
  }

  const Eigen::DefaultDevice& device() const { return cpu_device_; }

  const Eigen::Tensor<float, 3>& in1() const { return in1_; }
  const Eigen::Tensor<float, 3>& in2() const { return in2_; }
  Eigen::Tensor<float, 3>& out() { return out_; }
  const Eigen::Tensor<float, 1>& kernel1d() const { return kernel_1d_; }
  const Eigen::Tensor<float, 2>& kernel2d() const { return kernel_2d_; }
  const Eigen::Tensor<float, 3>& kernel3d() const { return kernel_3d_; }

 private:
  const Eigen::Tensor<float, 3>& in1_;
  const Eigen::Tensor<float, 3>& in2_;
  Eigen::Tensor<float, 3>& out_;

  Eigen::Tensor<float, 1> kernel_1d_;
  Eigen::Tensor<float, 2> kernel_2d_;
  Eigen::Tensor<float, 3> kernel_3d_;

  Eigen::DefaultDevice cpu_device_;
};

// Context for evaluation on GPU
struct GPUContext {
  GPUContext(const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1, Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2, Eigen::TensorMap<Eigen::Tensor<float, 3> >& out)
      : in1_(in1), in2_(in2), out_(out), gpu_device_(&stream_) {
    assert(gpuMalloc((void**)(&kernel_1d_), 2*sizeof(float)) == gpuSuccess);
    float kernel_1d_val[] = {3.14f, 2.7f};
    assert(gpuMemcpy(kernel_1d_, kernel_1d_val, 2*sizeof(float), gpuMemcpyHostToDevice) == gpuSuccess);

    assert(gpuMalloc((void**)(&kernel_2d_), 4*sizeof(float)) == gpuSuccess);
    float kernel_2d_val[] = {3.14f, 2.7f, 0.2f, 7.0f};
    assert(gpuMemcpy(kernel_2d_, kernel_2d_val, 4*sizeof(float), gpuMemcpyHostToDevice) == gpuSuccess);

    assert(gpuMalloc((void**)(&kernel_3d_), 8*sizeof(float)) == gpuSuccess);
    float kernel_3d_val[] = {3.14f, -1.0f, 2.7f, -0.3f, 0.2f, -0.7f, 7.0f, -0.5f};
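    // The flat array above lists the 2x2x2 kernel in Eigen's default
    // column-major order: element (i,j,k) sits at index i + 2*j + 4*k.
    // It therefore holds the same coefficients CPUContext assigns one by
    // one, e.g. kernel_3d_val[1 + 2*1 + 4*0] == -0.3f == kernel_3d_(1,1,0).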
    assert(gpuMemcpy(kernel_3d_, kernel_3d_val, 8*sizeof(float), gpuMemcpyHostToDevice) == gpuSuccess);
  }
  ~GPUContext() {
    assert(gpuFree(kernel_1d_) == gpuSuccess);
    assert(gpuFree(kernel_2d_) == gpuSuccess);
    assert(gpuFree(kernel_3d_) == gpuSuccess);
  }

  const Eigen::GpuDevice& device() const { return gpu_device_; }

  const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1() const { return in1_; }
  const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2() const { return in2_; }
  Eigen::TensorMap<Eigen::Tensor<float, 3> >& out() { return out_; }
  Eigen::TensorMap<Eigen::Tensor<float, 1> > kernel1d() const { return Eigen::TensorMap<Eigen::Tensor<float, 1> >(kernel_1d_, 2); }
  Eigen::TensorMap<Eigen::Tensor<float, 2> > kernel2d() const { return Eigen::TensorMap<Eigen::Tensor<float, 2> >(kernel_2d_, 2, 2); }
  Eigen::TensorMap<Eigen::Tensor<float, 3> > kernel3d() const { return Eigen::TensorMap<Eigen::Tensor<float, 3> >(kernel_3d_, 2, 2, 2); }

 private:
  const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1_;
  const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2_;
  Eigen::TensorMap<Eigen::Tensor<float, 3> >& out_;

  float* kernel_1d_;
  float* kernel_2d_;
  float* kernel_3d_;

  Eigen::GpuStreamDevice stream_;
  Eigen::GpuDevice gpu_device_;
};
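
// Both contexts expose the same interface (device(), in1(), in2(), out() and
// the kernel accessors), so each test below is written once as a template and
// instantiated for both the CPU and the GPU device.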

// The actual expression to evaluate
template <typename Context>
void test_contextual_eval(Context* context)
{
  context->out().device(context->device()) = context->in1() + context->in2() * 3.14f + context->in1().constant(2.718f);
}
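
// Same computation, except that .eval() forces (in1 + in2) into a temporary
// on the device before the rest of the expression is evaluated; the expected
// result is therefore (in1 + in2) * 3.14f + 2.718f.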
template <typename Context>
void test_forced_contextual_eval(Context* context)
{
  context->out().device(context->device()) = (context->in1() + context->in2()).eval() * 3.14f + context->in1().constant(2.718f);
}

template <typename Context>
void test_compound_assignment(Context* context)
{
  context->out().device(context->device()) = context->in1().constant(2.718f);
  context->out().device(context->device()) += context->in1() + context->in2() * 3.14f;
}
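
// Contract dimensions 1 and 2 of in1 (sizes 50 and 70) against dimensions 1
// and 2 of in2, leaving a 40x40 result. out is viewed as a 40x3500 matrix and
// the result is written into its top-left 40x40 slice.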
template <typename Context>
void test_contraction(Context* context)
{
  Eigen::array<std::pair<int, int>, 2> dims;
  dims[0] = std::make_pair(1, 1);
  dims[1] = std::make_pair(2, 2);

  Eigen::array<int, 2> shape(40, 50*70);

  Eigen::DSizes<int, 2> indices(0,0);
  Eigen::DSizes<int, 2> sizes(40,40);

  context->out().reshape(shape).slice(indices, sizes).device(context->device()) = context->in1().contract(context->in2(), dims);
}
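
// Convolving with a kernel of size K along a dimension shrinks it from N to
// N - K + 1 (valid convolution), hence the slice sizes below: the length-2
// kernels turn 50 into 49 and 70 into 69, and the 3d kernel also turns 40
// into 39.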
template <typename Context>
void test_1d_convolution(Context* context)
{
  Eigen::DSizes<int, 3> indices(0,0,0);
  Eigen::DSizes<int, 3> sizes(40,49,70);

  Eigen::array<int, 1> dims(1);
  context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel1d(), dims);
}

template <typename Context>
void test_2d_convolution(Context* context)
{
  Eigen::DSizes<int, 3> indices(0,0,0);
  Eigen::DSizes<int, 3> sizes(40,49,69);

  Eigen::array<int, 2> dims(1,2);
  context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel2d(), dims);
}

template <typename Context>
void test_3d_convolution(Context* context)
{
  Eigen::DSizes<int, 3> indices(0,0,0);
  Eigen::DSizes<int, 3> sizes(39,49,69);

  Eigen::array<int, 3> dims(0,1,2);
  context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel3d(), dims);
}

void test_cpu() {
  Eigen::Tensor<float, 3> in1(40,50,70);
  Eigen::Tensor<float, 3> in2(40,50,70);
  Eigen::Tensor<float, 3> out(40,50,70);
  in1 = in1.random() + in1.constant(10.0f);
  in2 = in2.random() + in2.constant(10.0f);

  CPUContext context(in1, in2, out);
  test_contextual_eval(&context);
  for (int i = 0; i < 40; ++i) {
    for (int j = 0; j < 50; ++j) {
      for (int k = 0; k < 70; ++k) {
        VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f);
      }
    }
  }

  test_forced_contextual_eval(&context);
  for (int i = 0; i < 40; ++i) {
    for (int j = 0; j < 50; ++j) {
      for (int k = 0; k < 70; ++k) {
        VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) + in2(i,j,k)) * 3.14f + 2.718f);
      }
    }
  }

  test_compound_assignment(&context);
  for (int i = 0; i < 40; ++i) {
    for (int j = 0; j < 50; ++j) {
      for (int k = 0; k < 70; ++k) {
        VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f);
      }
    }
  }

  test_contraction(&context);
  for (int i = 0; i < 40; ++i) {
    for (int j = 0; j < 40; ++j) {
      const float result = out(i,j,0);
      float expected = 0;
      for (int k = 0; k < 50; ++k) {
        for (int l = 0; l < 70; ++l) {
          expected += in1(i, k, l) * in2(j, k, l);
        }
      }
      VERIFY_IS_APPROX(expected, result);
    }
  }

  test_1d_convolution(&context);
  for (int i = 0; i < 40; ++i) {
    for (int j = 0; j < 49; ++j) {
      for (int k = 0; k < 70; ++k) {
        VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f));
      }
    }
  }

  test_2d_convolution(&context);
  for (int i = 0; i < 40; ++i) {
    for (int j = 0; j < 49; ++j) {
      for (int k = 0; k < 69; ++k) {
        const float result = out(i,j,k);
        const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f) +
                               (in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f);
        if (fabs(expected) < 1e-4f && fabs(result) < 1e-4f) {
          continue;
        }
        VERIFY_IS_APPROX(expected, result);
      }
    }
  }

  test_3d_convolution(&context);
  for (int i = 0; i < 39; ++i) {
    for (int j = 0; j < 49; ++j) {
      for (int k = 0; k < 69; ++k) {
        const float result = out(i,j,k);
        const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f +
                                in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f) +
                               (in1(i+1,j,k) * -1.0f + in1(i+1,j+1,k) * -0.3f +
                                in1(i+1,j,k+1) * -0.7f + in1(i+1,j+1,k+1) * -0.5f);
        if (fabs(expected) < 1e-4f && fabs(result) < 1e-4f) {
          continue;
        }
        VERIFY_IS_APPROX(expected, result);
      }
    }
  }
}
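
// The GPU path: inputs are copied to device memory by hand, the same
// templated tests run through a GPUContext, and results are copied back to
// the host before checking. Note that d_out is read back with a synchronous
// gpuMemcpy after the first four tests, and with gpuMemcpyAsync on the
// device's stream (followed by an explicit synchronize) after the
// convolution tests.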
void test_gpu() {
  Eigen::Tensor<float, 3> in1(40,50,70);
  Eigen::Tensor<float, 3> in2(40,50,70);
  Eigen::Tensor<float, 3> out(40,50,70);
  in1 = in1.random() + in1.constant(10.0f);
  in2 = in2.random() + in2.constant(10.0f);

  std::size_t in1_bytes = in1.size() * sizeof(float);
  std::size_t in2_bytes = in2.size() * sizeof(float);
  std::size_t out_bytes = out.size() * sizeof(float);

  float* d_in1;
  float* d_in2;
  float* d_out;
  gpuMalloc((void**)(&d_in1), in1_bytes);
  gpuMalloc((void**)(&d_in2), in2_bytes);
  gpuMalloc((void**)(&d_out), out_bytes);

  gpuMemcpy(d_in1, in1.data(), in1_bytes, gpuMemcpyHostToDevice);
  gpuMemcpy(d_in2, in2.data(), in2_bytes, gpuMemcpyHostToDevice);

  Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in1(d_in1, 40,50,70);
  Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in2(d_in2, 40,50,70);
  Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_out(d_out, 40,50,70);

  GPUContext context(gpu_in1, gpu_in2, gpu_out);
  test_contextual_eval(&context);
  assert(gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost) == gpuSuccess);
  for (int i = 0; i < 40; ++i) {
    for (int j = 0; j < 50; ++j) {
      for (int k = 0; k < 70; ++k) {
        VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f);
      }
    }
  }

  test_forced_contextual_eval(&context);
  assert(gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost) == gpuSuccess);
  for (int i = 0; i < 40; ++i) {
    for (int j = 0; j < 50; ++j) {
      for (int k = 0; k < 70; ++k) {
        VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) + in2(i,j,k)) * 3.14f + 2.718f);
      }
    }
  }

  test_compound_assignment(&context);
  assert(gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost) == gpuSuccess);
  for (int i = 0; i < 40; ++i) {
    for (int j = 0; j < 50; ++j) {
      for (int k = 0; k < 70; ++k) {
        VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f);
      }
    }
  }

  test_contraction(&context);
  assert(gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost) == gpuSuccess);
  for (int i = 0; i < 40; ++i) {
    for (int j = 0; j < 40; ++j) {
      const float result = out(i,j,0);
      float expected = 0;
      for (int k = 0; k < 50; ++k) {
        for (int l = 0; l < 70; ++l) {
          expected += in1(i, k, l) * in2(j, k, l);
        }
      }
      VERIFY_IS_APPROX(expected, result);
    }
  }

  test_1d_convolution(&context);
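  // The convolution kernels are launched asynchronously on the device's
  // stream, so read the result back on that same stream and synchronize
  // before verifying on the host.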
  assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, context.device().stream()) == gpuSuccess);
  assert(gpuStreamSynchronize(context.device().stream()) == gpuSuccess);
  for (int i = 0; i < 40; ++i) {
    for (int j = 0; j < 49; ++j) {
      for (int k = 0; k < 70; ++k) {
        VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f));
      }
    }
  }

  test_2d_convolution(&context);
  assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, context.device().stream()) == gpuSuccess);
  assert(gpuStreamSynchronize(context.device().stream()) == gpuSuccess);
  for (int i = 0; i < 40; ++i) {
    for (int j = 0; j < 49; ++j) {
      for (int k = 0; k < 69; ++k) {
        const float result = out(i,j,k);
        const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f +
                                in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f);
        VERIFY_IS_APPROX(expected, result);
      }
    }
  }

#if !defined(EIGEN_USE_HIP)
  // Disabled on the HIP platform: 3D tensor convolutions seem to hang there.
  test_3d_convolution(&context);
  assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, context.device().stream()) == gpuSuccess);
  assert(gpuStreamSynchronize(context.device().stream()) == gpuSuccess);
  for (int i = 0; i < 39; ++i) {
    for (int j = 0; j < 49; ++j) {
      for (int k = 0; k < 69; ++k) {
        const float result = out(i,j,k);
        const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f +
                                in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f +
                                in1(i+1,j,k) * -1.0f + in1(i+1,j+1,k) * -0.3f +
                                in1(i+1,j,k+1) * -0.7f + in1(i+1,j+1,k+1) * -0.5f);
        VERIFY_IS_APPROX(expected, result);
      }
    }
  }
#endif

  // Release the device buffers allocated above.
  assert(gpuFree(d_in1) == gpuSuccess);
  assert(gpuFree(d_in2) == gpuSuccess);
  assert(gpuFree(d_out) == gpuSuccess);
}

EIGEN_DECLARE_TEST(cxx11_tensor_device)
{
  CALL_SUBTEST_1(test_cpu());
  CALL_SUBTEST_2(test_gpu());
}
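
// CALL_SUBTEST_n wraps each call so the harness in main.h can run the CPU
// and GPU parts as separate subtests (each guarded by an EIGEN_TEST_PART_n
// define when the test is split at build time).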