
#ifndef THIRD_PARTY_EIGEN3_TENSOR_BENCHMARKS_H_
#define THIRD_PARTY_EIGEN3_TENSOR_BENCHMARKS_H_

typedef int TensorIndex;
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int

#include "unsupported/Eigen/CXX11/Tensor"
#include "benchmark.h"

#define BENCHMARK_RANGE(bench, lo, hi) \
  BENCHMARK(bench)->Range(lo, hi)

using Eigen::Tensor;
using Eigen::TensorMap;

// TODO(bsteiner): also templatize on the input type since we have users
// for int8 as well as floats.
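// Each method below benchmarks one tensor expression on the given device.
// The suite owns three device buffers: a_ (m x k), b_ (k x n) and c_ (m x n),
// allocated in initialize() and released in the destructor.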
template <typename Device, typename T> class BenchmarkSuite {
 public:
  BenchmarkSuite(const Device& device, size_t m, size_t k, size_t n)
      : m_(m), k_(k), n_(n), device_(device) {
    initialize();
  }

  BenchmarkSuite(const Device& device, size_t m)
      : m_(m), k_(m), n_(m), device_(device) {
    initialize();
  }

  BenchmarkSuite(const Device& device, size_t m, size_t k)
      : m_(1), k_(k), n_(m), device_(device) {
    initialize();
  }

  ~BenchmarkSuite() {
    device_.deallocate(a_);
    device_.deallocate(b_);
    device_.deallocate(c_);
  }

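  // Times a raw device-to-device copy of m_ * m_ values from a_ to c_.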
  void memcpy(int num_iters) {
    eigen_assert(m_ == k_ && k_ == n_);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      device_.memcpy(c_, a_, m_ * m_ * sizeof(T));
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      device_.memcpy(c_, a_, m_ * m_ * sizeof(T));
    }
    // Record the number of values copied per second
    finalizeBenchmark(static_cast<int64_t>(m_) * m_ * num_iters);
  }

  void typeCasting(int num_iters) {
    eigen_assert(m_ == n_);
    Eigen::array<TensorIndex, 2> sizes;
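    // a_ holds m_ * k_ elements of type T. When T is narrower than int,
    // scale the element counts down so the int-typed view A below spans the
    // same number of bytes and stays within the buffer.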
    if (sizeof(T) >= sizeof(int)) {
      sizes[0] = m_;
      sizes[1] = k_;
    } else {
      sizes[0] = m_ * sizeof(T) / sizeof(int);
      sizes[1] = k_ * sizeof(T) / sizeof(int);
    }
    const TensorMap<Tensor<int, 2, 0, TensorIndex>, Eigen::Aligned> A((int*)a_, sizes);
    TensorMap<Tensor<T, 2, 0, TensorIndex>, Eigen::Aligned> B(b_, sizes);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      B.device(device_) = A.template cast<T>();
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      B.device(device_) = A.template cast<T>();
    }
    // Record the number of values copied per second
    finalizeBenchmark(static_cast<int64_t>(m_) * k_ * num_iters);
  }

  void random(int num_iters) {
    eigen_assert(m_ == k_ && k_ == n_);
    Eigen::array<TensorIndex, 2> sizes;
    sizes[0] = m_;
    sizes[1] = m_;
    TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = C.random();
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = C.random();
    }
    // Record the number of random numbers generated per second
    finalizeBenchmark(static_cast<int64_t>(m_) * m_ * num_iters);
  }

  void slicing(int num_iters) {
    eigen_assert(m_ == k_ && k_ == n_);
    Eigen::array<TensorIndex, 2> sizes;
    sizes[0] = m_;
    sizes[1] = m_;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, sizes);
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizes);
    TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);
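    // Offsets of the four (m_/2 x m_/2) quadrants of the m_ x m_ tensors.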
    const Eigen::DSizes<TensorIndex, 2> quarter_sizes(m_/2, m_/2);
    const Eigen::DSizes<TensorIndex, 2> first_quadrant(0, 0);
    const Eigen::DSizes<TensorIndex, 2> second_quadrant(0, m_/2);
    const Eigen::DSizes<TensorIndex, 2> third_quadrant(m_/2, 0);
    const Eigen::DSizes<TensorIndex, 2> fourth_quadrant(m_/2, m_/2);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.slice(first_quadrant, quarter_sizes).device(device_) =
          A.slice(first_quadrant, quarter_sizes);
      C.slice(second_quadrant, quarter_sizes).device(device_) =
          B.slice(second_quadrant, quarter_sizes);
      C.slice(third_quadrant, quarter_sizes).device(device_) =
          A.slice(third_quadrant, quarter_sizes);
      C.slice(fourth_quadrant, quarter_sizes).device(device_) =
          B.slice(fourth_quadrant, quarter_sizes);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.slice(first_quadrant, quarter_sizes).device(device_) =
          A.slice(first_quadrant, quarter_sizes);
      C.slice(second_quadrant, quarter_sizes).device(device_) =
          B.slice(second_quadrant, quarter_sizes);
      C.slice(third_quadrant, quarter_sizes).device(device_) =
          A.slice(third_quadrant, quarter_sizes);
      C.slice(fourth_quadrant, quarter_sizes).device(device_) =
          B.slice(fourth_quadrant, quarter_sizes);
    }
    // Record the number of values copied from the rhs slice to the lhs slice
    // each second
    finalizeBenchmark(static_cast<int64_t>(m_) * m_ * num_iters);
  }

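  // Extracts one coefficient of the first dimension: B.chip(i, 0) is row i
  // of the k_ x n_ matrix B; iter % k_ cycles through the rows.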
  void rowChip(int num_iters) {
    Eigen::array<TensorIndex, 2> input_size;
    input_size[0] = k_;
    input_size[1] = n_;
    const TensorMap<Tensor<T, 2, 0, TensorIndex>, Eigen::Aligned> B(b_, input_size);
    Eigen::array<TensorIndex, 1> output_size;
    output_size[0] = n_;
    TensorMap<Tensor<T, 1, 0, TensorIndex>, Eigen::Aligned> C(c_, output_size);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = B.chip(iter % k_, 0);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = B.chip(iter % k_, 0);
    }
    // Record the number of values copied from the rhs chip to the lhs.
    finalizeBenchmark(static_cast<int64_t>(n_) * num_iters);
  }

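  // Same pattern, but fixes the second dimension: B.chip(i, 1) is column i.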
  void colChip(int num_iters) {
    Eigen::array<TensorIndex, 2> input_size;
    input_size[0] = k_;
    input_size[1] = n_;
    const TensorMap<Tensor<T, 2, 0, TensorIndex>, Eigen::Aligned> B(b_, input_size);
    Eigen::array<TensorIndex, 1> output_size;
    output_size[0] = n_;
    TensorMap<Tensor<T, 1, 0, TensorIndex>, Eigen::Aligned> C(c_, output_size);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = B.chip(iter % n_, 1);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = B.chip(iter % n_, 1);
    }
    // Record the number of values copied from the rhs chip to the lhs.
    finalizeBenchmark(static_cast<int64_t>(n_) * num_iters);
  }

  void shuffling(int num_iters) {
    eigen_assert(m_ == n_);
    Eigen::array<TensorIndex, 2> size_a;
    size_a[0] = m_;
    size_a[1] = k_;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, size_a);
    Eigen::array<TensorIndex, 2> size_b;
    size_b[0] = k_;
    size_b[1] = m_;
    TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, size_b);
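    // Swapping the two dimension indices makes this shuffle a 2-D transpose.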
    Eigen::array<int, 2> shuffle;
    shuffle[0] = 1;
    shuffle[1] = 0;
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      B.device(device_) = A.shuffle(shuffle);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      B.device(device_) = A.shuffle(shuffle);
    }
    // Record the number of values shuffled from A and copied to B each second
    finalizeBenchmark(static_cast<int64_t>(m_) * k_ * num_iters);
  }

  void padding(int num_iters) {
    eigen_assert(m_ == k_);
    Eigen::array<TensorIndex, 2> size_a;
    size_a[0] = m_;
    size_a[1] = k_-3;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, size_a);
    Eigen::array<TensorIndex, 2> size_b;
    size_b[0] = k_;
    size_b[1] = m_;
    TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, size_b);
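    // Pad dimension 1 with 2 values before and 1 after, growing A from
    // m_ x (k_-3) to m_ x k_.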
#if defined(EIGEN_HAS_INDEX_LIST)
    Eigen::IndexPairList<Eigen::type2indexpair<0, 0>,
                         Eigen::type2indexpair<2, 1> > paddings;
#else
    Eigen::array<Eigen::IndexPair<TensorIndex>, 2> paddings;
    paddings[0] = Eigen::IndexPair<TensorIndex>(0, 0);
    paddings[1] = Eigen::IndexPair<TensorIndex>(2, 1);
#endif
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      B.device(device_) = A.pad(paddings);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      B.device(device_) = A.pad(paddings);
    }
    // Record the number of values copied from the padded tensor A each second
    finalizeBenchmark(static_cast<int64_t>(m_) * k_ * num_iters);
  }

  void striding(int num_iters) {
    eigen_assert(m_ == k_);
    Eigen::array<TensorIndex, 2> size_a;
    size_a[0] = m_;
    size_a[1] = k_;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, size_a);
    Eigen::array<TensorIndex, 2> size_b;
    size_b[0] = m_;
    size_b[1] = k_/2;
    TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, size_b);
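    // Stride 1 along dimension 0 and 2 along dimension 1: B receives every
    // other column of A.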
#ifndef EIGEN_HAS_INDEX_LIST
    Eigen::array<TensorIndex, 2> strides;
    strides[0] = 1;
    strides[1] = 2;
#else
    // Take advantage of cxx11 to give the compiler information it can use to
    // optimize the code.
    Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2> > strides;
#endif
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      B.device(device_) = A.stride(strides);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      B.device(device_) = A.stride(strides);
    }
    // Record the number of values copied from the strided tensor A each second
    finalizeBenchmark(static_cast<int64_t>(m_) * k_ * num_iters);
  }

  void broadcasting(int num_iters) {
    Eigen::array<TensorIndex, 2> size_a;
    size_a[0] = m_;
    size_a[1] = 1;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, size_a);
    Eigen::array<TensorIndex, 2> size_c;
    size_c[0] = m_;
    size_c[1] = n_;
    TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, size_c);
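    // Replicate A's single column n_ times along dimension 1.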
#ifndef EIGEN_HAS_INDEX_LIST
    Eigen::array<int, 2> broadcast;
    broadcast[0] = 1;
    broadcast[1] = n_;
#else
    // Take advantage of cxx11 to give the compiler information it can use to
    // optimize the code.
    Eigen::IndexList<Eigen::type2index<1>, int> broadcast;
    broadcast.set(1, n_);
#endif
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = A.broadcast(broadcast);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = A.broadcast(broadcast);
    }
    // Record the number of values broadcast from A and copied to C each second
    finalizeBenchmark(static_cast<int64_t>(m_) * n_ * num_iters);
  }

  void coeffWiseOp(int num_iters) {
    eigen_assert(m_ == k_ && k_ == n_);
    Eigen::array<TensorIndex, 2> sizes;
    sizes[0] = m_;
    sizes[1] = m_;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, sizes);
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizes);
    TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = A * A.constant(static_cast<T>(3.14)) +
                          B * B.constant(static_cast<T>(2.7));
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = A * A.constant(static_cast<T>(3.14)) +
                          B * B.constant(static_cast<T>(2.7));
    }
    // Record the number of FLOP executed per second (2 multiplications and
    // 1 addition per value)
    finalizeBenchmark(static_cast<int64_t>(3) * m_ * m_ * num_iters);
  }

  void algebraicFunc(int num_iters) {
    eigen_assert(m_ == k_ && k_ == n_);
    Eigen::array<TensorIndex, 2> sizes;
    sizes[0] = m_;
    sizes[1] = m_;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, sizes);
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizes);
    TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = A.rsqrt() + B.sqrt() * B.square();
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = A.rsqrt() + B.sqrt() * B.square();
    }
    // Record the number of FLOP executed per second (assuming one operation
    // per value)
    finalizeBenchmark(static_cast<int64_t>(m_) * m_ * num_iters);
  }

  void transcendentalFunc(int num_iters) {
    eigen_assert(m_ == k_ && k_ == n_);
    Eigen::array<TensorIndex, 2> sizes;
    sizes[0] = m_;
    sizes[1] = m_;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, sizes);
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizes);
    TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = A.exp() + B.log();
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = A.exp() + B.log();
    }
    // Record the number of FLOP executed per second (assuming one operation
    // per value)
    finalizeBenchmark(static_cast<int64_t>(m_) * m_ * num_iters);
  }

  // Row reduction
  void rowReduction(int num_iters) {
    Eigen::array<TensorIndex, 2> input_size;
    input_size[0] = k_;
    input_size[1] = n_;
    const TensorMap<Tensor<T, 2, 0, TensorIndex>, Eigen::Aligned> B(b_, input_size);
    Eigen::array<TensorIndex, 1> output_size;
    output_size[0] = n_;
    TensorMap<Tensor<T, 1, 0, TensorIndex>, Eigen::Aligned> C(c_, output_size);
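    // Summing along dimension 0 collapses the k_ rows, leaving one value per
    // column of B.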
#ifndef EIGEN_HAS_INDEX_LIST
    Eigen::array<TensorIndex, 1> sum_along_dim;
    sum_along_dim[0] = 0;
#else
    // Take advantage of cxx11 to give the compiler information it can use to
    // optimize the code.
    Eigen::IndexList<Eigen::type2index<0>> sum_along_dim;
#endif
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = B.sum(sum_along_dim);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = B.sum(sum_along_dim);
    }
    // Record the number of FLOP executed per second (assuming one operation
    // per value)
    finalizeBenchmark(static_cast<int64_t>(k_) * n_ * num_iters);
  }

  // Column reduction
  void colReduction(int num_iters) {
    Eigen::array<TensorIndex, 2> input_size;
    input_size[0] = k_;
    input_size[1] = n_;
    const TensorMap<Tensor<T, 2, 0, TensorIndex>, Eigen::Aligned> B(
        b_, input_size);
    Eigen::array<TensorIndex, 1> output_size;
    output_size[0] = k_;
    TensorMap<Tensor<T, 1, 0, TensorIndex>, Eigen::Aligned> A(
        a_, output_size);
#ifndef EIGEN_HAS_INDEX_LIST
    Eigen::array<TensorIndex, 1> sum_along_dim;
    sum_along_dim[0] = 1;
#else
    // Take advantage of cxx11 to give the compiler information it can use to
    // optimize the code.
    Eigen::IndexList<Eigen::type2index<1>> sum_along_dim;
#endif
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      A.device(device_) = B.sum(sum_along_dim);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      A.device(device_) = B.sum(sum_along_dim);
    }
    // Record the number of FLOP executed per second (assuming one operation
    // per value)
    finalizeBenchmark(static_cast<int64_t>(k_) * n_ * num_iters);
  }

  // Full reduction
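  // Reduces the whole k_ x n_ tensor to a single scalar, stored in a
  // 0-dimensional tensor.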
  void fullReduction(int num_iters) {
    Eigen::array<TensorIndex, 2> input_size;
    input_size[0] = k_;
    input_size[1] = n_;
    const TensorMap<Tensor<T, 2, 0, TensorIndex>, Eigen::Aligned> B(
        b_, input_size);
    Eigen::array<TensorIndex, 0> output_size;
    TensorMap<Tensor<T, 0, 0, TensorIndex>, Eigen::Aligned> C(
        c_, output_size);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = B.sum();
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = B.sum();
    }
    // Record the number of FLOP executed per second (assuming one operation
    // per value)
    finalizeBenchmark(static_cast<int64_t>(k_) * n_ * num_iters);
  }

  // do a contraction which is equivalent to a matrix multiplication
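  // The "RowMajor" variants use row-major tensor layout; the AT/BT suffixes
  // indicate that the lhs (A) and/or rhs (B) operand is transposed.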
  void contraction(int num_iters) {
    contraction<static_cast<int>(Eigen::ColMajor)>(num_iters, false, false);
  }

  void contractionRowMajor(int num_iters) {
    contraction<static_cast<int>(Eigen::RowMajor)>(num_iters, false, false);
  }

  void contractionRowMajorAT(int num_iters) {
    contraction<static_cast<int>(Eigen::RowMajor)>(num_iters, true, false);
  }

  void contractionRowMajorBT(int num_iters) {
    contraction<static_cast<int>(Eigen::RowMajor)>(num_iters, false, true);
  }

  void contractionRowMajorABT(int num_iters) {
    contraction<static_cast<int>(Eigen::RowMajor)>(num_iters, true, true);
  }

  void convolution(int num_iters, int kernel_x, int kernel_y) {
    Eigen::array<TensorIndex, 2> input_sizes;
    input_sizes[0] = m_;
    input_sizes[1] = n_;
    TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, input_sizes);
    Eigen::array<TensorIndex, 2> kernel_sizes;
    kernel_sizes[0] = kernel_x;
    kernel_sizes[1] = kernel_y;
    TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, kernel_sizes);
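    // "Valid" convolution: the output shrinks by (kernel size - 1) along
    // each convolved dimension.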
    Eigen::array<TensorIndex, 2> result_sizes;
    result_sizes[0] = m_ - kernel_x + 1;
    result_sizes[1] = n_ - kernel_y + 1;
    TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, result_sizes);
    Eigen::array<TensorIndex, 2> dims;
    dims[0] = 0;
    dims[1] = 1;
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = A.convolve(B, dims);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = A.convolve(B, dims);
    }
    // Record the number of FLOPs executed per second (kernel_size
    // multiplications and additions for each value in the resulting tensor)
    finalizeBenchmark(static_cast<int64_t>(2) *
        (m_ - kernel_x + 1) * (n_ - kernel_y + 1) * kernel_x * kernel_y * num_iters);
  }

 private:
  // do a contraction which is equivalent to a matrix multiplication
  template<int Layout>
  void contraction(int num_iters, bool trans_a, bool trans_b) {
    Eigen::array<TensorIndex, 2> sizeA;
    sizeA[0] = (trans_a ? k_ : m_);
    sizeA[1] = (trans_a ? m_ : k_);
    Eigen::array<TensorIndex, 2> sizeB;
    sizeB[0] = (trans_b ? n_ : k_);
    sizeB[1] = (trans_b ? k_ : n_);
    Eigen::array<TensorIndex, 2> sizeC;
    sizeC[0] = m_;
    sizeC[1] = n_;
    const TensorMap<Tensor<T, 2, Layout>, Eigen::Aligned> A(a_, sizeA);
    const TensorMap<Tensor<T, 2, Layout>, Eigen::Aligned> B(b_, sizeB);
    TensorMap<Tensor<T, 2, Layout>, Eigen::Aligned> C(c_, sizeC);
    typedef typename Tensor<T, 2, Layout>::DimensionPair DimPair;
    Eigen::array<DimPair, 1> dims;
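    // Contract the k-sized dimension of A with the k-sized dimension of B;
    // transposing an operand flips which of its two indices carries k.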
    TensorIndex a_contract_dim = (trans_a ? 0 : 1);
    TensorIndex b_contract_dim = (trans_b ? 1 : 0);
    dims[0] = DimPair(a_contract_dim, b_contract_dim);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = A.contract(B, dims);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = A.contract(B, dims);
    }
    // Record the number of FLOP executed per second (k_ multiplications and
    // additions for each value in the resulting tensor)
    finalizeBenchmark(static_cast<int64_t>(2) * m_ * n_ * k_ * num_iters);
  }

  void initialize() {
    a_ = (T *) device_.allocate(m_ * k_ * sizeof(T));
    b_ = (T *) device_.allocate(k_ * n_ * sizeof(T));
    c_ = (T *) device_.allocate(m_ * n_ * sizeof(T));

    // Initialize the content of the memory pools to prevent asan from
    // complaining.
    device_.memset(a_, 12, m_ * k_ * sizeof(T));
    device_.memset(b_, 23, k_ * n_ * sizeof(T));
    device_.memset(c_, 31, m_ * n_ * sizeof(T));
  }

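  // Stops the timer and reports throughput. Asynchronous devices (GPU, SYCL)
  // are synchronized first so that all queued work is included in the
  // measurement.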
  inline void finalizeBenchmark(int64_t num_items) {
#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
    if (Eigen::internal::is_same<Device, Eigen::GpuDevice>::value) {
      device_.synchronize();
    }
#elif defined(EIGEN_USE_SYCL)
    if (Eigen::internal::is_same<Device, Eigen::SyclDevice>::value) {
      device_.synchronize();
    }
#endif
    StopBenchmarkTiming();
    SetBenchmarkFlopsProcessed(num_items);
  }

  TensorIndex m_;
  TensorIndex k_;
  TensorIndex n_;
  T* a_;
  T* b_;
  T* c_;
  Device device_;
};

#endif  // THIRD_PARTY_EIGEN3_TENSOR_BENCHMARKS_H_