// bench_gemm.cpp
//
// g++-4.4 bench_gemm.cpp -I .. -O2 -DNDEBUG -lrt -fopenmp && OMP_NUM_THREADS=2 ./a.out
// icpc bench_gemm.cpp -I .. -O3 -DNDEBUG -lrt -openmp && OMP_NUM_THREADS=2 ./a.out
//
// Compilation options:
//
// -DSCALAR=std::complex<double>
// -DSCALARA=double or -DSCALARB=double
// -DHAVE_BLAS
// -DDECOUPLED
//
#include <iostream>
#include <bench/BenchTimer.h>
#include <Eigen/Core>

using namespace std;
using namespace Eigen;

// Scalar type of the product; override on the command line (see file header).
#ifndef SCALAR
// #define SCALAR std::complex<float>
#define SCALAR float
#endif

// Scalar types of the two operands; default to SCALAR (mixed-type benches
// are enabled by defining only one of them).
#ifndef SCALARA
#define SCALARA SCALAR
#endif

#ifndef SCALARB
#define SCALARB SCALAR
#endif

// Storage order of each operand: column-major unless ROWMAJ_A / ROWMAJ_B is set.
#ifdef ROWMAJ_A
const int opt_A = RowMajor;
#else
const int opt_A = ColMajor;
#endif

#ifdef ROWMAJ_B
const int opt_B = RowMajor;
#else
const int opt_B = ColMajor;
#endif

typedef SCALAR Scalar;
typedef NumTraits<Scalar>::Real RealScalar;
typedef Matrix<SCALARA,Dynamic,Dynamic,opt_A> A;  // left factor type
typedef Matrix<SCALARB,Dynamic,Dynamic,opt_B> B;  // right factor type
typedef Matrix<Scalar,Dynamic,Dynamic> C;         // result/accumulator type
typedef Matrix<RealScalar,Dynamic,Dynamic> M;     // real matrix for the split-complex "matlab" variants
#ifdef HAVE_BLAS

// Fortran BLAS prototypes (sgemm_, dgemm_, cgemm_, zgemm_, ...).
extern "C" {
#include <Eigen/src/misc/blas.h>
}

// Scalar constants passed by address, as the Fortran calling convention requires.
static float fone = 1;
static float fzero = 0;
static double done = 1;
static double szero = 0;  // NOTE(review): presumably meant "dzero" (double zero); unused in this file
static std::complex<float> cfone = 1;
static std::complex<float> cfzero = 0;
static std::complex<double> cdone = 1;
static std::complex<double> cdzero = 0;

// Character/int flags for BLAS calls. Several (nonunit, lower, right, intone)
// are not used by the gemm wrappers below — likely leftovers from other benches.
static char notrans = 'N';
static char trans = 'T';
static char nonunit = 'N';
static char lower = 'L';
static char right = 'R';
static int intone = 1;

// A row-major operand is presented to column-major BLAS as its transpose.
#ifdef ROWMAJ_A
const char transA = trans;
#else
const char transA = notrans;
#endif

#ifdef ROWMAJ_B
const char transB = trans;
#else
const char transB = notrans;
#endif
  69. template<typename A,typename B>
  70. void blas_gemm(const A& a, const B& b, MatrixXf& c)
  71. {
  72. int M = c.rows(); int N = c.cols(); int K = a.cols();
  73. int lda = a.outerStride(); int ldb = b.outerStride(); int ldc = c.rows();
  74. sgemm_(&transA,&transB,&M,&N,&K,&fone,
  75. const_cast<float*>(a.data()),&lda,
  76. const_cast<float*>(b.data()),&ldb,&fone,
  77. c.data(),&ldc);
  78. }
  79. template<typename A,typename B>
  80. void blas_gemm(const A& a, const B& b, MatrixXd& c)
  81. {
  82. int M = c.rows(); int N = c.cols(); int K = a.cols();
  83. int lda = a.outerStride(); int ldb = b.outerStride(); int ldc = c.rows();
  84. dgemm_(&transA,&transB,&M,&N,&K,&done,
  85. const_cast<double*>(a.data()),&lda,
  86. const_cast<double*>(b.data()),&ldb,&done,
  87. c.data(),&ldc);
  88. }
  89. template<typename A,typename B>
  90. void blas_gemm(const A& a, const B& b, MatrixXcf& c)
  91. {
  92. int M = c.rows(); int N = c.cols(); int K = a.cols();
  93. int lda = a.outerStride(); int ldb = b.outerStride(); int ldc = c.rows();
  94. cgemm_(&transA,&transB,&M,&N,&K,(float*)&cfone,
  95. const_cast<float*>((const float*)a.data()),&lda,
  96. const_cast<float*>((const float*)b.data()),&ldb,(float*)&cfone,
  97. (float*)c.data(),&ldc);
  98. }
  99. template<typename A,typename B>
  100. void blas_gemm(const A& a, const B& b, MatrixXcd& c)
  101. {
  102. int M = c.rows(); int N = c.cols(); int K = a.cols();
  103. int lda = a.outerStride(); int ldb = b.outerStride(); int ldc = c.rows();
  104. zgemm_(&transA,&transB,&M,&N,&K,(double*)&cdone,
  105. const_cast<double*>((const double*)a.data()),&lda,
  106. const_cast<double*>((const double*)b.data()),&ldb,(double*)&cdone,
  107. (double*)c.data(),&ldc);
  108. }
  109. #endif
// Complex*complex product emulated with four real GEMMs over split
// real/imaginary parts, the way a "matlab"-style implementation would do it:
//   [cr ci] += [ar ai] * br + [-ai ar] * bi
// Accumulation order is kept as-is (floating-point results depend on it).
void matlab_cplx_cplx(const M& ar, const M& ai, const M& br, const M& bi, M& cr, M& ci)
{
  cr.noalias() += ar * br;
  cr.noalias() -= ai * bi;
  ci.noalias() += ar * bi;
  ci.noalias() += ai * br;
}
// Real*complex product as two real GEMMs: (cr + i*ci) += a * (br + i*bi).
void matlab_real_cplx(const M& a, const M& br, const M& bi, M& cr, M& ci)
{
  cr.noalias() += a * br;
  ci.noalias() += a * bi;
}
// Complex*real product as two real GEMMs: (cr + i*ci) += (ar + i*ai) * b.
void matlab_cplx_real(const M& ar, const M& ai, const M& b, M& cr, M& ci)
{
  cr.noalias() += ar * b;
  ci.noalias() += ai * b;
}
  128. template<typename A, typename B, typename C>
  129. EIGEN_DONT_INLINE void gemm(const A& a, const B& b, C& c)
  130. {
  131. c.noalias() += a * b;
  132. }
  133. int main(int argc, char ** argv)
  134. {
  135. std::ptrdiff_t l1 = internal::queryL1CacheSize();
  136. std::ptrdiff_t l2 = internal::queryTopLevelCacheSize();
  137. std::cout << "L1 cache size = " << (l1>0 ? l1/1024 : -1) << " KB\n";
  138. std::cout << "L2/L3 cache size = " << (l2>0 ? l2/1024 : -1) << " KB\n";
  139. typedef internal::gebp_traits<Scalar,Scalar> Traits;
  140. std::cout << "Register blocking = " << Traits::mr << " x " << Traits::nr << "\n";
  141. int rep = 1; // number of repetitions per try
  142. int tries = 2; // number of tries, we keep the best
  143. int s = 2048;
  144. int m = s;
  145. int n = s;
  146. int p = s;
  147. int cache_size1=-1, cache_size2=l2, cache_size3 = 0;
  148. bool need_help = false;
  149. for (int i=1; i<argc;)
  150. {
  151. if(argv[i][0]=='-')
  152. {
  153. if(argv[i][1]=='s')
  154. {
  155. ++i;
  156. s = atoi(argv[i++]);
  157. m = n = p = s;
  158. if(argv[i][0]!='-')
  159. {
  160. n = atoi(argv[i++]);
  161. p = atoi(argv[i++]);
  162. }
  163. }
  164. else if(argv[i][1]=='c')
  165. {
  166. ++i;
  167. cache_size1 = atoi(argv[i++]);
  168. if(argv[i][0]!='-')
  169. {
  170. cache_size2 = atoi(argv[i++]);
  171. if(argv[i][0]!='-')
  172. cache_size3 = atoi(argv[i++]);
  173. }
  174. }
  175. else if(argv[i][1]=='t')
  176. {
  177. tries = atoi(argv[++i]);
  178. ++i;
  179. }
  180. else if(argv[i][1]=='p')
  181. {
  182. ++i;
  183. rep = atoi(argv[i++]);
  184. }
  185. }
  186. else
  187. {
  188. need_help = true;
  189. break;
  190. }
  191. }
  192. if(need_help)
  193. {
  194. std::cout << argv[0] << " -s <matrix sizes> -c <cache sizes> -t <nb tries> -p <nb repeats>\n";
  195. std::cout << " <matrix sizes> : size\n";
  196. std::cout << " <matrix sizes> : rows columns depth\n";
  197. return 1;
  198. }
  199. #if EIGEN_VERSION_AT_LEAST(3,2,90)
  200. if(cache_size1>0)
  201. setCpuCacheSizes(cache_size1,cache_size2,cache_size3);
  202. #endif
  203. A a(m,p); a.setRandom();
  204. B b(p,n); b.setRandom();
  205. C c(m,n); c.setOnes();
  206. C rc = c;
  207. std::cout << "Matrix sizes = " << m << "x" << p << " * " << p << "x" << n << "\n";
  208. std::ptrdiff_t mc(m), nc(n), kc(p);
  209. internal::computeProductBlockingSizes<Scalar,Scalar>(kc, mc, nc);
  210. std::cout << "blocking size (mc x kc) = " << mc << " x " << kc << " x " << nc << "\n";
  211. C r = c;
  212. // check the parallel product is correct
  213. #if defined EIGEN_HAS_OPENMP
  214. Eigen::initParallel();
  215. int procs = omp_get_max_threads();
  216. if(procs>1)
  217. {
  218. #ifdef HAVE_BLAS
  219. blas_gemm(a,b,r);
  220. #else
  221. omp_set_num_threads(1);
  222. r.noalias() += a * b;
  223. omp_set_num_threads(procs);
  224. #endif
  225. c.noalias() += a * b;
  226. if(!r.isApprox(c)) std::cerr << "Warning, your parallel product is crap!\n\n";
  227. }
  228. #elif defined HAVE_BLAS
  229. blas_gemm(a,b,r);
  230. c.noalias() += a * b;
  231. if(!r.isApprox(c)) {
  232. std::cout << (r - c).norm()/r.norm() << "\n";
  233. std::cerr << "Warning, your product is crap!\n\n";
  234. }
  235. #else
  236. if(1.*m*n*p<2000.*2000*2000)
  237. {
  238. gemm(a,b,c);
  239. r.noalias() += a.cast<Scalar>() .lazyProduct( b.cast<Scalar>() );
  240. if(!r.isApprox(c)) {
  241. std::cout << (r - c).norm()/r.norm() << "\n";
  242. std::cerr << "Warning, your product is crap!\n\n";
  243. }
  244. }
  245. #endif
  246. #ifdef HAVE_BLAS
  247. BenchTimer tblas;
  248. c = rc;
  249. BENCH(tblas, tries, rep, blas_gemm(a,b,c));
  250. std::cout << "blas cpu " << tblas.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tblas.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tblas.total(CPU_TIMER) << "s)\n";
  251. std::cout << "blas real " << tblas.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tblas.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tblas.total(REAL_TIMER) << "s)\n";
  252. #endif
  253. // warm start
  254. if(b.norm()+a.norm()==123.554) std::cout << "\n";
  255. BenchTimer tmt;
  256. c = rc;
  257. BENCH(tmt, tries, rep, gemm(a,b,c));
  258. std::cout << "eigen cpu " << tmt.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(CPU_TIMER) << "s)\n";
  259. std::cout << "eigen real " << tmt.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(REAL_TIMER) << "s)\n";
  260. #ifdef EIGEN_HAS_OPENMP
  261. if(procs>1)
  262. {
  263. BenchTimer tmono;
  264. omp_set_num_threads(1);
  265. Eigen::setNbThreads(1);
  266. c = rc;
  267. BENCH(tmono, tries, rep, gemm(a,b,c));
  268. std::cout << "eigen mono cpu " << tmono.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmono.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmono.total(CPU_TIMER) << "s)\n";
  269. std::cout << "eigen mono real " << tmono.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmono.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmono.total(REAL_TIMER) << "s)\n";
  270. std::cout << "mt speed up x" << tmono.best(CPU_TIMER) / tmt.best(REAL_TIMER) << " => " << (100.0*tmono.best(CPU_TIMER) / tmt.best(REAL_TIMER))/procs << "%\n";
  271. }
  272. #endif
  273. if(1.*m*n*p<30*30*30)
  274. {
  275. BenchTimer tmt;
  276. c = rc;
  277. BENCH(tmt, tries, rep, c.noalias()+=a.lazyProduct(b));
  278. std::cout << "lazy cpu " << tmt.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(CPU_TIMER) << "s)\n";
  279. std::cout << "lazy real " << tmt.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(REAL_TIMER) << "s)\n";
  280. }
  281. #ifdef DECOUPLED
  282. if((NumTraits<A::Scalar>::IsComplex) && (NumTraits<B::Scalar>::IsComplex))
  283. {
  284. M ar(m,p); ar.setRandom();
  285. M ai(m,p); ai.setRandom();
  286. M br(p,n); br.setRandom();
  287. M bi(p,n); bi.setRandom();
  288. M cr(m,n); cr.setRandom();
  289. M ci(m,n); ci.setRandom();
  290. BenchTimer t;
  291. BENCH(t, tries, rep, matlab_cplx_cplx(ar,ai,br,bi,cr,ci));
  292. std::cout << "\"matlab\" cpu " << t.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << t.total(CPU_TIMER) << "s)\n";
  293. std::cout << "\"matlab\" real " << t.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
  294. }
  295. if((!NumTraits<A::Scalar>::IsComplex) && (NumTraits<B::Scalar>::IsComplex))
  296. {
  297. M a(m,p); a.setRandom();
  298. M br(p,n); br.setRandom();
  299. M bi(p,n); bi.setRandom();
  300. M cr(m,n); cr.setRandom();
  301. M ci(m,n); ci.setRandom();
  302. BenchTimer t;
  303. BENCH(t, tries, rep, matlab_real_cplx(a,br,bi,cr,ci));
  304. std::cout << "\"matlab\" cpu " << t.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << t.total(CPU_TIMER) << "s)\n";
  305. std::cout << "\"matlab\" real " << t.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
  306. }
  307. if((NumTraits<A::Scalar>::IsComplex) && (!NumTraits<B::Scalar>::IsComplex))
  308. {
  309. M ar(m,p); ar.setRandom();
  310. M ai(m,p); ai.setRandom();
  311. M b(p,n); b.setRandom();
  312. M cr(m,n); cr.setRandom();
  313. M ci(m,n); ci.setRandom();
  314. BenchTimer t;
  315. BENCH(t, tries, rep, matlab_cplx_real(ar,ai,b,cr,ci));
  316. std::cout << "\"matlab\" cpu " << t.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << t.total(CPU_TIMER) << "s)\n";
  317. std::cout << "\"matlab\" real " << t.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
  318. }
  319. #endif
  320. return 0;
  321. }