// eigen/bench/bench_gemm.cpp


// g++-4.4 bench_gemm.cpp -I .. -O2 -DNDEBUG -lrt -fopenmp && OMP_NUM_THREADS=2 ./a.out
// icpc bench_gemm.cpp -I .. -O3 -DNDEBUG -lrt -openmp && OMP_NUM_THREADS=2 ./a.out
// Compilation options:
//
// -DSCALAR=std::complex<double>
// -DSCALARA=double or -DSCALARB=double
// -DHAVE_BLAS
// -DDECOUPLED
// -DROWMAJ_A and/or -DROWMAJ_B (store A and/or B in row-major order)
//
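// Example runs (sizes and counts are only illustrative; see the option
// parsing in main() for the exact meaning of each flag):
//   ./a.out -s 1024             square product, m = n = p = 1024
//   ./a.out -s 1024 512 256     rows columns depth, i.e. (1024x256) * (256x512)
//   ./a.out -s 1024 -t 4 -p 5   keep the best of 4 tries, 5 repetitions per try
//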
#include <iostream>
#include <cstdlib>   // atoi
#include <bench/BenchTimer.h>
#include <Eigen/Core>
using namespace std;
using namespace Eigen;
#ifndef SCALAR
// #define SCALAR std::complex<float>
#define SCALAR float
#endif
#ifndef SCALARA
#define SCALARA SCALAR
#endif
#ifndef SCALARB
#define SCALARB SCALAR
#endif
#ifdef ROWMAJ_A
const int opt_A = RowMajor;
#else
const int opt_A = ColMajor;
#endif
#ifdef ROWMAJ_B
const int opt_B = RowMajor;
#else
const int opt_B = ColMajor;
#endif
typedef SCALAR Scalar;
typedef NumTraits<Scalar>::Real RealScalar;
typedef Matrix<SCALARA,Dynamic,Dynamic,opt_A> A;
typedef Matrix<SCALARB,Dynamic,Dynamic,opt_B> B;
typedef Matrix<Scalar,Dynamic,Dynamic> C;
typedef Matrix<RealScalar,Dynamic,Dynamic> M;
#ifdef HAVE_BLAS
extern "C" {
#include <Eigen/src/misc/blas.h>
}
static float fone = 1;
static float fzero = 0;
static double done = 1;
static double szero = 0;
static std::complex<float> cfone = 1;
static std::complex<float> cfzero = 0;
static std::complex<double> cdone = 1;
static std::complex<double> cdzero = 0;
static char notrans = 'N';
static char trans = 'T';
static char nonunit = 'N';
static char lower = 'L';
static char right = 'R';
static int intone = 1;
#ifdef ROWMAJ_A
const char transA = trans;
#else
const char transA = notrans;
#endif
#ifdef ROWMAJ_B
const char transB = trans;
#else
const char transB = notrans;
#endif
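// The BLAS interface below assumes column-major storage, so a row-major
// operand is passed as the transpose of its column-major view ('T' above),
// with the leading dimension taken from outerStride() in each wrapper.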
template<typename A,typename B>
void blas_gemm(const A& a, const B& b, MatrixXf& c)
{
  int M = c.rows(); int N = c.cols(); int K = a.cols();
  int lda = a.outerStride(); int ldb = b.outerStride(); int ldc = c.rows();
  sgemm_(&transA,&transB,&M,&N,&K,&fone,
         const_cast<float*>(a.data()),&lda,
         const_cast<float*>(b.data()),&ldb,&fone,
         c.data(),&ldc);
}
template<typename A,typename B>
void blas_gemm(const A& a, const B& b, MatrixXd& c)
{
  int M = c.rows(); int N = c.cols(); int K = a.cols();
  int lda = a.outerStride(); int ldb = b.outerStride(); int ldc = c.rows();
  dgemm_(&transA,&transB,&M,&N,&K,&done,
         const_cast<double*>(a.data()),&lda,
         const_cast<double*>(b.data()),&ldb,&done,
         c.data(),&ldc);
}
template<typename A,typename B>
void blas_gemm(const A& a, const B& b, MatrixXcf& c)
{
  int M = c.rows(); int N = c.cols(); int K = a.cols();
  int lda = a.outerStride(); int ldb = b.outerStride(); int ldc = c.rows();
  cgemm_(&transA,&transB,&M,&N,&K,(float*)&cfone,
         const_cast<float*>((const float*)a.data()),&lda,
         const_cast<float*>((const float*)b.data()),&ldb,(float*)&cfone,
         (float*)c.data(),&ldc);
}
template<typename A,typename B>
void blas_gemm(const A& a, const B& b, MatrixXcd& c)
{
  int M = c.rows(); int N = c.cols(); int K = a.cols();
  int lda = a.outerStride(); int ldb = b.outerStride(); int ldc = c.rows();
  zgemm_(&transA,&transB,&M,&N,&K,(double*)&cdone,
         const_cast<double*>((const double*)a.data()),&lda,
         const_cast<double*>((const double*)b.data()),&ldb,(double*)&cdone,
         (double*)c.data(),&ldc);
}
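// In all four wrappers alpha and beta are both 1, so each BLAS call computes
// c += a*b, matching the Eigen expression benchmarked below.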
#endif
void matlab_cplx_cplx(const M& ar, const M& ai, const M& br, const M& bi, M& cr, M& ci)
{
  cr.noalias() += ar * br;
  cr.noalias() -= ai * bi;
  ci.noalias() += ar * bi;
  ci.noalias() += ai * br;
  // [cr ci] += [ar ai] * br + [-ai ar] * bi
}
void matlab_real_cplx(const M& a, const M& br, const M& bi, M& cr, M& ci)
{
  cr.noalias() += a * br;
  ci.noalias() += a * bi;
}
void matlab_cplx_real(const M& ar, const M& ai, const M& b, M& cr, M& ci)
{
  cr.noalias() += ar * b;
  ci.noalias() += ai * b;
}
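// The three helpers above perform the product on decoupled real/imaginary
// parts, using (ar + i*ai)*(br + i*bi) = (ar*br - ai*bi) + i*(ar*bi + ai*br);
// they are exercised only when -DDECOUPLED is defined (see the end of main()).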
template<typename A, typename B, typename C>
EIGEN_DONT_INLINE void gemm(const A& a, const B& b, C& c)
{
  // kept out of line so that the BENCH macro below times an actual call
  c.noalias() += a * b;
}
int main(int argc, char ** argv)
{
  std::ptrdiff_t l1 = internal::queryL1CacheSize();
  std::ptrdiff_t l2 = internal::queryTopLevelCacheSize();
  std::cout << "L1 cache size = " << (l1>0 ? l1/1024 : -1) << " KB\n";
  std::cout << "L2/L3 cache size = " << (l2>0 ? l2/1024 : -1) << " KB\n";
  typedef internal::gebp_traits<Scalar,Scalar> Traits;
  std::cout << "Register blocking = " << Traits::mr << " x " << Traits::nr << "\n";
  int rep = 1;    // number of repetitions per try
  int tries = 2;  // number of tries, we keep the best
  int s = 2048;
  int m = s;
  int n = s;
  int p = s;
  int cache_size1=-1, cache_size2=l2, cache_size3 = 0;
  bool need_help = false;
  for (int i=1; i<argc;)
  {
    if(argv[i][0]=='-')
    {
      if(argv[i][1]=='s')
      {
        ++i;
        s = atoi(argv[i++]);
        m = n = p = s;
        if(argv[i][0]!='-')
        {
          n = atoi(argv[i++]);
          p = atoi(argv[i++]);
        }
      }
      else if(argv[i][1]=='c')
      {
        ++i;
        cache_size1 = atoi(argv[i++]);
        if(argv[i][0]!='-')
        {
          cache_size2 = atoi(argv[i++]);
          if(argv[i][0]!='-')
            cache_size3 = atoi(argv[i++]);
        }
      }
      else if(argv[i][1]=='t')
      {
        tries = atoi(argv[++i]);
        ++i;
      }
      else if(argv[i][1]=='p')
      {
        ++i;
        rep = atoi(argv[i++]);
      }
    }
    else
    {
      need_help = true;
      break;
    }
  }
  if(need_help)
  {
    std::cout << argv[0] << " -s <matrix sizes> -c <cache sizes> -t <nb tries> -p <nb repeats>\n";
    std::cout << " <matrix sizes> : size\n";
    std::cout << " <matrix sizes> : rows columns depth\n";
    return 1;
  }
  #if EIGEN_VERSION_AT_LEAST(3,2,90)
  if(cache_size1>0)
    setCpuCacheSizes(cache_size1,cache_size2,cache_size3);
  #endif
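  // The -c option feeds setCpuCacheSizes() and thus overrides the cache sizes
  // detected above, which in turn drive the blocking sizes computed below.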
  A a(m,p); a.setRandom();
  B b(p,n); b.setRandom();
  C c(m,n); c.setOnes();
  C rc = c;
  std::cout << "Matrix sizes = " << m << "x" << p << " * " << p << "x" << n << "\n";
  std::ptrdiff_t mc(m), nc(n), kc(p);
  internal::computeProductBlockingSizes<Scalar,Scalar>(kc, mc, nc);
  std::cout << "blocking size (mc x kc x nc) = " << mc << " x " << kc << " x " << nc << "\n";
  C r = c;
  // check the parallel product is correct
  #if defined EIGEN_HAS_OPENMP
  Eigen::initParallel();
  int procs = omp_get_max_threads();
  if(procs>1)
  {
    #ifdef HAVE_BLAS
    blas_gemm(a,b,r);
    #else
    omp_set_num_threads(1);
    r.noalias() += a * b;
    omp_set_num_threads(procs);
    #endif
    c.noalias() += a * b;
    if(!r.isApprox(c)) std::cerr << "Warning, your parallel product is crap!\n\n";
  }
  #elif defined HAVE_BLAS
  blas_gemm(a,b,r);
  c.noalias() += a * b;
  if(!r.isApprox(c)) {
    std::cout << (r - c).norm()/r.norm() << "\n";
    std::cerr << "Warning, your product is crap!\n\n";
  }
  #else
  if(1.*m*n*p<2000.*2000*2000)
  {
    gemm(a,b,c);
    r.noalias() += a.cast<Scalar>().lazyProduct(b.cast<Scalar>());
    if(!r.isApprox(c)) {
      std::cout << (r - c).norm()/r.norm() << "\n";
      std::cerr << "Warning, your product is crap!\n\n";
    }
  }
  #endif
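  // Each product performs 2*m*n*p floating point operations (one multiply and
  // one add per term), which is what the GFLOPS figures below are based on.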
  #ifdef HAVE_BLAS
  BenchTimer tblas;
  c = rc;
  BENCH(tblas, tries, rep, blas_gemm(a,b,c));
  std::cout << "blas cpu " << tblas.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tblas.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tblas.total(CPU_TIMER) << "s)\n";
  std::cout << "blas real " << tblas.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tblas.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tblas.total(REAL_TIMER) << "s)\n";
  #endif
  // warm start: reading a and b through norm() touches all of their memory before
  // timing; the comparison is never expected to hold, it only prevents the reads
  // from being optimized away
  if(b.norm()+a.norm()==123.554) std::cout << "\n";
  BenchTimer tmt;
  c = rc;
  BENCH(tmt, tries, rep, gemm(a,b,c));
  std::cout << "eigen cpu " << tmt.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(CPU_TIMER) << "s)\n";
  std::cout << "eigen real " << tmt.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(REAL_TIMER) << "s)\n";
  #ifdef EIGEN_HAS_OPENMP
  if(procs>1)
  {
    BenchTimer tmono;
    omp_set_num_threads(1);
    Eigen::setNbThreads(1);
    c = rc;
    BENCH(tmono, tries, rep, gemm(a,b,c));
    std::cout << "eigen mono cpu " << tmono.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmono.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmono.total(CPU_TIMER) << "s)\n";
    std::cout << "eigen mono real " << tmono.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmono.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmono.total(REAL_TIMER) << "s)\n";
    std::cout << "mt speed up x" << tmono.best(CPU_TIMER) / tmt.best(REAL_TIMER) << " => " << (100.0*tmono.best(CPU_TIMER) / tmt.best(REAL_TIMER))/procs << "%\n";
  }
  #endif
  if(1.*m*n*p<30*30*30)
  {
    BenchTimer tmt;
    c = rc;
    BENCH(tmt, tries, rep, c.noalias()+=a.lazyProduct(b));
    std::cout << "lazy cpu " << tmt.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(CPU_TIMER) << "s)\n";
    std::cout << "lazy real " << tmt.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(REAL_TIMER) << "s)\n";
  }
  #ifdef DECOUPLED
  if((NumTraits<A::Scalar>::IsComplex) && (NumTraits<B::Scalar>::IsComplex))
  {
    M ar(m,p); ar.setRandom();
    M ai(m,p); ai.setRandom();
    M br(p,n); br.setRandom();
    M bi(p,n); bi.setRandom();
    M cr(m,n); cr.setRandom();
    M ci(m,n); ci.setRandom();
    BenchTimer t;
    BENCH(t, tries, rep, matlab_cplx_cplx(ar,ai,br,bi,cr,ci));
    std::cout << "\"matlab\" cpu " << t.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << t.total(CPU_TIMER) << "s)\n";
    std::cout << "\"matlab\" real " << t.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
  }
  if((!NumTraits<A::Scalar>::IsComplex) && (NumTraits<B::Scalar>::IsComplex))
  {
    M a(m,p); a.setRandom();
    M br(p,n); br.setRandom();
    M bi(p,n); bi.setRandom();
    M cr(m,n); cr.setRandom();
    M ci(m,n); ci.setRandom();
    BenchTimer t;
    BENCH(t, tries, rep, matlab_real_cplx(a,br,bi,cr,ci));
    std::cout << "\"matlab\" cpu " << t.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << t.total(CPU_TIMER) << "s)\n";
    std::cout << "\"matlab\" real " << t.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
  }
  if((NumTraits<A::Scalar>::IsComplex) && (!NumTraits<B::Scalar>::IsComplex))
  {
    M ar(m,p); ar.setRandom();
    M ai(m,p); ai.setRandom();
    M b(p,n); b.setRandom();
    M cr(m,n); cr.setRandom();
    M ci(m,n); ci.setRandom();
    BenchTimer t;
    BENCH(t, tries, rep, matlab_cplx_real(ar,ai,b,cr,ci));
    std::cout << "\"matlab\" cpu " << t.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << t.total(CPU_TIMER) << "s)\n";
    std::cout << "\"matlab\" real " << t.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
  }
  #endif
  return 0;
}