re-implement stableNorm using a homemade blockwise and vectorization-friendly algorithm (slow without vectorization)
Gael Guennebaud 2009-07-17 16:22:39 +02:00
parent 15ed32dd6e
commit 32b08ac971
8 changed files with 256 additions and 182 deletions
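Editorial note: the idea behind the new stableNorm can be illustrated with a small standalone sketch (plain C++, hypothetical name blockwiseStableNorm; an illustration of the technique, not the Eigen implementation). The vector is processed in fixed-size blocks, the largest absolute value seen so far acts as the scale, and the accumulated sum of squares is re-expressed whenever the scale grows, so no intermediate square can overflow or underflow.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Scaled sum of squares accumulated block by block; 'scale' tracks the
// largest |x| seen so far and every square added is (x/scale)^2 <= 1.
double blockwiseStableNorm(const std::vector<double>& v, std::size_t blockSize = 4096)
{
  double scale = 0.0, invScale = 1.0, ssq = 0.0;
  for (std::size_t bi = 0; bi < v.size(); bi += blockSize)
  {
    std::size_t end = std::min(v.size(), bi + blockSize);
    // pass 1 over the block: largest absolute value
    double m = 0.0;
    for (std::size_t i = bi; i < end; ++i) m = std::max(m, std::abs(v[i]));
    if (m > scale)
    {
      ssq *= (scale / m) * (scale / m); // re-express the sum w.r.t. the new scale
      scale = m;
      invScale = 1.0 / scale;
    }
    // pass 2 over the block: accumulate (x/scale)^2
    for (std::size_t i = bi; i < end; ++i)
      ssq += (v[i] * invScale) * (v[i] * invScale);
  }
  return scale * std::sqrt(ssq); // undo the scaling once, at the end
}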


@@ -161,6 +161,7 @@ namespace Eigen {
#include "src/Core/CwiseNullaryOp.h"
#include "src/Core/CwiseUnaryView.h"
#include "src/Core/Dot.h"
#include "src/Core/StableNorm.h"
#include "src/Core/DiagonalProduct.h"
#include "src/Core/SolveTriangular.h"
#include "src/Core/MapBase.h"


@@ -33,8 +33,8 @@
* \param MatrixType the type of the object in which we are taking a block
* \param BlockRows the number of rows of the block we are taking at compile time (optional)
* \param BlockCols the number of columns of the block we are taking at compile time (optional)
* \param _PacketAccess allows to enforce aligned loads and stores if set to ForceAligned.
* The default is AsRequested. This parameter is internally used by Eigen
* \param _PacketAccess allows to enforce aligned loads and stores if set to \b ForceAligned.
* The default is \b AsRequested. This parameter is internally used by Eigen
* in expressions such as \code mat.block() += other; \endcode and most of
* the time this is the only way it is used.
* \param _DirectAccessStatus \internal used for partial specialization


@@ -292,131 +292,6 @@ inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real MatrixBase<
return ei_sqrt(squaredNorm());
}
/** \returns the \em l2 norm of \c *this using a numerically more stable
* algorithm.
*
* \sa norm(), dot(), squaredNorm(), blueNorm()
*/
template<typename Derived>
inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
MatrixBase<Derived>::stableNorm() const
{
return this->cwise().abs().redux(ei_scalar_hypot_op<RealScalar>());
}
/** \returns the \em l2 norm of \c *this using Blue's algorithm.
* A Portable Fortran Program to Find the Euclidean Norm of a Vector,
* ACM TOMS, Vol 4, Issue 1, 1978.
*
* \sa norm(), dot(), squaredNorm(), stableNorm()
*/
template<typename Derived>
inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
MatrixBase<Derived>::blueNorm() const
{
static int nmax = -1;
static Scalar b1, b2, s1m, s2m, overfl, rbig, relerr;
int n;
Scalar ax, abig, amed, asml;
if(nmax <= 0)
{
int nbig, ibeta, it, iemin, iemax, iexp;
Scalar abig, eps;
// This program calculates the machine-dependent constants
// b1, b2, s1m, s2m, relerr, overfl, nmax
// from the "basic" machine-dependent numbers
// nbig, ibeta, it, iemin, iemax, rbig.
// The following define the basic machine-dependent constants.
// For portability, the PORT subprograms "i1mach" and "r1mach"
// are used. For any specific computer, each of the assignment
// statements can be replaced
nbig = std::numeric_limits<int>::max(); // largest integer
ibeta = std::numeric_limits<Scalar>::radix; //NumTraits<Scalar>::Base; // base for floating-point numbers
it = std::numeric_limits<Scalar>::digits; //NumTraits<Scalar>::Mantissa; // number of base-beta digits in mantissa
iemin = std::numeric_limits<Scalar>::min_exponent; // minimum exponent
iemax = std::numeric_limits<Scalar>::max_exponent; // maximum exponent
rbig = std::numeric_limits<Scalar>::max(); // largest floating-point number
// Check the basic machine-dependent constants.
if(iemin > 1 - 2*it || 1+it>iemax || (it==2 && ibeta<5)
|| (it<=4 && ibeta <= 3 ) || it<2)
{
ei_assert(false && "the algorithm cannot be guaranteed on this computer");
}
iexp = -((1-iemin)/2);
b1 = std::pow(ibeta, iexp); // lower boundary of midrange
iexp = (iemax + 1 - it)/2;
b2 = std::pow(ibeta,iexp); // upper boundary of midrange
iexp = (2-iemin)/2;
s1m = std::pow(ibeta,iexp); // scaling factor for lower range
iexp = - ((iemax+it)/2);
s2m = std::pow(ibeta,iexp); // scaling factor for upper range
overfl = rbig*s2m; // overflow boundary for abig
eps = std::pow(ibeta, 1-it);
relerr = ei_sqrt(eps); // tolerance for neglecting asml
abig = 1.0/eps - 1.0;
if (Scalar(nbig)>abig) nmax = abig; // largest safe n
else nmax = nbig;
}
n = size();
if(n==0)
return 0;
asml = Scalar(0);
amed = Scalar(0);
abig = Scalar(0);
for(int j=0; j<n; ++j)
{
ax = ei_abs(coeff(j));
if(ax > b2) abig += ei_abs2(ax*s2m);
else if(ax < b1) asml += ei_abs2(ax*s1m);
else amed += ei_abs2(ax);
}
if(abig > Scalar(0))
{
abig = ei_sqrt(abig);
if(abig > overfl)
{
ei_assert(false && "overflow");
return rbig;
}
if(amed > Scalar(0))
{
abig = abig/s2m;
amed = ei_sqrt(amed);
}
else
{
return abig/s2m;
}
}
else if(asml > Scalar(0))
{
if (amed > Scalar(0))
{
abig = ei_sqrt(amed);
amed = ei_sqrt(asml) / s1m;
}
else
{
return ei_sqrt(asml)/s1m;
}
}
else
{
return ei_sqrt(amed);
}
asml = std::min(abig, amed);
abig = std::max(abig, amed);
if(asml <= abig*relerr)
return abig;
else
return abig * ei_sqrt(Scalar(1) + ei_abs2(asml/abig));
}
/** \returns an expression of the quotient of *this by its own norm.
*
* \only_for_vectors


@@ -124,9 +124,6 @@ template<typename Scalar> struct ei_scalar_hypot_op EIGEN_EMPTY_STRUCT {
// typedef typename NumTraits<Scalar>::Real result_type;
EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& _x, const Scalar& _y) const
{
// typedef typename NumTraits<T>::Real RealScalar;
// RealScalar _x = ei_abs(x);
// RealScalar _y = ei_abs(y);
Scalar p = std::max(_x, _y);
Scalar q = std::min(_x, _y);
Scalar qp = q/p;
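Editorial note: the max/min/ratio arrangement above is the standard hypot() trick. Mathematically p*sqrt(1 + (q/p)^2) equals sqrt(p^2 + q^2), but it never squares the larger operand, so it cannot overflow prematurely. A standalone comparison (plain C++, illustrative names, not Eigen code):

#include <algorithm>
#include <cmath>
#include <cstdio>

// Naive vs. scaled hypotenuse; the naive form overflows as soon as x*x does.
double naiveHypot(double x, double y) { return std::sqrt(x*x + y*y); }

double scaledHypot(double x, double y)
{
  double p = std::max(std::abs(x), std::abs(y));
  double q = std::min(std::abs(x), std::abs(y));
  if (p == 0.0) return 0.0;           // avoid 0/0 for the zero case
  double qp = q / p;                  // always in [0,1]
  return p * std::sqrt(1.0 + qp*qp);  // never forms x*x or y*y
}

int main()
{
  double x = 1e200, y = 3e200;
  std::printf("naive : %g\n", naiveHypot(x, y));  // inf, because x*x overflows
  std::printf("scaled: %g\n", scaledHypot(x, y)); // about 3.16e200
  return 0;
}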


@@ -384,6 +384,7 @@ template<typename Derived> class MatrixBase
RealScalar norm() const;
RealScalar stableNorm() const;
RealScalar blueNorm() const;
RealScalar hypotNorm() const;
const PlainMatrixType normalized() const;
void normalize();

Eigen/src/Core/StableNorm.h (new file, 194 lines)

@@ -0,0 +1,194 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#ifndef EIGEN_STABLENORM_H
#define EIGEN_STABLENORM_H
template<typename ExpressionType, typename Scalar>
inline void ei_stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale)
{
Scalar max = bl.cwise().abs().maxCoeff();
if (max>scale)
{
ssq = ssq * ei_abs2(scale/max);
scale = max;
invScale = Scalar(1)/scale;
}
// TODO if the max is much much smaller than the current scale,
// then we can neglect this sub vector
ssq += (bl*invScale).squaredNorm();
}
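Editorial note: the kernel maintains the invariant that ssq equals the sum of squares of all coefficients processed so far, each divided by the current scale (the largest absolute value encountered). When a block raises the scale from s_old to s_new, the accumulated sum is re-expressed with the identity

\[
\sum_i \left(\frac{x_i}{s_{\mathrm{new}}}\right)^2
  = \left(\frac{s_{\mathrm{old}}}{s_{\mathrm{new}}}\right)^2 \sum_i \left(\frac{x_i}{s_{\mathrm{old}}}\right)^2 ,
\qquad
\|x\|_2 = s_{\mathrm{final}} \sqrt{\mathrm{ssq}} ,
\]

which is exactly the ssq = ssq * ei_abs2(scale/max) update above.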
/** \returns the \em l2 norm of \c *this avoiding underflow and overflow.
* This version uses a blockwise two-pass algorithm:
*  1 - find the coefficient with the largest absolute value, \c s;
*  2 - compute \f$ s \Vert \frac{*this}{s} \Vert \f$ in the standard way.
*
* For architectures/scalar types supporting vectorization, this version
* is faster than blueNorm(); otherwise blueNorm() is much faster.
*
* \sa norm(), blueNorm(), hypotNorm()
*/
template<typename Derived>
inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
MatrixBase<Derived>::stableNorm() const
{
const int blockSize = 4096;
RealScalar scale = 0;
RealScalar invScale;
RealScalar ssq = 0; // sum of square
enum {
Alignment = (int(Flags)&DirectAccessBit) || (int(Flags)&AlignedBit) ? ForceAligned : AsRequested
};
int n = size();
int bi=0;
if ((int(Flags)&DirectAccessBit) && !(int(Flags)&AlignedBit))
{
bi = ei_alignmentOffset(&const_cast_derived().coeffRef(0), n);
if (bi>0)
ei_stable_norm_kernel(start(bi), ssq, scale, invScale);
}
for (; bi<n; bi+=blockSize)
ei_stable_norm_kernel(VectorBlock<Derived,Dynamic,Alignment>(derived(),bi,std::min(blockSize, n - bi)), ssq, scale, invScale);
return scale * ei_sqrt(ssq);
}
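A short usage sketch of the new entry point (hedged: it assumes the development API at the time of this commit, and the constant 1e200 is chosen only so that squaring it overflows a double):

#include <Eigen/Core>
#include <iostream>
using namespace Eigen;

int main()
{
  // every coefficient is ~1e200, so its square overflows a double
  VectorXd v = VectorXd::Constant(1000, 1e200);
  std::cout << v.norm()       << "\n"; // inf: squaredNorm() overflows
  std::cout << v.stableNorm() << "\n"; // ~3.16e201, blockwise scaled accumulation
  std::cout << v.blueNorm()   << "\n"; // same value, Blue's algorithm
  std::cout << v.hypotNorm()  << "\n"; // same value, chained hypot() (slow)
  return 0;
}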
/** \returns the \em l2 norm of \c *this using Blue's algorithm.
* A Portable Fortran Program to Find the Euclidean Norm of a Vector,
* ACM TOMS, Vol 4, Issue 1, 1978.
*
* For architectures/scalar types without vectorization, this version
* is much faster than stableNorm(); otherwise stableNorm() is faster.
*
* \sa norm(), stableNorm(), hypotNorm()
*/
template<typename Derived>
inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
MatrixBase<Derived>::blueNorm() const
{
static int nmax = -1;
static RealScalar b1, b2, s1m, s2m, overfl, rbig, relerr;
if(nmax <= 0)
{
int nbig, ibeta, it, iemin, iemax, iexp;
RealScalar abig, eps;
// This program calculates the machine-dependent constants
// b1, b2, s1m, s2m, relerr, overfl, nmax
// from the "basic" machine-dependent numbers
// nbig, ibeta, it, iemin, iemax, rbig.
// The following define the basic machine-dependent constants.
// For portability, the PORT subprograms "i1mach" and "r1mach"
// are used. For any specific computer, each of the assignment
// statements below can be replaced by explicit constants.
nbig = std::numeric_limits<int>::max(); // largest integer
ibeta = std::numeric_limits<RealScalar>::radix; // base for floating-point numbers
it = std::numeric_limits<RealScalar>::digits; // number of base-beta digits in mantissa
iemin = std::numeric_limits<RealScalar>::min_exponent; // minimum exponent
iemax = std::numeric_limits<RealScalar>::max_exponent; // maximum exponent
rbig = std::numeric_limits<RealScalar>::max(); // largest floating-point number
// Check the basic machine-dependent constants.
if(iemin > 1 - 2*it || 1+it>iemax || (it==2 && ibeta<5)
|| (it<=4 && ibeta <= 3 ) || it<2)
{
ei_assert(false && "the algorithm cannot be guaranteed on this computer");
}
iexp = -((1-iemin)/2);
b1 = std::pow(ibeta, iexp); // lower boundary of midrange
iexp = (iemax + 1 - it)/2;
b2 = std::pow(ibeta,iexp); // upper boundary of midrange
iexp = (2-iemin)/2;
s1m = std::pow(ibeta,iexp); // scaling factor for lower range
iexp = - ((iemax+it)/2);
s2m = std::pow(ibeta,iexp); // scaling factor for upper range
overfl = rbig*s2m; // overflow boundary for abig
eps = std::pow(ibeta, 1-it);
relerr = ei_sqrt(eps); // tolerance for neglecting asml
abig = 1.0/eps - 1.0;
if (RealScalar(nbig)>abig) nmax = abig; // largest safe n
else nmax = nbig;
}
int n = size();
RealScalar ab2 = b2 / RealScalar(n);
RealScalar asml = RealScalar(0);
RealScalar amed = RealScalar(0);
RealScalar abig = RealScalar(0);
for(int j=0; j<n; ++j)
{
RealScalar ax = ei_abs(coeff(j));
if(ax > ab2) abig += ei_abs2(ax*s2m);
else if(ax < b1) asml += ei_abs2(ax*s1m);
else amed += ei_abs2(ax);
}
if(abig > RealScalar(0))
{
abig = ei_sqrt(abig);
if(abig > overfl)
{
ei_assert(false && "overflow");
return rbig;
}
if(amed > RealScalar(0))
{
abig = abig/s2m;
amed = ei_sqrt(amed);
}
else
return abig/s2m;
}
else if(asml > RealScalar(0))
{
if (amed > RealScalar(0))
{
abig = ei_sqrt(amed);
amed = ei_sqrt(asml) / s1m;
}
else
return ei_sqrt(asml)/s1m;
}
else
return ei_sqrt(amed);
asml = std::min(abig, amed);
abig = std::max(abig, amed);
if(asml <= abig*relerr)
return abig;
else
return abig * ei_sqrt(RealScalar(1) + ei_abs2(asml/abig));
}
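Editorial summary of the recombination step, in the notation of the code above: the coefficients are binned by magnitude into small, medium and big ranges, each bin accumulating pre-scaled squares so that nothing overflows or underflows,

\[
a_{\mathrm{sml}} = \sum_{|x_i| < b_1} (s_{1m} x_i)^2 ,\qquad
a_{\mathrm{med}} = \sum_{b_1 \le |x_i| \le b_2/n} x_i^2 ,\qquad
a_{\mathrm{big}} = \sum_{|x_i| > b_2/n} (s_{2m} x_i)^2 ,
\]

and the two largest surviving contributions u >= v, after undoing their scalings, are merged as

\[
\|x\|_2 \approx
\begin{cases}
u , & v \le u \cdot \mathrm{relerr} ,\\
u \sqrt{1 + (v/u)^2} , & \text{otherwise},
\end{cases}
\]

so the negligible bin is simply dropped when it cannot affect the result.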
/** \returns the \em l2 norm of \c *this avoiding underflow and overflow.
* This version uses a concatenation of hypot() calls, and it is very slow.
*
* \sa norm(), stableNorm()
*/
template<typename Derived>
inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
MatrixBase<Derived>::hypotNorm() const
{
return this->cwise().abs().redux(ei_scalar_hypot_op<RealScalar>());
}
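Editorial note: hypotNorm() reduces the absolute values through repeated hypot-style steps, e.g. hypot(hypot(|x_0|, |x_1|), |x_2|), and so on, which is robust against overflow and underflow but costs roughly one division and one square root per coefficient, hence the slowness mentioned above.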
#endif // EIGEN_STABLENORM_H


@@ -13,7 +13,7 @@ EIGEN_DONT_INLINE typename T::Scalar sqsumNorm(const T& v)
template<typename T>
EIGEN_DONT_INLINE typename T::Scalar hypotNorm(const T& v)
{
return v.stableNorm();
return v.hypotNorm();
}
template<typename T>
@@ -56,23 +56,7 @@ EIGEN_DONT_INLINE typename T::Scalar twopassNorm(T& v)
template<typename T>
EIGEN_DONT_INLINE typename T::Scalar bl2passNorm(T& v)
{
const int blockSize = 4096;
typedef typename T::Scalar Scalar;
Scalar s = 0;
Scalar ssq = 0;
for (int bi=0; bi<v.size(); bi+=blockSize)
{
int r = std::min(blockSize, v.size() - bi);
Eigen::Block<typename ei_cleantype<T>::type,Eigen::Dynamic,1,Eigen::ForceAligned> sv(v,bi,0,r,1);
Scalar m = sv.cwise().abs().maxCoeff();
if (m>s)
{
ssq = ssq * ei_abs2(s/m);
s = m;
}
ssq += (sv/s).squaredNorm();
}
return s*ei_sqrt(ssq);
return v.stableNorm();
}
template<typename T>
@@ -163,6 +147,19 @@ EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v)
Packet ax_s1m = ei_pmul(ax,ps1m);
Packet maskBig = ei_plt(pb2,ax);
Packet maskSml = ei_plt(ax,pb1);
// Packet maskMed = ei_pand(maskSml,maskBig);
// Packet scale = ei_pset1(Scalar(0));
// scale = ei_por(scale, ei_pand(maskBig,ps2m));
// scale = ei_por(scale, ei_pand(maskSml,ps1m));
// scale = ei_por(scale, ei_pandnot(ei_pset1(Scalar(1)),maskMed));
// ax = ei_pmul(ax,scale);
// ax = ei_pmul(ax,ax);
// pabig = ei_padd(pabig, ei_pand(maskBig, ax));
// pasml = ei_padd(pasml, ei_pand(maskSml, ax));
// pamed = ei_padd(pamed, ei_pandnot(ax,maskMed));
pabig = ei_padd(pabig, ei_pand(maskBig, ei_pmul(ax_s2m,ax_s2m)));
pasml = ei_padd(pasml, ei_pand(maskSml, ei_pmul(ax_s1m,ax_s1m)));
pamed = ei_padd(pamed, ei_pandnot(ei_pmul(ax,ax),ei_pand(maskSml,maskBig)));
@@ -215,7 +212,7 @@ EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v)
}
#define BENCH_PERF(NRM) { \
Eigen::BenchTimer tf, td; tf.reset(); td.reset();\
Eigen::BenchTimer tf, td, tcf; tf.reset(); td.reset(); tcf.reset();\
for (int k=0; k<tries; ++k) { \
tf.start(); \
for (int i=0; i<iters; ++i) NRM(vf); \
@@ -226,7 +223,12 @@ EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v)
for (int i=0; i<iters; ++i) NRM(vd); \
td.stop(); \
} \
std::cout << #NRM << "\t" << tf.value() << " " << td.value() << "\n"; \
for (int k=0; k<std::max(1,tries/3); ++k) { \
tcf.start(); \
for (int i=0; i<iters; ++i) NRM(vcf); \
tcf.stop(); \
} \
std::cout << #NRM << "\t" << tf.value() << " " << td.value() << " " << tcf.value() << "\n"; \
}
void check_accuracy(double basef, double based, int s)
@@ -263,7 +265,7 @@ void check_accuracy_var(int ef0, int ef1, int ed0, int ed1, int s)
std::cout << "pblueNorm\t" << pblueNorm(vf) << "\t" << pblueNorm(vd) << "\t" << blueNorm(vf.cast<long double>()) << "\t" << blueNorm(vd.cast<long double>()) << "\n";
std::cout << "lapackNorm\t" << lapackNorm(vf) << "\t" << lapackNorm(vd) << "\t" << lapackNorm(vf.cast<long double>()) << "\t" << lapackNorm(vd.cast<long double>()) << "\n";
std::cout << "twopassNorm\t" << twopassNorm(vf) << "\t" << twopassNorm(vd) << "\t" << twopassNorm(vf.cast<long double>()) << "\t" << twopassNorm(vd.cast<long double>()) << "\n";
std::cout << "bl2passNorm\t" << bl2passNorm(vf) << "\t" << bl2passNorm(vd) << "\t" << bl2passNorm(vf.cast<long double>()) << "\t" << bl2passNorm(vd.cast<long double>()) << "\n";
// std::cout << "bl2passNorm\t" << bl2passNorm(vf) << "\t" << bl2passNorm(vd) << "\t" << bl2passNorm(vf.cast<long double>()) << "\t" << bl2passNorm(vd.cast<long double>()) << "\n";
}
int main(int argc, char** argv)
@@ -273,34 +275,7 @@ int main(int argc, char** argv)
double y = 1.1345743233455785456788e12 * ei_random<double>();
VectorXf v = VectorXf::Ones(1024) * y;
std::cerr << "Performance (out of cache):\n";
{
int iters = 1;
VectorXf vf = VectorXf::Random(1024*1024*32) * y;
VectorXd vd = VectorXd::Random(1024*1024*32) * y;
BENCH_PERF(sqsumNorm);
BENCH_PERF(blueNorm);
BENCH_PERF(pblueNorm);
BENCH_PERF(lapackNorm);
BENCH_PERF(hypotNorm);
BENCH_PERF(twopassNorm);
BENCH_PERF(bl2passNorm);
}
std::cerr << "\nPerformance (in cache):\n";
{
int iters = 100000;
VectorXf vf = VectorXf::Random(512) * y;
VectorXd vd = VectorXd::Random(512) * y;
BENCH_PERF(sqsumNorm);
BENCH_PERF(blueNorm);
BENCH_PERF(pblueNorm);
BENCH_PERF(lapackNorm);
BENCH_PERF(hypotNorm);
BENCH_PERF(twopassNorm);
BENCH_PERF(bl2passNorm);
}
return 0;
// return 0;
int s = 10000;
double basef_ok = 1.1345743233455785456788e15;
double based_ok = 1.1345743233455785456788e95;
@@ -317,7 +292,7 @@ return 0;
check_accuracy(basef_ok, based_ok, s);
std::cerr << "\nUnderflow:\n";
check_accuracy(basef_under, based_under, 1);
check_accuracy(basef_under, based_under, s);
std::cerr << "\nOverflow:\n";
check_accuracy(basef_over, based_over, s);
@@ -335,4 +310,35 @@ return 0;
check_accuracy_var(-27,20,-302,-190,s);
std::cout << "\n";
}
std::cout.precision(4);
std::cerr << "Performance (out of cache):\n";
{
int iters = 1;
VectorXf vf = VectorXf::Random(1024*1024*32) * y;
VectorXd vd = VectorXd::Random(1024*1024*32) * y;
VectorXcf vcf = VectorXcf::Random(1024*1024*32) * y;
BENCH_PERF(sqsumNorm);
BENCH_PERF(blueNorm);
// BENCH_PERF(pblueNorm);
// BENCH_PERF(lapackNorm);
// BENCH_PERF(hypotNorm);
// BENCH_PERF(twopassNorm);
BENCH_PERF(bl2passNorm);
}
std::cerr << "\nPerformance (in cache):\n";
{
int iters = 100000;
VectorXf vf = VectorXf::Random(512) * y;
VectorXd vd = VectorXd::Random(512) * y;
VectorXcf vcf = VectorXcf::Random(512) * y;
BENCH_PERF(sqsumNorm);
BENCH_PERF(blueNorm);
// BENCH_PERF(pblueNorm);
// BENCH_PERF(lapackNorm);
// BENCH_PERF(hypotNorm);
// BENCH_PERF(twopassNorm);
BENCH_PERF(bl2passNorm);
}
}


@@ -76,8 +76,8 @@ template<typename MatrixType> void adjoint(const MatrixType& m)
{
VERIFY_IS_MUCH_SMALLER_THAN(vzero.norm(), static_cast<RealScalar>(1));
VERIFY_IS_APPROX(v1.norm(), v1.stableNorm());
// NOTE disabled because it currently compiles for float and double only
// VERIFY_IS_APPROX(v1.blueNorm(), v1.stableNorm());
VERIFY_IS_APPROX(v1.blueNorm(), v1.stableNorm());
VERIFY_IS_APPROX(v1.hypotNorm(), v1.stableNorm());
}
// check compatibility of dot and adjoint
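As a minimal standalone counterpart of the test above (a hedged sketch, not part of the commit; it checks all four norms against a reference accumulated in long double, with a loose tolerance to allow for single-precision accumulation error):

#include <Eigen/Core>
#include <cassert>
#include <cmath>
using namespace Eigen;

int main()
{
  VectorXf v = VectorXf::Random(1000);
  // reference norm accumulated in extended precision
  long double acc = 0;
  for (int i = 0; i < v.size(); ++i)
    acc += (long double)v[i] * (long double)v[i];
  float r = (float)std::sqrt((double)acc);
  assert(std::fabs(v.norm()       - r) < 1e-3f * r);
  assert(std::fabs(v.stableNorm() - r) < 1e-3f * r);
  assert(std::fabs(v.blueNorm()   - r) < 1e-3f * r);
  assert(std::fabs(v.hypotNorm()  - r) < 1e-3f * r);
  return 0;
}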