Mirror of https://gitlab.com/libeigen/eigen.git
Make stableNorm and blueNorm compatible with 2D matrices.
Commit 403f09ccef (parent 4213b63f5c)
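For context, a minimal usage sketch (not part of the commit; it simply assumes an Eigen build with this change applied): after this commit the robust norm variants can be called directly on a 2D matrix, where they return the Frobenius norm computed with the respective overflow/underflow-safe algorithm.

// Hypothetical example, not from the commit: exercise the new matrix paths.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  // Badly scaled data: the entrywise squares underflow, so the naive norm loses the result.
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 50) * 1e-170;

  std::cout << A.norm()       << "\n"   // plain sqrt(sum of squares), may underflow to 0
            << A.stableNorm() << "\n"   // blockwise rescaled accumulation (stable_norm_impl)
            << A.blueNorm()   << "\n"   // Blue's algorithm, now iterating over all inner vectors
            << A.hypotNorm()  << "\n";  // hypot-based reduction
  return 0;
}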
@@ -50,6 +50,71 @@ inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& sc
     ssq += (bl*invScale).squaredNorm();
 }
 
+template<typename VectorType, typename RealScalar>
+void stable_norm_impl_inner_step(const VectorType &vec, RealScalar& ssq, RealScalar& scale, RealScalar& invScale)
+{
+  typedef typename VectorType::Scalar Scalar;
+  const Index blockSize = 4096;
+
+  typedef typename internal::nested_eval<VectorType,2>::type VectorTypeCopy;
+  typedef typename internal::remove_all<VectorTypeCopy>::type VectorTypeCopyClean;
+  const VectorTypeCopy copy(vec);
+
+  enum {
+    CanAlign = ( (int(VectorTypeCopyClean::Flags)&DirectAccessBit)
+                 || (int(internal::evaluator<VectorTypeCopyClean>::Alignment)>0) // FIXME Alignment)>0 might not be enough
+               ) && (blockSize*sizeof(Scalar)*2<EIGEN_STACK_ALLOCATION_LIMIT)
+                 && (EIGEN_MAX_STATIC_ALIGN_BYTES>0) // if we cannot allocate on the stack, then let's not bother about this optimization
+  };
+  typedef typename internal::conditional<CanAlign, Ref<const Matrix<Scalar,Dynamic,1,0,blockSize,1>, internal::evaluator<VectorTypeCopyClean>::Alignment>,
+                                                   typename VectorTypeCopyClean::ConstSegmentReturnType>::type SegmentWrapper;
+  Index n = vec.size();
+
+  Index bi = internal::first_default_aligned(copy);
+  if (bi>0)
+    internal::stable_norm_kernel(copy.head(bi), ssq, scale, invScale);
+  for (; bi<n; bi+=blockSize)
+    internal::stable_norm_kernel(SegmentWrapper(copy.segment(bi,numext::mini(blockSize, n - bi))), ssq, scale, invScale);
+}
+
+template<typename VectorType>
+typename VectorType::RealScalar
+stable_norm_impl(const VectorType &vec, typename enable_if<VectorType::IsVectorAtCompileTime>::type* = 0 )
+{
+  using std::sqrt;
+  using std::abs;
+
+  Index n = vec.size();
+
+  if(n==1)
+    return abs(vec.coeff(0));
+
+  typedef typename VectorType::RealScalar RealScalar;
+  RealScalar scale(0);
+  RealScalar invScale(1);
+  RealScalar ssq(0); // sum of squares
+
+  stable_norm_impl_inner_step(vec, ssq, scale, invScale);
+
+  return scale * sqrt(ssq);
+}
+
+template<typename MatrixType>
+typename MatrixType::RealScalar
+stable_norm_impl(const MatrixType &mat, typename enable_if<!MatrixType::IsVectorAtCompileTime>::type* = 0 )
+{
+  using std::sqrt;
+
+  typedef typename MatrixType::RealScalar RealScalar;
+  RealScalar scale(0);
+  RealScalar invScale(1);
+  RealScalar ssq(0); // sum of squares
+
+  for(Index j=0; j<mat.outerSize(); ++j)
+    stable_norm_impl_inner_step(mat.innerVector(j), ssq, scale, invScale);
+  return scale * sqrt(ssq);
+}
+
 template<typename Derived>
 inline typename NumTraits<typename traits<Derived>::Scalar>::Real
 blueNorm_impl(const EigenBase<Derived>& _vec)
@@ -98,12 +163,16 @@ blueNorm_impl(const EigenBase<Derived>& _vec)
   RealScalar asml = RealScalar(0);
   RealScalar amed = RealScalar(0);
   RealScalar abig = RealScalar(0);
-  for(typename Derived::InnerIterator it(vec, 0); it; ++it)
+
+  for(Index j=0; j<vec.outerSize(); ++j)
   {
-    RealScalar ax = abs(it.value());
-    if(ax > ab2) abig += numext::abs2(ax*s2m);
-    else if(ax < b1) asml += numext::abs2(ax*s1m);
-    else amed += numext::abs2(ax);
+    for(typename Derived::InnerIterator it(vec, j); it; ++it)
+    {
+      RealScalar ax = abs(it.value());
+      if(ax > ab2) abig += numext::abs2(ax*s2m);
+      else if(ax < b1) asml += numext::abs2(ax*s1m);
+      else amed += numext::abs2(ax);
+    }
   }
   if(amed!=amed)
     return amed; // we got a NaN
@@ -156,36 +225,7 @@ template<typename Derived>
 inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
 MatrixBase<Derived>::stableNorm() const
 {
-  using std::sqrt;
-  using std::abs;
-  const Index blockSize = 4096;
-  RealScalar scale(0);
-  RealScalar invScale(1);
-  RealScalar ssq(0); // sum of square
-
-  typedef typename internal::nested_eval<Derived,2>::type DerivedCopy;
-  typedef typename internal::remove_all<DerivedCopy>::type DerivedCopyClean;
-  const DerivedCopy copy(derived());
-
-  enum {
-    CanAlign = ( (int(DerivedCopyClean::Flags)&DirectAccessBit)
-                 || (int(internal::evaluator<DerivedCopyClean>::Alignment)>0) // FIXME Alignment)>0 might not be enough
-               ) && (blockSize*sizeof(Scalar)*2<EIGEN_STACK_ALLOCATION_LIMIT)
-                 && (EIGEN_MAX_STATIC_ALIGN_BYTES>0) // if we cannot allocate on the stack, then let's not bother about this optimization
-  };
-  typedef typename internal::conditional<CanAlign, Ref<const Matrix<Scalar,Dynamic,1,0,blockSize,1>, internal::evaluator<DerivedCopyClean>::Alignment>,
-                                                   typename DerivedCopyClean::ConstSegmentReturnType>::type SegmentWrapper;
-  Index n = size();
-
-  if(n==1)
-    return abs(this->coeff(0));
-
-  Index bi = internal::first_default_aligned(copy);
-  if (bi>0)
-    internal::stable_norm_kernel(copy.head(bi), ssq, scale, invScale);
-  for (; bi<n; bi+=blockSize)
-    internal::stable_norm_kernel(SegmentWrapper(copy.segment(bi,numext::mini(blockSize, n - bi))), ssq, scale, invScale);
-  return scale * sqrt(ssq);
+  return internal::stable_norm_impl(derived());
 }
 
 /** \returns the \em l2 norm of \c *this using the Blue's algorithm.
@@ -213,7 +253,10 @@ template<typename Derived>
 inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
 MatrixBase<Derived>::hypotNorm() const
 {
-  return this->cwiseAbs().redux(internal::scalar_hypot_op<RealScalar>());
+  if(size()==1)
+    return numext::abs(coeff(0,0));
+  else
+    return this->cwiseAbs().redux(internal::scalar_hypot_op<RealScalar>());
 }
 
 } // end namespace Eigen
@@ -204,8 +204,6 @@ void test_hypot()
   factor = internal::random<Scalar>();
   Scalar small = factor * ((std::numeric_limits<RealScalar>::min)() * RealScalar(1e4));
 
-  std::cout << big << " " << small << "\n";
-
   Scalar one (1),
          zero (0),
          sqrt2 (std::sqrt(2)),
@@ -234,6 +232,7 @@ void test_stable_norm()
   CALL_SUBTEST_1( stable_norm(Matrix<float, 1, 1>()) );
   CALL_SUBTEST_2( stable_norm(Vector4d()) );
   CALL_SUBTEST_3( stable_norm(VectorXd(internal::random<int>(10,2000))) );
+  CALL_SUBTEST_3( stable_norm(MatrixXd(internal::random<int>(10,200), internal::random<int>(10,200))) );
   CALL_SUBTEST_4( stable_norm(VectorXf(internal::random<int>(10,2000))) );
   CALL_SUBTEST_5( stable_norm(VectorXcd(internal::random<int>(10,2000))) );
   CALL_SUBTEST_6( stable_norm(VectorXcf(internal::random<int>(10,2000))) );