Mirror of https://gitlab.com/libeigen/eigen.git
Merged in rmlarsen/eigen (pull request PR-174)
Add matrix condition number estimation module.
This commit is contained in: commit ea7087ef31
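The user-facing addition is an rcond() method on the LLT, LDLT, FullPivLU and PartialPivLU decompositions, backed by the new ConditionEstimator module. A minimal usage sketch follows (illustrative only, not taken from the patch; matrix sizes and values are arbitrary):

#include <iostream>
#include <Eigen/Dense>

int main() {
  const int n = 100;
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);
  A = A * A.transpose() + 10.0 * Eigen::MatrixXd::Identity(n, n);  // symmetric positive definite

  Eigen::LLT<Eigen::MatrixXd> llt(A);            // O(n^3) factorization
  Eigen::PartialPivLU<Eigen::MatrixXd> lu(A);    // works for general square matrices

  std::cout << "LLT rcond estimate: " << llt.rcond() << "\n";
  std::cout << "LU  rcond estimate: " << lu.rcond() << "\n";
}

The factorization itself costs O(n^3); the rcond() call afterwards only performs a handful of triangular solves, i.e. O(n^2) extra work.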
@ -33,13 +33,13 @@
#ifdef EIGEN_EXCEPTIONS
#undef EIGEN_EXCEPTIONS
#endif

// All functions callable from CUDA code must be qualified with __device__
#define EIGEN_DEVICE_FUNC __host__ __device__

#else
#define EIGEN_DEVICE_FUNC

#endif

// When compiling CUDA device code with NVCC, pull in math functions from the
@ -296,7 +296,7 @@ inline static const char *SimdInstructionSetsInUse(void) {
// we use size_t frequently and we'll never remember to prepend it with std:: everytime just to
// ensure QNX/QCC support
using std::size_t;
// gcc 4.6.0 wants std:: for ptrdiff_t
using std::ptrdiff_t;

/** \defgroup Core_Module Core module
@ -440,6 +440,7 @@ using std::ptrdiff_t;
#include "src/Core/products/TriangularSolverVector.h"
#include "src/Core/BandMatrix.h"
#include "src/Core/CoreIterators.h"
#include "src/Core/ConditionEstimator.h"

#include "src/Core/BooleanRedux.h"
#include "src/Core/Select.h"
@ -13,7 +13,7 @@
#ifndef EIGEN_LDLT_H
#define EIGEN_LDLT_H

namespace Eigen {

namespace internal {
  template<typename MatrixType, int UpLo> struct LDLT_Traits;
@ -73,11 +73,11 @@ template<typename _MatrixType, int _UpLo> class LDLT
      * The default constructor is useful in cases in which the user intends to
      * perform decompositions via LDLT::compute(const MatrixType&).
      */
    LDLT()
      : m_matrix(),
        m_transpositions(),
        m_sign(internal::ZeroSign),
        m_isInitialized(false)
    {}

    /** \brief Default Constructor with memory preallocation
@ -168,7 +168,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
      * \note_about_checking_solutions
      *
      * More precisely, this method solves \f$ A x = b \f$ using the decomposition \f$ A = P^T L D L^* P \f$
      * by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$,
      * \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then
      * \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the
      * least-square solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function
@ -192,6 +192,15 @@ template<typename _MatrixType, int _UpLo> class LDLT
    template<typename InputType>
    LDLT& compute(const EigenBase<InputType>& matrix);

    /** \returns an estimate of the reciprocal condition number of the matrix of
      * which *this is the LDLT decomposition.
      */
    RealScalar rcond() const
    {
      eigen_assert(m_isInitialized && "LDLT is not initialized.");
      return ReciprocalConditionNumberEstimate(m_l1_norm, *this);
    }

    template <typename Derived>
    LDLT& rankUpdate(const MatrixBase<Derived>& w, const RealScalar& alpha=1);

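For comparison, this is the quantity rcond() approximates, computed by brute force. The helper below (brute_force_rcond, a name invented for this sketch) mirrors the checks added to test/cholesky.cpp and test/lu.cpp further down, which accept an estimate within a factor of 10 of this value:

#include <Eigen/Dense>

// Reciprocal 1-norm condition number computed the expensive way, for comparison
// with the O(n^2) estimate returned by rcond().
double brute_force_rcond(const Eigen::MatrixXd& A) {
  const Eigen::MatrixXd A_inv = A.inverse();  // O(n^3), exact
  const double norm_A     = A.cwiseAbs().colwise().sum().maxCoeff();
  const double norm_A_inv = A_inv.cwiseAbs().colwise().sum().maxCoeff();
  return 1.0 / (norm_A * norm_A_inv);
}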
@ -207,6 +216,11 @@ template<typename _MatrixType, int _UpLo> class LDLT

    MatrixType reconstructedMatrix() const;

    /** \returns the decomposition itself to allow generic code to do
      * ldlt.adjoint().solve(rhs).
      */
    const LDLT<MatrixType, UpLo>& adjoint() const { return *this; };

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

@ -220,7 +234,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
      eigen_assert(m_isInitialized && "LDLT is not initialized.");
      return Success;
    }

#ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename RhsType, typename DstType>
    EIGEN_DEVICE_FUNC
@ -228,7 +242,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
#endif

  protected:

    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
@ -241,6 +255,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
      * is not stored), and the diagonal entries correspond to D.
      */
    MatrixType m_matrix;
    RealScalar m_l1_norm;
    TranspositionType m_transpositions;
    TmpMatrixType m_temporary;
    internal::SignMatrix m_sign;
@ -314,7 +329,7 @@ template<> struct ldlt_inplace<Lower>
      if(rs>0)
        A21.noalias() -= A20 * temp.head(k);
    }

    // In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot
    // was smaller than the cutoff value. However, since LDLT is not rank-revealing
    // we should only make sure that we do not introduce INF or NaN values.
@ -433,12 +448,32 @@ template<typename InputType>
LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const EigenBase<InputType>& a)
{
  check_template_parameters();

  eigen_assert(a.rows()==a.cols());
  const Index size = a.rows();

  m_matrix = a.derived();

  // Compute matrix L1 norm = max abs column sum.
  m_l1_norm = RealScalar(0);
  if (_UpLo == Lower) {
    for (int col = 0; col < size; ++col) {
      const RealScalar abs_col_sum = m_matrix.col(col).tail(size - col).template lpNorm<1>() +
                                     m_matrix.row(col).head(col).template lpNorm<1>();
      if (abs_col_sum > m_l1_norm) {
        m_l1_norm = abs_col_sum;
      }
    }
  } else {
    for (int col = 0; col < a.cols(); ++col) {
      const RealScalar abs_col_sum = m_matrix.col(col).head(col).template lpNorm<1>() +
                                     m_matrix.row(col).tail(size - col).template lpNorm<1>();
      if (abs_col_sum > m_l1_norm) {
        m_l1_norm = abs_col_sum;
      }
    }
  }

  m_transpositions.resize(size);
  m_isInitialized = false;
  m_temporary.resize(size);
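The loop above walks one triangle only: for column col, the entries on and below the diagonal come from the stored column, and the entries above it come, by symmetry, from the stored row. As a sanity check, the same number can be obtained by materializing the full self-adjoint matrix first, which is what the test helper in test/cholesky.cpp does; a small illustrative helper (not part of the patch):

#include <Eigen/Dense>

// Same quantity as the loop in compute(), but reconstructing the full matrix
// from the lower triangle before taking the maximum absolute column sum.
template <typename MatrixType>
typename MatrixType::RealScalar selfadjoint_l1_norm_lower(const MatrixType& storage) {
  MatrixType full = storage.template selfadjointView<Eigen::Lower>();
  return full.cwiseAbs().colwise().sum().maxCoeff();
}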
@ -466,7 +501,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Deri
    eigen_assert(m_matrix.rows()==size);
  }
  else
  {
    m_matrix.resize(size,size);
    m_matrix.setZero();
    m_transpositions.resize(size);
@ -505,7 +540,7 @@ void LDLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) cons
  // diagonal element is not well justified and leads to numerical issues in some cases.
  // Moreover, Lapack's xSYTRS routines use 0 for the tolerance.
  RealScalar tolerance = RealScalar(1) / NumTraits<RealScalar>::highest();

  for (Index i = 0; i < vecD.size(); ++i)
  {
    if(abs(vecD(i)) > tolerance)
@ -10,7 +10,7 @@
#ifndef EIGEN_LLT_H
#define EIGEN_LLT_H

namespace Eigen {

namespace internal{
template<typename MatrixType, int UpLo> struct LLT_Traits;
@ -40,7 +40,7 @@ template<typename MatrixType, int UpLo> struct LLT_Traits;
  *
  * Example: \include LLT_example.cpp
  * Output: \verbinclude LLT_example.out
  *
  * \sa MatrixBase::llt(), SelfAdjointView::llt(), class LDLT
  */
 /* HEY THIS DOX IS DISABLED BECAUSE THERE's A BUG EITHER HERE OR IN LDLT ABOUT THAT (OR BOTH)
@ -135,6 +135,16 @@ template<typename _MatrixType, int _UpLo> class LLT
    template<typename InputType>
    LLT& compute(const EigenBase<InputType>& matrix);

    /** \returns an estimate of the reciprocal condition number of the matrix of
      * which *this is the Cholesky decomposition.
      */
    RealScalar rcond() const
    {
      eigen_assert(m_isInitialized && "LLT is not initialized.");
      eigen_assert(m_info == Success && "LLT failed because matrix appears to be negative");
      return ReciprocalConditionNumberEstimate(m_l1_norm, *this);
    }

    /** \returns the LLT decomposition matrix
      *
      * TODO: document the storage layout
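Unlike LDLT::rcond(), the LLT version also insists on a successful factorization, since a failed Cholesky leaves no usable factor to solve with. A small calling-pattern sketch (illustrative, not from the patch):

#include <iostream>
#include <Eigen/Dense>

void report_llt_rcond(const Eigen::MatrixXd& A) {
  Eigen::LLT<Eigen::MatrixXd> llt(A);
  if (llt.info() != Eigen::Success) {
    // Calling llt.rcond() here would trip the assertion above.
    std::cout << "matrix does not appear to be positive definite\n";
    return;
  }
  std::cout << "rcond estimate: " << llt.rcond() << "\n";
}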
@ -159,12 +169,17 @@ template<typename _MatrixType, int _UpLo> class LLT
      return m_info;
    }

    /** \returns the decomposition itself to allow generic code to do
      * llt.adjoint().solve(rhs).
      */
    const LLT<MatrixType, UpLo>& adjoint() const { return *this; };

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    template<typename VectorType>
    LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1);

#ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename RhsType, typename DstType>
    EIGEN_DEVICE_FUNC
@ -172,17 +187,18 @@ template<typename _MatrixType, int _UpLo> class LLT
#endif

  protected:

    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
    }

    /** \internal
      * Used to compute and store L
      * The strict upper part is not used and even not initialized.
      */
    MatrixType m_matrix;
    RealScalar m_l1_norm;
    bool m_isInitialized;
    ComputationInfo m_info;
};
@ -268,7 +284,7 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
  static Index unblocked(MatrixType& mat)
  {
    using std::sqrt;

    eigen_assert(mat.rows()==mat.cols());
    const Index size = mat.rows();
    for(Index k = 0; k < size; ++k)
@ -328,7 +344,7 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
    return Eigen::internal::llt_rank_update_lower(mat, vec, sigma);
  }
};

template<typename Scalar> struct llt_inplace<Scalar, Upper>
{
  typedef typename NumTraits<Scalar>::Real RealScalar;
@ -387,12 +403,32 @@ template<typename InputType>
LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const EigenBase<InputType>& a)
{
  check_template_parameters();

  eigen_assert(a.rows()==a.cols());
  const Index size = a.rows();
  m_matrix.resize(size, size);
  m_matrix = a.derived();

  // Compute matrix L1 norm = max abs column sum.
  m_l1_norm = RealScalar(0);
  if (_UpLo == Lower) {
    for (int col = 0; col < size; ++col) {
      const RealScalar abs_col_sum = m_matrix.col(col).tail(size - col).template lpNorm<1>() +
                                     m_matrix.row(col).head(col).template lpNorm<1>();
      if (abs_col_sum > m_l1_norm) {
        m_l1_norm = abs_col_sum;
      }
    }
  } else {
    for (int col = 0; col < a.cols(); ++col) {
      const RealScalar abs_col_sum = m_matrix.col(col).head(col).template lpNorm<1>() +
                                     m_matrix.row(col).tail(size - col).template lpNorm<1>();
      if (abs_col_sum > m_l1_norm) {
        m_l1_norm = abs_col_sum;
      }
    }
  }

  m_isInitialized = true;
  bool ok = Traits::inplace_decomposition(m_matrix);
  m_info = ok ? Success : NumericalIssue;
@ -419,7 +455,7 @@ LLT<_MatrixType,_UpLo> LLT<_MatrixType,_UpLo>::rankUpdate(const VectorType& v, c

  return *this;
}

#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename _MatrixType,int _UpLo>
template<typename RhsType, typename DstType>
@ -431,7 +467,7 @@ void LLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const
#endif

/** \internal use x = llt_object.solve(x);
  *
  * This is the \em in-place version of solve().
  *
  * \param bAndX represents both the right-hand side matrix b and result x.
@ -483,7 +519,7 @@ SelfAdjointView<MatrixType, UpLo>::llt() const
  return LLT<PlainObject,UpLo>(m_matrix);
}
#endif // __CUDACC__

} // end namespace Eigen

#endif // EIGEN_LLT_H
Eigen/src/Core/ConditionEstimator.h  (new file, 207 lines)
@ -0,0 +1,207 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Rasmus Munk Larsen (rmlarsen@google.com)
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CONDITIONESTIMATOR_H
#define EIGEN_CONDITIONESTIMATOR_H

namespace Eigen {

namespace internal {
template <typename MatrixType>
inline typename MatrixType::RealScalar MatrixL1Norm(const MatrixType& matrix) {
  return matrix.cwiseAbs().colwise().sum().maxCoeff();
}

template <typename Vector>
inline typename Vector::RealScalar VectorL1Norm(const Vector& v) {
  return v.template lpNorm<1>();
}
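For reference, MatrixL1Norm computes the induced matrix 1-norm (the maximum absolute column sum) and VectorL1Norm the plain vector 1-norm; the module ultimately estimates the reciprocal of the 1-norm condition number:

\|A\|_1 = \max_{\|x\|_1 = 1} \|Ax\|_1 = \max_{1 \le j \le n} \sum_{i=1}^{n} |a_{ij}|,
\qquad
\kappa_1(A) = \|A\|_1 \, \|A^{-1}\|_1,
\qquad
\mathrm{rcond}(A) = \frac{1}{\kappa_1(A)}.

Computing \|A\|_1 is cheap; the hard part is estimating \|A^{-1}\|_1 without forming the inverse, which is what InverseMatrixL1NormEstimate further down does.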
template <typename Vector, typename RealVector, bool IsComplex>
struct SignOrUnity {
  static inline Vector run(const Vector& v) {
    const RealVector v_abs = v.cwiseAbs();
    return (v_abs.array() == static_cast<typename Vector::RealScalar>(0))
        .select(Vector::Ones(v.size()), v.cwiseQuotient(v_abs));
  }
};

// Partial specialization to avoid elementwise division for real vectors.
template <typename Vector>
struct SignOrUnity<Vector, Vector, false> {
  static inline Vector run(const Vector& v) {
    return (v.array() < static_cast<typename Vector::RealScalar>(0))
        .select(-Vector::Ones(v.size()), Vector::Ones(v.size()));
  }
};

} // namespace internal
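A quick illustration of what SignOrUnity produces (this sketch calls the internal helper directly, which works but is not part of any public API): each entry is mapped to a unit-magnitude value with the same sign or phase, with 1 used for zero entries.

#include <iostream>
#include <Eigen/Dense>

int main() {
  Eigen::Vector3d v(-2.5, 0.0, 4.0);
  Eigen::Vector3d s =
      Eigen::internal::SignOrUnity<Eigen::Vector3d, Eigen::Vector3d, false>::run(v);
  std::cout << s.transpose() << "\n";  // expected: -1  1  1
}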
/** \class ConditionEstimator
  * \ingroup Core_Module
  *
  * \brief Condition number estimator.
  *
  * Computing a decomposition of a dense matrix takes O(n^3) operations, while
  * this method estimates the condition number quickly and reliably in O(n^2)
  * operations.
  *
  * \returns an estimate of the reciprocal condition number
  * (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given the matrix and
  * its decomposition. Supports the following decompositions: FullPivLU,
  * PartialPivLU, LDLT, and LLT.
  *
  * \sa FullPivLU, PartialPivLU, LDLT, LLT.
  */
template <typename Decomposition>
typename Decomposition::RealScalar ReciprocalConditionNumberEstimate(
    const typename Decomposition::MatrixType& matrix,
    const Decomposition& dec) {
  eigen_assert(matrix.rows() == dec.rows());
  eigen_assert(matrix.cols() == dec.cols());
  if (dec.rows() == 0) return typename Decomposition::RealScalar(1);
  return ReciprocalConditionNumberEstimate(MatrixL1Norm(matrix), dec);
}

/** \class ConditionEstimator
  * \ingroup Core_Module
  *
  * \brief Condition number estimator.
  *
  * Computing a decomposition of a dense matrix takes O(n^3) operations, while
  * this method estimates the condition number quickly and reliably in O(n^2)
  * operations.
  *
  * \returns an estimate of the reciprocal condition number
  * (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and
  * its decomposition. Supports the following decompositions: FullPivLU,
  * PartialPivLU, LDLT, and LLT.
  *
  * \sa FullPivLU, PartialPivLU, LDLT, LLT.
  */
template <typename Decomposition>
typename Decomposition::RealScalar ReciprocalConditionNumberEstimate(
    typename Decomposition::RealScalar matrix_norm, const Decomposition& dec) {
  typedef typename Decomposition::RealScalar RealScalar;
  eigen_assert(dec.rows() == dec.cols());
  if (dec.rows() == 0) return RealScalar(1);
  if (matrix_norm == RealScalar(0)) return RealScalar(0);
  if (dec.rows() == 1) return RealScalar(1);
  const typename Decomposition::RealScalar inverse_matrix_norm =
      InverseMatrixL1NormEstimate(dec);
  return (inverse_matrix_norm == RealScalar(0)
              ? RealScalar(0)
              : (RealScalar(1) / inverse_matrix_norm) / matrix_norm);
}
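A tiny worked example (illustrative, not from the patch): for A = diag(100, 1), ||A||_1 = 100 and ||inv(A)||_1 = 1, so the reciprocal condition number is 1 / (100 * 1) = 0.01, and the estimate should land very close to that value:

#include <iostream>
#include <Eigen/Dense>

int main() {
  Eigen::Matrix2d A = Eigen::Vector2d(100.0, 1.0).asDiagonal();
  Eigen::PartialPivLU<Eigen::Matrix2d> lu(A);
  std::cout << lu.rcond() << "\n";  // expected to print roughly 0.01
}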
/**
  * \returns an estimate of ||inv(matrix)||_1 given a decomposition of
  * matrix that implements .solve() and .adjoint().solve() methods.
  *
  * The method implements Algorithms 4.1 and 5.1 from
  *   http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf
  * which also forms the basis for the condition number estimators in
  * LAPACK. Since at most 10 calls to the solve method of dec are
  * performed, the total cost is O(dims^2), as opposed to O(dims^3)
  * needed to compute the inverse matrix explicitly.
  *
  * The most common usage is in estimating the condition number
  * ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be
  * computed directly in O(n^2) operations.
  *
  * Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and
  * LLT.
  *
  * \sa FullPivLU, PartialPivLU, LDLT, LLT.
  */
template <typename Decomposition>
typename Decomposition::RealScalar InverseMatrixL1NormEstimate(
    const Decomposition& dec) {
  typedef typename Decomposition::MatrixType MatrixType;
  typedef typename Decomposition::Scalar Scalar;
  typedef typename Decomposition::RealScalar RealScalar;
  typedef typename internal::plain_col_type<MatrixType>::type Vector;
  typedef typename internal::plain_col_type<MatrixType, RealScalar>::type
      RealVector;
  const bool is_complex = (NumTraits<Scalar>::IsComplex != 0);

  eigen_assert(dec.rows() == dec.cols());
  const Index n = dec.rows();
  if (n == 0) {
    return 0;
  }
  Vector v = dec.solve(Vector::Ones(n) / Scalar(n));

  // lower_bound is a lower bound on
  //   ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1
  // and is the objective maximized by the ("super-") gradient ascent
  // algorithm below.
  RealScalar lower_bound = internal::VectorL1Norm(v);
  if (n == 1) {
    return lower_bound;
  }
  // Gradient ascent algorithm follows: We know that the optimum is achieved at
  // one of the simplices v = e_i, so in each iteration we follow a
  // super-gradient to move towards the optimal one.
  RealScalar old_lower_bound = lower_bound;
  Vector sign_vector(n);
  Vector old_sign_vector;
  Index v_max_abs_index = -1;
  Index old_v_max_abs_index = v_max_abs_index;
  for (int k = 0; k < 4; ++k) {
    sign_vector = internal::SignOrUnity<Vector, RealVector, is_complex>::run(v);
    if (k > 0 && !is_complex && sign_vector == old_sign_vector) {
      // Break if the solution stagnated.
      break;
    }
    // v_max_abs_index = argmax |real( inv(matrix)^T * sign_vector )|
    v = dec.adjoint().solve(sign_vector);
    v.real().cwiseAbs().maxCoeff(&v_max_abs_index);
    if (v_max_abs_index == old_v_max_abs_index) {
      // Break if the solution stagnated.
      break;
    }
    // Move to the new simplex e_j, where j = v_max_abs_index.
    v = dec.solve(Vector::Unit(n, v_max_abs_index));  // v = inv(matrix) * e_j.
    lower_bound = internal::VectorL1Norm(v);
    if (lower_bound <= old_lower_bound) {
      // Break if the gradient step did not increase the lower_bound.
      break;
    }
    if (!is_complex) {
      old_sign_vector = sign_vector;
    }
    old_v_max_abs_index = v_max_abs_index;
    old_lower_bound = lower_bound;
  }
  // The following calculates an independent estimate of ||matrix||_1 by
  // multiplying matrix by a vector with entries of slowly increasing
  // magnitude and alternating sign:
  //   v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1.
  // This improvement to Hager's algorithm above is due to Higham. It was
  // added to make the algorithm more robust in certain corner cases where
  // large elements in the matrix might otherwise escape detection due to
  // exact cancellation (especially when op and op_adjoint correspond to a
  // sequence of backsubstitutions and permutations), which could cause
  // Hager's algorithm to vastly underestimate ||matrix||_1.
  Scalar alternating_sign(RealScalar(1));
  for (Index i = 0; i < n; ++i) {
    v[i] = alternating_sign *
           (RealScalar(1) + (RealScalar(i) / (RealScalar(n - 1))));
    alternating_sign = -alternating_sign;
  }
  v = dec.solve(v);
  const RealScalar alternate_lower_bound =
      (2 * internal::VectorL1Norm(v)) / (3 * RealScalar(n));
  return numext::maxi(lower_bound, alternate_lower_bound);
}
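In symbols, the loop above runs a few steps of Hager's ascent on the objective below, whose maximum over the unit 1-norm ball is attained at a canonical basis vector, and the final block adds Higham's alternate probe vector; the function returns the larger of the two lower bounds:

\|A^{-1}\|_1 = \sup_{v \neq 0} \frac{\|A^{-1} v\|_1}{\|v\|_1}
             = \max_{1 \le j \le n} \|A^{-1} e_j\|_1,
\qquad
\underline{\gamma}_{\mathrm{alt}} = \frac{2 \, \|A^{-1} b\|_1}{3n},
\quad b_i = (-1)^{i}\left(1 + \frac{i}{n-1}\right),\; i = 0, \dots, n-1.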

} // namespace Eigen

#endif
@ -231,6 +231,15 @@ template<typename _MatrixType> class FullPivLU
      return Solve<FullPivLU, Rhs>(*this, b.derived());
    }

    /** \returns an estimate of the reciprocal condition number of the matrix of which *this is
        the LU decomposition.
      */
    inline RealScalar rcond() const
    {
      eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
      return ReciprocalConditionNumberEstimate(m_l1_norm, *this);
    }

    /** \returns the determinant of the matrix of which
      * *this is the LU decomposition. It has only linear complexity
      * (that is, O(n) where n is the dimension of the square matrix)
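A hedged sketch of how this reads in practice (illustrative, not from the patch): a nearly singular matrix should produce a tiny rcond(), signalling that solutions computed from this decomposition may be unreliable.

#include <iostream>
#include <Eigen/Dense>

int main() {
  Eigen::Matrix3d A;
  A << 1, 2, 3,
       2, 4, 6.0000001,   // nearly a multiple of the first row
       1, 0, 1;
  Eigen::FullPivLU<Eigen::Matrix3d> lu(A);
  // A is invertible but close to singular, so the reciprocal condition
  // number estimate should come out tiny (roughly 1e-8 or smaller here).
  std::cout << "rcond = " << lu.rcond() << "\n";
}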
@ -410,6 +419,7 @@ template<typename _MatrixType> class FullPivLU
    IntColVectorType m_rowsTranspositions;
    IntRowVectorType m_colsTranspositions;
    Index m_det_pq, m_nonzero_pivots;
    RealScalar m_l1_norm;
    RealScalar m_maxpivot, m_prescribedThreshold;
    bool m_isInitialized, m_usePrescribedThreshold;
};
@ -455,11 +465,12 @@ FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const EigenBase<InputType>
  // the permutations are stored as int indices, so just to be sure:
  eigen_assert(matrix.rows()<=NumTraits<int>::highest() && matrix.cols()<=NumTraits<int>::highest());

  m_isInitialized = true;
  m_lu = matrix.derived();
  m_l1_norm = m_lu.cwiseAbs().colwise().sum().maxCoeff();

  computeInPlace();

  m_isInitialized = true;
  return *this;
}

@ -76,7 +76,6 @@ template<typename _MatrixType> class PartialPivLU
    typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
    typedef typename MatrixType::PlainObject PlainObject;

    /**
      * \brief Default Constructor.
      *
@ -152,6 +151,15 @@ template<typename _MatrixType> class PartialPivLU
      return Solve<PartialPivLU, Rhs>(*this, b.derived());
    }

    /** \returns an estimate of the reciprocal condition number of the matrix of which *this is
        the LU decomposition.
      */
    inline RealScalar rcond() const
    {
      eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
      return ReciprocalConditionNumberEstimate(m_l1_norm, *this);
    }

    /** \returns the inverse of the matrix of which *this is the LU decomposition.
      *
      * \warning The matrix being decomposed here is assumed to be invertible. If you need to check for
@ -178,7 +186,7 @@ template<typename _MatrixType> class PartialPivLU
      *
      * \sa MatrixBase::determinant()
      */
    typename internal::traits<MatrixType>::Scalar determinant() const;
    Scalar determinant() const;

    MatrixType reconstructedMatrix() const;

@ -247,6 +255,7 @@ template<typename _MatrixType> class PartialPivLU
    PermutationType m_p;
    TranspositionType m_rowsTranspositions;
    Index m_det_p;
    RealScalar m_l1_norm;
    bool m_isInitialized;
};

@ -256,6 +265,7 @@ PartialPivLU<MatrixType>::PartialPivLU()
    m_p(),
    m_rowsTranspositions(),
    m_det_p(0),
    m_l1_norm(0),
    m_isInitialized(false)
{
}
@ -266,6 +276,7 @@ PartialPivLU<MatrixType>::PartialPivLU(Index size)
    m_p(size),
    m_rowsTranspositions(size),
    m_det_p(0),
    m_l1_norm(0),
    m_isInitialized(false)
{
}
@ -277,6 +288,7 @@ PartialPivLU<MatrixType>::PartialPivLU(const EigenBase<InputType>& matrix)
    m_p(matrix.rows()),
    m_rowsTranspositions(matrix.rows()),
    m_det_p(0),
    m_l1_norm(0),
    m_isInitialized(false)
{
  compute(matrix.derived());
@ -467,6 +479,7 @@ PartialPivLU<MatrixType>& PartialPivLU<MatrixType>::compute(const EigenBase<Inpu
  eigen_assert(matrix.rows()<NumTraits<int>::highest());

  m_lu = matrix.derived();
  m_l1_norm = m_lu.cwiseAbs().colwise().sum().maxCoeff();

  eigen_assert(matrix.rows() == matrix.cols() && "PartialPivLU is only for square (and moreover invertible) matrices");
  const Index size = matrix.rows();
@ -484,7 +497,7 @@ PartialPivLU<MatrixType>& PartialPivLU<MatrixType>::compute(const EigenBase<Inpu
}

template<typename MatrixType>
typename internal::traits<MatrixType>::Scalar PartialPivLU<MatrixType>::determinant() const
typename PartialPivLU<MatrixType>::Scalar PartialPivLU<MatrixType>::determinant() const
{
  eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
  return Scalar(m_det_p) * m_lu.diagonal().prod();
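The one-line body above is justified by the factorization this class computes: with P A = L U and L unit lower triangular, the determinant reduces to the parity of the row transpositions (the stored m_det_p, which is plus or minus one) times the product of U's diagonal, which is why the docs advertise linear cost once the decomposition exists:

\det(A) = \det(P)^{-1} \, \det(L) \, \det(U) = (\pm 1) \prod_{i=1}^{n} u_{ii}.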
@ -17,6 +17,12 @@
#include <Eigen/Cholesky>
#include <Eigen/QR>

template<typename MatrixType, int UpLo>
typename MatrixType::RealScalar matrix_l1_norm(const MatrixType& m) {
  MatrixType symm = m.template selfadjointView<UpLo>();
  return symm.cwiseAbs().colwise().sum().maxCoeff();
}

template<typename MatrixType,template <typename,int> class CholType> void test_chol_update(const MatrixType& symm)
{
  typedef typename MatrixType::Scalar Scalar;
@ -77,7 +83,7 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
  {
    SquareMatrixType symmUp = symm.template triangularView<Upper>();
    SquareMatrixType symmLo = symm.template triangularView<Lower>();

    LLT<SquareMatrixType,Lower> chollo(symmLo);
    VERIFY_IS_APPROX(symm, chollo.reconstructedMatrix());
    vecX = chollo.solve(vecB);
@ -85,6 +91,14 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
    matX = chollo.solve(matB);
    VERIFY_IS_APPROX(symm * matX, matB);

    const MatrixType symmLo_inverse = chollo.solve(MatrixType::Identity(rows,cols));
    RealScalar rcond = (RealScalar(1) / matrix_l1_norm<MatrixType, Lower>(symmLo)) /
                       matrix_l1_norm<MatrixType, Lower>(symmLo_inverse);
    RealScalar rcond_est = chollo.rcond();
    // Verify that the estimated condition number is within a factor of 10 of the
    // truth.
    VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10);

    // test the upper mode
    LLT<SquareMatrixType,Upper> cholup(symmUp);
    VERIFY_IS_APPROX(symm, cholup.reconstructedMatrix());
@ -93,6 +107,15 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
    matX = cholup.solve(matB);
    VERIFY_IS_APPROX(symm * matX, matB);

    // Verify that the estimated condition number is within a factor of 10 of the
    // truth.
    const MatrixType symmUp_inverse = cholup.solve(MatrixType::Identity(rows,cols));
    rcond = (RealScalar(1) / matrix_l1_norm<MatrixType, Upper>(symmUp)) /
            matrix_l1_norm<MatrixType, Upper>(symmUp_inverse);
    rcond_est = cholup.rcond();
    VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10);

    MatrixType neg = -symmLo;
    chollo.compute(neg);
    VERIFY(chollo.info()==NumericalIssue);
@ -101,7 +124,7 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
    VERIFY_IS_APPROX(MatrixType(chollo.matrixU().transpose().conjugate()), MatrixType(chollo.matrixL()));
    VERIFY_IS_APPROX(MatrixType(cholup.matrixL().transpose().conjugate()), MatrixType(cholup.matrixU()));
    VERIFY_IS_APPROX(MatrixType(cholup.matrixU().transpose().conjugate()), MatrixType(cholup.matrixL()));

    // test some special use cases of SelfCwiseBinaryOp:
    MatrixType m1 = MatrixType::Random(rows,cols), m2(rows,cols);
    m2 = m1;
@ -137,6 +160,15 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
    matX = ldltlo.solve(matB);
    VERIFY_IS_APPROX(symm * matX, matB);

    const MatrixType symmLo_inverse = ldltlo.solve(MatrixType::Identity(rows,cols));
    RealScalar rcond = (RealScalar(1) / matrix_l1_norm<MatrixType, Lower>(symmLo)) /
                       matrix_l1_norm<MatrixType, Lower>(symmLo_inverse);
    RealScalar rcond_est = ldltlo.rcond();
    // Verify that the estimated condition number is within a factor of 10 of the
    // truth.
    VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10);

    LDLT<SquareMatrixType,Upper> ldltup(symmUp);
    VERIFY_IS_APPROX(symm, ldltup.reconstructedMatrix());
    vecX = ldltup.solve(vecB);
@ -144,6 +176,14 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
    matX = ldltup.solve(matB);
    VERIFY_IS_APPROX(symm * matX, matB);

    // Verify that the estimated condition number is within a factor of 10 of the
    // truth.
    const MatrixType symmUp_inverse = ldltup.solve(MatrixType::Identity(rows,cols));
    rcond = (RealScalar(1) / matrix_l1_norm<MatrixType, Upper>(symmUp)) /
            matrix_l1_norm<MatrixType, Upper>(symmUp_inverse);
    rcond_est = ldltup.rcond();
    VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10);

    VERIFY_IS_APPROX(MatrixType(ldltlo.matrixL().transpose().conjugate()), MatrixType(ldltlo.matrixU()));
    VERIFY_IS_APPROX(MatrixType(ldltlo.matrixU().transpose().conjugate()), MatrixType(ldltlo.matrixL()));
    VERIFY_IS_APPROX(MatrixType(ldltup.matrixL().transpose().conjugate()), MatrixType(ldltup.matrixU()));
@ -167,7 +207,7 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
  // restore
  if(sign == -1)
    symm = -symm;

  // check matrices coming from linear constraints with Lagrange multipliers
  if(rows>=3)
  {
@ -183,7 +223,7 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
    vecX = ldltlo.solve(vecB);
    VERIFY_IS_APPROX(A * vecX, vecB);
  }

  // check non-full rank matrices
  if(rows>=3)
  {
@ -199,7 +239,7 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
    vecX = ldltlo.solve(vecB);
    VERIFY_IS_APPROX(A * vecX, vecB);
  }

  // check matrices with a wide spectrum
  if(rows>=3)
  {
@ -225,7 +265,7 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
    {
      RealScalar large_tol = std::sqrt(test_precision<RealScalar>());
      VERIFY((A * vecX).isApprox(vecB, large_tol));

      ++g_test_level;
      VERIFY_IS_APPROX(A * vecX,vecB);
      --g_test_level;
@ -314,14 +354,14 @@ template<typename MatrixType> void cholesky_bug241(const MatrixType& m)
}

// LDLT is not guaranteed to work for indefinite matrices, but happens to work fine if matrix is diagonal.
// This test checks that LDLT reports correctly that matrix is indefinite.
// See http://forum.kde.org/viewtopic.php?f=74&t=106942 and bug 736
template<typename MatrixType> void cholesky_definiteness(const MatrixType& m)
{
  eigen_assert(m.rows() == 2 && m.cols() == 2);
  MatrixType mat;
  LDLT<MatrixType> ldlt(2);

  {
    mat << 1, 0, 0, -1;
    ldlt.compute(mat);
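For reference, the definiteness report that the comment refers to is exposed through LDLT's query methods; a small illustrative sketch (not part of the patch) using the same indefinite matrix as above:

#include <iostream>
#include <Eigen/Dense>

int main() {
  Eigen::Matrix2d mat;
  mat << 1, 0,
         0, -1;                          // indefinite, but diagonal, so LDLT handles it
  Eigen::LDLT<Eigen::Matrix2d> ldlt(mat);
  std::cout << ldlt.isPositive() << " "  // 0: not positive semidefinite
            << ldlt.isNegative() << "\n";// 0: not negative semidefinite
}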
@ -384,11 +424,11 @@
  CALL_SUBTEST_3( cholesky_definiteness(Matrix2d()) );
  CALL_SUBTEST_4( cholesky(Matrix3f()) );
  CALL_SUBTEST_5( cholesky(Matrix4d()) );

  s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE);
  CALL_SUBTEST_2( cholesky(MatrixXd(s,s)) );
  TEST_SET_BUT_UNUSED_VARIABLE(s)

  s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2);
  CALL_SUBTEST_6( cholesky_cplx(MatrixXcd(s,s)) );
  TEST_SET_BUT_UNUSED_VARIABLE(s)
@ -402,6 +442,6 @@
  // Test problem size constructors
  CALL_SUBTEST_9( LLT<MatrixXf>(10) );
  CALL_SUBTEST_9( LDLT<MatrixXf>(10) );

  TEST_SET_BUT_UNUSED_VARIABLE(nb_temporaries)
}
test/lu.cpp  (23 lines changed)
@ -11,6 +11,11 @@
#include <Eigen/LU>
using namespace std;

template<typename MatrixType>
typename MatrixType::RealScalar matrix_l1_norm(const MatrixType& m) {
  return m.cwiseAbs().colwise().sum().maxCoeff();
}

template<typename MatrixType> void lu_non_invertible()
{
  typedef typename MatrixType::Index Index;
@ -143,7 +148,14 @@ template<typename MatrixType> void lu_invertible()
  m3 = MatrixType::Random(size,size);
  m2 = lu.solve(m3);
  VERIFY_IS_APPROX(m3, m1*m2);
  VERIFY_IS_APPROX(m2, lu.inverse()*m3);
  MatrixType m1_inverse = lu.inverse();
  VERIFY_IS_APPROX(m2, m1_inverse*m3);

  RealScalar rcond = (RealScalar(1) / matrix_l1_norm(m1)) / matrix_l1_norm(m1_inverse);
  const RealScalar rcond_est = lu.rcond();
  // Verify that the estimated condition number is within a factor of 10 of the
  // truth.
  VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10);

  // test solve with transposed
  lu.template _solve_impl_transposed<false>(m3, m2);
@ -170,6 +182,7 @@ template<typename MatrixType> void lu_partial_piv()
     PartialPivLU.h
  */
  typedef typename MatrixType::Index Index;
  typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
  Index size = internal::random<Index>(1,4);

  MatrixType m1(size, size), m2(size, size), m3(size, size);
@ -181,7 +194,13 @@ template<typename MatrixType> void lu_partial_piv()
  m3 = MatrixType::Random(size,size);
  m2 = plu.solve(m3);
  VERIFY_IS_APPROX(m3, m1*m2);
  VERIFY_IS_APPROX(m2, plu.inverse()*m3);
  MatrixType m1_inverse = plu.inverse();
  VERIFY_IS_APPROX(m2, m1_inverse*m3);

  RealScalar rcond = (RealScalar(1) / matrix_l1_norm(m1)) / matrix_l1_norm(m1_inverse);
  const RealScalar rcond_est = plu.rcond();
  // Verify that the estimate is within a factor of 10 of the truth.
  VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10);

  // test solve with transposed
  plu.template _solve_impl_transposed<false>(m3, m2);