Revert "Update SVD Module to allow specifying computation options with a...

Rasmus Munk Larsen 2021-11-30 18:45:54 +00:00 committed by David Tellenbach
parent 4dd126c630
commit 085c2fc5d5
23 changed files with 634 additions and 764 deletions

@ -370,11 +370,8 @@ template<typename Derived> class MatrixBase
/////////// SVD module ///////////
template<int Options = 0>
inline JacobiSVD<PlainObject, Options> jacobiSvd() const;
template<int Options = 0>
inline BDCSVD<PlainObject, Options> bdcSvd() const;
inline JacobiSVD<PlainObject> jacobiSvd(unsigned int computationOptions = 0) const;
inline BDCSVD<PlainObject> bdcSvd(unsigned int computationOptions = 0) const;
/////////// Geometry module ///////////
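For context, a minimal sketch of the two call styles this hunk switches between (assuming a MatrixXf A and that <Eigen/Dense> is included); the revert restores the runtime computationOptions argument and drops the Options template parameter:

    Eigen::MatrixXf A = Eigen::MatrixXf::Random(5, 3);
    // Runtime options, the call style this revert restores:
    Eigen::JacobiSVD<Eigen::MatrixXf> jsvd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
    Eigen::VectorXf sv = A.bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).singularValues();
    // Compile-time options, the call style this revert removes:
    //   auto jsvd2 = A.jacobiSvd<Eigen::ComputeThinU | Eigen::ComputeThinV>();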

@ -423,14 +423,14 @@ enum DecompositionOptions {
/** \ingroup enums
* Possible values for the \p QRPreconditioner template parameter of JacobiSVD. */
enum QRPreconditioners {
/** Use a QR decomposition with column pivoting as the first step. */
ColPivHouseholderQRPreconditioner = 0x0,
/** Do not specify what is to be done if the SVD of a non-square matrix is asked for. */
NoQRPreconditioner = 0x40,
NoQRPreconditioner,
/** Use a QR decomposition without pivoting as the first step. */
HouseholderQRPreconditioner = 0x80,
HouseholderQRPreconditioner,
/** Use a QR decomposition with column pivoting as the first step. */
ColPivHouseholderQRPreconditioner,
/** Use a QR decomposition with full pivoting as the first step. */
FullPivHouseholderQRPreconditioner = 0xC0
FullPivHouseholderQRPreconditioner
};
#ifdef Success
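As a usage sketch of these enumerators under the restored API (the preconditioner is the second JacobiSVD template argument and the unitaries are requested at run time; MatrixXf and the sizes are illustrative assumptions):

    Eigen::MatrixXf A = Eigen::MatrixXf::Random(6, 4);
    // Default preconditioner (ColPivHouseholderQR), thin unitaries:
    Eigen::JacobiSVD<Eigen::MatrixXf> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
    // Explicit preconditioner; FullPivHouseholderQR only supports full unitaries:
    Eigen::JacobiSVD<Eigen::MatrixXf, Eigen::FullPivHouseholderQRPreconditioner>
        fullSvd(A, Eigen::ComputeFullU | Eigen::ComputeFullV);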

@ -277,8 +277,8 @@ template<typename MatrixType> class ColPivHouseholderQR;
template<typename MatrixType> class FullPivHouseholderQR;
template<typename MatrixType> class CompleteOrthogonalDecomposition;
template<typename MatrixType> class SVDBase;
template<typename MatrixType, int Options = 0> class JacobiSVD;
template<typename MatrixType, int Options = 0> class BDCSVD;
template<typename MatrixType, int QRPreconditioner = ColPivHouseholderQRPreconditioner> class JacobiSVD;
template<typename MatrixType> class BDCSVD;
template<typename MatrixType, int UpLo = Lower> class LLT;
template<typename MatrixType, int UpLo = Lower> class LDLT;
template<typename VectorsType, typename CoeffsType, int Side=OnTheLeft> class HouseholderSequence;

@ -108,7 +108,7 @@ public:
if(norm <= v0.norm() * v1.norm() * NumTraits<RealScalar>::epsilon())
{
Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
JacobiSVD<Matrix<Scalar,2,3>, ComputeFullV> svd(m);
JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
result.normal() = svd.matrixV().col(2);
}
else

@ -651,7 +651,7 @@ EIGEN_DEVICE_FUNC inline Derived& QuaternionBase<Derived>::setFromTwoVectors(con
{
c = numext::maxi(c,Scalar(-1));
Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
JacobiSVD<Matrix<Scalar,2,3>, ComputeFullV> svd(m);
JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
Vector3 axis = svd.matrixV().col(2);
Scalar w2 = (Scalar(1)+c)*Scalar(0.5);
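The surrounding function is QuaternionBase::setFromTwoVectors(); the hunk only changes how its nearly-antipodal branch builds the JacobiSVD. A small usage sketch (the vectors are arbitrary values chosen to hit that branch, <Eigen/Geometry> assumed):

    Eigen::Vector3d a(1.0, 0.0, 0.0);
    Eigen::Vector3d b(-1.0, 1e-9, 0.0);   // nearly opposite to a, so the SVD branch above runs
    Eigen::Quaterniond q;
    q.setFromTwoVectors(a, b);            // q rotates a onto the direction of b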

@ -1105,7 +1105,7 @@ template<typename RotationMatrixType, typename ScalingMatrixType>
EIGEN_DEVICE_FUNC void Transform<Scalar,Dim,Mode,Options>::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const
{
// Note that JacobiSVD is faster than BDCSVD for small matrices.
JacobiSVD<LinearMatrixType, ComputeFullU | ComputeFullV> svd(linear());
JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU | ComputeFullV);
Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant() < Scalar(0) ? Scalar(-1) : Scalar(1); // so x has absolute value 1
VectorType sv(svd.singularValues());
@ -1135,7 +1135,7 @@ template<typename ScalingMatrixType, typename RotationMatrixType>
EIGEN_DEVICE_FUNC void Transform<Scalar,Dim,Mode,Options>::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const
{
// Note that JacobiSVD is faster than BDCSVD for small matrices.
JacobiSVD<LinearMatrixType, ComputeFullU | ComputeFullV> svd(linear());
JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU | ComputeFullV);
Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant() < Scalar(0) ? Scalar(-1) : Scalar(1); // so x has absolute value 1
VectorType sv(svd.singularValues());
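Both hunks sit inside Transform's polar-decomposition helpers; a brief sketch of the public entry points they implement (an Affine3d with an arbitrary linear part is assumed):

    Eigen::Affine3d t = Eigen::Affine3d::Identity();
    t.linear() = Eigen::Matrix3d::Random();          // arbitrary linear part for the sketch
    Eigen::Matrix3d rotation, scaling;
    t.computeRotationScaling(&rotation, &scaling);   // t.linear() ~= rotation * scaling
    t.computeScalingRotation(&scaling, &rotation);   // t.linear() ~= scaling * rotation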

@ -127,7 +127,7 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
// Eq. (38)
const MatrixType sigma = one_over_n * dst_demean * src_demean.transpose();
JacobiSVD<MatrixType, ComputeFullU | ComputeFullV> svd(sigma);
JacobiSVD<MatrixType> svd(sigma, ComputeFullU | ComputeFullV);
// Initialize the resulting transformation with an identity matrix...
TransformationMatrixType Rt = TransformationMatrixType::Identity(m+1,m+1);
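This change is inside Eigen::umeyama(), which estimates a similarity transform between two point sets from the SVD of the covariance sigma shown above; a usage sketch (3xN point matrices and a pure scaling are assumptions for illustration, <Eigen/Geometry> required):

    Eigen::Matrix3Xd src = Eigen::Matrix3Xd::Random(3, 20);
    Eigen::Matrix3Xd dst = 2.0 * src;                    // stand-in for a transformed copy of src
    Eigen::Matrix4d T = Eigen::umeyama(src, dst, true);  // true = also estimate a uniform scale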

@ -38,14 +38,14 @@ namespace Eigen {
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
IOFormat bdcsvdfmt(8, 0, ", ", "\n", " [", "]");
#endif
template<typename MatrixType_, int Options> class BDCSVD;
template<typename MatrixType_> class BDCSVD;
namespace internal {
template<typename MatrixType_, int Options>
struct traits<BDCSVD<MatrixType_,Options> >
: svd_traits<MatrixType_, Options>
template<typename MatrixType_>
struct traits<BDCSVD<MatrixType_> >
: traits<MatrixType_>
{
typedef MatrixType_ MatrixType;
};
@ -61,11 +61,6 @@ struct traits<BDCSVD<MatrixType_,Options> >
* \brief class Bidiagonal Divide and Conquer SVD
*
* \tparam MatrixType_ the type of the matrix of which we are computing the SVD decomposition
*
* \tparam Options this optional parameter allows one to specify options for computing unitaries \a U and \a V.
* Possible values are #ComputeThinU, #ComputeThinV, #ComputeFullU, #ComputeFullV.
* It is not possible to request the thin and full version of U or V.
* By default, unitaries are not computed.
*
* This class first reduces the input matrix to bi-diagonal form using class UpperBidiagonalization,
* and then performs a divide-and-conquer diagonalization. Small blocks are diagonalized using class JacobiSVD.
@ -80,8 +75,8 @@ struct traits<BDCSVD<MatrixType_,Options> >
*
* \sa class JacobiSVD
*/
template<typename MatrixType_, int Options>
class BDCSVD : public SVDBase<BDCSVD<MatrixType_, Options> >
template<typename MatrixType_>
class BDCSVD : public SVDBase<BDCSVD<MatrixType_> >
{
typedef SVDBase<BDCSVD> Base;
@ -132,20 +127,26 @@ public:
* according to the specified problem size.
* \sa BDCSVD()
*/
BDCSVD(Index rows, Index cols)
BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0)
: m_algoswap(16), m_numIters(0)
{
allocate(rows, cols);
allocate(rows, cols, computationOptions);
}
/** \brief Constructor performing the decomposition of given matrix.
*
* \param matrix the matrix to decompose
* \param computationOptions optional parameter allowing one to specify if you want full or thin U or V unitaries to be computed.
* By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
* #ComputeFullV, #ComputeThinV.
*
* Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
* available with the (non-default) FullPivHouseholderQR preconditioner.
*/
BDCSVD(const MatrixType& matrix)
BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
: m_algoswap(16), m_numIters(0)
{
compute(matrix);
compute(matrix, computationOptions);
}
~BDCSVD()
@ -155,8 +156,25 @@ public:
/** \brief Method performing the decomposition of given matrix using custom options.
*
* \param matrix the matrix to decompose
* \param computationOptions optional parameter allowing one to specify if you want full or thin U or V unitaries to be computed.
* By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
* #ComputeFullV, #ComputeThinV.
*
* Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
* available with the (non-default) FullPivHouseholderQR preconditioner.
*/
BDCSVD& compute(const MatrixType& matrix);
BDCSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
/** \brief Method performing the decomposition of given matrix using current options.
*
* \param matrix the matrix to decompose
*
* This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
*/
BDCSVD& compute(const MatrixType& matrix)
{
return compute(matrix, this->m_computationOptions);
}
void setSwitchSize(int s)
{
@ -165,7 +183,7 @@ public:
}
private:
void allocate(Index rows, Index cols);
void allocate(Index rows, Index cols, unsigned int computationOptions);
void divide(Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift);
void computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V);
void computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, VectorType& singVals, ArrayRef shifts, ArrayRef mus);
@ -178,8 +196,6 @@ private:
void copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naivev);
void structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1);
static RealScalar secularEq(RealScalar x, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift);
template<typename SVDType>
void computeBaseCase(SVDType& svd, Index n, Index firstCol, Index firstRowW, Index firstColW, Index shift);
protected:
MatrixXr m_naiveU, m_naiveV;
@ -192,10 +208,10 @@ protected:
using Base::m_singularValues;
using Base::m_diagSize;
using Base::ShouldComputeFullU;
using Base::ShouldComputeFullV;
using Base::ShouldComputeThinU;
using Base::ShouldComputeThinV;
using Base::m_computeFullU;
using Base::m_computeFullV;
using Base::m_computeThinU;
using Base::m_computeThinV;
using Base::m_matrixU;
using Base::m_matrixV;
using Base::m_info;
@ -208,12 +224,12 @@ public:
// Method to allocate and initialize matrix and attributes
template<typename MatrixType, int Options>
void BDCSVD<MatrixType, Options>::allocate(Eigen::Index rows, Eigen::Index cols)
template<typename MatrixType>
void BDCSVD<MatrixType>::allocate(Eigen::Index rows, Eigen::Index cols, unsigned int computationOptions)
{
m_isTranspose = (cols > rows);
if (Base::allocate(rows, cols))
if (Base::allocate(rows, cols, computationOptions))
return;
m_computed = MatrixXr::Zero(m_diagSize + 1, m_diagSize );
@ -231,13 +247,13 @@ void BDCSVD<MatrixType, Options>::allocate(Eigen::Index rows, Eigen::Index cols)
m_workspaceI.resize(3*m_diagSize);
}// end allocate
template<typename MatrixType, int Options>
BDCSVD<MatrixType, Options>& BDCSVD<MatrixType, Options>::compute(const MatrixType& matrix)
template<typename MatrixType>
BDCSVD<MatrixType>& BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsigned int computationOptions)
{
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "\n\n\n======================================================================================================================\n\n\n";
#endif
allocate(matrix.rows(), matrix.cols());
allocate(matrix.rows(), matrix.cols(), computationOptions);
using std::abs;
const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
@ -246,7 +262,7 @@ BDCSVD<MatrixType, Options>& BDCSVD<MatrixType, Options>::compute(const MatrixTy
if(matrix.cols() < m_algoswap)
{
// FIXME this line involves temporaries
JacobiSVD<MatrixType, Options> jsvd(matrix);
JacobiSVD<MatrixType> jsvd(matrix,computationOptions);
m_isInitialized = true;
m_info = jsvd.info();
if (m_info == Success || m_info == NoConvergence) {
@ -317,21 +333,21 @@ BDCSVD<MatrixType, Options>& BDCSVD<MatrixType, Options>::compute(const MatrixTy
}// end compute
template<typename MatrixType, int Options>
template<typename MatrixType>
template<typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>
void BDCSVD<MatrixType, Options>::copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naiveV)
void BDCSVD<MatrixType>::copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naiveV)
{
// Note exchange of U and V: m_matrixU is set from m_naiveV and vice versa
if (computeU())
{
Index Ucols = ShouldComputeThinU ? m_diagSize : householderU.cols();
Index Ucols = m_computeThinU ? m_diagSize : householderU.cols();
m_matrixU = MatrixX::Identity(householderU.cols(), Ucols);
m_matrixU.topLeftCorner(m_diagSize, m_diagSize) = naiveV.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);
householderU.applyThisOnTheLeft(m_matrixU); // FIXME this line involves a temporary buffer
}
if (computeV())
{
Index Vcols = ShouldComputeThinV ? m_diagSize : householderV.cols();
Index Vcols = m_computeThinV ? m_diagSize : householderV.cols();
m_matrixV = MatrixX::Identity(householderV.cols(), Vcols);
m_matrixV.topLeftCorner(m_diagSize, m_diagSize) = naiveU.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);
householderV.applyThisOnTheLeft(m_matrixV); // FIXME this line involves a temporary buffer
@ -346,8 +362,8 @@ void BDCSVD<MatrixType, Options>::copyUV(const HouseholderU &householderU, const
* We can thus pack them prior to the matrix product. However, this is only worth the effort if the matrix is large
* enough.
*/
template<typename MatrixType, int Options>
void BDCSVD<MatrixType, Options>::structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1)
template<typename MatrixType>
void BDCSVD<MatrixType>::structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1)
{
Index n = A.rows();
if(n>100)
@ -387,26 +403,7 @@ void BDCSVD<MatrixType, Options>::structured_update(Block<MatrixXr,Dynamic,Dynam
}
}
template<typename MatrixType, int Options>
template<typename SVDType>
void BDCSVD<MatrixType, Options>::computeBaseCase(SVDType& svd, Index n, Index firstCol, Index firstRowW, Index firstColW, Index shift)
{
svd.compute(m_computed.block(firstCol, firstCol, n + 1, n));
m_info = svd.info();
if (m_info != Success && m_info != NoConvergence) return;
if (m_compU)
m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() = svd.matrixU();
else
{
m_naiveU.row(0).segment(firstCol, n + 1).real() = svd.matrixU().row(0);
m_naiveU.row(1).segment(firstCol, n + 1).real() = svd.matrixU().row(n);
}
if (m_compV) m_naiveV.block(firstRowW, firstColW, n, n).real() = svd.matrixV();
m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero();
m_computed.diagonal().segment(firstCol + shift, n) = svd.singularValues().head(n);
}
// The divide algorithm is done "in place", we are always working on subsets of the same matrix. The divide method takes as argument the
// place of the submatrix we are currently working on.
//@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU;
@ -416,8 +413,8 @@ void BDCSVD<MatrixType, Options>::computeBaseCase(SVDType& svd, Index n, Index f
//@param firstColW : Same as firstRowW with the column.
//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix
// to become the first column (*coeff) and to shift all the other columns to the right. There are more details in the reference paper.
template<typename MatrixType, int Options>
void BDCSVD<MatrixType, Options>::divide(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift)
template<typename MatrixType>
void BDCSVD<MatrixType>::divide(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift)
{
// requires rows = cols + 1;
using std::pow;
@ -435,17 +432,20 @@ void BDCSVD<MatrixType, Options>::divide(Eigen::Index firstCol, Eigen::Index las
// matrices.
if (n < m_algoswap)
{
// FIXME this block involves temporaries
if (m_compV)
{
JacobiSVD<MatrixXr, ComputeFullU | ComputeFullV> baseSvd;
computeBaseCase(baseSvd, n, firstCol, firstRowW, firstColW, shift);
}
// FIXME this line involves temporaries
JacobiSVD<MatrixXr> b(m_computed.block(firstCol, firstCol, n + 1, n), ComputeFullU | (m_compV ? ComputeFullV : 0));
m_info = b.info();
if (m_info != Success && m_info != NoConvergence) return;
if (m_compU)
m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() = b.matrixU();
else
{
JacobiSVD<MatrixXr, ComputeFullU> baseSvd;
computeBaseCase(baseSvd, n, firstCol, firstRowW, firstColW, shift);
m_naiveU.row(0).segment(firstCol, n + 1).real() = b.matrixU().row(0);
m_naiveU.row(1).segment(firstCol, n + 1).real() = b.matrixU().row(n);
}
if (m_compV) m_naiveV.block(firstRowW, firstColW, n, n).real() = b.matrixV();
m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero();
m_computed.diagonal().segment(firstCol + shift, n) = b.singularValues().head(n);
return;
}
// We use the divide and conquer algorithm
@ -597,8 +597,8 @@ void BDCSVD<MatrixType, Options>::divide(Eigen::Index firstCol, Eigen::Index las
// TODO Opportunities for optimization: better root finding algo, better stopping criterion, better
// handling of round-off errors, be consistent in ordering
// For instance, to solve the secular equation using FMM, see http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf
template <typename MatrixType, int Options>
void BDCSVD<MatrixType, Options>::computeSVDofM(Eigen::Index firstCol, Eigen::Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V)
template <typename MatrixType>
void BDCSVD<MatrixType>::computeSVDofM(Eigen::Index firstCol, Eigen::Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V)
{
const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
using std::abs;
@ -725,8 +725,8 @@ void BDCSVD<MatrixType, Options>::computeSVDofM(Eigen::Index firstCol, Eigen::In
#endif
}
template <typename MatrixType, int Options>
typename BDCSVD<MatrixType, Options>::RealScalar BDCSVD<MatrixType, Options>::secularEq(RealScalar mu, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift)
template <typename MatrixType>
typename BDCSVD<MatrixType>::RealScalar BDCSVD<MatrixType>::secularEq(RealScalar mu, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift)
{
Index m = perm.size();
RealScalar res = Literal(1);
@ -741,9 +741,9 @@ typename BDCSVD<MatrixType, Options>::RealScalar BDCSVD<MatrixType, Options>::se
}
template <typename MatrixType, int Options>
void BDCSVD<MatrixType, Options>::computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm,
VectorType& singVals, ArrayRef shifts, ArrayRef mus)
template <typename MatrixType>
void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm,
VectorType& singVals, ArrayRef shifts, ArrayRef mus)
{
using std::abs;
using std::swap;
@ -987,8 +987,8 @@ void BDCSVD<MatrixType, Options>::computeSingVals(const ArrayRef& col0, const Ar
// zhat is perturbation of col0 for which singular vectors can be computed stably (see Section 3.1)
template <typename MatrixType, int Options>
void BDCSVD<MatrixType, Options>::perturbCol0
template <typename MatrixType>
void BDCSVD<MatrixType>::perturbCol0
(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals,
const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat)
{
@ -1067,8 +1067,8 @@ void BDCSVD<MatrixType, Options>::perturbCol0
}
// compute singular vectors
template <typename MatrixType, int Options>
void BDCSVD<MatrixType, Options>::computeSingVecs
template <typename MatrixType>
void BDCSVD<MatrixType>::computeSingVecs
(const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals,
const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V)
{
@ -1113,8 +1113,8 @@ void BDCSVD<MatrixType, Options>::computeSingVecs
// page 12_13
// i >= 1, di almost null and zi non null.
// We use a rotation to zero out zi applied to the left of M
template <typename MatrixType, int Options>
void BDCSVD<MatrixType, Options>::deflation43(Eigen::Index firstCol, Eigen::Index shift, Eigen::Index i, Eigen::Index size)
template <typename MatrixType>
void BDCSVD<MatrixType>::deflation43(Eigen::Index firstCol, Eigen::Index shift, Eigen::Index i, Eigen::Index size)
{
using std::abs;
using std::sqrt;
@ -1142,8 +1142,8 @@ void BDCSVD<MatrixType, Options>::deflation43(Eigen::Index firstCol, Eigen::Inde
// i,j >= 1, i!=j and |di - dj| < epsilon * norm2(M)
// We apply two rotations to have zj = 0;
// TODO deflation44 is still broken and not properly tested
template <typename MatrixType, int Options>
void BDCSVD<MatrixType, Options>::deflation44(Eigen::Index firstColu , Eigen::Index firstColm, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index i, Eigen::Index j, Eigen::Index size)
template <typename MatrixType>
void BDCSVD<MatrixType>::deflation44(Eigen::Index firstColu , Eigen::Index firstColm, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index i, Eigen::Index j, Eigen::Index size)
{
using std::abs;
using std::sqrt;
@ -1182,8 +1182,8 @@ void BDCSVD<MatrixType, Options>::deflation44(Eigen::Index firstColu , Eigen::In
// acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive]
template <typename MatrixType, int Options>
void BDCSVD<MatrixType, Options>::deflation(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index k, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift)
template <typename MatrixType>
void BDCSVD<MatrixType>::deflation(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index k, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift)
{
using std::sqrt;
using std::abs;
@ -1361,11 +1361,10 @@ void BDCSVD<MatrixType, Options>::deflation(Eigen::Index firstCol, Eigen::Index
* \sa class BDCSVD
*/
template<typename Derived>
template<int Options>
BDCSVD<typename MatrixBase<Derived>::PlainObject, Options>
MatrixBase<Derived>::bdcSvd() const
BDCSVD<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::bdcSvd(unsigned int computationOptions) const
{
return BDCSVD<PlainObject, Options>(*this);
return BDCSVD<PlainObject>(*this, computationOptions);
}
} // end namespace Eigen
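Pulling the restored BDCSVD interface together, a minimal least-squares sketch under the reverted API (sizes are arbitrary; thin unitaries are what solve() needs):

    Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 4);
    Eigen::VectorXd b = Eigen::VectorXd::Random(100);
    Eigen::BDCSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
    if (svd.info() == Eigen::Success) {
      Eigen::VectorXd x = svd.solve(b);   // least-squares solution of A*x = b
    }
    svd.compute(A);                                             // reuses the options given above
    svd.compute(A, Eigen::ComputeFullU | Eigen::ComputeFullV);  // or overrides them per call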

@ -13,20 +13,12 @@
#include "./InternalHeaderCheck.h"
namespace Eigen {
namespace internal {
enum OptionsMasks {
QRPreconditionerBits = NoQRPreconditioner |
HouseholderQRPreconditioner |
ColPivHouseholderQRPreconditioner |
FullPivHouseholderQRPreconditioner
};
// forward declaration (needed by ICC)
// the empty body is required by MSVC
template<typename MatrixType, int Options,
template<typename MatrixType, int QRPreconditioner,
bool IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
struct svd_precondition_2x2_block_to_be_real {};
@ -54,16 +46,16 @@ struct qr_preconditioner_should_do_anything
};
};
template<typename MatrixType, int Options, int QRPreconditioner, int Case,
template<typename MatrixType, int QRPreconditioner, int Case,
bool DoAnything = qr_preconditioner_should_do_anything<MatrixType, QRPreconditioner, Case>::ret
> struct qr_preconditioner_impl {};
template<typename MatrixType, int Options, int QRPreconditioner, int Case>
class qr_preconditioner_impl<MatrixType, Options, QRPreconditioner, Case, false>
template<typename MatrixType, int QRPreconditioner, int Case>
class qr_preconditioner_impl<MatrixType, QRPreconditioner, Case, false>
{
public:
void allocate(const JacobiSVD<MatrixType, Options>&) {}
bool run(JacobiSVD<MatrixType, Options>&, const MatrixType&)
void allocate(const JacobiSVD<MatrixType, QRPreconditioner>&) {}
bool run(JacobiSVD<MatrixType, QRPreconditioner>&, const MatrixType&)
{
return false;
}
@ -71,71 +63,65 @@ public:
/*** preconditioner using FullPivHouseholderQR ***/
template<typename MatrixType, int Options>
class qr_preconditioner_impl<MatrixType, Options, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
{
public:
typedef typename MatrixType::Scalar Scalar;
typedef JacobiSVD<MatrixType, Options> SVDType;
enum
{
WorkspaceSize = MatrixType::RowsAtCompileTime,
MaxWorkspaceSize = MatrixType::MaxRowsAtCompileTime
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime
};
typedef Matrix<Scalar, 1, RowsAtCompileTime, RowMajor, 1, MaxRowsAtCompileTime> WorkspaceType;
typedef Matrix<Scalar, 1, WorkspaceSize, RowMajor, 1, MaxWorkspaceSize> WorkspaceType;
void allocate(const SVDType& svd)
void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
{
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.rows(), svd.cols());
}
if (svd.ShouldComputeFullU) m_workspace.resize(svd.rows());
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
}
bool run(SVDType& svd, const MatrixType& matrix)
bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.rows() > matrix.cols())
{
m_qr.compute(matrix);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
if(svd.ShouldComputeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace);
if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace);
if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
return true;
}
return false;
}
private:
typedef FullPivHouseholderQR<MatrixType> QRType;
QRType m_qr;
WorkspaceType m_workspace;
};
template<typename MatrixType, int Options>
class qr_preconditioner_impl<MatrixType, Options, FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
{
public:
typedef typename MatrixType::Scalar Scalar;
typedef JacobiSVD<MatrixType, Options> SVDType;
enum
{
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
MatrixOptions = MatrixType::Options
Options = MatrixType::Options
};
typedef typename internal::make_proper_matrix_type<
Scalar, ColsAtCompileTime, RowsAtCompileTime, MatrixOptions, MaxColsAtCompileTime, MaxRowsAtCompileTime
Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime
>::type TransposeTypeWithSameStorageOrder;
void allocate(const SVDType& svd)
void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
{
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
{
@ -143,66 +129,54 @@ public:
::new (&m_qr) QRType(svd.cols(), svd.rows());
}
m_adjoint.resize(svd.cols(), svd.rows());
if (svd.ShouldComputeFullV) m_workspace.resize(svd.cols());
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
}
bool run(SVDType& svd, const MatrixType& matrix)
bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.cols() > matrix.rows())
{
m_adjoint = matrix.adjoint();
m_qr.compute(m_adjoint);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
if(svd.ShouldComputeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace);
if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace);
if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
return true;
}
else return false;
}
private:
typedef FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
QRType m_qr;
TransposeTypeWithSameStorageOrder m_adjoint;
typename plain_row_type<MatrixType>::type m_workspace;
typename internal::plain_row_type<MatrixType>::type m_workspace;
};
/*** preconditioner using ColPivHouseholderQR ***/
template<typename MatrixType, int Options>
class qr_preconditioner_impl<MatrixType, Options, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
{
public:
typedef typename MatrixType::Scalar Scalar;
typedef JacobiSVD<MatrixType, Options> SVDType;
enum
{
WorkspaceSize = internal::traits<SVDType>::MatrixUColsAtCompileTime,
MaxWorkspaceSize = internal::traits<SVDType>::MatrixUMaxColsAtCompileTime
};
typedef Matrix<Scalar, 1, WorkspaceSize, RowMajor, 1, MaxWorkspaceSize> WorkspaceType;
void allocate(const SVDType& svd)
void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
{
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.rows(), svd.cols());
}
if (svd.ShouldComputeFullU) m_workspace.resize(svd.rows());
else if (svd.ShouldComputeThinU) m_workspace.resize(svd.cols());
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
}
bool run(SVDType& svd, const MatrixType& matrix)
bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.rows() > matrix.cols())
{
m_qr.compute(matrix);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
if(svd.ShouldComputeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
else if(svd.ShouldComputeThinU)
if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
else if(svd.m_computeThinU)
{
svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
@ -216,46 +190,40 @@ public:
private:
typedef ColPivHouseholderQR<MatrixType> QRType;
QRType m_qr;
WorkspaceType m_workspace;
typename internal::plain_col_type<MatrixType>::type m_workspace;
};
template<typename MatrixType, int Options>
class qr_preconditioner_impl<MatrixType, Options, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
{
public:
typedef typename MatrixType::Scalar Scalar;
typedef JacobiSVD<MatrixType, Options> SVDType;
enum
{
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
MatrixOptions = MatrixType::Options,
WorkspaceSize = internal::traits<SVDType>::MatrixVColsAtCompileTime,
MaxWorkspaceSize = internal::traits<SVDType>::MatrixVMaxColsAtCompileTime
Options = MatrixType::Options
};
typedef Matrix<Scalar, WorkspaceSize, 1, ColMajor, MaxWorkspaceSize, 1> WorkspaceType;
typedef typename internal::make_proper_matrix_type<
Scalar, ColsAtCompileTime, RowsAtCompileTime, MatrixOptions, MaxColsAtCompileTime, MaxRowsAtCompileTime
Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime
>::type TransposeTypeWithSameStorageOrder;
void allocate(const SVDType& svd)
void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
{
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.cols(), svd.rows());
}
if (svd.ShouldComputeFullV) m_workspace.resize(svd.cols());
else if (svd.ShouldComputeThinV) m_workspace.resize(svd.rows());
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
m_adjoint.resize(svd.cols(), svd.rows());
}
bool run(SVDType& svd, const MatrixType& matrix)
bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.cols() > matrix.rows())
{
@ -263,8 +231,8 @@ public:
m_qr.compute(m_adjoint);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
if(svd.ShouldComputeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
else if(svd.ShouldComputeThinV)
if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
else if(svd.m_computeThinV)
{
svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
@ -279,45 +247,34 @@ private:
typedef ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
QRType m_qr;
TransposeTypeWithSameStorageOrder m_adjoint;
WorkspaceType m_workspace;
typename internal::plain_row_type<MatrixType>::type m_workspace;
};
/*** preconditioner using HouseholderQR ***/
template<typename MatrixType, int Options>
class qr_preconditioner_impl<MatrixType, Options, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
{
public:
typedef typename MatrixType::Scalar Scalar;
typedef JacobiSVD<MatrixType, Options> SVDType;
enum
{
WorkspaceSize = internal::traits<SVDType>::MatrixUColsAtCompileTime,
MaxWorkspaceSize = internal::traits<SVDType>::MatrixUMaxColsAtCompileTime
};
typedef Matrix<Scalar, 1, WorkspaceSize, RowMajor, 1, MaxWorkspaceSize> WorkspaceType;
void allocate(const SVDType& svd)
void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
{
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.rows(), svd.cols());
}
if (svd.ShouldComputeFullU) m_workspace.resize(svd.rows());
else if (svd.ShouldComputeThinU) m_workspace.resize(svd.cols());
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
}
bool run(SVDType& svd, const MatrixType& matrix)
bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.rows() > matrix.cols())
{
m_qr.compute(matrix);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
if(svd.ShouldComputeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
else if(svd.ShouldComputeThinU)
if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
else if(svd.m_computeThinU)
{
svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
@ -327,50 +284,43 @@ public:
}
return false;
}
private:
typedef HouseholderQR<MatrixType> QRType;
QRType m_qr;
WorkspaceType m_workspace;
typename internal::plain_col_type<MatrixType>::type m_workspace;
};
template<typename MatrixType, int Options>
class qr_preconditioner_impl<MatrixType, Options, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
{
public:
typedef typename MatrixType::Scalar Scalar;
typedef JacobiSVD<MatrixType, Options> SVDType;
enum
{
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
MatrixOptions = MatrixType::Options,
WorkspaceSize = internal::traits<SVDType>::MatrixVColsAtCompileTime,
MaxWorkspaceSize = internal::traits<SVDType>::MatrixVMaxColsAtCompileTime
Options = MatrixType::Options
};
typedef Matrix<Scalar, WorkspaceSize, 1, ColMajor, MaxWorkspaceSize, 1> WorkspaceType;
typedef typename internal::make_proper_matrix_type<
Scalar, ColsAtCompileTime, RowsAtCompileTime, MatrixOptions, MaxColsAtCompileTime, MaxRowsAtCompileTime
Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime
>::type TransposeTypeWithSameStorageOrder;
void allocate(const SVDType& svd)
void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
{
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.cols(), svd.rows());
}
if (svd.ShouldComputeFullV) m_workspace.resize(svd.cols());
else if (svd.ShouldComputeThinV) m_workspace.resize(svd.rows());
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
m_adjoint.resize(svd.cols(), svd.rows());
}
bool run(SVDType& svd, const MatrixType& matrix)
bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.cols() > matrix.rows())
{
@ -378,8 +328,8 @@ public:
m_qr.compute(m_adjoint);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
if(svd.ShouldComputeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
else if(svd.ShouldComputeThinV)
if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
else if(svd.m_computeThinV)
{
svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
@ -394,7 +344,7 @@ private:
typedef HouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
QRType m_qr;
TransposeTypeWithSameStorageOrder m_adjoint;
WorkspaceType m_workspace;
typename internal::plain_row_type<MatrixType>::type m_workspace;
};
/*** 2x2 SVD implementation
@ -402,18 +352,18 @@ private:
*** JacobiSVD consists in performing a series of 2x2 SVD subproblems
***/
template<typename MatrixType, int Options>
struct svd_precondition_2x2_block_to_be_real<MatrixType, Options, false>
template<typename MatrixType, int QRPreconditioner>
struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, false>
{
typedef JacobiSVD<MatrixType, Options> SVD;
typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
typedef typename MatrixType::RealScalar RealScalar;
static bool run(typename SVD::WorkMatrixType&, SVD&, Index, Index, RealScalar&) { return true; }
};
template<typename MatrixType, int Options>
struct svd_precondition_2x2_block_to_be_real<MatrixType, Options, true>
template<typename MatrixType, int QRPreconditioner>
struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
{
typedef JacobiSVD<MatrixType, Options> SVD;
typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
static bool run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q, RealScalar& maxDiagEntry)
@ -475,9 +425,9 @@ struct svd_precondition_2x2_block_to_be_real<MatrixType, Options, true>
}
};
template<typename MatrixType_, int Options>
struct traits<JacobiSVD<MatrixType_,Options> >
: svd_traits<MatrixType_, Options>
template<typename MatrixType_, int QRPreconditioner>
struct traits<JacobiSVD<MatrixType_,QRPreconditioner> >
: traits<MatrixType_>
{
typedef MatrixType_ MatrixType;
};
@ -492,9 +442,8 @@ struct traits<JacobiSVD<MatrixType_,Options> >
* \brief Two-sided Jacobi SVD decomposition of a rectangular matrix
*
* \tparam MatrixType_ the type of the matrix of which we are computing the SVD decomposition
* \tparam Options this optional parameter allows one to specify the type of QR decomposition that will be used internally
* for the R-SVD step for non-square matrices. Additionally, it allows one to specify whether to compute
* thin or full unitaries \a U and \a V. See discussion of possible values below.
* \tparam QRPreconditioner this optional parameter allows one to specify the type of QR decomposition that will be used internally
* for the R-SVD step for non-square matrices. See discussion of possible values below.
*
* SVD decomposition consists in decomposing any n-by-p matrix \a A as a product
* \f[ A = U S V^* \f]
@ -523,7 +472,7 @@ struct traits<JacobiSVD<MatrixType_,Options> >
* If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
* terminate in finite (and reasonable) time.
*
* The possible QR preconditioners that can be set with Options template parameter are:
* The possible values for QRPreconditioner are:
* \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR.
* \li FullPivHouseholderQRPreconditioner is the safest and slowest. It uses full-pivoting QR.
* Contrary to other QRs, it doesn't allow computing thin unitaries.
@ -536,16 +485,10 @@ struct traits<JacobiSVD<MatrixType_,Options> >
* faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD is always checking
* if QR preconditioning is needed before applying it anyway.
*
* One may also use the Options template parameter to specify how the unitaries should be computed. The options are #ComputeThinU,
* #ComputeThinV, #ComputeFullU, #ComputeFullV. It is not possible to request both a thin and full unitary.
* So, it is not possible to use ComputeThinU | ComputeFullU or ComputeThinV | ComputeFullV. By default, unitaries will not be computed.
*
* You can set the QRPreconditioner and unitary options together: JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner | ComputeThinU | ComputeFullV>
*
* \sa MatrixBase::jacobiSvd()
*/
template<typename MatrixType_, int Options> class JacobiSVD
: public SVDBase<JacobiSVD<MatrixType_,Options> >
template<typename MatrixType_, int QRPreconditioner> class JacobiSVD
: public SVDBase<JacobiSVD<MatrixType_,QRPreconditioner> >
{
typedef SVDBase<JacobiSVD> Base;
public:
@ -554,7 +497,6 @@ template<typename MatrixType_, int Options> class JacobiSVD
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
enum {
QRPreconditioner = Options & internal::QRPreconditionerBits,
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
@ -567,6 +509,9 @@ template<typename MatrixType_, int Options> class JacobiSVD
typedef typename Base::MatrixUType MatrixUType;
typedef typename Base::MatrixVType MatrixVType;
typedef typename Base::SingularValuesType SingularValuesType;
typedef typename internal::plain_row_type<MatrixType>::type RowType;
typedef typename internal::plain_col_type<MatrixType>::type ColType;
typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
WorkMatrixType;
@ -579,31 +524,55 @@ template<typename MatrixType_, int Options> class JacobiSVD
JacobiSVD()
{}
/** \brief Default Constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem size.
* \sa JacobiSVD()
*/
JacobiSVD(Index rows, Index cols)
JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
{
allocate(rows, cols);
allocate(rows, cols, computationOptions);
}
/** \brief Constructor performing the decomposition of given matrix.
*
* \param matrix the matrix to decompose
* \param computationOptions optional parameter allowing one to specify if you want full or thin U or V unitaries to be computed.
* By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
* #ComputeFullV, #ComputeThinV.
*
* Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
* available with the (non-default) FullPivHouseholderQR preconditioner.
*/
explicit JacobiSVD(const MatrixType& matrix)
explicit JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
{
compute(matrix);
compute(matrix, computationOptions);
}
/** \brief Method performing the decomposition of given matrix using custom options.
*
* \param matrix the matrix to decompose
* \param computationOptions optional parameter allowing one to specify if you want full or thin U or V unitaries to be computed.
* By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
* #ComputeFullV, #ComputeThinV.
*
* Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
* available with the (non-default) FullPivHouseholderQR preconditioner.
*/
JacobiSVD& compute(const MatrixType& matrix);
JacobiSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
/** \brief Method performing the decomposition of given matrix using current options.
*
* \param matrix the matrix to decompose
*
* This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
*/
JacobiSVD& compute(const MatrixType& matrix)
{
return compute(matrix, m_computationOptions);
}
using Base::computeU;
using Base::computeV;
@ -612,7 +581,7 @@ template<typename MatrixType_, int Options> class JacobiSVD
using Base::rank;
private:
void allocate(Index rows, Index cols);
void allocate(Index rows, Index cols, unsigned int computationOptions);
protected:
using Base::m_matrixU;
@ -622,49 +591,84 @@ template<typename MatrixType_, int Options> class JacobiSVD
using Base::m_isInitialized;
using Base::m_isAllocated;
using Base::m_usePrescribedThreshold;
using Base::m_computeFullU;
using Base::m_computeThinU;
using Base::m_computeFullV;
using Base::m_computeThinV;
using Base::m_computationOptions;
using Base::m_nonzeroSingularValues;
using Base::m_rows;
using Base::m_cols;
using Base::m_diagSize;
using Base::m_prescribedThreshold;
using Base::ShouldComputeFullU;
using Base::ShouldComputeThinU;
using Base::ShouldComputeFullV;
using Base::ShouldComputeThinV;
WorkMatrixType m_workMatrix;
EIGEN_STATIC_ASSERT(EIGEN_IMPLIES((ShouldComputeThinU != 0 || ShouldComputeThinV != 0), QRPreconditioner != FullPivHouseholderQRPreconditioner),
"JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
"Use the ColPivHouseholderQR preconditioner instead.")
template<typename MatrixType__, int Options_, bool IsComplex_>
template<typename MatrixType__, int QRPreconditioner_, bool IsComplex_>
friend struct internal::svd_precondition_2x2_block_to_be_real;
template<typename MatrixType__, int Options_, int QRPreconditioner_, int Case_, bool DoAnything_>
template<typename MatrixType__, int QRPreconditioner_, int Case_, bool DoAnything_>
friend struct internal::qr_preconditioner_impl;
internal::qr_preconditioner_impl<MatrixType, Options, QRPreconditioner, internal::PreconditionIfMoreColsThanRows> m_qr_precond_morecols;
internal::qr_preconditioner_impl<MatrixType, Options, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols> m_qr_precond_morerows;
internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows> m_qr_precond_morecols;
internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols> m_qr_precond_morerows;
MatrixType m_scaledMatrix;
};
template<typename MatrixType, int Options>
void JacobiSVD<MatrixType, Options>::allocate(Eigen::Index rows, Eigen::Index cols)
template<typename MatrixType, int QRPreconditioner>
void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Eigen::Index rows, Eigen::Index cols, unsigned int computationOptions)
{
if (Base::allocate(rows, cols))
return;
eigen_assert(rows >= 0 && cols >= 0);
if (m_isAllocated &&
rows == m_rows &&
cols == m_cols &&
computationOptions == m_computationOptions)
{
return;
}
m_rows = rows;
m_cols = cols;
m_info = Success;
m_isInitialized = false;
m_isAllocated = true;
m_computationOptions = computationOptions;
m_computeFullU = (computationOptions & ComputeFullU) != 0;
m_computeThinU = (computationOptions & ComputeThinU) != 0;
m_computeFullV = (computationOptions & ComputeFullV) != 0;
m_computeThinV = (computationOptions & ComputeThinV) != 0;
eigen_assert(!(m_computeFullU && m_computeThinU) && "JacobiSVD: you can't ask for both full and thin U");
eigen_assert(!(m_computeFullV && m_computeThinV) && "JacobiSVD: you can't ask for both full and thin V");
eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
"JacobiSVD: thin U and V are only available when your matrix has a dynamic number of columns.");
if (QRPreconditioner == FullPivHouseholderQRPreconditioner)
{
eigen_assert(!(m_computeThinU || m_computeThinV) &&
"JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
"Use the ColPivHouseholderQR preconditioner instead.");
}
m_diagSize = (std::min)(m_rows, m_cols);
m_singularValues.resize(m_diagSize);
if(RowsAtCompileTime==Dynamic)
m_matrixU.resize(m_rows, m_computeFullU ? m_rows
: m_computeThinU ? m_diagSize
: 0);
if(ColsAtCompileTime==Dynamic)
m_matrixV.resize(m_cols, m_computeFullV ? m_cols
: m_computeThinV ? m_diagSize
: 0);
m_workMatrix.resize(m_diagSize, m_diagSize);
if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this);
if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this);
if(m_rows!=m_cols) m_scaledMatrix.resize(rows,cols);
}
template<typename MatrixType, int Options>
JacobiSVD<MatrixType, Options>&
JacobiSVD<MatrixType, Options>::compute(const MatrixType& matrix)
template<typename MatrixType, int QRPreconditioner>
JacobiSVD<MatrixType, QRPreconditioner>&
JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
{
using std::abs;
allocate(matrix.rows(), matrix.cols());
allocate(matrix.rows(), matrix.cols(), computationOptions);
// currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
// only worsening the precision of U and V as we accumulate more rotations
@ -693,10 +697,10 @@ JacobiSVD<MatrixType, Options>::compute(const MatrixType& matrix)
else
{
m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize) / scale;
if(ShouldComputeFullU) m_matrixU.setIdentity(m_rows,m_rows);
if(ShouldComputeThinU) m_matrixU.setIdentity(m_rows,m_diagSize);
if(ShouldComputeFullV) m_matrixV.setIdentity(m_cols,m_cols);
if(ShouldComputeThinV) m_matrixV.setIdentity(m_cols, m_diagSize);
if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows);
if(m_computeThinU) m_matrixU.setIdentity(m_rows,m_diagSize);
if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols);
if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize);
}
/*** step 2. The main Jacobi SVD iteration. ***/
@ -722,7 +726,7 @@ JacobiSVD<MatrixType, Options>::compute(const MatrixType& matrix)
finished = false;
// perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal
// the complex to real operation returns true if the updated 2x2 block is not already diagonal
if(internal::svd_precondition_2x2_block_to_be_real<MatrixType, Options>::run(m_workMatrix, *this, p, q, maxDiagEntry))
if(internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q, maxDiagEntry))
{
JacobiRotation<RealScalar> j_left, j_right;
internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
@ -799,11 +803,10 @@ JacobiSVD<MatrixType, Options>::compute(const MatrixType& matrix)
* \sa class JacobiSVD
*/
template<typename Derived>
template<int Options>
JacobiSVD<typename MatrixBase<Derived>::PlainObject, Options>
MatrixBase<Derived>::jacobiSvd() const
JacobiSVD<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::jacobiSvd(unsigned int computationOptions) const
{
return JacobiSVD<PlainObject, Options>(*this);
return JacobiSVD<PlainObject>(*this, computationOptions);
}
} // end namespace Eigen
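A short round-trip sketch for the restored JacobiSVD interface (full unitaries so the factors can be re-multiplied; the 5x3 size is an arbitrary assumption):

    Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);
    Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeFullU | Eigen::ComputeFullV);
    Eigen::MatrixXd S = Eigen::MatrixXd::Zero(5, 3);
    S.diagonal() = svd.singularValues();
    Eigen::MatrixXd back = svd.matrixU() * S * svd.matrixV().adjoint();   // back ~= A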

@ -39,15 +39,15 @@ namespace Eigen {
/** \internal Specialization for the data types supported by LAPACKe */
#define EIGEN_LAPACKE_SVD(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_PREFIX, EIGCOLROW, LAPACKE_COLROW, OPTIONS) \
#define EIGEN_LAPACKE_SVD(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_PREFIX, EIGCOLROW, LAPACKE_COLROW) \
template<> inline \
JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, OPTIONS>& \
JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, OPTIONS>::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>& matrix) \
JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>& \
JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>& matrix, unsigned int computationOptions) \
{ \
typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> MatrixType; \
/*typedef MatrixType::Scalar Scalar;*/ \
/*typedef MatrixType::RealScalar RealScalar;*/ \
allocate(matrix.rows(), matrix.cols()); \
allocate(matrix.rows(), matrix.cols(), computationOptions); \
\
/*const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();*/ \
m_nonzeroSingularValues = m_diagSize; \
@ -56,14 +56,14 @@ JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, OPTION
lapack_int matrix_order = LAPACKE_COLROW; \
char jobu, jobvt; \
LAPACKE_TYPE *u, *vt, dummy; \
jobu = (ShouldComputeFullU) ? 'A' : (ShouldComputeThinU) ? 'S' : 'N'; \
jobvt = (ShouldComputeFullV) ? 'A' : (ShouldComputeThinV) ? 'S' : 'N'; \
jobu = (m_computeFullU) ? 'A' : (m_computeThinU) ? 'S' : 'N'; \
jobvt = (m_computeFullV) ? 'A' : (m_computeThinV) ? 'S' : 'N'; \
if (computeU()) { \
ldu = internal::convert_index<lapack_int>(m_matrixU.outerStride()); \
u = (LAPACKE_TYPE*)m_matrixU.data(); \
} else { ldu=1; u=&dummy; }\
MatrixType localV; \
lapack_int vt_rows = (ShouldComputeFullV) ? internal::convert_index<lapack_int>(m_cols) : (ShouldComputeThinV) ? internal::convert_index<lapack_int>(m_diagSize) : 1; \
lapack_int vt_rows = (m_computeFullV) ? internal::convert_index<lapack_int>(m_cols) : (m_computeThinV) ? internal::convert_index<lapack_int>(m_diagSize) : 1; \
if (computeV()) { \
localV.resize(vt_rows, m_cols); \
ldvt = internal::convert_index<lapack_int>(localV.outerStride()); \
@ -78,26 +78,15 @@ JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, OPTION
return *this; \
}
#define EIGEN_LAPACK_SVD_OPTIONS(OPTIONS) \
EIGEN_LAPACKE_SVD(double, double, double, d, ColMajor, LAPACK_COL_MAJOR, OPTIONS) \
EIGEN_LAPACKE_SVD(float, float, float , s, ColMajor, LAPACK_COL_MAJOR, OPTIONS) \
EIGEN_LAPACKE_SVD(dcomplex, lapack_complex_double, double, z, ColMajor, LAPACK_COL_MAJOR, OPTIONS) \
EIGEN_LAPACKE_SVD(scomplex, lapack_complex_float, float , c, ColMajor, LAPACK_COL_MAJOR, OPTIONS) \
\
EIGEN_LAPACKE_SVD(double, double, double, d, RowMajor, LAPACK_ROW_MAJOR, OPTIONS) \
EIGEN_LAPACKE_SVD(float, float, float , s, RowMajor, LAPACK_ROW_MAJOR, OPTIONS) \
EIGEN_LAPACKE_SVD(dcomplex, lapack_complex_double, double, z, RowMajor, LAPACK_ROW_MAJOR, OPTIONS) \
EIGEN_LAPACKE_SVD(scomplex, lapack_complex_float, float , c, RowMajor, LAPACK_ROW_MAJOR, OPTIONS)
EIGEN_LAPACKE_SVD(double, double, double, d, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SVD(float, float, float , s, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SVD(dcomplex, lapack_complex_double, double, z, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SVD(scomplex, lapack_complex_float, float , c, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACK_SVD_OPTIONS(0)
EIGEN_LAPACK_SVD_OPTIONS(ComputeThinU)
EIGEN_LAPACK_SVD_OPTIONS(ComputeThinV)
EIGEN_LAPACK_SVD_OPTIONS(ComputeFullU)
EIGEN_LAPACK_SVD_OPTIONS(ComputeFullV)
EIGEN_LAPACK_SVD_OPTIONS(ComputeThinU | ComputeThinV)
EIGEN_LAPACK_SVD_OPTIONS(ComputeFullU | ComputeFullV)
EIGEN_LAPACK_SVD_OPTIONS(ComputeThinU | ComputeFullV)
EIGEN_LAPACK_SVD_OPTIONS(ComputeFullU | ComputeThinV)
EIGEN_LAPACKE_SVD(double, double, double, d, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SVD(float, float, float , s, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SVD(dcomplex, lapack_complex_double, double, z, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SVD(scomplex, lapack_complex_float, float , c, RowMajor, LAPACK_ROW_MAJOR)
} // end namespace Eigen
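These specializations only take effect when the LAPACKE backend is compiled in; a hedged sketch of how a call would route through them (assuming the usual EIGEN_USE_LAPACKE switch, a LAPACKE/BLAS link line, and that the macro body ultimately calls LAPACKE's gesvd, which is not shown in this hunk):

    // Build with -DEIGEN_USE_LAPACKE and link against LAPACKE/BLAS.
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(200, 50);
    // With the specialization above, compute() is forwarded to the LAPACKE routine
    // instead of Eigen's built-in Jacobi iterations.
    Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);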

@ -21,7 +21,6 @@
namespace Eigen {
namespace internal {
template<typename Derived> struct traits<SVDBase<Derived> >
: traits<Derived>
{
@ -30,32 +29,6 @@ template<typename Derived> struct traits<SVDBase<Derived> >
typedef int StorageIndex;
enum { Flags = 0 };
};
template<typename MatrixType, int Options>
struct svd_traits : traits<MatrixType>
{
enum {
ShouldComputeFullU = Options & ComputeFullU,
ShouldComputeThinU = Options & ComputeThinU,
ShouldComputeFullV = Options & ComputeFullV,
ShouldComputeThinV = Options & ComputeThinV,
DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime,MatrixType::ColsAtCompileTime),
MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::MaxRowsAtCompileTime,MatrixType::MaxColsAtCompileTime),
MatrixUColsAtCompileTime = ShouldComputeFullU ? MatrixType::RowsAtCompileTime
: ShouldComputeThinU ? DiagSizeAtCompileTime
: Dynamic,
MatrixVColsAtCompileTime = ShouldComputeFullV ? MatrixType::ColsAtCompileTime
: ShouldComputeThinV ? DiagSizeAtCompileTime
: Dynamic,
MatrixUMaxColsAtCompileTime = ShouldComputeFullU ? MatrixType::MaxRowsAtCompileTime
: ShouldComputeThinU ? MaxDiagSizeAtCompileTime
: Dynamic,
MatrixVMaxColsAtCompileTime = ShouldComputeFullV ? MatrixType::MaxColsAtCompileTime
: ShouldComputeThinV ? MaxDiagSizeAtCompileTime
: Dynamic
};
};
}
/** \ingroup SVD_Module
@ -102,33 +75,19 @@ public:
typedef typename Eigen::internal::traits<SVDBase>::StorageIndex StorageIndex;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
enum {
ShouldComputeFullU = internal::traits<Derived>::ShouldComputeFullU,
ShouldComputeThinU = internal::traits<Derived>::ShouldComputeThinU,
ShouldComputeFullV = internal::traits<Derived>::ShouldComputeFullV,
ShouldComputeThinV = internal::traits<Derived>::ShouldComputeThinV,
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
MatrixOptions = MatrixType::Options,
MatrixUColsAtCompileTime = internal::traits<Derived>::MatrixUColsAtCompileTime,
MatrixVColsAtCompileTime = internal::traits<Derived>::MatrixVColsAtCompileTime,
MatrixUMaxColsAtCompileTime = internal::traits<Derived>::MatrixUMaxColsAtCompileTime,
MatrixVMaxColsAtCompileTime = internal::traits<Derived>::MatrixVMaxColsAtCompileTime
MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
MatrixOptions = MatrixType::Options
};
EIGEN_STATIC_ASSERT(!(ShouldComputeFullU != 0 && ShouldComputeThinU != 0), "SVDBase: Cannot request both full and thin U")
EIGEN_STATIC_ASSERT(!(ShouldComputeFullV != 0 && ShouldComputeThinV != 0), "SVDBase: Cannot request both full and thin V")
typedef typename internal::make_proper_matrix_type<
Scalar, RowsAtCompileTime, MatrixUColsAtCompileTime, MatrixOptions, MaxRowsAtCompileTime, MatrixUMaxColsAtCompileTime
>::type MatrixUType;
typedef typename internal::make_proper_matrix_type<
Scalar, ColsAtCompileTime, MatrixVColsAtCompileTime, MatrixOptions, MaxColsAtCompileTime, MatrixVMaxColsAtCompileTime
>::type MatrixVType;
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixUType;
typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime, MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime> MatrixVType;
typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
Derived& derived() { return *static_cast<Derived*>(this); }
const Derived& derived() const { return *static_cast<const Derived*>(this); }
@ -248,9 +207,9 @@ public:
}
/** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
EIGEN_CONSTEXPR inline bool computeU() const { return ShouldComputeFullU != 0 || ShouldComputeThinU != 0; }
inline bool computeU() const { return m_computeFullU || m_computeThinU; }
/** \returns true if \a V (full or thin) is asked for in this SVD decomposition */
EIGEN_CONSTEXPR inline bool computeV() const { return ShouldComputeFullV != 0 || ShouldComputeThinV != 0; }
inline bool computeV() const { return m_computeFullV || m_computeThinV; }
inline Index rows() const { return m_rows; }
inline Index cols() const { return m_cols; }
@ -307,13 +266,16 @@ protected:
}
// return true if already allocated
bool allocate(Index rows, Index cols);
bool allocate(Index rows, Index cols, unsigned int computationOptions) ;
MatrixUType m_matrixU;
MatrixVType m_matrixV;
SingularValuesType m_singularValues;
ComputationInfo m_info;
bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold;
bool m_computeFullU, m_computeThinU;
bool m_computeFullV, m_computeThinV;
unsigned int m_computationOptions;
Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
RealScalar m_prescribedThreshold;
@ -326,8 +288,15 @@ protected:
m_isInitialized(false),
m_isAllocated(false),
m_usePrescribedThreshold(false),
m_computeFullU(false),
m_computeThinU(false),
m_computeFullV(false),
m_computeThinV(false),
m_computationOptions(0),
m_rows(-1), m_cols(-1), m_diagSize(0)
{ }
};
#ifndef EIGEN_PARSED_BY_DOXYGEN
@ -361,14 +330,15 @@ void SVDBase<Derived>::_solve_impl_transposed(const RhsType &rhs, DstType &dst)
}
#endif
template<typename Derived>
bool SVDBase<Derived>::allocate(Index rows, Index cols)
template<typename MatrixType>
bool SVDBase<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
{
eigen_assert(rows >= 0 && cols >= 0);
if (m_isAllocated &&
rows == m_rows &&
cols == m_cols)
cols == m_cols &&
computationOptions == m_computationOptions)
{
return true;
}
@ -378,13 +348,22 @@ bool SVDBase<Derived>::allocate(Index rows, Index cols)
m_info = Success;
m_isInitialized = false;
m_isAllocated = true;
m_computationOptions = computationOptions;
m_computeFullU = (computationOptions & ComputeFullU) != 0;
m_computeThinU = (computationOptions & ComputeThinU) != 0;
m_computeFullV = (computationOptions & ComputeFullV) != 0;
m_computeThinV = (computationOptions & ComputeThinV) != 0;
eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U");
eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V");
eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
"SVDBase: thin U and V are only available when your matrix has a dynamic number of columns.");
m_diagSize = (std::min)(m_rows, m_cols);
m_singularValues.resize(m_diagSize);
if(RowsAtCompileTime==Dynamic)
m_matrixU.resize(m_rows, ShouldComputeFullU ? m_rows : ShouldComputeThinU ? m_diagSize : 0);
m_matrixU.resize(m_rows, m_computeFullU ? m_rows : m_computeThinU ? m_diagSize : 0);
if(ColsAtCompileTime==Dynamic)
m_matrixV.resize(m_cols, ShouldComputeFullV ? m_cols : ShouldComputeThinV ? m_diagSize : 0);
m_matrixV.resize(m_cols, m_computeFullV ? m_cols : m_computeThinV ? m_diagSize : 0);
return false;
}
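For reference, a small sketch of what the runtime allocation path above means for callers: the option bitmask is decoded into full/thin flags, which in turn fix the number of columns of U and V. This is illustrative code against the public enums, not the allocate() implementation itself.

    #include <Eigen/Dense>
    #include <iostream>
    using namespace Eigen;

    int main() {
      unsigned int opts = ComputeThinU | ComputeFullV;
      bool fullU = (opts & ComputeFullU) != 0;
      bool thinU = (opts & ComputeThinU) != 0;
      bool fullV = (opts & ComputeFullV) != 0;
      bool thinV = (opts & ComputeThinV) != 0;
      std::cout << fullU << " " << thinU << " " << fullV << " " << thinV << "\n";  // 0 1 1 0
      // For an m-by-n input with d = min(m,n), U is m x (fullU ? m : thinU ? d : 0)
      // and V is n x (fullV ? n : thinV ? d : 0), matching the resize calls above.
    }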

View File

@ -38,6 +38,8 @@ void bench(int id, int rows, int size = Size)
A = A*A.adjoint();
BenchTimer t_llt, t_ldlt, t_lu, t_fplu, t_qr, t_cpqr, t_cod, t_fpqr, t_jsvd, t_bdcsvd;
int svd_opt = ComputeThinU|ComputeThinV;
int tries = 5;
int rep = 1000/size;
if(rep==0) rep = 1;
@ -51,8 +53,8 @@ void bench(int id, int rows, int size = Size)
ColPivHouseholderQR<Mat> cpqr(A.rows(),A.cols());
CompleteOrthogonalDecomposition<Mat> cod(A.rows(),A.cols());
FullPivHouseholderQR<Mat> fpqr(A.rows(),A.cols());
JacobiSVD<MatDyn, ComputeThinU|ComputeThinV> jsvd(A.rows(),A.cols());
BDCSVD<MatDyn, ComputeThinU|ComputeThinV> bdcsvd(A.rows(),A.cols());
JacobiSVD<MatDyn> jsvd(A.rows(),A.cols());
BDCSVD<MatDyn> bdcsvd(A.rows(),A.cols());
BENCH(t_llt, tries, rep, compute_norm_equation(llt,A));
BENCH(t_ldlt, tries, rep, compute_norm_equation(ldlt,A));
@ -65,9 +67,9 @@ void bench(int id, int rows, int size = Size)
if(size*rows<=10000000)
BENCH(t_fpqr, tries, rep, compute(fpqr,A));
if(size<500) // JacobiSVD is really too slow for too large matrices
BENCH(t_jsvd, tries, rep, jsvd.compute(A));
BENCH(t_jsvd, tries, rep, jsvd.compute(A,svd_opt));
// if(size*rows<=20000000)
BENCH(t_bdcsvd, tries, rep, bdcsvd.compute(A));
BENCH(t_bdcsvd, tries, rep, bdcsvd.compute(A,svd_opt));
results["LLT"][id] = t_llt.best();
results["LDLT"][id] = t_ldlt.best();

View File

@ -101,8 +101,8 @@ m1.colPivHouseholderQr();
?geqp3
\endcode</td></tr>
<tr class="alt"><td>Singular value decomposition \n \c EIGEN_USE_LAPACKE </td><td>\code
JacobiSVD<MatrixXd, ComputeThinV> svd;
svd.compute(m1);
JacobiSVD<MatrixXd> svd;
svd.compute(m1, ComputeThinV);
\endcode</td><td>\code
?gesvd
\endcode</td></tr>
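A sketch of the documented usage above as a full translation unit; it assumes an Eigen build that is actually linked against a LAPACKE implementation, otherwise the define can simply be dropped and the built-in kernels are used.

    #define EIGEN_USE_LAPACKE
    #include <Eigen/Dense>
    using namespace Eigen;

    int main() {
      MatrixXd m1 = MatrixXd::Random(100, 60);
      JacobiSVD<MatrixXd> svd;         // default ColPivHouseholderQR preconditioner
      svd.compute(m1, ComputeThinV);   // maps to ?gesvd per the table above
      return svd.matrixV().cols() == 60 ? 0 : 1;
    }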

View File

@ -11,5 +11,5 @@ int main()
VectorXf b = VectorXf::Random(3);
cout << "Here is the right hand side b:\n" << b << endl;
cout << "The least-squares solution is:\n"
<< A.template bdcSvd<ComputeThinU | ComputeThinV>().solve(b) << endl;
<< A.bdcSvd(ComputeThinU | ComputeThinV).solve(b) << endl;
}
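For comparison, the same least-squares solve written with JacobiSVD and the runtime options used above; the residual print is an addition for illustration, not part of the documentation example.

    #include <iostream>
    #include <Eigen/Dense>
    using namespace Eigen;

    int main() {
      MatrixXf A = MatrixXf::Random(3, 2);
      VectorXf b = VectorXf::Random(3);
      VectorXf x = A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b);
      // x minimizes |Ax - b|; the residual is orthogonal to the columns of A.
      std::cout << "residual norm: " << (A * x - b).norm() << std::endl;
    }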

View File

@ -1,6 +1,6 @@
MatrixXf m = MatrixXf::Random(3,2);
cout << "Here is the matrix m:" << endl << m << endl;
JacobiSVD<MatrixXf, ComputeThinU | ComputeThinV> svd(m);
JacobiSVD<MatrixXf> svd(m, ComputeThinU | ComputeThinV);
cout << "Its singular values are:" << endl << svd.singularValues() << endl;
cout << "Its left singular vectors are the columns of the thin U matrix:" << endl << svd.matrixU() << endl;
cout << "Its right singular vectors are the columns of the thin V matrix:" << endl << svd.matrixV() << endl;

View File

@ -10,7 +10,6 @@
#include "lapack_common.h"
#include <Eigen/SVD>
// computes the singular values/vectors a general M-by-N matrix A using divide-and-conquer
EIGEN_LAPACK_FUNC(gesdd,(char *jobz, int *m, int* n, Scalar* a, int *lda, RealScalar *s, Scalar *u, int *ldu, Scalar *vt, int *ldvt, Scalar* /*work*/, int* lwork,
EIGEN_LAPACK_ARG_IF_COMPLEX(RealScalar */*rwork*/) int * /*iwork*/, int *info))
@ -48,97 +47,40 @@ EIGEN_LAPACK_FUNC(gesdd,(char *jobz, int *m, int* n, Scalar* a, int *lda, RealSc
PlainMatrixType mat(*m,*n);
mat = matrix(a,*m,*n,*lda);
int option = *jobz=='A' ? ComputeFullU|ComputeFullV
: *jobz=='S' ? ComputeThinU|ComputeThinV
: *jobz=='O' ? ComputeThinU|ComputeThinV
: 0;
BDCSVD<PlainMatrixType> svd(mat,option);
make_vector(s,diag_size) = svd.singularValues().head(diag_size);
if(*jobz=='A')
{
BDCSVD<PlainMatrixType, ComputeFullU|ComputeFullV> svd(mat);
make_vector(s,diag_size) = svd.singularValues().head(diag_size);
matrix(u,*m,*m,*ldu) = svd.matrixU();
matrix(vt,*n,*n,*ldvt) = svd.matrixV().adjoint();
matrix(u,*m,*m,*ldu) = svd.matrixU();
matrix(vt,*n,*n,*ldvt) = svd.matrixV().adjoint();
}
else if(*jobz=='S')
{
BDCSVD<PlainMatrixType, ComputeThinU|ComputeThinV> svd(mat);
make_vector(s,diag_size) = svd.singularValues().head(diag_size);
matrix(u,*m,diag_size,*ldu) = svd.matrixU();
matrix(vt,diag_size,*n,*ldvt) = svd.matrixV().adjoint();
}
else if(*jobz=='O' && *m>=*n)
{
BDCSVD<PlainMatrixType, ComputeThinU|ComputeThinV> svd(mat);
make_vector(s,diag_size) = svd.singularValues().head(diag_size);
matrix(a,*m,*n,*lda) = svd.matrixU();
matrix(vt,*n,*n,*ldvt) = svd.matrixV().adjoint();
matrix(a,*m,*n,*lda) = svd.matrixU();
matrix(vt,*n,*n,*ldvt) = svd.matrixV().adjoint();
}
else if(*jobz=='O')
{
BDCSVD<PlainMatrixType, ComputeThinU|ComputeThinV> svd(mat);
make_vector(s,diag_size) = svd.singularValues().head(diag_size);
matrix(u,*m,*m,*ldu) = svd.matrixU();
matrix(a,diag_size,*n,*lda) = svd.matrixV().adjoint();
}
else
{
BDCSVD<PlainMatrixType> svd(mat);
make_vector(s,diag_size) = svd.singularValues().head(diag_size);
}
return 0;
}
template<typename MatrixType, int Options>
void gesvdAssignmentHelper(MatrixType& mat, char* jobu, char* jobv, int* m, int* n, int diag_size, Scalar* a, int* lda, RealScalar* s, Scalar* u, int* ldu, Scalar* vt, int* ldvt)
{
JacobiSVD<MatrixType, Options> svd(mat);
make_vector(s,diag_size) = svd.singularValues().head(diag_size);
{
if(*jobu=='A') matrix(u,*m,*m,*ldu) = svd.matrixU();
else if(*jobu=='S') matrix(u,*m,diag_size,*ldu) = svd.matrixU();
else if(*jobu=='O') matrix(a,*m,diag_size,*lda) = svd.matrixU();
}
{
if(*jobv=='A') matrix(vt,*n,*n,*ldvt) = svd.matrixV().adjoint();
else if(*jobv=='S') matrix(vt,diag_size,*n,*ldvt) = svd.matrixV().adjoint();
else if(*jobv=='O') matrix(a,diag_size,*n,*lda) = svd.matrixV().adjoint();
}
}
template<typename MatrixType, int Options, typename ...Args>
void gesvdSetVOptions(MatrixType& mat, char* jobu, char* jobv, Args... args)
{
if (*jobv=='A')
{
gesvdAssignmentHelper<MatrixType, Options | ComputeFullV>(mat, jobu, jobv, args...);
}
else if (*jobv=='S' || *jobv=='O')
{
gesvdAssignmentHelper<MatrixType, Options | ComputeThinV>(mat, jobu, jobv, args...);
}
else
{
gesvdAssignmentHelper<MatrixType, Options>(mat, jobu, jobv, args...);
}
}
template<typename MatrixType, typename ...Args>
void gesvdSetUOptions(MatrixType& mat, char* jobu, char* jobv, Args... args)
{
if (*jobu=='A')
{
gesvdSetVOptions<MatrixType, ComputeFullU>(mat, jobu, jobv, args...);
}
else if (*jobu=='S' || *jobu=='O')
{
gesvdSetVOptions<MatrixType, ComputeThinU>(mat, jobu, jobv, args...);
}
else
{
gesvdSetVOptions<MatrixType, 0>(mat, jobu, jobv, args...);
}
}
// computes the singular values/vectors a general M-by-N matrix A using two sided jacobi algorithm
EIGEN_LAPACK_FUNC(gesvd,(char *jobu, char *jobv, int *m, int* n, Scalar* a, int *lda, RealScalar *s, Scalar *u, int *ldu, Scalar *vt, int *ldvt, Scalar* /*work*/, int* lwork,
EIGEN_LAPACK_ARG_IF_COMPLEX(RealScalar */*rwork*/) int *info))
@ -175,8 +117,22 @@ EIGEN_LAPACK_FUNC(gesvd,(char *jobu, char *jobv, int *m, int* n, Scalar* a, int
PlainMatrixType mat(*m,*n);
mat = matrix(a,*m,*n,*lda);
gesvdSetUOptions<PlainMatrixType>(mat, jobu, jobv, m, n, diag_size, a, lda, s, u, ldu, vt, ldvt);
int option = (*jobu=='A' ? ComputeFullU : *jobu=='S' || *jobu=='O' ? ComputeThinU : 0)
| (*jobv=='A' ? ComputeFullV : *jobv=='S' || *jobv=='O' ? ComputeThinV : 0);
JacobiSVD<PlainMatrixType> svd(mat,option);
make_vector(s,diag_size) = svd.singularValues().head(diag_size);
{
if(*jobu=='A') matrix(u,*m,*m,*ldu) = svd.matrixU();
else if(*jobu=='S') matrix(u,*m,diag_size,*ldu) = svd.matrixU();
else if(*jobu=='O') matrix(a,*m,diag_size,*lda) = svd.matrixU();
}
{
if(*jobv=='A') matrix(vt,*n,*n,*ldvt) = svd.matrixV().adjoint();
else if(*jobv=='S') matrix(vt,diag_size,*n,*ldvt) = svd.matrixV().adjoint();
else if(*jobv=='O') matrix(a,diag_size,*n,*lda) = svd.matrixV().adjoint();
}
return 0;
}
}
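The gesvd wrapper above folds the LAPACK job characters into a single runtime option mask. A self-contained sketch of that mapping; optionsFromJobs is a hypothetical helper written for illustration, not a function from this file.

    #include <Eigen/Dense>
    #include <iostream>
    using namespace Eigen;

    static unsigned int optionsFromJobs(char jobu, char jobv) {
      unsigned int opt = 0;
      // 'A' -> full factor, 'S'/'O' -> thin factor, anything else -> not computed.
      if (jobu == 'A') opt |= ComputeFullU;
      else if (jobu == 'S' || jobu == 'O') opt |= ComputeThinU;
      if (jobv == 'A') opt |= ComputeFullV;
      else if (jobv == 'S' || jobv == 'O') opt |= ComputeThinV;
      return opt;
    }

    int main() {
      std::cout << (optionsFromJobs('A', 'S') == (ComputeFullU | ComputeThinV)) << "\n";  // 1
    }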

View File

@ -19,11 +19,26 @@
#include <iostream>
#include <Eigen/LU>
#define SVD_DEFAULT(M) BDCSVD<M>
#define SVD_FOR_MIN_NORM(M) BDCSVD<M>
#define SVD_STATIC_OPTIONS(M, O) BDCSVD<M, O>
#include "svd_common.h"
// Check all variants of JacobiSVD
template<typename MatrixType>
void bdcsvd(const MatrixType& a = MatrixType(), bool pickrandom = true)
{
MatrixType m;
if(pickrandom) {
m.resizeLike(a);
svd_fill_random(m);
}
else
m = a;
CALL_SUBTEST(( svd_test_all_computation_options<BDCSVD<MatrixType> >(m, false) ));
}
template<typename MatrixType>
void bdcsvd_method()
{
@ -34,23 +49,28 @@ void bdcsvd_method()
VERIFY_IS_APPROX(m.bdcSvd().singularValues(), RealVecType::Ones());
VERIFY_RAISES_ASSERT(m.bdcSvd().matrixU());
VERIFY_RAISES_ASSERT(m.bdcSvd().matrixV());
VERIFY_IS_APPROX(m.template bdcSvd<ComputeFullU|ComputeFullV>().solve(m), m);
VERIFY_IS_APPROX(m.template bdcSvd<ComputeFullU|ComputeFullV>().transpose().solve(m), m);
VERIFY_IS_APPROX(m.template bdcSvd<ComputeFullU|ComputeFullV>().adjoint().solve(m), m);
VERIFY_IS_APPROX(m.bdcSvd(ComputeFullU|ComputeFullV).solve(m), m);
VERIFY_IS_APPROX(m.bdcSvd(ComputeFullU|ComputeFullV).transpose().solve(m), m);
VERIFY_IS_APPROX(m.bdcSvd(ComputeFullU|ComputeFullV).adjoint().solve(m), m);
}
// compare the Singular values returned with Jacobi and Bdc
// Compare the Singular values returned with Jacobi and Bdc.
template<typename MatrixType>
void compare_bdc_jacobi(const MatrixType& a = MatrixType(), int algoswap = 16, bool random = true)
void compare_bdc_jacobi(const MatrixType& a = MatrixType(), unsigned int computationOptions = 0, int algoswap = 16, bool random = true)
{
MatrixType m = random ? MatrixType::Random(a.rows(), a.cols()) : a;
BDCSVD<MatrixType> bdc_svd(m.rows(), m.cols());
BDCSVD<MatrixType> bdc_svd(m.rows(), m.cols(), computationOptions);
bdc_svd.setSwitchSize(algoswap);
bdc_svd.compute(m);
JacobiSVD<MatrixType> jacobi_svd(m);
VERIFY_IS_APPROX(bdc_svd.singularValues(), jacobi_svd.singularValues());
if(computationOptions & ComputeFullU) VERIFY_IS_APPROX(bdc_svd.matrixU(), jacobi_svd.matrixU());
if(computationOptions & ComputeThinU) VERIFY_IS_APPROX(bdc_svd.matrixU(), jacobi_svd.matrixU());
if(computationOptions & ComputeFullV) VERIFY_IS_APPROX(bdc_svd.matrixV(), jacobi_svd.matrixV());
if(computationOptions & ComputeThinV) VERIFY_IS_APPROX(bdc_svd.matrixV(), jacobi_svd.matrixV());
}
// Verifies total deflation is **not** triggered.
@ -71,59 +91,41 @@ void compare_bdc_jacobi_instance(bool structure_as_m, int algoswap = 16)
-20.794, 8.68496, -4.83103,
-8.4981, -10.5451, 23.9072;
}
compare_bdc_jacobi(m, algoswap, false);
}
template<typename MatrixType>
void bdcsvd_all_options(const MatrixType& input = MatrixType())
{
MatrixType m = input;
svd_fill_random(m);
svd_option_checks<MatrixType, 0>(m);
compare_bdc_jacobi(m, 0, algoswap, false);
}
EIGEN_DECLARE_TEST(bdcsvd)
{
CALL_SUBTEST_3(( svd_verify_assert<Matrix3f>() ));
CALL_SUBTEST_4(( svd_verify_assert<Matrix4d>() ));
CALL_SUBTEST_7(( svd_verify_assert<Matrix<float, 30, 21> >() ));
CALL_SUBTEST_7(( svd_verify_assert<Matrix<float, 21, 30> >() ));
CALL_SUBTEST_9(( svd_verify_assert<Matrix<std::complex<double>, 20, 27> >() ));
CALL_SUBTEST_3(( svd_verify_assert<BDCSVD<Matrix3f> >(Matrix3f()) ));
CALL_SUBTEST_4(( svd_verify_assert<BDCSVD<Matrix4d> >(Matrix4d()) ));
CALL_SUBTEST_7(( svd_verify_assert<BDCSVD<MatrixXf> >(MatrixXf(10,12)) ));
CALL_SUBTEST_8(( svd_verify_assert<BDCSVD<MatrixXcd> >(MatrixXcd(7,5)) ));
CALL_SUBTEST_101(( svd_all_trivial_2x2(bdcsvd_all_options<Matrix2cd>) ));
CALL_SUBTEST_102(( svd_all_trivial_2x2(bdcsvd_all_options<Matrix2d>) ));
CALL_SUBTEST_101(( svd_all_trivial_2x2(bdcsvd<Matrix2cd>) ));
CALL_SUBTEST_102(( svd_all_trivial_2x2(bdcsvd<Matrix2d>) ));
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_3(( bdcsvd<Matrix3f>() ));
CALL_SUBTEST_4(( bdcsvd<Matrix4d>() ));
CALL_SUBTEST_5(( bdcsvd<Matrix<float,3,5> >() ));
int r = internal::random<int>(1, EIGEN_TEST_MAX_SIZE/2),
c = internal::random<int>(1, EIGEN_TEST_MAX_SIZE/2);
TEST_SET_BUT_UNUSED_VARIABLE(r)
TEST_SET_BUT_UNUSED_VARIABLE(c)
CALL_SUBTEST_7(( compare_bdc_jacobi<MatrixXf>(MatrixXf(r,c)) ));
CALL_SUBTEST_10(( compare_bdc_jacobi<MatrixXd>(MatrixXd(r,c)) ));
CALL_SUBTEST_8(( compare_bdc_jacobi<MatrixXcd>(MatrixXcd(r,c)) ));
CALL_SUBTEST_6(( bdcsvd(Matrix<double,Dynamic,2>(r,2)) ));
CALL_SUBTEST_7(( bdcsvd(MatrixXf(r,c)) ));
CALL_SUBTEST_7(( compare_bdc_jacobi(MatrixXf(r,c)) ));
CALL_SUBTEST_10(( bdcsvd(MatrixXd(r,c)) ));
CALL_SUBTEST_10(( compare_bdc_jacobi(MatrixXd(r,c)) ));
CALL_SUBTEST_8(( bdcsvd(MatrixXcd(r,c)) ));
CALL_SUBTEST_8(( compare_bdc_jacobi(MatrixXcd(r,c)) ));
// Test on inf/nan matrix
CALL_SUBTEST_7( (svd_inf_nan<MatrixXf>()) );
CALL_SUBTEST_10( (svd_inf_nan<MatrixXd>()) );
// Verify some computations using all combinations of the Options template parameter.
CALL_SUBTEST_3(( bdcsvd_all_options<Matrix3f>() ));
CALL_SUBTEST_3(( bdcsvd_all_options<Matrix<float, 2, 3> >() ));
CALL_SUBTEST_4(( bdcsvd_all_options<Matrix<double, 20, 17> >() ));
CALL_SUBTEST_4(( bdcsvd_all_options<Matrix<double, 17, 20> >() ));
CALL_SUBTEST_5(( bdcsvd_all_options<Matrix<double, Dynamic, 30> >(Matrix<double, Dynamic, 30>(r, 30)) ));
CALL_SUBTEST_5(( bdcsvd_all_options<Matrix<double, 20, Dynamic> >(Matrix<double, 20, Dynamic>(20, c)) ));
CALL_SUBTEST_7(( bdcsvd_all_options<MatrixXf>(MatrixXf(r, c)) ));
CALL_SUBTEST_8(( bdcsvd_all_options<MatrixXcd>(MatrixXcd(r, c)) ));
CALL_SUBTEST_10(( bdcsvd_all_options<MatrixXd>(MatrixXd(r, c)) ));
CALL_SUBTEST_14(( bdcsvd_all_options<Matrix<double, 20, 27, RowMajor>>() ));
CALL_SUBTEST_14(( bdcsvd_all_options<Matrix<double, 27, 20, RowMajor>>() ));
CALL_SUBTEST_15(( svd_check_max_size_matrix<Matrix<float, Dynamic, Dynamic, ColMajor, 20, 35>, ColPivHouseholderQRPreconditioner>(r, c) ));
CALL_SUBTEST_15(( svd_check_max_size_matrix<Matrix<float, Dynamic, Dynamic, ColMajor, 35, 20>, HouseholderQRPreconditioner>(r, c) ));
CALL_SUBTEST_15(( svd_check_max_size_matrix<Matrix<float, Dynamic, Dynamic, RowMajor, 20, 35>, ColPivHouseholderQRPreconditioner>(r, c) ));
CALL_SUBTEST_15(( svd_check_max_size_matrix<Matrix<float, Dynamic, Dynamic, RowMajor, 35, 20>, HouseholderQRPreconditioner>(r, c) ));
CALL_SUBTEST_7( (svd_inf_nan<BDCSVD<MatrixXf>, MatrixXf>()) );
CALL_SUBTEST_10( (svd_inf_nan<BDCSVD<MatrixXd>, MatrixXd>()) );
}
// test matrixbase method
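compare_bdc_jacobi above checks the two algorithms factor by factor. A stand-alone sketch of the same idea with the runtime options restored by this revert; the matrix size is arbitrary.

    #include <iostream>
    #include <Eigen/Dense>
    using namespace Eigen;

    int main() {
      MatrixXd m = MatrixXd::Random(20, 12);
      unsigned int opts = ComputeThinU | ComputeThinV;
      BDCSVD<MatrixXd> bdc(m, opts);
      JacobiSVD<MatrixXd> jac(m, opts);
      // Both algorithms must agree on the singular values; individual singular
      // vectors may differ in sign when singular values are (nearly) repeated.
      std::cout << (bdc.singularValues() - jac.singularValues()).norm() << std::endl;
    }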

View File

@ -200,8 +200,8 @@ EIGEN_DECLARE_TEST(boostmultiprec)
TEST_SET_BUT_UNUSED_VARIABLE(s)
}
CALL_SUBTEST_9(( jacobisvd_all_options(Mat(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
CALL_SUBTEST_10(( bdcsvd_all_options(Mat(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
CALL_SUBTEST_9(( jacobisvd(Mat(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
CALL_SUBTEST_10(( bdcsvd(Mat(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
CALL_SUBTEST_11(( test_simplicial_cholesky_T<Real,int,ColMajor>() ));
}

View File

@ -211,7 +211,7 @@ MatrixType randomRotationMatrix()
// https://www.isprs-ann-photogramm-remote-sens-spatial-inf-sci.net/III-7/103/2016/isprs-annals-III-7-103-2016.pdf
const MatrixType rand = MatrixType::Random();
const MatrixType q = rand.householderQr().householderQ();
const JacobiSVD<MatrixType, ComputeFullU | ComputeFullV> svd(q);
const JacobiSVD<MatrixType> svd = q.jacobiSvd(ComputeFullU | ComputeFullV);
const typename MatrixType::Scalar det = (svd.matrixU() * svd.matrixV().transpose()).determinant();
MatrixType diag = rand.Identity();
diag(MatrixType::RowsAtCompileTime - 1, MatrixType::ColsAtCompileTime - 1) = det;
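The test above uses the standard SVD-based projection onto a proper rotation: when det(U V^T) is negative, the last column direction is flipped. A self-contained sketch of the same construction, independent of the test harness:

    #include <iostream>
    #include <Eigen/Dense>
    using namespace Eigen;

    int main() {
      Matrix3d rand = Matrix3d::Random();
      Matrix3d q = rand.householderQr().householderQ();   // orthogonal, det = +/-1
      JacobiSVD<Matrix3d> svd(q, ComputeFullU | ComputeFullV);
      double det = (svd.matrixU() * svd.matrixV().transpose()).determinant();
      Matrix3d diag = Matrix3d::Identity();
      diag(2, 2) = det;                                    // flips one axis if det == -1
      Matrix3d rotation = svd.matrixU() * diag * svd.matrixV().transpose();
      std::cout << "det(rotation) = " << rotation.determinant() << std::endl;  // ~ +1
    }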

View File

@ -16,9 +16,49 @@
#define SVD_DEFAULT(M) JacobiSVD<M>
#define SVD_FOR_MIN_NORM(M) JacobiSVD<M,ColPivHouseholderQRPreconditioner>
#define SVD_STATIC_OPTIONS(M, O) JacobiSVD<M, O>
#include "svd_common.h"
// Check all variants of JacobiSVD
template<typename MatrixType>
void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true)
{
MatrixType m = a;
if(pickrandom)
svd_fill_random(m);
CALL_SUBTEST(( svd_test_all_computation_options<JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner> >(m, true) )); // check full only
CALL_SUBTEST(( svd_test_all_computation_options<JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner> >(m, false) ));
CALL_SUBTEST(( svd_test_all_computation_options<JacobiSVD<MatrixType, HouseholderQRPreconditioner> >(m, false) ));
if(m.rows()==m.cols())
CALL_SUBTEST(( svd_test_all_computation_options<JacobiSVD<MatrixType, NoQRPreconditioner> >(m, false) ));
}
template<typename MatrixType> void jacobisvd_verify_assert(const MatrixType& m)
{
svd_verify_assert<JacobiSVD<MatrixType> >(m);
svd_verify_assert<JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner> >(m, true);
svd_verify_assert<JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner> >(m);
svd_verify_assert<JacobiSVD<MatrixType, HouseholderQRPreconditioner> >(m);
Index rows = m.rows();
Index cols = m.cols();
enum {
ColsAtCompileTime = MatrixType::ColsAtCompileTime
};
MatrixType a = MatrixType::Zero(rows, cols);
a.setZero();
if (ColsAtCompileTime == Dynamic)
{
JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner> svd_fullqr;
VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeFullU|ComputeThinV))
VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeThinU|ComputeThinV))
VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeThinU|ComputeFullV))
}
}
template<typename MatrixType>
void jacobisvd_method()
{
@ -29,47 +69,9 @@ void jacobisvd_method()
VERIFY_IS_APPROX(m.jacobiSvd().singularValues(), RealVecType::Ones());
VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixU());
VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixV());
VERIFY_IS_APPROX(m.template jacobiSvd<ComputeFullU|ComputeFullV>().solve(m), m);
VERIFY_IS_APPROX(m.template jacobiSvd<ComputeFullU|ComputeFullV>().transpose().solve(m), m);
VERIFY_IS_APPROX(m.template jacobiSvd<ComputeFullU|ComputeFullV>().adjoint().solve(m), m);
}
template<typename MatrixType>
void jacobisvd_all_options(const MatrixType& input = MatrixType())
{
MatrixType m = input;
svd_fill_random(m);
svd_option_checks<MatrixType, 0 /* Default */>(m);
svd_option_checks<MatrixType, ColPivHouseholderQRPreconditioner>(m);
svd_option_checks<MatrixType, HouseholderQRPreconditioner>(m);
svd_option_checks_full_only<MatrixType, FullPivHouseholderQRPreconditioner>(m); // FullPiv only used when computing full unitaries
}
template<typename MatrixType>
void jacobisvd_verify_assert(const MatrixType& m = MatrixType())
{
svd_verify_assert<MatrixType, 0 /* Default */>(m);
svd_verify_assert<MatrixType, ColPivHouseholderQRPreconditioner>(m);
svd_verify_assert<MatrixType, HouseholderQRPreconditioner>(m);
svd_verify_assert_full_only<MatrixType, FullPivHouseholderQRPreconditioner>(m);
}
template<typename MatrixType>
void jacobisvd_verify_inputs(const MatrixType& m = MatrixType()) {
// check defaults
typedef JacobiSVD<MatrixType> DefaultSVD;
DefaultSVD defaultSvd(m);
VERIFY((int)DefaultSVD::QRPreconditioner == (int)ColPivHouseholderQRPreconditioner);
VERIFY(!defaultSvd.computeU());
VERIFY(!defaultSvd.computeV());
// ColPivHouseholderQR is always default in presence of other options.
VERIFY(( (int)JacobiSVD<MatrixType, ComputeThinU>::QRPreconditioner == (int)ColPivHouseholderQRPreconditioner ));
VERIFY(( (int)JacobiSVD<MatrixType, ComputeThinV>::QRPreconditioner == (int)ColPivHouseholderQRPreconditioner ));
VERIFY(( (int)JacobiSVD<MatrixType, ComputeThinU | ComputeThinV>::QRPreconditioner == (int)ColPivHouseholderQRPreconditioner ));
VERIFY(( (int)JacobiSVD<MatrixType, ComputeFullU | ComputeFullV>::QRPreconditioner == (int)ColPivHouseholderQRPreconditioner ));
VERIFY(( (int)JacobiSVD<MatrixType, ComputeThinU | ComputeFullV>::QRPreconditioner == (int)ColPivHouseholderQRPreconditioner ));
VERIFY(( (int)JacobiSVD<MatrixType, ComputeFullU | ComputeThinV>::QRPreconditioner == (int)ColPivHouseholderQRPreconditioner ));
VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).solve(m), m);
VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).transpose().solve(m), m);
VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).adjoint().solve(m), m);
}
namespace Foo {
@ -89,63 +91,45 @@ void msvc_workaround()
EIGEN_DECLARE_TEST(jacobisvd)
{
CALL_SUBTEST_4(( jacobisvd_verify_inputs<Matrix4d>() ));
CALL_SUBTEST_7(( jacobisvd_verify_inputs(Matrix<float, 10, Dynamic>(10, 12)) ));
CALL_SUBTEST_8(( jacobisvd_verify_inputs<Matrix<std::complex<double>, 7, 5> >() ));
CALL_SUBTEST_3(( jacobisvd_verify_assert<Matrix3f>() ));
CALL_SUBTEST_4(( jacobisvd_verify_assert<Matrix4d>() ));
CALL_SUBTEST_7(( jacobisvd_verify_assert<Matrix<float, 10, 12>>() ));
CALL_SUBTEST_7(( jacobisvd_verify_assert<Matrix<float, 12, 10>>() ));
CALL_SUBTEST_7(( jacobisvd_verify_assert<MatrixXf>(MatrixXf(10, 12)) ));
CALL_SUBTEST_8(( jacobisvd_verify_assert<MatrixXcd>(MatrixXcd(7, 5)) ));
CALL_SUBTEST_3(( jacobisvd_verify_assert(Matrix3f()) ));
CALL_SUBTEST_4(( jacobisvd_verify_assert(Matrix4d()) ));
CALL_SUBTEST_7(( jacobisvd_verify_assert(MatrixXf(10,12)) ));
CALL_SUBTEST_8(( jacobisvd_verify_assert(MatrixXcd(7,5)) ));
CALL_SUBTEST_11(svd_all_trivial_2x2(jacobisvd_all_options<Matrix2cd>));
CALL_SUBTEST_12(svd_all_trivial_2x2(jacobisvd_all_options<Matrix2d>));
CALL_SUBTEST_11(svd_all_trivial_2x2(jacobisvd<Matrix2cd>));
CALL_SUBTEST_12(svd_all_trivial_2x2(jacobisvd<Matrix2d>));
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_3(( jacobisvd<Matrix3f>() ));
CALL_SUBTEST_4(( jacobisvd<Matrix4d>() ));
CALL_SUBTEST_5(( jacobisvd<Matrix<float,3,5> >() ));
CALL_SUBTEST_6(( jacobisvd<Matrix<double,Dynamic,2> >(Matrix<double,Dynamic,2>(10,2)) ));
int r = internal::random<int>(1, 30),
c = internal::random<int>(1, 30);
TEST_SET_BUT_UNUSED_VARIABLE(r)
TEST_SET_BUT_UNUSED_VARIABLE(c)
// Verify some computations using all combinations of the Options template parameter.
CALL_SUBTEST_3(( jacobisvd_all_options<Matrix3f>() ));
CALL_SUBTEST_3(( jacobisvd_all_options<Matrix<float, 2, 3> >() ));
CALL_SUBTEST_4(( jacobisvd_all_options<Matrix4d>() ));
CALL_SUBTEST_4(( jacobisvd_all_options<Matrix<double, 10, 16> >() ));
CALL_SUBTEST_4(( jacobisvd_all_options<Matrix<double, 16, 10> >() ));
CALL_SUBTEST_5(( jacobisvd_all_options<Matrix<double, Dynamic, 16> >(Matrix<double, Dynamic, 16>(r, 16)) ));
CALL_SUBTEST_5(( jacobisvd_all_options<Matrix<double, 10, Dynamic> >(Matrix<double, 10, Dynamic>(10, c)) ));
CALL_SUBTEST_7(( jacobisvd_all_options<MatrixXf>( MatrixXf(r, c)) ));
CALL_SUBTEST_8(( jacobisvd_all_options<MatrixXcd>( MatrixXcd(r, c)) ));
CALL_SUBTEST_10(( jacobisvd_all_options<MatrixXd>( MatrixXd(r, c)) ));
CALL_SUBTEST_14(( jacobisvd_all_options<Matrix<double, 5, 7, RowMajor>>() ));
CALL_SUBTEST_14(( jacobisvd_all_options<Matrix<double, 7, 5, RowMajor>>() ));
MatrixXcd noQRTest = MatrixXcd(r, r);
svd_fill_random(noQRTest);
CALL_SUBTEST_16(( svd_option_checks<MatrixXcd, NoQRPreconditioner>(noQRTest) ));
CALL_SUBTEST_15(( svd_check_max_size_matrix<Matrix<float, Dynamic, Dynamic, ColMajor, 13, 15>, ColPivHouseholderQRPreconditioner>(r, c) ));
CALL_SUBTEST_15(( svd_check_max_size_matrix<Matrix<float, Dynamic, Dynamic, ColMajor, 15, 13>, HouseholderQRPreconditioner>(r, c) ));
CALL_SUBTEST_15(( svd_check_max_size_matrix<Matrix<float, Dynamic, Dynamic, RowMajor, 13, 15>, ColPivHouseholderQRPreconditioner>(r, c) ));
CALL_SUBTEST_15(( svd_check_max_size_matrix<Matrix<float, Dynamic, Dynamic, RowMajor, 15, 13>, HouseholderQRPreconditioner>(r, c) ));
CALL_SUBTEST_10(( jacobisvd<MatrixXd>(MatrixXd(r,c)) ));
CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(r,c)) ));
CALL_SUBTEST_8(( jacobisvd<MatrixXcd>(MatrixXcd(r,c)) ));
(void) r;
(void) c;
// Test on inf/nan matrix
CALL_SUBTEST_7( (svd_inf_nan<MatrixXf>()) );
CALL_SUBTEST_10( (svd_inf_nan<MatrixXd>()) );
CALL_SUBTEST_7( (svd_inf_nan<JacobiSVD<MatrixXf>, MatrixXf>()) );
CALL_SUBTEST_10( (svd_inf_nan<JacobiSVD<MatrixXd>, MatrixXd>()) );
CALL_SUBTEST_13(( jacobisvd_verify_assert<Matrix<double, 6, 1>>() ));
CALL_SUBTEST_13(( jacobisvd_verify_assert<Matrix<double, 1, 6>>() ));
CALL_SUBTEST_13(( jacobisvd_verify_assert<Matrix<double, Dynamic, 1>>(Matrix<double, Dynamic, 1>(r)) ));
CALL_SUBTEST_13(( jacobisvd_verify_assert<Matrix<double, 1, Dynamic>>(Matrix<double, 1, Dynamic>(c)) ));
// bug1395 test compile-time vectors as input
CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix<double,6,1>()) ));
CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix<double,1,6>()) ));
CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix<double,Dynamic,1>(r)) ));
CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix<double,1,Dynamic>(c)) ));
}
CALL_SUBTEST_7(( jacobisvd_all_options<MatrixXd>(MatrixXd(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
CALL_SUBTEST_8(( jacobisvd_all_options<MatrixXcd>(MatrixXcd(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3))) ));
CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
CALL_SUBTEST_8(( jacobisvd<MatrixXcd>(MatrixXcd(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3))) ));
// test matrixbase method
CALL_SUBTEST_1(( jacobisvd_method<Matrix2cd>() ));
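The asserts exercised above keep a restriction of the runtime API: the FullPivHouseholderQR preconditioner only produces full unitaries, so thin U/V must be requested with another preconditioner (and require a dynamic number of columns). A small usage sketch:

    #include <Eigen/Dense>
    using namespace Eigen;

    int main() {
      MatrixXd m = MatrixXd::Random(8, 5);
      // Full factors are fine with every preconditioner, including full pivoting.
      JacobiSVD<MatrixXd, FullPivHouseholderQRPreconditioner> full(m, ComputeFullU | ComputeFullV);
      // Thin factors need one of the other preconditioners.
      JacobiSVD<MatrixXd, ColPivHouseholderQRPreconditioner> thin(m, ComputeThinU | ComputeThinV);
      return full.singularValues().isApprox(thin.singularValues()) ? 0 : 1;
    }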

View File

@ -152,7 +152,7 @@ void ctms_decompositions()
x = fpQR.solve(b);
// SVD module
Eigen::JacobiSVD<Matrix, ComputeFullU | ComputeFullV> jSVD; jSVD.compute(A);
Eigen::JacobiSVD<Matrix> jSVD; jSVD.compute(A, ComputeFullU | ComputeFullV);
}
void test_zerosized() {

View File

@ -55,7 +55,7 @@ void cod() {
MatrixType exact_solution = MatrixType::Random(cols, cols2);
MatrixType rhs = matrix * exact_solution;
MatrixType cod_solution = cod.solve(rhs);
JacobiSVD<MatrixType, ComputeThinU | ComputeThinV> svd(matrix);
JacobiSVD<MatrixType> svd(matrix, ComputeThinU | ComputeThinV);
MatrixType svd_solution = svd.solve(rhs);
VERIFY_IS_APPROX(cod_solution, svd_solution);
@ -88,7 +88,7 @@ void cod_fixedsize() {
exact_solution.setRandom(Cols, Cols2);
Matrix<Scalar, Rows, Cols2> rhs = matrix * exact_solution;
Matrix<Scalar, Cols, Cols2> cod_solution = cod.solve(rhs);
JacobiSVD<MatrixType, ComputeFullU | ComputeFullV> svd(matrix);
JacobiSVD<MatrixType> svd(matrix, ComputeFullU | ComputeFullV);
Matrix<Scalar, Cols, Cols2> svd_solution = svd.solve(rhs);
VERIFY_IS_APPROX(cod_solution, svd_solution);

View File

@ -16,10 +16,6 @@
#error a macro SVD_FOR_MIN_NORM(MatrixType) must be defined prior to including svd_common.h
#endif
#ifndef SVD_STATIC_OPTIONS
#error a macro SVD_STATIC_OPTIONS(MatrixType, Options) must be defined prior to including svd_common.h
#endif
#include "svd_fill.h"
#include "solverbase.h"
@ -59,8 +55,9 @@ void svd_check_full(const MatrixType& m, const SvdType& svd)
}
// Compare partial SVD defined by computationOptions to a full SVD referenceSvd
template<typename MatrixType, typename SvdType, int Options>
template<typename SvdType, typename MatrixType>
void svd_compare_to_full(const MatrixType& m,
unsigned int computationOptions,
const SvdType& referenceSvd)
{
typedef typename MatrixType::RealScalar RealScalar;
@ -69,18 +66,18 @@ void svd_compare_to_full(const MatrixType& m,
Index diagSize = (std::min)(rows, cols);
RealScalar prec = test_precision<RealScalar>();
SVD_STATIC_OPTIONS(MatrixType, Options) svd(m);
SvdType svd(m, computationOptions);
VERIFY_IS_APPROX(svd.singularValues(), referenceSvd.singularValues());
if(Options & (ComputeFullV|ComputeThinV))
if(computationOptions & (ComputeFullV|ComputeThinV))
{
VERIFY( (svd.matrixV().adjoint()*svd.matrixV()).isIdentity(prec) );
VERIFY_IS_APPROX( svd.matrixV().leftCols(diagSize) * svd.singularValues().asDiagonal() * svd.matrixV().leftCols(diagSize).adjoint(),
referenceSvd.matrixV().leftCols(diagSize) * referenceSvd.singularValues().asDiagonal() * referenceSvd.matrixV().leftCols(diagSize).adjoint());
}
if(Options & (ComputeFullU|ComputeThinU))
if(computationOptions & (ComputeFullU|ComputeThinU))
{
VERIFY( (svd.matrixU().adjoint()*svd.matrixU()).isIdentity(prec) );
VERIFY_IS_APPROX( svd.matrixU().leftCols(diagSize) * svd.singularValues().cwiseAbs2().asDiagonal() * svd.matrixU().leftCols(diagSize).adjoint(),
@ -88,18 +85,19 @@ void svd_compare_to_full(const MatrixType& m,
}
// The following checks are not critical.
// For instance, with Dived&Conquer SVD, if only the factor 'V' is computed then different matrix-matrix product implementation will be used
// For instance, with Divide & Conquer SVD, if only the factor 'V' is computed then different matrix-matrix product implementation will be used
// and the resulting 'V' factor might be significantly different when the SVD decomposition is not unique, especially with single precision float.
++g_test_level;
if(Options & ComputeFullU) VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU());
if(Options & ComputeThinU) VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU().leftCols(diagSize));
if(Options & ComputeFullV) VERIFY_IS_APPROX(svd.matrixV().cwiseAbs(), referenceSvd.matrixV().cwiseAbs());
if(Options & ComputeThinV) VERIFY_IS_APPROX(svd.matrixV(), referenceSvd.matrixV().leftCols(diagSize));
if(computationOptions & ComputeFullU) VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU());
if(computationOptions & ComputeThinU) VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU().leftCols(diagSize));
if(computationOptions & ComputeFullV) VERIFY_IS_APPROX(svd.matrixV().cwiseAbs(), referenceSvd.matrixV().cwiseAbs());
if(computationOptions & ComputeThinV) VERIFY_IS_APPROX(svd.matrixV(), referenceSvd.matrixV().leftCols(diagSize));
--g_test_level;
}
//
template<typename SvdType, typename MatrixType>
void svd_least_square(const MatrixType& m)
void svd_least_square(const MatrixType& m, unsigned int computationOptions)
{
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
@ -115,7 +113,7 @@ void svd_least_square(const MatrixType& m)
typedef Matrix<Scalar, ColsAtCompileTime, Dynamic> SolutionType;
RhsType rhs = RhsType::Random(rows, internal::random<Index>(1, cols));
SvdType svd(m);
SvdType svd(m, computationOptions);
if(internal::is_same<RealScalar,double>::value) svd.setThreshold(1e-8);
else if(internal::is_same<RealScalar,float>::value) svd.setThreshold(2e-4);
@ -164,9 +162,9 @@ void svd_least_square(const MatrixType& m)
}
}
// check minimal norm solutions, the input matrix m is only used to recover problem size
template<typename MatrixType, int Options>
void svd_min_norm(const MatrixType& m)
// check minimal norm solutions, the input matrix m is only used to recover problem size
template<typename MatrixType>
void svd_min_norm(const MatrixType& m, unsigned int computationOptions)
{
typedef typename MatrixType::Scalar Scalar;
Index cols = m.cols();
@ -201,7 +199,7 @@ void svd_min_norm(const MatrixType& m)
tmp.tail(cols-rank).setZero();
SolutionType x21 = qr.householderQ() * tmp;
// now check with SVD
SVD_STATIC_OPTIONS(MatrixType2, Options) svd2(m2);
SVD_FOR_MIN_NORM(MatrixType2) svd2(m2, computationOptions);
SolutionType x22 = svd2.solve(rhs2);
VERIFY_IS_APPROX(m2*x21, rhs2);
VERIFY_IS_APPROX(m2*x22, rhs2);
@ -214,7 +212,7 @@ void svd_min_norm(const MatrixType& m)
Matrix<Scalar,RowsAtCompileTime3,Dynamic> C = Matrix<Scalar,RowsAtCompileTime3,Dynamic>::Random(rows3,rank);
MatrixType3 m3 = C * m2;
RhsType3 rhs3 = C * rhs2;
SVD_STATIC_OPTIONS(MatrixType3, Options) svd3(m3);
SVD_FOR_MIN_NORM(MatrixType3) svd3(m3, computationOptions);
SolutionType x3 = svd3.solve(rhs3);
VERIFY_IS_APPROX(m3*x3, rhs3);
VERIFY_IS_APPROX(m3*x21, rhs3);
@ -241,6 +239,57 @@ void svd_test_solvers(const MatrixType& m, const SolverType& solver) {
check_solverbase<CMatrixType, MatrixType>(m, solver, rows, cols, cols2);
}
// Check full, compare_to_full, least_square, and min_norm for all possible compute-options
template<typename SvdType, typename MatrixType>
void svd_test_all_computation_options(const MatrixType& m, bool full_only)
{
// if (QRPreconditioner == NoQRPreconditioner && m.rows() != m.cols())
// return;
STATIC_CHECK(( internal::is_same<typename SvdType::StorageIndex,int>::value ));
SvdType fullSvd(m, ComputeFullU|ComputeFullV);
CALL_SUBTEST(( svd_check_full(m, fullSvd) ));
CALL_SUBTEST(( svd_least_square<SvdType>(m, ComputeFullU | ComputeFullV) ));
CALL_SUBTEST(( svd_min_norm(m, ComputeFullU | ComputeFullV) ));
#if defined __INTEL_COMPILER
// remark #111: statement is unreachable
#pragma warning disable 111
#endif
svd_test_solvers(m, fullSvd);
if(full_only)
return;
CALL_SUBTEST(( svd_compare_to_full(m, ComputeFullU, fullSvd) ));
CALL_SUBTEST(( svd_compare_to_full(m, ComputeFullV, fullSvd) ));
CALL_SUBTEST(( svd_compare_to_full(m, 0, fullSvd) ));
if (MatrixType::ColsAtCompileTime == Dynamic) {
// thin U/V are only available with dynamic number of columns
CALL_SUBTEST(( svd_compare_to_full(m, ComputeFullU|ComputeThinV, fullSvd) ));
CALL_SUBTEST(( svd_compare_to_full(m, ComputeThinV, fullSvd) ));
CALL_SUBTEST(( svd_compare_to_full(m, ComputeThinU|ComputeFullV, fullSvd) ));
CALL_SUBTEST(( svd_compare_to_full(m, ComputeThinU , fullSvd) ));
CALL_SUBTEST(( svd_compare_to_full(m, ComputeThinU|ComputeThinV, fullSvd) ));
CALL_SUBTEST(( svd_least_square<SvdType>(m, ComputeFullU | ComputeThinV) ));
CALL_SUBTEST(( svd_least_square<SvdType>(m, ComputeThinU | ComputeFullV) ));
CALL_SUBTEST(( svd_least_square<SvdType>(m, ComputeThinU | ComputeThinV) ));
CALL_SUBTEST(( svd_min_norm(m, ComputeFullU | ComputeThinV) ));
CALL_SUBTEST(( svd_min_norm(m, ComputeThinU | ComputeFullV) ));
CALL_SUBTEST(( svd_min_norm(m, ComputeThinU | ComputeThinV) ));
// test reconstruction
Index diagSize = (std::min)(m.rows(), m.cols());
SvdType svd(m, ComputeThinU | ComputeThinV);
VERIFY_IS_APPROX(m, svd.matrixU().leftCols(diagSize) * svd.singularValues().asDiagonal() * svd.matrixV().leftCols(diagSize).adjoint());
}
}
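A compact sketch of the option sweep the helper above performs, using JacobiSVD directly; the thin variants are legal here only because MatrixXd has a dynamic number of columns.

    #include <Eigen/Dense>
    #include <iostream>
    using namespace Eigen;

    int main() {
      MatrixXd m = MatrixXd::Random(6, 4);
      const unsigned int opts[] = {
        0, ComputeFullU, ComputeFullV, ComputeFullU | ComputeFullV,
        ComputeThinU, ComputeThinV, ComputeThinU | ComputeThinV,
        ComputeFullU | ComputeThinV, ComputeThinU | ComputeFullV
      };
      for (unsigned int o : opts) {
        JacobiSVD<MatrixXd> svd(m, o);  // singular values are always available
        std::cout << o << ": " << svd.singularValues().transpose() << "\n";
      }
    }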
// work around stupid msvc error when constructing at compile time an expression that involves
// a division by zero, even if the numeric type has floating point
template<typename Scalar>
@ -249,32 +298,31 @@ EIGEN_DONT_INLINE Scalar zero() { return Scalar(0); }
// workaround aggressive optimization in ICC
template<typename T> EIGEN_DONT_INLINE T sub(T a, T b) { return a - b; }
// This function verifies we don't iterate infinitely on nan/inf values,
// and that info() returns InvalidInput.
template<typename MatrixType>
template<typename SvdType, typename MatrixType>
void svd_inf_nan()
{
SVD_STATIC_OPTIONS(MatrixType, ComputeFullU | ComputeFullV) svd;
SvdType svd;
typedef typename MatrixType::Scalar Scalar;
Scalar some_inf = Scalar(1) / zero<Scalar>();
VERIFY(sub(some_inf, some_inf) != sub(some_inf, some_inf));
svd.compute(MatrixType::Constant(10,10,some_inf));
svd.compute(MatrixType::Constant(10,10,some_inf), ComputeFullU | ComputeFullV);
VERIFY(svd.info() == InvalidInput);
Scalar nan = std::numeric_limits<Scalar>::quiet_NaN();
VERIFY(nan != nan);
svd.compute(MatrixType::Constant(10,10,nan));
svd.compute(MatrixType::Constant(10,10,nan), ComputeFullU | ComputeFullV);
VERIFY(svd.info() == InvalidInput);
MatrixType m = MatrixType::Zero(10,10);
m(internal::random<int>(0,9), internal::random<int>(0,9)) = some_inf;
svd.compute(m);
svd.compute(m, ComputeFullU | ComputeFullV);
VERIFY(svd.info() == InvalidInput);
m = MatrixType::Zero(10,10);
m(internal::random<int>(0,9), internal::random<int>(0,9)) = nan;
svd.compute(m);
svd.compute(m, ComputeFullU | ComputeFullV);
VERIFY(svd.info() == InvalidInput);
// regression test for bug 791
@ -282,7 +330,7 @@ void svd_inf_nan()
m << 0, 2*NumTraits<Scalar>::epsilon(), 0.5,
0, -0.5, 0,
nan, 0, 0;
svd.compute(m);
svd.compute(m, ComputeFullU | ComputeFullV);
VERIFY(svd.info() == InvalidInput);
m.resize(4,4);
@ -290,7 +338,7 @@ void svd_inf_nan()
0, 3, 1, 2e-308,
1, 0, 1, nan,
0, nan, nan, 0;
svd.compute(m);
svd.compute(m, ComputeFullU | ComputeFullV);
VERIFY(svd.info() == InvalidInput);
}
@ -307,8 +355,8 @@ void svd_underoverflow()
Matrix2d M;
M << -7.90884e-313, -4.94e-324,
0, 5.60844e-313;
SVD_STATIC_OPTIONS(Matrix2d, ComputeFullU | ComputeFullV) svd;
svd.compute(M);
SVD_DEFAULT(Matrix2d) svd;
svd.compute(M,ComputeFullU|ComputeFullV);
CALL_SUBTEST( svd_check_full(M,svd) );
// Check all 2x2 matrices made with the following coefficients:
@ -319,7 +367,7 @@ void svd_underoverflow()
do
{
M << value_set(id(0)), value_set(id(1)), value_set(id(2)), value_set(id(3));
svd.compute(M);
svd.compute(M,ComputeFullU|ComputeFullV);
CALL_SUBTEST( svd_check_full(M,svd) );
id(k)++;
@ -342,13 +390,15 @@ void svd_underoverflow()
3.7841695601406358e+307, 2.4331702789740617e+306, -3.5235707140272905e+307,
-8.7190887618028355e+307, -7.3453213709232193e+307, -2.4367363684472105e+307;
SVD_STATIC_OPTIONS(Matrix3d, ComputeFullU|ComputeFullV) svd3;
svd3.compute(M3); // just check we don't loop indefinitely
SVD_DEFAULT(Matrix3d) svd3;
svd3.compute(M3,ComputeFullU|ComputeFullV); // just check we don't loop indefinitely
CALL_SUBTEST( svd_check_full(M3,svd3) );
}
// void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true)
template<typename MatrixType>
void svd_all_trivial_2x2( void (*cb)(const MatrixType&) )
void svd_all_trivial_2x2( void (*cb)(const MatrixType&,bool) )
{
MatrixType M;
VectorXd value_set(3);
@ -359,7 +409,7 @@ void svd_all_trivial_2x2( void (*cb)(const MatrixType&) )
{
M << value_set(id(0)), value_set(id(1)), value_set(id(2)), value_set(id(3));
cb(M);
cb(M,false);
id(k)++;
if(id(k)>=value_set.size())
@ -384,10 +434,22 @@ void svd_preallocate()
internal::set_is_malloc_allowed(true);
svd.compute(m);
VERIFY_IS_APPROX(svd.singularValues(), v);
VERIFY_RAISES_ASSERT(svd.matrixU());
VERIFY_RAISES_ASSERT(svd.matrixV());
SVD_STATIC_OPTIONS(MatrixXf, ComputeFullU | ComputeFullV) svd2(3,3);
SVD_DEFAULT(MatrixXf) svd2(3,3);
internal::set_is_malloc_allowed(false);
svd2.compute(m);
internal::set_is_malloc_allowed(true);
VERIFY_IS_APPROX(svd2.singularValues(), v);
VERIFY_RAISES_ASSERT(svd2.matrixU());
VERIFY_RAISES_ASSERT(svd2.matrixV());
svd2.compute(m, ComputeFullU | ComputeFullV);
VERIFY_IS_APPROX(svd2.matrixU(), Matrix3f::Identity());
VERIFY_IS_APPROX(svd2.matrixV(), Matrix3f::Identity());
internal::set_is_malloc_allowed(false);
svd2.compute(m);
internal::set_is_malloc_allowed(true);
SVD_DEFAULT(MatrixXf) svd3(3,3,ComputeFullU|ComputeFullV);
internal::set_is_malloc_allowed(false);
svd2.compute(m);
internal::set_is_malloc_allowed(true);
@ -395,168 +457,65 @@ void svd_preallocate()
VERIFY_IS_APPROX(svd2.matrixU(), Matrix3f::Identity());
VERIFY_IS_APPROX(svd2.matrixV(), Matrix3f::Identity());
internal::set_is_malloc_allowed(false);
svd2.compute(m);
svd2.compute(m, ComputeFullU|ComputeFullV);
internal::set_is_malloc_allowed(true);
}
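The preallocation checks above rely on a property of the runtime API worth spelling out: once a decomposition has been computed for a given size and option set, recomputing with the same size and options reuses the existing workspace. A usage sketch, without the malloc-tracking hooks used by the test:

    #include <Eigen/Dense>
    using namespace Eigen;

    int main() {
      MatrixXf m = MatrixXf::Identity(3, 3);
      BDCSVD<MatrixXf> svd(3, 3, ComputeFullU | ComputeFullV);  // workspace allocated here
      svd.compute(m, ComputeFullU | ComputeFullV);
      svd.compute(m, ComputeFullU | ComputeFullV);  // same size and options: no reallocation
      return 0;
    }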
template<typename MatrixType, int QRPreconditioner = 0>
void svd_verify_assert_full_only(const MatrixType& m = MatrixType())
template<typename SvdType,typename MatrixType>
void svd_verify_assert(const MatrixType& m, bool fullOnly = false)
{
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime
};
typedef Matrix<typename MatrixType::Scalar, RowsAtCompileTime, 1> RhsType;
RhsType rhs = RhsType::Zero(m.rows());
SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner) svd0;
VERIFY_RAISES_ASSERT(( svd0.matrixU() ));
VERIFY_RAISES_ASSERT(( svd0.singularValues() ));
VERIFY_RAISES_ASSERT(( svd0.matrixV() ));
VERIFY_RAISES_ASSERT(( svd0.solve(rhs) ));
VERIFY_RAISES_ASSERT(( svd0.transpose().solve(rhs) ));
VERIFY_RAISES_ASSERT(( svd0.adjoint().solve(rhs) ));
SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner) svd1(m);
VERIFY_RAISES_ASSERT(( svd1.matrixU() ));
VERIFY_RAISES_ASSERT(( svd1.matrixV() ));
VERIFY_RAISES_ASSERT(( svd1.solve(rhs)));
SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeFullU) svdFullU(m);
VERIFY_RAISES_ASSERT(( svdFullU.matrixV() ));
VERIFY_RAISES_ASSERT(( svdFullU.solve(rhs)));
SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeFullV) svdFullV(m);
VERIFY_RAISES_ASSERT(( svdFullV.matrixU() ));
VERIFY_RAISES_ASSERT(( svdFullV.solve(rhs)));
}
template<typename MatrixType, int QRPreconditioner = 0>
void svd_verify_assert(const MatrixType& m = MatrixType())
{
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime
};
typedef Matrix<typename MatrixType::Scalar, RowsAtCompileTime, 1> RhsType;
RhsType rhs = RhsType::Zero(m.rows());
SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeThinU) svdThinU(m);
VERIFY_RAISES_ASSERT(( svdThinU.matrixV() ));
VERIFY_RAISES_ASSERT(( svdThinU.solve(rhs)));
SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeThinV) svdThinV(m);
VERIFY_RAISES_ASSERT(( svdThinV.matrixU() ));
VERIFY_RAISES_ASSERT(( svdThinV.solve(rhs)));
svd_verify_assert_full_only<MatrixType, QRPreconditioner>(m);
}
template<typename MatrixType, int Options>
void svd_compute_checks(const MatrixType& m)
{
typedef SVD_STATIC_OPTIONS(MatrixType, Options) SVDType;
typedef typename MatrixType::Scalar Scalar;
Index rows = m.rows();
Index cols = m.cols();
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
DiagAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime),
MatrixURowsAtCompileTime = SVDType::MatrixUType::RowsAtCompileTime,
MatrixUColsAtCompileTime = SVDType::MatrixUType::ColsAtCompileTime,
MatrixVRowsAtCompileTime = SVDType::MatrixVType::RowsAtCompileTime,
MatrixVColsAtCompileTime = SVDType::MatrixVType::ColsAtCompileTime
ColsAtCompileTime = MatrixType::ColsAtCompileTime
};
SVDType staticSvd(m);
VERIFY(MatrixURowsAtCompileTime == RowsAtCompileTime);
VERIFY(MatrixVRowsAtCompileTime == ColsAtCompileTime);
if (Options & ComputeThinU) VERIFY(MatrixUColsAtCompileTime == DiagAtCompileTime);
if (Options & ComputeFullU) VERIFY(MatrixUColsAtCompileTime == RowsAtCompileTime);
if (Options & ComputeThinV) VERIFY(MatrixVColsAtCompileTime == DiagAtCompileTime);
if (Options & ComputeFullV) VERIFY(MatrixVColsAtCompileTime == ColsAtCompileTime);
typedef Matrix<Scalar, RowsAtCompileTime, 1> RhsType;
RhsType rhs(rows);
SvdType svd;
VERIFY_RAISES_ASSERT(svd.matrixU())
VERIFY_RAISES_ASSERT(svd.singularValues())
VERIFY_RAISES_ASSERT(svd.matrixV())
VERIFY_RAISES_ASSERT(svd.solve(rhs))
VERIFY_RAISES_ASSERT(svd.transpose().solve(rhs))
VERIFY_RAISES_ASSERT(svd.adjoint().solve(rhs))
MatrixType a = MatrixType::Zero(rows, cols);
a.setZero();
svd.compute(a, 0);
VERIFY_RAISES_ASSERT(svd.matrixU())
VERIFY_RAISES_ASSERT(svd.matrixV())
svd.singularValues();
VERIFY_RAISES_ASSERT(svd.solve(rhs))
if (Options & (ComputeThinU|ComputeFullU)) VERIFY(staticSvd.computeU());
else VERIFY(!staticSvd.computeU());
if (Options & (ComputeThinV|ComputeFullV)) VERIFY(staticSvd.computeV());
else VERIFY(!staticSvd.computeV());
svd.compute(a, ComputeFullU);
svd.matrixU();
VERIFY_RAISES_ASSERT(svd.matrixV())
VERIFY_RAISES_ASSERT(svd.solve(rhs))
svd.compute(a, ComputeFullV);
svd.matrixV();
VERIFY_RAISES_ASSERT(svd.matrixU())
VERIFY_RAISES_ASSERT(svd.solve(rhs))
if (staticSvd.computeU()) VERIFY(staticSvd.matrixU().isUnitary());
if (staticSvd.computeV()) VERIFY(staticSvd.matrixV().isUnitary());
if (staticSvd.computeU() && staticSvd.computeV())
if (!fullOnly && ColsAtCompileTime == Dynamic)
{
svd_test_solvers(m, staticSvd);
svd_least_square<SVDType, MatrixType>(m);
// svd_min_norm generates non-square matrices so it can't be used with NoQRPreconditioner
if ((Options & internal::QRPreconditionerBits) != NoQRPreconditioner)
svd_min_norm<MatrixType, Options>(m);
svd.compute(a, ComputeThinU);
svd.matrixU();
VERIFY_RAISES_ASSERT(svd.matrixV())
VERIFY_RAISES_ASSERT(svd.solve(rhs))
svd.compute(a, ComputeThinV);
svd.matrixV();
VERIFY_RAISES_ASSERT(svd.matrixU())
VERIFY_RAISES_ASSERT(svd.solve(rhs))
}
else
{
VERIFY_RAISES_ASSERT(svd.compute(a, ComputeThinU))
VERIFY_RAISES_ASSERT(svd.compute(a, ComputeThinV))
}
}
template<typename MatrixType, int QRPreconditioner = 0>
void svd_option_checks(const MatrixType& m)
{
// singular values only
svd_compute_checks<MatrixType, QRPreconditioner>(m);
// Thin only
svd_compute_checks<MatrixType, QRPreconditioner | ComputeThinU >(m);
svd_compute_checks<MatrixType, QRPreconditioner | ComputeThinV >(m);
svd_compute_checks<MatrixType, QRPreconditioner | ComputeThinU | ComputeThinV>(m);
// Full only
svd_compute_checks<MatrixType, QRPreconditioner | ComputeFullU >(m);
svd_compute_checks<MatrixType, QRPreconditioner | ComputeFullV >(m);
svd_compute_checks<MatrixType, QRPreconditioner | ComputeFullU | ComputeFullV>(m);
// Mixed
svd_compute_checks<MatrixType, QRPreconditioner | ComputeThinU | ComputeFullV>(m);
svd_compute_checks<MatrixType, QRPreconditioner | ComputeFullU | ComputeThinV>(m);
typedef SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeFullU | ComputeFullV) FullSvdType;
FullSvdType fullSvd(m);
svd_check_full(m, fullSvd);
svd_compare_to_full<MatrixType, FullSvdType, QRPreconditioner | ComputeFullU | ComputeFullV>(m, fullSvd);
}
template<typename MatrixType, int QRPreconditioner = 0>
void svd_option_checks_full_only(const MatrixType& m)
{
svd_compute_checks<MatrixType, QRPreconditioner | ComputeFullU>(m);
svd_compute_checks<MatrixType, QRPreconditioner | ComputeFullV>(m);
svd_compute_checks<MatrixType, QRPreconditioner | ComputeFullU | ComputeFullV>(m);
SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeFullU | ComputeFullV) fullSvd(m);
svd_check_full(m, fullSvd);
}
template<typename MatrixType, int QRPreconditioner = 0>
void svd_check_max_size_matrix(int initialRows, int initialCols)
{
enum {
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
int rows = MaxRowsAtCompileTime == Dynamic ? initialRows : (std::min)(initialRows, (int)MaxRowsAtCompileTime);
int cols = MaxColsAtCompileTime == Dynamic ? initialCols : (std::min)(initialCols, (int)MaxColsAtCompileTime);
MatrixType m(rows, cols);
SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeThinU | ComputeThinV) thinSvd(m);
SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeThinU | ComputeFullV) mixedSvd1(m);
SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeFullU | ComputeThinV) mixedSvd2(m);
SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeFullU | ComputeFullV) fullSvd(m);
MatrixType n(MaxRowsAtCompileTime, MaxColsAtCompileTime);
thinSvd.compute(n);
mixedSvd1.compute(n);
mixedSvd2.compute(n);
fullSvd.compute(n);
MatrixX<typename MatrixType::Scalar> dynamicMatrix(MaxRowsAtCompileTime + 1, MaxColsAtCompileTime + 1);
VERIFY_RAISES_ASSERT(thinSvd.compute(dynamicMatrix));
VERIFY_RAISES_ASSERT(mixedSvd1.compute(dynamicMatrix));
VERIFY_RAISES_ASSERT(mixedSvd2.compute(dynamicMatrix));
VERIFY_RAISES_ASSERT(fullSvd.compute(dynamicMatrix));
}
#undef SVD_DEFAULT
#undef SVD_FOR_MIN_NORM
#undef SVD_STATIC_OPTIONS