add specialization of check_sparse_solving() for SuperLU solver, in order to test adjoint and transpose solves
parent  b578930657
commit  984d010b7b
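
The diff below adds a SparseLUTransposeView expression plus transpose() and adjoint() members to SparseLU, and a check_sparse_solving() specialization exercising them. A minimal usage sketch of that API (the matrix values and variable names here are invented for illustration, not taken from the patch):

#include <Eigen/SparseCore>
#include <Eigen/SparseLU>
#include <iostream>
#include <vector>

int main()
{
  // Illustrative sketch only: a small invertible matrix with made-up values.
  typedef Eigen::SparseMatrix<double> SpMat;
  std::vector<Eigen::Triplet<double> > entries;
  entries.push_back(Eigen::Triplet<double>(0, 0, 4.0));
  entries.push_back(Eigen::Triplet<double>(0, 1, 1.0));
  entries.push_back(Eigen::Triplet<double>(1, 1, 3.0));
  entries.push_back(Eigen::Triplet<double>(2, 0, 2.0));
  entries.push_back(Eigen::Triplet<double>(2, 2, 5.0));
  SpMat A(3, 3);
  A.setFromTriplets(entries.begin(), entries.end());

  Eigen::VectorXd b(3);
  b << 1.0, 2.0, 3.0;

  // One factorization is reused for the A, A^T and A^* solves.
  Eigen::SparseLU<SpMat> solver;
  solver.compute(A);
  if (solver.info() != Eigen::Success) return 1;

  Eigen::VectorXd x1 = solver.solve(b);             // A   x = b
  Eigen::VectorXd x2 = solver.transpose().solve(b); // A^T x = b  (added by this patch)
  Eigen::VectorXd x3 = solver.adjoint().solve(b);   // A^* x = b  (same as A^T for real scalars)

  std::cout << x1.transpose() << "\n" << x2.transpose() << "\n" << x3.transpose() << "\n";
  return 0;
}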
@@ -18,6 +18,63 @@ template <typename _MatrixType, typename _OrderingType = COLAMDOrdering<typename
template <typename MappedSparseMatrixType> struct SparseLUMatrixLReturnType;
template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixUReturnType;

template <bool Conjugate,class SparseLUType>
class SparseLUTransposeView : public SparseSolverBase<SparseLUTransposeView<Conjugate,SparseLUType> >
{
  protected:
    typedef SparseSolverBase<SparseLUTransposeView<Conjugate,SparseLUType> > APIBase;
    using APIBase::m_isInitialized;
  public:
    typedef typename SparseLUType::Scalar Scalar;
    typedef typename SparseLUType::StorageIndex StorageIndex;
    typedef typename SparseLUType::MatrixType MatrixType;
    typedef typename SparseLUType::OrderingType OrderingType;

    enum {
      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
    };

    SparseLUTransposeView() : m_sparseLU(nullptr) {}
    SparseLUTransposeView(const SparseLUTransposeView& view) {
      this->m_sparseLU = view.m_sparseLU;
    }
    void setIsInitialized(const bool isInitialized) {this->m_isInitialized = isInitialized;}
    void setSparseLU(SparseLUType* sparseLU) {m_sparseLU = sparseLU;}
    using APIBase::_solve_impl;
    template<typename Rhs, typename Dest>
    bool _solve_impl(const MatrixBase<Rhs> &B, MatrixBase<Dest> &X_base) const
    {
      Dest& X(X_base.derived());
      eigen_assert(m_sparseLU->info() == Success && "The matrix should be factorized first");
      EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
                          THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);

      // this ugly const_cast_derived() helps to detect aliasing when applying the permutations
      for(Index j = 0; j < B.cols(); ++j){
        X.col(j) = m_sparseLU->colsPermutation() * B.const_cast_derived().col(j);
      }
      //Forward substitution with transposed or adjoint of U
      m_sparseLU->matrixU().template solveTransposedInPlace<Conjugate>(X);

      //Backward substitution with transposed or adjoint of L
      m_sparseLU->matrixL().template solveTransposedInPlace<Conjugate>(X);

      // Permute back the solution
      for (Index j = 0; j < B.cols(); ++j)
        X.col(j) = m_sparseLU->rowsPermutation().transpose() * X.col(j);
      return true;
    }
    inline Index rows() const { return m_sparseLU->rows(); }
    inline Index cols() const { return m_sparseLU->cols(); }

  private:
    SparseLUType *m_sparseLU;
    SparseLUTransposeView& operator=(const SparseLUTransposeView&);
};

/** \ingroup SparseLU_Module
  * \class SparseLU
  *
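The order of operations in _solve_impl above follows from transposing the factorization. Assuming the factorization convention documented for SparseLU, namely P_r A P_c^T = L U, a sketch of the algebra (not part of the patch):

    P_r A P_c^T = L U   =>   A = P_r^T L U P_c   =>   A^T = P_c^T U^T L^T P_r
    A^T x = b           =>   x = P_r^T L^{-T} ( U^{-T} ( P_c b ) )

which matches the code path: apply colsPermutation(), forward-solve with U^T, backward-solve with L^T, then apply rowsPermutation().transpose(). For the adjoint case (Conjugate = true), L^T and U^T become L^* and U^*.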
@@ -97,6 +154,7 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
    };

  public:

    SparseLU():m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
    {
      initperfvalues();
@@ -129,6 +187,45 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
      factorize(matrix);
    }

    /** \returns an expression of the transposed of the factored matrix.
      *
      * A typical usage is to solve for the transposed problem A^T x = b:
      * \code
      * solver.compute(A);
      * x = solver.transpose().solve(b);
      * \endcode
      *
      * \sa adjoint(), solve()
      */
    const SparseLUTransposeView<false,SparseLU<_MatrixType,_OrderingType> > transpose()
    {
      SparseLUTransposeView<false, SparseLU<_MatrixType,_OrderingType> > transposeView;
      transposeView.setSparseLU(this);
      transposeView.setIsInitialized(this->m_isInitialized);
      return transposeView;
    }


    /** \returns an expression of the adjoint of the factored matrix
      *
      * A typical usage is to solve for the adjoint problem A' x = b:
      * \code
      * solver.compute(A);
      * x = solver.adjoint().solve(b);
      * \endcode
      *
      * For real scalar types, this function is equivalent to transpose().
      *
      * \sa transpose(), solve()
      */
    const SparseLUTransposeView<true, SparseLU<_MatrixType,_OrderingType> > adjoint()
    {
      SparseLUTransposeView<true, SparseLU<_MatrixType,_OrderingType> > adjointView;
      adjointView.setSparseLU(this);
      adjointView.setIsInitialized(this->m_isInitialized);
      return adjointView;
    }

    inline Index rows() const { return m_mat.rows(); }
    inline Index cols() const { return m_mat.cols(); }

    /** Indicate that the pattern of the input matrix is symmetric */
@@ -394,7 +491,6 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
  private:
    // Disable copy constructor
    SparseLU (const SparseLU& );

}; // End class SparseLU

@@ -712,6 +808,12 @@ struct SparseLUMatrixLReturnType : internal::no_assignment_operator
  {
    m_mapL.solveInPlace(X);
  }
  template<bool Conjugate, typename Dest>
  void solveTransposedInPlace( MatrixBase<Dest> &X) const
  {
    m_mapL.template solveTransposedInPlace<Conjugate>(X);
  }

  const MappedSupernodalType& m_mapL;
};

@@ -766,6 +868,52 @@ struct SparseLUMatrixUReturnType : internal::no_assignment_operator
      }
    } // End For U-solve
  }

  template<bool Conjugate, typename Dest> void solveTransposedInPlace(MatrixBase<Dest> &X) const
  {
    using numext::conj;
    Index nrhs = X.cols();
    Index n    = X.rows();
    // Forward solve with U
    for (Index k = 0; k <= m_mapL.nsuper(); k++)
    {
      Index fsupc = m_mapL.supToCol()[k];
      Index lda = m_mapL.colIndexPtr()[fsupc+1] - m_mapL.colIndexPtr()[fsupc]; // leading dimension
      Index nsupc = m_mapL.supToCol()[k+1] - fsupc;
      Index luptr = m_mapL.colIndexPtr()[fsupc];

      for (Index j = 0; j < nrhs; ++j)
      {
        for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++)
        {
          typename MatrixUType::InnerIterator it(m_mapU, jcol);
          for ( ; it; ++it)
          {
            Index irow = it.index();
            X(jcol, j) -= X(irow, j) * (Conjugate? conj(it.value()): it.value());
          }
        }
      }
      if (nsupc == 1)
      {
        for (Index j = 0; j < nrhs; j++)
        {
          X(fsupc, j) /= (Conjugate? conj(m_mapL.valuePtr()[luptr]) : m_mapL.valuePtr()[luptr]);
        }
      }
      else
      {
        Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
        Map< Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
        if(Conjugate)
          U = A.adjoint().template triangularView<Lower>().solve(U);
        else
          U = A.transpose().template triangularView<Lower>().solve(U);
      }
    }// End For U-solve
  }


  const MatrixLType& m_mapL;
  const MatrixUType& m_mapU;
};

@@ -156,6 +156,9 @@ class MappedSuperNodalMatrix
    class InnerIterator;
    template<typename Dest>
    void solveInPlace( MatrixBase<Dest>&X) const;
    template<bool Conjugate, typename Dest>
    void solveTransposedInPlace( MatrixBase<Dest>&X) const;



@@ -294,6 +297,77 @@ void MappedSuperNodalMatrix<Scalar,Index_>::solveInPlace( MatrixBase<Dest>&X) co
  }
}

template<typename Scalar, typename Index_>
template<bool Conjugate, typename Dest>
void MappedSuperNodalMatrix<Scalar,Index_>::solveTransposedInPlace( MatrixBase<Dest>&X) const
{
  using numext::conj;
  Index n    = int(X.rows());
  Index nrhs = Index(X.cols());
  const Scalar * Lval = valuePtr();                                       // Nonzero values
  Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor> work(n, nrhs); // working vector
  work.setZero();
  for (Index k = nsuper(); k >= 0; k--)
  {
    Index fsupc = supToCol()[k];                    // First column of the current supernode
    Index istart = rowIndexPtr()[fsupc];            // Pointer index to the subscript of the current column
    Index nsupr = rowIndexPtr()[fsupc+1] - istart;  // Number of rows in the current supernode
    Index nsupc = supToCol()[k+1] - fsupc;          // Number of columns in the current supernode
    Index nrow = nsupr - nsupc;                     // Number of rows in the non-diagonal part of the supernode
    Index irow;                                     // Current index row

    if (nsupc == 1 )
    {
      for (Index j = 0; j < nrhs; j++)
      {
        InnerIterator it(*this, fsupc);
        ++it; // Skip the diagonal element
        for (; it; ++it)
        {
          irow = it.row();
          X(fsupc,j) -= X(irow, j) * (Conjugate?conj(it.value()):it.value());
        }
      }
    }
    else
    {
      // The supernode has more than one column
      Index luptr = colIndexPtr()[fsupc];
      Index lda = colIndexPtr()[fsupc+1] - luptr;

      //Begin Gather
      for (Index j = 0; j < nrhs; j++)
      {
        Index iptr = istart + nsupc;
        for (Index i = 0; i < nrow; i++)
        {
          irow = rowIndex()[iptr];
          work.topRows(nrow)(i,j)= X(irow,j); // Gather operation
          iptr++;
        }
      }

      // Matrix-vector product with transposed submatrix
      Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
      Map< Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
      if(Conjugate)
        U = U - A.adjoint() * work.topRows(nrow);
      else
        U = U - A.transpose() * work.topRows(nrow);

      // Triangular solve (of transposed diagonal block)
      new (&A) Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > ( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(lda) );
      if(Conjugate)
        U = A.adjoint().template triangularView<UnitUpper>().solve(U);
      else
        U = A.transpose().template triangularView<UnitUpper>().solve(U);
    }
  }
}


} // end namespace internal

} // end namespace Eigen

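The gather / matrix product / triangular solve sequence in solveTransposedInPlace above is block back-substitution on L^T, sketched here in notation that is not from the patch: for supernode k, let L_kk be its unit lower triangular diagonal block (nsupc columns, leading dimension lda) and L_rk the nrow-by-nsupc block below it, whose rows are gathered into work. Solving L^T x = y then runs from the last supernode down as

    x_k = L_kk^{-T} ( y_k - L_rk^T x_r ),   k = nsuper(), ..., 0

where x_r holds the already computed entries of x at the supernode's row indices below the diagonal block; the UnitUpper triangular solve applies L_kk^{-T}, and with Conjugate set the transposes become adjoints. The forward solve with U^T in SparseLUMatrixUReturnType::solveTransposedInPlace follows the same pattern with the supernodes traversed in the opposite order.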
@@ -9,6 +9,7 @@

#include "sparse.h"
#include <Eigen/SparseCore>
#include <Eigen/SparseLU>
#include <sstream>

template<typename Solver, typename Rhs, typename Guess,typename Result>
@@ -144,6 +145,136 @@ void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A,
  }
}

// specialization of generic check_sparse_solving for SuperLU in order to also test adjoint and transpose solves
template<typename Scalar, typename Rhs, typename DenseMat, typename DenseRhs>
void check_sparse_solving(Eigen::SparseLU<Eigen::SparseMatrix<Scalar> >& solver, const typename Eigen::SparseMatrix<Scalar>& A, const Rhs& b, const DenseMat& dA, const DenseRhs& db)
{
  typedef typename Eigen::SparseMatrix<Scalar> Mat;
  typedef typename Mat::StorageIndex StorageIndex;
  typedef typename Eigen::SparseLU<Eigen::SparseMatrix<Scalar> > Solver;

  // reference solutions computed by dense QR solver
  DenseRhs refX1 = dA.householderQr().solve(db);             // solution of A x = db
  DenseRhs refX2 = dA.transpose().householderQr().solve(db); // solution of A^T * x = db (use transposed matrix A^T)
  DenseRhs refX3 = dA.adjoint().householderQr().solve(db);   // solution of A^* * x = db (use adjoint matrix A^*)

  {
    Rhs x1(A.cols(), b.cols());
    Rhs x2(A.cols(), b.cols());
    Rhs x3(A.cols(), b.cols());
    Rhs oldb = b;

    solver.compute(A);
    if (solver.info() != Success)
    {
      std::cerr << "ERROR | sparse solver testing, factorization failed (" << typeid(Solver).name() << ")\n";
      VERIFY(solver.info() == Success);
    }
    x1 = solver.solve(b);
    if (solver.info() != Success)
    {
      std::cerr << "WARNING | sparse solver testing: solving failed (" << typeid(Solver).name() << ")\n";
      return;
    }
    VERIFY(oldb.isApprox(b,0.0) && "sparse solver testing: the rhs should not be modified!");
    VERIFY(x1.isApprox(refX1,test_precision<Scalar>()));

    // test solve with transposed
    x2 = solver.transpose().solve(b);
    VERIFY(oldb.isApprox(b) && "sparse solver testing: the rhs should not be modified!");
    VERIFY(x2.isApprox(refX2,test_precision<Scalar>()));

    // test solve with adjoint
    //solver.template _solve_impl_transposed<true>(b, x3);
    x3 = solver.adjoint().solve(b);
    VERIFY(oldb.isApprox(b,0.0) && "sparse solver testing: the rhs should not be modified!");
    VERIFY(x3.isApprox(refX3,test_precision<Scalar>()));

    x1.setZero();
    solve_with_guess(solver, b, x1, x1);
    VERIFY(solver.info() == Success && "solving failed when using analyzePattern/factorize API");
    VERIFY(oldb.isApprox(b,0.0) && "sparse solver testing: the rhs should not be modified!");
    VERIFY(x1.isApprox(refX1,test_precision<Scalar>()));

    x1.setZero();
    x2.setZero();
    x3.setZero();
    // test the analyze/factorize API
    solver.analyzePattern(A);
    solver.factorize(A);
    VERIFY(solver.info() == Success && "factorization failed when using analyzePattern/factorize API");
    x1 = solver.solve(b);
    x2 = solver.transpose().solve(b);
    x3 = solver.adjoint().solve(b);

    VERIFY(solver.info() == Success && "solving failed when using analyzePattern/factorize API");
    VERIFY(oldb.isApprox(b,0.0) && "sparse solver testing: the rhs should not be modified!");
    VERIFY(x1.isApprox(refX1,test_precision<Scalar>()));
    VERIFY(x2.isApprox(refX2,test_precision<Scalar>()));
    VERIFY(x3.isApprox(refX3,test_precision<Scalar>()));

    x1.setZero();
    // test with Map
    MappedSparseMatrix<Scalar,Mat::Options,StorageIndex> Am(A.rows(), A.cols(), A.nonZeros(), const_cast<StorageIndex*>(A.outerIndexPtr()), const_cast<StorageIndex*>(A.innerIndexPtr()), const_cast<Scalar*>(A.valuePtr()));
    solver.compute(Am);
    VERIFY(solver.info() == Success && "factorization failed when using Map");
    DenseRhs dx(refX1);
    dx.setZero();
    Map<DenseRhs> xm(dx.data(), dx.rows(), dx.cols());
    Map<const DenseRhs> bm(db.data(), db.rows(), db.cols());
    xm = solver.solve(bm);
    VERIFY(solver.info() == Success && "solving failed when using Map");
    VERIFY(oldb.isApprox(bm,0.0) && "sparse solver testing: the rhs should not be modified!");
    VERIFY(xm.isApprox(refX1,test_precision<Scalar>()));
  }

  // if not too large, do some extra check:
  if(A.rows()<2000)
  {
    // test initialization ctor
    {
      Rhs x(b.rows(), b.cols());
      Solver solver2(A);
      VERIFY(solver2.info() == Success);
      x = solver2.solve(b);
      VERIFY(x.isApprox(refX1,test_precision<Scalar>()));
    }

    // test dense Block as the result and rhs:
    {
      DenseRhs x(refX1.rows(), refX1.cols());
      DenseRhs oldb(db);
      x.setZero();
      x.block(0,0,x.rows(),x.cols()) = solver.solve(db.block(0,0,db.rows(),db.cols()));
      VERIFY(oldb.isApprox(db,0.0) && "sparse solver testing: the rhs should not be modified!");
      VERIFY(x.isApprox(refX1,test_precision<Scalar>()));
    }

    // test uncompressed inputs
    {
      Mat A2 = A;
      A2.reserve((ArrayXf::Random(A.outerSize())+2).template cast<typename Mat::StorageIndex>().eval());
      solver.compute(A2);
      Rhs x = solver.solve(b);
      VERIFY(x.isApprox(refX1,test_precision<Scalar>()));
    }

    // test expression as input
    {
      solver.compute(0.5*(A+A));
      Rhs x = solver.solve(b);
      VERIFY(x.isApprox(refX1,test_precision<Scalar>()));

      Solver solver2(0.5*(A+A));
      Rhs x2 = solver2.solve(b);
      VERIFY(x2.isApprox(refX1,test_precision<Scalar>()));
    }
  }
}


template<typename Solver, typename Rhs>
void check_sparse_solving_real_cases(Solver& solver, const typename Solver::MatrixType& A, const Rhs& b, const typename Solver::MatrixType& fullA, const Rhs& refX)
{
@@ -52,7 +52,7 @@ public:
    Parameters()
      : factor(Scalar(100.))
      , maxfev(1000)
-     , xtol(std::sqrt(NumTraits<Scalar>::epsilon()))
+     , xtol(numext::sqrt(NumTraits<Scalar>::epsilon()))
      , nb_of_subdiagonals(-1)
      , nb_of_superdiagonals(-1)
      , epsfcn(Scalar(0.)) {}
@@ -70,7 +70,7 @@ public:

  HybridNonLinearSolverSpace::Status hybrj1(
      FVectorType &x,
-     const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+     const Scalar tol = numext::sqrt(NumTraits<Scalar>::epsilon())
      );

  HybridNonLinearSolverSpace::Status solveInit(FVectorType &x);
@@ -79,7 +79,7 @@ public:

  HybridNonLinearSolverSpace::Status hybrd1(
      FVectorType &x,
-     const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+     const Scalar tol = numext::sqrt(NumTraits<Scalar>::epsilon())
      );

  HybridNonLinearSolverSpace::Status solveNumericalDiffInit(FVectorType &x);