diff --git a/Eigen/src/Sparse/BasicSparseCholesky.h b/Eigen/src/Sparse/BasicSparseCholesky.h
index 59c5b9913..92193a3bc 100644
--- a/Eigen/src/Sparse/BasicSparseCholesky.h
+++ b/Eigen/src/Sparse/BasicSparseCholesky.h
@@ -60,6 +60,7 @@ template class BasicSparseCholesky
     /** \returns true if the matrix is positive definite */
     inline bool isPositiveDefinite(void) const { return m_isPositiveDefinite; }
 
+    // TODO impl the solver
 //     template
 //     typename Derived::Eval solve(const MatrixBase &b) const;
 
@@ -83,7 +84,6 @@ template class BasicSparseCholesky
 
 /** Computes / recomputes the Cholesky decomposition A = LL^* = U^*U of \a matrix
   */
-#ifdef IGEN_BASICSPARSECHOLESKY_H
 template
 void BasicSparseCholesky::compute(const MatrixType& a)
 {
@@ -154,287 +154,4 @@ void BasicSparseCholesky::compute(const MatrixType& a)
   m_matrix.endFill();
 }
-
-#else
-
-template
-void BasicSparseCholesky::compute(const MatrixType& a)
-{
-  assert(a.rows()==a.cols());
-  const int size = a.rows();
-  m_matrix.resize(size, size);
-  const RealScalar eps = ei_sqrt(precision());
-
-  // allocate a temporary buffer
-  Scalar* buffer = new Scalar[size*2];
-
-
-  m_matrix.startFill(a.nonZeros()*2);
-
-//   RealScalar x;
-//   x = ei_real(a.coeff(0,0));
-//   m_isPositiveDefinite = x > eps && ei_isMuchSmallerThan(ei_imag(a.coeff(0,0)), RealScalar(1));
-//   m_matrix.fill(0,0) = ei_sqrt(x);
-//   m_matrix.col(0).end(size-1) = a.row(0).end(size-1).adjoint() / ei_real(m_matrix.coeff(0,0));
-  for (int j = 0; j < size; ++j)
-  {
-//     std::cout << j << " " << std::flush;
-//     Scalar tmp = ei_real(a.coeff(j,j));
-//     if (j>0)
-//       tmp -= m_matrix.row(j).start(j).norm2();
-//     x = ei_real(tmp);
-//     std::cout << "x = " << x << "\n";
-//     if (x < eps || (!ei_isMuchSmallerThan(ei_imag(tmp), RealScalar(1))))
-//     {
-//       m_isPositiveDefinite = false;
-//       return;
-//     }
-//     m_matrix.fill(j,j) = x = ei_sqrt(x);
-
-    Scalar x = ei_real(a.coeff(j,j));
-//     if (j>0)
-//       x -= m_matrix.row(j).start(j).norm2();
-//     RealScalar rx = ei_sqrt(ei_real(x));
-//     m_matrix.fill(j,j) = rx;
-    int endSize = size-j-1;
-    /*if (endSize>0)*/ {
-      // Note that when all matrix columns have good alignment, then the following
-      // product is guaranteed to be optimal with respect to alignment.
-//       m_matrix.col(j).end(endSize) =
-//         (m_matrix.block(j+1, 0, endSize, j) * m_matrix.row(j).start(j).adjoint()).lazy();
-
-      // FIXME could use a.col instead of a.row
-//       m_matrix.col(j).end(endSize) = (a.row(j).end(endSize).adjoint()
-//         - m_matrix.col(j).end(endSize) ) / x;
-
-      // make sure to call innerSize/outerSize since we fake the storage order.
-
-
-
-
-      // estimate the number of non zero entries
-//       float ratioLhs = float(lhs.nonZeros())/float(lhs.rows()*lhs.cols());
-//       float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols);
-//       float ratioRes = std::min(ratioLhs * avgNnzPerRhsColumn, 1.f);
-
-
-//       for (int j1=0; j10.1)
-      if (true)
-      {
-        // dense path, the scalar * columns products are accumulated into a dense column
-        Scalar* __restrict__ tmp = buffer;
-        // set to zero
-        for (int k=j+1; k(buffer);
-        // sparse path, the scalar * columns products are accumulated into a linked list
-        int tmp_size = 0;
-        int tmp_start = -1;
-
-        {
-          int tmp_el = tmp_start;
-          typename MatrixType::InnerIterator it(a,j);
-          if (it)
-          {
-            ++it;
-            for (; it; ++it)
-            {
-              Scalar v = it.value();
-              int id = it.index();
-              if (tmp_size==0)
-              {
-                tmp_start = 0;
-                tmp_el = 0;
-                tmp_size++;
-                tmp[0].value = v;
-                tmp[0].index = id;
-                tmp[0].next = -1;
-              }
-              else if (id= 0 && tmp[nextel].index<=id)
-              {
-                tmp_el = nextel;
-                nextel = tmp[nextel].next;
-              }
-
-              if (tmp[tmp_el].index==id)
-              {
-                tmp[tmp_el].value = v;
-              }
-              else
-              {
-                tmp[tmp_size].value = v;
-                tmp[tmp_size].index = id;
-                tmp[tmp_size].next = tmp[tmp_el].next;
-                tmp[tmp_el].next = tmp_size;
-                tmp_size++;
-              }
-            }
-          }
-        }
-//         for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
-        for (int k=0; k= 0 && tmp[nextel].index<=id)
-            {
-              tmp_el = nextel;
-              nextel = tmp[nextel].next;
-            }
-
-            if (tmp[tmp_el].index==id)
-            {
-              tmp[tmp_el].value -= v;
-            }
-            else
-            {
-//               std::cout << "insert because not in (1)\n";
-              tmp[tmp_size].value = v;
-              tmp[tmp_size].index = id;
-              tmp[tmp_size].next = tmp[tmp_el].next;
-              tmp[tmp_el].next = tmp_size;
-              tmp_size++;
-            }
-          }
-        }
-      }
-      RealScalar rx = ei_sqrt(ei_real(x));
-      m_matrix.fill(j,j) = rx;
-      Scalar y = Scalar(1)/rx;
-      int k = tmp_start;
-      while (k>=0)
-      {
-        if (!ei_isMuchSmallerThan(ei_abs(tmp[k].value),Scalar(0.01)))
-        {
-//           std::cout << "fill " << tmp[k].index << "," << j << "\n";
-          m_matrix.fill(tmp[k].index, j) = tmp[k].value * y;
-        }
-        k = tmp[k].next;
-      }
-    }
-  }
-
-  }
-  }
-  m_matrix.endFill();
-}
-
-#endif
-
-/** \returns the solution of \f$ A x = b \f$ using the current decomposition of A.
-  * In other words, it returns \f$ A^{-1} b \f$ computing
-  * \f$ {L^{*}}^{-1} L^{-1} b \f$ from right to left.
-  * \param b the column vector \f$ b \f$, which can also be a matrix.
-  *
-  * Example: \include Cholesky_solve.cpp
-  * Output: \verbinclude Cholesky_solve.out
-  *
-  * \sa MatrixBase::cholesky(), CholeskyWithoutSquareRoot::solve()
-  */
-// template
-// template
-// typename Derived::Eval Cholesky::solve(const MatrixBase &b) const
-// {
-//   const int size = m_matrix.rows();
-//   ei_assert(size==b.rows());
-//
-//   return m_matrix.adjoint().template part().solveTriangular(matrixL().solveTriangular(b));
-// }
-
 
 #endif // EIGEN_BASICSPARSECHOLESKY_H
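Note on the "+ // TODO impl the solver" line added above: once the factor L of A = LL^* is available, solving A x = b reduces to a forward substitution with L followed by a back substitution with L^* (the removed doc comment phrased this as computing {L^*}^{-1} L^{-1} b from right to left). The snippet below is only a minimal, self-contained sketch of that two-pass substitution; the function name choleskySolve, the double scalar type, and the dense row-major std::vector storage are illustrative assumptions and not the Eigen API. An actual sparse implementation would traverse only the stored non-zeros of m_matrix, column by column.

#include <vector>
#include <cassert>
#include <cstddef>

// Sketch: given the Cholesky factor L (lower triangular, dense row-major, size n*n),
// solve A x = b with A = L L^T by
//   1) forward substitution  L y = b
//   2) back substitution     L^T x = y
std::vector<double> choleskySolve(const std::vector<double>& L, std::size_t n,
                                  std::vector<double> b)
{
  assert(L.size() == n * n && b.size() == n);

  // Forward substitution: L y = b (y overwrites b).
  for (std::size_t i = 0; i < n; ++i)
  {
    double s = b[i];
    for (std::size_t k = 0; k < i; ++k)
      s -= L[i * n + k] * b[k];
    b[i] = s / L[i * n + i];
  }

  // Back substitution: L^T x = y (x overwrites b), using L^T(i,k) == L(k,i).
  for (std::size_t i = n; i-- > 0; )
  {
    double s = b[i];
    for (std::size_t k = i + 1; k < n; ++k)
      s -= L[k * n + i] * b[k];
    b[i] = s / L[i * n + i];
  }
  return b;
}

In terms of the commented-out dense Cholesky solve() removed above, these two passes correspond to the matrixL() triangular solve followed by the adjoint triangular solve; a sparse version would replace the inner loops with iteration over each column's non-zero entries.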