diff --git a/Eigen/src/SparseCore/SparseAssign.h b/Eigen/src/SparseCore/SparseAssign.h
index 19a3e8e8b..905485c88 100644
--- a/Eigen/src/SparseCore/SparseAssign.h
+++ b/Eigen/src/SparseCore/SparseAssign.h
@@ -249,7 +249,7 @@ struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse>
 
   template<int Options, typename AssignFunc>
   static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const AssignFunc &func)
-  { dst._assignDiagonal(src.diagonal(), func); }
+  { dst.assignDiagonal(src.diagonal(), func); }
 
   template<typename DstDerived>
   static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstDerived::Scalar,typename SrcXprType::Scalar> &/*func*/)
diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h
index 2ff12a4a5..63dd1cc32 100644
--- a/Eigen/src/SparseCore/SparseMatrix.h
+++ b/Eigen/src/SparseCore/SparseMatrix.h
@@ -99,6 +99,8 @@ class SparseMatrix
     typedef SparseCompressedBase<SparseMatrix> Base;
     using Base::convert_index;
     friend class SparseVector<_Scalar,0,_StorageIndex>;
+    template<typename, typename, typename, typename, typename>
+    friend struct internal::Assignment;
   public:
     using Base::isCompressed;
     using Base::nonZeros;
@@ -503,113 +505,6 @@ class SparseMatrix
       }
     }
 
-    /** \internal assign \a diagXpr to the diagonal of \c *this
-      * There are different strategies:
-      *   1 - if *this is overwritten (Func==assign_op) or *this is empty, then we can work treat *this as a dense vector expression.
-      *   2 - otherwise, for each diagonal coeff,
-      *     2.a - if it already exists, then we update it,
-      *     2.b - otherwise, if *this is uncompressed and that the current inner-vector has empty room for at least 1 element, then we perform an in-place insertion.
-      *     2.c - otherwise, we'll have to reallocate and copy everything, so instead of doing so for each new element, it is recorded in a std::vector.
-      *   3 - at the end, if some entries failed to be inserted in-place, then we alloc a new buffer, copy each chunk at the right position, and insert the new elements.
-      *
-      * TODO: some piece of code could be isolated and reused for a general in-place update strategy.
-      * TODO: if we start to defer the insertion of some elements (i.e., case 2.c executed once),
-      *       then it *might* be better to disable case 2.b since they will have to be copied anyway.
-      */
-    template<typename DiagXpr, typename Func>
-    void _assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
-    {
-      struct Record {
-        Record(Index a_i, Index a_p) : i(a_i), p(a_p) {}
-        Index i;
-        Index p;
-      };
-
-      Index n = diagXpr.size();
-
-      const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
-      if(overwrite)
-      {
-        if((this->rows()!=n) || (this->cols()!=n))
-          this->resize(n, n);
-      }
-
-      if(m_data.size()==0 || overwrite)
-      {
-        typedef Array<StorageIndex,Dynamic,1> ArrayXI;
-        this->makeCompressed();
-        this->resizeNonZeros(n);
-        Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
-        Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
-        Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
-        values.setZero();
-        internal::call_assignment_no_alias(values, diagXpr, assignFunc);
-      }
-      else
-      {
-        bool isComp = isCompressed();
-        internal::evaluator<DiagXpr> diaEval(diagXpr);
-        std::vector<Record> newEntries;
-
-        // 1 - try in-place update and record insertion failures
-        for(Index i = 0; i<n; ++i)
-        {
-          internal::LowerBoundIndex lb = this->lower_bound(i,i);
-          Index p = lb.value;
-          if(lb.found)
-          {
-            // the coeff already exists
-            assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
-          }
-          else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
-          {
-            // non compressed mode with local room for inserting one element
-            m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
-            m_innerNonZeros[i]++;
-            m_data.value(p) = Scalar(0);
-            m_data.index(p) = StorageIndex(i);
-            assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
-          }
-          else
-          {
-            // defer insertion
-            newEntries.push_back(Record(i,p));
-          }
-        }
-        // 2 - insert deferred entries
-        Index n_entries = Index(newEntries.size());
-        if(n_entries>0)
-        {
-          Storage newData(m_data.size()+n_entries);
-          Index prev_p = 0;
-          Index prev_i = 0;
-          for(Index k=0; k<n_entries; ++k)
     void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
     {
@@ -1002,6 +897,113 @@ public:
       m_data.index(p) = convert_index(inner);
       return (m_data.value(p) = Scalar(0));
     }
+  protected:
+    struct IndexPosPair {
+      IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
+      Index i;
+      Index p;
+    };
+
+    /** \internal assign \a diagXpr to the diagonal of \c *this
+      * There are different strategies:
+      *   1 - if *this is overwritten (Func==assign_op) or *this is empty, then we can work treat *this as a dense vector expression.
+      *   2 - otherwise, for each diagonal coeff,
+      *     2.a - if it already exists, then we update it,
+      *     2.b - otherwise, if *this is uncompressed and that the current inner-vector has empty room for at least 1 element, then we perform an in-place insertion.
+      *     2.c - otherwise, we'll have to reallocate and copy everything, so instead of doing so for each new element, it is recorded in a std::vector.
+      *   3 - at the end, if some entries failed to be inserted in-place, then we alloc a new buffer, copy each chunk at the right position, and insert the new elements.
+      *
+      * TODO: some piece of code could be isolated and reused for a general in-place update strategy.
+      * TODO: if we start to defer the insertion of some elements (i.e., case 2.c executed once),
+      *       then it *might* be better to disable case 2.b since they will have to be copied anyway.
+      */
+    template<typename DiagXpr, typename Func>
+    void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
+    {
+      Index n = diagXpr.size();
+
+      const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
+      if(overwrite)
+      {
+        if((this->rows()!=n) || (this->cols()!=n))
+          this->resize(n, n);
+      }
+
+      if(m_data.size()==0 || overwrite)
+      {
+        typedef Array<StorageIndex,Dynamic,1> ArrayXI;
+        this->makeCompressed();
+        this->resizeNonZeros(n);
+        Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
+        Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
+        Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
+        values.setZero();
+        internal::call_assignment_no_alias(values, diagXpr, assignFunc);
+      }
+      else
+      {
+        bool isComp = isCompressed();
+        internal::evaluator<DiagXpr> diaEval(diagXpr);
+        std::vector<IndexPosPair> newEntries;
+
+        // 1 - try in-place update and record insertion failures
+        for(Index i = 0; i<n; ++i)
+        {
+          internal::LowerBoundIndex lb = this->lower_bound(i,i);
+          Index p = lb.value;
+          if(lb.found)
+          {
+            // the coeff already exists
+            assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
+          }
+          else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
+          {
+            // non compressed mode with local room for inserting one element
+            m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
+            m_innerNonZeros[i]++;
+            m_data.value(p) = Scalar(0);
+            m_data.index(p) = StorageIndex(i);
+            assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
+          }
+          else
+          {
+            // defer insertion
+            newEntries.push_back(IndexPosPair(i,p));
+          }
+        }
+        // 2 - insert deferred entries
+        Index n_entries = Index(newEntries.size());
+        if(n_entries>0)
+        {
+          Storage newData(m_data.size()+n_entries);
+          Index prev_p = 0;
+          Index prev_i = 0;
+          for(Index k=0; k<n_entries; ++k)
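
Reviewer note (not part of the patch): the code path touched here is the Diagonal2Sparse assignment, i.e. assigning a diagonal expression to a SparseMatrix, which internal::Assignment::run() now forwards to the protected assignDiagonal(). Below is a minimal user-level sketch of the operations that exercise it, assuming an Eigen build that includes this change (3.4 or a post-change master); the variable names and values are illustrative only.

#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <iostream>

int main()
{
  const int n = 5;
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(n, 1.0, 5.0);

  Eigen::SparseMatrix<double> A(n, n);
  A.insert(1, 2) = 3.0;   // one off-diagonal entry; the diagonal itself is initially empty
  A.makeCompressed();

  // Adding a diagonal expression: existing diagonal coefficients are updated and
  // missing ones are inserted, following the strategies described in the
  // assignDiagonal() comment (in-place update, in-place insertion, deferred insertion).
  A += v.asDiagonal();

  // Plain assignment from a diagonal expression (the "overwrite" / assign_op case):
  // the destination is rebuilt as a purely diagonal sparse matrix.
  Eigen::SparseMatrix<double> D(n, n);
  D = v.asDiagonal();

  std::cout << Eigen::MatrixXd(A) << "\n\n" << Eigen::MatrixXd(D) << "\n";
  return 0;
}

The new friend declaration in SparseMatrix.h is what keeps these statements working now that assignDiagonal() is protected: the public entry points remain the assignment operators, and internal::Assignment is the only intended caller.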