add a flexible sparse matrix class designed for fast matrix assembly
parent 385fd3d918 · commit 178858f1bd

Eigen/Sparse
@@ -77,6 +77,7 @@ namespace Eigen {
 #include "src/Sparse/RandomSetter.h"
 #include "src/Sparse/SparseBlock.h"
 #include "src/Sparse/SparseMatrix.h"
+#include "src/Sparse/DynamicSparseMatrix.h"
 #include "src/Sparse/MappedSparseMatrix.h"
 #include "src/Sparse/SparseVector.h"
 #include "src/Sparse/CoreIterators.h"

Eigen/src/Core/Matrix.h
@@ -114,8 +114,7 @@ struct ei_traits<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
     MaxRowsAtCompileTime = _MaxRows,
     MaxColsAtCompileTime = _MaxCols,
     Flags = ei_compute_matrix_flags<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::ret,
-    CoeffReadCost = NumTraits<Scalar>::ReadCost,
-    SupportedAccessPatterns = RandomAccessPattern
+    CoeffReadCost = NumTraits<Scalar>::ReadCost
   };
 };

Eigen/src/Core/util/Constants.h
@@ -239,9 +239,4 @@ enum {
   HasDirectAccess = DirectAccessBit
 };
 
-const int FullyCoherentAccessPattern = 0x1;
-const int InnerCoherentAccessPattern = 0x2 | FullyCoherentAccessPattern;
-const int OuterCoherentAccessPattern = 0x4 | InnerCoherentAccessPattern;
-const int RandomAccessPattern = 0x8 | OuterCoherentAccessPattern;
-
 #endif // EIGEN_CONSTANTS_H

Eigen/src/Sparse/CompressedStorage.h
@@ -43,6 +43,7 @@ class CompressedStorage
     }
 
+    CompressedStorage(const CompressedStorage& other)
+      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+    {
+      *this = other;
+    }
@@ -97,15 +98,15 @@ class CompressedStorage
       m_indices[id] = i;
     }
 
-    int size() const { return m_size; }
-    int allocatedSize() const { return m_allocatedSize; }
-    void clear() { m_size = 0; }
+    inline int size() const { return m_size; }
+    inline int allocatedSize() const { return m_allocatedSize; }
+    inline void clear() { m_size = 0; }
 
-    Scalar& value(int i) { return m_values[i]; }
-    const Scalar& value(int i) const { return m_values[i]; }
+    inline Scalar& value(int i) { return m_values[i]; }
+    inline const Scalar& value(int i) const { return m_values[i]; }
 
-    int& index(int i) { return m_indices[i]; }
-    const int& index(int i) const { return m_indices[i]; }
+    inline int& index(int i) { return m_indices[i]; }
+    inline const int& index(int i) const { return m_indices[i]; }
 
     static CompressedStorage Map(int* indices, Scalar* values, int size)
     {
@@ -115,10 +116,77 @@ class CompressedStorage
       res.m_allocatedSize = res.m_size = size;
       return res;
     }
 
+    /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
+    inline int searchLowerIndex(int key) const
+    {
+      return searchLowerIndex(0, m_size, key);
+    }
+
+    /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
+    inline int searchLowerIndex(int start, int end, int key) const
+    {
+      while(end>start)
+      {
+        int mid = (end+start)>>1;
+        if (m_indices[mid]<key)
+          start = mid+1;
+        else
+          end = mid;
+      }
+      return start;
+    }
+
+    /** \returns the stored value at index \a key.
+      * If the value does not exist, then the value \a defaultValue is returned without any insertion. */
+    inline Scalar at(int key, Scalar defaultValue = Scalar(0)) const
+    {
+      if (m_size==0)
+        return defaultValue;
+      else if (key==m_indices[m_size-1])
+        return m_values[m_size-1];
+      // ^^ optimization: let's first check if it is the last coefficient
+      // (very common in high level algorithms)
+      const int id = searchLowerIndex(0,m_size-1,key);
+      return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
+    }
+
+    /** Like at(), but the search is performed in the range [start,end) */
+    inline Scalar atInRange(int start, int end, int key, Scalar defaultValue = Scalar(0)) const
+    {
+      if (start==end)
+        return Scalar(0);
+      else if (end>start && key==m_indices[end-1])
+        return m_values[end-1];
+      // ^^ optimization: let's first check if it is the last coefficient
+      // (very common in high level algorithms)
+      const int id = searchLowerIndex(start,end-1,key);
+      return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
+    }
+
+    /** \returns a reference to the value at index \a key.
+      * If the value does not exist, then the value \a defaultValue is inserted
+      * such that the keys are sorted. */
+    inline Scalar& atWithInsertion(int key, Scalar defaultValue = Scalar(0))
+    {
+      int id = searchLowerIndex(0,m_size,key);
+      if (id>=m_size || m_indices[id]!=key)
+      {
+        resize(m_size+1,1);
+        for (int j=m_size-1; j>id; --j)
+        {
+          m_indices[j] = m_indices[j-1];
+          m_values[j] = m_values[j-1];
+        }
+        m_indices[id] = key;
+        m_values[id] = defaultValue;
+      }
+      return m_values[id];
+    }
+
   protected:
 
-    void reallocate(int size)
+    inline void reallocate(int size)
    {
       Scalar* newValues = new Scalar[size];
       int* newIndices = new int[size];
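
The search helpers added above implement a plain lower-bound binary search over the sorted index array of a compressed vector. A minimal standalone sketch of the same semantics, with illustrative names and types that are not part of the commit:

    #include <vector>

    // Returns the first position in [start,end) whose index is >= key,
    // i.e. the insertion point that keeps `indices` sorted.
    int searchLowerIndex(const std::vector<int>& indices, int start, int end, int key)
    {
      while (end > start)
      {
        int mid = (start + end) >> 1;
        if (indices[mid] < key) start = mid + 1;
        else                    end = mid;
      }
      return start;
    }

    // at()-style read-only lookup: 0.0 for a structurally zero entry.
    double at(const std::vector<int>& indices, const std::vector<double>& values, int key)
    {
      int id = searchLowerIndex(indices, 0, int(indices.size()), key);
      return (id < int(indices.size()) && indices[id] == key) ? values[id] : 0.0;
    }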

Eigen/src/Sparse/DynamicSparseMatrix.h (new file, 284 lines)
@@ -0,0 +1,284 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_DYNAMIC_SPARSEMATRIX_H
+#define EIGEN_DYNAMIC_SPARSEMATRIX_H
+
+/** \class DynamicSparseMatrix
+  *
+  * \brief A sparse matrix class designed for matrix assembly purposes
+  *
+  * \param _Scalar the scalar type, i.e. the type of the coefficients
+  *
+  * Unlike SparseMatrix, this class provides a much higher degree of flexibility. In particular, it allows
+  * random read/write accesses in log(rho*outer_size), where \c rho is the probability that a coefficient is
+  * nonzero and outer_size is the number of columns if the matrix is column-major and the number of rows
+  * otherwise.
+  *
+  * Internally, the data are stored as a std::vector of compressed vectors. The performance of random writes might
+  * decrease as the number of nonzeros per inner vector increases. In practice, we observed very good performance
+  * up to about 100 nonzeros/vector, and the performance remains relatively good up to 500 nonzeros/vector.
+  *
+  * \see SparseMatrix
+  */
+template<typename _Scalar, int _Flags>
+struct ei_traits<DynamicSparseMatrix<_Scalar, _Flags> >
+{
+  typedef _Scalar Scalar;
+  enum {
+    RowsAtCompileTime = Dynamic,
+    ColsAtCompileTime = Dynamic,
+    MaxRowsAtCompileTime = Dynamic,
+    MaxColsAtCompileTime = Dynamic,
+    Flags = SparseBit | _Flags,
+    CoeffReadCost = NumTraits<Scalar>::ReadCost,
+    SupportedAccessPatterns = OuterRandomAccessPattern
+  };
+};
+
+template<typename _Scalar, int _Flags>
+class DynamicSparseMatrix
+  : public SparseMatrixBase<DynamicSparseMatrix<_Scalar, _Flags> >
+{
+  public:
+    EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(DynamicSparseMatrix)
+    typedef MappedSparseMatrix<Scalar,Flags> Map;
+
+  protected:
+
+    enum { IsRowMajor = Base::IsRowMajor };
+    typedef DynamicSparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
+
+    int m_innerSize;
+    std::vector<CompressedStorage<Scalar> > m_data;
+
+  public:
+
+    inline int rows() const { return IsRowMajor ? outerSize() : m_innerSize; }
+    inline int cols() const { return IsRowMajor ? m_innerSize : outerSize(); }
+    inline int innerSize() const { return m_innerSize; }
+    inline int outerSize() const { return m_data.size(); }
+    inline int innerNonZeros(int j) const { return m_data[j].size(); }
+
+    /** \returns the coefficient value at given position \a row, \a col
+      * This operation involves a log(rho*outer_size) binary search.
+      */
+    inline Scalar coeff(int row, int col) const
+    {
+      const int outer = IsRowMajor ? row : col;
+      const int inner = IsRowMajor ? col : row;
+      return m_data[outer].at(inner);
+    }
+
+    /** \returns a reference to the coefficient value at given position \a row, \a col
+      * This operation involves a log(rho*outer_size) binary search. If the coefficient does not
+      * exist yet, then a sorted insertion into a sequential buffer is performed.
+      */
+    inline Scalar& coeffRef(int row, int col)
+    {
+      const int outer = IsRowMajor ? row : col;
+      const int inner = IsRowMajor ? col : row;
+      return m_data[outer].atWithInsertion(inner);
+    }
+
+  public:
+
+    class InnerIterator;
+
+    inline void setZero()
+    {
+      for (int j=0; j<outerSize(); ++j)
+        m_data[j].clear();
+    }
+
+    /** \returns the number of nonzero coefficients */
+    inline int nonZeros() const
+    {
+      int res = 0;
+      for (int j=0; j<outerSize(); ++j)
+        res += m_data[j].size();
+      return res;
+    }
+
+    /** Sets the matrix to zero and reserves the memory for \a reserveSize nonzero coefficients. */
+    inline void startFill(int reserveSize = 1000)
+    {
+      int reserveSizePerVector = std::max(reserveSize/outerSize(),4);
+      for (int j=0; j<outerSize(); ++j)
+      {
+        m_data[j].clear();
+        m_data[j].reserve(reserveSizePerVector);
+      }
+    }
+
+    /** Inserts a nonzero coefficient at given coordinates \a row, \a col and returns its reference, assuming that:
+      * 1 - the coefficient does not exist yet;
+      * 2 - this is the coefficient with the greatest inner coordinate for the given outer coordinate.
+      * In other words, assuming \c *this is column-major, there must not exist any nonzero coefficient of coordinates
+      * \c i \c x \a col such that \c i >= \a row. Otherwise the matrix is invalid.
+      *
+      * \see fillrand(), coeffRef()
+      */
+    inline Scalar& fill(int row, int col)
+    {
+      const int outer = IsRowMajor ? row : col;
+      const int inner = IsRowMajor ? col : row;
+      ei_assert(outer<int(m_data.size()) && inner<m_innerSize);
+      ei_assert((m_data[outer].size()==0) || (m_data[outer].index(m_data[outer].size()-1)<inner));
+      m_data[outer].append(0, inner);
+      return m_data[outer].value(m_data[outer].size()-1);
+    }
+
+    /** Like fill() but with random inner coordinates.
+      * Compared to the generic coeffRef(), the only limitation is that we assume
+      * the coefficient does not exist yet.
+      */
+    inline Scalar& fillrand(int row, int col)
+    {
+      const int outer = IsRowMajor ? row : col;
+      const int inner = IsRowMajor ? col : row;
+
+      int startId = 0;
+      int id = m_data[outer].size() - 1;
+      m_data[outer].resize(id+2,1);
+
+      while ( (id >= startId) && (m_data[outer].index(id) > inner) )
+      {
+        m_data[outer].index(id+1) = m_data[outer].index(id);
+        m_data[outer].value(id+1) = m_data[outer].value(id);
+        --id;
+      }
+      m_data[outer].index(id+1) = inner;
+      m_data[outer].value(id+1) = 0;
+      return m_data[outer].value(id+1);
+    }
+
+    /** Does nothing. Provided for compatibility with SparseMatrix. */
+    inline void endFill() {}
+
+    /** Resizes the matrix without preserving the data (the matrix is set to zero). */
+    void resize(int rows, int cols)
+    {
+      const int outerSize = IsRowMajor ? rows : cols;
+      m_innerSize = IsRowMajor ? cols : rows;
+      setZero();
+      if (int(m_data.size()) != outerSize)
+      {
+        m_data.resize(outerSize);
+      }
+    }
+
+    void resizeAndKeepData(int rows, int cols)
+    {
+      const int outerSize = IsRowMajor ? rows : cols;
+      const int innerSize = IsRowMajor ? cols : rows;
+      if (m_innerSize>innerSize)
+      {
+        // remove all coefficients with innerCoord>=innerSize
+        // TODO
+        std::cerr << "not implemented yet\n";
+        exit(2);
+      }
+      if (m_data.size() != outerSize)
+      {
+        m_data.resize(outerSize);
+      }
+    }
+
+    inline DynamicSparseMatrix()
+      : m_innerSize(0)
+    {
+      ei_assert(innerSize()==0 && outerSize()==0);
+    }
+
+    inline DynamicSparseMatrix(int rows, int cols)
+      : m_innerSize(0)
+    {
+      resize(rows, cols);
+    }
+
+    template<typename OtherDerived>
+    inline DynamicSparseMatrix(const SparseMatrixBase<OtherDerived>& other)
+      : m_innerSize(0)
+    {
+      *this = other.derived();
+    }
+
+    inline DynamicSparseMatrix(const DynamicSparseMatrix& other)
+      : m_innerSize(0)
+    {
+      *this = other.derived();
+    }
+
+    inline void swap(DynamicSparseMatrix& other)
+    {
+      //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
+      std::swap(m_innerSize, other.m_innerSize);
+      //std::swap(m_outerSize, other.m_outerSize);
+      m_data.swap(other.m_data);
+    }
+
+    inline DynamicSparseMatrix& operator=(const DynamicSparseMatrix& other)
+    {
+      if (other.isRValue())
+      {
+        swap(other.const_cast_derived());
+      }
+      else
+      {
+        resize(other.rows(), other.cols());
+        m_data = other.m_data;
+      }
+      return *this;
+    }
+
+    template<typename OtherDerived>
+    inline DynamicSparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
+    {
+      return SparseMatrixBase<DynamicSparseMatrix>::operator=(other.derived());
+    }
+
+    /** Destructor */
+    inline ~DynamicSparseMatrix() {}
+};
+
+template<typename Scalar, int _Flags>
+class DynamicSparseMatrix<Scalar,_Flags>::InnerIterator : public SparseVector<Scalar,_Flags>::InnerIterator
+{
+    typedef typename SparseVector<Scalar,_Flags>::InnerIterator Base;
+  public:
+    InnerIterator(const DynamicSparseMatrix& mat, int outer)
+      : Base(mat.m_data[outer]), m_outer(outer)
+    {}
+
+    inline int row() const { return IsRowMajor ? m_outer : Base::index(); }
+    inline int col() const { return IsRowMajor ? Base::index() : m_outer; }
+
+  protected:
+    const int m_outer;
+};
+
+#endif // EIGEN_DYNAMIC_SPARSEMATRIX_H
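
Usage-wise, the new class offers three insertion paths of decreasing strictness: fill() (strictly ordered append), fillrand() (any order, coefficient must be new), and coeffRef() (fully random read/write). A hypothetical assembly sketch, assuming the usual Eigen sparse header; this snippet is illustrative and not taken from the commit:

    #include <Eigen/Sparse>

    void assemble()
    {
      Eigen::DynamicSparseMatrix<double> m(1000, 1000);

      m.startFill(4000);          // reserve ~4 nonzeros per inner vector
      m.fill(0, 0)      = 1.0;    // fill(): inner indices must be appended in increasing order
      m.fillrand(42, 7) = 2.0;    // fillrand(): any order, but the coefficient must be new
      m.endFill();                // no-op here, kept for SparseMatrix API compatibility

      m.coeffRef(42, 7) += 0.5;   // coeffRef(): fully random read/write access
    }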

Eigen/src/Sparse/SparseMatrix.h
@@ -45,7 +45,7 @@ struct ei_traits<SparseMatrix<_Scalar, _Flags> >
     MaxColsAtCompileTime = Dynamic,
     Flags = SparseBit | _Flags,
     CoeffReadCost = NumTraits<Scalar>::ReadCost,
-    SupportedAccessPatterns = FullyCoherentAccessPattern
+    SupportedAccessPatterns = InnerRandomAccessPattern
   };
 };
@@ -91,19 +91,7 @@ class SparseMatrix
     {
       const int outer = IsRowMajor ? row : col;
       const int inner = IsRowMajor ? col : row;
-
-      int start = m_outerIndex[outer];
-      int end = m_outerIndex[outer+1];
-      if (start==end)
-        return Scalar(0);
-      else if (end>0 && inner==m_data.index(end-1))
-        return m_data.value(end-1);
-      // ^^ optimization: let's first check if it is the last coefficient
-      // (very common in high level algorithms)
-
-      const int* r = std::lower_bound(&m_data.index(start),&m_data.index(end-1),inner);
-      const int id = r-&m_data.index(0);
-      return ((*r==inner) && (id<end)) ? m_data.value(id) : Scalar(0);
+      return m_data.atInRange(m_outerIndex[outer], m_outerIndex[outer+1], inner);
     }
 
     inline Scalar& coeffRef(int row, int col)
@@ -115,9 +103,8 @@ class SparseMatrix
       int end = m_outerIndex[outer+1];
       ei_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
       ei_assert(end>start && "coeffRef cannot be called on a zero coefficient");
-      int* r = std::lower_bound(&m_data.index(start),&m_data.index(end),inner);
-      const int id = r-&m_data.index(0);
-      ei_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
+      const int id = m_data.searchLowerIndex(start,end-1,inner);
+      ei_assert((id<end) && (m_data.index(id)==inner) && "coeffRef cannot be called on a zero coefficient");
       return m_data.value(id);
     }
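
Schematically, coeff(row,col) now confines the lookup to the slice of the shared index/value arrays owned by one outer vector, [m_outerIndex[outer], m_outerIndex[outer+1]). A simplified sketch of that lookup on raw compressed-column arrays, using a linear scan for clarity where the real code uses the binary-search based atInRange(); this helper is hypothetical, not from the commit:

    // Standalone CSC lookup: each column j owns the slice
    // [outerIndex[j], outerIndex[j+1]) of innerIndex/values.
    double csc_coeff(const int* outerIndex, const int* innerIndex,
                     const double* values, int col, int row)
    {
      for (int id = outerIndex[col]; id < outerIndex[col+1]; ++id)
        if (innerIndex[id] == row)
          return values[id];
      return 0.0;  // structurally zero
    }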

Eigen/src/Sparse/SparseMatrixBase.h
@@ -69,7 +69,7 @@ template<typename Derived> class SparseMatrixBase
     /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
       * constructed from this one. See the \ref flags "list of flags".
       */
 
     CoeffReadCost = ei_traits<Derived>::CoeffReadCost,
     /**< This is a rough measure of how expensive it is to read one coefficient from
       * this expression.
@@ -153,7 +153,10 @@ template<typename Derived> class SparseMatrixBase
   {
     // std::cout << "Derived& operator=(const MatrixBase<OtherDerived>& other)\n";
     //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
-    ei_assert((!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit))) && "the transpose operation is supposed to be handled in SparseMatrix::operator=");
+    ei_assert(( ((ei_traits<Derived>::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
+                (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) &&
+                "the transpose operation is supposed to be handled in SparseMatrix::operator=");
 
     const int outerSize = other.outerSize();
     //typedef typename ei_meta_if<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::ret TempType;
     // thanks to shallow copies, we always eval to a temporary

Eigen/src/Sparse/SparseProduct.h
@@ -246,7 +246,7 @@ struct ei_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
   {
     // let's transpose the product to get a column x column product
     SparseTemporaryType _res(res.cols(), res.rows());
-    ei_sparse_product_selector<Rhs,Lhs,ResultType,ColMajor,ColMajor,ColMajor>
+    ei_sparse_product_selector<Rhs,Lhs,SparseTemporaryType,ColMajor,ColMajor,ColMajor>
       ::run(rhs, lhs, _res);
     res = _res.transpose();
   }
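
A note on this fix: the row-major x row-major case is reduced to the existing column-major kernel through the transpose identity (L R)^T = R^T L^T, i.e. the product is computed into the column-major temporary _res and transposed back into res. The one-token change corrects the recursive selector to be instantiated with the temporary's type (SparseTemporaryType) rather than ResultType, matching the destination it actually runs into.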

Eigen/src/Sparse/SparseUtil.h
@@ -102,30 +102,36 @@ enum {
 
 template<typename Derived> class SparseMatrixBase;
 template<typename _Scalar, int _Flags = 0> class SparseMatrix;
+template<typename _Scalar, int _Flags = 0> class DynamicSparseMatrix;
 template<typename _Scalar, int _Flags = 0> class SparseVector;
 template<typename _Scalar, int _Flags = 0> class MappedSparseMatrix;
 
-template<typename MatrixType> class SparseTranspose;
-template<typename MatrixType> class SparseInnerVector;
-template<typename Derived> class SparseCwise;
-template<typename UnaryOp, typename MatrixType> class SparseCwiseUnaryOp;
-template<typename BinaryOp, typename Lhs, typename Rhs> class SparseCwiseBinaryOp;
-template<typename ExpressionType, unsigned int Added, unsigned int Removed> class SparseFlagged;
+template<typename MatrixType> class SparseTranspose;
+template<typename MatrixType> class SparseInnerVector;
+template<typename Derived> class SparseCwise;
+template<typename UnaryOp, typename MatrixType> class SparseCwiseUnaryOp;
+template<typename BinaryOp, typename Lhs, typename Rhs> class SparseCwiseBinaryOp;
+template<typename ExpressionType,
+         unsigned int Added, unsigned int Removed> class SparseFlagged;
 
 template<typename Lhs, typename Rhs> struct ei_sparse_product_mode;
 template<typename Lhs, typename Rhs, int ProductMode = ei_sparse_product_mode<Lhs,Rhs>::value> struct SparseProductReturnType;
 
-const int AccessPatternNotSupported = 0x0;
-const int AccessPatternSupported = 0x1;
+const int CoherentAccessPattern = 0x1;
+const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern;
+const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern;
+const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;
 
-
-template<typename MatrixType, int AccessPattern> struct ei_support_access_pattern
-{
-  enum { ret = (int(ei_traits<MatrixType>::SupportedAccessPatterns) & AccessPattern) == AccessPattern
-             ? AccessPatternSupported
-             : AccessPatternNotSupported
-  };
-};
+// const int AccessPatternNotSupported = 0x0;
+// const int AccessPatternSupported = 0x1;
+//
+// template<typename MatrixType, int AccessPattern> struct ei_support_access_pattern
+// {
+//   enum { ret = (int(ei_traits<MatrixType>::SupportedAccessPatterns) & AccessPattern) == AccessPattern
+//              ? AccessPatternSupported
+//              : AccessPatternNotSupported
+//   };
+// };
 
 template<typename T> class ei_eval<T,IsSparse>
 {
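
The new constants form a small lattice of capabilities: each richer pattern ORs in the bits of the patterns it subsumes, so "does this type support pattern P" reduces to the mask test (SupportedAccessPatterns & P) == P. A self-contained sketch of the bit arithmetic, with the values copied from the hunk above:

    #include <cassert>

    int main()
    {
      const int CoherentAccessPattern    = 0x1;
      const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern;    // 0x3
      const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern;    // 0x5
      const int RandomAccessPattern      = 0x8 | OuterRandomAccessPattern
                                               | InnerRandomAccessPattern; // 0xf

      // Random access subsumes every other pattern...
      assert((RandomAccessPattern & InnerRandomAccessPattern) == InnerRandomAccessPattern);
      assert((RandomAccessPattern & OuterRandomAccessPattern) == OuterRandomAccessPattern);
      // ...while inner- and outer-random access are independent capabilities.
      assert((InnerRandomAccessPattern & OuterRandomAccessPattern) != OuterRandomAccessPattern);
      return 0;
    }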

Eigen/src/Sparse/SparseVector.h
@@ -47,12 +47,10 @@ struct ei_traits<SparseVector<_Scalar, _Flags> >
     MaxColsAtCompileTime = ColsAtCompileTime,
     Flags = SparseBit | _Flags,
     CoeffReadCost = NumTraits<Scalar>::ReadCost,
-    SupportedAccessPatterns = FullyCoherentAccessPattern
+    SupportedAccessPatterns = InnerRandomAccessPattern
   };
 };
 
-
-
 template<typename _Scalar, int _Flags>
 class SparseVector
   : public SparseMatrixBase<SparseVector<_Scalar, _Flags> >
@@ -89,22 +87,7 @@ class SparseVector
       ei_assert((IsColVector ? col : row)==0);
       return coeff(IsColVector ? row : col);
     }
-    inline Scalar coeff(int i) const
-    {
-      int start = 0;
-      int end = m_data.size();
-      if (start==end)
-        return Scalar(0);
-      else if (end>0 && i==m_data.index(end-1))
-        return m_data.value(end-1);
-      // ^^ optimization: let's first check if it is the last coefficient
-      // (very common in high level algorithms)
-
-      // TODO move this search to ScalarArray
-      const int* r = std::lower_bound(&m_data.index(start),&m_data.index(end-1),i);
-      const int id = r-&m_data.index(0);
-      return ((*r==i) && (id<end)) ? m_data.value(id) : Scalar(0);
-    }
+    inline Scalar coeff(int i) const { return m_data.at(i); }
 
     inline Scalar& coeffRef(int row, int col)
     {
@@ -112,16 +95,15 @@ class SparseVector
       return coeff(IsColVector ? row : col);
     }
 
+    /** \returns a reference to the coefficient value at given index \a i
+      * This operation involves a log(rho*size) binary search. If the coefficient does not
+      * exist yet, then a sorted insertion into a sequential buffer is performed.
+      *
+      * This insertion might be very costly if the number of nonzeros above \a i is large.
+      */
     inline Scalar& coeffRef(int i)
     {
-      int start = 0;
-      int end = m_data.size();
-      ei_assert(end>=start && "you probably called coeffRef on a non finalized vector");
-      ei_assert(end>start && "coeffRef cannot be called on a zero coefficient");
-      int* r = std::lower_bound(&m_data.index(start),&m_data.index(end),i);
-      const int id = r-&m_data.index(0);
-      ei_assert((*r==i) && (id<end) && "coeffRef cannot be called on a zero coefficient");
-      return m_data.value(id);
+      return m_data.atWithInsertion(i);
     }
 
   public:
@@ -301,29 +283,33 @@ class SparseVector<Scalar,_Flags>::InnerIterator
 {
   public:
     InnerIterator(const SparseVector& vec, int outer=0)
-      : m_vector(vec), m_id(0), m_end(vec.nonZeros())
+      : m_data(vec.m_data), m_id(0), m_end(m_data.size())
     {
       ei_assert(outer==0);
     }
 
+    InnerIterator(const CompressedStorage<Scalar>& data)
+      : m_data(data), m_id(0), m_end(m_data.size())
+    {}
+
     template<unsigned int Added, unsigned int Removed>
     InnerIterator(const Flagged<SparseVector,Added,Removed>& vec, int outer)
-      : m_vector(vec._expression()), m_id(0), m_end(m_vector.nonZeros())
+      : m_data(vec._expression().m_data), m_id(0), m_end(m_data.size())
     {}
 
     inline InnerIterator& operator++() { m_id++; return *this; }
 
-    inline Scalar value() const { return m_vector.m_data.value(m_id); }
-    inline Scalar& valueRef() { return const_cast<Scalar&>(m_vector.m_data.value(m_id)); }
+    inline Scalar value() const { return m_data.value(m_id); }
+    inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id)); }
 
-    inline int index() const { return m_vector.m_data.index(m_id); }
+    inline int index() const { return m_data.index(m_id); }
     inline int row() const { return IsColVector ? index() : 0; }
     inline int col() const { return IsColVector ? 0 : index(); }
 
     inline operator bool() const { return (m_id < m_end); }
 
   protected:
-    const SparseVector& m_vector;
+    const CompressedStorage<Scalar>& m_data;
     int m_id;
     const int m_end;
 };
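
The refactored iterator now wraps a CompressedStorage directly, which is what lets DynamicSparseMatrix::InnerIterator reuse it via the new constructor (see the new file above). The canonical traversal pattern is unchanged; a small generic sketch (a hypothetical helper, not from the commit, assuming double scalars):

    // Sums the stored coefficients of any Eigen sparse object.
    template<typename SparseType>
    double sumNonZeros(const SparseType& m)
    {
      double res = 0;
      for (int j = 0; j < m.outerSize(); ++j)                        // each inner vector
        for (typename SparseType::InnerIterator it(m, j); it; ++it)  // each stored coeff
          res += it.value();
      return res;
    }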

test/sparse.h
@@ -94,6 +94,49 @@ initSparse(double density,
   sparseMat.endFill();
 }
 
+template<typename Scalar> void
+initSparse(double density,
+           Matrix<Scalar,Dynamic,Dynamic>& refMat,
+           DynamicSparseMatrix<Scalar>& sparseMat,
+           int flags = 0,
+           std::vector<Vector2i>* zeroCoords = 0,
+           std::vector<Vector2i>* nonzeroCoords = 0)
+{
+  sparseMat.startFill(int(refMat.rows()*refMat.cols()*density));
+  for(int j=0; j<refMat.cols(); j++)
+  {
+    for(int i=0; i<refMat.rows(); i++)
+    {
+      Scalar v = (ei_random<double>(0,1) < density) ? ei_random<Scalar>() : Scalar(0);
+      if ((flags&ForceNonZeroDiag) && (i==j))
+      {
+        v = ei_random<Scalar>()*Scalar(3.);
+        v = v*v + Scalar(5.);
+      }
+      if ((flags & MakeLowerTriangular) && j>i)
+        v = Scalar(0);
+      else if ((flags & MakeUpperTriangular) && j<i)
+        v = Scalar(0);
+
+      if ((flags&ForceRealDiag) && (i==j))
+        v = ei_real(v);
+
+      if (v!=Scalar(0))
+      {
+        sparseMat.fill(i,j) = v;
+        if (nonzeroCoords)
+          nonzeroCoords->push_back(Vector2i(i,j));
+      }
+      else if (zeroCoords)
+      {
+        zeroCoords->push_back(Vector2i(i,j));
+      }
+      refMat(i,j) = v;
+    }
+  }
+  sparseMat.endFill();
+}
+
 template<typename Scalar> void
 initSparse(double density,
            Matrix<Scalar,Dynamic,1>& refVec,

test/sparse_basic.cpp
@@ -42,14 +42,34 @@ bool test_random_setter(SparseType& sm, const DenseType& ref, const std::vector<
   return sm.isApprox(ref);
 }
 
-template<typename Scalar> void sparse_basic(int rows, int cols)
+template<typename SetterType,typename DenseType, typename T>
+bool test_random_setter(DynamicSparseMatrix<T>& sm, const DenseType& ref, const std::vector<Vector2i>& nonzeroCoords)
+{
+  sm.setZero();
+  std::vector<Vector2i> remaining = nonzeroCoords;
+  while(!remaining.empty())
+  {
+    int i = ei_random<int>(0,remaining.size()-1);
+    sm.coeffRef(remaining[i].x(),remaining[i].y()) = ref.coeff(remaining[i].x(),remaining[i].y());
+    remaining[i] = remaining.back();
+    remaining.pop_back();
+  }
+  return sm.isApprox(ref);
+}
+
+template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& ref)
 {
+  const int rows = ref.rows();
+  const int cols = ref.cols();
+  typedef typename SparseMatrixType::Scalar Scalar;
+  enum { Flags = SparseMatrixType::Flags };
+
   double density = std::max(8./(rows*cols), 0.01);
   typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
   typedef Matrix<Scalar,Dynamic,1> DenseVector;
   Scalar eps = 1e-6;
 
-  SparseMatrix<Scalar> m(rows, cols);
+  SparseMatrixType m(rows, cols);
   DenseMatrix refMat = DenseMatrix::Zero(rows, cols);
   DenseVector vec1 = DenseVector::Random(rows);
   Scalar s1 = ei_random<Scalar>();
@@ -57,7 +77,7 @@ template<typename Scalar> void sparse_basic(int rows, int cols)
   std::vector<Vector2i> zeroCoords;
   std::vector<Vector2i> nonzeroCoords;
   initSparse<Scalar>(density, refMat, m, 0, &zeroCoords, &nonzeroCoords);
 
   if (zeroCoords.size()==0 || nonzeroCoords.size()==0)
     return;
 
@@ -65,7 +85,8 @@ template<typename Scalar> void sparse_basic(int rows, int cols)
   for (int i=0; i<(int)zeroCoords.size(); ++i)
   {
     VERIFY_IS_MUCH_SMALLER_THAN( m.coeff(zeroCoords[i].x(),zeroCoords[i].y()), eps );
-    VERIFY_RAISES_ASSERT( m.coeffRef(zeroCoords[0].x(),zeroCoords[0].y()) = 5 );
+    if(ei_is_same_type<SparseMatrixType,SparseMatrix<Scalar,Flags> >::ret)
+      VERIFY_RAISES_ASSERT( m.coeffRef(zeroCoords[0].x(),zeroCoords[0].y()) = 5 );
   }
   VERIFY_IS_APPROX(m, refMat);
 
@@ -120,7 +141,7 @@ template<typename Scalar> void sparse_basic(int rows, int cols)
   // {
   //   m.setZero();
   //   VERIFY_IS_NOT_APPROX(m, refMat);
-  //   SparseSetter<SparseMatrix<Scalar>, FullyCoherentAccessPattern> w(m);
+  //   SparseSetter<SparseMatrixType, FullyCoherentAccessPattern> w(m);
   //   for (int i=0; i<nonzeroCoords.size(); ++i)
   //   {
   //     w->coeffRef(nonzeroCoords[i].x(),nonzeroCoords[i].y()) = refMat.coeff(nonzeroCoords[i].x(),nonzeroCoords[i].y());
@@ -132,7 +153,7 @@ template<typename Scalar> void sparse_basic(int rows, int cols)
   // {
   //   m.setZero();
   //   VERIFY_IS_NOT_APPROX(m, refMat);
-  //   SparseSetter<SparseMatrix<Scalar>, RandomAccessPattern> w(m);
+  //   SparseSetter<SparseMatrixType, RandomAccessPattern> w(m);
   //   std::vector<Vector2i> remaining = nonzeroCoords;
   //   while(!remaining.empty())
   //   {
@@ -144,22 +165,22 @@ template<typename Scalar> void sparse_basic(int rows, int cols)
   //   }
   //   VERIFY_IS_APPROX(m, refMat);
 
-  VERIFY(( test_random_setter<RandomSetter<SparseMatrix<Scalar>, StdMapTraits> >(m,refMat,nonzeroCoords) ));
+  VERIFY(( test_random_setter<RandomSetter<SparseMatrixType, StdMapTraits> >(m,refMat,nonzeroCoords) ));
 #ifdef _HASH_MAP
-  VERIFY(( test_random_setter<RandomSetter<SparseMatrix<Scalar>, GnuHashMapTraits> >(m,refMat,nonzeroCoords) ));
+  VERIFY(( test_random_setter<RandomSetter<SparseMatrixType, GnuHashMapTraits> >(m,refMat,nonzeroCoords) ));
 #endif
 #ifdef _DENSE_HASH_MAP_H_
-  VERIFY(( test_random_setter<RandomSetter<SparseMatrix<Scalar>, GoogleDenseHashMapTraits> >(m,refMat,nonzeroCoords) ));
+  VERIFY(( test_random_setter<RandomSetter<SparseMatrixType, GoogleDenseHashMapTraits> >(m,refMat,nonzeroCoords) ));
 #endif
 #ifdef _SPARSE_HASH_MAP_H_
-  VERIFY(( test_random_setter<RandomSetter<SparseMatrix<Scalar>, GoogleSparseHashMapTraits> >(m,refMat,nonzeroCoords) ));
+  VERIFY(( test_random_setter<RandomSetter<SparseMatrixType, GoogleSparseHashMapTraits> >(m,refMat,nonzeroCoords) ));
 #endif
 
   // test fillrand
   {
     DenseMatrix m1(rows,cols);
     m1.setZero();
-    SparseMatrix<Scalar> m2(rows,cols);
+    SparseMatrixType m2(rows,cols);
     m2.startFill();
     for (int j=0; j<cols; ++j)
     {
@@ -171,23 +192,23 @@ template<typename Scalar> void sparse_basic(int rows, int cols)
       }
     }
     m2.endFill();
-    std::cerr << m1 << "\n\n" << m2 << "\n";
+    //std::cerr << m1 << "\n\n" << m2 << "\n";
     VERIFY_IS_APPROX(m2,m1);
   }
 
   // test RandomSetter
-  {
-    SparseMatrix<Scalar> m1(rows,cols), m2(rows,cols);
+  /*{
+    SparseMatrixType m1(rows,cols), m2(rows,cols);
     DenseMatrix refM1 = DenseMatrix::Zero(rows, rows);
     initSparse<Scalar>(density, refM1, m1);
     {
-      Eigen::RandomSetter<SparseMatrix<Scalar> > setter(m2);
+      Eigen::RandomSetter<SparseMatrixType > setter(m2);
      for (int j=0; j<m1.outerSize(); ++j)
-        for (typename SparseMatrix<Scalar>::InnerIterator i(m1,j); i; ++i)
+        for (typename SparseMatrixType::InnerIterator i(m1,j); i; ++i)
          setter(i.index(), j) = i.value();
    }
    VERIFY_IS_APPROX(m1, m2);
-  }
+  }*/
   // std::cerr << m.transpose() << "\n\n" << refMat.transpose() << "\n\n";
   // VERIFY_IS_APPROX(m, refMat);
 
@@ -197,10 +218,10 @@ template<typename Scalar> void sparse_basic(int rows, int cols)
     DenseMatrix refM2 = DenseMatrix::Zero(rows, rows);
     DenseMatrix refM3 = DenseMatrix::Zero(rows, rows);
     DenseMatrix refM4 = DenseMatrix::Zero(rows, rows);
-    SparseMatrix<Scalar> m1(rows, rows);
-    SparseMatrix<Scalar> m2(rows, rows);
-    SparseMatrix<Scalar> m3(rows, rows);
-    SparseMatrix<Scalar> m4(rows, rows);
+    SparseMatrixType m1(rows, rows);
+    SparseMatrixType m2(rows, rows);
+    SparseMatrixType m3(rows, rows);
+    SparseMatrixType m4(rows, rows);
     initSparse<Scalar>(density, refM1, m1);
     initSparse<Scalar>(density, refM2, m2);
     initSparse<Scalar>(density, refM3, m3);
@@ -223,7 +244,7 @@ template<typename Scalar> void sparse_basic(int rows, int cols)
   // test innerVector()
   {
     DenseMatrix refMat2 = DenseMatrix::Zero(rows, rows);
-    SparseMatrix<Scalar> m2(rows, rows);
+    SparseMatrixType m2(rows, rows);
     initSparse<Scalar>(density, refMat2, m2);
     int j0 = ei_random(0,rows-1);
     int j1 = ei_random(0,rows-1);
@@ -234,7 +255,7 @@ template<typename Scalar> void sparse_basic(int rows, int cols)
   // test transpose
   {
     DenseMatrix refMat2 = DenseMatrix::Zero(rows, rows);
-    SparseMatrix<Scalar> m2(rows, rows);
+    SparseMatrixType m2(rows, rows);
     initSparse<Scalar>(density, refMat2, m2);
     VERIFY_IS_APPROX(m2.transpose().eval(), refMat2.transpose().eval());
     VERIFY_IS_APPROX(m2.transpose(), refMat2.transpose());
@@ -246,9 +267,9 @@ template<typename Scalar> void sparse_basic(int rows, int cols)
     DenseMatrix refMat3 = DenseMatrix::Zero(rows, rows);
     DenseMatrix refMat4 = DenseMatrix::Zero(rows, rows);
     DenseMatrix dm4 = DenseMatrix::Zero(rows, rows);
-    SparseMatrix<Scalar> m2(rows, rows);
-    SparseMatrix<Scalar> m3(rows, rows);
-    SparseMatrix<Scalar> m4(rows, rows);
+    SparseMatrixType m2(rows, rows);
+    SparseMatrixType m3(rows, rows);
+    SparseMatrixType m4(rows, rows);
     initSparse<Scalar>(density, refMat2, m2);
     initSparse<Scalar>(density, refMat3, m3);
     initSparse<Scalar>(density, refMat4, m4);
@@ -278,9 +299,9 @@ template<typename Scalar> void sparse_basic(int rows, int cols)
     DenseMatrix refUp = DenseMatrix::Zero(rows, rows);
     DenseMatrix refLo = DenseMatrix::Zero(rows, rows);
     DenseMatrix refS = DenseMatrix::Zero(rows, rows);
-    SparseMatrix<Scalar> mUp(rows, rows);
-    SparseMatrix<Scalar> mLo(rows, rows);
-    SparseMatrix<Scalar> mS(rows, rows);
+    SparseMatrixType mUp(rows, rows);
+    SparseMatrixType mLo(rows, rows);
+    SparseMatrixType mS(rows, rows);
     do {
       initSparse<Scalar>(density, refUp, mUp, ForceRealDiag|/*ForceNonZeroDiag|*/MakeUpperTriangular);
     } while (refUp.isZero());
@@ -290,7 +311,7 @@ template<typename Scalar> void sparse_basic(int rows, int cols)
     refS.diagonal() *= 0.5;
     mS = mUp + mLo;
     for (int k=0; k<mS.outerSize(); ++k)
-      for (typename SparseMatrix<Scalar>::InnerIterator it(mS,k); it; ++it)
+      for (typename SparseMatrixType::InnerIterator it(mS,k); it; ++it)
         if (it.index() == k)
           it.valueRef() *= 0.5;
 
@@ -307,8 +328,10 @@ template<typename Scalar> void sparse_basic(int rows, int cols)
 void test_sparse_basic()
 {
   for(int i = 0; i < g_repeat; i++) {
-    CALL_SUBTEST( sparse_basic<double>(8, 8) );
-    CALL_SUBTEST( sparse_basic<std::complex<double> >(16, 16) );
-    CALL_SUBTEST( sparse_basic<double>(33, 33) );
+//     CALL_SUBTEST( sparse_basic(SparseMatrix<double>(8, 8)) );
+//     CALL_SUBTEST( sparse_basic(SparseMatrix<std::complex<double> >(16, 16)) );
+//     CALL_SUBTEST( sparse_basic(SparseMatrix<double>(33, 33)) );
+
+    CALL_SUBTEST( sparse_basic(DynamicSparseMatrix<double>(8, 8)) );
   }
 }