Mirror of https://gitlab.com/libeigen/eigen.git (synced 2024-11-21 03:11:25 +08:00)
Fix a couple of int versus Index issues.
commit d8f1035355
parent bff264283d
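In Eigen, every dense object exposes an Index typedef (DenseIndex, which defaults to std::ptrdiff_t), and that is the intended type for sizes, strides, and loop counters; mixing in plain int produces signed-conversion warnings and can overflow once an index no longer fits in 32 bits. The hunks below systematically replace int with Index (or make the index type a template parameter). A minimal sketch of the pattern, using a hypothetical helper that is not part of this diff:

#include <algorithm>
#include <Eigen/Dense>

// Hypothetical helper, shown only to illustrate the int -> Index convention
// that this commit enforces throughout the library.
template<typename MatrixType>
typename MatrixType::Scalar trace_like_sum(const MatrixType& mat)
{
  typedef typename MatrixType::Index Index;    // DenseIndex (std::ptrdiff_t) by default
  typedef typename MatrixType::Scalar Scalar;
  Scalar s = Scalar(0);
  Index n = (std::min)(mat.rows(), mat.cols()); // Index, not int, as in this commit
  for(Index i = 0; i < n; ++i)                  // loop counter also uses Index
    s += mat(i, i);
  return s;
}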
@@ -200,7 +200,7 @@ static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const V
   typedef Matrix<Scalar,Dynamic,1> TempVectorType;
   typedef typename TempVectorType::SegmentReturnType TempVecSegment;
 
-  int n = mat.cols();
+  Index n = mat.cols();
   eigen_assert(mat.rows()==n && vec.size()==n);
 
   TempVectorType temp;
@@ -212,12 +212,12 @@ static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const V
     // i.e., for sigma > 0
     temp = sqrt(sigma) * vec;
 
-    for(int i=0; i<n; ++i)
+    for(Index i=0; i<n; ++i)
     {
       JacobiRotation<Scalar> g;
       g.makeGivens(mat(i,i), -temp(i), &mat(i,i));
 
-      int rs = n-i-1;
+      Index rs = n-i-1;
       if(rs>0)
       {
         ColXprSegment x(mat.col(i).tail(rs));
@@ -230,7 +230,7 @@ static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const V
   {
     temp = vec;
     RealScalar beta = 1;
-    for(int j=0; j<n; ++j)
+    for(Index j=0; j<n; ++j)
     {
       RealScalar Ljj = real(mat.coeff(j,j));
       RealScalar dj = abs2(Ljj);
@@ -155,7 +155,7 @@ struct assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
 template<typename Derived1, typename Derived2, int Index, int Stop>
 struct assign_DefaultTraversal_InnerUnrolling
 {
-  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, int outer)
+  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, typename Derived1::Index outer)
   {
     dst.copyCoeffByOuterInner(outer, Index, src);
     assign_DefaultTraversal_InnerUnrolling<Derived1, Derived2, Index+1, Stop>::run(dst, src, outer);
@@ -165,7 +165,7 @@ struct assign_DefaultTraversal_InnerUnrolling
 template<typename Derived1, typename Derived2, int Stop>
 struct assign_DefaultTraversal_InnerUnrolling<Derived1, Derived2, Stop, Stop>
 {
-  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, int) {}
+  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, typename Derived1::Index) {}
 };
 
 /***********************
@@ -218,7 +218,7 @@ struct assign_innervec_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
 template<typename Derived1, typename Derived2, int Index, int Stop>
 struct assign_innervec_InnerUnrolling
 {
-  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, int outer)
+  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, typename Derived1::Index outer)
   {
     dst.template copyPacketByOuterInner<Derived2, Aligned, Aligned>(outer, Index, src);
     assign_innervec_InnerUnrolling<Derived1, Derived2,
@@ -229,7 +229,7 @@ struct assign_innervec_InnerUnrolling
 template<typename Derived1, typename Derived2, int Stop>
 struct assign_innervec_InnerUnrolling<Derived1, Derived2, Stop, Stop>
 {
-  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, int) {}
+  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, typename Derived1::Index) {}
 };
 
 /***************************************************************************
@@ -560,7 +560,7 @@ struct linspaced_op_impl<Scalar,false>
   EIGEN_STRONG_INLINE const Scalar operator() (Index i) const
   {
     m_base = padd(m_base, pset1<Packet>(m_step));
-    return m_low+i*m_step;
+    return m_low+Scalar(i)*m_step;
   }
 
   template<typename Index>
@@ -609,7 +609,7 @@ template <typename Scalar, bool RandomAccess> struct functor_traits< linspaced_o
 template <typename Scalar, bool RandomAccess> struct linspaced_op
 {
   typedef typename packet_traits<Scalar>::type Packet;
-  linspaced_op(const Scalar& low, const Scalar& high, int num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/(num_steps-1))) {}
+  linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/(num_steps-1))) {}
 
   template<typename Index>
   EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); }
@@ -541,24 +541,25 @@ struct permut_matrix_product_retval
   : public ReturnByValue<permut_matrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
 {
     typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
+    typedef typename MatrixType::Index Index;
 
     permut_matrix_product_retval(const PermutationType& perm, const MatrixType& matrix)
       : m_permutation(perm), m_matrix(matrix)
     {}
 
-    inline int rows() const { return m_matrix.rows(); }
-    inline int cols() const { return m_matrix.cols(); }
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
 
     template<typename Dest> inline void evalTo(Dest& dst) const
     {
-      const int n = Side==OnTheLeft ? rows() : cols();
+      const Index n = Side==OnTheLeft ? rows() : cols();
 
       if(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix))
       {
        // apply the permutation inplace
        Matrix<bool,PermutationType::RowsAtCompileTime,1,0,PermutationType::MaxRowsAtCompileTime> mask(m_permutation.size());
        mask.fill(false);
-       int r = 0;
+       Index r = 0;
        while(r < m_permutation.size())
        {
          // search for the next seed
@@ -566,10 +567,10 @@ struct permut_matrix_product_retval
          if(r>=m_permutation.size())
            break;
          // we got one, let's follow it until we are back to the seed
-         int k0 = r++;
-         int kPrev = k0;
+         Index k0 = r++;
+         Index kPrev = k0;
          mask.coeffRef(k0) = true;
-         for(int k=m_permutation.indices().coeff(k0); k!=k0; k=m_permutation.indices().coeff(k))
+         for(Index k=m_permutation.indices().coeff(k0); k!=k0; k=m_permutation.indices().coeff(k))
          {
            Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>(dst, k)
                  .swap(Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>
@@ -256,7 +256,7 @@ template<> struct trmv_selector<ColMajor>
     if(!evalToDest)
     {
       #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
-      int size = dest.size();
+      Index size = dest.size();
       EIGEN_DENSE_STORAGE_CTOR_PLUGIN
       #endif
       if(!alphaIsCompatible)
@@ -242,7 +242,7 @@ struct partial_lu_impl
     const Index cols = lu.cols();
     const Index size = (std::min)(rows,cols);
     nb_transpositions = 0;
-    int first_zero_pivot = -1;
+    Index first_zero_pivot = -1;
     for(Index k = 0; k < size; ++k)
     {
       Index rrows = rows-k-1;
@@ -253,7 +253,7 @@ struct partial_lu_impl
         = lu.col(k).tail(rows-k).cwiseAbs().maxCoeff(&row_of_biggest_in_col);
       row_of_biggest_in_col += k;
 
-      row_transpositions[k] = row_of_biggest_in_col;
+      row_transpositions[k] = PivIndex(row_of_biggest_in_col);
 
       if(biggest_in_corner != RealScalar(0))
       {
@@ -318,7 +318,7 @@ struct partial_lu_impl
     }
 
     nb_transpositions = 0;
-    int first_zero_pivot = -1;
+    Index first_zero_pivot = -1;
     for(Index k = 0; k < size; k+=blockSize)
    {
       Index bs = (std::min)(size-k,blockSize); // actual size of the block
@@ -21,9 +21,9 @@ namespace internal {
  *  - lda and ldc must be multiples of the respective packet size
  *  - C must have the same alignment as A
  */
-template<typename Scalar>
+template<typename Scalar,typename Index>
 EIGEN_DONT_INLINE
-void sparselu_gemm(int m, int n, int d, const Scalar* A, int lda, const Scalar* B, int ldb, Scalar* C, int ldc)
+void sparselu_gemm(Index m, Index n, Index d, const Scalar* A, Index lda, const Scalar* B, Index ldb, Scalar* C, Index ldc)
 {
   using namespace Eigen::internal;
 
@@ -37,37 +37,37 @@ void sparselu_gemm(int m, int n, int d, const Scalar* A, int lda, const Scalar*
     BM = 4096/sizeof(Scalar), // number of rows of A-C per chunk
     SM = PM*PacketSize        // step along M
   };
-  int d_end = (d/RK)*RK;    // number of columns of A (rows of B) suitable for full register blocking
-  int n_end = (n/RN)*RN;    // number of columns of B-C suitable for processing RN columns at once
-  int i0 = internal::first_aligned(A,m);
+  Index d_end = (d/RK)*RK;    // number of columns of A (rows of B) suitable for full register blocking
+  Index n_end = (n/RN)*RN;    // number of columns of B-C suitable for processing RN columns at once
+  Index i0 = internal::first_aligned(A,m);
 
   eigen_internal_assert(((lda%PacketSize)==0) && ((ldc%PacketSize)==0) && (i0==internal::first_aligned(C,m)));
 
   // handle the non aligned rows of A and C without any optimization:
-  for(int i=0; i<i0; ++i)
+  for(Index i=0; i<i0; ++i)
   {
-    for(int j=0; j<n; ++j)
+    for(Index j=0; j<n; ++j)
     {
       Scalar c = C[i+j*ldc];
-      for(int k=0; k<d; ++k)
+      for(Index k=0; k<d; ++k)
         c += B[k+j*ldb] * A[i+k*lda];
       C[i+j*ldc] = c;
     }
   }
   // process the remaining rows per chunk of BM rows
-  for(int ib=i0; ib<m; ib+=BM)
+  for(Index ib=i0; ib<m; ib+=BM)
   {
-    int actual_b = std::min<int>(BM, m-ib);                 // actual number of rows
-    int actual_b_end1 = (actual_b/SM)*SM;                   // actual number of rows suitable for peeling
-    int actual_b_end2 = (actual_b/PacketSize)*PacketSize;   // actual number of rows suitable for vectorization
+    Index actual_b = std::min<Index>(BM, m-ib);               // actual number of rows
+    Index actual_b_end1 = (actual_b/SM)*SM;                   // actual number of rows suitable for peeling
+    Index actual_b_end2 = (actual_b/PacketSize)*PacketSize;   // actual number of rows suitable for vectorization
 
     // Let's process two columns of B-C at once
-    for(int j=0; j<n_end; j+=RN)
+    for(Index j=0; j<n_end; j+=RN)
     {
       const Scalar* Bc0 = B+(j+0)*ldb;
       const Scalar* Bc1 = B+(j+1)*ldb;
 
-      for(int k=0; k<d_end; k+=RK)
+      for(Index k=0; k<d_end; k+=RK)
       {
 
         // load and expand a RN x RK block of B
@@ -124,7 +124,7 @@ void sparselu_gemm(int m, int n, int d, const Scalar* A, int lda, const Scalar*
           pstore(C1+i+(I)*PacketSize, c1)
 
         // process rows of A' - C' with aggressive vectorization and peeling
-        for(int i=0; i<actual_b_end1; i+=PacketSize*8)
+        for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
        {
           EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL1");
           prefetch((A0+i+(5)*PacketSize));
@@ -141,13 +141,13 @@ void sparselu_gemm(int m, int n, int d, const Scalar* A, int lda, const Scalar*
           WORK(7);
         }
         // process the remaining rows with vectorization only
-        for(int i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
+        for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
         {
           WORK(0);
         }
         #undef WORK
         // process the remaining rows without vectorization
-        for(int i=actual_b_end2; i<actual_b; ++i)
+        for(Index i=actual_b_end2; i<actual_b; ++i)
        {
           if(RK==4)
           {
@@ -170,7 +170,7 @@ void sparselu_gemm(int m, int n, int d, const Scalar* A, int lda, const Scalar*
     {
       const Scalar* Bc0 = B+(n-1)*ldb;
 
-      for(int k=0; k<d_end; k+=RK)
+      for(Index k=0; k<d_end; k+=RK)
       {
 
         // load and expand a 1 x RK block of B
@@ -215,7 +215,7 @@ void sparselu_gemm(int m, int n, int d, const Scalar* A, int lda, const Scalar*
           pstore(C0+i+(I)*PacketSize, c0);
 
         // agressive vectorization and peeling
-        for(int i=0; i<actual_b_end1; i+=PacketSize*8)
+        for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
         {
           EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL2");
           WORK(0);
@@ -228,12 +228,12 @@ void sparselu_gemm(int m, int n, int d, const Scalar* A, int lda, const Scalar*
           WORK(7);
         }
         // vectorization only
-        for(int i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
+        for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
         {
           WORK(0);
        }
        // remaining scalars
-       for(int i=actual_b_end2; i<actual_b; ++i)
+       for(Index i=actual_b_end2; i<actual_b; ++i)
        {
          if(RK==4)
            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
@@ -247,10 +247,10 @@ void sparselu_gemm(int m, int n, int d, const Scalar* A, int lda, const Scalar*
       }
 
       // process the last columns of A, corresponding to the last rows of B
-      int rd = d-d_end;
+      Index rd = d-d_end;
       if(rd>0)
       {
-        for(int j=0; j<n; ++j)
+        for(Index j=0; j<n; ++j)
        {
           enum {
             Alignment = PacketSize>1 ? Aligned : 0
@@ -405,7 +405,7 @@ void set_repeat_from_string(const char *str)
 void set_seed_from_string(const char *str)
 {
   errno = 0;
-  g_seed = strtoul(str, 0, 10);
+  g_seed = int(strtoul(str, 0, 10));
   if(errno || g_seed == 0)
   {
     std::cout << "Invalid seed value " << str << std::endl;