bug #1004: remove the inaccurate "sequential" path for LinSpaced, mark the respective functions as deprecated, and enforce strict interpolation of the upper bound using a correction term.

Now, even with limited floating-point precision, both the 'low' and 'high' bounds are reproduced exactly at i=0 and i=size-1, respectively.
Gael Guennebaud 2016-10-24 20:27:21 +02:00
parent b11aab5fcc
commit b027d7a8cf
4 changed files with 58 additions and 98 deletions
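
For reference, the interpolation scheme introduced in linspaced_op_impl (third file below) can be condensed into the following standalone sketch of the formula low + i*step + i*corr. This is an illustration only, not Eigen code: the helper name linspaced_coeff and the sample values are made up, and the special case where a size-1 vector returns 'high' (handled in linspaced_op's constructor) is omitted.

#include <cstdio>

// Sketch of the corrected interpolation: the usual step (high-low)/(n-1) plus a
// per-index correction that compensates for the rounding of low + (n-1)*step.
double linspaced_coeff(double low, double high, int num_steps, int i)
{
  const double step = (num_steps == 1) ? 0.0 : (high - low) / double(num_steps - 1);
  // Correction so that i == num_steps-1 lands back on 'high' (mirrors m_corr in
  // NullaryFunctors.h below).
  const double corr = (high - (low + double(num_steps - 1) * step))
                    / double(num_steps <= 1 ? 1 : num_steps - 1);
  return low + double(i) * step + double(i) * corr;
}

int main()
{
  const double low = 1.0/3.0, high = 7.0/3.0;  // endpoints that are not exactly representable
  const int n = 7;
  for (int i = 0; i < n; ++i)
    std::printf("x[%d] = %.17g\n", i, linspaced_coeff(low, high, n, i));
  // x[0] is exactly 'low'; x[n-1] is intended to reproduce 'high' bit-for-bit,
  // which is what the updated unit test asserts with VERIFY_IS_EQUAL below.
  return 0;
}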

Eigen/src/Core/CwiseNullaryOp.h

@@ -215,42 +215,29 @@ DenseBase<Derived>::Constant(const Scalar& value)
return DenseBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_constant_op<Scalar>(value));
}
/**
* \brief Sets a linearly spaced vector.
/** \deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(Index,const Scalar&,const Scalar&)
*
* The function generates 'size' equally spaced values in the closed interval [low,high].
* This particular version of LinSpaced() uses sequential access, i.e. vector access is
* assumed to be a(0), a(1), ..., a(size-1). This assumption allows for better vectorization
* and yields faster code than the random access version.
*
* When size is set to 1, a vector of length 1 containing 'high' is returned.
*
* \only_for_vectors
*
* Example: \include DenseBase_LinSpaced_seq.cpp
* Output: \verbinclude DenseBase_LinSpaced_seq.out
*
* \sa setLinSpaced(Index,const Scalar&,const Scalar&), LinSpaced(Index,Scalar,Scalar), CwiseNullaryOp
* \sa LinSpaced(Index,Scalar,Scalar), setLinSpaced(Index,const Scalar&,const Scalar&)
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::SequentialLinSpacedReturnType
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
DenseBase<Derived>::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,PacketScalar,false>(low,high,size));
return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,PacketScalar>(low,high,size));
}
/**
* \copydoc DenseBase::LinSpaced(Sequential_t, Index, const Scalar&, const Scalar&)
* Special version for fixed size types which does not require the size parameter.
/** \deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(const Scalar&,const Scalar&)
*
* \sa LinSpaced(Scalar,Scalar)
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::SequentialLinSpacedReturnType
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,PacketScalar,false>(low,high,Derived::SizeAtCompileTime));
return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,PacketScalar>(low,high,Derived::SizeAtCompileTime));
}
/**
@@ -274,14 +261,14 @@ DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& hig
* Example: \include DenseBase_LinSpacedInt.cpp
* Output: \verbinclude DenseBase_LinSpacedInt.out
*
* \sa setLinSpaced(Index,const Scalar&,const Scalar&), LinSpaced(Sequential_t,Index,const Scalar&,const Scalar&,Index), CwiseNullaryOp
* \sa setLinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
DenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,PacketScalar,true>(low,high,size));
return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,PacketScalar>(low,high,size));
}
/**
@@ -294,7 +281,7 @@ DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,PacketScalar,true>(low,high,Derived::SizeAtCompileTime));
return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,PacketScalar>(low,high,Derived::SizeAtCompileTime));
}
/** \returns true if all coefficients in this matrix are approximately equal to \a val, to within precision \a prec */
@@ -396,7 +383,7 @@ template<typename Derived>
EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(Index newSize, const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op<Scalar,PacketScalar,false>(low,high,newSize));
return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op<Scalar,PacketScalar>(low,high,newSize));
}
/**

Eigen/src/Core/DenseBase.h

@@ -260,10 +260,10 @@ template<typename Derived> class DenseBase
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal Represents a matrix with all coefficients equal to one another*/
typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,PlainObject> ConstantReturnType;
/** \internal Represents a vector with linearly spaced coefficients that allows sequential access only. */
typedef CwiseNullaryOp<internal::linspaced_op<Scalar,PacketScalar,false>,PlainObject> SequentialLinSpacedReturnType;
/** \internal \deprecated Represents a vector with linearly spaced coefficients that allows sequential access only. */
typedef CwiseNullaryOp<internal::linspaced_op<Scalar,PacketScalar>,PlainObject> SequentialLinSpacedReturnType;
/** \internal Represents a vector with linearly spaced coefficients that allows random access. */
typedef CwiseNullaryOp<internal::linspaced_op<Scalar,PacketScalar,true>,PlainObject> RandomAccessLinSpacedReturnType;
typedef CwiseNullaryOp<internal::linspaced_op<Scalar,PacketScalar>,PlainObject> RandomAccessLinSpacedReturnType;
/** \internal the return type of MatrixBase::eigenvalues() */
typedef Matrix<typename NumTraits<typename internal::traits<Derived>::Scalar>::Real, internal::traits<Derived>::ColsAtCompileTime, 1> EigenvaluesReturnType;

Eigen/src/Core/functors/NullaryFunctors.h

@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2008-2016 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -37,66 +37,40 @@ template<typename Scalar>
struct functor_traits<scalar_identity_op<Scalar> >
{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = false, IsRepeatable = true }; };
template <typename Scalar, typename Packet, bool RandomAccess, bool IsInteger> struct linspaced_op_impl;
template <typename Scalar, typename Packet, bool IsInteger> struct linspaced_op_impl;
// linear access for packet ops:
// 1) initialization
// base = [low, ..., low] + ([step, ..., step] * [-size, ..., 0])
// 2) each step (where size is 1 for coeff access or PacketSize for packet access)
// base += [size*step, ..., size*step]
//
// TODO: Perhaps it's better to initialize lazily (so not in the constructor but in packetOp)
// in order to avoid the padd() in operator() ?
template <typename Scalar, typename Packet>
struct linspaced_op_impl<Scalar,Packet,/*RandomAccess*/false,/*IsInteger*/false>
struct linspaced_op_impl<Scalar,Packet,/*IsInteger*/false>
{
linspaced_op_impl(const Scalar& low, const Scalar& high, Index num_steps) :
m_low(low), m_step(num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1)),
m_packetStep(pset1<Packet>(unpacket_traits<Packet>::size*m_step)),
m_base(padd(pset1<Packet>(low), pmul(pset1<Packet>(m_step),plset<Packet>(-unpacket_traits<Packet>::size)))) {}
template<typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (IndexType i) const
{
m_base = padd(m_base, pset1<Packet>(m_step));
return m_low+Scalar(i)*m_step;
m_low(low), m_step(num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1)), m_interPacket(plset<Packet>(0))
{
// Compute the correction to be applied to ensure 'high' is returned exactly for i==num_steps-1
m_corr = (high - (m_low+Scalar(num_steps-1)*m_step))/Scalar(num_steps<=1 ? 1 : num_steps-1);
}
template<typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(IndexType) const { return m_base = padd(m_base,m_packetStep); }
const Scalar m_low;
const Scalar m_step;
const Packet m_packetStep;
mutable Packet m_base;
};
// random access for packet ops:
// 1) each step
// [low, ..., low] + ( [step, ..., step] * ( [i, ..., i] + [0, ..., size] ) )
template <typename Scalar, typename Packet>
struct linspaced_op_impl<Scalar,Packet,/*RandomAccess*/true,/*IsInteger*/false>
{
linspaced_op_impl(const Scalar& low, const Scalar& high, Index num_steps) :
m_low(low), m_step(num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1)),
m_lowPacket(pset1<Packet>(m_low)), m_stepPacket(pset1<Packet>(m_step)), m_interPacket(plset<Packet>(0)) {}
template<typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (IndexType i) const { return m_low+i*m_step; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (IndexType i) const {
return m_low + i*m_step + i*m_corr;
}
template<typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(IndexType i) const
{ return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1<Packet>(Scalar(i)),m_interPacket))); }
{
// Principle:
// [low, ..., low] + ( [step, ..., step] * ( [i, ..., i] + [0, ..., size] ) )
Packet pi = padd(pset1<Packet>(Scalar(i)),m_interPacket);
return padd(padd(pset1<Packet>(m_low), pmul(pset1<Packet>(m_step), pi)),
pmul(pset1<Packet>(m_corr), pi)); }
const Scalar m_low;
Scalar m_corr;
const Scalar m_step;
const Packet m_lowPacket;
const Packet m_stepPacket;
const Packet m_interPacket;
};
template <typename Scalar, typename Packet>
struct linspaced_op_impl<Scalar,Packet,/*RandomAccess*/true,/*IsInteger*/true>
struct linspaced_op_impl<Scalar,Packet,/*IsInteger*/true>
{
linspaced_op_impl(const Scalar& low, const Scalar& high, Index num_steps) :
m_low(low),
@@ -124,8 +98,8 @@ struct linspaced_op_impl<Scalar,Packet,/*RandomAccess*/true,/*IsInteger*/true>
// Forward declaration (we default to random access which does not really give
// us a speed gain when using packet access but it allows to use the functor in
// nested expressions).
template <typename Scalar, typename PacketType, bool RandomAccess = true> struct linspaced_op;
template <typename Scalar, typename PacketType, bool RandomAccess> struct functor_traits< linspaced_op<Scalar,PacketType,RandomAccess> >
template <typename Scalar, typename PacketType> struct linspaced_op;
template <typename Scalar, typename PacketType> struct functor_traits< linspaced_op<Scalar,PacketType> >
{
enum
{
@@ -135,7 +109,7 @@ template <typename Scalar, typename PacketType, bool RandomAccess> struct functo
IsRepeatable = true
};
};
template <typename Scalar, typename PacketType, bool RandomAccess> struct linspaced_op
template <typename Scalar, typename PacketType> struct linspaced_op
{
linspaced_op(const Scalar& low, const Scalar& high, Index num_steps)
: impl((num_steps==1 ? high : low),high,num_steps)
@@ -147,12 +121,9 @@ template <typename Scalar, typename PacketType, bool RandomAccess> struct linspa
template<typename Packet,typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(IndexType i) const { return impl.packetOp(i); }
// This proxy object handles the actual required temporaries, the different
// implementations (random vs. sequential access) as well as the
// correct piping to size 2/4 packet operations.
// As long as we don't have a Bresenham-like implementation for linear-access and integer types,
// we have to by-pass RandomAccess for integer types. See bug 698.
const linspaced_op_impl<Scalar,PacketType,(NumTraits<Scalar>::IsInteger?true:RandomAccess),NumTraits<Scalar>::IsInteger> impl;
// This proxy object handles the actual required temporaries and the different
// implementations (integer vs. floating point).
const linspaced_op_impl<Scalar,PacketType,NumTraits<Scalar>::IsInteger> impl;
};
// Linear access is automatically determined from the operator() prototypes available for the given functor.

test/nullary.cpp

@@ -73,16 +73,18 @@ void testVectorType(const VectorType& base)
VERIFY_IS_APPROX(m,n);
VERIFY( internal::isApprox(m(m.size()-1),high) );
VERIFY( size==1 || internal::isApprox(m(0),low) );
// sequential access version
m = VectorType::LinSpaced(Sequential,size,low,high);
VERIFY_IS_APPROX(m,n);
VERIFY( internal::isApprox(m(m.size()-1),high) );
VERIFY_IS_EQUAL(m(m.size()-1) , high);
}
Scalar tol_factor = (high>=0) ? (1+NumTraits<Scalar>::dummy_precision()) : (1-NumTraits<Scalar>::dummy_precision());
VERIFY( m(m.size()-1) <= high*tol_factor );
VERIFY( size==1 || internal::isApprox(m(0),low) );
VERIFY( m(m.size()-1) <= high );
VERIFY( m(m.size()-1) >= low );
if(size>=1)
{
VERIFY( internal::isApprox(m(0),low) );
VERIFY_IS_EQUAL(m(0) , low);
}
// check whether everything works with row and col major vectors
Matrix<Scalar,Dynamic,1> row_vector(size);
@@ -187,10 +189,10 @@ void test_nullary()
VERIFY(( internal::has_binary_operator<internal::scalar_identity_op<double> >::value ));
VERIFY(( !internal::functor_has_linear_access<internal::scalar_identity_op<double> >::ret ));
VERIFY(( !internal::has_nullary_operator<internal::linspaced_op<float,float,false> >::value ));
VERIFY(( internal::has_unary_operator<internal::linspaced_op<float,float,false> >::value ));
VERIFY(( !internal::has_binary_operator<internal::linspaced_op<float,float,false> >::value ));
VERIFY(( internal::functor_has_linear_access<internal::linspaced_op<float,float,false> >::ret ));
VERIFY(( !internal::has_nullary_operator<internal::linspaced_op<float,float> >::value ));
VERIFY(( internal::has_unary_operator<internal::linspaced_op<float,float> >::value ));
VERIFY(( !internal::has_binary_operator<internal::linspaced_op<float,float> >::value ));
VERIFY(( internal::functor_has_linear_access<internal::linspaced_op<float,float> >::ret ));
// Regression unit test for a weird MSVC bug.
// Search "nullary_wrapper_workaround_msvc" in CoreEvaluators.h for the details.
@@ -211,10 +213,10 @@ void test_nullary()
VERIFY(( !internal::has_binary_operator<internal::scalar_constant_op<float> >::value ));
VERIFY(( internal::functor_has_linear_access<internal::scalar_constant_op<float> >::ret ));
VERIFY(( !internal::has_nullary_operator<internal::linspaced_op<int,int,false> >::value ));
VERIFY(( internal::has_unary_operator<internal::linspaced_op<int,int,false> >::value ));
VERIFY(( !internal::has_binary_operator<internal::linspaced_op<int,int,false> >::value ));
VERIFY(( internal::functor_has_linear_access<internal::linspaced_op<int,int,false> >::ret ));
VERIFY(( !internal::has_nullary_operator<internal::linspaced_op<int,int> >::value ));
VERIFY(( internal::has_unary_operator<internal::linspaced_op<int,int> >::value ));
VERIFY(( !internal::has_binary_operator<internal::linspaced_op<int,int> >::value ));
VERIFY(( internal::functor_has_linear_access<internal::linspaced_op<int,int> >::ret ));
}
#endif
}
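
Finally, a minimal usage sketch of the non-deprecated entry points after this change. The program and its values are illustrative and not part of the commit; the exact-endpoint comment reflects what the updated unit test above asserts.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  // Only the random-access implementation remains; the deprecated
  // LinSpaced(Sequential, ...) overloads now forward to it.
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0);
  std::cout << v.transpose() << std::endl;  // 0 0.25 0.5 0.75 1

  Eigen::VectorXf w(7);
  w.setLinSpaced(7, 0.0f, 0.7f);            // in-place variant
  // The updated test asserts exact reproduction of the bounds: m(0)==low, m(size-1)==high.
  std::cout << (w(0) == 0.0f) << ' ' << (w(6) == 0.7f) << std::endl;
}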