Enable and fix -Wdouble-conversion warnings
commit dacb469bc9
parent 62b710072e
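
Editor's note: as context for the changes below, here is a minimal sketch (not part of the commit) of the implicit float-to-double arithmetic that warnings such as -Wdouble-promotion flag, together with the explicit-cast pattern applied throughout this diff; the function name half_of is made up for illustration.

    #include <cassert>

    template <typename Scalar>
    Scalar half_of(Scalar x) {
      // return x * 0.5;         // 0.5 is a double literal: with Scalar = float the product
      //                         // is computed in double and silently converted back to float
      return x * Scalar(0.5);    // casting the literal keeps the arithmetic in Scalar
    }

    int main() {
      assert(half_of(2.0f) == 1.0f);  // exact in binary floating point
      return 0;
    }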
@@ -120,7 +120,7 @@ endmacro(ei_add_cxx_compiler_flag)
 if(NOT MSVC)
 # We assume that other compilers are partly compatible with GNUCC

-# clang outputs some warnings for unknwon flags that are not caught by check_cxx_compiler_flag
+# clang outputs some warnings for unknown flags that are not caught by check_cxx_compiler_flag
 # adding -Werror turns such warnings into errors
 check_cxx_compiler_flag("-Werror" COMPILER_SUPPORT_WERROR)
 if(COMPILER_SUPPORT_WERROR)
@@ -143,6 +143,8 @@ if(NOT MSVC)
 ei_add_cxx_compiler_flag("-Wshorten-64-to-32")
 ei_add_cxx_compiler_flag("-Wenum-conversion")
 ei_add_cxx_compiler_flag("-Wc++11-extensions")
+ei_add_cxx_compiler_flag("-Wdouble-promotion")
+# ei_add_cxx_compiler_flag("-Wconversion")

 # -Wshadow is insanely too strict with gcc, hopefully it will become usable with gcc 6
 # if(NOT CMAKE_COMPILER_IS_GNUCXX OR (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "5.0.0"))
@@ -158,7 +160,7 @@ if(NOT MSVC)
 ei_add_cxx_compiler_flag("-fno-common")
 ei_add_cxx_compiler_flag("-fstrict-aliasing")
 ei_add_cxx_compiler_flag("-wd981") # disable ICC's "operands are evaluated in unspecified order" remark
-ei_add_cxx_compiler_flag("-wd2304") # disbale ICC's "warning #2304: non-explicit constructor with single argument may cause implicit type conversion" produced by -Wnon-virtual-dtor
+ei_add_cxx_compiler_flag("-wd2304") # disable ICC's "warning #2304: non-explicit constructor with single argument may cause implicit type conversion" produced by -Wnon-virtual-dtor


 # The -ansi flag must be added last, otherwise it is also used as a linker flag by check_cxx_compiler_flag making it fails
@@ -1192,7 +1192,7 @@ double tanh(const double &x) { return ::tanh(x); }
 template <typename T>
 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
 T fmod(const T& a, const T& b) {
-EIGEN_USING_STD_MATH(floor);
+EIGEN_USING_STD_MATH(fmod);
 return fmod(a, b);
 }

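A brief aside, not taken from the Eigen sources: the wrapper above relies on a using-declaration plus an unqualified call, so the call binds to std::fmod for builtin types while argument-dependent lookup can still pick up an fmod overload for user-defined scalar types. A self-contained sketch of that dispatch pattern (generic_fmod, MyFixed and the custom namespace are hypothetical):

    #include <cmath>

    namespace custom {
      struct MyFixed { double v; };            // hypothetical user-defined scalar
      MyFixed fmod(MyFixed a, MyFixed b) {     // found via argument-dependent lookup
        return MyFixed{ std::fmod(a.v, b.v) };
      }
    }

    template <typename T>
    T generic_fmod(const T& a, const T& b) {
      using std::fmod;     // plays the role of EIGEN_USING_STD_MATH(fmod)
      return fmod(a, b);   // std::fmod for float/double, custom::fmod for MyFixed
    }

    int main() {
      double d = generic_fmod(7.0, 3.0);       // 1.0
      custom::MyFixed f = generic_fmod(custom::MyFixed{7}, custom::MyFixed{3});
      return (d == 1.0 && f.v == 1.0) ? 0 : 1;
    }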
@@ -880,9 +880,9 @@ struct scalar_sign_op<Scalar,true> {
 {
 typedef typename NumTraits<Scalar>::Real real_type;
 real_type aa = numext::abs(a);
-if (aa==0)
+if (aa==real_type(0))
 return Scalar(0);
-aa = 1./aa;
+aa = real_type(1)/aa;
 return Scalar(real(a)*aa, imag(a)*aa );
 }
 //TODO
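Side note, not from the commit: the functor above computes sign(a) = a/|a| for a complex Scalar, and writing the reciprocal as 1./aa performs that division in double even when the magnitude is a float, which is exactly what the real_type(1) cast avoids. A rough standalone restatement (complex_sign is a hypothetical name):

    #include <complex>
    #include <cmath>

    template <typename Real>
    std::complex<Real> complex_sign(const std::complex<Real>& a) {
      Real aa = std::abs(a);
      if (aa == Real(0)) return std::complex<Real>(0);
      aa = Real(1) / aa;               // '1./aa' would compute this in double when Real is float
      return std::complex<Real>(a.real() * aa, a.imag() * aa);
    }

    int main() {
      std::complex<float> s = complex_sign(std::complex<float>(3.f, 4.f)); // (0.6, 0.8)
      return (std::abs(std::abs(s) - 1.f) < 1e-6f) ? 0 : 1;
    }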
@@ -82,15 +82,15 @@ public:

 /** \returns the rotation angle in [0,2pi] */
 inline Scalar smallestPositiveAngle() const {
-Scalar tmp = fmod(m_angle,Scalar(2)*EIGEN_PI);
-return tmp<Scalar(0) ? tmp + Scalar(2)*EIGEN_PI : tmp;
+Scalar tmp = numext::fmod(m_angle,Scalar(2*EIGEN_PI));
+return tmp<Scalar(0) ? tmp + Scalar(2*EIGEN_PI) : tmp;
 }

 /** \returns the rotation angle in [-pi,pi] */
 inline Scalar smallestAngle() const {
-Scalar tmp = fmod(m_angle,Scalar(2)*EIGEN_PI);
-if(tmp>Scalar(EIGEN_PI)) tmp -= Scalar(2)*Scalar(EIGEN_PI);
-else if(tmp<-Scalar(EIGEN_PI)) tmp += Scalar(2)*Scalar(EIGEN_PI);
+Scalar tmp = numext::fmod(m_angle,Scalar(2*EIGEN_PI));
+if(tmp>Scalar(EIGEN_PI)) tmp -= Scalar(2*EIGEN_PI);
+else if(tmp<-Scalar(EIGEN_PI)) tmp += Scalar(2*EIGEN_PI);
 return tmp;
 }

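For readers unfamiliar with the wrapping done above: std::fmod(a, 2*pi) yields a value in (-2*pi, 2*pi) with the sign of a, so one conditional shift lands it in the target interval. A hedged standalone sketch of the same idea (plain double and std::fmod stand in for Scalar, numext::fmod and EIGEN_PI):

    #include <cmath>

    const double PI = 3.141592653589793;   // stand-in for EIGEN_PI

    // Wrap an angle into [0, 2*pi).
    double smallest_positive_angle(double a) {
      double t = std::fmod(a, 2 * PI);     // result lies in (-2*pi, 2*pi)
      return t < 0 ? t + 2 * PI : t;
    }

    // Wrap an angle into [-pi, pi].
    double smallest_angle(double a) {
      double t = std::fmod(a, 2 * PI);
      if (t > PI)       t -= 2 * PI;
      else if (t < -PI) t += 2 * PI;
      return t;
    }

    int main() {
      // e.g. 7 rad wraps to about 0.717 rad in both conventions
      return (smallest_positive_angle(7.0) > 0 && smallest_angle(-7.0) < 0) ? 0 : 1;
    }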
@@ -765,14 +765,14 @@ void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& d
 RealScalar muPrev, muCur;
 if (shift == left)
 {
-muPrev = (right - left) * 0.1;
+muPrev = (right - left) * RealScalar(0.1);
 if (k == actual_n-1) muCur = right - left;
-else muCur = (right - left) * 0.5;
+else muCur = (right - left) * RealScalar(0.5);
 }
 else
 {
-muPrev = -(right - left) * 0.1;
-muCur = -(right - left) * 0.5;
+muPrev = -(right - left) * RealScalar(0.1);
+muCur = -(right - left) * RealScalar(0.5);
 }

 RealScalar fPrev = secularEq(muPrev, col0, diag, perm, diagShifted, shift);
@@ -820,11 +820,11 @@ void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& d
 leftShifted = (std::numeric_limits<RealScalar>::min)();
 // I don't understand why the case k==0 would be special there:
 // if (k == 0) rightShifted = right - left; else
-rightShifted = (k==actual_n-1) ? right : ((right - left) * 0.6); // theoretically we can take 0.5, but let's be safe
+rightShifted = (k==actual_n-1) ? right : ((right - left) * RealScalar(0.6)); // theoretically we can take 0.5, but let's be safe
 }
 else
 {
-leftShifted = -(right - left) * 0.6;
+leftShifted = -(right - left) * RealScalar(0.6);
 rightShifted = -(std::numeric_limits<RealScalar>::min)();
 }

@@ -19,4 +19,4 @@
 #include "level3_impl.h"

 float BLASFUNC(sdsdot)(int* n, float* alpha, float* x, int* incx, float* y, int* incy)
-{ return *alpha + BLASFUNC(dsdot)(n, x, incx, y, incy); }
+{ return double(*alpha) + BLASFUNC(dsdot)(n, x, incx, y, incy); }
@@ -49,7 +49,7 @@ void check_inf_nan(bool dryrun) {
 VERIFY( !m.allFinite() );
 VERIFY( m.hasNaN() );
 }
-m(4) /= 0.0;
+m(4) /= T(0.0);
 if(dryrun)
 {
 std::cout << "std::isfinite(" << m(4) << ") = "; check((std::isfinite)(m(4)),false); std::cout << " ; numext::isfinite = "; check((numext::isfinite)(m(4)), false); std::cout << "\n";
@@ -97,9 +97,9 @@ template<typename Scalar> void lines()
 Vector u = Vector::Random();
 Vector v = Vector::Random();
 Scalar a = internal::random<Scalar>();
-while (abs(a-1) < 1e-4) a = internal::random<Scalar>();
-while (u.norm() < 1e-4) u = Vector::Random();
-while (v.norm() < 1e-4) v = Vector::Random();
+while (abs(a-1) < Scalar(1e-4)) a = internal::random<Scalar>();
+while (u.norm() < Scalar(1e-4)) u = Vector::Random();
+while (v.norm() < Scalar(1e-4)) v = Vector::Random();

 HLine line_u = HLine::Through(center + u, center + a*u);
 HLine line_v = HLine::Through(center + v, center + a*v);
@@ -111,14 +111,14 @@ template<typename Scalar> void lines()
 Vector result = line_u.intersection(line_v);

 // the lines should intersect at the point we called "center"
-if(abs(a-1) > 1e-2 && abs(v.normalized().dot(u.normalized()))<0.9)
+if(abs(a-1) > Scalar(1e-2) && abs(v.normalized().dot(u.normalized()))<Scalar(0.9))
 VERIFY_IS_APPROX(result, center);

 // check conversions between two types of lines
 PLine pl(line_u); // gcc 3.3 will commit suicide if we don't name this variable
 HLine line_u2(pl);
 CoeffsType converted_coeffs = line_u2.coeffs();
-if(line_u2.normal().dot(line_u.normal())<0.)
+if(line_u2.normal().dot(line_u.normal())<Scalar(0))
 converted_coeffs = -line_u2.coeffs();
 VERIFY(line_u.coeffs().isApprox(converted_coeffs));
 }
@@ -30,7 +30,7 @@ template<typename QuatType> void check_slerp(const QuatType& q0, const QuatType&
 Scalar largeEps = test_precision<Scalar>();

 Scalar theta_tot = AA(q1*q0.inverse()).angle();
-if(theta_tot>EIGEN_PI)
+if(theta_tot>Scalar(EIGEN_PI))
 theta_tot = Scalar(2.*EIGEN_PI)-theta_tot;
 for(Scalar t=0; t<=Scalar(1.001); t+=Scalar(0.1))
 {
@@ -115,8 +115,8 @@ template<typename Scalar, int Options> void quaternion(void)
 // Do not execute the test if the rotation angle is almost zero, or
 // the rotation axis and v1 are almost parallel.
 if (abs(aa.angle()) > 5*test_precision<Scalar>()
-&& (aa.axis() - v1.normalized()).norm() < 1.99
-&& (aa.axis() + v1.normalized()).norm() < 1.99)
+&& (aa.axis() - v1.normalized()).norm() < Scalar(1.99)
+&& (aa.axis() + v1.normalized()).norm() < Scalar(1.99))
 {
 VERIFY_IS_NOT_APPROX(q1 * v1, Quaternionx(AngleAxisx(aa.angle()*2,aa.axis())) * v1);
 }
@@ -466,7 +466,7 @@ template<typename Scalar, int Mode, int Options> void transformations()
 Scalar a2 = R0.slerp(Scalar(k+1)/Scalar(path_steps), R1).angle();
 l += std::abs(a2-a1);
 }
-VERIFY(l<=EIGEN_PI*(Scalar(1)+NumTraits<Scalar>::epsilon()*Scalar(path_steps/2)));
+VERIFY(l<=Scalar(EIGEN_PI)*(Scalar(1)+NumTraits<Scalar>::epsilon()*Scalar(path_steps/2)));

 // check basic features
 {
@@ -21,6 +21,7 @@ template<typename MatrixType> void linearStructure(const MatrixType& m)
 */
 typedef typename MatrixType::Index Index;
 typedef typename MatrixType::Scalar Scalar;
+typedef typename MatrixType::RealScalar RealScalar;

 Index rows = m.rows();
 Index cols = m.cols();
@@ -32,7 +33,7 @@ template<typename MatrixType> void linearStructure(const MatrixType& m)
 m3(rows, cols);

 Scalar s1 = internal::random<Scalar>();
-while (abs(s1)<1e-3) s1 = internal::random<Scalar>();
+while (abs(s1)<RealScalar(1e-3)) s1 = internal::random<Scalar>();

 Index r = internal::random<Index>(0, rows-1),
 c = internal::random<Index>(0, cols-1);
@@ -387,7 +387,7 @@ template<typename Scalar> void packetmath_real()
 data2[i] = internal::random<Scalar>(0,1) * std::pow(Scalar(10), internal::random<Scalar>(-6,6));
 }

-if(internal::random<float>(0,1)<0.1)
+if(internal::random<float>(0,1)<0.1f)
 data1[internal::random<int>(0, PacketSize)] = 0;
 CHECK_CWISE1_IF(PacketTraits::HasSqrt, std::sqrt, internal::psqrt);
 CHECK_CWISE1_IF(PacketTraits::HasLog, std::log, internal::plog);
@@ -206,7 +206,7 @@ template<typename MatrixType> void qr_kahan_matrix()
 RealScalar c = std::sqrt(1 - s*s);
 for (Index i = 0; i < rows; ++i) {
 m1(i, i) = pow(s, i);
-m1.row(i).tail(rows - i - 1) = -pow(s, i) * c * MatrixType::Ones(1, rows - i - 1);
+m1.row(i).tail(rows - i - 1) = -RealScalar(pow(s, i)) * c * MatrixType::Ones(1, rows - i - 1);
 }
 m1 = (m1 + m1.transpose()).eval();
 ColPivHouseholderQR<MatrixType> qr(m1);
@@ -232,11 +232,11 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
 for (Index i=0; i<m2.rows(); ++i)
 {
 float x = internal::random<float>(0,1);
-if (x<0.1)
+if (x<0.1f)
 {
 // do nothing
 }
-else if (x<0.5)
+else if (x<0.5f)
 {
 countFalseNonZero++;
 m2.insert(i,j) = Scalar(0);
@@ -150,7 +150,7 @@ template<typename SparseMatrixType> void sparse_block(const SparseMatrixType& re
 DenseMatrix refMat2 = DenseMatrix::Zero(rows, cols);
 SparseMatrixType m2(rows, cols);
 initSparse<Scalar>(density, refMat2, m2);
-if(internal::random<float>(0,1)>0.5) m2.makeCompressed();
+if(internal::random<float>(0,1)>0.5f) m2.makeCompressed();
 Index j0 = internal::random<Index>(0,outer-2);
 Index j1 = internal::random<Index>(0,outer-2);
 Index n0 = internal::random<Index>(1,outer-(std::max)(j0,j1));
@@ -245,7 +245,7 @@ template<typename SparseMatrixType> void sparse_product()
 for (int k=0; k<mS.outerSize(); ++k)
 for (typename SparseMatrixType::InnerIterator it(mS,k); it; ++it)
 if (it.index() == k)
-it.valueRef() *= 0.5;
+it.valueRef() *= Scalar(0.5);

 VERIFY_IS_APPROX(refS.adjoint(), refS);
 VERIFY_IS_APPROX(mS.adjoint(), mS);
@@ -12,7 +12,7 @@
 template<typename Scalar,typename StorageIndex> void sparse_vector(int rows, int cols)
 {
 double densityMat = (std::max)(8./(rows*cols), 0.01);
-double densityVec = (std::max)(8./float(rows), 0.1);
+double densityVec = (std::max)(8./(rows), 0.1);
 typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
 typedef Matrix<Scalar,Dynamic,1> DenseVector;
 typedef SparseVector<Scalar,0,StorageIndex> SparseVectorType;
@@ -54,7 +54,7 @@ template<typename Scalar> void test_sparseqr_scalar()

 b = dA * DenseVector::Random(A.cols());
 solver.compute(A);
-if(internal::random<float>(0,1)>0.5)
+if(internal::random<float>(0,1)>0.5f)
 solver.factorize(A); // this checks that calling analyzePattern is not needed if the pattern do not change.
 if (solver.info() != Success)
 {
@@ -141,14 +141,14 @@ void svd_least_square(const MatrixType& m, unsigned int computationOptions)
 using std::abs;

 SolutionType y(x);
-y.row(k) = (1.+2*NumTraits<RealScalar>::epsilon())*x.row(k);
+y.row(k) = (RealScalar(1)+2*NumTraits<RealScalar>::epsilon())*x.row(k);
 RealScalar residual_y = (m*y-rhs).norm();
 VERIFY( test_isMuchSmallerThan(abs(residual_y-residual), rhs_norm) || residual < residual_y );
 if(internal::is_same<RealScalar,float>::value) ++g_test_level;
 VERIFY( test_isApprox(residual_y,residual) || residual < residual_y );
 if(internal::is_same<RealScalar,float>::value) --g_test_level;

-y.row(k) = (1.-2*NumTraits<RealScalar>::epsilon())*x.row(k);
+y.row(k) = (RealScalar(1)-2*NumTraits<RealScalar>::epsilon())*x.row(k);
 residual_y = (m*y-rhs).norm();
 VERIFY( test_isMuchSmallerThan(abs(residual_y-residual), rhs_norm) || residual < residual_y );
 if(internal::is_same<RealScalar,float>::value) ++g_test_level;
@@ -54,7 +54,7 @@ void svd_fill_random(MatrixType &m, int Option = 0)
 }

 Matrix<Scalar,Dynamic,1> samples(7);
-samples << 0, 5.60844e-313, -5.60844e-313, 4.94e-324, -4.94e-324, -1./NumTraits<RealScalar>::highest(), 1./NumTraits<RealScalar>::highest();
+samples << 0, 5.60844e-313, -5.60844e-313, 4.94e-324, -4.94e-324, -RealScalar(1)/NumTraits<RealScalar>::highest(), RealScalar(1)/NumTraits<RealScalar>::highest();

 if(Option==Symmetric)
 {
@@ -65,7 +65,7 @@ template<typename MatrixType> void triangular_square(const MatrixType& m)

 m1 = MatrixType::Random(rows, cols);
 for (int i=0; i<rows; ++i)
-while (numext::abs2(m1(i,i))<1e-1) m1(i,i) = internal::random<Scalar>();
+while (numext::abs2(m1(i,i))<RealScalar(1e-1)) m1(i,i) = internal::random<Scalar>();

 Transpose<MatrixType> trm4(m4);
 // test back and forward subsitution with a vector as the rhs
@@ -78,7 +78,7 @@ template<typename MatrixType> void triangular_square(const MatrixType& m)
 m3 = m1.template triangularView<Lower>();
 VERIFY(v2.isApprox(m3.conjugate() * (m1.conjugate().template triangularView<Lower>().solve(v2)), largerEps));

-// test back and forward subsitution with a matrix as the rhs
+// test back and forward substitution with a matrix as the rhs
 m3 = m1.template triangularView<Upper>();
 VERIFY(m2.isApprox(m3.adjoint() * (m1.adjoint().template triangularView<Lower>().solve(m2)), largerEps));
 m3 = m1.template triangularView<Lower>();
@@ -210,9 +210,9 @@ struct matrix_exp_computeUV<MatrixType, float>
 using std::pow;
 const float l1norm = arg.cwiseAbs().colwise().sum().maxCoeff();
 squarings = 0;
-if (l1norm < 4.258730016922831e-001) {
+if (l1norm < 4.258730016922831e-001f) {
 matrix_exp_pade3(arg, U, V);
-} else if (l1norm < 1.880152677804762e+000) {
+} else if (l1norm < 1.880152677804762e+000f) {
 matrix_exp_pade5(arg, U, V);
 } else {
 const float maxnorm = 3.925724783138660f;
@@ -132,6 +132,7 @@ template <typename EivalsType, typename Cluster>
 void matrix_function_partition_eigenvalues(const EivalsType& eivals, std::list<Cluster>& clusters)
 {
 typedef typename EivalsType::Index Index;
+typedef typename EivalsType::RealScalar RealScalar;
 for (Index i=0; i<eivals.rows(); ++i) {
 // Find cluster containing i-th ei'val, adding a new cluster if necessary
 typename std::list<Cluster>::iterator qi = matrix_function_find_cluster(i, clusters);
@@ -145,7 +146,7 @@ void matrix_function_partition_eigenvalues(const EivalsType& eivals, std::list<C

 // Look for other element to add to the set
 for (Index j=i+1; j<eivals.rows(); ++j) {
-if (abs(eivals(j) - eivals(i)) <= matrix_function_separation
+if (abs(eivals(j) - eivals(i)) <= RealScalar(matrix_function_separation)
 && std::find(qi->begin(), qi->end(), j) == qi->end()) {
 typename std::list<Cluster>::iterator qj = matrix_function_find_cluster(j, clusters);
 if (qj == clusters.end()) {
@@ -37,6 +37,7 @@ template <typename MatrixType>
 void matrix_log_compute_2x2(const MatrixType& A, MatrixType& result)
 {
 typedef typename MatrixType::Scalar Scalar;
+typedef typename MatrixType::RealScalar RealScalar;
 using std::abs;
 using std::ceil;
 using std::imag;
@@ -54,14 +55,14 @@ void matrix_log_compute_2x2(const MatrixType& A, MatrixType& result)
 {
 result(0,1) = A(0,1) / A(0,0);
 }
-else if ((abs(A(0,0)) < 0.5*abs(A(1,1))) || (abs(A(0,0)) > 2*abs(A(1,1))))
+else if ((abs(A(0,0)) < RealScalar(0.5)*abs(A(1,1))) || (abs(A(0,0)) > 2*abs(A(1,1))))
 {
 result(0,1) = A(0,1) * (logA11 - logA00) / y;
 }
 else
 {
 // computation in previous branch is inaccurate if A(1,1) \approx A(0,0)
-int unwindingNumber = static_cast<int>(ceil((imag(logA11 - logA00) - EIGEN_PI) / (2*EIGEN_PI)));
+int unwindingNumber = static_cast<int>(ceil((imag(logA11 - logA00) - RealScalar(EIGEN_PI)) / RealScalar(2*EIGEN_PI)));
 result(0,1) = A(0,1) * (numext::log1p(y/A(0,0)) + Scalar(0,2*EIGEN_PI*unwindingNumber)) / y;
 }
 }
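Aside, not from the commit: the unwindingNumber term above compensates for the principal branch of the logarithm; for principal logs, log(a) - log(b) = log(a/b) + 2*pi*i*k, and both this hunk and the MatrixPower one below recover the integer k as ceil((Im(log a - log b) - pi) / (2*pi)). A small numeric sketch (unwinding_number is a hypothetical helper):

    #include <complex>
    #include <cmath>

    const double PI = 3.141592653589793;

    // Integer k such that log(a) - log(b) = log(a/b) + 2*pi*i*k (principal logarithms).
    int unwinding_number(std::complex<double> a, std::complex<double> b) {
      double d = std::imag(std::log(a) - std::log(b));
      return static_cast<int>(std::ceil((d - PI) / (2 * PI)));
    }

    int main() {
      // a and b on opposite sides of the negative real axis: k becomes non-zero
      std::complex<double> a(-1.0,  0.1);   // argument close to +pi
      std::complex<double> b(-1.0, -0.1);   // argument close to -pi
      return unwinding_number(a, b) == 1 ? 0 : 1;
    }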
@@ -298,7 +298,7 @@ MatrixPowerAtomic<MatrixType>::computeSuperDiag(const ComplexScalar& curr, const

 ComplexScalar logCurr = log(curr);
 ComplexScalar logPrev = log(prev);
-int unwindingNumber = ceil((numext::imag(logCurr - logPrev) - EIGEN_PI) / (2*EIGEN_PI));
+int unwindingNumber = ceil((numext::imag(logCurr - logPrev) - RealScalar(EIGEN_PI)) / RealScalar(2*EIGEN_PI));
 ComplexScalar w = numext::log1p((curr-prev)/prev)/RealScalar(2) + ComplexScalar(0, EIGEN_PI*unwindingNumber);
 return RealScalar(2) * exp(RealScalar(0.5) * p * (logCurr + logPrev)) * sinh(p * w) / (curr - prev);
 }
@@ -394,7 +394,7 @@ namespace Eigen

 Matrix<Scalar,Order,Order> ndu(p+1,p+1);

-double saved, temp;
+Scalar saved, temp; // FIXME These were double instead of Scalar. Was there a reason for that?

 ndu(0,0) = 1.0;

@@ -433,7 +433,7 @@ namespace Eigen
 // Compute the k-th derivative
 for (DenseIndex k=1; k<=static_cast<DenseIndex>(n); ++k)
 {
-double d = 0.0;
+Scalar d = 0.0;
 DenseIndex rk,pk,j1,j2;
 rk = r-k; pk = p-k;

@@ -54,7 +54,7 @@ complex<long double> promote(long double x) { return complex<long double>( x);
 long double difpower=0;
 size_t n = (min)( buf1.size(),buf2.size() );
 for (size_t k=0;k<n;++k) {
-totalpower += (numext::abs2( buf1[k] ) + numext::abs2(buf2[k]) )/2.;
+totalpower += (numext::abs2( buf1[k] ) + numext::abs2(buf2[k]) )/2;
 difpower += numext::abs2(buf1[k] - buf2[k]);
 }
 return sqrt(difpower/totalpower);
@@ -16,7 +16,8 @@ EIGEN_DONT_INLINE Scalar foo(const Scalar& x, const Scalar& y)
 using namespace std;
 // return x+std::sin(y);
 EIGEN_ASM_COMMENT("mybegin");
-return static_cast<Scalar>(x*2 - 1 + pow(1+x,2) + 2*sqrt(y*y+0) - 4 * sin(0+x) + 2 * cos(y+0) - exp(-0.5*x*x+0));
+// pow(float, int) promotes to pow(double, double)
+return x*2 - 1 + static_cast<Scalar>(pow(1+x,2)) + 2*sqrt(y*y+0) - 4 * sin(0+x) + 2 * cos(y+0) - exp(Scalar(-0.5)*x*x+0);
 //return x+2*y*x;//x*2 -std::pow(x,2);//(2*y/x);// - y*2;
 EIGEN_ASM_COMMENT("myend");
 }
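Note on the comment introduced above, offered as background rather than as part of the change: the generic <cmath> overloads promote mixed arguments, so pow(float, int) is evaluated as pow(double, double) and returns double; casting the result back to Scalar keeps the benchmark in single precision. A tiny sketch:

    #include <cmath>
    #include <type_traits>

    int main() {
      float x = 1.5f;
      auto p = std::pow(x, 2);   // mixed float/int arguments -> evaluated as pow(double, double)
      static_assert(std::is_same<decltype(p), double>::value, "pow(float, int) yields double");
      float q = static_cast<float>(std::pow(x, 2));  // the cast pattern used in the fix above
      return (q == 2.25f) ? 0 : 1;
    }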
@@ -34,8 +34,8 @@ void test_conversion()
 float val1 = float(half(__half(0x3c00)));
 float val2 = float(half(__half(0x3c01)));
 float val3 = float(half(__half(0x3c02)));
-VERIFY_IS_EQUAL(half(0.5 * (val1 + val2)).x, 0x3c00);
-VERIFY_IS_EQUAL(half(0.5 * (val2 + val3)).x, 0x3c02);
+VERIFY_IS_EQUAL(half(0.5f * (val1 + val2)).x, 0x3c00);
+VERIFY_IS_EQUAL(half(0.5f * (val2 + val3)).x, 0x3c02);

 // Conversion from int.
 VERIFY_IS_EQUAL(half(-1).x, 0xbc00);
@@ -112,13 +112,13 @@ static void test_3d()
 Tensor<float, 3> mat1(2,3,7);
 Tensor<float, 3, RowMajor> mat2(2,3,7);

-float val = 1.0;
+float val = 1.0f;
 for (int i = 0; i < 2; ++i) {
 for (int j = 0; j < 3; ++j) {
 for (int k = 0; k < 7; ++k) {
 mat1(i,j,k) = val;
 mat2(i,j,k) = val;
-val += 1.0;
+val += 1.0f;
 }
 }
 }
@@ -142,7 +142,7 @@ static void test_3d()
 Tensor<float, 3, RowMajor> mat11(2,3,7);
 mat11 = mat2 / 3.14f;

-val = 1.0;
+val = 1.0f;
 for (int i = 0; i < 2; ++i) {
 for (int j = 0; j < 3; ++j) {
 for (int k = 0; k < 7; ++k) {
@@ -155,7 +155,7 @@ static void test_3d()
 VERIFY_IS_APPROX(mat9(i,j,k), val + 3.14f);
 VERIFY_IS_APPROX(mat10(i,j,k), val - 3.14f);
 VERIFY_IS_APPROX(mat11(i,j,k), val / 3.14f);
-val += 1.0;
+val += 1.0f;
 }
 }
 }
@@ -167,25 +167,25 @@ static void test_constants()
 Tensor<float, 3> mat2(2,3,7);
 Tensor<float, 3> mat3(2,3,7);

-float val = 1.0;
+float val = 1.0f;
 for (int i = 0; i < 2; ++i) {
 for (int j = 0; j < 3; ++j) {
 for (int k = 0; k < 7; ++k) {
 mat1(i,j,k) = val;
-val += 1.0;
+val += 1.0f;
 }
 }
 }
 mat2 = mat1.constant(3.14f);
 mat3 = mat1.cwiseMax(7.3f).exp();

-val = 1.0;
+val = 1.0f;
 for (int i = 0; i < 2; ++i) {
 for (int j = 0; j < 3; ++j) {
 for (int k = 0; k < 7; ++k) {
 VERIFY_IS_APPROX(mat2(i,j,k), 3.14f);
 VERIFY_IS_APPROX(mat3(i,j,k), expf((std::max)(val, 7.3f)));
-val += 1.0;
+val += 1.0f;
 }
 }
 }
@@ -228,25 +228,25 @@ static void test_functors()
 Tensor<float, 3> mat2(2,3,7);
 Tensor<float, 3> mat3(2,3,7);

-float val = 1.0;
+float val = 1.0f;
 for (int i = 0; i < 2; ++i) {
 for (int j = 0; j < 3; ++j) {
 for (int k = 0; k < 7; ++k) {
 mat1(i,j,k) = val;
-val += 1.0;
+val += 1.0f;
 }
 }
 }
 mat2 = mat1.inverse().unaryExpr(&asinf);
 mat3 = mat1.unaryExpr(&tanhf);

-val = 1.0;
+val = 1.0f;
 for (int i = 0; i < 2; ++i) {
 for (int j = 0; j < 3; ++j) {
 for (int k = 0; k < 7; ++k) {
 VERIFY_IS_APPROX(mat2(i,j,k), asinf(1.0f / mat1(i,j,k)));
 VERIFY_IS_APPROX(mat3(i,j,k), tanhf(mat1(i,j,k)));
-val += 1.0;
+val += 1.0f;
 }
 }
 }
@@ -205,15 +205,15 @@ static void test_fft_real_input_energy() {
 VERIFY_IS_EQUAL(output.dimension(i), input.dimension(i));
 }

-float energy_original = 0.0;
-float energy_after_fft = 0.0;
+RealScalar energy_original = 0.0;
+RealScalar energy_after_fft = 0.0;

 for (int i = 0; i < total_size; ++i) {
-energy_original += pow(std::abs(input(i)), 2);
+energy_original += numext::abs2(input(i));
 }

 for (int i = 0; i < total_size; ++i) {
-energy_after_fft += pow(std::abs(output(i)), 2);
+energy_after_fft += numext::abs2(output(i));
 }

 if(FFTDirection == FFT_FORWARD) {
@@ -188,13 +188,13 @@ static void test_3d()
 // VERIFY_IS_EQUAL((mat1.dimension(1)), 3);
 // VERIFY_IS_EQUAL((mat1.dimension(2)), 7);

-float val = 0.0;
+float val = 0.0f;
 for (int i = 0; i < 2; ++i) {
 for (int j = 0; j < 3; ++j) {
 for (int k = 0; k < 7; ++k) {
 mat1(i,j,k) = val;
 mat2(i,j,k) = val;
-val += 1.0;
+val += 1.0f;
 }
 }
 }
@@ -210,13 +210,13 @@ static void test_3d()
 // VERIFY_IS_EQUAL((mat3.dimension(2)), 7);


-val = 0.0;
+val = 0.0f;
 for (int i = 0; i < 2; ++i) {
 for (int j = 0; j < 3; ++j) {
 for (int k = 0; k < 7; ++k) {
 VERIFY_IS_APPROX(mat3(i,j,k), sqrtf(val));
 VERIFY_IS_APPROX(mat4(i,j,k), sqrtf(val));
-val += 1.0;
+val += 1.0f;
 }
 }
 }
@@ -226,12 +226,12 @@ static void test_3d()
 static void test_array()
 {
 TensorFixedSize<float, Sizes<2, 3, 7> > mat1;
-float val = 0.0;
+float val = 0.0f;
 for (int i = 0; i < 2; ++i) {
 for (int j = 0; j < 3; ++j) {
 for (int k = 0; k < 7; ++k) {
 mat1(i,j,k) = val;
-val += 1.0;
+val += 1.0f;
 }
 }
 }
@@ -239,12 +239,12 @@ static void test_array()
 TensorFixedSize<float, Sizes<2, 3, 7> > mat3;
 mat3 = mat1.pow(3.5f);

-val = 0.0;
+val = 0.0f;
 for (int i = 0; i < 2; ++i) {
 for (int j = 0; j < 3; ++j) {
 for (int k = 0; k < 7; ++k) {
 VERIFY_IS_APPROX(mat3(i,j,k), powf(val, 3.5f));
-val += 1.0;
+val += 1.0f;
 }
 }
 }
@@ -113,8 +113,8 @@ void testMatrixLogarithm(const MatrixType& A)

 MatrixType scaledA;
 RealScalar maxImagPartOfSpectrum = A.eigenvalues().imag().cwiseAbs().maxCoeff();
-if (maxImagPartOfSpectrum >= 0.9 * EIGEN_PI)
-scaledA = A * 0.9 * EIGEN_PI / maxImagPartOfSpectrum;
+if (maxImagPartOfSpectrum >= RealScalar(0.9 * EIGEN_PI))
+scaledA = A * RealScalar(0.9 * EIGEN_PI) / maxImagPartOfSpectrum;
 else
 scaledA = A;

@@ -24,7 +24,7 @@ void test2dRotation(double tol)
 s = std::sin(angle);
 B << c, s, -s, c;

-C = Apow(std::ldexp(angle,1) / EIGEN_PI);
+C = Apow(std::ldexp(angle,1) / T(EIGEN_PI));
 std::cout << "test2dRotation: i = " << i << " error powerm = " << relerr(C,B) << '\n';
 VERIFY(C.isApprox(B, tol));
 }