Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-01-24 14:45:14 +08:00)
Replace assert() by eigen_assert() (fixes bug #548).
This commit is contained in:
parent 35647b0133
commit 14e2ab02b5
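For context on why the substitution matters: eigen_assert() defaults to the plain assert(), but Eigen only defines the macro if the client has not already done so, so a project can install its own assertion handler before the first Eigen include. Checks that call assert() directly bypass that hook, which is what this commit corrects. Below is a minimal sketch of such an override, assuming a hypothetical client that prefers exceptions over aborting; the handler body and message text are illustrative, not part of Eigen.

// Sketch: define eigen_assert before the first Eigen header is included.
// Eigen falls back to the standard assert() only when this macro is absent.
#include <sstream>
#include <stdexcept>
#define eigen_assert(x)                                \
  do {                                                 \
    if (!(x)) {                                        \
      std::ostringstream msg;                          \
      msg << "Eigen assertion failed: " << #x;         \
      throw std::runtime_error(msg.str());             \
    }                                                  \
  } while (false)
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd a(2, 3);   // deliberately non-square
  try {
    a.determinant();         // trips eigen_assert(rows() == cols()); see the determinant() hunk below
  } catch (const std::runtime_error& e) {
    // the failed precondition is now catchable instead of terminating the process
  }
  return 0;
}

With the diff below applied, all of the checks shown route through that overridable macro instead of the plain assert().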
@@ -252,7 +252,7 @@ ComplexEigenSolver<MatrixType>&
 ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
 {
   // this code is inspired from Jampack
-  assert(matrix.cols() == matrix.rows());
+  eigen_assert(matrix.cols() == matrix.rows());
 
   // Do a complex Schur decomposition, A = U T U^*
   // The eigenvalues are on the diagonal of T.
@@ -49,7 +49,7 @@ ComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matri
   typedef MatrixType::RealScalar RealScalar; \
   typedef std::complex<RealScalar> ComplexScalar; \
 \
-  assert(matrix.cols() == matrix.rows()); \
+  eigen_assert(matrix.cols() == matrix.rows()); \
 \
   m_matUisUptodate = false; \
   if(matrix.cols() == 1) \
@@ -366,7 +366,7 @@ EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvect
 {
   using std::sqrt;
   using std::abs;
-  assert(matrix.cols() == matrix.rows());
+  eigen_assert(matrix.cols() == matrix.rows());
 
   // Reduce to real Schur form.
   m_realSchur.compute(matrix, computeEigenvectors);
@@ -291,7 +291,7 @@ template<typename _MatrixType> class HessenbergDecomposition
 template<typename MatrixType>
 void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp)
 {
-  assert(matA.rows()==matA.cols());
+  eigen_assert(matA.rows()==matA.cols());
   Index n = matA.rows();
   temp.resize(n);
   for (Index i = 0; i<n-1; ++i)
@@ -559,7 +559,7 @@ namespace Eigen {
 
   const Index dim = A_in.cols();
 
-  assert (A_in.rows()==dim && A_in.cols()==dim
+  eigen_assert (A_in.rows()==dim && A_in.cols()==dim
         && B_in.rows()==dim && B_in.cols()==dim
         && "Need square matrices of the same dimension");
 
@@ -245,7 +245,7 @@ template<typename _MatrixType> class RealSchur
 template<typename MatrixType>
 RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const MatrixType& matrix, bool computeU)
 {
-  assert(matrix.cols() == matrix.rows());
+  eigen_assert(matrix.cols() == matrix.rows());
   Index maxIters = m_maxIters;
   if (maxIters == -1)
     maxIters = m_maxIterationsPerRow * matrix.rows();
@@ -467,8 +467,8 @@ inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const V
 template<typename MatrixType>
 inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace)
 {
-  assert(im >= il);
-  assert(im <= iu-2);
+  eigen_assert(im >= il);
+  eigen_assert(im <= iu-2);
 
   const Index size = m_matT.cols();
 
@@ -48,7 +48,7 @@ RealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matrix<E
   typedef MatrixType::Scalar Scalar; \
   typedef MatrixType::RealScalar RealScalar; \
 \
-  assert(matrix.cols() == matrix.rows()); \
+  eigen_assert(matrix.cols() == matrix.rows()); \
 \
   lapack_int n = matrix.cols(), sdim, info; \
   lapack_int lda = matrix.outerStride(); \
@@ -91,7 +91,7 @@ template<typename Derived> struct determinant_impl<Derived, 4>
 template<typename Derived>
 inline typename internal::traits<Derived>::Scalar MatrixBase<Derived>::determinant() const
 {
-  assert(rows() == cols());
+  eigen_assert(rows() == cols());
   typedef typename internal::nested<Derived,Base::RowsAtCompileTime>::type Nested;
   return internal::determinant_impl<typename internal::remove_all<Nested>::type>::run(derived());
 }
@@ -38,7 +38,7 @@ struct traits<DGMRES<_MatrixType,_Preconditioner> >
 template <typename VectorType, typename IndexType>
 void sortWithPermutation (VectorType& vec, IndexType& perm, typename IndexType::Scalar& ncut)
 {
-  assert(vec.size() == perm.size());
+  eigen_assert(vec.size() == perm.size());
   typedef typename IndexType::Scalar Index;
   typedef typename VectorType::Scalar Scalar;
   Index n = vec.size();
@@ -525,4 +525,4 @@ struct solve_retval<DGMRES<_MatrixType, _Preconditioner>, Rhs>
 } // end namespace internal
 
 } // end namespace Eigen
-#endif
+#endif
@@ -52,7 +52,7 @@ namespace Eigen {
   VectorType w_new(precond.solve(v_new)); // initialize w_new
   // RealScalar beta; // will be initialized inside loop
   RealScalar beta_new2(v_new.dot(w_new));
-  assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
+  eigen_assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
   RealScalar beta_new(sqrt(beta_new2));
   const RealScalar beta_one(beta_new);
   v_new /= beta_new;
@@ -91,7 +91,7 @@ namespace Eigen {
   v_new -= alpha*v; // overwrite v_new
   w_new = precond.solve(v_new); // overwrite w_new
   beta_new2 = v_new.dot(w_new); // compute beta_new
-  assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
+  eigen_assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
   beta_new = sqrt(beta_new2); // compute beta_new
   v_new /= beta_new; // overwrite v_new for next iteration
   w_new /= beta_new; // overwrite w_new for next iteration
@@ -73,7 +73,7 @@ class IterScaling
 {
   int m = mat.rows();
   int n = mat.cols();
-  assert((m>0 && m == n) && "Please give a non - empty matrix");
+  eigen_assert((m>0 && m == n) && "Please give a non - empty matrix");
   m_left.resize(m);
   m_right.resize(n);
   m_left.setOnes();
@@ -182,4 +182,4 @@ class IterScaling
   int m_maxits; // Maximum number of iterations allowed
 };
 }
-#endif
+#endif
@@ -33,7 +33,7 @@ void covar(
   const Index n = r.cols();
   const Scalar tolr = tol * abs(r(0,0));
   Matrix< Scalar, Dynamic, 1 > wa(n);
-  assert(ipvt.size()==n);
+  eigen_assert(ipvt.size()==n);
 
   /* form the inverse of r in the full upper triangle of r. */
   l = -1;
@@ -26,7 +26,7 @@ LevenbergMarquardt<FunctorType>::minimizeOneStep(FVectorType &x)
   RealScalar temp, temp1,temp2;
   RealScalar ratio;
   RealScalar pnorm, xnorm, fnorm1, actred, dirder, prered;
-  assert(x.size()==n); // check the caller is not cheating us
+  eigen_assert(x.size()==n); // check the caller is not cheating us
 
   temp = 0.0; xnorm = 0.0;
   /* calculate the jacobian matrix. */
@@ -176,4 +176,4 @@ LevenbergMarquardt<FunctorType>::minimizeOneStep(FVectorType &x)
 
 } // end namespace Eigen
 
-#endif // EIGEN_LMONESTEP_H
+#endif // EIGEN_LMONESTEP_H
@@ -45,8 +45,8 @@ namespace internal {
   /* Function Body */
   const Scalar dwarf = (std::numeric_limits<Scalar>::min)();
   const Index n = qr.matrixQR().cols();
-  assert(n==diag.size());
-  assert(n==qtb.size());
+  eigen_assert(n==diag.size());
+  eigen_assert(n==qtb.size());
 
   VectorType wa1, wa2;
 
@@ -257,7 +257,7 @@ LevenbergMarquardt<FunctorType>::minimizeInit(FVectorType &x)
   // m_fjac.reserve(VectorXi::Constant(n,5)); // FIXME Find a better alternative
   if (!m_useExternalScaling)
     m_diag.resize(n);
-  assert( (!m_useExternalScaling || m_diag.size()==n) || "When m_useExternalScaling is set, the caller must provide a valid 'm_diag'");
+  eigen_assert( (!m_useExternalScaling || m_diag.size()==n) || "When m_useExternalScaling is set, the caller must provide a valid 'm_diag'");
   m_qtf.resize(n);
 
   /* Function Body */
@@ -237,7 +237,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade3(MatrixType& result, const M
             0.8872983346207416885179265399782400L };
   const RealScalar weights[] = { 0.2777777777777777777777777777777778L, 0.4444444444444444444444444444444444L,
             0.2777777777777777777777777777777778L };
-  assert(degree <= maxPadeDegree);
+  eigen_assert(degree <= maxPadeDegree);
   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
   result.setZero(T.rows(), T.rows());
   for (int k = 0; k < degree; ++k)
@@ -253,7 +253,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade4(MatrixType& result, const M
             0.6699905217924281324013328795516223L, 0.9305681557970262876119732444464048L };
   const RealScalar weights[] = { 0.1739274225687269286865319746109997L, 0.3260725774312730713134680253890003L,
             0.3260725774312730713134680253890003L, 0.1739274225687269286865319746109997L };
-  assert(degree <= maxPadeDegree);
+  eigen_assert(degree <= maxPadeDegree);
   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
   result.setZero(T.rows(), T.rows());
   for (int k = 0; k < degree; ++k)
@@ -271,7 +271,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade5(MatrixType& result, const M
   const RealScalar weights[] = { 0.1184634425280945437571320203599587L, 0.2393143352496832340206457574178191L,
             0.2844444444444444444444444444444444L, 0.2393143352496832340206457574178191L,
             0.1184634425280945437571320203599587L };
-  assert(degree <= maxPadeDegree);
+  eigen_assert(degree <= maxPadeDegree);
   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
   result.setZero(T.rows(), T.rows());
   for (int k = 0; k < degree; ++k)
@@ -289,7 +289,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade6(MatrixType& result, const M
   const RealScalar weights[] = { 0.0856622461895851725201480710863665L, 0.1803807865240693037849167569188581L,
             0.2339569672863455236949351719947755L, 0.2339569672863455236949351719947755L,
             0.1803807865240693037849167569188581L, 0.0856622461895851725201480710863665L };
-  assert(degree <= maxPadeDegree);
+  eigen_assert(degree <= maxPadeDegree);
   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
   result.setZero(T.rows(), T.rows());
   for (int k = 0; k < degree; ++k)
@@ -309,7 +309,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade7(MatrixType& result, const M
             0.1909150252525594724751848877444876L, 0.2089795918367346938775510204081633L,
             0.1909150252525594724751848877444876L, 0.1398526957446383339507338857118898L,
             0.0647424830844348466353057163395410L };
-  assert(degree <= maxPadeDegree);
+  eigen_assert(degree <= maxPadeDegree);
   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
   result.setZero(T.rows(), T.rows());
   for (int k = 0; k < degree; ++k)
@@ -329,7 +329,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade8(MatrixType& result, const M
             0.1568533229389436436689811009933007L, 0.1813418916891809914825752246385978L,
             0.1813418916891809914825752246385978L, 0.1568533229389436436689811009933007L,
             0.1111905172266872352721779972131204L, 0.0506142681451881295762656771549811L };
-  assert(degree <= maxPadeDegree);
+  eigen_assert(degree <= maxPadeDegree);
   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
   result.setZero(T.rows(), T.rows());
   for (int k = 0; k < degree; ++k)
@@ -351,7 +351,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade9(MatrixType& result, const M
             0.1651196775006298815822625346434870L, 0.1561735385200014200343152032922218L,
             0.1303053482014677311593714347093164L, 0.0903240803474287020292360156214564L,
             0.0406371941807872059859460790552618L };
-  assert(degree <= maxPadeDegree);
+  eigen_assert(degree <= maxPadeDegree);
   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
   result.setZero(T.rows(), T.rows());
   for (int k = 0; k < degree; ++k)
@@ -373,7 +373,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade10(MatrixType& result, const
             0.1477621123573764350869464973256692L, 0.1477621123573764350869464973256692L,
             0.1346333596549981775456134607847347L, 0.1095431812579910219977674671140816L,
             0.0747256745752902965728881698288487L, 0.0333356721543440687967844049466659L };
-  assert(degree <= maxPadeDegree);
+  eigen_assert(degree <= maxPadeDegree);
   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
   result.setZero(T.rows(), T.rows());
   for (int k = 0; k < degree; ++k)
@@ -397,7 +397,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade11(MatrixType& result, const
             0.1314022722551233310903444349452546L, 0.1165968822959952399592618524215876L,
             0.0931451054638671257130488207158280L, 0.0627901847324523123173471496119701L,
             0.0278342835580868332413768602212743L };
-  assert(degree <= maxPadeDegree);
+  eigen_assert(degree <= maxPadeDegree);
   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
   result.setZero(T.rows(), T.rows());
   for (int k = 0; k < degree; ++k)
@@ -150,7 +150,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveInit(FVectorType &x)
   fjac.resize(n, n);
   if (!useExternalScaling)
     diag.resize(n);
-  assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
+  eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
 
   /* Function Body */
   nfev = 0;
@@ -187,7 +187,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
 {
   using std::abs;
 
-  assert(x.size()==n); // check the caller is not cheating us
+  eigen_assert(x.size()==n); // check the caller is not cheating us
 
   Index j;
   std::vector<JacobiRotation<Scalar> > v_givens(n), w_givens(n);
@@ -390,7 +390,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(FVectorType &
   fvec.resize(n);
   if (!useExternalScaling)
     diag.resize(n);
-  assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
+  eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
 
   /* Function Body */
   nfev = 0;
@@ -172,7 +172,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(FVectorType &x)
   fjac.resize(m, n);
   if (!useExternalScaling)
     diag.resize(n);
-  assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
+  eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
   qtf.resize(n);
 
   /* Function Body */
@@ -209,7 +209,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
   using std::abs;
   using std::sqrt;
 
-  assert(x.size()==n); // check the caller is not cheating us
+  eigen_assert(x.size()==n); // check the caller is not cheating us
 
   /* calculate the jacobian matrix. */
   Index df_ret = functor.df(x, fjac);
@@ -391,7 +391,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(FVectorType
   fjac.resize(n, n);
   if (!useExternalScaling)
     diag.resize(n);
-  assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
+  eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
   qtf.resize(n);
 
   /* Function Body */
@@ -429,7 +429,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
   using std::abs;
   using std::sqrt;
 
-  assert(x.size()==n); // check the caller is not cheating us
+  eigen_assert(x.size()==n); // check the caller is not cheating us
 
   Index i, j;
   bool sing;
@@ -20,7 +20,7 @@ void covar(
   const Index n = r.cols();
   const Scalar tolr = tol * abs(r(0,0));
   Matrix< Scalar, Dynamic, 1 > wa(n);
-  assert(ipvt.size()==n);
+  eigen_assert(ipvt.size()==n);
 
   /* form the inverse of r in the full upper triangle of r. */
   l = -1;
@@ -24,9 +24,9 @@ void dogleg(
   /* Function Body */
   const Scalar epsmch = NumTraits<Scalar>::epsilon();
   const Index n = qrfac.cols();
-  assert(n==qtb.size());
-  assert(n==x.size());
-  assert(n==diag.size());
+  eigen_assert(n==qtb.size());
+  eigen_assert(n==x.size());
+  eigen_assert(n==diag.size());
   Matrix< Scalar, Dynamic, 1 > wa1(n), wa2(n);
 
   /* first, calculate the gauss-newton direction. */
@@ -27,7 +27,7 @@ DenseIndex fdjac1(
   /* Function Body */
   const Scalar epsmch = NumTraits<Scalar>::epsilon();
   const Index n = x.size();
-  assert(fvec.size()==n);
+  eigen_assert(fvec.size()==n);
   Matrix< Scalar, Dynamic, 1 > wa1(n);
   Matrix< Scalar, Dynamic, 1 > wa2(n);
 
@@ -29,9 +29,9 @@ void lmpar(
   /* Function Body */
   const Scalar dwarf = (std::numeric_limits<Scalar>::min)();
   const Index n = r.cols();
-  assert(n==diag.size());
-  assert(n==qtb.size());
-  assert(n==x.size());
+  eigen_assert(n==diag.size());
+  eigen_assert(n==qtb.size());
+  eigen_assert(n==x.size());
 
   Matrix< Scalar, Dynamic, 1 > wa1, wa2;
 
@@ -187,8 +187,8 @@ void lmpar2(
   /* Function Body */
   const Scalar dwarf = (std::numeric_limits<Scalar>::min)();
   const Index n = qr.matrixQR().cols();
-  assert(n==diag.size());
-  assert(n==qtb.size());
+  eigen_assert(n==diag.size());
+  eigen_assert(n==qtb.size());
 
   Matrix< Scalar, Dynamic, 1 > wa1, wa2;
 
@@ -24,10 +24,10 @@ void r1updt(
 
   // r1updt had a broader usecase, but we dont use it here. And, more
   // importantly, we can not test it.
-  assert(m==n);
-  assert(u.size()==m);
-  assert(v.size()==n);
-  assert(w.size()==n);
+  eigen_assert(m==n);
+  eigen_assert(u.size()==m);
+  eigen_assert(v.size()==n);
+  eigen_assert(w.size()==n);
 
   /* move the nontrivial part of the last column of s into w. */
   w[n-1] = s(n-1,n-1);
@@ -12,7 +12,7 @@ void rwupdt(
   typedef DenseIndex Index;
 
   const Index n = r.cols();
-  assert(r.rows()>=n);
+  eigen_assert(r.rows()>=n);
   std::vector<JacobiRotation<Scalar> > givens(n);
 
   /* Local variables */
@@ -86,7 +86,7 @@ public:
         // do nothing
         break;
       default:
-        assert(false);
+        eigen_assert(false);
     };
 
     // Function Body
@@ -112,7 +112,7 @@ public:
           jac.col(j) = (val2-val1)/(2*h);
           break;
         default:
-          assert(false);
+          eigen_assert(false);
       };
     }
     return nfev;
@@ -344,7 +344,7 @@ class PolynomialSolver : public PolynomialSolverBase<_Scalar,_Deg>
     template< typename OtherPolynomial >
     void compute( const OtherPolynomial& poly )
     {
-      assert( Scalar(0) != poly[poly.size()-1] );
+      eigen_assert( Scalar(0) != poly[poly.size()-1] );
      internal::companion<Scalar,_Deg> companion( poly );
      companion.balance();
      m_eigenSolver.compute( companion.denseMatrix() );
@@ -376,7 +376,7 @@ class PolynomialSolver<_Scalar,1> : public PolynomialSolverBase<_Scalar,1>
     template< typename OtherPolynomial >
     void compute( const OtherPolynomial& poly )
     {
-      assert( Scalar(0) != poly[poly.size()-1] );
+      eigen_assert( Scalar(0) != poly[poly.size()-1] );
      m_roots[0] = -poly[0]/poly[poly.size()-1];
     }
 
@@ -78,7 +78,7 @@ typename NumTraits<typename Polynomial::Scalar>::Real cauchy_max_bound( const Po
   typedef typename Polynomial::Scalar Scalar;
   typedef typename NumTraits<Scalar>::Real Real;
 
-  assert( Scalar(0) != poly[poly.size()-1] );
+  eigen_assert( Scalar(0) != poly[poly.size()-1] );
   const Scalar inv_leading_coeff = Scalar(1)/poly[poly.size()-1];
   Real cb(0);
 
@@ -116,7 +116,7 @@ inline bool getMarketHeader(const std::string& filename, int& sym, bool& iscompl
 
   std::string line;
   // The matrix header is always the first line in the file
-  std::getline(in, line); assert(in.good());
+  std::getline(in, line); eigen_assert(in.good());
 
   std::stringstream fmtline(line);
   std::string substr[5];
@@ -200,11 +200,11 @@ bool loadMarketVector(VectorType& vec, const std::string& filename)
   int n(0), col(0);
   do
   { // Skip comments
-    std::getline(in, line); assert(in.good());
+    std::getline(in, line); eigen_assert(in.good());
   } while (line[0] == '%');
   std::istringstream newline(line);
   newline >> n >> col;
-  assert(n>0 && col>0);
+  eigen_assert(n>0 && col>0);
   vec.resize(n);
   int i = 0;
   Scalar value;