Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-02-23 18:20:47 +08:00)
port unsupported modules to new API
parent cab85218db
commit 39209edd71
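This commit is a mechanical port of the unsupported modules from the old `.cwise()` expression proxy to the current coefficient-wise API: `cwiseAbs()`, `cwiseAbs2()`, `cwiseProduct()`, `cwiseMax()`, and the `.array()` view for scalar additions and divisions. The sketch below only illustrates the mapping applied throughout the hunks that follow; the vectors and values are made up and are not part of the commit.

```cpp
#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

int main()
{
  VectorXd x(3), diag(3);
  x << 1.0, -2.0, 3.0;
  diag << 0.5, 0.5, 0.5;

  // old API                      new API (this commit)
  // x.cwise().abs()           -> x.cwiseAbs()
  // x.cwise().abs2()          -> x.cwiseAbs2()
  // diag.cwise() * x          -> diag.cwiseProduct(x)
  // diag.cwise().max(x)       -> diag.cwiseMax(x)
  // x.cwise() /= diag         -> x.array() /= diag.array()
  // ipvt.cwise() -= 1         -> ipvt.array() -= 1
  VectorXd scaled = diag.cwiseProduct(x); // coefficient-wise product
  double l1 = x.cwiseAbs().sum();         // sum of absolute values
  x.array() -= 1.0;                       // subtract a scalar from every coefficient
  std::cout << scaled.transpose() << "  " << l1 << "\n";
  return 0;
}
```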
@@ -31,16 +31,16 @@

 /** \ingroup MatrixFunctions_Module
  *
  * \brief Compute the matrix exponential.
  *
  * \param M matrix whose exponential is to be computed.
  * \param result pointer to the matrix in which to store the result.
  *
  * The matrix exponential of \f$ M \f$ is defined by
  * \f[ \exp(M) = \sum_{k=0}^\infty \frac{M^k}{k!}. \f]
  * The matrix exponential can be used to solve linear ordinary
  * differential equations: the solution of \f$ y' = My \f$ with the
  * initial condition \f$ y(0) = y_0 \f$ is given by
  * \f$ y(t) = \exp(M) y_0 \f$.
  *
  * The cost of the computation is approximately \f$ 20 n^3 \f$ for
@@ -54,17 +54,17 @@
  * squaring. The degree of the Padé approximant is chosen such
  * that the approximation error is less than the round-off
  * error. However, errors may accumulate during the squaring phase.
  *
  * Details of the algorithm can be found in: Nicholas J. Higham, "The
  * scaling and squaring method for the matrix exponential revisited,"
  * <em>SIAM J. %Matrix Anal. Applic.</em>, <b>26</b>:1179–1193,
  * 2005.
  *
  * Example: The following program checks that
  * \f[ \exp \left[ \begin{array}{ccc}
  * 0 & \frac14\pi & 0 \\
  * -\frac14\pi & 0 & 0 \\
  * 0 & 0 & 0
  * \end{array} \right] = \left[ \begin{array}{ccc}
  * \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
  * \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
@@ -76,11 +76,11 @@
  * \include MatrixExponential.cpp
  * Output: \verbinclude MatrixExponential.out
  *
  * \note \p M has to be a matrix of \c float, \c double,
  * \c complex<float> or \c complex<double> .
  */
 template <typename Derived>
 EIGEN_STRONG_INLINE void ei_matrix_exponential(const MatrixBase<Derived> &M,
 typename MatrixBase<Derived>::PlainMatrixType* result);

 /** \ingroup MatrixFunctions_Module
@@ -90,13 +90,13 @@ template <typename MatrixType>
 class MatrixExponential {

 public:

 /** \brief Compute the matrix exponential.
  *
  * \param M matrix whose exponential is to be computed.
  * \param result pointer to the matrix in which to store the result.
  */
 MatrixExponential(const MatrixType &M, MatrixType *result);

 private:

@@ -105,7 +105,7 @@ class MatrixExponential {
 MatrixExponential& operator=(const MatrixExponential&);

 /** \brief Compute the (3,3)-Padé approximant to the exponential.
  *
  * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
  * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
  *
@@ -114,7 +114,7 @@ class MatrixExponential {
 void pade3(const MatrixType &A);

 /** \brief Compute the (5,5)-Padé approximant to the exponential.
  *
  * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
  * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
  *
@@ -123,7 +123,7 @@ class MatrixExponential {
 void pade5(const MatrixType &A);

 /** \brief Compute the (7,7)-Padé approximant to the exponential.
  *
  * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
  * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
  *
@@ -132,7 +132,7 @@ class MatrixExponential {
 void pade7(const MatrixType &A);

 /** \brief Compute the (9,9)-Padé approximant to the exponential.
  *
  * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
  * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
  *
@@ -141,7 +141,7 @@ class MatrixExponential {
 void pade9(const MatrixType &A);

 /** \brief Compute the (13,13)-Padé approximant to the exponential.
  *
  * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
  * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
  *
@@ -149,10 +149,10 @@ class MatrixExponential {
  */
 void pade13(const MatrixType &A);

 /** \brief Compute Padé approximant to the exponential.
  *
  * Computes \c m_U, \c m_V and \c m_squarings such that
  * \f$ (V+U)(V-U)^{-1} \f$ is a Padé of
  * \f$ \exp(2^{-\mbox{squarings}}M) \f$ around \f$ M = 0 \f$. The
  * degree of the Padé approximant and the value of
  * squarings are chosen such that the approximation error is no
@@ -164,7 +164,7 @@ class MatrixExponential {
  */
 void computeUV(double);

 /** \brief Compute Padé approximant to the exponential.
  *
  * \sa computeUV(double);
  */
@@ -174,7 +174,7 @@ class MatrixExponential {
 typedef typename NumTraits<typename ei_traits<MatrixType>::Scalar>::Real RealScalar;

 /** \brief Pointer to matrix whose exponential is to be computed. */
 const MatrixType* m_M;

 /** \brief Even-degree terms in numerator of Padé approximant. */
 MatrixType m_U;
@@ -200,14 +200,14 @@ class MatrixExponential {

 template <typename MatrixType>
 MatrixExponential<MatrixType>::MatrixExponential(const MatrixType &M, MatrixType *result) :
 m_M(&M),
 m_U(M.rows(),M.cols()),
 m_V(M.rows(),M.cols()),
 m_tmp1(M.rows(),M.cols()),
 m_tmp2(M.rows(),M.cols()),
 m_Id(MatrixType::Identity(M.rows(), M.cols())),
 m_squarings(0),
-m_l1norm(static_cast<float>(M.cwise().abs().colwise().sum().maxCoeff()))
+m_l1norm(static_cast<float>(M.cwiseAbs().colwise().sum().maxCoeff()))
 {
 computeUV(RealScalar());
 m_tmp1 = m_U + m_V; // numerator of Pade approximant
@@ -267,8 +267,8 @@ EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade9(const MatrixType &
 template <typename MatrixType>
 EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade13(const MatrixType &A)
 {
 const Scalar b[] = {64764752532480000., 32382376266240000., 7771770303897600.,
 1187353796428800., 129060195264000., 10559470521600., 670442572800.,
 33522128640., 1323241920., 40840800., 960960., 16380., 182., 1.};
 MatrixType A2 = A * A;
 MatrixType A4 = A2 * A2;
@@ -317,7 +317,7 @@ void MatrixExponential<MatrixType>::computeUV(double)
 }

 template <typename Derived>
 EIGEN_STRONG_INLINE void ei_matrix_exponential(const MatrixBase<Derived> &M,
 typename MatrixBase<Derived>::PlainMatrixType* result)
 {
 ei_assert(M.rows() == M.cols());
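The example program MatrixExponential.cpp referenced in the documentation above is not part of this diff. The following is a hedged sketch of the documented interface, the free function `ei_matrix_exponential()` declared in the hunks above, applied to the quarter-pi example from the comment; the main() wrapper is an assumption, not part of the commit.

```cpp
#include <unsupported/Eigen/MatrixFunctions>
#include <iostream>
#include <cmath>
using namespace Eigen;

int main()
{
  const double pi = std::acos(-1.0);
  MatrixXd M(3,3), expM(3,3);
  M <<  0.0,  pi/4, 0.0,
       -pi/4, 0.0,  0.0,
        0.0,  0.0,  0.0;
  ei_matrix_exponential(M, &expM);  // result is written through the pointer argument
  std::cout << expM << std::endl;   // should reproduce the matrix shown in the documentation
  return 0;
}
```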
@@ -25,7 +25,7 @@
 #ifndef EIGEN_MATRIX_FUNCTION_ATOMIC
 #define EIGEN_MATRIX_FUNCTION_ATOMIC

 /** \ingroup MatrixFunctions_Module
  * \class MatrixFunctionAtomic
  * \brief Helper class for computing matrix functions of atomic matrices.
  *
@@ -110,30 +110,30 @@ void MatrixFunctionAtomic<MatrixType>::computeMu()
 const MatrixType N = MatrixType::Identity(m_Arows, m_Arows) - m_Ashifted;
 VectorType e = VectorType::Ones(m_Arows);
 N.template triangularView<UpperTriangular>().solveInPlace(e);
-m_mu = e.cwise().abs().maxCoeff();
+m_mu = e.cwiseAbs().maxCoeff();
 }

 /** \brief Determine whether Taylor series has converged */
 template <typename MatrixType>
 bool MatrixFunctionAtomic<MatrixType>::taylorConverged(int s, const MatrixType& F,
 const MatrixType& Fincr, const MatrixType& P)
 {
 const int n = F.rows();
-const RealScalar F_norm = F.cwise().abs().rowwise().sum().maxCoeff();
-const RealScalar Fincr_norm = Fincr.cwise().abs().rowwise().sum().maxCoeff();
+const RealScalar F_norm = F.cwiseAbs().rowwise().sum().maxCoeff();
+const RealScalar Fincr_norm = Fincr.cwiseAbs().rowwise().sum().maxCoeff();
 if (Fincr_norm < epsilon<Scalar>() * F_norm) {
 RealScalar delta = 0;
 RealScalar rfactorial = 1;
 for (int r = 0; r < n; r++) {
 RealScalar mx = 0;
 for (int i = 0; i < n; i++)
 mx = std::max(mx, std::abs(m_f(m_Ashifted(i, i) + m_avgEival, s+r)));
 if (r != 0)
 rfactorial *= r;
 delta = std::max(delta, mx / rfactorial);
 }
-const RealScalar P_norm = P.cwise().abs().rowwise().sum().maxCoeff();
+const RealScalar P_norm = P.cwiseAbs().rowwise().sum().maxCoeff();
 if (m_mu * delta * P_norm < epsilon<Scalar>() * F_norm)
 return true;
 }
 return false;
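In the taylorConverged() hunk above, `F_norm`, `Fincr_norm` and `P_norm` are all built with the same pattern, `X.cwiseAbs().rowwise().sum().maxCoeff()`, which is the induced infinity norm

\f[ \|X\|_\infty = \max_i \sum_j |x_{ij}| . \f]

The port only changes the spelling from `X.cwise().abs()` to `X.cwiseAbs()`; the quantity driving the convergence test is unchanged.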
@@ -36,7 +36,7 @@
  *
  * The user must provide a subroutine which calculates the
  * functions. The Jacobian is either provided by the user, or approximated
  * using a forward-difference method.
  *
  */
 template<typename FunctorType, typename Scalar=double>
@@ -50,7 +50,7 @@ public:
 Running = -1,
 ImproperInputParameters = 0,
 RelativeErrorTooSmall = 1,
 TooManyFunctionEvaluation = 2,
 TolTooSmall = 3,
 NotMakingProgressJacobian = 4,
 NotMakingProgressIterations = 5,
@@ -156,7 +156,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::hybrj1(
 parameters.xtol = tol;
 diag.setConstant(n, 1.);
 return solve(
 x,
 2
 );
 }
@@ -241,7 +241,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(

 /* on the first iteration, calculate the norm of the scaled x */
 /* and initialize the step bound delta. */
-wa3 = diag.cwise() * x;
+wa3 = diag.cwiseProduct(x);
 xnorm = wa3.stableNorm();
 delta = parameters.factor * xnorm;
 if (delta == 0.)
@@ -285,7 +285,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(

 /* Computing MAX */
 if (mode != 2)
-diag = diag.cwise().max(wa2);
+diag = diag.cwiseMax(wa2);

 /* beginning of the inner loop. */

@@ -299,7 +299,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(

 wa1 = -wa1;
 wa2 = x + wa1;
-wa3 = diag.cwise() * wa1;
+wa3 = diag.cwiseProduct(wa1);
 pnorm = wa3.stableNorm();

 /* on the first iteration, adjust the initial step bound. */
@@ -364,7 +364,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(
 if (ratio >= Scalar(1e-4)) {
 /* successful iteration. update x, fvec, and their norms. */
 x = wa2;
-wa2 = diag.cwise() * x;
+wa2 = diag.cwiseProduct(x);
 fvec = wa4;
 xnorm = wa2.stableNorm();
 fnorm = fnorm1;
@@ -555,7 +555,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(

 /* on the first iteration, calculate the norm of the scaled x */
 /* and initialize the step bound delta. */
-wa3 = diag.cwise() * x;
+wa3 = diag.cwiseProduct(x);
 xnorm = wa3.stableNorm();
 delta = parameters.factor * xnorm;
 if (delta == 0.)
@@ -599,7 +599,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(

 /* Computing MAX */
 if (mode != 2)
-diag = diag.cwise().max(wa2);
+diag = diag.cwiseMax(wa2);

 /* beginning of the inner loop. */

@@ -613,7 +613,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(

 wa1 = -wa1;
 wa2 = x + wa1;
-wa3 = diag.cwise() * wa1;
+wa3 = diag.cwiseProduct(wa1);
 pnorm = wa3.stableNorm();

 /* on the first iteration, adjust the initial step bound. */
@@ -678,7 +678,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(
 if (ratio >= Scalar(1e-4)) {
 /* successful iteration. update x, fvec, and their norms. */
 x = wa2;
-wa2 = diag.cwise() * x;
+wa2 = diag.cwiseProduct(x);
 fvec = wa4;
 xnorm = wa2.stableNorm();
 fnorm = fnorm1;
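The solver measures steps and iterates in the metric of the scaling vector `diag`; after the port, `diag.cwiseProduct(x)` followed by `stableNorm()` still computes the scaled norm

\f[ \|D x\| = \Big( \textstyle\sum_i (d_i x_i)^2 \Big)^{1/2}, \f]

exactly as `diag.cwise() * x` did before. An equivalent spelling, not used by the code and shown only for orientation, would be `(diag.asDiagonal() * x).stableNorm()`. The same remark applies to the Levenberg-Marquardt, dogleg and lmpar hunks below.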
@@ -37,7 +37,7 @@
  * http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm
  */
 template<typename FunctorType, typename Scalar=double>
 class LevenbergMarquardt
 {
 public:
 LevenbergMarquardt(FunctorType &_functor)
@@ -50,7 +50,7 @@ public:
 RelativeErrorTooSmall = 2,
 RelativeErrorAndReductionTooSmall = 3,
 CosinusTooSmall = 4,
 TooManyFunctionEvaluation = 5,
 FtolTooSmall = 6,
 XtolTooSmall = 7,
 GtolTooSmall = 8,
@@ -253,7 +253,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(

 wa2 = fjac.colwise().blueNorm();
 ei_qrfac<Scalar>(m, n, fjac.data(), fjac.rows(), true, ipvt.data(), wa1.data());
-ipvt.cwise()-=1; // qrfac() creates ipvt with fortran convention (1->n), convert it to c (0->n-1)
+ipvt.array() -= 1; // qrfac() creates ipvt with fortran convention (1->n), convert it to c (0->n-1)

 /* on the first iteration and if mode is 1, scale according */
 /* to the norms of the columns of the initial jacobian. */
@@ -269,7 +269,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
 /* on the first iteration, calculate the norm of the scaled x */
 /* and initialize the step bound delta. */

-wa3 = diag.cwise() * x;
+wa3 = diag.cwiseProduct(x);
 xnorm = wa3.stableNorm();
 delta = parameters.factor * xnorm;
 if (delta == 0.)
@@ -316,7 +316,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
 /* rescale if necessary. */

 if (mode != 2) /* Computing MAX */
-diag = diag.cwise().max(wa2);
+diag = diag.cwiseMax(wa2);

 /* beginning of the inner loop. */
 do {
@@ -329,7 +329,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(

 wa1 = -wa1;
 wa2 = x + wa1;
-wa3 = diag.cwise() * wa1;
+wa3 = diag.cwiseProduct(wa1);
 pnorm = wa3.stableNorm();

 /* on the first iteration, adjust the initial step bound. */
@@ -395,7 +395,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
 if (ratio >= Scalar(1e-4)) {
 /* successful iteration. update x, fvec, and their norms. */
 x = wa2;
-wa2 = diag.cwise() * x;
+wa2 = diag.cwiseProduct(x);
 fvec = wa4;
 xnorm = wa2.stableNorm();
 fnorm = fnorm1;
@@ -538,10 +538,10 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
 wa2[j] = fjac.col(j).head(j).stableNorm();
 }
 if (sing) {
-ipvt.cwise()+=1;
+ipvt.array() += 1;
 wa2 = fjac.colwise().blueNorm();
 ei_qrfac<Scalar>(n, n, fjac.data(), fjac.rows(), true, ipvt.data(), wa1.data());
-ipvt.cwise()-=1; // qrfac() creates ipvt with fortran convention (1->n), convert it to c (0->n-1)
+ipvt.array() -= 1; // qrfac() creates ipvt with fortran convention (1->n), convert it to c (0->n-1)
 for (j = 0; j < n; ++j) {
 if (fjac(j,j) != 0.) {
 sum = 0.;
@@ -569,7 +569,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
 /* on the first iteration, calculate the norm of the scaled x */
 /* and initialize the step bound delta. */

-wa3 = diag.cwise() * x;
+wa3 = diag.cwiseProduct(x);
 xnorm = wa3.stableNorm();
 delta = parameters.factor * xnorm;
 if (delta == 0.)
@@ -599,7 +599,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
 /* rescale if necessary. */

 if (mode != 2) /* Computing MAX */
-diag = diag.cwise().max(wa2);
+diag = diag.cwiseMax(wa2);

 /* beginning of the inner loop. */
 do {
@@ -612,7 +612,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(

 wa1 = -wa1;
 wa2 = x + wa1;
-wa3 = diag.cwise() * wa1;
+wa3 = diag.cwiseProduct(wa1);
 pnorm = wa3.stableNorm();

 /* on the first iteration, adjust the initial step bound. */
@@ -678,7 +678,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
 if (ratio >= Scalar(1e-4)) {
 /* successful iteration. update x, fvec, and their norms. */
 x = wa2;
-wa2 = diag.cwise() * x;
+wa2 = diag.cwiseProduct(x);
 fvec = wa4;
 xnorm = wa2.stableNorm();
 fnorm = fnorm1;
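For orientation, the NonLinearOptimization test further down in this diff drives LevenbergMarquardt roughly as sketched below. Only `Functor<double>`, the constructor taking a functor reference, `minimize()`, and the `fvec`/`nfev`/`njev` members appear in the diff; the functor name, problem sizes and residual model are placeholders.

```cpp
// Hedged usage sketch in the style of the test functors shown later in this diff.
struct my_functor : Functor<double>
{
  my_functor() : Functor<double>(3, 15) {}              // 3 parameters, 15 residuals (made-up sizes)
  int operator()(const VectorXd &b, VectorXd &fvec) const
  {
    for (int i = 0; i < values(); ++i)
      fvec[i] = 0.0;                                    // model(b, i) - data[i] would go here
    return 0;
  }
  int df(const VectorXd &b, MatrixXd &fjac) const
  {
    fjac.setZero();                                     // analytic Jacobian would go here
    return 0;
  }
};

void example()
{
  VectorXd x = VectorXd::Constant(3, 1.0);              // starting guess
  my_functor functor;
  LevenbergMarquardt<my_functor> lm(functor);
  int info = lm.minimize(x);                            // status code, as checked by the tests
  (void)info;                                           // lm.fvec, lm.nfev, lm.njev hold the results
}
```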
@@ -50,7 +50,7 @@ void ei_dogleg(
 /* test whether the gauss-newton direction is acceptable. */

 wa1.fill(0.);
-wa2 = diag.cwise() * x;
+wa2 = diag.cwiseProduct(x);
 qnorm = wa2.stableNorm();
 if (qnorm <= delta)
 return;
@@ -80,7 +80,7 @@ void ei_dogleg(
 /* calculate the point along the scaled gradient */
 /* at which the quadratic is minimized. */

-wa1.cwise() /= diag*gnorm;
+wa1.array() /= (diag*gnorm).array();
 l = 0;
 for (j = 0; j < n; ++j) {
 sum = 0.;
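The only ei_dogleg() line touched is the coefficient-wise division above: under the new API both operands have to be viewed as arrays, since a plain division of one matrix expression by another is not defined. A small self-contained illustration; the function name and the idea of wrapping it this way are made up for the example.

```cpp
#include <Eigen/Dense>
using namespace Eigen;

// old:  wa1.cwise() /= diag*gnorm;
// new:  wa1.array() /= (diag*gnorm).array();
void scale_by_gradient(VectorXd &wa1, const VectorXd &diag, double gnorm)
{
  wa1.array() /= (diag * gnorm).array();  // wa1[i] /= diag[i]*gnorm, as in the hunk above
}
```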
@@ -36,7 +36,7 @@ void ei_lmpar(
 for (j = 0; j < n; ++j) {
 if (r(j,j) == 0. && nsing == n-1)
 nsing = j - 1;
 if (nsing < n-1)
 wa1[j] = 0.;
 }
 for (j = nsing; j>=0; --j) {
@@ -54,7 +54,7 @@ void ei_lmpar(
 /* for acceptance of the gauss-newton direction. */

 iter = 0;
-wa2 = diag.cwise() * x;
+wa2 = diag.cwiseProduct(x);
 dxnorm = wa2.blueNorm();
 fp = dxnorm - delta;
 if (fp <= Scalar(0.1) * delta) {
@@ -76,7 +76,7 @@ void ei_lmpar(
 // way:
 for (j = 0; j < n; ++j) {
 Scalar sum = 0.;
 for (i = 0; i < j; ++i)
 sum += r(i,j) * wa1[i];
 wa1[j] = (wa1[j] - sum) / r(j,j);
 }
@@ -117,7 +117,7 @@ void ei_lmpar(
 Matrix< Scalar, Dynamic, 1 > sdiag(n);
 ei_qrsolv<Scalar>(r, ipvt, wa1, qtb, x, sdiag);

-wa2 = diag.cwise() * x;
+wa2 = diag.cwiseProduct(x);
 dxnorm = wa2.blueNorm();
 temp = fp;
 fp = dxnorm - delta;
@@ -45,7 +45,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(double, Dim)

 template<typename Scalar, int Dim> AlignedBox<Scalar, Dim> ei_bounding_box(const Matrix<Scalar, Dim, 1> &v) { return AlignedBox<Scalar, Dim>(v); }
 template<int Dim> AlignedBox<double, Dim> ei_bounding_box(const Ball<Dim> &b)
-{ return AlignedBox<double, Dim>(b.center.cwise() - b.radius, b.center.cwise() + b.radius); }
+{ return AlignedBox<double, Dim>(b.center.array() - b.radius, b.center.array() + b.radius); }


 template<int Dim>
@@ -20,10 +20,10 @@ int fcn_chkder(const VectorXd &x, VectorXd &fvec, MatrixXd &fjac, int iflag)
 3.9e-1, 3.7e-1, 5.8e-1, 7.3e-1, 9.6e-1, 1.34, 2.1, 4.39};


 if (iflag == 0)
 return 0;

 if (iflag != 2)
 for (i=0; i<15; i++) {
 tmp1 = i+1;
 tmp2 = 16-i-1;
@@ -108,12 +108,12 @@ struct Functor
 typedef Matrix<Scalar,InputsAtCompileTime,1> InputType;
 typedef Matrix<Scalar,ValuesAtCompileTime,1> ValueType;
 typedef Matrix<Scalar,ValuesAtCompileTime,InputsAtCompileTime> JacobianType;

 int m_inputs, m_values;

 Functor() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {}
 Functor(int inputs, int values) : m_inputs(inputs), m_values(values) {}

 int inputs() const { return m_inputs; }
 int values() const { return m_values; }

@@ -219,7 +219,7 @@ void testLmder()
 ei_covar(lm.fjac, lm.ipvt); // TODO : move this as a function of lm

 MatrixXd cov_ref(n,n);
 cov_ref <<
 0.0001531202, 0.002869941, -0.002656662,
 0.002869941, 0.09480935, -0.09098995,
 -0.002656662, -0.09098995, 0.08778727;
@@ -229,7 +229,7 @@ void testLmder()
 MatrixXd cov;
 cov = covfac*lm.fjac.corner<n,n>(TopLeft);
 VERIFY_IS_APPROX( cov, cov_ref);
 // TODO: why isn't this allowed ? :
 // VERIFY_IS_APPROX( covfac*fjac.corner<n,n>(TopLeft) , cov_ref);
 }

@@ -296,7 +296,7 @@ void testHybrj1()

 // check x
 VectorXd x_ref(n);
 x_ref <<
 -0.5706545, -0.6816283, -0.7017325,
 -0.7042129, -0.701369, -0.6918656,
 -0.665792, -0.5960342, -0.4164121;
@@ -330,7 +330,7 @@ void testHybrj()

 // check x
 VectorXd x_ref(n);
 x_ref <<
 -0.5706545, -0.6816283, -0.7017325,
 -0.7042129, -0.701369, -0.6918656,
 -0.665792, -0.5960342, -0.4164121;
@@ -412,7 +412,7 @@ void testHybrd()

 // check x
 VectorXd x_ref(n);
 x_ref <<
 -0.5706545, -0.6816283, -0.7017325,
 -0.7042129, -0.701369, -0.6918656,
 -0.665792, -0.5960342, -0.4164121;
@@ -608,7 +608,7 @@ void testLmdif()
 ei_covar(lm.fjac, lm.ipvt);

 MatrixXd cov_ref(n,n);
 cov_ref <<
 0.0001531202, 0.002869942, -0.002656662,
 0.002869942, 0.09480937, -0.09098997,
 -0.002656662, -0.09098997, 0.08778729;
@@ -618,7 +618,7 @@ void testLmdif()
 MatrixXd cov;
 cov = covfac*lm.fjac.corner<n,n>(TopLeft);
 VERIFY_IS_APPROX( cov, cov_ref);
 // TODO: why isn't this allowed ? :
 // VERIFY_IS_APPROX( covfac*fjac.corner<n,n>(TopLeft) , cov_ref);
 }

@@ -676,11 +676,11 @@ void testNistChwirut2(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 10 == lm.nfev);
 VERIFY( 8 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.1304802941E+02);
 // check x
 VERIFY_IS_APPROX(x[0], 1.6657666537E-01);
 VERIFY_IS_APPROX(x[1], 5.1653291286E-03);
@@ -697,11 +697,11 @@ void testNistChwirut2(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 7 == lm.nfev);
 VERIFY( 6 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.1304802941E+02);
 // check x
 VERIFY_IS_APPROX(x[0], 1.6657666537E-01);
 VERIFY_IS_APPROX(x[1], 5.1653291286E-03);
@@ -756,11 +756,11 @@ void testNistMisra1a(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 19 == lm.nfev);
 VERIFY( 15 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.2455138894E-01);
 // check x
 VERIFY_IS_APPROX(x[0], 2.3894212918E+02);
 VERIFY_IS_APPROX(x[1], 5.5015643181E-04);
@@ -773,11 +773,11 @@ void testNistMisra1a(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 5 == lm.nfev);
 VERIFY( 4 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.2455138894E-01);
 // check x
 VERIFY_IS_APPROX(x[0], 2.3894212918E+02);
 VERIFY_IS_APPROX(x[1], 5.5015643181E-04);
@@ -842,11 +842,11 @@ void testNistHahn1(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 11== lm.nfev);
 VERIFY( 10== lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.5324382854E+00);
 // check x
 VERIFY_IS_APPROX(x[0], 1.0776351733E+00 );
 VERIFY_IS_APPROX(x[1],-1.2269296921E-01 );
@@ -864,18 +864,18 @@ void testNistHahn1(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 11 == lm.nfev);
 VERIFY( 10 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.5324382854E+00);
 // check x
 VERIFY_IS_APPROX(x[0], 1.077640); // should be : 1.0776351733E+00
 VERIFY_IS_APPROX(x[1], -0.1226933); // should be : -1.2269296921E-01
 VERIFY_IS_APPROX(x[2], 0.004086383); // should be : 4.0863750610E-03
 VERIFY_IS_APPROX(x[3], -1.426277e-06); // shoulde be : -1.4262662514E-06
 VERIFY_IS_APPROX(x[4],-5.7609940901E-03 );
 VERIFY_IS_APPROX(x[5], 0.00024053772); // should be : 2.4053735503E-04
 VERIFY_IS_APPROX(x[6], -1.231450e-07); // should be : -1.2314450199E-07

 }
@@ -928,11 +928,11 @@ void testNistMisra1d(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 3 == info);
 VERIFY( 9 == lm.nfev);
 VERIFY( 7 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.6419295283E-02);
 // check x
 VERIFY_IS_APPROX(x[0], 4.3736970754E+02);
 VERIFY_IS_APPROX(x[1], 3.0227324449E-04);
@@ -945,11 +945,11 @@ void testNistMisra1d(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 4 == lm.nfev);
 VERIFY( 3 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.6419295283E-02);
 // check x
 VERIFY_IS_APPROX(x[0], 4.3736970754E+02);
 VERIFY_IS_APPROX(x[1], 3.0227324449E-04);
@@ -1006,9 +1006,9 @@ void testNistLanczos1(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 2 == info);
 VERIFY( 79 == lm.nfev);
 VERIFY( 72 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.429604433690E-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
 // check x
@@ -1027,9 +1027,9 @@ void testNistLanczos1(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 2 == info);
 VERIFY( 9 == lm.nfev);
 VERIFY( 8 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.43049947737308E-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
 // check x
@@ -1092,9 +1092,9 @@ void testNistRat42(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 10 == lm.nfev);
 VERIFY( 8 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.0565229338E+00);
 // check x
@@ -1110,9 +1110,9 @@ void testNistRat42(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 6 == lm.nfev);
 VERIFY( 5 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.0565229338E+00);
 // check x
@@ -1170,9 +1170,9 @@ void testNistMGH10(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 2 == info);
 VERIFY( 285 == lm.nfev);
 VERIFY( 250 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.7945855171E+01);
 // check x
@@ -1188,9 +1188,9 @@ void testNistMGH10(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 2 == info);
 VERIFY( 126 == lm.nfev);
 VERIFY( 116 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.7945855171E+01);
 // check x
@@ -1249,9 +1249,9 @@ void testNistBoxBOD(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 31 == lm.nfev);
 VERIFY( 25 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.1680088766E+03);
 // check x
@@ -1269,9 +1269,9 @@ void testNistBoxBOD(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 15 == lm.nfev);
 VERIFY( 14 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.1680088766E+03);
 // check x
@@ -1288,7 +1288,7 @@ struct MGH17_functor : Functor<double>
 {
 assert(b.size()==5);
 assert(fvec.size()==33);
 for(int i=0; i<33; i++)
 fvec[i] = b[0] + b[1]*exp(-b[3]*x[i]) + b[2]*exp(-b[4]*x[i]) - y[i];
 return 0;
 }
@@ -1331,9 +1331,9 @@ void testNistMGH17(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 599 == lm.nfev);
 VERIFY( 544 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.4648946975E-05);
 // check x
@@ -1352,9 +1352,9 @@ void testNistMGH17(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 18 == lm.nfev);
 VERIFY( 15 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.4648946975E-05);
 // check x
@@ -1418,9 +1418,9 @@ void testNistMGH09(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 503== lm.nfev);
 VERIFY( 385 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 3.0750560385E-04);
 // check x
@@ -1438,9 +1438,9 @@ void testNistMGH09(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 18 == lm.nfev);
 VERIFY( 16 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 3.0750560385E-04);
 // check x
@@ -1501,9 +1501,9 @@ void testNistBennett5(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 758 == lm.nfev);
 VERIFY( 744 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.2404744073E-04);
 // check x
@@ -1519,9 +1519,9 @@ void testNistBennett5(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 203 == lm.nfev);
 VERIFY( 192 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.2404744073E-04);
 // check x
@@ -1589,11 +1589,11 @@ void testNistThurber(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 39 == lm.nfev);
 VERIFY( 36== lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.6427082397E+03);
 // check x
 VERIFY_IS_APPROX(x[0], 1.2881396800E+03);
 VERIFY_IS_APPROX(x[1], 1.4910792535E+03);
@@ -1614,11 +1614,11 @@ void testNistThurber(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 29 == lm.nfev);
 VERIFY( 28 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.6427082397E+03);
 // check x
 VERIFY_IS_APPROX(x[0], 1.2881396800E+03);
 VERIFY_IS_APPROX(x[1], 1.4910792535E+03);
@@ -1681,9 +1681,9 @@ void testNistRat43(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 27 == lm.nfev);
 VERIFY( 20 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.7864049080E+03);
 // check x
@@ -1703,9 +1703,9 @@ void testNistRat43(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 9 == lm.nfev);
 VERIFY( 8 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.7864049080E+03);
 // check x
@@ -1766,9 +1766,9 @@ void testNistEckerle4(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 18 == lm.nfev);
 VERIFY( 15 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.4635887487E-03);
 // check x
@@ -1784,9 +1784,9 @@ void testNistEckerle4(void)
 info = lm.minimize(x);

 // check return value
 VERIFY( 1 == info);
 VERIFY( 7 == lm.nfev);
 VERIFY( 6 == lm.njev);
 // check norm^2
 VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.4635887487E-03);
 // check x
@@ -25,7 +25,7 @@
 #include "main.h"
 #include <unsupported/Eigen/MatrixFunctions>

 double binom(int n, int k)
 {
 double res = 1;
 for (int i=0; i<k; i++)
@@ -36,7 +36,7 @@ double binom(int n, int k)
 template <typename Derived, typename OtherDerived>
 double relerr(const MatrixBase<Derived>& A, const MatrixBase<OtherDerived>& B)
 {
-return std::sqrt((A - B).cwise().abs2().sum() / std::min(A.cwise().abs2().sum(), B.cwise().abs2().sum()));
+return std::sqrt((A - B).cwiseAbs2().sum() / std::min(A.cwiseAbs2().sum(), B.cwiseAbs2().sum()));
 }

 template <typename T>
@@ -52,7 +52,7 @@ void test2dRotation(double tol)
 T angle;

 A << 0, 1, -1, 0;
 for (int i=0; i<=20; i++)
 {
 angle = static_cast<T>(pow(10, i / 5. - 2));
 B << cos(angle), sin(angle), -sin(angle), cos(angle);
@@ -74,7 +74,7 @@ void test2dHyperbolicRotation(double tol)
 std::complex<T> imagUnit(0,1);
 T angle, ch, sh;

 for (int i=0; i<=20; i++)
 {
 angle = static_cast<T>((i-10) / 2.0);
 ch = std::cosh(angle);
@@ -116,7 +116,7 @@ void testPascal(double tol)
 }
 }

 template<typename MatrixType>
 void randomTest(const MatrixType& m, double tol)
 {
 /* this test covers the following files:
@@ -157,7 +157,7 @@ void test_matrix_exponential()
 CALL_SUBTEST_3(randomTest(Matrix4cd(), 1e-13));
 CALL_SUBTEST_4(randomTest(MatrixXd(8,8), 1e-13));
 CALL_SUBTEST_1(randomTest(Matrix2f(), 1e-4));
 CALL_SUBTEST_5(randomTest(Matrix3cf(), 1e-4));
 CALL_SUBTEST_1(randomTest(Matrix4f(), 1e-4));
 CALL_SUBTEST_6(randomTest(MatrixXf(8,8), 1e-4));
 }
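In the test's relerr() helper above, the ported expression `(A - B).cwiseAbs2().sum()` is just the squared Frobenius norm (an equivalent spelling would be `(A - B).squaredNorm()`), so the helper still returns the relative error

\f[ \mathrm{relerr}(A,B) = \sqrt{ \frac{\|A-B\|_F^2}{\min\big(\|A\|_F^2, \|B\|_F^2\big)} } . \f]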