Commit 2089a263f8 (mirror of https://gitlab.com/libeigen/eigen.git)
@@ -86,7 +86,7 @@ struct ei_dot_novec_unroller<Derived1, Derived2, Start, 1>
   inline static Scalar run(const Derived1& v1, const Derived2& v2)
   {
-    return v1.coeff(Start) * ei_conj(v2.coeff(Start));
+    return ei_conj(v1.coeff(Start)) * v2.coeff(Start);
   }
 };
 
@@ -155,9 +155,9 @@ struct ei_dot_impl<Derived1, Derived2, NoVectorization, NoUnrolling>
   {
     ei_assert(v1.size()>0 && "you are using a non initialized vector");
     Scalar res;
-    res = v1.coeff(0) * ei_conj(v2.coeff(0));
+    res = ei_conj(v1.coeff(0)) * v2.coeff(0);
     for(int i = 1; i < v1.size(); ++i)
-      res += v1.coeff(i) * ei_conj(v2.coeff(i));
+      res += ei_conj(v1.coeff(i)) * v2.coeff(i);
     return res;
   }
 };
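For readers skimming the hunks: the scalar fallback now accumulates conj(v1[i]) * v2[i] instead of v1[i] * conj(v2[i]). A minimal standalone sketch of the new convention in plain C++ (std::complex only, no Eigen internals; the function name is illustrative, not part of the patch):

    #include <complex>
    #include <cstddef>
    #include <vector>

    // dot(a, b) = sum_i conj(a[i]) * b[i]  -- conjugate-linear in a, linear in b
    std::complex<double> dot_conj_first(const std::vector<std::complex<double> >& a,
                                        const std::vector<std::complex<double> >& b)
    {
      std::complex<double> res(0.0, 0.0);
      for (std::size_t i = 0; i < a.size() && i < b.size(); ++i)
        res += std::conj(a[i]) * b[i];
      return res;
    }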
@@ -248,7 +248,7 @@ struct ei_dot_impl<Derived1, Derived2, LinearVectorization, CompleteUnrolling>
   * \only_for_vectors
   *
   * \note If the scalar type is complex numbers, then this function returns the hermitian
-  * (sesquilinear) dot product, linear in the first variable and conjugate-linear in the
+  * (sesquilinear) dot product, conjugate-linear in the first variable and linear in the
   * second variable.
   *
   * \sa squaredNorm(), norm()
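The reworded \note is the semantic core of the patch: with complex scalars, a factor pulled out of the first argument comes out conjugated, while a factor pulled out of the second argument comes out unchanged. A small hedged check against a current Eigen installation (illustrative sketch, not part of the patch):

    #include <Eigen/Dense>
    #include <cassert>
    #include <complex>

    void sesquilinear_demo()
    {
      typedef std::complex<double> Cplx;
      Eigen::Vector3cd v = Eigen::Vector3cd::Random();
      Eigen::Vector3cd w = Eigen::Vector3cd::Random();
      Cplx s(2.0, -1.0);

      // conjugate-linear in the first variable: (s*v).dot(w) == conj(s) * v.dot(w)
      assert(std::abs((s * v).dot(w) - std::conj(s) * v.dot(w)) < 1e-10);
      // linear in the second variable: v.dot(s*w) == s * v.dot(w)
      assert(std::abs(v.dot(s * w) - s * v.dot(w)) < 1e-10);
    }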
@@ -71,7 +71,7 @@ public:
     : m_coeffs(n.size()+1)
   {
     normal() = n;
-    offset() = -e.dot(n);
+    offset() = -n.dot(e);
   }
 
   /** Constructs a plane from its normal \a n and distance to the origin \a d
@@ -92,7 +92,7 @@ public:
   {
     Hyperplane result(p0.size());
     result.normal() = (p1 - p0).unitOrthogonal();
-    result.offset() = -result.normal().dot(p0);
+    result.offset() = -p0.dot(result.normal());
     return result;
   }
 
@@ -104,7 +104,7 @@ public:
     EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3)
     Hyperplane result(p0.size());
     result.normal() = (p2 - p0).cross(p1 - p0).normalized();
-    result.offset() = -result.normal().dot(p0);
+    result.offset() = -p0.dot(result.normal());
     return result;
   }
 
@@ -116,7 +116,7 @@ public:
   explicit Hyperplane(const ParametrizedLine<Scalar, AmbientDimAtCompileTime>& parametrized)
   {
     normal() = parametrized.direction().unitOrthogonal();
-    offset() = -normal().dot(parametrized.origin());
+    offset() = -parametrized.origin().dot(normal());
   }
 
   ~Hyperplane() {}
@@ -133,7 +133,7 @@ public:
   /** \returns the signed distance between the plane \c *this and a point \a p.
     * \sa absDistance()
     */
-  inline Scalar signedDistance(const VectorType& p) const { return p.dot(normal()) + offset(); }
+  inline Scalar signedDistance(const VectorType& p) const { return normal().dot(p) + offset(); }
 
   /** \returns the absolute distance between the plane \c *this and a point \a p.
     * \sa signedDistance()
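All of the Hyperplane hunks follow one pattern: the stored offset and the signed distance still evaluate to normal.dot(point) plus offset, but the conjugated factor is now the first dot argument. A small hedged example with a real 2D plane (sketch against current Eigen, not part of the patch):

    #include <Eigen/Dense>
    #include <Eigen/Geometry>
    #include <cassert>
    #include <cmath>

    void hyperplane_demo()
    {
      Eigen::Vector2d n(0.0, 1.0);               // unit normal of the plane y = 2
      Eigen::Vector2d e(5.0, 2.0);               // a point on that plane
      Eigen::Hyperplane<double, 2> plane(n, e);  // constructor above: offset() = -n.dot(e) = -2

      Eigen::Vector2d p(1.0, 5.0);
      // signedDistance(p) = normal().dot(p) + offset() = 5 - 2 = 3
      assert(std::abs(plane.signedDistance(p) - 3.0) < 1e-12);
      assert(std::abs(plane.signedDistance(e)) < 1e-12);  // points on the plane are at distance 0
    }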
@@ -231,7 +231,7 @@ public:
     TransformTraits traits = Affine)
   {
     transform(t.linear(), traits);
-    offset() -= t.translation().dot(normal());
+    offset() -= normal().dot(t.translation());
     return *this;
   }
 
@@ -84,8 +84,8 @@ public:
     */
   RealScalar squaredDistance(const VectorType& p) const
   {
-    VectorType diff = p-origin();
-    return (diff - diff.dot(direction())* direction()).squaredNorm();
+    VectorType diff = p - origin();
+    return (diff - direction().dot(diff) * direction()).squaredNorm();
   }
   /** \returns the distance of a point \a p to its projection onto the line \c *this.
     * \sa squaredDistance()
@@ -94,7 +94,7 @@ public:
 
   /** \returns the projection of a point \a p onto the line \c *this. */
   VectorType projection(const VectorType& p) const
-  { return origin() + (p-origin()).dot(direction()) * direction(); }
+  { return origin() + direction().dot(p-origin()) * direction(); }
 
   Scalar intersection(const Hyperplane<_Scalar, _AmbientDim>& hyperplane);
 
@@ -148,8 +148,8 @@ inline ParametrizedLine<_Scalar, _AmbientDim>::ParametrizedLine(const Hyperplane
 template <typename _Scalar, int _AmbientDim>
 inline _Scalar ParametrizedLine<_Scalar, _AmbientDim>::intersection(const Hyperplane<_Scalar, _AmbientDim>& hyperplane)
 {
-  return -(hyperplane.offset()+origin().dot(hyperplane.normal()))
-          /(direction().dot(hyperplane.normal()));
+  return -(hyperplane.offset()+hyperplane.normal().dot(origin()))
+          / hyperplane.normal().dot(direction());
 }
 
 #endif // EIGEN_PARAMETRIZEDLINE_H
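A quick sanity check of the expression above (the usual derivation, not part of the patch): a point of the line is x(t) = origin + t*direction, and it lies on the hyperplane when normal.dot(x(t)) + offset = 0; solving for t gives t = -(offset + normal.dot(origin)) / normal.dot(direction). Both the old and the new code compute this value, the new one simply feeding the normal as the (conjugated) first argument of each dot product.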
@@ -358,7 +358,7 @@ inline Quaternion<Scalar>& Quaternion<Scalar>::setFromTwoVectors(const MatrixBas
 {
   Vector3 v0 = a.normalized();
   Vector3 v1 = b.normalized();
-  Scalar c = v0.dot(v1);
+  Scalar c = v1.dot(v0);
 
   // if dot == -1, vectors are nearly opposites
   // => accuraletly compute the rotation axis by computing the
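Worked instance for the line above (real vectors, so both orders give the same scalar): for unit vectors v0 = (1,0,0) and v1 = (0,1,0), c = v1.dot(v0) = v0.dot(v1) = 0 = cos 90 degrees; the swap only keeps the argument order consistent with the conjugate-first convention used elsewhere in the patch.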
@@ -50,7 +50,8 @@ void MatrixBase<Derived>::makeHouseholder(
     Scalar sign = coeff(0) / ei_abs(coeff(0));
     c0 = coeff(0) + sign * ei_sqrt(_squaredNorm);
   }
-  *essential = end(size()-1) / c0; // FIXME take advantage of fixed size
+  VectorBlock<Derived, EssentialPart::SizeAtCompileTime> tail(derived(), 1, size()-1);
+  *essential = tail / c0;
   const RealScalar c0abs2 = ei_abs2(c0);
   *beta = RealScalar(2) * c0abs2 / (c0abs2 + _squaredNorm - ei_abs2(coeff(0)));
 }
@@ -62,12 +63,10 @@ void MatrixBase<Derived>::applyHouseholderOnTheLeft(
   const RealScalar& beta)
 {
   Matrix<Scalar, 1, ColsAtCompileTime, PlainMatrixType::Options, 1, MaxColsAtCompileTime> tmp(cols());
-  tmp = row(0) + essential.adjoint() * block(1,0,rows()-1,cols());
-    // FIXME take advantage of fixed size
-    // FIXME play with lazy()
-    // FIXME maybe not a good idea to use matrix product
+  Block<Derived, EssentialPart::SizeAtCompileTime, Derived::ColsAtCompileTime> bottom(derived(), 1, 0, rows()-1, cols());
+  tmp = row(0) + essential.adjoint() * bottom;
   row(0) -= beta * tmp;
-  block(1,0,rows()-1,cols()) -= beta * essential * tmp;
+  bottom -= beta * essential * tmp;
 }
 
 template<typename Derived>
@@ -77,12 +76,10 @@ void MatrixBase<Derived>::applyHouseholderOnTheRight(
   const RealScalar& beta)
 {
   Matrix<Scalar, RowsAtCompileTime, 1, PlainMatrixType::Options, MaxRowsAtCompileTime, 1> tmp(rows());
-  tmp = col(0) + block(0,1,rows(),cols()-1) * essential.conjugate();
-    // FIXME take advantage of fixed size
-    // FIXME play with lazy()
-    // FIXME maybe not a good idea to use matrix product
+  Block<Derived, Derived::RowsAtCompileTime, EssentialPart::SizeAtCompileTime> right(derived(), 0, 1, rows(), cols()-1);
+  tmp = col(0) + right * essential.conjugate();
   col(0) -= beta * tmp;
-  block(0,1,rows(),cols()-1) -= beta * tmp * essential.transpose();
+  right -= beta * tmp * essential.transpose();
 }
 
 #endif // EIGEN_HOUSEHOLDER_H
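Both helpers above apply the reflector H = I - beta * v * v^H, with v formed by stacking a leading 1 on top of the essential part, from the left or from the right. A self-contained sketch of the left-hand update they perform, written against current Eigen block helpers rather than the member functions touched here (assumed semantics, illustrative only; essential is assumed to have M.rows()-1 entries):

    #include <Eigen/Dense>
    #include <complex>

    // Apply H = I - beta * v * v^H to M from the left, where v = [1; essential].
    // Mirrors the two-step update above: tmp = first row + essential^H * bottom rows,
    // then subtract beta*tmp from row 0 and beta*essential*tmp from the bottom rows.
    void apply_householder_left(Eigen::MatrixXcd& M,
                                const Eigen::VectorXcd& essential,
                                std::complex<double> beta)
    {
      Eigen::RowVectorXcd tmp = M.row(0) + essential.adjoint() * M.bottomRows(M.rows() - 1);
      M.row(0) -= beta * tmp;
      M.bottomRows(M.rows() - 1) -= beta * essential * tmp;
    }

Comparing the result with (Eigen::MatrixXcd::Identity(n, n) - beta * v * v.adjoint()) * M, where v is the vector with a leading 1 followed by essential, should agree up to rounding.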
@@ -229,7 +229,7 @@ void Tridiagonalization<MatrixType>::_compute(MatrixType& matA, CoeffVectorType&
     hCoeffs.end(n-i-1) = (matA.corner(BottomRight,n-i-1,n-i-1).template selfadjointView<LowerTriangular>()
                         * (h * matA.col(i).end(n-i-1)));
 
-    hCoeffs.end(n-i-1) += (h*Scalar(-0.5)*(matA.col(i).end(n-i-1).dot(hCoeffs.end(n-i-1)))) * matA.col(i).end(n-i-1);
+    hCoeffs.end(n-i-1) += (h*Scalar(-0.5)*(hCoeffs.end(n-i-1).dot(matA.col(i).end(n-i-1)))) * matA.col(i).end(n-i-1);
 
     matA.corner(BottomRight, n-i-1, n-i-1).template selfadjointView<LowerTriangular>()
       .rankUpdate(matA.col(i).end(n-i-1), hCoeffs.end(n-i-1), -1);
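For context, this is the classical Householder tridiagonalization step (Golub and Van Loan style): with Householder vector v = matA.col(i).end(n-i-1) and coefficient h, the first statement forms p = h*A*v, the changed line forms w = p - (h/2)*(p^H v)*v, and rankUpdate(v, w, -1) performs the rank-2 update A <- A - v*w^H - w*v^H. Old and new code evaluate the same scalar p^H v; only which dot argument carries the conjugate changes.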
@@ -187,7 +187,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
       A(i, i)=f-g;
       for (j=l-1; j<n; j++)
       {
-        s = A.col(i).end(m-i).dot(A.col(j).end(m-i));
+        s = A.col(j).end(m-i).dot(A.col(i).end(m-i));
         f = s/h;
         A.col(j).end(m-i) += f*A.col(i).end(m-i);
       }
@@ -213,7 +213,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
       rv1.end(n-l+1) = A.row(i).end(n-l+1)/h;
       for (j=l-1; j<m; j++)
       {
-        s = A.row(j).end(n-l+1).dot(A.row(i).end(n-l+1));
+        s = A.row(i).end(n-l+1).dot(A.row(j).end(n-l+1));
         A.row(j).end(n-l+1) += s*rv1.end(n-l+1).transpose();
       }
       A.row(i).end(n-l+1) *= scale;
@@ -233,7 +233,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
           V(j, i) = (A(i, j)/A(i, l))/g;
         for (j=l; j<n; j++)
         {
-          s = A.row(i).end(n-l).dot(V.col(j).end(n-l));
+          s = V.col(j).end(n-l).dot(A.row(i).end(n-l));
           V.col(j).end(n-l) += s * V.col(i).end(n-l);
         }
       }
@@ -258,7 +258,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
       {
         for (j=l; j<n; j++)
         {
-          s = A.col(i).end(m-l).dot(A.col(j).end(m-l));
+          s = A.col(j).end(m-l).dot(A.col(i).end(m-l));
           f = (s/A(i,i))*g;
           A.col(j).end(m-i) += f * A.col(i).end(m-i);
         }
@@ -43,7 +43,7 @@ SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
   Scalar res = 0;
   while (i)
   {
-    res += i.value() * ei_conj(other.coeff(i.index()));
+    res += ei_conj(i.value()) * other.coeff(i.index());
     ++i;
   }
   return res;
@@ -69,7 +69,7 @@ SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) cons
   {
     if (i.index()==j.index())
     {
-      res += i.value() * ei_conj(j.value());
+      res += ei_conj(i.value()) * j.value();
       ++i; ++j;
     }
     else if (i.index()<j.index())
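The sparse overloads mirror the dense change: the conjugate moves from the other operand onto the values of the iterated first operand. A standalone sketch of the sparse-sparse case using index-sorted (index, value) pairs (plain C++, illustrative only, no Eigen iterators):

    #include <complex>
    #include <cstddef>
    #include <utility>
    #include <vector>

    // Dot product of two sparse vectors stored as index-sorted (index, value) pairs,
    // conjugating the first operand's values as in the new convention.
    std::complex<double> sparse_dot(const std::vector<std::pair<int, std::complex<double> > >& a,
                                    const std::vector<std::pair<int, std::complex<double> > >& b)
    {
      std::complex<double> res(0.0, 0.0);
      std::size_t i = 0, j = 0;
      while (i < a.size() && j < b.size())
      {
        if (a[i].first == b[j].first) { res += std::conj(a[i].second) * b[j].second; ++i; ++j; }
        else if (a[i].first < b[j].first) ++i;
        else ++j;
      }
      return res;
    }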
@@ -65,8 +65,8 @@ template<typename MatrixType> void adjoint(const MatrixType& m)
 
   // check basic properties of dot, norm, norm2
   typedef typename NumTraits<Scalar>::Real RealScalar;
-  VERIFY(ei_isApprox((s1 * v1 + s2 * v2).dot(v3), s1 * v1.dot(v3) + s2 * v2.dot(v3), largerEps));
-  VERIFY(ei_isApprox(v3.dot(s1 * v1 + s2 * v2), ei_conj(s1)*v3.dot(v1)+ei_conj(s2)*v3.dot(v2), largerEps));
+  VERIFY(ei_isApprox((s1 * v1 + s2 * v2).dot(v3), ei_conj(s1) * v1.dot(v3) + ei_conj(s2) * v2.dot(v3), largerEps));
+  VERIFY(ei_isApprox(v3.dot(s1 * v1 + s2 * v2), s1*v3.dot(v1)+s2*v3.dot(v2), largerEps));
   VERIFY_IS_APPROX(ei_conj(v1.dot(v2)), v2.dot(v1));
   VERIFY_IS_APPROX(ei_abs(v1.dot(v1)), v1.squaredNorm());
   if(NumTraits<Scalar>::HasFloatingPoint)
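A tiny worked instance of the unchanged identity conj(v1.dot(v2)) == v2.dot(v1) under the new convention: for the 1-vectors v1 = (i) and v2 = (1+i), v1.dot(v2) = conj(i)*(1+i) = 1 - i, while v2.dot(v1) = conj(1+i)*i = 1 + i, which is indeed the conjugate of 1 - i.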
@@ -86,7 +86,7 @@ template<typename MatrixType> void submatrices(const MatrixType& m)
 
   //check row() and col()
   VERIFY_IS_APPROX(m1.col(c1).transpose(), m1.transpose().row(c1));
-  VERIFY_IS_APPROX(square.row(r1).dot(m1.col(c1)), (square.lazy() * m1.conjugate())(r1,c1));
+  VERIFY_IS_APPROX(m1.col(c1).dot(square.row(r1)), (square.lazy() * m1.conjugate())(r1,c1));
   //check operator(), both constant and non-constant, on row() and col()
   m1.row(r1) += s1 * m1.row(r2);
   m1.col(c1) += s1 * m1.col(c2);
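Why the swapped form still matches the right-hand side: (square.lazy() * m1.conjugate())(r1,c1) expands to sum_k square(r1,k) * conj(m1(k,c1)); under the conjugate-first convention that sum is m1.col(c1).dot(square.row(r1)), whereas square.row(r1).dot(m1.col(c1)) would now conjugate the wrong factor.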