From 68d431b4c14ad60a778ee93c1f59ecc4b931950e Mon Sep 17 00:00:00 2001
Subject: [PATCH 1/2] Misc. typos

Found via `codespell -q 3 -I ../eigen-word-whitelist.txt` where the whitelist consists of:
```
als
ans
cas
dum
lastr
lowd
nd
overfl
pres
preverse
substraction
te
uint
whch
```
---
 CMakeLists.txt                                     | 26 +++++++++----------
 Eigen/src/Core/GenericPacketMath.h                 |  2 +-
 Eigen/src/SparseLU/SparseLU.h                      |  2 +-
 bench/bench_norm.cpp                               |  2 +-
 doc/HiPerformance.dox                              |  2 +-
 doc/QuickStartGuide.dox                            |  2 +-
 .../Eigen/CXX11/src/Tensor/TensorChipping.h        |  6 ++---
 .../Eigen/CXX11/src/Tensor/TensorDeviceGpu.h       |  2 +-
 .../src/Tensor/TensorForwardDeclarations.h         |  4 +--
 .../src/Tensor/TensorGpuHipCudaDefines.h           |  2 +-
 .../Eigen/CXX11/src/Tensor/TensorReduction.h       |  2 +-
 .../CXX11/src/Tensor/TensorReductionGpu.h          |  2 +-
 .../test/cxx11_tensor_concatenation.cpp            |  2 +-
 unsupported/test/cxx11_tensor_executor.cpp         |  2 +-
 14 files changed, 29 insertions(+), 29 deletions(-)
parent 77b447c24e
commit f67b19a884
@@ -327,7 +327,7 @@ else(NOT MSVC)
   # because we are oftentimes returning objects that have a destructor or may
   # throw exceptions - in particular in the unit tests we are throwing extra many
   # exceptions to cover indexing errors.
-  # C4505 - unreferenced local function has been removed (impossible to deactive selectively)
+  # C4505 - unreferenced local function has been removed (impossible to deactivate selectively)
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc /wd4127 /wd4505 /wd4714")
 
   # replace all /Wx by /W4
@@ -450,7 +450,7 @@ Packet pceil(const Packet& a) { using numext::ceil; return ceil(a); }
 * The following functions might not have to be overwritten for vectorized types
 ***************************************************************************/
 
-/** \internal copy a packet with constant coeficient \a a (e.g., [a,a,a,a]) to \a *to. \a to must be 16 bytes aligned */
+/** \internal copy a packet with constant coefficient \a a (e.g., [a,a,a,a]) to \a *to. \a to must be 16 bytes aligned */
 // NOTE: this function must really be templated on the packet type (think about different packet types for the same scalar type)
 template<typename Packet>
 inline void pstore1(typename unpacket_traits<Packet>::type* to, const typename unpacket_traits<Packet>::type& a)
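For readers less familiar with the packet primitives, here is a minimal sketch of what a pstore1-style broadcast store boils down to for a 4-float packet. This is an illustration using SSE intrinsics, not Eigen's actual kernel, and the function name is hypothetical:

```cpp
#include <immintrin.h>

// Hypothetical stand-in for pstore1 on a 4-float packet: broadcast one scalar
// into a packet [a,a,a,a] and store it to 16-byte-aligned memory, matching the
// alignment requirement in the doc comment above.
void store1_sse(float* to /* must be 16-byte aligned */, float a) {
  __m128 p = _mm_set1_ps(a);  // build the packet [a,a,a,a]
  _mm_store_ps(to, p);        // aligned store of all four lanes
}
```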
@@ -705,7 +705,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
       *
       * \internal
       */
-    // aliasing is dealt once in internall::call_assignment
+    // aliasing is dealt once in internal::call_assignment
     // so at this stage we have to assume aliasing... and resising has to be done later.
     template<typename OtherDerived>
     EIGEN_DEVICE_FUNC
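For context, a user-level illustration of the aliasing that this internal comment refers to; a sketch of documented Eigen usage, not the internal assignment path itself:

```cpp
#include <Eigen/Dense>

// The right-hand side reads from the same matrix being written, so the
// assignment must either go through a temporary or be known alias-free.
void aliasing_demo() {
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(3, 3);
  // m = m.transpose();       // aliasing bug: reads and writes m at once
  m = m.transpose().eval();   // safe: evaluate into a temporary first
}
```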
@@ -273,7 +273,7 @@ void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar
 
 namespace internal {
 /** \jacobi_module
-  * Applies the clock wise 2D rotation \a j to the set of 2D vectors of cordinates \a x and \a y:
+  * Applies the clock wise 2D rotation \a j to the set of 2D vectors of coordinates \a x and \a y:
   * \f$ \left ( \begin{array}{cc} x \\ y \end{array} \right )  =  J \left ( \begin{array}{cc} x \\ y \end{array} \right ) \f$
   *
   * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
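A minimal usage sketch of the public entry points named in the \sa line above, following the documented makeGivens() example: build a Givens rotation that zeroes one coordinate, then apply it on the left.

```cpp
#include <Eigen/Dense>
#include <Eigen/Jacobi>

// Zero out the second coordinate of a 2D vector with a Givens rotation.
void givens_demo() {
  Eigen::Vector2f v(3.0f, 4.0f);
  Eigen::JacobiRotation<float> j;
  j.makeGivens(v.x(), v.y());            // rotation mapping (3,4) to (5,0)
  v.applyOnTheLeft(0, 1, j.adjoint());   // v is now approximately (5, 0)
}
```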
@@ -26,7 +26,7 @@ template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixURetu
  * This class implements the supernodal LU factorization for general matrices.
  * It uses the main techniques from the sequential SuperLU package
  * (http://crd-legacy.lbl.gov/~xiaoye/SuperLU/). It handles transparently real
- * and complex arithmetics with single and double precision, depending on the
+ * and complex arithmetic with single and double precision, depending on the
  * scalar type of your input matrix.
  * The code has been optimized to provide BLAS-3 operations during supernode-panel updates.
  * It benefits directly from the built-in high-performant Eigen BLAS routines.
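For readers landing on this hunk, a minimal solve with SparseLU, sketched from the documented class usage; A is assumed square and invertible:

```cpp
#include <Eigen/Dense>
#include <Eigen/Sparse>

// Symbolic then numerical factorization, followed by a solve.
void sparselu_demo(const Eigen::SparseMatrix<double>& A,
                   const Eigen::VectorXd& b) {
  Eigen::SparseLU<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int>> solver;
  solver.analyzePattern(A);   // column permutation / symbolic factorization
  solver.factorize(A);        // numerical factorization
  if (solver.info() == Eigen::Success) {
    Eigen::VectorXd x = solver.solve(b);
  }
}
```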
@@ -134,7 +134,7 @@ EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v)
   iexp  = - ((iemax+it)/2);
   s2m   = std::pow(ibeta,iexp);   // scaling factor for upper range
 
-  overfl  = rbig*s2m;             // overfow boundary for abig
+  overfl  = rbig*s2m;             // overflow boundary for abig
   eps     = std::pow(ibeta, 1-it);
   relerr  = std::sqrt(eps);       // tolerance for neglecting asml
   abig    = 1.0/eps - 1.0;
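The benchmark code above computes the scaling constants of Blue's overflow-safe norm algorithm. At the user level the same trade-off shows up through documented Eigen API, as in this sketch:

```cpp
#include <Eigen/Dense>
#include <iostream>

// For entries near the top of the double range, the naive sum of squares
// overflows, while the rescaled variants stay finite.
void norm_demo() {
  Eigen::VectorXd v = Eigen::VectorXd::Constant(4, 1e200);
  std::cout << v.norm() << "\n";        // inf: 4e400 overflows in double
  std::cout << v.blueNorm() << "\n";    // 2e200: Blue's algorithm, as benchmarked above
  std::cout << v.stableNorm() << "\n";  // 2e200: scaled summation
}
```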
@@ -105,7 +105,7 @@ m1.noalias() += m2 * m3; \endcode</td>
 <td>First of all, here the .noalias() in the first expression is useless because
 m2*m3 will be evaluated anyway. However, note how this expression can be rewritten
 so that no temporary is required. (tip: for very small fixed size matrix
-it is slighlty better to rewrite it like this: m1.noalias() = m2 * m3; m1 += m4;</td>
+it is slightly better to rewrite it like this: m1.noalias() = m2 * m3; m1 += m4;</td>
 </tr>
 <tr class="alt">
 <td>\code
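The rewrite suggested by the tip in this hunk, as a compilable sketch; the m1..m4 names are taken from the documentation table:

```cpp
#include <Eigen/Dense>

// The product is written straight into m1 (no temporary, no aliasing since
// m1 does not appear on the right-hand side), then m4 is accumulated.
void noalias_demo(Eigen::Matrix4d& m1, const Eigen::Matrix4d& m2,
                  const Eigen::Matrix4d& m3, const Eigen::Matrix4d& m4) {
  m1.noalias() = m2 * m3;
  m1 += m4;
}
```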
@@ -66,7 +66,7 @@ The output is as follows:
 
 \section GettingStartedExplanation2 Explanation of the second example
 
-The second example starts by declaring a 3-by-3 matrix \c m which is initialized using the \link DenseBase::Random(Index,Index) Random() \endlink method with random values between -1 and 1. The next line applies a linear mapping such that the values are between 10 and 110. The function call \link DenseBase::Constant(Index,Index,const Scalar&) MatrixXd::Constant\endlink(3,3,1.2) returns a 3-by-3 matrix expression having all coefficients equal to 1.2. The rest is standard arithmetics.
+The second example starts by declaring a 3-by-3 matrix \c m which is initialized using the \link DenseBase::Random(Index,Index) Random() \endlink method with random values between -1 and 1. The next line applies a linear mapping such that the values are between 10 and 110. The function call \link DenseBase::Constant(Index,Index,const Scalar&) MatrixXd::Constant\endlink(3,3,1.2) returns a 3-by-3 matrix expression having all coefficients equal to 1.2. The rest is standard arithmetic.
 
 The next line of the \c main function introduces a new type: \c VectorXd. This represents a (column) vector of arbitrary size. Here, the vector \c v is created to contain \c 3 coefficients which are left uninitialized. The one but last line uses the so-called comma-initializer, explained in \ref TutorialAdvancedInitialization, to set all coefficients of the vector \c v to be as follows:
 
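The program described by this paragraph, reassembled as a sketch consistent with the tutorial text (the diff does not show the code itself): random values in [-1, 1] are mapped into [10, 110], and v is filled with the comma-initializer.

```cpp
#include <iostream>
#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(3, 3);         // entries in [-1, 1]
  m = (m + Eigen::MatrixXd::Constant(3, 3, 1.2)) * 50;       // entries in [10, 110]
  Eigen::VectorXd v(3);                                      // uninitialized
  v << 1, 2, 3;                                              // comma-initializer
  std::cout << "m * v =" << std::endl << m * v << std::endl;
}
```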
@@ -244,7 +244,7 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
       return rslt;
     } else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumInputDims - 1) ||
                (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) {
-      // m_stride is aways greater than index, so let's avoid the integer division.
+      // m_stride is always greater than index, so let's avoid the integer division.
       eigen_assert(m_stride > index);
       return m_impl.template packet<LoadMode>(index + m_inputOffset);
     } else {
@@ -377,7 +377,7 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
       inputIndex = index * m_inputStride + m_inputOffset;
     } else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumInputDims - 1) ||
                (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) {
-      // m_stride is aways greater than index, so let's avoid the integer
+      // m_stride is always greater than index, so let's avoid the integer
       // division.
       eigen_assert(m_stride > index);
       inputIndex = index + m_inputOffset;
@@ -462,7 +462,7 @@ struct TensorEvaluator<TensorChippingOp<DimId, ArgType>, Device>
       }
     } else if ((static_cast<int>(this->Layout) == static_cast<int>(ColMajor) && this->m_dim.actualDim() == NumInputDims-1) ||
                (static_cast<int>(this->Layout) == static_cast<int>(RowMajor) && this->m_dim.actualDim() == 0)) {
-      // m_stride is aways greater than index, so let's avoid the integer division.
+      // m_stride is always greater than index, so let's avoid the integer division.
       eigen_assert(this->m_stride > index);
       this->m_impl.template writePacket<StoreMode>(index + this->m_inputOffset, x);
     } else {
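All three chipping hunks lean on the same index identity. A self-contained sketch (not Eigen's exact code; names are illustrative) of why the integer division can be skipped when the chipped dimension is the innermost one:

```cpp
#include <cassert>

// General chip index mapping vs. the specialized branch: when
// 0 <= index < stride, index / stride == 0 and index % stride == index,
// so the mapping collapses to a single addition.
long chipped_input_index(long index, long stride,
                         long inputStride, long inputOffset) {
  assert(index >= 0 && index < stride);
  long general = (index / stride) * inputStride + (index % stride) + inputOffset;
  long fast = index + inputOffset;  // what the specialized branch computes
  assert(general == fast);
  return fast;
}
```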
@@ -12,7 +12,7 @@
 
 // This header file container defines fo gpu* macros which will resolve to
 // their equivalent hip* or cuda* versions depending on the compiler in use
-// A separte header (included at the end of this file) will undefine all
+// A separate header (included at the end of this file) will undefine all
 #include "TensorGpuHipCudaDefines.h"
 
 namespace Eigen {
@@ -25,9 +25,9 @@ template<typename T> struct MakePointer {
 };
 
 // The PointerType class is a container of the device specefic pointer
-// used for refering to a Pointer on TensorEvaluator class. While the TensorExpression
+// used for referring to a Pointer on TensorEvaluator class. While the TensorExpression
 // is a device-agnostic type and need MakePointer class for type conversion,
-// the TensorEvaluator calss can be specialized for a device, hence it is possible
+// the TensorEvaluator calls can be specialized for a device, hence it is possible
 // to construct different types of temproray storage memory in TensorEvaluator
 // for different devices by specializing the following PointerType class.
 template<typename T, typename Device> struct PointerType : MakePointer<T>{};
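A self-contained sketch of the specialization pattern the comment describes, using hypothetical names rather than Eigen's actual declarations: a primary template supplies the device-agnostic pointer type, and a per-device specialization substitutes something else.

```cpp
// Hypothetical device tags and traits, for illustration only.
struct HostDevice {};
struct SyclDevice {};

template <typename T>
struct MakePointerSketch { typedef T* Type; };

// Primary template: fall back to the plain device-agnostic pointer,
// e.g. PointerTypeSketch<float, HostDevice>::Type is float*.
template <typename T, typename Device>
struct PointerTypeSketch : MakePointerSketch<T> {};

// Device-specific override: swap in a wrapper that tags device memory.
template <typename T> struct DevicePtr { T* raw; };
template <typename T>
struct PointerTypeSketch<T, SyclDevice> { typedef DevicePtr<T> Type; };
```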
@@ -16,7 +16,7 @@
 // for some reason gets sent to the gcc/host compiler instead of the gpu/nvcc/hipcc compiler
 // When compiling such files, gcc will end up trying to pick up the CUDA headers by
 // default (see the code within "unsupported/Eigen/CXX11/Tensor" that is guarded by EIGEN_USE_GPU)
-// This will obsviously not work when trying to compile tensorflow on a sytem with no CUDA
+// This will obsviously not work when trying to compile tensorflow on a system with no CUDA
 // To work around this issue for HIP systems (and leave the default behaviour intact), the
 // HIP tensorflow build defines EIGEN_USE_HIP when compiling all source files, and
 // "unsupported/Eigen/CXX11/Tensor" has been updated to use HIP header when EIGEN_USE_HIP is
@@ -965,7 +965,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
         }
       }
 
-      // Intialize output coefficient reducers.
+      // Initialize output coefficient reducers.
       for (int i = 0; i < num_reducers; ++i) {
         new (&reducers[i]) BlockReducer(m_reducer);
       }
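The loop in this hunk uses placement new to construct reducers into pre-allocated storage. A minimal standalone sketch of that pattern (hypothetical Reducer type, not Eigen's BlockReducer): objects built into raw memory must later be destroyed explicitly, since no delete is ever called on such a buffer.

```cpp
#include <new>
#include <type_traits>

struct Reducer { /* accumulator state would live here */ };

void placement_demo() {
  // Raw, suitably aligned storage for four Reducer objects.
  std::aligned_storage<sizeof(Reducer), alignof(Reducer)>::type raw[4];
  Reducer* reducers = reinterpret_cast<Reducer*>(raw);
  for (int i = 0; i < 4; ++i) new (&reducers[i]) Reducer();  // construct in place
  for (int i = 0; i < 4; ++i) reducers[i].~Reducer();        // explicit destroy
}
```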
@@ -771,7 +771,7 @@ struct OuterReducer<Self, Op, GpuDevice> {
   // terminate called after throwing an instance of 'std::runtime_error'
   //   what():  No device code available for function: _ZN5Eigen8internal20OuterReductionKernelIL...
   //
-  // dont know why this happens (and why is it a runtime error instead of a compile time errror)
+  // don't know why this happens (and why is it a runtime error instead of a compile time error)
   //
   // this will be fixed by HIP PR#457
   EIGEN_DEVICE_FUNC
@@ -50,7 +50,7 @@ static void test_static_dimension_failure()
     .reshape(Tensor<int, 3>::Dimensions(2, 3, 1))
     .concatenate(right, 0);
   Tensor<int, 2, DataLayout> alternative = left
-  // Clang compiler break with {{{}}} with an ambigous error on copy constructor
+  // Clang compiler break with {{{}}} with an ambiguous error on copy constructor
   // the variadic DSize constructor added for #ifndef EIGEN_EMULATE_CXX11_META_H.
   // Solution:
   // either the code should change to
@@ -433,7 +433,7 @@ static void test_execute_slice_lvalue(Device d)
   Tensor<T, NumDims, Options, Index> slice(slice_size);
   slice.setRandom();
 
-  // Asign a slice using default executor.
+  // Assign a slice using default executor.
   Tensor<T, NumDims, Options, Index> golden = src;
   golden.slice(slice_start, slice_size) = slice;
 
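A standalone sketch of the slice-lvalue assignment this test exercises, using concrete sizes instead of the test's template parameters: writing through .slice() overwrites only the selected sub-block.

```cpp
#include <unsupported/Eigen/CXX11/Tensor>

// Overwrite a 2x2 block of a 4x4 tensor through a slice expression.
void slice_demo() {
  Eigen::Tensor<float, 2> src(4, 4);
  src.setZero();
  Eigen::Tensor<float, 2> block(2, 2);
  block.setRandom();
  Eigen::array<Eigen::Index, 2> start = {1, 1};   // top-left corner of the block
  Eigen::array<Eigen::Index, 2> extent = {2, 2};  // block dimensions
  src.slice(start, extent) = block;               // slice used as an lvalue
}
```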