Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-01-30 17:40:05 +08:00)
Added static assertions to avoid misuses of padding, broadcasting and concatenation ops.
commit 53432a17b2
parent 6857a35a11
@@ -99,6 +99,10 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
       : m_impl(op.expression(), device)
   {
+    // The broadcasting op doesn't change the rank of the tensor. One can't broadcast a scalar
+    // and store the result in a scalar. Instead one should reshape the scalar into an N-D
+    // tensor with N >= 1 of 1 element first and then broadcast.
+    EIGEN_STATIC_ASSERT(NumDims > 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
     const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
     const Broadcast& broadcast = op.broadcast();
     for (int i = 0; i < NumDims; ++i) {
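The added comment describes the intended workaround for broadcasting a scalar. As a rough illustration, separate from this commit, the following sketch uses the unsupported Eigen Tensor module's reshape and broadcast expressions; the tensor name, value, and broadcast factor are made up for the example.

#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  // A rank-0 (scalar) tensor cannot be broadcast directly; the static
  // assertion above would fire because NumDims is 0.
  Eigen::Tensor<float, 0> scalar;
  scalar() = 3.14f;

  // Reshape the scalar into a rank-1 tensor holding a single element ...
  Eigen::array<Eigen::Index, 1> one_dim{{1}};
  // ... and then broadcast it, e.g. into 5 copies along that dimension.
  Eigen::array<Eigen::Index, 1> bcast{{5}};
  Eigen::Tensor<float, 1> result = scalar.reshape(one_dim).broadcast(bcast);

  return 0;
}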
@@ -131,7 +131,9 @@ struct TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgTy
       : m_leftImpl(op.lhsExpression(), device), m_rightImpl(op.rhsExpression(), device), m_axis(op.axis())
   {
     EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout) || NumDims == 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
-    EIGEN_STATIC_ASSERT(NumDims == RightNumDims, YOU_MADE_A_PROGRAMMING_MISTAKE)
+    EIGEN_STATIC_ASSERT(NumDims == RightNumDims, YOU_MADE_A_PROGRAMMING_MISTAKE);
+    EIGEN_STATIC_ASSERT(NumDims > 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
+
     eigen_assert(0 <= m_axis && m_axis < NumDims);
     const Dimensions& lhs_dims = m_leftImpl.dimensions();
     const Dimensions& rhs_dims = m_rightImpl.dimensions();
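For the concatenation evaluator, the assertions require both operands to have the same layout and the same non-zero rank; only the sizes along the concatenation axis may differ. A minimal sketch of a valid use, not part of this commit and with illustrative shapes, using the Tensor module's concatenate expression:

#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  // Both operands are rank-2 tensors with matching sizes except along
  // the concatenation axis.
  Eigen::Tensor<float, 2> lhs(2, 3);
  Eigen::Tensor<float, 2> rhs(2, 5);
  lhs.setConstant(1.0f);
  rhs.setConstant(2.0f);

  // Concatenate along axis 1: the result is a 2 x 8 tensor.
  Eigen::Tensor<float, 2> result = lhs.concatenate(rhs, 1);

  return 0;
}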
@@ -98,6 +98,11 @@ struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
       : m_impl(op.expression(), device), m_padding(op.padding())
   {
+    // The padding op doesn't change the rank of the tensor. Directly padding a scalar would lead
+    // to a vector, which doesn't make sense. Instead one should reshape the scalar into a vector
+    // of 1 element first and then pad.
+    EIGEN_STATIC_ASSERT(NumDims > 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
+
     // Compute dimensions
     m_dimensions = m_impl.dimensions();
     for (int i = 0; i < NumDims; ++i) {
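As with broadcasting, the recommended pattern is to reshape a scalar into a 1-element vector before padding it. A hypothetical example, not part of this commit, using the Tensor module's reshape and pad expressions with made-up padding amounts:

#include <unsupported/Eigen/CXX11/Tensor>
#include <utility>

int main() {
  // Padding a rank-0 (scalar) tensor is rejected by the assertion above;
  // reshape it into a 1-element vector first, then pad that vector.
  Eigen::Tensor<float, 0> scalar;
  scalar() = 1.0f;

  Eigen::array<Eigen::Index, 1> one_dim{{1}};

  // Pad with 2 zeros before and 3 zeros after the single element.
  Eigen::array<std::pair<Eigen::Index, Eigen::Index>, 1> paddings;
  paddings[0] = std::make_pair(2, 3);

  // The result is a vector of length 2 + 1 + 3 = 6.
  Eigen::Tensor<float, 1> result = scalar.reshape(one_dim).pad(paddings);

  return 0;
}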