[AArch64] Use intrinsics for widening multiplies (PR91598)

Inline assembler instructions don't have latency info and the scheduler does
not attempt to schedule them at all; it does not even honor the latencies of
asm source operands.  As a result, SIMD intrinsics that are implemented using
inline assembler perform very poorly, particularly on in-order cores.
Add new patterns and intrinsics for widening multiplies, giving a 63% speedup
for the example in the PR and fixing the reported regression.
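
(Illustrative, not part of the commit: the kind of kernel affected looks
like the reduction below, where the widening multiply-accumulate now
expands to a real smlal insn with latency info instead of an opaque asm
block, so the scheduler can hide its latency.  Names are hypothetical,
not taken from the PR testcase.)

#include <arm_neon.h>

int32x4_t
mac_kernel (int32x4_t acc, const int16_t *p, int16x4_t coeff, int n)
{
  /* Assumes n is a multiple of 4.  */
  for (int i = 0; i < n; i += 4)
    {
      int16x4_t v = vld1_s16 (p + i);
      /* Widening multiply-accumulate by lane 0 of coeff.  */
      acc = vmlal_lane_s16 (acc, v, coeff, 0);
    }
  return acc;
}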

    gcc/
	PR target/91598
	* config/aarch64/aarch64-builtins.c (TYPES_TERNOPU_LANE): Add define.
	* config/aarch64/aarch64-simd.md
	(aarch64_vec_<su>mult_lane<Qlane>): Add new insn for widening lane mul.
	(aarch64_vec_<su>mlal_lane<Qlane>): Likewise.
	* config/aarch64/aarch64-simd-builtins.def: Add intrinsics.
	* config/aarch64/arm_neon.h:
	(vmlal_lane_s16): Expand using intrinsics rather than inline asm.
	(vmlal_lane_u16): Likewise.
	(vmlal_lane_s32): Likewise.
	(vmlal_lane_u32): Likewise.
	(vmlal_laneq_s16): Likewise.
	(vmlal_laneq_u16): Likewise.
	(vmlal_laneq_s32): Likewise.
	(vmlal_laneq_u32): Likewise.
	(vmull_lane_s16): Likewise.
	(vmull_lane_u16): Likewise.
	(vmull_lane_s32): Likewise.
	(vmull_lane_u32): Likewise.
	(vmull_laneq_s16): Likewise.
	(vmull_laneq_u16): Likewise.
	(vmull_laneq_s32): Likewise.
	(vmull_laneq_u32): Likewise.
	* config/aarch64/iterators.md (Vcondtype): New iterator for lane mul.
	(Qlane): Likewise.
commit 0b83932211
parent 3e5c062e96
Author: Wilco Dijkstra <wdijkstr@arm.com>
Date:   2020-03-06 18:29:02 +00:00

6 changed files with 185 additions and 200 deletions

gcc/ChangeLog

@@ -1,3 +1,31 @@
2020-03-06  Wilco Dijkstra  <wdijkstr@arm.com>

	PR target/91598
	* config/aarch64/aarch64-builtins.c (TYPES_TERNOPU_LANE): Add define.
	* config/aarch64/aarch64-simd.md
	(aarch64_vec_<su>mult_lane<Qlane>): Add new insn for widening lane mul.
	(aarch64_vec_<su>mlal_lane<Qlane>): Likewise.
	* config/aarch64/aarch64-simd-builtins.def: Add intrinsics.
	* config/aarch64/arm_neon.h:
	(vmlal_lane_s16): Expand using intrinsics rather than inline asm.
	(vmlal_lane_u16): Likewise.
	(vmlal_lane_s32): Likewise.
	(vmlal_lane_u32): Likewise.
	(vmlal_laneq_s16): Likewise.
	(vmlal_laneq_u16): Likewise.
	(vmlal_laneq_s32): Likewise.
	(vmlal_laneq_u32): Likewise.
	(vmull_lane_s16): Likewise.
	(vmull_lane_u16): Likewise.
	(vmull_lane_s32): Likewise.
	(vmull_lane_u32): Likewise.
	(vmull_laneq_s16): Likewise.
	(vmull_laneq_u16): Likewise.
	(vmull_laneq_s32): Likewise.
	(vmull_laneq_u32): Likewise.
	* config/aarch64/iterators.md (Vcondtype): New iterator for lane mul.
	(Qlane): Likewise.

2020-03-06  Wilco Dijkstra  <wdijkstr@arm.com>

	* aarch64/aarch64-simd.md (aarch64_mla_elt<mode>): Correct lane syntax.
gcc/config/aarch64/aarch64-builtins.c

@@ -175,6 +175,11 @@ aarch64_types_ternopu_qualifiers[SIMD_MAX_BUILTIN_ARGS]
      qualifier_unsigned, qualifier_unsigned };
#define TYPES_TERNOPU (aarch64_types_ternopu_qualifiers)
static enum aarch64_type_qualifiers
aarch64_types_ternopu_lane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
  = { qualifier_unsigned, qualifier_unsigned,
      qualifier_unsigned, qualifier_lane_index };
#define TYPES_TERNOPU_LANE (aarch64_types_ternopu_lane_qualifiers)
static enum aarch64_type_qualifiers
aarch64_types_ternopu_imm_qualifiers[SIMD_MAX_BUILTIN_ARGS]
  = { qualifier_unsigned, qualifier_unsigned,
      qualifier_unsigned, qualifier_immediate };

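(A hedged aside: TYPES_TERNOPU_LANE describes builtins taking unsigned
operands plus a lane index, the first qualifier being the return type.
Judging by the call sites added to arm_neon.h below, the prototype this
yields for the V4HI widening multiply is effectively the following; the
exact name mangling is generated by the builtin machinery.)

uint32x4_t __builtin_aarch64_vec_umult_lane_v4hi_uuus (uint16x4_t,
                                                       uint16x4_t,
                                                       const int);
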
gcc/config/aarch64/aarch64-simd-builtins.def

@@ -191,6 +191,15 @@
  BUILTIN_VQW (BINOP, vec_widen_smult_hi_, 10)
  BUILTIN_VQW (BINOPU, vec_widen_umult_hi_, 10)

  BUILTIN_VD_HSI (TERNOP_LANE, vec_smult_lane_, 0)
  BUILTIN_VD_HSI (QUADOP_LANE, vec_smlal_lane_, 0)
  BUILTIN_VD_HSI (TERNOP_LANE, vec_smult_laneq_, 0)
  BUILTIN_VD_HSI (QUADOP_LANE, vec_smlal_laneq_, 0)
  BUILTIN_VD_HSI (TERNOPU_LANE, vec_umult_lane_, 0)
  BUILTIN_VD_HSI (QUADOPU_LANE, vec_umlal_lane_, 0)
  BUILTIN_VD_HSI (TERNOPU_LANE, vec_umult_laneq_, 0)
  BUILTIN_VD_HSI (QUADOPU_LANE, vec_umlal_laneq_, 0)

  BUILTIN_VSD_HSI (BINOP, sqdmull, 0)
  BUILTIN_VSD_HSI (TERNOP_LANE, sqdmull_lane, 0)
  BUILTIN_VSD_HSI (TERNOP_LANE, sqdmull_laneq, 0)

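(For reference: BUILTIN_VD_HSI iterates over the 64-bit V4HI and V2SI
modes, so each new line registers two builtins; e.g. the vec_umult_lane_
entry yields __builtin_aarch64_vec_umult_lane_v4hi_uuus and
__builtin_aarch64_vec_umult_lane_v2si_uuus, both used by arm_neon.h
below.)
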
gcc/config/aarch64/aarch64-simd.md

@@ -1892,6 +1892,46 @@
  }
)

;; vmull_lane_s16 intrinsics
(define_insn "aarch64_vec_<su>mult_lane<Qlane>"
  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
	(mult:<VWIDE>
	  (ANY_EXTEND:<VWIDE>
	    (match_operand:<VCOND> 1 "register_operand" "w"))
	  (ANY_EXTEND:<VWIDE>
	    (vec_duplicate:<VCOND>
	      (vec_select:<VEL>
		(match_operand:VDQHS 2 "register_operand" "<vwx>")
		(parallel [(match_operand:SI 3 "immediate_operand" "i")]))))))]
  "TARGET_SIMD"
  {
    operands[3] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[3]));
    return "<su>mull\\t%0.<Vwtype>, %1.<Vcondtype>, %2.<Vetype>[%3]";
  }
  [(set_attr "type" "neon_mul_<Vetype>_scalar_long")]
)

;; vmlal_lane_s16 intrinsics
(define_insn "aarch64_vec_<su>mlal_lane<Qlane>"
  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
	(plus:<VWIDE>
	  (mult:<VWIDE>
	    (ANY_EXTEND:<VWIDE>
	      (match_operand:<VCOND> 2 "register_operand" "w"))
	    (ANY_EXTEND:<VWIDE>
	      (vec_duplicate:<VCOND>
		(vec_select:<VEL>
		  (match_operand:VDQHS 3 "register_operand" "<vwx>")
		  (parallel [(match_operand:SI 4 "immediate_operand" "i")])))))
	  (match_operand:<VWIDE> 1 "register_operand" "0")))]
  "TARGET_SIMD"
  {
    operands[4] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[4]));
    return "<su>mlal\\t%0.<Vwtype>, %2.<Vcondtype>, %3.<Vetype>[%4]";
  }
  [(set_attr "type" "neon_mla_<Vetype>_scalar_long")]
)

;; FP vector operations.
;; AArch64 AdvSIMD supports single-precision (32-bit) and
;; double-precision (64-bit) floating-point data types and arithmetic as

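(A hedged sketch of the effect, not commit text: with these patterns the
lane intrinsics compile to single, properly modelled instructions.  The
expected codegen is shown in comments; exact registers will vary.)

#include <arm_neon.h>

int32x4_t
widen_mul (int16x4_t a, int16x4_t b)
{
  return vmull_lane_s16 (a, b, 3);      /* smull v0.4s, v0.4h, v1.h[3] */
}

int32x4_t
widen_mla (int32x4_t acc, int16x4_t a, int16x4_t b)
{
  return vmlal_lane_s16 (acc, a, b, 3); /* smlal v0.4s, v1.4h, v2.h[3] */
}
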
gcc/config/aarch64/arm_neon.h

@@ -7700,117 +7700,61 @@ vmlal_high_u32 (uint64x2_t __a, uint32x4_t __b, uint32x4_t __c)
return __result;
}
#define vmlal_lane_s16(a, b, c, d) \
__extension__ \
({ \
int16x4_t c_ = (c); \
int16x4_t b_ = (b); \
int32x4_t a_ = (a); \
int32x4_t result; \
__asm__ ("smlal %0.4s,%2.4h,%3.h[%4]" \
: "=w"(result) \
: "0"(a_), "w"(b_), "x"(c_), "i"(d) \
: /* No clobbers */); \
result; \
})
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_lane_s16 (int32x4_t __acc, int16x4_t __a, int16x4_t __b, const int __c)
{
return __builtin_aarch64_vec_smlal_lane_v4hi (__acc, __a, __b, __c);
}
#define vmlal_lane_s32(a, b, c, d) \
__extension__ \
({ \
int32x2_t c_ = (c); \
int32x2_t b_ = (b); \
int64x2_t a_ = (a); \
int64x2_t result; \
__asm__ ("smlal %0.2d,%2.2s,%3.s[%4]" \
: "=w"(result) \
: "0"(a_), "w"(b_), "w"(c_), "i"(d) \
: /* No clobbers */); \
result; \
})
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_lane_s32 (int64x2_t __acc, int32x2_t __a, int32x2_t __b, const int __c)
{
return __builtin_aarch64_vec_smlal_lane_v2si (__acc, __a, __b, __c);
}
#define vmlal_lane_u16(a, b, c, d) \
__extension__ \
({ \
uint16x4_t c_ = (c); \
uint16x4_t b_ = (b); \
uint32x4_t a_ = (a); \
uint32x4_t result; \
__asm__ ("umlal %0.4s,%2.4h,%3.h[%4]" \
: "=w"(result) \
: "0"(a_), "w"(b_), "x"(c_), "i"(d) \
: /* No clobbers */); \
result; \
})
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_lane_u16 (uint32x4_t __acc, uint16x4_t __a, uint16x4_t __b, const int __c)
{
return __builtin_aarch64_vec_umlal_lane_v4hi_uuuus (__acc, __a, __b, __c);
}
#define vmlal_lane_u32(a, b, c, d) \
__extension__ \
({ \
uint32x2_t c_ = (c); \
uint32x2_t b_ = (b); \
uint64x2_t a_ = (a); \
uint64x2_t result; \
__asm__ ("umlal %0.2d, %2.2s, %3.s[%4]" \
: "=w"(result) \
: "0"(a_), "w"(b_), "w"(c_), "i"(d) \
: /* No clobbers */); \
result; \
})
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_lane_u32 (uint64x2_t __acc, uint32x2_t __a, uint32x2_t __b, const int __c)
{
return __builtin_aarch64_vec_umlal_lane_v2si_uuuus (__acc, __a, __b, __c);
}
#define vmlal_laneq_s16(a, b, c, d) \
__extension__ \
({ \
int16x8_t c_ = (c); \
int16x4_t b_ = (b); \
int32x4_t a_ = (a); \
int32x4_t result; \
__asm__ ("smlal %0.4s, %2.4h, %3.h[%4]" \
: "=w"(result) \
: "0"(a_), "w"(b_), "x"(c_), "i"(d) \
: /* No clobbers */); \
result; \
})
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_laneq_s16 (int32x4_t __acc, int16x4_t __a, int16x8_t __b, const int __c)
{
return __builtin_aarch64_vec_smlal_laneq_v4hi (__acc, __a, __b, __c);
}
#define vmlal_laneq_s32(a, b, c, d) \
__extension__ \
({ \
int32x4_t c_ = (c); \
int32x2_t b_ = (b); \
int64x2_t a_ = (a); \
int64x2_t result; \
__asm__ ("smlal %0.2d, %2.2s, %3.s[%4]" \
: "=w"(result) \
: "0"(a_), "w"(b_), "w"(c_), "i"(d) \
: /* No clobbers */); \
result; \
})
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_laneq_s32 (int64x2_t __acc, int32x2_t __a, int32x4_t __b, const int __c)
{
return __builtin_aarch64_vec_smlal_laneq_v2si (__acc, __a, __b, __c);
}
#define vmlal_laneq_u16(a, b, c, d) \
__extension__ \
({ \
uint16x8_t c_ = (c); \
uint16x4_t b_ = (b); \
uint32x4_t a_ = (a); \
uint32x4_t result; \
__asm__ ("umlal %0.4s, %2.4h, %3.h[%4]" \
: "=w"(result) \
: "0"(a_), "w"(b_), "x"(c_), "i"(d) \
: /* No clobbers */); \
result; \
})
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_laneq_u16 (uint32x4_t __acc, uint16x4_t __a, uint16x8_t __b, const int __c)
{
return __builtin_aarch64_vec_umlal_laneq_v4hi_uuuus (__acc, __a, __b, __c);
}
#define vmlal_laneq_u32(a, b, c, d) \
__extension__ \
({ \
uint32x4_t c_ = (c); \
uint32x2_t b_ = (b); \
uint64x2_t a_ = (a); \
uint64x2_t result; \
__asm__ ("umlal %0.2d, %2.2s, %3.s[%4]" \
: "=w"(result) \
: "0"(a_), "w"(b_), "w"(c_), "i"(d) \
: /* No clobbers */); \
result; \
})
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_laneq_u32 (uint64x2_t __acc, uint32x2_t __a, uint32x4_t __b, const int __c)
{
return __builtin_aarch64_vec_umlal_laneq_v2si_uuuus (__acc, __a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
@@ -9289,109 +9233,61 @@ vmull_high_u32 (uint32x4_t __a, uint32x4_t __b)
return __builtin_aarch64_vec_widen_umult_hi_v4si_uuu (__a, __b);
}
#define vmull_lane_s16(a, b, c) \
__extension__ \
({ \
int16x4_t b_ = (b); \
int16x4_t a_ = (a); \
int32x4_t result; \
__asm__ ("smull %0.4s,%1.4h,%2.h[%3]" \
: "=w"(result) \
: "w"(a_), "x"(b_), "i"(c) \
: /* No clobbers */); \
result; \
})
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
return __builtin_aarch64_vec_smult_lane_v4hi (__a, __b, __c);
}
#define vmull_lane_s32(a, b, c) \
__extension__ \
({ \
int32x2_t b_ = (b); \
int32x2_t a_ = (a); \
int64x2_t result; \
__asm__ ("smull %0.2d,%1.2s,%2.s[%3]" \
: "=w"(result) \
: "w"(a_), "w"(b_), "i"(c) \
: /* No clobbers */); \
result; \
})
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
return __builtin_aarch64_vec_smult_lane_v2si (__a, __b, __c);
}
#define vmull_lane_u16(a, b, c) \
__extension__ \
({ \
uint16x4_t b_ = (b); \
uint16x4_t a_ = (a); \
uint32x4_t result; \
__asm__ ("umull %0.4s,%1.4h,%2.h[%3]" \
: "=w"(result) \
: "w"(a_), "x"(b_), "i"(c) \
: /* No clobbers */); \
result; \
})
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
{
return __builtin_aarch64_vec_umult_lane_v4hi_uuus (__a, __b, __c);
}
#define vmull_lane_u32(a, b, c) \
__extension__ \
({ \
uint32x2_t b_ = (b); \
uint32x2_t a_ = (a); \
uint64x2_t result; \
__asm__ ("umull %0.2d, %1.2s, %2.s[%3]" \
: "=w"(result) \
: "w"(a_), "w"(b_), "i"(c) \
: /* No clobbers */); \
result; \
})
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
{
return __builtin_aarch64_vec_umult_lane_v2si_uuus (__a, __b, __c);
}
#define vmull_laneq_s16(a, b, c) \
__extension__ \
({ \
int16x8_t b_ = (b); \
int16x4_t a_ = (a); \
int32x4_t result; \
__asm__ ("smull %0.4s, %1.4h, %2.h[%3]" \
: "=w"(result) \
: "w"(a_), "x"(b_), "i"(c) \
: /* No clobbers */); \
result; \
})
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
{
return __builtin_aarch64_vec_smult_laneq_v4hi (__a, __b, __c);
}
#define vmull_laneq_s32(a, b, c) \
__extension__ \
({ \
int32x4_t b_ = (b); \
int32x2_t a_ = (a); \
int64x2_t result; \
__asm__ ("smull %0.2d, %1.2s, %2.s[%3]" \
: "=w"(result) \
: "w"(a_), "w"(b_), "i"(c) \
: /* No clobbers */); \
result; \
})
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
{
return __builtin_aarch64_vec_smult_laneq_v2si (__a, __b, __c);
}
#define vmull_laneq_u16(a, b, c) \
__extension__ \
({ \
uint16x8_t b_ = (b); \
uint16x4_t a_ = (a); \
uint32x4_t result; \
__asm__ ("umull %0.4s, %1.4h, %2.h[%3]" \
: "=w"(result) \
: "w"(a_), "x"(b_), "i"(c) \
: /* No clobbers */); \
result; \
})
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_laneq_u16 (uint16x4_t __a, uint16x8_t __b, const int __c)
{
return __builtin_aarch64_vec_umult_laneq_v4hi_uuus (__a, __b, __c);
}
#define vmull_laneq_u32(a, b, c) \
__extension__ \
({ \
uint32x4_t b_ = (b); \
uint32x2_t a_ = (a); \
uint64x2_t result; \
__asm__ ("umull %0.2d, %1.2s, %2.s[%3]" \
: "=w"(result) \
: "w"(a_), "w"(b_), "i"(c) \
: /* No clobbers */); \
result; \
})
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_laneq_u32 (uint32x2_t __a, uint32x4_t __b, const int __c)
{
return __builtin_aarch64_vec_umult_laneq_v2si_uuus (__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))

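(Behavioural note, hedged: the rewritten intrinsics keep the semantics of
the old asm macros; only the expansion changes.  A scalar reference model
of vmull_lane_s16, with illustrative names:)

#include <stdint.h>

/* Reference only: each element of a is widened to 32 bits and multiplied
   by the selected lane of b.  */
static void
vmull_lane_s16_ref (int32_t res[4], const int16_t a[4],
                    const int16_t b[4], int lane)
{
  for (int i = 0; i < 4; i++)
    res[i] = (int32_t) a[i] * (int32_t) b[lane];
}
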
gcc/config/aarch64/iterators.md

@@ -986,6 +986,13 @@
			 (V4SF "4s") (V2DF "2d")
			 (V4HF "4h") (V8HF "8h")])

;; Map mode to type used in widening multiplies.
(define_mode_attr Vcondtype [(V4HI "4h") (V8HI "4h") (V2SI "2s") (V4SI "2s")])

;; Map lane mode to name
(define_mode_attr Qlane [(V4HI "_v4hi") (V8HI "q_v4hi")
			 (V2SI "_v2si") (V4SI "q_v2si")])

(define_mode_attr Vrevsuff [(V4HI "16") (V8HI "16") (V2SI "32")
			    (V4SI "32") (V2DI "64")])