mirror of
git://gcc.gnu.org/git/gcc.git
synced 2025-04-05 23:41:08 +08:00
[ARM][GCC][3/4x]: MVE intrinsics with quaternary operands.
This patch supports following MVE ACLE intrinsics with quaternary operands. vmlaldavaq_p_s16, vmlaldavaq_p_s32, vmlaldavaq_p_u16, vmlaldavaq_p_u32, vmlaldavaxq_p_s16, vmlaldavaxq_p_s32, vmlaldavaxq_p_u16, vmlaldavaxq_p_u32, vmlsldavaq_p_s16, vmlsldavaq_p_s32, vmlsldavaxq_p_s16, vmlsldavaxq_p_s32, vmullbq_poly_m_p16, vmullbq_poly_m_p8, vmulltq_poly_m_p16, vmulltq_poly_m_p8, vqdmullbq_m_n_s16, vqdmullbq_m_n_s32, vqdmullbq_m_s16, vqdmullbq_m_s32, vqdmulltq_m_n_s16, vqdmulltq_m_n_s32, vqdmulltq_m_s16, vqdmulltq_m_s32, vqrshrnbq_m_n_s16, vqrshrnbq_m_n_s32, vqrshrnbq_m_n_u16, vqrshrnbq_m_n_u32, vqrshrntq_m_n_s16, vqrshrntq_m_n_s32, vqrshrntq_m_n_u16, vqrshrntq_m_n_u32, vqrshrunbq_m_n_s16, vqrshrunbq_m_n_s32, vqrshruntq_m_n_s16, vqrshruntq_m_n_s32, vqshrnbq_m_n_s16, vqshrnbq_m_n_s32, vqshrnbq_m_n_u16, vqshrnbq_m_n_u32, vqshrntq_m_n_s16, vqshrntq_m_n_s32, vqshrntq_m_n_u16, vqshrntq_m_n_u32, vqshrunbq_m_n_s16, vqshrunbq_m_n_s32, vqshruntq_m_n_s16, vqshruntq_m_n_s32, vrmlaldavhaq_p_s32, vrmlaldavhaq_p_u32, vrmlaldavhaxq_p_s32, vrmlsldavhaq_p_s32, vrmlsldavhaxq_p_s32, vrshrnbq_m_n_s16, vrshrnbq_m_n_s32, vrshrnbq_m_n_u16, vrshrnbq_m_n_u32, vrshrntq_m_n_s16, vrshrntq_m_n_s32, vrshrntq_m_n_u16, vrshrntq_m_n_u32, vshllbq_m_n_s16, vshllbq_m_n_s8, vshllbq_m_n_u16, vshllbq_m_n_u8, vshlltq_m_n_s16, vshlltq_m_n_s8, vshlltq_m_n_u16, vshlltq_m_n_u8, vshrnbq_m_n_s16, vshrnbq_m_n_s32, vshrnbq_m_n_u16, vshrnbq_m_n_u32, vshrntq_m_n_s16, vshrntq_m_n_s32, vshrntq_m_n_u16, vshrntq_m_n_u32. Please refer to M-profile Vector Extension (MVE) intrinsics [1] for more details. [1] https://developer.arm.com/architectures/instruction-sets/simd-isas/helium/mve-intrinsics 2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com> Mihail Ionescu <mihail.ionescu@arm.com> Srinath Parvathaneni <srinath.parvathaneni@arm.com> * config/arm/arm-protos.h (arm_mve_immediate_check): * config/arm/arm.c (arm_mve_immediate_check): Define fuction to check mode and interger value. 
* config/arm/arm_mve.h (vmlaldavaq_p_s32): Define macro. (vmlaldavaq_p_s16): Likewise. (vmlaldavaq_p_u32): Likewise. (vmlaldavaq_p_u16): Likewise. (vmlaldavaxq_p_s32): Likewise. (vmlaldavaxq_p_s16): Likewise. (vmlaldavaxq_p_u32): Likewise. (vmlaldavaxq_p_u16): Likewise. (vmlsldavaq_p_s32): Likewise. (vmlsldavaq_p_s16): Likewise. (vmlsldavaxq_p_s32): Likewise. (vmlsldavaxq_p_s16): Likewise. (vmullbq_poly_m_p8): Likewise. (vmullbq_poly_m_p16): Likewise. (vmulltq_poly_m_p8): Likewise. (vmulltq_poly_m_p16): Likewise. (vqdmullbq_m_n_s32): Likewise. (vqdmullbq_m_n_s16): Likewise. (vqdmullbq_m_s32): Likewise. (vqdmullbq_m_s16): Likewise. (vqdmulltq_m_n_s32): Likewise. (vqdmulltq_m_n_s16): Likewise. (vqdmulltq_m_s32): Likewise. (vqdmulltq_m_s16): Likewise. (vqrshrnbq_m_n_s32): Likewise. (vqrshrnbq_m_n_s16): Likewise. (vqrshrnbq_m_n_u32): Likewise. (vqrshrnbq_m_n_u16): Likewise. (vqrshrntq_m_n_s32): Likewise. (vqrshrntq_m_n_s16): Likewise. (vqrshrntq_m_n_u32): Likewise. (vqrshrntq_m_n_u16): Likewise. (vqrshrunbq_m_n_s32): Likewise. (vqrshrunbq_m_n_s16): Likewise. (vqrshruntq_m_n_s32): Likewise. (vqrshruntq_m_n_s16): Likewise. (vqshrnbq_m_n_s32): Likewise. (vqshrnbq_m_n_s16): Likewise. (vqshrnbq_m_n_u32): Likewise. (vqshrnbq_m_n_u16): Likewise. (vqshrntq_m_n_s32): Likewise. (vqshrntq_m_n_s16): Likewise. (vqshrntq_m_n_u32): Likewise. (vqshrntq_m_n_u16): Likewise. (vqshrunbq_m_n_s32): Likewise. (vqshrunbq_m_n_s16): Likewise. (vqshruntq_m_n_s32): Likewise. (vqshruntq_m_n_s16): Likewise. (vrmlaldavhaq_p_s32): Likewise. (vrmlaldavhaq_p_u32): Likewise. (vrmlaldavhaxq_p_s32): Likewise. (vrmlsldavhaq_p_s32): Likewise. (vrmlsldavhaxq_p_s32): Likewise. (vrshrnbq_m_n_s32): Likewise. (vrshrnbq_m_n_s16): Likewise. (vrshrnbq_m_n_u32): Likewise. (vrshrnbq_m_n_u16): Likewise. (vrshrntq_m_n_s32): Likewise. (vrshrntq_m_n_s16): Likewise. (vrshrntq_m_n_u32): Likewise. (vrshrntq_m_n_u16): Likewise. (vshllbq_m_n_s8): Likewise. (vshllbq_m_n_s16): Likewise. (vshllbq_m_n_u8): Likewise. 
(vshllbq_m_n_u16): Likewise. (vshlltq_m_n_s8): Likewise. (vshlltq_m_n_s16): Likewise. (vshlltq_m_n_u8): Likewise. (vshlltq_m_n_u16): Likewise. (vshrnbq_m_n_s32): Likewise. (vshrnbq_m_n_s16): Likewise. (vshrnbq_m_n_u32): Likewise. (vshrnbq_m_n_u16): Likewise. (vshrntq_m_n_s32): Likewise. (vshrntq_m_n_s16): Likewise. (vshrntq_m_n_u32): Likewise. (vshrntq_m_n_u16): Likewise. (__arm_vmlaldavaq_p_s32): Define intrinsic. (__arm_vmlaldavaq_p_s16): Likewise. (__arm_vmlaldavaq_p_u32): Likewise. (__arm_vmlaldavaq_p_u16): Likewise. (__arm_vmlaldavaxq_p_s32): Likewise. (__arm_vmlaldavaxq_p_s16): Likewise. (__arm_vmlaldavaxq_p_u32): Likewise. (__arm_vmlaldavaxq_p_u16): Likewise. (__arm_vmlsldavaq_p_s32): Likewise. (__arm_vmlsldavaq_p_s16): Likewise. (__arm_vmlsldavaxq_p_s32): Likewise. (__arm_vmlsldavaxq_p_s16): Likewise. (__arm_vmullbq_poly_m_p8): Likewise. (__arm_vmullbq_poly_m_p16): Likewise. (__arm_vmulltq_poly_m_p8): Likewise. (__arm_vmulltq_poly_m_p16): Likewise. (__arm_vqdmullbq_m_n_s32): Likewise. (__arm_vqdmullbq_m_n_s16): Likewise. (__arm_vqdmullbq_m_s32): Likewise. (__arm_vqdmullbq_m_s16): Likewise. (__arm_vqdmulltq_m_n_s32): Likewise. (__arm_vqdmulltq_m_n_s16): Likewise. (__arm_vqdmulltq_m_s32): Likewise. (__arm_vqdmulltq_m_s16): Likewise. (__arm_vqrshrnbq_m_n_s32): Likewise. (__arm_vqrshrnbq_m_n_s16): Likewise. (__arm_vqrshrnbq_m_n_u32): Likewise. (__arm_vqrshrnbq_m_n_u16): Likewise. (__arm_vqrshrntq_m_n_s32): Likewise. (__arm_vqrshrntq_m_n_s16): Likewise. (__arm_vqrshrntq_m_n_u32): Likewise. (__arm_vqrshrntq_m_n_u16): Likewise. (__arm_vqrshrunbq_m_n_s32): Likewise. (__arm_vqrshrunbq_m_n_s16): Likewise. (__arm_vqrshruntq_m_n_s32): Likewise. (__arm_vqrshruntq_m_n_s16): Likewise. (__arm_vqshrnbq_m_n_s32): Likewise. (__arm_vqshrnbq_m_n_s16): Likewise. (__arm_vqshrnbq_m_n_u32): Likewise. (__arm_vqshrnbq_m_n_u16): Likewise. (__arm_vqshrntq_m_n_s32): Likewise. (__arm_vqshrntq_m_n_s16): Likewise. (__arm_vqshrntq_m_n_u32): Likewise. (__arm_vqshrntq_m_n_u16): Likewise. 
(__arm_vqshrunbq_m_n_s32): Likewise. (__arm_vqshrunbq_m_n_s16): Likewise. (__arm_vqshruntq_m_n_s32): Likewise. (__arm_vqshruntq_m_n_s16): Likewise. (__arm_vrmlaldavhaq_p_s32): Likewise. (__arm_vrmlaldavhaq_p_u32): Likewise. (__arm_vrmlaldavhaxq_p_s32): Likewise. (__arm_vrmlsldavhaq_p_s32): Likewise. (__arm_vrmlsldavhaxq_p_s32): Likewise. (__arm_vrshrnbq_m_n_s32): Likewise. (__arm_vrshrnbq_m_n_s16): Likewise. (__arm_vrshrnbq_m_n_u32): Likewise. (__arm_vrshrnbq_m_n_u16): Likewise. (__arm_vrshrntq_m_n_s32): Likewise. (__arm_vrshrntq_m_n_s16): Likewise. (__arm_vrshrntq_m_n_u32): Likewise. (__arm_vrshrntq_m_n_u16): Likewise. (__arm_vshllbq_m_n_s8): Likewise. (__arm_vshllbq_m_n_s16): Likewise. (__arm_vshllbq_m_n_u8): Likewise. (__arm_vshllbq_m_n_u16): Likewise. (__arm_vshlltq_m_n_s8): Likewise. (__arm_vshlltq_m_n_s16): Likewise. (__arm_vshlltq_m_n_u8): Likewise. (__arm_vshlltq_m_n_u16): Likewise. (__arm_vshrnbq_m_n_s32): Likewise. (__arm_vshrnbq_m_n_s16): Likewise. (__arm_vshrnbq_m_n_u32): Likewise. (__arm_vshrnbq_m_n_u16): Likewise. (__arm_vshrntq_m_n_s32): Likewise. (__arm_vshrntq_m_n_s16): Likewise. (__arm_vshrntq_m_n_u32): Likewise. (__arm_vshrntq_m_n_u16): Likewise. (vmullbq_poly_m): Define polymorphic variant. (vmulltq_poly_m): Likewise. (vshllbq_m): Likewise. (vshrntq_m_n): Likewise. (vshrnbq_m_n): Likewise. (vshlltq_m_n): Likewise. (vshllbq_m_n): Likewise. (vrshrntq_m_n): Likewise. (vrshrnbq_m_n): Likewise. (vqshruntq_m_n): Likewise. (vqshrunbq_m_n): Likewise. (vqdmullbq_m_n): Likewise. (vqdmullbq_m): Likewise. (vqdmulltq_m_n): Likewise. (vqdmulltq_m): Likewise. (vqrshrnbq_m_n): Likewise. (vqrshrntq_m_n): Likewise. (vqrshrunbq_m_n): Likewise. (vqrshruntq_m_n): Likewise. (vqshrnbq_m_n): Likewise. (vqshrntq_m_n): Likewise. * config/arm/arm_mve_builtins.def (QUADOP_NONE_NONE_NONE_IMM_UNONE): Use builtin qualifiers. (QUADOP_NONE_NONE_NONE_NONE_UNONE): Likewise. (QUADOP_UNONE_UNONE_NONE_IMM_UNONE): Likewise. (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE): Likewise. 
(QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE): Likewise. * config/arm/mve.md (VMLALDAVAQ_P): Define iterator. (VMLALDAVAXQ_P): Likewise. (VQRSHRNBQ_M_N): Likewise. (VQRSHRNTQ_M_N): Likewise. (VQSHRNBQ_M_N): Likewise. (VQSHRNTQ_M_N): Likewise. (VRSHRNBQ_M_N): Likewise. (VRSHRNTQ_M_N): Likewise. (VSHLLBQ_M_N): Likewise. (VSHLLTQ_M_N): Likewise. (VSHRNBQ_M_N): Likewise. (VSHRNTQ_M_N): Likewise. (mve_vmlaldavaq_p_<supf><mode>): Define RTL pattern. (mve_vmlaldavaxq_p_<supf><mode>): Likewise. (mve_vqrshrnbq_m_n_<supf><mode>): Likewise. (mve_vqrshrntq_m_n_<supf><mode>): Likewise. (mve_vqshrnbq_m_n_<supf><mode>): Likewise. (mve_vqshrntq_m_n_<supf><mode>): Likewise. (mve_vrmlaldavhaq_p_sv4si): Likewise. (mve_vrshrnbq_m_n_<supf><mode>): Likewise. (mve_vrshrntq_m_n_<supf><mode>): Likewise. (mve_vshllbq_m_n_<supf><mode>): Likewise. (mve_vshlltq_m_n_<supf><mode>): Likewise. (mve_vshrnbq_m_n_<supf><mode>): Likewise. (mve_vshrntq_m_n_<supf><mode>): Likewise. (mve_vmlsldavaq_p_s<mode>): Likewise. (mve_vmlsldavaxq_p_s<mode>): Likewise. (mve_vmullbq_poly_m_p<mode>): Likewise. (mve_vmulltq_poly_m_p<mode>): Likewise. (mve_vqdmullbq_m_n_s<mode>): Likewise. (mve_vqdmullbq_m_s<mode>): Likewise. (mve_vqdmulltq_m_n_s<mode>): Likewise. (mve_vqdmulltq_m_s<mode>): Likewise. (mve_vqrshrunbq_m_n_s<mode>): Likewise. (mve_vqrshruntq_m_n_s<mode>): Likewise. (mve_vqshrunbq_m_n_s<mode>): Likewise. (mve_vqshruntq_m_n_s<mode>): Likewise. (mve_vrmlaldavhaq_p_uv4si): Likewise. (mve_vrmlaldavhaxq_p_sv4si): Likewise. (mve_vrmlsldavhaq_p_sv4si): Likewise. (mve_vrmlsldavhaxq_p_sv4si): Likewise. gcc/testsuite/ChangeLog: 2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com> Mihail Ionescu <mihail.ionescu@arm.com> Srinath Parvathaneni <srinath.parvathaneni@arm.com> * gcc.target/arm/mve/intrinsics/vmlaldavaq_p_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlaldavaq_p_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlaldavaq_p_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlaldavaq_p_u32.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vmlaldavaxq_p_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlaldavaxq_p_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlaldavaxq_p_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlaldavaxq_p_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsldavaq_p_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsldavaq_p_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsldavaxq_p_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsldavaxq_p_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmullbq_poly_m_p16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmullbq_poly_m_p8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulltq_poly_m_p16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulltq_poly_m_p8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmullbq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmullbq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmullbq_m_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmullbq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmulltq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmulltq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmulltq_m_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmulltq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshrunbq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshrunbq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshruntq_m_n_s16.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vqrshruntq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshrntq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshrntq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshrntq_m_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshrntq_m_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshrunbq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshrunbq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshruntq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshruntq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrmlaldavhaq_p_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrmlaldavhaq_p_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrmlaldavhaxq_p_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrmlsldavhaq_p_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrmlsldavhaxq_p_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrntq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrntq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrntq_m_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrntq_m_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vshllbq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshllbq_m_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vshllbq_m_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshllbq_m_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlltq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlltq_m_n_s8.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vshlltq_m_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlltq_m_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vshrnbq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshrnbq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vshrnbq_m_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshrnbq_m_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vshrntq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshrntq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vshrntq_m_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshrntq_m_n_u32.c: Likewise.
This commit is contained in:
parent
8eb3b6b9cf
commit
f2170a379b
230
gcc/ChangeLog
230
gcc/ChangeLog
@ -1,3 +1,233 @@
|
||||
2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
|
||||
Mihail Ionescu <mihail.ionescu@arm.com>
|
||||
Srinath Parvathaneni <srinath.parvathaneni@arm.com>
|
||||
|
||||
* config/arm/arm-protos.h (arm_mve_immediate_check):
|
||||
* config/arm/arm.c (arm_mve_immediate_check): Define function to check
|
||||
mode and integer value.
|
||||
* config/arm/arm_mve.h (vmlaldavaq_p_s32): Define macro.
|
||||
(vmlaldavaq_p_s16): Likewise.
|
||||
(vmlaldavaq_p_u32): Likewise.
|
||||
(vmlaldavaq_p_u16): Likewise.
|
||||
(vmlaldavaxq_p_s32): Likewise.
|
||||
(vmlaldavaxq_p_s16): Likewise.
|
||||
(vmlaldavaxq_p_u32): Likewise.
|
||||
(vmlaldavaxq_p_u16): Likewise.
|
||||
(vmlsldavaq_p_s32): Likewise.
|
||||
(vmlsldavaq_p_s16): Likewise.
|
||||
(vmlsldavaxq_p_s32): Likewise.
|
||||
(vmlsldavaxq_p_s16): Likewise.
|
||||
(vmullbq_poly_m_p8): Likewise.
|
||||
(vmullbq_poly_m_p16): Likewise.
|
||||
(vmulltq_poly_m_p8): Likewise.
|
||||
(vmulltq_poly_m_p16): Likewise.
|
||||
(vqdmullbq_m_n_s32): Likewise.
|
||||
(vqdmullbq_m_n_s16): Likewise.
|
||||
(vqdmullbq_m_s32): Likewise.
|
||||
(vqdmullbq_m_s16): Likewise.
|
||||
(vqdmulltq_m_n_s32): Likewise.
|
||||
(vqdmulltq_m_n_s16): Likewise.
|
||||
(vqdmulltq_m_s32): Likewise.
|
||||
(vqdmulltq_m_s16): Likewise.
|
||||
(vqrshrnbq_m_n_s32): Likewise.
|
||||
(vqrshrnbq_m_n_s16): Likewise.
|
||||
(vqrshrnbq_m_n_u32): Likewise.
|
||||
(vqrshrnbq_m_n_u16): Likewise.
|
||||
(vqrshrntq_m_n_s32): Likewise.
|
||||
(vqrshrntq_m_n_s16): Likewise.
|
||||
(vqrshrntq_m_n_u32): Likewise.
|
||||
(vqrshrntq_m_n_u16): Likewise.
|
||||
(vqrshrunbq_m_n_s32): Likewise.
|
||||
(vqrshrunbq_m_n_s16): Likewise.
|
||||
(vqrshruntq_m_n_s32): Likewise.
|
||||
(vqrshruntq_m_n_s16): Likewise.
|
||||
(vqshrnbq_m_n_s32): Likewise.
|
||||
(vqshrnbq_m_n_s16): Likewise.
|
||||
(vqshrnbq_m_n_u32): Likewise.
|
||||
(vqshrnbq_m_n_u16): Likewise.
|
||||
(vqshrntq_m_n_s32): Likewise.
|
||||
(vqshrntq_m_n_s16): Likewise.
|
||||
(vqshrntq_m_n_u32): Likewise.
|
||||
(vqshrntq_m_n_u16): Likewise.
|
||||
(vqshrunbq_m_n_s32): Likewise.
|
||||
(vqshrunbq_m_n_s16): Likewise.
|
||||
(vqshruntq_m_n_s32): Likewise.
|
||||
(vqshruntq_m_n_s16): Likewise.
|
||||
(vrmlaldavhaq_p_s32): Likewise.
|
||||
(vrmlaldavhaq_p_u32): Likewise.
|
||||
(vrmlaldavhaxq_p_s32): Likewise.
|
||||
(vrmlsldavhaq_p_s32): Likewise.
|
||||
(vrmlsldavhaxq_p_s32): Likewise.
|
||||
(vrshrnbq_m_n_s32): Likewise.
|
||||
(vrshrnbq_m_n_s16): Likewise.
|
||||
(vrshrnbq_m_n_u32): Likewise.
|
||||
(vrshrnbq_m_n_u16): Likewise.
|
||||
(vrshrntq_m_n_s32): Likewise.
|
||||
(vrshrntq_m_n_s16): Likewise.
|
||||
(vrshrntq_m_n_u32): Likewise.
|
||||
(vrshrntq_m_n_u16): Likewise.
|
||||
(vshllbq_m_n_s8): Likewise.
|
||||
(vshllbq_m_n_s16): Likewise.
|
||||
(vshllbq_m_n_u8): Likewise.
|
||||
(vshllbq_m_n_u16): Likewise.
|
||||
(vshlltq_m_n_s8): Likewise.
|
||||
(vshlltq_m_n_s16): Likewise.
|
||||
(vshlltq_m_n_u8): Likewise.
|
||||
(vshlltq_m_n_u16): Likewise.
|
||||
(vshrnbq_m_n_s32): Likewise.
|
||||
(vshrnbq_m_n_s16): Likewise.
|
||||
(vshrnbq_m_n_u32): Likewise.
|
||||
(vshrnbq_m_n_u16): Likewise.
|
||||
(vshrntq_m_n_s32): Likewise.
|
||||
(vshrntq_m_n_s16): Likewise.
|
||||
(vshrntq_m_n_u32): Likewise.
|
||||
(vshrntq_m_n_u16): Likewise.
|
||||
(__arm_vmlaldavaq_p_s32): Define intrinsic.
|
||||
(__arm_vmlaldavaq_p_s16): Likewise.
|
||||
(__arm_vmlaldavaq_p_u32): Likewise.
|
||||
(__arm_vmlaldavaq_p_u16): Likewise.
|
||||
(__arm_vmlaldavaxq_p_s32): Likewise.
|
||||
(__arm_vmlaldavaxq_p_s16): Likewise.
|
||||
(__arm_vmlaldavaxq_p_u32): Likewise.
|
||||
(__arm_vmlaldavaxq_p_u16): Likewise.
|
||||
(__arm_vmlsldavaq_p_s32): Likewise.
|
||||
(__arm_vmlsldavaq_p_s16): Likewise.
|
||||
(__arm_vmlsldavaxq_p_s32): Likewise.
|
||||
(__arm_vmlsldavaxq_p_s16): Likewise.
|
||||
(__arm_vmullbq_poly_m_p8): Likewise.
|
||||
(__arm_vmullbq_poly_m_p16): Likewise.
|
||||
(__arm_vmulltq_poly_m_p8): Likewise.
|
||||
(__arm_vmulltq_poly_m_p16): Likewise.
|
||||
(__arm_vqdmullbq_m_n_s32): Likewise.
|
||||
(__arm_vqdmullbq_m_n_s16): Likewise.
|
||||
(__arm_vqdmullbq_m_s32): Likewise.
|
||||
(__arm_vqdmullbq_m_s16): Likewise.
|
||||
(__arm_vqdmulltq_m_n_s32): Likewise.
|
||||
(__arm_vqdmulltq_m_n_s16): Likewise.
|
||||
(__arm_vqdmulltq_m_s32): Likewise.
|
||||
(__arm_vqdmulltq_m_s16): Likewise.
|
||||
(__arm_vqrshrnbq_m_n_s32): Likewise.
|
||||
(__arm_vqrshrnbq_m_n_s16): Likewise.
|
||||
(__arm_vqrshrnbq_m_n_u32): Likewise.
|
||||
(__arm_vqrshrnbq_m_n_u16): Likewise.
|
||||
(__arm_vqrshrntq_m_n_s32): Likewise.
|
||||
(__arm_vqrshrntq_m_n_s16): Likewise.
|
||||
(__arm_vqrshrntq_m_n_u32): Likewise.
|
||||
(__arm_vqrshrntq_m_n_u16): Likewise.
|
||||
(__arm_vqrshrunbq_m_n_s32): Likewise.
|
||||
(__arm_vqrshrunbq_m_n_s16): Likewise.
|
||||
(__arm_vqrshruntq_m_n_s32): Likewise.
|
||||
(__arm_vqrshruntq_m_n_s16): Likewise.
|
||||
(__arm_vqshrnbq_m_n_s32): Likewise.
|
||||
(__arm_vqshrnbq_m_n_s16): Likewise.
|
||||
(__arm_vqshrnbq_m_n_u32): Likewise.
|
||||
(__arm_vqshrnbq_m_n_u16): Likewise.
|
||||
(__arm_vqshrntq_m_n_s32): Likewise.
|
||||
(__arm_vqshrntq_m_n_s16): Likewise.
|
||||
(__arm_vqshrntq_m_n_u32): Likewise.
|
||||
(__arm_vqshrntq_m_n_u16): Likewise.
|
||||
(__arm_vqshrunbq_m_n_s32): Likewise.
|
||||
(__arm_vqshrunbq_m_n_s16): Likewise.
|
||||
(__arm_vqshruntq_m_n_s32): Likewise.
|
||||
(__arm_vqshruntq_m_n_s16): Likewise.
|
||||
(__arm_vrmlaldavhaq_p_s32): Likewise.
|
||||
(__arm_vrmlaldavhaq_p_u32): Likewise.
|
||||
(__arm_vrmlaldavhaxq_p_s32): Likewise.
|
||||
(__arm_vrmlsldavhaq_p_s32): Likewise.
|
||||
(__arm_vrmlsldavhaxq_p_s32): Likewise.
|
||||
(__arm_vrshrnbq_m_n_s32): Likewise.
|
||||
(__arm_vrshrnbq_m_n_s16): Likewise.
|
||||
(__arm_vrshrnbq_m_n_u32): Likewise.
|
||||
(__arm_vrshrnbq_m_n_u16): Likewise.
|
||||
(__arm_vrshrntq_m_n_s32): Likewise.
|
||||
(__arm_vrshrntq_m_n_s16): Likewise.
|
||||
(__arm_vrshrntq_m_n_u32): Likewise.
|
||||
(__arm_vrshrntq_m_n_u16): Likewise.
|
||||
(__arm_vshllbq_m_n_s8): Likewise.
|
||||
(__arm_vshllbq_m_n_s16): Likewise.
|
||||
(__arm_vshllbq_m_n_u8): Likewise.
|
||||
(__arm_vshllbq_m_n_u16): Likewise.
|
||||
(__arm_vshlltq_m_n_s8): Likewise.
|
||||
(__arm_vshlltq_m_n_s16): Likewise.
|
||||
(__arm_vshlltq_m_n_u8): Likewise.
|
||||
(__arm_vshlltq_m_n_u16): Likewise.
|
||||
(__arm_vshrnbq_m_n_s32): Likewise.
|
||||
(__arm_vshrnbq_m_n_s16): Likewise.
|
||||
(__arm_vshrnbq_m_n_u32): Likewise.
|
||||
(__arm_vshrnbq_m_n_u16): Likewise.
|
||||
(__arm_vshrntq_m_n_s32): Likewise.
|
||||
(__arm_vshrntq_m_n_s16): Likewise.
|
||||
(__arm_vshrntq_m_n_u32): Likewise.
|
||||
(__arm_vshrntq_m_n_u16): Likewise.
|
||||
(vmullbq_poly_m): Define polymorphic variant.
|
||||
(vmulltq_poly_m): Likewise.
|
||||
(vshllbq_m): Likewise.
|
||||
(vshrntq_m_n): Likewise.
|
||||
(vshrnbq_m_n): Likewise.
|
||||
(vshlltq_m_n): Likewise.
|
||||
(vshllbq_m_n): Likewise.
|
||||
(vrshrntq_m_n): Likewise.
|
||||
(vrshrnbq_m_n): Likewise.
|
||||
(vqshruntq_m_n): Likewise.
|
||||
(vqshrunbq_m_n): Likewise.
|
||||
(vqdmullbq_m_n): Likewise.
|
||||
(vqdmullbq_m): Likewise.
|
||||
(vqdmulltq_m_n): Likewise.
|
||||
(vqdmulltq_m): Likewise.
|
||||
(vqrshrnbq_m_n): Likewise.
|
||||
(vqrshrntq_m_n): Likewise.
|
||||
(vqrshrunbq_m_n): Likewise.
|
||||
(vqrshruntq_m_n): Likewise.
|
||||
(vqshrnbq_m_n): Likewise.
|
||||
(vqshrntq_m_n): Likewise.
|
||||
* config/arm/arm_mve_builtins.def (QUADOP_NONE_NONE_NONE_IMM_UNONE): Use
|
||||
builtin qualifiers.
|
||||
(QUADOP_NONE_NONE_NONE_NONE_UNONE): Likewise.
|
||||
(QUADOP_UNONE_UNONE_NONE_IMM_UNONE): Likewise.
|
||||
(QUADOP_UNONE_UNONE_UNONE_IMM_UNONE): Likewise.
|
||||
(QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE): Likewise.
|
||||
* config/arm/mve.md (VMLALDAVAQ_P): Define iterator.
|
||||
(VMLALDAVAXQ_P): Likewise.
|
||||
(VQRSHRNBQ_M_N): Likewise.
|
||||
(VQRSHRNTQ_M_N): Likewise.
|
||||
(VQSHRNBQ_M_N): Likewise.
|
||||
(VQSHRNTQ_M_N): Likewise.
|
||||
(VRSHRNBQ_M_N): Likewise.
|
||||
(VRSHRNTQ_M_N): Likewise.
|
||||
(VSHLLBQ_M_N): Likewise.
|
||||
(VSHLLTQ_M_N): Likewise.
|
||||
(VSHRNBQ_M_N): Likewise.
|
||||
(VSHRNTQ_M_N): Likewise.
|
||||
(mve_vmlaldavaq_p_<supf><mode>): Define RTL pattern.
|
||||
(mve_vmlaldavaxq_p_<supf><mode>): Likewise.
|
||||
(mve_vqrshrnbq_m_n_<supf><mode>): Likewise.
|
||||
(mve_vqrshrntq_m_n_<supf><mode>): Likewise.
|
||||
(mve_vqshrnbq_m_n_<supf><mode>): Likewise.
|
||||
(mve_vqshrntq_m_n_<supf><mode>): Likewise.
|
||||
(mve_vrmlaldavhaq_p_sv4si): Likewise.
|
||||
(mve_vrshrnbq_m_n_<supf><mode>): Likewise.
|
||||
(mve_vrshrntq_m_n_<supf><mode>): Likewise.
|
||||
(mve_vshllbq_m_n_<supf><mode>): Likewise.
|
||||
(mve_vshlltq_m_n_<supf><mode>): Likewise.
|
||||
(mve_vshrnbq_m_n_<supf><mode>): Likewise.
|
||||
(mve_vshrntq_m_n_<supf><mode>): Likewise.
|
||||
(mve_vmlsldavaq_p_s<mode>): Likewise.
|
||||
(mve_vmlsldavaxq_p_s<mode>): Likewise.
|
||||
(mve_vmullbq_poly_m_p<mode>): Likewise.
|
||||
(mve_vmulltq_poly_m_p<mode>): Likewise.
|
||||
(mve_vqdmullbq_m_n_s<mode>): Likewise.
|
||||
(mve_vqdmullbq_m_s<mode>): Likewise.
|
||||
(mve_vqdmulltq_m_n_s<mode>): Likewise.
|
||||
(mve_vqdmulltq_m_s<mode>): Likewise.
|
||||
(mve_vqrshrunbq_m_n_s<mode>): Likewise.
|
||||
(mve_vqrshruntq_m_n_s<mode>): Likewise.
|
||||
(mve_vqshrunbq_m_n_s<mode>): Likewise.
|
||||
(mve_vqshruntq_m_n_s<mode>): Likewise.
|
||||
(mve_vrmlaldavhaq_p_uv4si): Likewise.
|
||||
(mve_vrmlaldavhaxq_p_sv4si): Likewise.
|
||||
(mve_vrmlsldavhaq_p_sv4si): Likewise.
|
||||
(mve_vrmlsldavhaxq_p_sv4si): Likewise.
|
||||
|
||||
2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
|
||||
Mihail Ionescu <mihail.ionescu@arm.com>
|
||||
Srinath Parvathaneni <srinath.parvathaneni@arm.com>
|
||||
|
@ -579,4 +579,5 @@ void arm_initialize_isa (sbitmap, const enum isa_feature *);
|
||||
|
||||
const char * arm_gen_far_branch (rtx *, int, const char * , const char *);
|
||||
|
||||
bool arm_mve_immediate_check(rtx, machine_mode, bool);
|
||||
#endif /* ! GCC_ARM_PROTOS_H */
|
||||
|
@ -32702,6 +32702,31 @@ arm_simd_check_vect_par_cnst_half_p (rtx op, machine_mode mode,
|
||||
return true;
|
||||
}
|
||||
|
||||
/* To check op's immediate values matches the mode of the defined insn. */
|
||||
bool
|
||||
arm_mve_immediate_check (rtx op, machine_mode mode, bool val)
|
||||
{
|
||||
if (val)
|
||||
{
|
||||
if (((GET_CODE (op) == CONST_INT) && (INTVAL (op) <= 7)
|
||||
&& (mode == E_V16QImode))
|
||||
|| ((GET_CODE (op) == CONST_INT) && (INTVAL (op) <= 15)
|
||||
&& (mode == E_V8HImode))
|
||||
|| ((GET_CODE (op) == CONST_INT) && (INTVAL (op) <= 31)
|
||||
&& (mode == E_V4SImode)))
|
||||
return true;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (((GET_CODE (op) == CONST_INT) && (INTVAL (op) <= 7)
|
||||
&& (mode == E_V8HImode))
|
||||
|| ((GET_CODE (op) == CONST_INT) && (INTVAL (op) <= 15)
|
||||
&& (mode == E_V4SImode)))
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Can output mi_thunk for all cases except for non-zero vcall_offset
|
||||
in Thumb1. */
|
||||
static bool
|
||||
|
@ -1563,6 +1563,83 @@ typedef struct { uint8x16_t val[4]; } uint8x16x4_t;
|
||||
#define vsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vsubq_m_n_u8(__inactive, __a, __b, __p)
|
||||
#define vsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vsubq_m_n_u32(__inactive, __a, __b, __p)
|
||||
#define vsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vsubq_m_n_u16(__inactive, __a, __b, __p)
|
||||
#define vmlaldavaq_p_s32(__a, __b, __c, __p) __arm_vmlaldavaq_p_s32(__a, __b, __c, __p)
|
||||
#define vmlaldavaq_p_s16(__a, __b, __c, __p) __arm_vmlaldavaq_p_s16(__a, __b, __c, __p)
|
||||
#define vmlaldavaq_p_u32(__a, __b, __c, __p) __arm_vmlaldavaq_p_u32(__a, __b, __c, __p)
|
||||
#define vmlaldavaq_p_u16(__a, __b, __c, __p) __arm_vmlaldavaq_p_u16(__a, __b, __c, __p)
|
||||
#define vmlaldavaxq_p_s32(__a, __b, __c, __p) __arm_vmlaldavaxq_p_s32(__a, __b, __c, __p)
|
||||
#define vmlaldavaxq_p_s16(__a, __b, __c, __p) __arm_vmlaldavaxq_p_s16(__a, __b, __c, __p)
|
||||
#define vmlaldavaxq_p_u32(__a, __b, __c, __p) __arm_vmlaldavaxq_p_u32(__a, __b, __c, __p)
|
||||
#define vmlaldavaxq_p_u16(__a, __b, __c, __p) __arm_vmlaldavaxq_p_u16(__a, __b, __c, __p)
|
||||
#define vmlsldavaq_p_s32(__a, __b, __c, __p) __arm_vmlsldavaq_p_s32(__a, __b, __c, __p)
|
||||
#define vmlsldavaq_p_s16(__a, __b, __c, __p) __arm_vmlsldavaq_p_s16(__a, __b, __c, __p)
|
||||
#define vmlsldavaxq_p_s32(__a, __b, __c, __p) __arm_vmlsldavaxq_p_s32(__a, __b, __c, __p)
|
||||
#define vmlsldavaxq_p_s16(__a, __b, __c, __p) __arm_vmlsldavaxq_p_s16(__a, __b, __c, __p)
|
||||
#define vmullbq_poly_m_p8(__inactive, __a, __b, __p) __arm_vmullbq_poly_m_p8(__inactive, __a, __b, __p)
|
||||
#define vmullbq_poly_m_p16(__inactive, __a, __b, __p) __arm_vmullbq_poly_m_p16(__inactive, __a, __b, __p)
|
||||
#define vmulltq_poly_m_p8(__inactive, __a, __b, __p) __arm_vmulltq_poly_m_p8(__inactive, __a, __b, __p)
|
||||
#define vmulltq_poly_m_p16(__inactive, __a, __b, __p) __arm_vmulltq_poly_m_p16(__inactive, __a, __b, __p)
|
||||
#define vqdmullbq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmullbq_m_n_s32(__inactive, __a, __b, __p)
|
||||
#define vqdmullbq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmullbq_m_n_s16(__inactive, __a, __b, __p)
|
||||
#define vqdmullbq_m_s32(__inactive, __a, __b, __p) __arm_vqdmullbq_m_s32(__inactive, __a, __b, __p)
|
||||
#define vqdmullbq_m_s16(__inactive, __a, __b, __p) __arm_vqdmullbq_m_s16(__inactive, __a, __b, __p)
|
||||
#define vqdmulltq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmulltq_m_n_s32(__inactive, __a, __b, __p)
|
||||
#define vqdmulltq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmulltq_m_n_s16(__inactive, __a, __b, __p)
|
||||
#define vqdmulltq_m_s32(__inactive, __a, __b, __p) __arm_vqdmulltq_m_s32(__inactive, __a, __b, __p)
|
||||
#define vqdmulltq_m_s16(__inactive, __a, __b, __p) __arm_vqdmulltq_m_s16(__inactive, __a, __b, __p)
|
||||
#define vqrshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_s32(__a, __b, __imm, __p)
|
||||
#define vqrshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_s16(__a, __b, __imm, __p)
|
||||
#define vqrshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_u32(__a, __b, __imm, __p)
|
||||
#define vqrshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_u16(__a, __b, __imm, __p)
|
||||
#define vqrshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_s32(__a, __b, __imm, __p)
|
||||
#define vqrshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_s16(__a, __b, __imm, __p)
|
||||
#define vqrshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_u32(__a, __b, __imm, __p)
|
||||
#define vqrshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_u16(__a, __b, __imm, __p)
|
||||
#define vqrshrunbq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshrunbq_m_n_s32(__a, __b, __imm, __p)
|
||||
#define vqrshrunbq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshrunbq_m_n_s16(__a, __b, __imm, __p)
|
||||
#define vqrshruntq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshruntq_m_n_s32(__a, __b, __imm, __p)
|
||||
#define vqrshruntq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshruntq_m_n_s16(__a, __b, __imm, __p)
|
||||
#define vqshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_s32(__a, __b, __imm, __p)
|
||||
#define vqshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_s16(__a, __b, __imm, __p)
|
||||
#define vqshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_u32(__a, __b, __imm, __p)
|
||||
#define vqshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_u16(__a, __b, __imm, __p)
|
||||
#define vqshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vqshrntq_m_n_s32(__a, __b, __imm, __p)
|
||||
#define vqshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vqshrntq_m_n_s16(__a, __b, __imm, __p)
|
||||
#define vqshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vqshrntq_m_n_u32(__a, __b, __imm, __p)
|
||||
#define vqshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vqshrntq_m_n_u16(__a, __b, __imm, __p)
|
||||
#define vqshrunbq_m_n_s32(__a, __b, __imm, __p) __arm_vqshrunbq_m_n_s32(__a, __b, __imm, __p)
|
||||
#define vqshrunbq_m_n_s16(__a, __b, __imm, __p) __arm_vqshrunbq_m_n_s16(__a, __b, __imm, __p)
|
||||
#define vqshruntq_m_n_s32(__a, __b, __imm, __p) __arm_vqshruntq_m_n_s32(__a, __b, __imm, __p)
|
||||
#define vqshruntq_m_n_s16(__a, __b, __imm, __p) __arm_vqshruntq_m_n_s16(__a, __b, __imm, __p)
|
||||
#define vrmlaldavhaq_p_s32(__a, __b, __c, __p) __arm_vrmlaldavhaq_p_s32(__a, __b, __c, __p)
|
||||
#define vrmlaldavhaq_p_u32(__a, __b, __c, __p) __arm_vrmlaldavhaq_p_u32(__a, __b, __c, __p)
|
||||
#define vrmlaldavhaxq_p_s32(__a, __b, __c, __p) __arm_vrmlaldavhaxq_p_s32(__a, __b, __c, __p)
|
||||
#define vrmlsldavhaq_p_s32(__a, __b, __c, __p) __arm_vrmlsldavhaq_p_s32(__a, __b, __c, __p)
|
||||
#define vrmlsldavhaxq_p_s32(__a, __b, __c, __p) __arm_vrmlsldavhaxq_p_s32(__a, __b, __c, __p)
|
||||
#define vrshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_s32(__a, __b, __imm, __p)
|
||||
#define vrshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_s16(__a, __b, __imm, __p)
|
||||
#define vrshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_u32(__a, __b, __imm, __p)
|
||||
#define vrshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_u16(__a, __b, __imm, __p)
|
||||
#define vrshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vrshrntq_m_n_s32(__a, __b, __imm, __p)
|
||||
#define vrshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vrshrntq_m_n_s16(__a, __b, __imm, __p)
|
||||
#define vrshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vrshrntq_m_n_u32(__a, __b, __imm, __p)
|
||||
#define vrshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vrshrntq_m_n_u16(__a, __b, __imm, __p)
|
||||
#define vshllbq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_s8(__inactive, __a, __imm, __p)
|
||||
#define vshllbq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_s16(__inactive, __a, __imm, __p)
|
||||
#define vshllbq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_u8(__inactive, __a, __imm, __p)
|
||||
#define vshllbq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_u16(__inactive, __a, __imm, __p)
|
||||
#define vshlltq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_s8(__inactive, __a, __imm, __p)
|
||||
#define vshlltq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_s16(__inactive, __a, __imm, __p)
|
||||
#define vshlltq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_u8(__inactive, __a, __imm, __p)
|
||||
#define vshlltq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_u16(__inactive, __a, __imm, __p)
|
||||
#define vshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vshrnbq_m_n_s32(__a, __b, __imm, __p)
|
||||
#define vshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vshrnbq_m_n_s16(__a, __b, __imm, __p)
|
||||
#define vshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vshrnbq_m_n_u32(__a, __b, __imm, __p)
|
||||
#define vshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vshrnbq_m_n_u16(__a, __b, __imm, __p)
|
||||
#define vshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vshrntq_m_n_s32(__a, __b, __imm, __p)
|
||||
#define vshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vshrntq_m_n_s16(__a, __b, __imm, __p)
|
||||
#define vshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vshrntq_m_n_u32(__a, __b, __imm, __p)
|
||||
#define vshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vshrntq_m_n_u16(__a, __b, __imm, __p)
|
||||
#endif
|
||||
|
||||
__extension__ extern __inline void
|
||||
@ -10317,6 +10394,545 @@ __arm_vsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pr
|
||||
return __builtin_mve_vsubq_m_n_uv8hi (__inactive, __a, __b, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int64_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vmlaldavaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vmlaldavaq_p_sv4si (__a, __b, __c, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int64_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vmlaldavaq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vmlaldavaq_p_sv8hi (__a, __b, __c, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint64_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vmlaldavaq_p_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vmlaldavaq_p_uv4si (__a, __b, __c, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint64_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vmlaldavaq_p_u16 (uint64_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vmlaldavaq_p_uv8hi (__a, __b, __c, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int64_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vmlaldavaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vmlaldavaxq_p_sv4si (__a, __b, __c, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int64_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vmlaldavaxq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vmlaldavaxq_p_sv8hi (__a, __b, __c, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint64_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vmlaldavaxq_p_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vmlaldavaxq_p_uv4si (__a, __b, __c, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint64_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vmlaldavaxq_p_u16 (uint64_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vmlaldavaxq_p_uv8hi (__a, __b, __c, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int64_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vmlsldavaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vmlsldavaq_p_sv4si (__a, __b, __c, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int64_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vmlsldavaq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vmlsldavaq_p_sv8hi (__a, __b, __c, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int64_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vmlsldavaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vmlsldavaxq_p_sv4si (__a, __b, __c, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int64_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vmlsldavaxq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vmlsldavaxq_p_sv8hi (__a, __b, __c, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vmullbq_poly_m_p8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vmullbq_poly_m_pv16qi (__inactive, __a, __b, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint32x4_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vmullbq_poly_m_p16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vmullbq_poly_m_pv8hi (__inactive, __a, __b, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vmulltq_poly_m_p8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vmulltq_poly_m_pv16qi (__inactive, __a, __b, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint32x4_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vmulltq_poly_m_p16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vmulltq_poly_m_pv8hi (__inactive, __a, __b, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int64x2_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqdmullbq_m_n_s32 (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqdmullbq_m_n_sv4si (__inactive, __a, __b, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int32x4_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqdmullbq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqdmullbq_m_n_sv8hi (__inactive, __a, __b, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int64x2_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqdmullbq_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqdmullbq_m_sv4si (__inactive, __a, __b, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int32x4_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqdmullbq_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqdmullbq_m_sv8hi (__inactive, __a, __b, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int64x2_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqdmulltq_m_n_s32 (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqdmulltq_m_n_sv4si (__inactive, __a, __b, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int32x4_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqdmulltq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqdmulltq_m_n_sv8hi (__inactive, __a, __b, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int64x2_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqdmulltq_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqdmulltq_m_sv4si (__inactive, __a, __b, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int32x4_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqdmulltq_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqdmulltq_m_sv8hi (__inactive, __a, __b, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqrshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqrshrnbq_m_n_sv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqrshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqrshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqrshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqrshrnbq_m_n_uv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqrshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqrshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqrshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqrshrntq_m_n_sv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqrshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqrshrntq_m_n_sv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqrshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqrshrntq_m_n_uv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqrshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqrshrntq_m_n_uv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqrshrunbq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqrshrunbq_m_n_sv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqrshrunbq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqrshrunbq_m_n_sv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqrshruntq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqrshruntq_m_n_sv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqrshruntq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqrshruntq_m_n_sv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqshrnbq_m_n_sv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqshrnbq_m_n_uv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqshrntq_m_n_sv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqshrntq_m_n_sv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqshrntq_m_n_uv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqshrntq_m_n_uv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqshrunbq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqshrunbq_m_n_sv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqshrunbq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqshrunbq_m_n_sv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqshruntq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqshruntq_m_n_sv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vqshruntq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vqshruntq_m_n_sv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int64_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vrmlaldavhaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vrmlaldavhaq_p_sv4si (__a, __b, __c, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint64_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vrmlaldavhaq_p_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vrmlaldavhaq_p_uv4si (__a, __b, __c, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int64_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vrmlaldavhaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vrmlaldavhaxq_p_sv4si (__a, __b, __c, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int64_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vrmlsldavhaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vrmlsldavhaq_p_sv4si (__a, __b, __c, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int64_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vrmlsldavhaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vrmlsldavhaxq_p_sv4si (__a, __b, __c, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vrshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vrshrnbq_m_n_sv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vrshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vrshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vrshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vrshrnbq_m_n_uv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vrshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vrshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vrshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vrshrntq_m_n_sv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vrshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vrshrntq_m_n_sv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vrshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vrshrntq_m_n_uv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vrshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vrshrntq_m_n_uv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vshllbq_m_n_s8 (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vshllbq_m_n_sv16qi (__inactive, __a, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int32x4_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vshllbq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vshllbq_m_n_sv8hi (__inactive, __a, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vshllbq_m_n_u8 (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vshllbq_m_n_uv16qi (__inactive, __a, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint32x4_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vshllbq_m_n_u16 (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vshllbq_m_n_uv8hi (__inactive, __a, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vshlltq_m_n_s8 (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vshlltq_m_n_sv16qi (__inactive, __a, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int32x4_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vshlltq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vshlltq_m_n_sv8hi (__inactive, __a, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vshlltq_m_n_u8 (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vshlltq_m_n_uv16qi (__inactive, __a, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint32x4_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vshlltq_m_n_u16 (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vshlltq_m_n_uv8hi (__inactive, __a, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vshrnbq_m_n_sv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vshrnbq_m_n_uv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vshrntq_m_n_sv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline int8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vshrntq_m_n_sv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vshrntq_m_n_uv4si (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
__arm_vshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
|
||||
{
|
||||
return __builtin_mve_vshrntq_m_n_uv8hi (__a, __b, __imm, __p);
|
||||
}
|
||||
|
||||
#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
|
||||
|
||||
__extension__ extern __inline void
|
||||
@ -16547,7 +17163,6 @@ extern void *__ARM_undef;
|
||||
int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmulhq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
|
||||
int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmulhq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
|
||||
|
||||
|
||||
#define vqrdmlsdhxq_m(p0,p1,p2,p3) __arm_vqrdmlsdhxq_m(p0,p1,p2,p3)
|
||||
#define __arm_vqrdmlsdhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
|
||||
__typeof(p1) __p1 = (p1); \
|
||||
@ -16611,6 +17226,213 @@ extern void *__ARM_undef;
|
||||
int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
|
||||
int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
|
||||
|
||||
#define vmullbq_poly_m(p0,p1,p2,p3) __arm_vmullbq_poly_m(p0,p1,p2,p3)
|
||||
#define __arm_vmullbq_poly_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
|
||||
__typeof(p1) __p1 = (p1); \
|
||||
__typeof(p2) __p2 = (p2); \
|
||||
_Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
|
||||
int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_m_p8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
|
||||
int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_m_p16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
|
||||
|
||||
#define vmulltq_poly_m(p0,p1,p2,p3) __arm_vmulltq_poly_m(p0,p1,p2,p3)
|
||||
#define __arm_vmulltq_poly_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
|
||||
__typeof(p1) __p1 = (p1); \
|
||||
__typeof(p2) __p2 = (p2); \
|
||||
_Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
|
||||
int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_m_p8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
|
||||
int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_m_p16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
|
||||
|
||||
#define vshllbq_m(p0,p1,p2,p3) __arm_vshllbq_m(p0,p1,p2,p3)
|
||||
#define __arm_vshllbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
|
||||
__typeof(p1) __p1 = (p1); \
|
||||
_Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
|
||||
int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vshllbq_m_n_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
|
||||
int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vshllbq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
|
||||
int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vshllbq_m_n_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
|
||||
int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vshllbq_m_n_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
|
||||
|
||||
#define vshrntq_m(p0,p1,p2,p3) __arm_vshrntq_m(p0,p1,p2,p3)
|
||||
#define __arm_vshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
|
||||
__typeof(p1) __p1 = (p1); \
|
||||
_Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
|
||||
int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
|
||||
int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
|
||||
int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
|
||||
int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vshrnb intrinsic: picks the
   type-suffixed implementation from the vector types of p0 and p1.  */
#define vshrnbq_m(p0,p1,p2,p3) __arm_vshrnbq_m(p0,p1,p2,p3)
#define __arm_vshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vshllt intrinsic: picks the
   type-suffixed implementation from the vector types of p0 and p1.  */
#define vshlltq_m(p0,p1,p2,p3) __arm_vshlltq_m(p0,p1,p2,p3)
#define __arm_vshlltq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vshlltq_m_n_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vshlltq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vshlltq_m_n_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vshlltq_m_n_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vrshrnt intrinsic: picks the
   type-suffixed implementation from the vector types of p0 and p1.  */
#define vrshrntq_m(p0,p1,p2,p3) __arm_vrshrntq_m(p0,p1,p2,p3)
#define __arm_vrshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vqshrunt intrinsic (signed
   sources only): picks the implementation from the types of p0 and p1.  */
#define vqshruntq_m(p0,p1,p2,p3) __arm_vqshruntq_m(p0,p1,p2,p3)
#define __arm_vqshruntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vqshrunb intrinsic (signed
   sources only): picks the implementation from the types of p0 and p1.  */
#define vqshrunbq_m(p0,p1,p2,p3) __arm_vqshrunbq_m(p0,p1,p2,p3)
#define __arm_vqshrunbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vqdmullb intrinsic.  Dispatches
   on three operand types so both the vector-by-vector and the
   vector-by-scalar (_n) variants are reachable.  */
#define vqdmullbq_m(p0,p1,p2,p3) __arm_vqdmullbq_m(p0,p1,p2,p3)
#define __arm_vqdmullbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmullbq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmullbq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vqdmullt intrinsic.  Dispatches
   on three operand types so both the vector-by-vector and the
   vector-by-scalar (_n) variants are reachable.  */
#define vqdmulltq_m(p0,p1,p2,p3) __arm_vqdmulltq_m(p0,p1,p2,p3)
#define __arm_vqdmulltq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmulltq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmulltq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vqrshrnb intrinsic: picks the
   type-suffixed implementation from the vector types of p0 and p1.  */
#define vqrshrnbq_m(p0,p1,p2,p3) __arm_vqrshrnbq_m(p0,p1,p2,p3)
#define __arm_vqrshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vqrshrnt intrinsic: picks the
   type-suffixed implementation from the vector types of p0 and p1.  */
#define vqrshrntq_m(p0,p1,p2,p3) __arm_vqrshrntq_m(p0,p1,p2,p3)
#define __arm_vqrshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vqrshrunb intrinsic (signed
   sources only): picks the implementation from the types of p0 and p1.  */
#define vqrshrunbq_m(p0,p1,p2,p3) __arm_vqrshrunbq_m(p0,p1,p2,p3)
#define __arm_vqrshrunbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vqrshrunt intrinsic (signed
   sources only): picks the implementation from the types of p0 and p1.  */
#define vqrshruntq_m(p0,p1,p2,p3) __arm_vqrshruntq_m(p0,p1,p2,p3)
#define __arm_vqrshruntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vqshrnb intrinsic: picks the
   type-suffixed implementation from the vector types of p0 and p1.  */
#define vqshrnbq_m(p0,p1,p2,p3) __arm_vqshrnbq_m(p0,p1,p2,p3)
#define __arm_vqshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vqshrnt intrinsic: picks the
   type-suffixed implementation from the vector types of p0 and p1.  */
#define vqshrntq_m(p0,p1,p2,p3) __arm_vqshrntq_m(p0,p1,p2,p3)
#define __arm_vqshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vrshrnb intrinsic: picks the
   type-suffixed implementation from the vector types of p0 and p1.  */
#define vrshrnbq_m(p0,p1,p2,p3) __arm_vrshrnbq_m(p0,p1,p2,p3)
#define __arm_vrshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vmlaldava intrinsic: dispatches
   on the accumulator (p0) and the two vector operand types.  */
#define vmlaldavaq_p(p0,p1,p2,p3) __arm_vmlaldavaq_p(p0,p1,p2,p3)
#define __arm_vmlaldavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaq_p_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_p_u16 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vmlaldavax intrinsic: dispatches
   on the accumulator (p0) and the two vector operand types.  */
#define vmlaldavaxq_p(p0,p1,p2,p3) __arm_vmlaldavaxq_p(p0,p1,p2,p3)
#define __arm_vmlaldavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaxq_p_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaxq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaxq_p_u16 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaxq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vmlsldava intrinsic (signed
   only).  Dispatch is on p1/p2; the accumulator __p0 is passed through
   uncoerced.  */
#define vmlsldavaq_p(p0,p1,p2,p3) __arm_vmlsldavaq_p(p0,p1,p2,p3)
#define __arm_vmlsldavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vmlsldavax intrinsic (signed
   only).  Dispatch is on p1/p2; the accumulator __p0 is passed through
   uncoerced.  */
#define vmlsldavaxq_p(p0,p1,p2,p3) __arm_vmlsldavaxq_p(p0,p1,p2,p3)
#define __arm_vmlsldavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaxq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaxq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
|
||||
|
||||
/* Polymorphic wrapper for the predicated vrmlaldavha intrinsic:
   dispatches on the accumulator (p0) and the two vector operand types.  */
#define vrmlaldavhaq_p(p0,p1,p2,p3) __arm_vrmlaldavhaq_p(p0,p1,p2,p3)
#define __arm_vrmlaldavhaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
|
||||
|
||||
/* Single-overload intrinsics: each of these exists only in an _s32
   flavour, so the polymorphic name forwards directly with no _Generic
   dispatch.  */
#define vrmlaldavhaxq_p(p0,p1,p2,p3) __arm_vrmlaldavhaxq_p(p0,p1,p2,p3)
#define __arm_vrmlaldavhaxq_p(p0,p1,p2,p3) __arm_vrmlaldavhaxq_p_s32(p0,p1,p2,p3)

#define vrmlsldavhaq_p(p0,p1,p2,p3) __arm_vrmlsldavhaq_p(p0,p1,p2,p3)
#define __arm_vrmlsldavhaq_p(p0,p1,p2,p3) __arm_vrmlsldavhaq_p_s32(p0,p1,p2,p3)

#define vrmlsldavhaxq_p(p0,p1,p2,p3) __arm_vrmlsldavhaxq_p(p0,p1,p2,p3)
#define __arm_vrmlsldavhaxq_p(p0,p1,p2,p3) __arm_vrmlsldavhaxq_p_s32(p0,p1,p2,p3)
|
||||
|
||||
#endif /* MVE Integer. */
|
||||
|
||||
#define vqdmladhq_m(p0,p1,p2,p3) __arm_vqdmladhq_m(p0,p1,p2,p3)
|
||||
|
@ -613,3 +613,44 @@ VAR3 (QUADOP_NONE_NONE_NONE_IMM_UNONE, vshrq_m_n_s, v16qi, v8hi, v4si)
|
||||
VAR3 (QUADOP_NONE_NONE_NONE_IMM_UNONE, vshlq_m_n_s, v16qi, v8hi, v4si)
|
||||
VAR3 (QUADOP_NONE_NONE_NONE_IMM_UNONE, vrshrq_m_n_s, v16qi, v8hi, v4si)
|
||||
VAR3 (QUADOP_NONE_NONE_NONE_IMM_UNONE, vqshlq_m_n_s, v16qi, v8hi, v4si)
|
||||
VAR2 (QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE, vmulltq_poly_m_p, v16qi, v8hi)
|
||||
VAR2 (QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE, vmullbq_poly_m_p, v16qi, v8hi)
|
||||
VAR2 (QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE, vmlaldavaxq_p_u, v8hi, v4si)
|
||||
VAR2 (QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE, vmlaldavaq_p_u, v8hi, v4si)
|
||||
VAR2 (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE, vshrntq_m_n_u, v8hi, v4si)
|
||||
VAR2 (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE, vshrnbq_m_n_u, v8hi, v4si)
|
||||
VAR2 (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE, vshlltq_m_n_u, v16qi, v8hi)
|
||||
VAR2 (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE, vshllbq_m_n_u, v16qi, v8hi)
|
||||
VAR2 (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE, vrshrntq_m_n_u, v8hi, v4si)
|
||||
VAR2 (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE, vrshrnbq_m_n_u, v8hi, v4si)
|
||||
VAR2 (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE, vqshrntq_m_n_u, v8hi, v4si)
|
||||
VAR2 (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE, vqshrnbq_m_n_u, v8hi, v4si)
|
||||
VAR2 (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE, vqrshrntq_m_n_u, v8hi, v4si)
|
||||
VAR2 (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE, vqrshrnbq_m_n_u, v8hi, v4si)
|
||||
VAR2 (QUADOP_UNONE_UNONE_NONE_IMM_UNONE, vqshruntq_m_n_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_UNONE_UNONE_NONE_IMM_UNONE, vqshrunbq_m_n_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_UNONE_UNONE_NONE_IMM_UNONE, vqrshruntq_m_n_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_UNONE_UNONE_NONE_IMM_UNONE, vqrshrunbq_m_n_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vqdmulltq_m_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vqdmulltq_m_n_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vqdmullbq_m_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vqdmullbq_m_n_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vmlsldavaxq_p_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vmlsldavaq_p_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vmlaldavaxq_p_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vmlaldavaq_p_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_NONE_NONE_NONE_IMM_UNONE, vshrntq_m_n_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_NONE_NONE_NONE_IMM_UNONE, vshrnbq_m_n_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_NONE_NONE_NONE_IMM_UNONE, vshlltq_m_n_s, v16qi, v8hi)
|
||||
VAR2 (QUADOP_NONE_NONE_NONE_IMM_UNONE, vshllbq_m_n_s, v16qi, v8hi)
|
||||
VAR2 (QUADOP_NONE_NONE_NONE_IMM_UNONE, vrshrntq_m_n_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_NONE_NONE_NONE_IMM_UNONE, vrshrnbq_m_n_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_NONE_NONE_NONE_IMM_UNONE, vqshrntq_m_n_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_NONE_NONE_NONE_IMM_UNONE, vqshrnbq_m_n_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_NONE_NONE_NONE_IMM_UNONE, vqrshrntq_m_n_s, v8hi, v4si)
|
||||
VAR2 (QUADOP_NONE_NONE_NONE_IMM_UNONE, vqrshrnbq_m_n_s, v8hi, v4si)
|
||||
VAR1 (QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE, vrmlaldavhaq_p_u, v4si)
|
||||
VAR1 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vrmlsldavhaxq_p_s, v4si)
|
||||
VAR1 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vrmlsldavhaq_p_s, v4si)
|
||||
VAR1 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vrmlaldavhaxq_p_s, v4si)
|
||||
VAR1 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vrmlaldavhaq_p_s, v4si)
|
||||
|
@ -173,7 +173,20 @@
|
||||
VQRDMULHQ_M_S VMLSDAVAXQ_P_S VQDMULHQ_M_N_S
|
||||
VHCADDQ_ROT270_M_S VQDMLSDHQ_M_S VQDMLSDHXQ_M_S
|
||||
VMLSDAVAQ_P_S VQRDMLADHQ_M_S VQDMLADHQ_M_S
|
||||
VQRDMULHQ_M_N_S])
|
||||
VMLALDAVAQ_P_U VMLALDAVAQ_P_S VMLALDAVAXQ_P_U
|
||||
VQRSHRNBQ_M_N_U VQRSHRNBQ_M_N_S VQRSHRNTQ_M_N_S
|
||||
VQSHRNBQ_M_N_U VQSHRNBQ_M_N_S VQSHRNTQ_M_N_S
|
||||
VRSHRNBQ_M_N_U VRSHRNBQ_M_N_S VRSHRNTQ_M_N_U
|
||||
VSHLLBQ_M_N_U VSHLLBQ_M_N_S VSHLLTQ_M_N_U VSHLLTQ_M_N_S
|
||||
VSHRNBQ_M_N_S VSHRNBQ_M_N_U VSHRNTQ_M_N_S VSHRNTQ_M_N_U
|
||||
VMLALDAVAXQ_P_S VQRSHRNTQ_M_N_U VQSHRNTQ_M_N_U
|
||||
VRSHRNTQ_M_N_S VQRDMULHQ_M_N_S VRMLALDAVHAQ_P_S
|
||||
VMLSLDAVAQ_P_S VMLSLDAVAXQ_P_S VMULLBQ_POLY_M_P
|
||||
VMULLTQ_POLY_M_P VQDMULLBQ_M_N_S VQDMULLBQ_M_S
|
||||
VQDMULLTQ_M_N_S VQDMULLTQ_M_S VQRSHRUNBQ_M_N_S
|
||||
VQRSHRUNTQ_M_N_SVQSHRUNBQ_M_N_S VQSHRUNTQ_M_N_S
|
||||
VRMLALDAVHAQ_P_U VRMLALDAVHAXQ_P_S VRMLSLDAVHAQ_P_S
|
||||
VRMLSLDAVHAXQ_P_S VQRSHRUNTQ_M_N_S VQSHRUNBQ_M_N_S])
|
||||
|
||||
;; Map each MVE vector mode to the same-width mode of the opposite
;; (integer <-> float) class.
(define_mode_attr MVE_CNVT [(V8HI "V8HF") (V4SI "V4SF")
			    (V8HF "V8HI") (V4SF "V4SI")])
|
||||
@ -311,7 +324,20 @@
|
||||
(VADDQ_M_U "u") (VRSHLQ_M_U "u") (VSLIQ_M_N_S "s")
|
||||
(VQADDQ_M_N_U "u") (VADDQ_M_N_S "s") (VQSUBQ_M_N_U "u")
|
||||
(VMLAQ_M_N_U "u") (VMINQ_M_S "s") (VMULHQ_M_U "u")
|
||||
(VQADDQ_M_S "s") (VBICQ_M_S "s") (VQSHLQ_M_N_S "s")])
|
||||
(VQADDQ_M_S "s") (VBICQ_M_S "s") (VQSHLQ_M_N_S "s")
|
||||
(VQSHRNTQ_M_N_S "s") (VQSHRNTQ_M_N_U "u")
|
||||
(VSHRNTQ_M_N_U "u") (VSHRNTQ_M_N_S "s")
|
||||
(VSHRNBQ_M_N_S "s") (VSHRNBQ_M_N_U "u")
|
||||
(VSHLLTQ_M_N_S "s") (VSHLLTQ_M_N_U "u")
|
||||
(VSHLLBQ_M_N_S "s") (VSHLLBQ_M_N_U "u")
|
||||
(VRSHRNTQ_M_N_S "s") (VRSHRNTQ_M_N_U "u")
|
||||
(VRSHRNBQ_M_N_U "u") (VRSHRNBQ_M_N_S "s")
|
||||
(VQSHRNTQ_M_N_U "u") (VQSHRNTQ_M_N_S "s")
|
||||
(VQSHRNBQ_M_N_S "s") (VQSHRNBQ_M_N_U "u")
|
||||
(VQRSHRNTQ_M_N_S "s") (VQRSHRNTQ_M_N_U "u")
|
||||
(VQRSHRNBQ_M_N_S "s") (VQRSHRNBQ_M_N_U "u")
|
||||
(VMLALDAVAXQ_P_S "s") (VMLALDAVAXQ_P_U "u")
|
||||
(VMLALDAVAQ_P_S "s") (VMLALDAVAQ_P_U "u")])
|
||||
|
||||
(define_int_attr mode1 [(VCTP8Q "8") (VCTP16Q "16") (VCTP32Q "32")
|
||||
(VCTP64Q "64") (VCTP8Q_M "8") (VCTP16Q_M "16")
|
||||
@ -519,7 +545,18 @@
|
||||
;; Iterators grouping the signed/unsigned unspec pair of each predicated
;; builtin so one define_insn can cover both variants.
(define_int_iterator VABDQ_M [VABDQ_M_S VABDQ_M_U])
(define_int_iterator VMLAQ_M_N [VMLAQ_M_N_S VMLAQ_M_N_U])
(define_int_iterator VQSHLQ_M_N [VQSHLQ_M_N_S VQSHLQ_M_N_U])

(define_int_iterator VMLALDAVAQ_P [VMLALDAVAQ_P_U VMLALDAVAQ_P_S])
(define_int_iterator VMLALDAVAXQ_P [VMLALDAVAXQ_P_U VMLALDAVAXQ_P_S])
(define_int_iterator VQRSHRNBQ_M_N [VQRSHRNBQ_M_N_U VQRSHRNBQ_M_N_S])
(define_int_iterator VQRSHRNTQ_M_N [VQRSHRNTQ_M_N_S VQRSHRNTQ_M_N_U])
(define_int_iterator VQSHRNBQ_M_N [VQSHRNBQ_M_N_U VQSHRNBQ_M_N_S])
(define_int_iterator VQSHRNTQ_M_N [VQSHRNTQ_M_N_S VQSHRNTQ_M_N_U])
(define_int_iterator VRSHRNBQ_M_N [VRSHRNBQ_M_N_U VRSHRNBQ_M_N_S])
(define_int_iterator VRSHRNTQ_M_N [VRSHRNTQ_M_N_U VRSHRNTQ_M_N_S])
(define_int_iterator VSHLLBQ_M_N [VSHLLBQ_M_N_U VSHLLBQ_M_N_S])
(define_int_iterator VSHLLTQ_M_N [VSHLLTQ_M_N_U VSHLLTQ_M_N_S])
(define_int_iterator VSHRNBQ_M_N [VSHRNBQ_M_N_S VSHRNBQ_M_N_U])
(define_int_iterator VSHRNTQ_M_N [VSHRNTQ_M_N_S VSHRNTQ_M_N_U])
|
||||
|
||||
(define_insn "*mve_mov<mode>"
|
||||
[(set (match_operand:MVE_types 0 "nonimmediate_operand" "=w,w,r,w,w,r,w,Us")
|
||||
@ -6914,3 +6951,495 @@
|
||||
[(set_attr "type" "mve_move")
|
||||
(set_attr "length""8")])
|
||||
|
||||
;;
;; [vmlaldavaq_p_u, vmlaldavaq_p_s])
;;
(define_insn "mve_vmlaldavaq_p_<supf><mode>"
  [
   (set (match_operand:DI 0 "s_register_operand" "=r")
	(unspec:DI [(match_operand:DI 1 "s_register_operand" "0")
		    (match_operand:MVE_5 2 "s_register_operand" "w")
		    (match_operand:MVE_5 3 "s_register_operand" "w")
		    (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VMLALDAVAQ_P))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vmlaldavat.<supf>%#<V_sz_elem> %Q0, %R0, %q2, %q3"
  [(set_attr "type" "mve_move")
   (set_attr "length""8")])
|
||||
|
||||
;;
;; [vmlaldavaxq_p_u, vmlaldavaxq_p_s])
;;
(define_insn "mve_vmlaldavaxq_p_<supf><mode>"
  [
   (set (match_operand:DI 0 "s_register_operand" "=r")
	(unspec:DI [(match_operand:DI 1 "s_register_operand" "0")
		    (match_operand:MVE_5 2 "s_register_operand" "w")
		    (match_operand:MVE_5 3 "s_register_operand" "w")
		    (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VMLALDAVAXQ_P))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vmlaldavaxt.<supf>%#<V_sz_elem> %Q0, %R0, %q2, %q3"
  [(set_attr "type" "mve_move")
   (set_attr "length""8")])
|
||||
|
||||
;;
;; [vqrshrnbq_m_n_u, vqrshrnbq_m_n_s])
;;
(define_insn "mve_vqrshrnbq_m_n_<supf><mode>"
  [
   (set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
	(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
				 (match_operand:MVE_5 2 "s_register_operand" "w")
				 (match_operand:SI 3 "mve_imm_8" "Rb")
				 (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VQRSHRNBQ_M_N))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vqrshrnbt.<supf>%#<V_sz_elem> %q0, %q2, %3"
  [(set_attr "type" "mve_move")
   (set_attr "length""8")])
|
||||
|
||||
;;
;; [vqrshrntq_m_n_s, vqrshrntq_m_n_u])
;;
(define_insn "mve_vqrshrntq_m_n_<supf><mode>"
  [
   (set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
	(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
				 (match_operand:MVE_5 2 "s_register_operand" "w")
				 (match_operand:SI 3 "mve_imm_8" "Rb")
				 (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VQRSHRNTQ_M_N))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vqrshrntt.<supf>%#<V_sz_elem> %q0, %q2, %3"
  [(set_attr "type" "mve_move")
   (set_attr "length""8")])
|
||||
|
||||
;;
;; [vqshrnbq_m_n_u, vqshrnbq_m_n_s])
;;
;; The shift count is range-checked at expansion time via
;; arm_mve_immediate_check in addition to the mode-derived predicate.
(define_insn "mve_vqshrnbq_m_n_<supf><mode>"
  [
   (set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
	(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
				 (match_operand:MVE_5 2 "s_register_operand" "w")
				 (match_operand:SI 3 "<MVE_pred1>" "<MVE_constraint1>")
				 (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VQSHRNBQ_M_N))
  ]
  "TARGET_HAVE_MVE && arm_mve_immediate_check (operands[3], <MODE>mode, 0)"
  "vpst\n\tvqshrnbt.<supf>%#<V_sz_elem>\t%q0, %q2, %3"
  [(set_attr "type" "mve_move")
   (set_attr "length""8")])
|
||||
|
||||
;;
;; [vqshrntq_m_n_s, vqshrntq_m_n_u])
;;
(define_insn "mve_vqshrntq_m_n_<supf><mode>"
  [
   (set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
	(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
				 (match_operand:MVE_5 2 "s_register_operand" "w")
				 (match_operand:SI 3 "mve_imm_8" "Rb")
				 (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VQSHRNTQ_M_N))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vqshrntt.<supf>%#<V_sz_elem>\t%q0, %q2, %3"
  [(set_attr "type" "mve_move")
   (set_attr "length""8")])
|
||||
|
||||
;;
;; [vrmlaldavhaq_p_s])
;;
(define_insn "mve_vrmlaldavhaq_p_sv4si"
  [
   (set (match_operand:DI 0 "s_register_operand" "=r")
	(unspec:DI [(match_operand:DI 1 "s_register_operand" "0")
		    (match_operand:V4SI 2 "s_register_operand" "w")
		    (match_operand:V4SI 3 "s_register_operand" "w")
		    (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VRMLALDAVHAQ_P_S))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vrmlaldavhat.s32\t%Q0, %R0, %q2, %q3"
  [(set_attr "type" "mve_move")
   (set_attr "length""8")])
|
||||
|
||||
;;
;; [vrshrnbq_m_n_u, vrshrnbq_m_n_s])
;;
(define_insn "mve_vrshrnbq_m_n_<supf><mode>"
  [
   (set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
	(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
				 (match_operand:MVE_5 2 "s_register_operand" "w")
				 (match_operand:SI 3 "mve_imm_8" "Rb")
				 (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VRSHRNBQ_M_N))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vrshrnbt.i%#<V_sz_elem>\t%q0, %q2, %3"
  [(set_attr "type" "mve_move")
   (set_attr "length""8")])
|
||||
|
||||
;;
;; [vrshrntq_m_n_u, vrshrntq_m_n_s])
;;
(define_insn "mve_vrshrntq_m_n_<supf><mode>"
  [
   (set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
	(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
				 (match_operand:MVE_5 2 "s_register_operand" "w")
				 (match_operand:SI 3 "mve_imm_8" "Rb")
				 (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VRSHRNTQ_M_N))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vrshrntt.i%#<V_sz_elem>\t%q0, %q2, %3"
  [(set_attr "type" "mve_move")
   (set_attr "length""8")])
|
||||
|
||||
;;
;; [vshllbq_m_n_u, vshllbq_m_n_s])
;;
(define_insn "mve_vshllbq_m_n_<supf><mode>"
  [
   (set (match_operand:<V_double_width> 0 "s_register_operand" "=w")
	(unspec:<V_double_width> [(match_operand:<V_double_width> 1 "s_register_operand" "0")
				  (match_operand:MVE_3 2 "s_register_operand" "w")
				  (match_operand:SI 3 "immediate_operand" "i")
				  (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VSHLLBQ_M_N))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vshllbt.<supf>%#<V_sz_elem>\t%q0, %q2, %3"
  [(set_attr "type" "mve_move")
   (set_attr "length""8")])
|
||||
|
||||
;;
;; [vshlltq_m_n_u, vshlltq_m_n_s])
;;
(define_insn "mve_vshlltq_m_n_<supf><mode>"
  [
   (set (match_operand:<V_double_width> 0 "s_register_operand" "=w")
	(unspec:<V_double_width> [(match_operand:<V_double_width> 1 "s_register_operand" "0")
				  (match_operand:MVE_3 2 "s_register_operand" "w")
				  (match_operand:SI 3 "immediate_operand" "i")
				  (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VSHLLTQ_M_N))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vshlltt.<supf>%#<V_sz_elem>\t%q0, %q2, %3"
  [(set_attr "type" "mve_move")
   (set_attr "length""8")])
|
||||
|
||||
;;
;; [vshrnbq_m_n_s, vshrnbq_m_n_u])
;;
(define_insn "mve_vshrnbq_m_n_<supf><mode>"
  [
   (set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
	(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
				 (match_operand:MVE_5 2 "s_register_operand" "w")
				 (match_operand:SI 3 "<MVE_pred3>" "<MVE_constraint3>")
				 (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VSHRNBQ_M_N))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vshrnbt.i%#<V_sz_elem>\t%q0, %q2, %3"
  [(set_attr "type" "mve_move")
   (set_attr "length""8")])
|
||||
|
||||
;;
|
||||
;; [vshrntq_m_n_s, vshrntq_m_n_u])
|
||||
;;
|
||||
;; Predicated shift-right-narrow-top; same operand layout as the bottom
;; variant, operand 4 is the VPR predicate.
(define_insn "mve_vshrntq_m_n_<supf><mode>"
  [
   (set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
	(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
		       (match_operand:MVE_5 2 "s_register_operand" "w")
		       (match_operand:SI 3 "<MVE_pred3>" "<MVE_constraint3>")
		       (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VSHRNTQ_M_N))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vshrntt.i%#<V_sz_elem>\t%q0, %q2, %3"
  [(set_attr "type" "mve_move")
   (set_attr "length" "8")])
|
||||
|
||||
;;
|
||||
;; [vmlsldavaq_p_s])
|
||||
;;
|
||||
;; Predicated vmlsldava: 64-bit accumulator (operand 1, "%Q0,%R0") is tied
;; to the destination; operand 4 is the VPR predicate.
(define_insn "mve_vmlsldavaq_p_s<mode>"
  [
   (set (match_operand:DI 0 "s_register_operand" "=r")
	(unspec:DI [(match_operand:DI 1 "s_register_operand" "0")
		    (match_operand:MVE_5 2 "s_register_operand" "w")
		    (match_operand:MVE_5 3 "s_register_operand" "w")
		    (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VMLSLDAVAQ_P_S))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vmlsldavat.s%#<V_sz_elem>\t%Q0, %R0, %q2, %q3"
  [(set_attr "type" "mve_move")
   (set_attr "length" "8")])
|
||||
|
||||
;;
|
||||
;; [vmlsldavaxq_p_s])
|
||||
;;
|
||||
;; Predicated vmlsldavax (exchange variant); 64-bit accumulator tied to the
;; destination, operand 4 is the VPR predicate.
(define_insn "mve_vmlsldavaxq_p_s<mode>"
  [
   (set (match_operand:DI 0 "s_register_operand" "=r")
	(unspec:DI [(match_operand:DI 1 "s_register_operand" "0")
		    (match_operand:MVE_5 2 "s_register_operand" "w")
		    (match_operand:MVE_5 3 "s_register_operand" "w")
		    (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VMLSLDAVAXQ_P_S))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vmlsldavaxt.s%#<V_sz_elem>\t%Q0, %R0, %q2, %q3"
  [(set_attr "type" "mve_move")
   (set_attr "length" "8")])
|
||||
|
||||
;;
|
||||
;; [vmullbq_poly_m_p])
|
||||
;;
|
||||
;; Predicated polynomial multiply-long-bottom; operand 1 supplies the value
;; tied to the destination, operand 4 is the VPR predicate.
(define_insn "mve_vmullbq_poly_m_p<mode>"
  [
   (set (match_operand:<V_double_width> 0 "s_register_operand" "=w")
	(unspec:<V_double_width> [(match_operand:<V_double_width> 1 "s_register_operand" "0")
		       (match_operand:MVE_3 2 "s_register_operand" "w")
		       (match_operand:MVE_3 3 "s_register_operand" "w")
		       (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VMULLBQ_POLY_M_P))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vmullbt.p%#<V_sz_elem>\t%q0, %q2, %q3"
  [(set_attr "type" "mve_move")
   (set_attr "length" "8")])
|
||||
|
||||
;;
|
||||
;; [vmulltq_poly_m_p])
|
||||
;;
|
||||
;; Predicated polynomial multiply-long-top; same layout as the bottom
;; variant, operand 4 is the VPR predicate.
(define_insn "mve_vmulltq_poly_m_p<mode>"
  [
   (set (match_operand:<V_double_width> 0 "s_register_operand" "=w")
	(unspec:<V_double_width> [(match_operand:<V_double_width> 1 "s_register_operand" "0")
		       (match_operand:MVE_3 2 "s_register_operand" "w")
		       (match_operand:MVE_3 3 "s_register_operand" "w")
		       (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VMULLTQ_POLY_M_P))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vmulltt.p%#<V_sz_elem>\t%q0, %q2, %q3"
  [(set_attr "type" "mve_move")
   (set_attr "length" "8")])
|
||||
|
||||
;;
|
||||
;; [vqdmullbq_m_n_s])
|
||||
;;
|
||||
;; Predicated vqdmullb with a scalar (GP-register, "r") second multiplicand;
;; operand 1 is tied to the destination, operand 4 is the VPR predicate.
(define_insn "mve_vqdmullbq_m_n_s<mode>"
  [
   (set (match_operand:<V_double_width> 0 "s_register_operand" "=w")
	(unspec:<V_double_width> [(match_operand:<V_double_width> 1 "s_register_operand" "0")
		       (match_operand:MVE_5 2 "s_register_operand" "w")
		       (match_operand:<V_elem> 3 "s_register_operand" "r")
		       (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VQDMULLBQ_M_N_S))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vqdmullbt.s%#<V_sz_elem>\t%q0, %q2, %3"
  [(set_attr "type" "mve_move")
   (set_attr "length" "8")])
|
||||
|
||||
;;
|
||||
;; [vqdmullbq_m_s])
|
||||
;;
|
||||
;; Predicated vqdmullb, vector-by-vector form; operand 1 is tied to the
;; destination, operand 4 is the VPR predicate.
(define_insn "mve_vqdmullbq_m_s<mode>"
  [
   (set (match_operand:<V_double_width> 0 "s_register_operand" "=w")
	(unspec:<V_double_width> [(match_operand:<V_double_width> 1 "s_register_operand" "0")
		       (match_operand:MVE_5 2 "s_register_operand" "w")
		       (match_operand:MVE_5 3 "s_register_operand" "w")
		       (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VQDMULLBQ_M_S))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vqdmullbt.s%#<V_sz_elem>\t%q0, %q2, %q3"
  [(set_attr "type" "mve_move")
   (set_attr "length" "8")])
|
||||
|
||||
;;
|
||||
;; [vqdmulltq_m_n_s])
|
||||
;;
|
||||
;; Predicated vqdmullt with a scalar second multiplicand; operand 1 is tied
;; to the destination, operand 4 is the VPR predicate.
(define_insn "mve_vqdmulltq_m_n_s<mode>"
  [
   (set (match_operand:<V_double_width> 0 "s_register_operand" "=w")
	(unspec:<V_double_width> [(match_operand:<V_double_width> 1 "s_register_operand" "0")
		       (match_operand:MVE_5 2 "s_register_operand" "w")
		       (match_operand:<V_elem> 3 "s_register_operand" "r")
		       (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VQDMULLTQ_M_N_S))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vqdmulltt.s%#<V_sz_elem>\t%q0, %q2, %3"
  [(set_attr "type" "mve_move")
   (set_attr "length" "8")])
|
||||
|
||||
;;
|
||||
;; [vqdmulltq_m_s])
|
||||
;;
|
||||
;; Predicated vqdmullt, vector-by-vector form; operand 1 is tied to the
;; destination, operand 4 is the VPR predicate.
(define_insn "mve_vqdmulltq_m_s<mode>"
  [
   (set (match_operand:<V_double_width> 0 "s_register_operand" "=w")
	(unspec:<V_double_width> [(match_operand:<V_double_width> 1 "s_register_operand" "0")
		       (match_operand:MVE_5 2 "s_register_operand" "w")
		       (match_operand:MVE_5 3 "s_register_operand" "w")
		       (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VQDMULLTQ_M_S))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vqdmulltt.s%#<V_sz_elem>\t%q0, %q2, %q3"
  [(set_attr "type" "mve_move")
   (set_attr "length" "8")])
|
||||
|
||||
;;
|
||||
;; [vqrshrunbq_m_n_s])
|
||||
;;
|
||||
;; Predicated vqrshrunb; shift immediate constrained by mve_imm_8/"Rb",
;; operand 4 is the VPR predicate.
(define_insn "mve_vqrshrunbq_m_n_s<mode>"
  [
   (set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
	(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
		       (match_operand:MVE_5 2 "s_register_operand" "w")
		       (match_operand:SI 3 "mve_imm_8" "Rb")
		       (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VQRSHRUNBQ_M_N_S))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vqrshrunbt.s%#<V_sz_elem>\t%q0, %q2, %3"
  [(set_attr "type" "mve_move")
   (set_attr "length" "8")])
|
||||
|
||||
;;
|
||||
;; [vqrshruntq_m_n_s])
|
||||
;;
|
||||
;; Predicated vqrshrunt; same operand layout as the bottom variant.
(define_insn "mve_vqrshruntq_m_n_s<mode>"
  [
   (set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
	(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
		       (match_operand:MVE_5 2 "s_register_operand" "w")
		       (match_operand:SI 3 "mve_imm_8" "Rb")
		       (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VQRSHRUNTQ_M_N_S))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vqrshruntt.s%#<V_sz_elem>\t%q0, %q2, %3"
  [(set_attr "type" "mve_move")
   (set_attr "length" "8")])
|
||||
|
||||
;;
|
||||
;; [vqshrunbq_m_n_s])
|
||||
;;
|
||||
;; Predicated vqshrunb; shift immediate constrained by mve_imm_8/"Rb",
;; operand 4 is the VPR predicate.
(define_insn "mve_vqshrunbq_m_n_s<mode>"
  [
   (set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
	(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
		       (match_operand:MVE_5 2 "s_register_operand" "w")
		       (match_operand:SI 3 "mve_imm_8" "Rb")
		       (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VQSHRUNBQ_M_N_S))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vqshrunbt.s%#<V_sz_elem>\t%q0, %q2, %3"
  [(set_attr "type" "mve_move")
   (set_attr "length" "8")])
|
||||
|
||||
;;
|
||||
;; [vqshruntq_m_n_s])
|
||||
;;
|
||||
;; Predicated vqshrunt; same operand layout as the bottom variant.
(define_insn "mve_vqshruntq_m_n_s<mode>"
  [
   (set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
	(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
		       (match_operand:MVE_5 2 "s_register_operand" "w")
		       (match_operand:SI 3 "mve_imm_8" "Rb")
		       (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VQSHRUNTQ_M_N_S))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vqshruntt.s%#<V_sz_elem>\t%q0, %q2, %3"
  [(set_attr "type" "mve_move")
   (set_attr "length" "8")])
|
||||
|
||||
;;
|
||||
;; [vrmlaldavhaq_p_u])
|
||||
;;
|
||||
;; Predicated vrmlaldavha (unsigned, V4SI only); 64-bit accumulator tied to
;; the destination, operand 4 is the VPR predicate.
(define_insn "mve_vrmlaldavhaq_p_uv4si"
  [
   (set (match_operand:DI 0 "s_register_operand" "=r")
	(unspec:DI [(match_operand:DI 1 "s_register_operand" "0")
		    (match_operand:V4SI 2 "s_register_operand" "w")
		    (match_operand:V4SI 3 "s_register_operand" "w")
		    (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VRMLALDAVHAQ_P_U))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vrmlaldavhat.u32\t%Q0, %R0, %q2, %q3"
  [(set_attr "type" "mve_move")
   (set_attr "length" "8")])
|
||||
|
||||
;;
|
||||
;; [vrmlaldavhaxq_p_s])
|
||||
;;
|
||||
;; Predicated vrmlaldavhax (signed exchange variant, V4SI only).
(define_insn "mve_vrmlaldavhaxq_p_sv4si"
  [
   (set (match_operand:DI 0 "s_register_operand" "=r")
	(unspec:DI [(match_operand:DI 1 "s_register_operand" "0")
		    (match_operand:V4SI 2 "s_register_operand" "w")
		    (match_operand:V4SI 3 "s_register_operand" "w")
		    (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VRMLALDAVHAXQ_P_S))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vrmlaldavhaxt.s32\t%Q0, %R0, %q2, %q3"
  [(set_attr "type" "mve_move")
   (set_attr "length" "8")])
|
||||
|
||||
;;
|
||||
;; [vrmlsldavhaq_p_s])
|
||||
;;
|
||||
;; Predicated vrmlsldavha (signed, V4SI only); 64-bit accumulator tied to
;; the destination, operand 4 is the VPR predicate.
(define_insn "mve_vrmlsldavhaq_p_sv4si"
  [
   (set (match_operand:DI 0 "s_register_operand" "=r")
	(unspec:DI [(match_operand:DI 1 "s_register_operand" "0")
		    (match_operand:V4SI 2 "s_register_operand" "w")
		    (match_operand:V4SI 3 "s_register_operand" "w")
		    (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VRMLSLDAVHAQ_P_S))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vrmlsldavhat.s32\t%Q0, %R0, %q2, %q3"
  [(set_attr "type" "mve_move")
   (set_attr "length" "8")])
|
||||
|
||||
;;
|
||||
;; [vrmlsldavhaxq_p_s])
|
||||
;;
|
||||
;; Predicated vrmlsldavhax (signed exchange variant, V4SI only).
(define_insn "mve_vrmlsldavhaxq_p_sv4si"
  [
   (set (match_operand:DI 0 "s_register_operand" "=r")
	(unspec:DI [(match_operand:DI 1 "s_register_operand" "0")
		    (match_operand:V4SI 2 "s_register_operand" "w")
		    (match_operand:V4SI 3 "s_register_operand" "w")
		    (match_operand:HI 4 "vpr_register_operand" "Up")]
	 VRMLSLDAVHAXQ_P_S))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vrmlsldavhaxt.s32\t%Q0, %R0, %q2, %q3"
  [(set_attr "type" "mve_move")
   (set_attr "length" "8")])
|
||||
|
@ -1,3 +1,85 @@
|
||||
2020-03-18  Andre Vieira  <andre.simoesdiasvieira@arm.com>
	    Mihail Ionescu  <mihail.ionescu@arm.com>
	    Srinath Parvathaneni  <srinath.parvathaneni@arm.com>
|
||||
|
||||
* gcc.target/arm/mve/intrinsics/vmlaldavaq_p_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vmlaldavaq_p_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vmlaldavaq_p_u16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vmlaldavaq_p_u32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vmlaldavaxq_p_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vmlaldavaxq_p_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vmlaldavaxq_p_u16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vmlaldavaxq_p_u32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vmlsldavaq_p_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vmlsldavaq_p_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vmlsldavaxq_p_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vmlsldavaxq_p_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vmullbq_poly_m_p16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vmullbq_poly_m_p8.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vmulltq_poly_m_p16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vmulltq_poly_m_p8.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqdmullbq_m_n_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqdmullbq_m_n_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqdmullbq_m_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqdmullbq_m_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqdmulltq_m_n_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqdmulltq_m_n_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqdmulltq_m_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqdmulltq_m_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_u16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqrshrnbq_m_n_u32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_u16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqrshrntq_m_n_u32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqrshrunbq_m_n_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqrshrunbq_m_n_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqrshruntq_m_n_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqrshruntq_m_n_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_u16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqshrnbq_m_n_u32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqshrntq_m_n_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqshrntq_m_n_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqshrntq_m_n_u16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqshrntq_m_n_u32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqshrunbq_m_n_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqshrunbq_m_n_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqshruntq_m_n_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vqshruntq_m_n_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vrmlaldavhaq_p_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vrmlaldavhaq_p_u32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vrmlaldavhaxq_p_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vrmlsldavhaq_p_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vrmlsldavhaxq_p_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_u16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vrshrnbq_m_n_u32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vrshrntq_m_n_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vrshrntq_m_n_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vrshrntq_m_n_u16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vrshrntq_m_n_u32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vshllbq_m_n_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vshllbq_m_n_s8.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vshllbq_m_n_u16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vshllbq_m_n_u8.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vshlltq_m_n_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vshlltq_m_n_s8.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vshlltq_m_n_u16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vshlltq_m_n_u8.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vshrnbq_m_n_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vshrnbq_m_n_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vshrnbq_m_n_u16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vshrnbq_m_n_u32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vshrntq_m_n_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vshrntq_m_n_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vshrntq_m_n_u16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vshrntq_m_n_u32.c: Likewise.
|
||||
|
||||
2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
|
||||
Mihail Ionescu <mihail.ionescu@arm.com>
|
||||
Srinath Parvathaneni <srinath.parvathaneni@arm.com>
|
||||
|
@ -0,0 +1,22 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic.  */
int64_t
foo (int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
{
  return vmlaldavaq_p_s16 (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlaldavat.s16" } } */

/* Polymorphic overload must map to the same instruction.  */
int64_t
foo1 (int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
{
  return vmlaldavaq_p (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlaldavat.s16" } } */
|
@ -0,0 +1,22 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic.  */
int64_t
foo (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
{
  return vmlaldavaq_p_s32 (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlaldavat.s32" } } */

/* Polymorphic overload must map to the same instruction.  */
int64_t
foo1 (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
{
  return vmlaldavaq_p (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlaldavat.s32" } } */
|
@ -0,0 +1,22 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic.  */
uint64_t
foo (uint64_t a, uint16x8_t b, uint16x8_t c, mve_pred16_t p)
{
  return vmlaldavaq_p_u16 (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlaldavat.u16" } } */

/* Polymorphic overload must map to the same instruction.  */
uint64_t
foo1 (uint64_t a, uint16x8_t b, uint16x8_t c, mve_pred16_t p)
{
  return vmlaldavaq_p (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlaldavat.u16" } } */
|
@ -0,0 +1,22 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic.  */
uint64_t
foo (uint64_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p)
{
  return vmlaldavaq_p_u32 (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlaldavat.u32" } } */

/* Polymorphic overload must map to the same instruction.  */
uint64_t
foo1 (uint64_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p)
{
  return vmlaldavaq_p (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlaldavat.u32" } } */
|
@ -0,0 +1,22 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic.  */
int64_t
foo (int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
{
  return vmlaldavaxq_p_s16 (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlaldavaxt.s16" } } */

/* Polymorphic overload must map to the same instruction.  */
int64_t
foo1 (int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
{
  return vmlaldavaxq_p (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlaldavaxt.s16" } } */
|
@ -0,0 +1,22 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic.  */
int64_t
foo (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
{
  return vmlaldavaxq_p_s32 (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlaldavaxt.s32" } } */

/* Polymorphic overload must map to the same instruction.  */
int64_t
foo1 (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
{
  return vmlaldavaxq_p (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlaldavaxt.s32" } } */
|
@ -0,0 +1,22 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic.  */
uint64_t
foo (uint64_t a, uint16x8_t b, uint16x8_t c, mve_pred16_t p)
{
  return vmlaldavaxq_p_u16 (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlaldavaxt.u16" } } */

/* Polymorphic overload must map to the same instruction.  */
uint64_t
foo1 (uint64_t a, uint16x8_t b, uint16x8_t c, mve_pred16_t p)
{
  return vmlaldavaxq_p (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlaldavaxt.u16" } } */
|
@ -0,0 +1,22 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic.  */
uint64_t
foo (uint64_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p)
{
  return vmlaldavaxq_p_u32 (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlaldavaxt.u32" } } */

/* Polymorphic overload must map to the same instruction.  */
uint64_t
foo1 (uint64_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p)
{
  return vmlaldavaxq_p (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlaldavaxt.u32" } } */
|
@ -0,0 +1,22 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic.  */
int64_t
foo (int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
{
  return vmlsldavaq_p_s16 (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlsldavat.s16" } } */

/* Polymorphic overload must map to the same instruction.  */
int64_t
foo1 (int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
{
  return vmlsldavaq_p (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlsldavat.s16" } } */
|
@ -0,0 +1,22 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic.  */
int64_t
foo (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
{
  return vmlsldavaq_p_s32 (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlsldavat.s32" } } */

/* Polymorphic overload must map to the same instruction.  */
int64_t
foo1 (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
{
  return vmlsldavaq_p (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlsldavat.s32" } } */
|
@ -0,0 +1,22 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic.  */
int64_t
foo (int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
{
  return vmlsldavaxq_p_s16 (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlsldavaxt.s16" } } */

/* Polymorphic overload must map to the same instruction.  */
int64_t
foo1 (int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
{
  return vmlsldavaxq_p (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlsldavaxt.s16" } } */
|
@ -0,0 +1,22 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic.  */
int64_t
foo (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
{
  return vmlsldavaxq_p_s32 (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlsldavaxt.s32" } } */

/* Polymorphic overload must map to the same instruction.  */
int64_t
foo1 (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
{
  return vmlsldavaxq_p (a, b, c, p);
}

/* { dg-final { scan-assembler "vmlsldavaxt.s32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic; must emit a vpst-predicated instruction.  */
uint32x4_t
foo (uint32x4_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
{
  return vmullbq_poly_m_p16 (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vmullbt.p16" } } */

/* Polymorphic overload must map to the same instruction.  */
uint32x4_t
foo1 (uint32x4_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
{
  return vmullbq_poly_m (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vmullbt.p16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic; must emit a vpst-predicated instruction.  */
uint16x8_t
foo (uint16x8_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
{
  return vmullbq_poly_m_p8 (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vmullbt.p8" } } */

/* Polymorphic overload must map to the same instruction.  */
uint16x8_t
foo1 (uint16x8_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
{
  return vmullbq_poly_m (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vmullbt.p8" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic; must emit a vpst-predicated instruction.  */
uint32x4_t
foo (uint32x4_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
{
  return vmulltq_poly_m_p16 (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vmulltt.p16" } } */

/* Polymorphic overload must map to the same instruction.  */
uint32x4_t
foo1 (uint32x4_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
{
  return vmulltq_poly_m (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vmulltt.p16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic; must emit a vpst-predicated instruction.  */
uint16x8_t
foo (uint16x8_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
{
  return vmulltq_poly_m_p8 (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vmulltt.p8" } } */

/* Polymorphic overload must map to the same instruction.  */
uint16x8_t
foo1 (uint16x8_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
{
  return vmulltq_poly_m (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vmulltt.p8" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic (vector x scalar form).  */
int32x4_t
foo (int32x4_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
{
  return vqdmullbq_m_n_s16 (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vqdmullbt.s16" } } */

/* Polymorphic overload must map to the same instruction.  */
int32x4_t
foo1 (int32x4_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
{
  return vqdmullbq_m (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vqdmullbt.s16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic (vector x scalar form).  */
int64x2_t
foo (int64x2_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
{
  return vqdmullbq_m_n_s32 (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vqdmullbt.s32" } } */

/* Polymorphic overload must map to the same instruction.  */
int64x2_t
foo1 (int64x2_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
{
  return vqdmullbq_m (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vqdmullbt.s32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic (vector x vector form).  */
int32x4_t
foo (int32x4_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
{
  return vqdmullbq_m_s16 (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vqdmullbt.s16" } } */

/* Polymorphic overload must map to the same instruction.  */
int32x4_t
foo1 (int32x4_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
{
  return vqdmullbq_m (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vqdmullbt.s16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic (vector x vector form).  */
int64x2_t
foo (int64x2_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
{
  return vqdmullbq_m_s32 (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vqdmullbt.s32" } } */

/* Polymorphic overload must map to the same instruction.  */
int64x2_t
foo1 (int64x2_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
{
  return vqdmullbq_m (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vqdmullbt.s32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic (vector x scalar form).  */
int32x4_t
foo (int32x4_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
{
  return vqdmulltq_m_n_s16 (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vqdmulltt.s16" } } */

/* Polymorphic overload must map to the same instruction.  */
int32x4_t
foo1 (int32x4_t inactive, int16x8_t a, int16_t b, mve_pred16_t p)
{
  return vqdmulltq_m (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vqdmulltt.s16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

/* Explicitly-typed intrinsic (vector x scalar form).  */
int64x2_t
foo (int64x2_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
{
  return vqdmulltq_m_n_s32 (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vqdmulltt.s32" } } */

/* Polymorphic overload must map to the same instruction.  */
int64x2_t
foo1 (int64x2_t inactive, int32x4_t a, int32_t b, mve_pred16_t p)
{
  return vqdmulltq_m (inactive, a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vqdmulltt.s32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int32x4_t
|
||||
foo (int32x4_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqdmulltq_m_s16 (inactive, a, b, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqdmulltt.s16" } } */
|
||||
|
||||
int32x4_t
|
||||
foo1 (int32x4_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqdmulltq_m (inactive, a, b, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqdmulltt.s16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int64x2_t
|
||||
foo (int64x2_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqdmulltq_m_s32 (inactive, a, b, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqdmulltt.s32" } } */
|
||||
|
||||
int64x2_t
|
||||
foo1 (int64x2_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqdmulltq_m (inactive, a, b, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqdmulltt.s32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int8x16_t
|
||||
foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrnbq_m_n_s16 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrnbt.s16" } } */
|
||||
|
||||
int8x16_t
|
||||
foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrnbq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrnbt.s16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int16x8_t
|
||||
foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrnbq_m_n_s32 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrnbt.s32" } } */
|
||||
|
||||
int16x8_t
|
||||
foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrnbq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrnbt.s32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint8x16_t
|
||||
foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrnbq_m_n_u16 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrnbt.u16" } } */
|
||||
|
||||
uint8x16_t
|
||||
foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrnbq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrnbt.u16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint16x8_t
|
||||
foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrnbq_m_n_u32 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrnbt.u32" } } */
|
||||
|
||||
uint16x8_t
|
||||
foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrnbq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrnbt.u32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int8x16_t
|
||||
foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrntq_m_n_s16 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrntt.s16" } } */
|
||||
|
||||
int8x16_t
|
||||
foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrntq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrntt.s16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int16x8_t
|
||||
foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrntq_m_n_s32 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrntt.s32" } } */
|
||||
|
||||
int16x8_t
|
||||
foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrntq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrntt.s32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint8x16_t
|
||||
foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrntq_m_n_u16 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrntt.u16" } } */
|
||||
|
||||
uint8x16_t
|
||||
foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrntq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrntt.u16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint16x8_t
|
||||
foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrntq_m_n_u32 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrntt.u32" } } */
|
||||
|
||||
uint16x8_t
|
||||
foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrntq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrntt.u32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint8x16_t
|
||||
foo (uint8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrunbq_m_n_s16 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrunbt.s16" } } */
|
||||
|
||||
uint8x16_t
|
||||
foo1 (uint8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrunbq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrunbt.s16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint16x8_t
|
||||
foo (uint16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrunbq_m_n_s32 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrunbt.s32" } } */
|
||||
|
||||
uint16x8_t
|
||||
foo1 (uint16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshrunbq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshrunbt.s32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint8x16_t
|
||||
foo (uint8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshruntq_m_n_s16 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshruntt.s16" } } */
|
||||
|
||||
uint8x16_t
|
||||
foo1 (uint8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshruntq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshruntt.s16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint16x8_t
|
||||
foo (uint16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshruntq_m_n_s32 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshruntt.s32" } } */
|
||||
|
||||
uint16x8_t
|
||||
foo1 (uint16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqrshruntq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqrshruntt.s32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int8x16_t
|
||||
foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrnbq_m_n_s16 (a, b, 7, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrnbt.s16" } } */
|
||||
|
||||
int8x16_t
|
||||
foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrnbq_m (a, b, 7, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrnbt.s16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int16x8_t
|
||||
foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrnbq_m_n_s32 (a, b, 11, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrnbt.s32" } } */
|
||||
|
||||
int16x8_t
|
||||
foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrnbq_m (a, b, 11, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrnbt.s32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint8x16_t
|
||||
foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrnbq_m_n_u16 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrnbt.u16" } } */
|
||||
|
||||
uint8x16_t
|
||||
foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrnbq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrnbt.u16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint16x8_t
|
||||
foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrnbq_m_n_u32 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrnbt.u32" } } */
|
||||
|
||||
uint16x8_t
|
||||
foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrnbq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrnbt.u32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int8x16_t
|
||||
foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrntq_m_n_s16 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrntt.s16" } } */
|
||||
|
||||
int8x16_t
|
||||
foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrntq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrntt.s16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int16x8_t
|
||||
foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrntq_m_n_s32 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrntt.s32" } } */
|
||||
|
||||
int16x8_t
|
||||
foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrntq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrntt.s32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint8x16_t
|
||||
foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrntq_m_n_u16 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrntt.u16" } } */
|
||||
|
||||
uint8x16_t
|
||||
foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrntq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrntt.u16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint16x8_t
|
||||
foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrntq_m_n_u32 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrntt.u32" } } */
|
||||
|
||||
uint16x8_t
|
||||
foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrntq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrntt.u32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint8x16_t
|
||||
foo (uint8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrunbq_m_n_s16 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrunbt.s16" } } */
|
||||
|
||||
uint8x16_t
|
||||
foo1 (uint8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrunbq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrunbt.s16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint16x8_t
|
||||
foo (uint16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrunbq_m_n_s32 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrunbt.s32" } } */
|
||||
|
||||
uint16x8_t
|
||||
foo1 (uint16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshrunbq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshrunbt.s32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint8x16_t
|
||||
foo (uint8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshruntq_m_n_s16 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshruntt.s16" } } */
|
||||
|
||||
uint8x16_t
|
||||
foo1 (uint8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshruntq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshruntt.s16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint16x8_t
|
||||
foo (uint16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshruntq_m_n_s32 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshruntt.s32" } } */
|
||||
|
||||
uint16x8_t
|
||||
foo1 (uint16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vqshruntq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vqshruntt.s32" } } */
|
@ -0,0 +1,22 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int64_t
|
||||
foo (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
|
||||
{
|
||||
return vrmlaldavhaq_p_s32 (a, b, c, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vrmlaldavhat.s32" } } */
|
||||
|
||||
int64_t
|
||||
foo1 (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
|
||||
{
|
||||
return vrmlaldavhaq_p (a, b, c, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vrmlaldavhat.s32" } } */
|
@ -0,0 +1,22 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint64_t
|
||||
foo (uint64_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p)
|
||||
{
|
||||
return vrmlaldavhaq_p_u32 (a, b, c, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vrmlaldavhat.u32" } } */
|
||||
|
||||
uint64_t
|
||||
foo1 (uint64_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p)
|
||||
{
|
||||
return vrmlaldavhaq_p (a, b, c, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vrmlaldavhat.u32" } } */
|
@ -0,0 +1,22 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int64_t
|
||||
foo (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
|
||||
{
|
||||
return vrmlaldavhaxq_p_s32 (a, b, c, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vrmlaldavhaxt.s32" } } */
|
||||
|
||||
int64_t
|
||||
foo1 (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
|
||||
{
|
||||
return vrmlaldavhaxq_p (a, b, c, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vrmlaldavhaxt.s32" } } */
|
@ -0,0 +1,22 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int64_t
|
||||
foo (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
|
||||
{
|
||||
return vrmlsldavhaq_p_s32 (a, b, c, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vrmlsldavhat.s32" } } */
|
||||
|
||||
int64_t
|
||||
foo1 (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
|
||||
{
|
||||
return vrmlsldavhaq_p (a, b, c, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vrmlsldavhat.s32" } } */
|
@ -0,0 +1,22 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int64_t
|
||||
foo (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
|
||||
{
|
||||
return vrmlsldavhaxq_p_s32 (a, b, c, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vrmlsldavhaxt.s32" } } */
|
||||
|
||||
int64_t
|
||||
foo1 (int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
|
||||
{
|
||||
return vrmlsldavhaxq_p (a, b, c, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vrmlsldavhaxt.s32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int8x16_t
|
||||
foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vrshrnbq_m_n_s16 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vrshrnbt.i16" } } */
|
||||
|
||||
int8x16_t
|
||||
foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vrshrnbq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vrshrnbt.i16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int16x8_t
|
||||
foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vrshrnbq_m_n_s32 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vrshrnbt.i32" } } */
|
||||
|
||||
int16x8_t
|
||||
foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vrshrnbq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vrshrnbt.i32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint8x16_t
|
||||
foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vrshrnbq_m_n_u16 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vrshrnbt.i16" } } */
|
||||
|
||||
uint8x16_t
|
||||
foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vrshrnbq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vrshrnbt.i16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint16x8_t
|
||||
foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vrshrnbq_m_n_u32 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vrshrnbt.i32" } } */
|
||||
|
||||
uint16x8_t
|
||||
foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vrshrnbq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vrshrnbt.i32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int8x16_t
|
||||
foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vrshrntq_m_n_s16 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vrshrntt.i16" } } */
|
||||
|
||||
int8x16_t
|
||||
foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vrshrntq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vrshrntt.i16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int16x8_t
|
||||
foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vrshrntq_m_n_s32 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vrshrntt.i32" } } */
|
||||
|
||||
int16x8_t
|
||||
foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vrshrntq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vrshrntt.i32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint8x16_t
|
||||
foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vrshrntq_m_n_u16 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vrshrntt.i16" } } */
|
||||
|
||||
uint8x16_t
|
||||
foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vrshrntq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vrshrntt.i16" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
uint16x8_t
|
||||
foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vrshrntq_m_n_u32 (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vrshrntt.i32" } } */
|
||||
|
||||
uint16x8_t
|
||||
foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
|
||||
{
|
||||
return vrshrntq_m (a, b, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vrshrntt.i32" } } */
|
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int32x4_t
|
||||
foo (int32x4_t inactive, int16x8_t a, mve_pred16_t p)
|
||||
{
|
||||
return vshllbq_m_n_s16 (inactive, a, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vshllbt.s16" } } */
|
||||
|
||||
int32x4_t
|
||||
foo1 (int32x4_t inactive, int16x8_t a, mve_pred16_t p)
|
||||
{
|
||||
return vshllbq_m (inactive, a, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vshllbt.s16" } } */
|
24
gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_m_n_s8.c
Normal file
24
gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_m_n_s8.c
Normal file
@ -0,0 +1,24 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
|
||||
/* { dg-add-options arm_v8_1m_mve } */
|
||||
/* { dg-additional-options "-O2" } */
|
||||
|
||||
#include "arm_mve.h"
|
||||
|
||||
int16x8_t
|
||||
foo (int16x8_t inactive, int8x16_t a, mve_pred16_t p)
|
||||
{
|
||||
return vshllbq_m_n_s8 (inactive, a, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vshllbt.s8" } } */
|
||||
|
||||
int16x8_t
|
||||
foo1 (int16x8_t inactive, int8x16_t a, mve_pred16_t p)
|
||||
{
|
||||
return vshllbq_m (inactive, a, 1, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
/* { dg-final { scan-assembler "vshllbt.s8" } } */
|
/* Test the predicated (merging) vshllbq intrinsic: widening shift left of
   the bottom halves, u16 -> u32, by both its explicit and polymorphic names.  */
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint32x4_t
foo (uint32x4_t inactive, uint16x8_t a, mve_pred16_t p)
{
  return vshllbq_m_n_u16 (inactive, a, 1, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshllbt.u16" } } */

uint32x4_t
foo1 (uint32x4_t inactive, uint16x8_t a, mve_pred16_t p)
{
  return vshllbq_m (inactive, a, 1, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshllbt.u16" } } */
24
gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_m_n_u8.c
Normal file
24
gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_m_n_u8.c
Normal file
/* Test the predicated (merging) vshllbq intrinsic: widening shift left of
   the bottom halves, u8 -> u16, by both its explicit and polymorphic names.  */
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint16x8_t
foo (uint16x8_t inactive, uint8x16_t a, mve_pred16_t p)
{
  return vshllbq_m_n_u8 (inactive, a, 1, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshllbt.u8" } } */

uint16x8_t
foo1 (uint16x8_t inactive, uint8x16_t a, mve_pred16_t p)
{
  return vshllbq_m (inactive, a, 1, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshllbt.u8" } } */
/* Test the predicated (merging) vshlltq intrinsic: widening shift left of
   the top halves, s16 -> s32, by both its explicit and polymorphic names.  */
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

int32x4_t
foo (int32x4_t inactive, int16x8_t a, mve_pred16_t p)
{
  return vshlltq_m_n_s16 (inactive, a, 1, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshlltt.s16" } } */

int32x4_t
foo1 (int32x4_t inactive, int16x8_t a, mve_pred16_t p)
{
  return vshlltq_m (inactive, a, 1, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshlltt.s16" } } */
24
gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_m_n_s8.c
Normal file
24
gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_m_n_s8.c
Normal file
/* Test the predicated (merging) vshlltq intrinsic: widening shift left of
   the top halves, s8 -> s16, by both its explicit and polymorphic names.  */
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

int16x8_t
foo (int16x8_t inactive, int8x16_t a, mve_pred16_t p)
{
  return vshlltq_m_n_s8 (inactive, a, 1, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshlltt.s8" } } */

int16x8_t
foo1 (int16x8_t inactive, int8x16_t a, mve_pred16_t p)
{
  return vshlltq_m (inactive, a, 1, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshlltt.s8" } } */
/* Test the predicated (merging) vshlltq intrinsic: widening shift left of
   the top halves, u16 -> u32, by both its explicit and polymorphic names.  */
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint32x4_t
foo (uint32x4_t inactive, uint16x8_t a, mve_pred16_t p)
{
  return vshlltq_m_n_u16 (inactive, a, 1, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshlltt.u16" } } */

uint32x4_t
foo1 (uint32x4_t inactive, uint16x8_t a, mve_pred16_t p)
{
  return vshlltq_m (inactive, a, 1, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshlltt.u16" } } */
24
gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_m_n_u8.c
Normal file
24
gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_m_n_u8.c
Normal file
/* Test the predicated (merging) vshlltq intrinsic: widening shift left of
   the top halves, u8 -> u16, by both its explicit and polymorphic names.  */
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint16x8_t
foo (uint16x8_t inactive, uint8x16_t a, mve_pred16_t p)
{
  return vshlltq_m_n_u8 (inactive, a, 1, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshlltt.u8" } } */

uint16x8_t
foo1 (uint16x8_t inactive, uint8x16_t a, mve_pred16_t p)
{
  return vshlltq_m (inactive, a, 1, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshlltt.u8" } } */
/* Test the predicated (merging) vshrnbq intrinsic: narrowing shift right
   into the bottom halves, s16 -> s8, by both its explicit and polymorphic
   names.  */
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

int8x16_t
foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
{
  return vshrnbq_m_n_s16 (a, b, 8, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshrnbt.i16" } } */

int8x16_t
foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
{
  return vshrnbq_m (a, b, 8, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshrnbt.i16" } } */
/* Test the predicated (merging) vshrnbq intrinsic: narrowing shift right
   into the bottom halves, s32 -> s16, by both its explicit and polymorphic
   names.  */
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

int16x8_t
foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
{
  return vshrnbq_m_n_s32 (a, b, 16, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshrnbt.i32" } } */

int16x8_t
foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
{
  return vshrnbq_m (a, b, 16, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshrnbt.i32" } } */
/* Test the predicated (merging) vshrnbq intrinsic: narrowing shift right
   into the bottom halves, u16 -> u8, by both its explicit and polymorphic
   names.  */
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint8x16_t
foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
{
  return vshrnbq_m_n_u16 (a, b, 8, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshrnbt.i16" } } */

uint8x16_t
foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
{
  return vshrnbq_m (a, b, 8, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshrnbt.i16" } } */
/* Test the predicated (merging) vshrnbq intrinsic: narrowing shift right
   into the bottom halves, u32 -> u16, by both its explicit and polymorphic
   names.  */
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint16x8_t
foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
{
  return vshrnbq_m_n_u32 (a, b, 16, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshrnbt.i32" } } */

uint16x8_t
foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
{
  return vshrnbq_m (a, b, 16, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshrnbt.i32" } } */
/* Test the predicated (merging) vshrntq intrinsic: narrowing shift right
   into the top halves, s16 -> s8, by both its explicit and polymorphic
   names.  */
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

int8x16_t
foo (int8x16_t a, int16x8_t b, mve_pred16_t p)
{
  return vshrntq_m_n_s16 (a, b, 8, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshrntt.i16" } } */

int8x16_t
foo1 (int8x16_t a, int16x8_t b, mve_pred16_t p)
{
  return vshrntq_m (a, b, 8, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshrntt.i16" } } */
/* Test the predicated (merging) vshrntq intrinsic: narrowing shift right
   into the top halves, s32 -> s16, by both its explicit and polymorphic
   names.  */
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

int16x8_t
foo (int16x8_t a, int32x4_t b, mve_pred16_t p)
{
  return vshrntq_m_n_s32 (a, b, 16, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshrntt.i32" } } */

int16x8_t
foo1 (int16x8_t a, int32x4_t b, mve_pred16_t p)
{
  return vshrntq_m (a, b, 16, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshrntt.i32" } } */
/* Test the predicated (merging) vshrntq intrinsic: narrowing shift right
   into the top halves, u16 -> u8, by both its explicit and polymorphic
   names.  */
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint8x16_t
foo (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
{
  return vshrntq_m_n_u16 (a, b, 8, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshrntt.i16" } } */

uint8x16_t
foo1 (uint8x16_t a, uint16x8_t b, mve_pred16_t p)
{
  return vshrntq_m (a, b, 8, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshrntt.i16" } } */
/* Test the predicated (merging) vshrntq intrinsic: narrowing shift right
   into the top halves, u32 -> u16, by both its explicit and polymorphic
   names.

   NOTE(review): this file originally hard-coded
   "-march=armv8.1-m.main+mve -mfloat-abi=hard" with a dg-skip-if on
   -mfpu=*, unlike every sibling test in this patch.  It is brought in
   line with the other vshll/vshrn/vshrnt tests, which use the
   arm_v8_1m_mve_ok effective-target + dg-add-options idiom so the test
   runs (and is skipped) consistently across multilib configurations.  */
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint16x8_t
foo (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
{
  return vshrntq_m_n_u32 (a, b, 16, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshrntt.i32" } } */

uint16x8_t
foo1 (uint16x8_t a, uint32x4_t b, mve_pred16_t p)
{
  return vshrntq_m (a, b, 16, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vshrntt.i32" } } */
Loading…
x
Reference in New Issue
Block a user