[AArch64] Use type attributes to mark types that use the SVE PCS

The SVE port needs to maintain a different type identity for
GNU vectors and "SVE vectors", since the types use different ABIs.
Until now we've done that using pointer equality between the
TYPE_MAIN_VARIANT and the built-in SVE type.
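For reference, the old test was effectively a pointer comparison.
A minimal sketch of the scheme being replaced (hypothetical helper
name; the real deleted code appears in the diff below):

  static bool
  old_style_svbool_type_p (const_tree type)
  {
    /* Streamed LTO copies of the type are not pointer-equal to the
       built-in type, so they fail this test.  */
    return (type != error_mark_node
	    && (TYPE_MAIN_VARIANT (type)
		== abi_vector_types[VECTOR_TYPE_svbool_t]));
  }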

However, as Richard B noted, that doesn't work well for LTO,
where we stream both GNU and SVE types from a file instead of
creating them directly.  We need a mechanism for distinguishing
the types using streamed type information.
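To illustrate what has to stay distinct (hypothetical user-level
code, not part of the patch):

  #include <arm_sve.h>

  /* Both are vectors of int8_t, but they are distinct types with
     distinct ABIs: svint8_t is sizeless and uses the SVE PCS, being
     passed and returned in Z registers, while the fixed-length GNU
     vector follows the base AAPCS64.  LTO must not conflate them.  */
  typedef int8_t gnu_vec __attribute__ ((vector_size (32)));

  svint8_t sve_fn (svint8_t x);   /* SVE PCS */
  gnu_vec gnu_fn (gnu_vec x);     /* base AAPCS64 */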

This patch does that using a new type attribute.  This attribute
is only meant to be used for the built-in SVE types and shouldn't
be user-visible.  The patch tries to ensure this by including a space
in the attribute name, like we already do for things like "fn spec"
and "omp declare simd".

2020-01-07  Richard Sandiford  <richard.sandiford@arm.com>

gcc/
	* config/aarch64/aarch64-protos.h (aarch64_sve::svbool_type_p)
	(aarch64_sve::nvectors_if_data_type): Replace with...
	(aarch64_sve::builtin_type_p): ...this.
	* config/aarch64/aarch64-sve-builtins.cc: Include attribs.h.
	(find_vector_type): Delete.
	(add_sve_type_attribute): New function.
	(lookup_sve_type_attribute): Likewise.
	(register_builtin_types): Add an "SVE type" attribute to each type.
	(register_tuple_type): Likewise.
	(svbool_type_p, nvectors_if_data_type): Delete.
	(mangle_builtin_type): Use lookup_sve_type_attribute.
	(builtin_type_p): Likewise.  Add an overload that returns the
	number of constituent vector and predicate registers.
	* config/aarch64/aarch64.c (aarch64_sve_argument_p): Delete.
	(aarch64_returns_value_in_sve_regs_p): Use aarch64_sve::builtin_type_p
	instead of aarch64_sve_argument_p.
	(aarch64_takes_arguments_in_sve_regs_p): Likewise.
	(aarch64_pass_by_reference): Likewise.
	(aarch64_function_value_1): Likewise.
	(aarch64_return_in_memory): Likewise.
	(aarch64_layout_arg): Likewise.

gcc/testsuite/
	* g++.target/aarch64/sve/acle/general-c++/mangle_5.C: New test.
	* gcc.target/aarch64/sve/pcs/asm_1.c: Likewise.
	* gcc.target/aarch64/sve/pcs/asm_2.c: Likewise.
	* gcc.target/aarch64/sve/pcs/asm_3.c: Likewise.

From-SVN: r279953

--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog

@@ -1,3 +1,27 @@
+2020-01-07  Richard Sandiford  <richard.sandiford@arm.com>
+
+	* config/aarch64/aarch64-protos.h (aarch64_sve::svbool_type_p)
+	(aarch64_sve::nvectors_if_data_type): Replace with...
+	(aarch64_sve::builtin_type_p): ...this.
+	* config/aarch64/aarch64-sve-builtins.cc: Include attribs.h.
+	(find_vector_type): Delete.
+	(add_sve_type_attribute): New function.
+	(lookup_sve_type_attribute): Likewise.
+	(register_builtin_types): Add an "SVE type" attribute to each type.
+	(register_tuple_type): Likewise.
+	(svbool_type_p, nvectors_if_data_type): Delete.
+	(mangle_builtin_type): Use lookup_sve_type_attribute.
+	(builtin_type_p): Likewise.  Add an overload that returns the
+	number of constituent vector and predicate registers.
+	* config/aarch64/aarch64.c (aarch64_sve_argument_p): Delete.
+	(aarch64_returns_value_in_sve_regs_p): Use aarch64_sve::builtin_type_p
+	instead of aarch64_sve_argument_p.
+	(aarch64_takes_arguments_in_sve_regs_p): Likewise.
+	(aarch64_pass_by_reference): Likewise.
+	(aarch64_function_value_1): Likewise.
+	(aarch64_return_in_memory): Likewise.
+	(aarch64_layout_arg): Likewise.
+
 2020-01-07  Jakub Jelinek  <jakub@redhat.com>
 
 	PR tree-optimization/93156

--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h

@@ -706,8 +706,7 @@ namespace aarch64_sve {
   void handle_arm_sve_h ();
   tree builtin_decl (unsigned, bool);
   bool builtin_type_p (const_tree);
-  bool svbool_type_p (const_tree);
-  unsigned int nvectors_if_data_type (const_tree);
+  bool builtin_type_p (const_tree, unsigned int *, unsigned int *);
   const char *mangle_builtin_type (const_tree);
   tree resolve_overloaded_builtin (location_t, unsigned int,
				   vec<tree, va_gc> *);

--- a/gcc/config/aarch64/aarch64-sve-builtins.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins.cc

@@ -47,6 +47,7 @@
 #include "gimple-fold.h"
 #include "langhooks.h"
 #include "stringpool.h"
+#include "attribs.h"
 #include "aarch64-sve-builtins.h"
 #include "aarch64-sve-builtins-base.h"
 #include "aarch64-sve-builtins-shapes.h"
@@ -418,18 +419,31 @@ static hash_table<registered_function_hasher> *function_table;
    when the required extension is disabled.  */
 static bool reported_missing_extension_p;
 
-/* If TYPE is an ACLE vector type, return the associated vector_type,
-   otherwise return NUM_VECTOR_TYPES.  */
-static vector_type_index
-find_vector_type (const_tree type)
+/* Record that TYPE is an ABI-defined SVE type that contains NUM_ZR SVE vectors
+   and NUM_PR SVE predicates.  MANGLED_NAME, if nonnull, is the ABI-defined
+   mangling of the type.  */
+static void
+add_sve_type_attribute (tree type, unsigned int num_zr, unsigned int num_pr,
+			const char *mangled_name)
 {
-  /* A linear search should be OK here, since the code isn't hot and
-     the number of types is only small.  */
-  type = TYPE_MAIN_VARIANT (type);
-  for (unsigned int i = 0; i < NUM_VECTOR_TYPES; ++i)
-    if (type == abi_vector_types[i])
-      return vector_type_index (i);
-  return NUM_VECTOR_TYPES;
+  tree mangled_name_tree
+    = (mangled_name ? get_identifier (mangled_name) : NULL_TREE);
+
+  tree value = tree_cons (NULL_TREE, mangled_name_tree, NULL_TREE);
+  value = tree_cons (NULL_TREE, size_int (num_pr), value);
+  value = tree_cons (NULL_TREE, size_int (num_zr), value);
+  TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("SVE type"), value,
+				      TYPE_ATTRIBUTES (type));
 }
+
+/* If TYPE is an ABI-defined SVE type, return its attribute descriptor,
+   otherwise return null.  */
+static tree
+lookup_sve_type_attribute (const_tree type)
+{
+  if (type == error_mark_node)
+    return NULL_TREE;
+  return lookup_attribute ("SVE type", TYPE_ATTRIBUTES (type));
+}
 
 /* If TYPE is a valid SVE element type, return the corresponding type
/* If TYPE is a valid SVE element type, return the corresponding type
@@ -2986,6 +3000,7 @@ register_builtin_types ()
     {
       tree eltype = scalar_types[i];
       tree vectype;
+      unsigned int num_zr = 0, num_pr = 0;
       if (eltype == boolean_type_node)
	{
	  vectype = build_truth_vector_type_for_mode (BYTES_PER_SVE_VECTOR,
@@ -2995,6 +3010,7 @@ register_builtin_types ()
		      && TYPE_ALIGN (vectype) == 16
		      && known_eq (wi::to_poly_offset (TYPE_SIZE (vectype)),
				   BYTES_PER_SVE_VECTOR));
+	  num_pr = 1;
	}
       else
	{
@@ -3006,12 +3022,15 @@ register_builtin_types ()
		      && TYPE_ALIGN (vectype) == 128
		      && known_eq (wi::to_poly_offset (TYPE_SIZE (vectype)),
				   BITS_PER_SVE_VECTOR));
+	  num_zr = 1;
	}
       vectype = build_distinct_type_copy (vectype);
       gcc_assert (vectype == TYPE_MAIN_VARIANT (vectype));
       SET_TYPE_STRUCTURAL_EQUALITY (vectype);
       TYPE_ARTIFICIAL (vectype) = 1;
       TYPE_INDIVISIBLE_P (vectype) = 1;
+      add_sve_type_attribute (vectype, num_zr, num_pr,
+			      vector_types[i].mangled_name);
       abi_vector_types[i] = vectype;
       lang_hooks.types.register_builtin_type (vectype,
					       vector_types[i].abi_name);
@@ -3076,6 +3095,7 @@ register_tuple_type (unsigned int num_vectors, vector_type_index type)
			     get_identifier ("__val"), array_type);
   DECL_FIELD_CONTEXT (field) = tuple_type;
   TYPE_FIELDS (tuple_type) = field;
+  add_sve_type_attribute (tuple_type, num_vectors, 0, NULL);
   layout_type (tuple_type);
   gcc_assert (VECTOR_MODE_P (TYPE_MODE (tuple_type))
	       && TYPE_MODE_RAW (tuple_type) == TYPE_MODE (tuple_type)
@@ -3247,64 +3267,45 @@ expand_builtin (unsigned int code, tree exp, rtx target)
   return function_expander (rfn.instance, rfn.decl, exp, target).expand ();
 }
 
-/* Return true if TYPE is the ABI-defined __SVBool_t type.  */
-bool
-svbool_type_p (const_tree type)
-{
-  tree abi_type = abi_vector_types[VECTOR_TYPE_svbool_t];
-  return type != error_mark_node && TYPE_MAIN_VARIANT (type) == abi_type;
-}
-
 /* If TYPE is a built-in type defined by the SVE ABI, return the mangled name,
    otherwise return NULL.  */
 const char *
 mangle_builtin_type (const_tree type)
 {
-  if (type == error_mark_node)
-    return NULL;
-  vector_type_index vtype = find_vector_type (type);
-  if (vtype != NUM_VECTOR_TYPES)
-    return vector_types[vtype].mangled_name;
+  /* ??? The C++ frontend normally strips qualifiers and attributes before
+     calling this hook, adding separate mangling for attributes that affect
+     type identity.  Fortunately the type copy will have the same TYPE_NAME
+     as the original, so we can get the attributes from there.  */
+  if (TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL)
+    type = TREE_TYPE (TYPE_NAME (type));
+  if (tree attr = lookup_sve_type_attribute (type))
+    if (tree id = TREE_VALUE (chain_index (2, TREE_VALUE (attr))))
+      return IDENTIFIER_POINTER (id);
   return NULL;
 }
 
-/* If TYPE is one of the ABI-defined SVE vector types, or an ACLE-defined
-   tuple of them, return the number of vectors it contains.  Return 0
-   otherwise.  */
-unsigned int
-nvectors_if_data_type (const_tree type)
-{
-  if (type == error_mark_node)
-    return 0;
-
-  type = TYPE_MAIN_VARIANT (type);
-  if (VECTOR_TYPE_P (type))
-    {
-      vector_type_index type_id = find_vector_type (type);
-      if (type_id != VECTOR_TYPE_svbool_t && type_id != NUM_VECTOR_TYPES)
-	return 1;
-    }
-  else if (TREE_CODE (type) == RECORD_TYPE)
-    {
-      for (unsigned int size_i = 1; size_i < MAX_TUPLE_SIZE; ++size_i)
-	for (unsigned int type_i = 0; type_i < NUM_VECTOR_TYPES; ++type_i)
-	  {
-	    tree tuple_type = acle_vector_types[size_i][type_i];
-	    if (tuple_type && type == TYPE_MAIN_VARIANT (tuple_type))
-	      return size_i + 1;
-	  }
-    }
-  return 0;
-}
-
-/* Return true if TYPE is a built-in type defined by the SVE ABI.  */
+/* Return true if TYPE is a built-in SVE type defined by the ABI or ACLE.  */
 bool
 builtin_type_p (const_tree type)
 {
-  return svbool_type_p (type) || nvectors_if_data_type (type) > 0;
+  return lookup_sve_type_attribute (type);
+}
+
+/* Return true if TYPE is a built-in SVE type defined by the ABI or ACLE.
+   If so, store the number of constituent SVE vectors in *NUM_ZR and the
+   number of constituent SVE predicates in *NUM_PR.  */
+bool
+builtin_type_p (const_tree type, unsigned int *num_zr, unsigned int *num_pr)
+{
+  if (tree attr = lookup_sve_type_attribute (type))
+    {
+      tree num_zr_node = TREE_VALUE (attr);
+      tree num_pr_node = TREE_CHAIN (num_zr_node);
+      *num_zr = tree_to_uhwi (TREE_VALUE (num_zr_node));
+      *num_pr = tree_to_uhwi (TREE_VALUE (num_pr_node));
+      return true;
+    }
+  return false;
 }
 
 /* Implement TARGET_VERIFY_TYPE_CONTEXT for SVE types.  */

--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c

@@ -1246,6 +1246,7 @@ static const struct attribute_spec aarch64_attribute_table[] =
       affects_type_identity, handler, exclude } */
   { "aarch64_vector_pcs", 0, 0, false, true,  true,  true,
			   handle_aarch64_vector_pcs_attribute, NULL },
+  { "SVE type",		  3, 3, false, true,  false, true,  NULL, NULL },
   { NULL,		  0, 0, false, false, false, false, NULL, NULL }
 };
 
@@ -2042,37 +2043,15 @@ aarch64_hard_regno_mode_ok (unsigned regno, machine_mode mode)
-/* Return true if TYPE is a type that should be passed or returned in
-   SVE registers, assuming enough registers are available.  When returning
-   true, set *NUM_ZR and *NUM_PR to the number of required Z and P registers
-   respectively.  */
-static bool
-aarch64_sve_argument_p (const_tree type, unsigned int *num_zr,
-			unsigned int *num_pr)
-{
-  if (aarch64_sve::svbool_type_p (type))
-    {
-      *num_pr = 1;
-      *num_zr = 0;
-      return true;
-    }
-
-  if (unsigned int nvectors = aarch64_sve::nvectors_if_data_type (type))
-    {
-      *num_pr = 0;
-      *num_zr = nvectors;
-      return true;
-    }
-
-  return false;
-}
-
 /* Return true if a function with type FNTYPE returns its value in
    SVE vector or predicate registers.  */
 
 static bool
 aarch64_returns_value_in_sve_regs_p (const_tree fntype)
 {
-  unsigned int num_zr, num_pr;
   tree return_type = TREE_TYPE (fntype);
   return (return_type != error_mark_node
-	  && aarch64_sve_argument_p (return_type, &num_zr, &num_pr));
+	  && aarch64_sve::builtin_type_p (return_type));
 }
 
 /* Return true if a function with type FNTYPE takes arguments in
@@ -2096,8 +2075,7 @@ aarch64_takes_arguments_in_sve_regs_p (const_tree fntype)
       function_arg_info arg (arg_type, /*named=*/true);
       apply_pass_by_reference_rules (&args_so_far_v, arg);
 
-      unsigned int num_zr, num_pr;
-      if (aarch64_sve_argument_p (arg.type, &num_zr, &num_pr))
+      if (aarch64_sve::builtin_type_p (arg.type))
	return true;
 
       targetm.calls.function_arg_advance (args_so_far, arg);
@@ -4876,7 +4854,7 @@ aarch64_pass_by_reference (cumulative_args_t pcum_v,
   int nregs;
 
   unsigned int num_zr, num_pr;
-  if (arg.type && aarch64_sve_argument_p (arg.type, &num_zr, &num_pr))
+  if (arg.type && aarch64_sve::builtin_type_p (arg.type, &num_zr, &num_pr))
     {
       if (pcum && !pcum->silent_p && !TARGET_SVE)
	/* We can't gracefully recover at this point, so make this a
@@ -4955,7 +4933,7 @@ static rtx
 aarch64_function_value_1 (const_tree type, machine_mode mode)
 {
   unsigned int num_zr, num_pr;
-  if (type && aarch64_sve_argument_p (type, &num_zr, &num_pr))
+  if (type && aarch64_sve::builtin_type_p (type, &num_zr, &num_pr))
     {
       /* Don't raise an error here if we're called when SVE is disabled,
	  since this is really just a query function.  Other code must
@@ -5098,7 +5076,7 @@ aarch64_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
     return false;
 
   unsigned int num_zr, num_pr;
-  if (type && aarch64_sve_argument_p (type, &num_zr, &num_pr))
+  if (type && aarch64_sve::builtin_type_p (type, &num_zr, &num_pr))
     {
       /* All SVE types we support fit in registers.  For example, it isn't
	  yet possible to define an aggregate of 9+ SVE vectors or 5+ SVE
@@ -5226,7 +5204,7 @@ aarch64_layout_arg (cumulative_args_t pcum_v, const function_arg_info &arg,
   pcum->aapcs_arg_processed = true;
 
   unsigned int num_zr, num_pr;
-  if (type && aarch64_sve_argument_p (type, &num_zr, &num_pr))
+  if (type && aarch64_sve::builtin_type_p (type, &num_zr, &num_pr))
     {
       /* The PCS says that it is invalid to pass an SVE value to an
	  unprototyped function.  There is no ABI-defined location we

--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog

@@ -1,3 +1,10 @@
+2020-01-07  Richard Sandiford  <richard.sandiford@arm.com>
+
+	* g++.target/aarch64/sve/acle/general-c++/mangle_5.C: New test.
+	* gcc.target/aarch64/sve/pcs/asm_1.c: Likewise.
+	* gcc.target/aarch64/sve/pcs/asm_2.c: Likewise.
+	* gcc.target/aarch64/sve/pcs/asm_3.c: Likewise.
+
 2020-01-07  Jakub Jelinek  <jakub@redhat.com>
 
 	PR tree-optimization/93156

--- /dev/null
+++ b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_5.C

@@ -0,0 +1,8 @@
+typedef const __SVInt8_t foo;
+typedef volatile foo bar;
+
+foo f (foo x) { return x; }
+bar g (bar x) { return x; }
+
+/* { dg-final { scan-assembler {_Z1f10__SVInt8_t:\n} } } */
+/* { dg-final { scan-assembler {_Z1g10__SVInt8_t:\n} } } */

--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/asm_1.c

@@ -0,0 +1,70 @@
+/* { dg-do run { target aarch64_sve_hw } } */
+/* { dg-options "-O0 -ffixed-z0 -ffixed-p0" } */
+
+#include <arm_sve.h>
+
+#define ASM_FUNCTION(NAME, RET_TYPE, ARG_TYPE, INSN)	\
+extern RET_TYPE NAME (svbool_t, ARG_TYPE);		\
+asm(							\
+"	.type " #NAME ", %function\n"			\
+#NAME ":\n"						\
+"	" INSN "\n"					\
+"	ret\n"						\
+"	.size " #NAME ", .-" #NAME "\n"			\
+)
+
+ASM_FUNCTION (u8_callee, uint64_t, svuint8_t,
+	      "uaddv d0, p0, z0.b\n\tfmov x0, d0");
+ASM_FUNCTION (u16_callee, uint64_t, svuint16_t,
+	      "uaddv d0, p0, z0.h\n\tfmov x0, d0");
+ASM_FUNCTION (u32_callee, uint64_t, svuint32_t,
+	      "uaddv d0, p0, z0.s\n\tfmov x0, d0");
+ASM_FUNCTION (u64_callee, uint64_t, svuint64_t,
+	      "uaddv d0, p0, z0.d\n\tfmov x0, d0");
+ASM_FUNCTION (s8_callee, int64_t, svint8_t,
+	      "saddv d0, p0, z0.b\n\tfmov x0, d0");
+ASM_FUNCTION (s16_callee, int64_t, svint16_t,
+	      "saddv d0, p0, z0.h\n\tfmov x0, d0");
+ASM_FUNCTION (s32_callee, int64_t, svint32_t,
+	      "saddv d0, p0, z0.s\n\tfmov x0, d0");
+ASM_FUNCTION (s64_callee, int64_t, svint64_t,
+	      "uaddv d0, p0, z0.d\n\tfmov x0, d0");
+ASM_FUNCTION (f16_callee, float16_t, svfloat16_t, "faddv\th0, p0, z0.h");
+ASM_FUNCTION (f32_callee, float32_t, svfloat32_t, "faddv\ts0, p0, z0.s");
+ASM_FUNCTION (f64_callee, float64_t, svfloat64_t, "faddv\td0, p0, z0.d");
+
+int
+main (void)
+{
+  if (u8_callee (svptrue_pat_b8 (SV_VL7), svdup_u8 (-1)) != 7 * 0xff)
+    __builtin_abort ();
+  if (u16_callee (svptrue_pat_b16 (SV_VL6), svdup_u16 (-1)) != 6 * 0xffff)
+    __builtin_abort ();
+  if (u32_callee (svptrue_pat_b32 (SV_VL3), svdup_u32 (-1))
+      != 3 * (uint64_t) (uint32_t) -1)
+    __builtin_abort ();
+  if (u64_callee (svptrue_pat_b64 (SV_VL2), svdup_u64 ((uint64_t) 1 << 33))
+      != (uint64_t) 1 << 34)
+    __builtin_abort ();
+  if (s8_callee (svptrue_pat_b8 (SV_VL7), svdup_s8 (-10)) != -70)
+    __builtin_abort ();
+  if (s16_callee (svptrue_pat_b16 (SV_VL6), svdup_s16 (-14)) != -84)
+    __builtin_abort ();
+  if (s32_callee (svptrue_pat_b32 (SV_VL3), svdup_s32 (-22)) != -66)
+    __builtin_abort ();
+  if (s64_callee (svptrue_pat_b64 (SV_VL2), svdup_s64 ((int64_t) 1 << 33))
+      != (int64_t) 1 << 34)
+    __builtin_abort ();
+  if (f16_callee (svptrue_pat_b16 (SV_VL5), svdup_f16 (1.0)) != 5.0)
+    __builtin_abort ();
+  if (f32_callee (svptrue_b32 (), svdup_f32 (3.0)) != 3 * svcntw ())
+    __builtin_abort ();
+  if (f64_callee (svptrue_b64 (), svdup_f64 (11.0)) != 11 * svcntd ())
+    __builtin_abort ();
+  return 0;
+}

--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/asm_2.c

@@ -0,0 +1,4 @@
+/* { dg-do run { target aarch64_sve_hw } } */
+/* { dg-options "-O2 -flto -ffixed-z0 -ffixed-p0" } */
+
+#include "asm_1.c"

--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/asm_3.c

@@ -0,0 +1,68 @@
+/* { dg-do run { target aarch64_sve_hw } } */
+/* { dg-options "-O0 -ffixed-z0 -ffixed-p0" } */
+
+#include <arm_sve.h>
+
+#define ASM_FUNCTION(NAME, RET_TYPE, ARG_TYPE, INSN)	\
+extern RET_TYPE NAME (svbool_t, ARG_TYPE);		\
+asm(							\
+"	.type " #NAME ", %function\n"			\
+#NAME ":\n"						\
+"	" INSN "\n"					\
+"	ret\n"						\
+"	.size " #NAME ", .-" #NAME "\n"			\
+)
+
+ASM_FUNCTION (u8_callee, svuint8_t, svuint8x2_t,
+	      "add z0.b, p0/m, z0.b, z1.b");
+ASM_FUNCTION (u16_callee, svuint16_t, svuint16x2_t,
+	      "add z0.h, p0/m, z0.h, z1.h");
+ASM_FUNCTION (u32_callee, svuint32_t, svuint32x2_t,
+	      "add z0.s, p0/m, z0.s, z1.s");
+ASM_FUNCTION (u64_callee, svuint64_t, svuint64x2_t,
+	      "add z0.d, p0/m, z0.d, z1.d");
+ASM_FUNCTION (s8_callee, svint8_t, svint8x2_t,
+	      "add z0.b, p0/m, z0.b, z1.b");
+ASM_FUNCTION (s16_callee, svint16_t, svint16x2_t,
+	      "add z0.h, p0/m, z0.h, z1.h");
+ASM_FUNCTION (s32_callee, svint32_t, svint32x2_t,
+	      "add z0.s, p0/m, z0.s, z1.s");
+ASM_FUNCTION (s64_callee, svint64_t, svint64x2_t,
+	      "add z0.d, p0/m, z0.d, z1.d");
+ASM_FUNCTION (f16_callee, svfloat16_t, svfloat16x2_t,
+	      "fadd z0.h, p0/m, z0.h, z1.h");
+ASM_FUNCTION (f32_callee, svfloat32_t, svfloat32x2_t,
+	      "fadd z0.s, p0/m, z0.s, z1.s");
+ASM_FUNCTION (f64_callee, svfloat64_t, svfloat64x2_t,
+	      "fadd z0.d, p0/m, z0.d, z1.d");
+
+int
+main (void)
+{
+#define CHECK(SUFFIX)						\
+  if (svptest_any (svptrue_b8 (),				\
+		   svcmpne (svptrue_b8 (),			\
+			    SUFFIX##_callee (svptrue_b8 (),	\
+					     svcreate2 (svdup_##SUFFIX (3), \
+							svdup_##SUFFIX (6))), \
+			    svdup_##SUFFIX (9))))		\
+    __builtin_abort ()
+
+  CHECK (u8);
+  CHECK (u16);
+  CHECK (u32);
+  CHECK (u64);
+  CHECK (s8);
+  CHECK (s16);
+  CHECK (s32);
+  CHECK (s64);
+  CHECK (f16);
+  CHECK (f32);
+  CHECK (f64);
+
+  return 0;
+}