Mirror of git://gcc.gnu.org/git/gcc.git
[PATCH, GCC/ARM, 5/10] Clear VFP registers with VSCCLRM
This patch adds a new pattern for the VSCCLRM instruction.
cmse_clear_registers () is then modified to use the new VSCCLRM
instruction when targeting Armv8.1-M Mainline, thus restricting the
existing register clearing code to other Armv8-M targets. Since the
VSCCLRM instruction mandates VPR in the register list, the pattern is
encoded with a parallel which only requires an unspec_volatile with the
VUNSPEC_VSCCLRM_VPR constant, modelling the VPR clearing. Other
expressions in the parallel are expected to be set expressions clearing
the VFP registers.

*** gcc/ChangeLog ***

2020-01-16  Mihail-Calin Ionescu  <mihail.ionescu@arm.com>
2020-01-16  Thomas Preud'homme  <thomas.preudhomme@arm.com>

	* config/arm/arm-protos.h (clear_operation_p): Adapt prototype.
	* config/arm/arm.c (clear_operation_p): Extend to be able to check a
	clear_vfp_multiple pattern based on a new vfp parameter.
	(cmse_clear_registers): Generate VSCCLRM to clear VFP registers when
	targeting Armv8.1-M Mainline.
	(cmse_nonsecure_entry_clear_before_return): Clear VFP registers
	unconditionally when targeting Armv8.1-M Mainline architecture. Check
	whether VFP registers are available before looking call_used_regs for a
	VFP register.
	* config/arm/predicates.md (clear_multiple_operation): Adapt to change
	of prototype of clear_operation_p.
	(clear_vfp_multiple_operation): New predicate.
	* config/arm/unspecs.md (VUNSPEC_VSCCLRM_VPR): New volatile unspec.
	* config/arm/vfp.md (clear_vfp_multiple): New define_insn.

*** gcc/testsuite/ChangeLog ***

2020-01-16  Mihail-Calin Ionescu  <mihail.ionescu@arm.com>
2020-01-16  Thomas Preud'homme  <thomas.preudhomme@arm.com>

	* gcc.target/arm/cmse/bitfield-1.c: Add check for VSCCLRM.
	* gcc.target/arm/cmse/bitfield-2.c: Likewise.
	* gcc.target/arm/cmse/bitfield-3.c: Likewise.
	* gcc.target/arm/cmse/cmse-1.c: Likewise.
	* gcc.target/arm/cmse/struct-1.c: Likewise.
	* gcc.target/arm/cmse/mainline/8_1m/hard-sp/cmse-13.c: Likewise.
	* gcc.target/arm/cmse/mainline/8_1m/hard-sp/cmse-5.c: Likewise.
	* gcc.target/arm/cmse/mainline/8_1m/hard-sp/cmse-7.c: Likewise.
	* gcc.target/arm/cmse/mainline/8_1m/hard-sp/cmse-8.c: Likewise.
	* gcc.target/arm/cmse/mainline/8_1m/hard/cmse-13.c: Likewise.
	* gcc.target/arm/cmse/mainline/8_1m/hard/cmse-5.c: Likewise.
	* gcc.target/arm/cmse/mainline/8_1m/hard/cmse-7.c: Likewise.
	* gcc.target/arm/cmse/mainline/8_1m/hard/cmse-8.c: Likewise.
	* gcc.target/arm/cmse/mainline/8_1m/soft/cmse-5.c: Likewise.
	* gcc.target/arm/cmse/mainline/8_1m/softfp-sp/cmse-5.c: Likewise.
	* gcc.target/arm/cmse/mainline/8_1m/softfp/cmse-5.c: Likewise.
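For illustration, below is a sketch of the kind of return sequence the updated tests expect for a cmse_nonsecure_entry function built for Armv8.1-M Mainline with a single-precision FPU. It is assembled only from the scan-assembler patterns added in this patch, not from actual compiler output; the exact register ranges depend on the function signature and float ABI, and the lr operand for bxns is simply the usual form.

	vscclrm	{s0-s15, VPR}		@ clear the VFP registers and VPR in one instruction
	clrm	{r1, r2, r3, ip, APSR}	@ clear the remaining core registers and APSR
	vldr	FPCXTNS, [sp], #4	@ restore the non-secure FP context (from another patch in the series)
	bxns	lr			@ return to non-secure state

Before this change the VFP registers were overwritten one or two at a time with vmov.f32/vmov.f64 immediate moves, which is what the removed scan-assembler directives used to check; VSCCLRM replaces that with a single range clear that also covers VPR.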
This commit is contained in:
parent 9722215a02
commit 0b1c7b27a7
@@ -1,6 +1,24 @@
2020-01-16 Mihail-Calin Ionescu <mihail.ionescu@arm.com>
2020-01-16 Thomas Preud'homme <thomas.preudhomme@arm.com>

* config/arm/arm-protos.h (clear_operation_p): Adapt prototype.
* config/arm/arm.c (clear_operation_p): Extend to be able to check a
clear_vfp_multiple pattern based on a new vfp parameter.
(cmse_clear_registers): Generate VSCCLRM to clear VFP registers when
targeting Armv8.1-M Mainline.
(cmse_nonsecure_entry_clear_before_return): Clear VFP registers
unconditionally when targeting Armv8.1-M Mainline architecture. Check
whether VFP registers are available before looking call_used_regs for a
VFP register.
* config/arm/predicates.md (clear_multiple_operation): Adapt to change
of prototype of clear_operation_p.
(clear_vfp_multiple_operation): New predicate.
* config/arm/unspecs.md (VUNSPEC_VSCCLRM_VPR): New volatile unspec.
* config/arm/vfp.md (clear_vfp_multiple): New define_insn.

2020-01-16 Mihail-Calin Ionescu <mihail.ionescu@arm.com>
2020-01-16 Thomas Preud'homme <thomas.preudhomme@arm.com>

* config/arm/arm-protos.h (clear_operation_p): Declare.
* config/arm/arm.c (clear_operation_p): New function.
(cmse_clear_registers): Generate clear_multiple instruction pattern if

@@ -83,7 +83,7 @@ extern int thumb_legitimate_offset_p (machine_mode, HOST_WIDE_INT);
extern int thumb1_legitimate_address_p (machine_mode, rtx, int);
extern bool ldm_stm_operation_p (rtx, bool, machine_mode mode,
bool, bool);
extern bool clear_operation_p (rtx);
extern bool clear_operation_p (rtx, bool);
extern int arm_const_double_rtx (rtx);
extern int vfp3_const_double_rtx (rtx);
extern int neon_immediate_valid_for_move (rtx, machine_mode, rtx *, int *);

@@ -13709,8 +13709,9 @@ ldm_stm_operation_p (rtx op, bool load, machine_mode mode,
return true;
}

/* Checks whether OP is a valid parallel pattern for a CLRM insn. To be a
valid CLRM pattern, OP must have the following form:
/* Checks whether OP is a valid parallel pattern for a CLRM (if VFP is false)
or VSCCLRM (otherwise) insn. To be a valid CLRM pattern, OP must have the
following form:

[(set (reg:SI <N>) (const_int 0))
(set (reg:SI <M>) (const_int 0))
@@ -13722,22 +13723,35 @@ ldm_stm_operation_p (rtx op, bool load, machine_mode mode,

Any number (including 0) of set expressions is valid, the volatile unspec is
optional. All registers but SP and PC are allowed and registers must be in
strict increasing order. */
strict increasing order.

To be a valid VSCCLRM pattern, OP must have the following form:

[(unspec_volatile [(const_int 0)]
VUNSPEC_VSCCLRM_VPR)
(set (reg:SF <N>) (const_int 0))
(set (reg:SF <M>) (const_int 0))
...
]

As with CLRM, any number (including 0) of set expressions is valid, however
the volatile unspec is mandatory here. Any VFP single-precision register is
accepted but all registers must be consecutive and in increasing order. */

bool
clear_operation_p (rtx op)
clear_operation_p (rtx op, bool vfp)
{
HOST_WIDE_INT i;
unsigned regno, last_regno;
rtx elt, reg, zero;
machine_mode mode;
HOST_WIDE_INT count = XVECLEN (op, 0);
HOST_WIDE_INT i, first_set = vfp ? 1 : 0;
machine_mode expected_mode = vfp ? E_SFmode : E_SImode;

for (i = 0; i < count; i++)
for (i = first_set; i < count; i++)
{
elt = XVECEXP (op, 0, i);

if (GET_CODE (elt) == UNSPEC_VOLATILE)
if (!vfp && GET_CODE (elt) == UNSPEC_VOLATILE)
{
if (XINT (elt, 1) != VUNSPEC_CLRM_APSR
|| XVECLEN (elt, 0) != 1
@@ -13756,17 +13770,26 @@ clear_operation_p (rtx op)

reg = SET_DEST (elt);
regno = REGNO (reg);
mode = GET_MODE (reg);
zero = SET_SRC (elt);

if (!REG_P (reg)
|| GET_MODE (reg) != SImode
|| regno == SP_REGNUM
|| regno == PC_REGNUM
|| (i != 0 && regno <= last_regno)
|| GET_MODE (reg) != expected_mode
|| zero != CONST0_RTX (SImode))
return false;

if (vfp)
{
if (i != 1 && regno != last_regno + 1)
return false;
}
else
{
if (regno == SP_REGNUM || regno == PC_REGNUM)
return false;
if (i != 0 && regno <= last_regno)
return false;
}

last_regno = REGNO (reg);
}

@@ -18112,6 +18135,43 @@ cmse_clear_registers (sbitmap to_clear_bitmap, uint32_t *padding_bits_to_clear,
auto_sbitmap core_regs_bitmap (to_clear_bitmap_size);
auto_sbitmap to_clear_core_bitmap (to_clear_bitmap_size);

for (i = FIRST_VFP_REGNUM; i <= maxregno; i += nb_regs)
{
/* Find next register to clear and exit if none. */
for (; i <= maxregno && !bitmap_bit_p (to_clear_bitmap, i); i++);
if (i > maxregno)
break;

/* Compute number of consecutive registers to clear. */
for (j = i; j <= maxregno && bitmap_bit_p (to_clear_bitmap, j);
j++);
nb_regs = j - i;

/* Create VSCCLRM RTX pattern. */
par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nb_regs + 1));
vunspec_vec = gen_rtvec (1, gen_int_mode (0, SImode));
vunspec = gen_rtx_UNSPEC_VOLATILE (SImode, vunspec_vec,
VUNSPEC_VSCCLRM_VPR);
XVECEXP (par, 0, 0) = vunspec;

/* Insert VFP register clearing RTX in the pattern. */
start_sequence ();
for (k = 1, j = i; j <= maxregno && k < nb_regs + 1; j++)
{
if (!bitmap_bit_p (to_clear_bitmap, j))
continue;

reg = gen_rtx_REG (SFmode, j);
set = gen_rtx_SET (reg, const0_rtx);
XVECEXP (par, 0, k++) = set;
emit_use (reg);
}
use_seq = get_insns ();
end_sequence ();

emit_insn_after (use_seq, emit_insn (par));
}

/* Get set of core registers to clear. */
bitmap_clear (core_regs_bitmap);
bitmap_set_range (core_regs_bitmap, R0_REGNUM,

|
||||
end_sequence ();
|
||||
|
||||
emit_insn_after (use_seq, emit_insn (par));
|
||||
minregno = FIRST_VFP_REGNUM;
|
||||
}
|
||||
|
||||
/* If not marked for clearing, clearing_reg already does not contain
|
||||
any secret. */
|
||||
if (clearing_regno <= maxregno
|
||||
&& bitmap_bit_p (to_clear_bitmap, clearing_regno))
|
||||
else
|
||||
{
|
||||
emit_move_insn (clearing_reg, const0_rtx);
|
||||
emit_use (clearing_reg);
|
||||
bitmap_clear_bit (to_clear_bitmap, clearing_regno);
|
||||
}
|
||||
|
||||
for (regno = minregno; regno <= maxregno; regno++)
|
||||
{
|
||||
if (!bitmap_bit_p (to_clear_bitmap, regno))
|
||||
continue;
|
||||
|
||||
if (IS_VFP_REGNUM (regno))
|
||||
/* If not marked for clearing, clearing_reg already does not contain
|
||||
any secret. */
|
||||
if (clearing_regno <= maxregno
|
||||
&& bitmap_bit_p (to_clear_bitmap, clearing_regno))
|
||||
{
|
||||
/* If regno is an even vfp register and its successor is also to
|
||||
be cleared, use vmov. */
|
||||
if (TARGET_VFP_DOUBLE
|
||||
&& VFP_REGNO_OK_FOR_DOUBLE (regno)
|
||||
&& bitmap_bit_p (to_clear_bitmap, regno + 1))
|
||||
emit_move_insn (clearing_reg, const0_rtx);
|
||||
emit_use (clearing_reg);
|
||||
bitmap_clear_bit (to_clear_bitmap, clearing_regno);
|
||||
}
|
||||
|
||||
for (regno = minregno; regno <= maxregno; regno++)
|
||||
{
|
||||
if (!bitmap_bit_p (to_clear_bitmap, regno))
|
||||
continue;
|
||||
|
||||
if (IS_VFP_REGNUM (regno))
|
||||
{
|
||||
emit_move_insn (gen_rtx_REG (DFmode, regno),
|
||||
CONST1_RTX (DFmode));
|
||||
emit_use (gen_rtx_REG (DFmode, regno));
|
||||
regno++;
|
||||
/* If regno is an even vfp register and its successor is also to
|
||||
be cleared, use vmov. */
|
||||
if (TARGET_VFP_DOUBLE
|
||||
&& VFP_REGNO_OK_FOR_DOUBLE (regno)
|
||||
&& bitmap_bit_p (to_clear_bitmap, regno + 1))
|
||||
{
|
||||
emit_move_insn (gen_rtx_REG (DFmode, regno),
|
||||
CONST1_RTX (DFmode));
|
||||
emit_use (gen_rtx_REG (DFmode, regno));
|
||||
regno++;
|
||||
}
|
||||
else
|
||||
{
|
||||
emit_move_insn (gen_rtx_REG (SFmode, regno),
|
||||
CONST1_RTX (SFmode));
|
||||
emit_use (gen_rtx_REG (SFmode, regno));
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
emit_move_insn (gen_rtx_REG (SFmode, regno),
|
||||
CONST1_RTX (SFmode));
|
||||
emit_use (gen_rtx_REG (SFmode, regno));
|
||||
emit_move_insn (gen_rtx_REG (SImode, regno), clearing_reg);
|
||||
emit_use (gen_rtx_REG (SImode, regno));
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
emit_move_insn (gen_rtx_REG (SImode, regno), clearing_reg);
|
||||
emit_use (gen_rtx_REG (SImode, regno));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26397,7 +26458,8 @@ thumb1_expand_prologue (void)
void
cmse_nonsecure_entry_clear_before_return (void)
{
int regno, maxregno = TARGET_HARD_FLOAT ? LAST_VFP_REGNUM : IP_REGNUM;
bool clear_vfpregs = TARGET_HARD_FLOAT || TARGET_HAVE_FPCXT_CMSE;
int regno, maxregno = clear_vfpregs ? LAST_VFP_REGNUM : IP_REGNUM;
uint32_t padding_bits_to_clear = 0;
auto_sbitmap to_clear_bitmap (maxregno + 1);
rtx r1_reg, result_rtl, clearing_reg = NULL_RTX;
@@ -26409,7 +26471,7 @@ cmse_nonsecure_entry_clear_before_return (void)

/* If we are not dealing with -mfloat-abi=soft we will need to clear VFP
registers. */
if (TARGET_HARD_FLOAT)
if (clear_vfpregs)
{
int float_bits = D7_VFP_REGNUM - FIRST_VFP_REGNUM + 1;

@@ -26438,7 +26500,9 @@ cmse_nonsecure_entry_clear_before_return (void)
continue;
if (IN_RANGE (regno, IP_REGNUM, PC_REGNUM))
continue;
if (call_used_or_fixed_reg_p (regno))
if (call_used_or_fixed_reg_p (regno)
&& (!IN_RANGE (regno, FIRST_VFP_REGNUM, LAST_VFP_REGNUM)
|| TARGET_HARD_FLOAT))
bitmap_set_bit (to_clear_bitmap, regno);
}

@@ -548,7 +548,13 @@
(define_special_predicate "clear_multiple_operation"
(match_code "parallel")
{
return clear_operation_p (op);
return clear_operation_p (op, /*vfp*/false);
})

(define_special_predicate "clear_vfp_multiple_operation"
(match_code "parallel")
{
return clear_operation_p (op, /*vfp*/true);
})

(define_special_predicate "load_multiple_operation"

@@ -241,6 +241,8 @@
VUNSPEC_APSR_WRITE ; Represent writing the APSR.
VUNSPEC_VSTR_VLDR ; Represent the vstr/vldr instruction.
VUNSPEC_CLRM_APSR ; Represent the clearing of APSR with clrm instruction.
VUNSPEC_VSCCLRM_VPR ; Represent the clearing of VPR with vscclrm
; instruction.
])

;; Enumerators for NEON unspecs.

@@ -1637,6 +1637,42 @@
(set_attr "type" "load_4")]
)

;; The operands are validated through the clear_multiple_operation
;; match_parallel predicate rather than through constraints so enable it only
;; after reload.
(define_insn "*clear_vfp_multiple"
[(match_parallel 0 "clear_vfp_multiple_operation"
[(unspec_volatile [(const_int 0)]
VUNSPEC_VSCCLRM_VPR)])]
"TARGET_HAVE_FPCXT_CMSE && use_cmse && reload_completed"
{
int num_regs = XVECLEN (operands[0], 0);
char pattern[30];
const char *regname;
rtx reg;

strcpy (pattern, \"vscclrm%?\\t{%|\");
if (num_regs > 1)
{
reg = XEXP (XVECEXP (operands[0], 0, 1), 0);
strcat (pattern, reg_names[REGNO (reg)]);
if (num_regs > 2)
{
strcat (pattern, \"-%|\");
reg = XEXP (XVECEXP (operands[0], 0, num_regs - 1), 0);
strcat (pattern, reg_names[REGNO (reg)]);
}
strcat (pattern, \", \");
}

strcat (pattern, \"VPR}\");
output_asm_insn (pattern, operands);
return \"\";
}
[(set_attr "predicable" "yes")
(set_attr "type" "mov_reg")]
)

(define_insn_and_split "*cmpsf_split_vfp"
[(set (reg:CCFP CC_REGNUM)
(compare:CCFP (match_operand:SF 0 "s_register_operand" "t")

@@ -1,6 +1,26 @@
2020-01-16 Mihail-Calin Ionescu <mihail.ionescu@arm.com>
2020-01-16 Thomas Preud'homme <thomas.preudhomme@arm.com>

* gcc.target/arm/cmse/bitfield-1.c: Add check for VSCCLRM.
* gcc.target/arm/cmse/bitfield-2.c: Likewise.
* gcc.target/arm/cmse/bitfield-3.c: Likewise.
* gcc.target/arm/cmse/cmse-1.c: Likewise.
* gcc.target/arm/cmse/struct-1.c: Likewise.
* gcc.target/arm/cmse/mainline/8_1m/hard-sp/cmse-13.c: Likewise.
* gcc.target/arm/cmse/mainline/8_1m/hard-sp/cmse-5.c: Likewise.
* gcc.target/arm/cmse/mainline/8_1m/hard-sp/cmse-7.c: Likewise.
* gcc.target/arm/cmse/mainline/8_1m/hard-sp/cmse-8.c: Likewise.
* gcc.target/arm/cmse/mainline/8_1m/hard/cmse-13.c: Likewise.
* gcc.target/arm/cmse/mainline/8_1m/hard/cmse-5.c: Likewise.
* gcc.target/arm/cmse/mainline/8_1m/hard/cmse-7.c: Likewise.
* gcc.target/arm/cmse/mainline/8_1m/hard/cmse-8.c: Likewise.
* gcc.target/arm/cmse/mainline/8_1m/soft/cmse-5.c: Likewise.
* gcc.target/arm/cmse/mainline/8_1m/softfp-sp/cmse-5.c: Likewise.
* gcc.target/arm/cmse/mainline/8_1m/softfp/cmse-5.c: Likewise.

2020-01-16 Mihail-Calin Ionescu <mihail.ionescu@arm.com>
2020-01-16 Thomas Preud'homme <thomas.preudhomme@arm.com>

* gcc.target/arm/cmse/bitfield-1.c: Add check for CLRM.
* gcc.target/arm/cmse/bitfield-2.c: Likewise.
* gcc.target/arm/cmse/bitfield-3.c: Likewise.

@@ -36,6 +36,7 @@ main (void)
/* { dg-final { scan-assembler "movw\tr1, #1855" } } */
/* { dg-final { scan-assembler "movt\tr1, 65535" } } */
/* { dg-final { scan-assembler "ands\tr0(, r0)?, r1" } } */
/* { dg-final { scan-assembler "vscclrm\t\{s0-s15, VPR\}" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "clrm\t\{r1, r2, r3, ip, APSR\}" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "bxns" } } */

@@ -33,6 +33,7 @@ main (void)
/* { dg-final { scan-assembler "movw\tr1, #1919" } } */
/* { dg-final { scan-assembler "movt\tr1, 2047" } } */
/* { dg-final { scan-assembler "ands\tr0(, r0)?, r1" } } */
/* { dg-final { scan-assembler "vscclrm\t\{s0-s15, VPR\}" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "clrm\t\{r1, r2, r3, ip, APSR\}" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "bxns" } } */

@@ -34,6 +34,7 @@ main (void)
/* { dg-final { scan-assembler "movw\tr1, #65535" } } */
/* { dg-final { scan-assembler "movt\tr1, 63" } } */
/* { dg-final { scan-assembler "ands\tr0(, r0)?, r1" } } */
/* { dg-final { scan-assembler "vscclrm\t\{s0-s15, VPR\}" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "clrm\t\{r1, r2, r3, ip, APSR\}" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "bxns" } } */

@@ -105,6 +105,7 @@ qux (int_nsfunc_t * callback)
/* { dg-final { scan-assembler "bic" } } */
/* { dg-final { scan-assembler "push\t\{r4, r5, r6" } } */
/* { dg-final { scan-assembler "vstr\tFPCXTNS, \\\[sp, #-4\\\]!" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "vscclrm\t\{s0-s15, VPR\}" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "clrm\t\{r1, r2, r3, ip, APSR\}" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "msr\tAPSR_nzcvq" { target { ! arm_cmse_clear_ok } } } } */

@@ -10,21 +10,10 @@
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, APSR\}" } } */
/* { dg-final { scan-assembler-not "vmov\.f32\ts0, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
/* { dg-final { scan-assembler-not "vmov\.f32\ts2, #1\.0" } } */
/* { dg-final { scan-assembler-not "vmov\.f32\ts3, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts4, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts5, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts6, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts7, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts8, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts9, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts10, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts11, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts12, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */
/* { dg-final { scan-assembler "vscclrm\t\{s1, VPR\}" } } */
/* { dg-final { scan-assembler "vscclrm\t\{s4-s15, VPR\}" } } */

/* Now we check that we use the correct intrinsic to call. */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */

@@ -6,22 +6,7 @@
#include "../../../cmse-5.x"

/* { dg-final { scan-assembler "vstr\tFPCXTNS, \\\[sp, #-4\\\]!" } } */
/* { dg-final { scan-assembler-not "vmov\.f32\ts0, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts2, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts3, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts4, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts5, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts6, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts7, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts8, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts9, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts10, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts11, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts12, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */
/* { dg-final { scan-assembler "vscclrm\t\{s1-s15, VPR\}" } } */
/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, ip, APSR\}" } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" } } */
/* { dg-final { scan-assembler "bxns" } } */

@@ -9,22 +9,7 @@
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, APSR\}" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts0, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts2, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts3, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts4, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts5, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts6, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts7, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts8, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts9, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts10, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts11, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts12, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */
/* { dg-final { scan-assembler "vscclrm\t\{s0-s15, VPR\}" } } */

/* Now we check that we use the correct intrinsic to call. */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */

@@ -11,20 +11,7 @@
/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, APSR\}" } } */
/* { dg-final { scan-assembler-not "vmov\.f32\ts0, #1\.0" } } */
/* { dg-final { scan-assembler-not "vmov\.f32\ts1, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts2, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts3, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts4, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts5, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts6, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts7, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts8, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts9, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts10, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts11, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts12, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */
/* { dg-final { scan-assembler "vscclrm\t\{s2-s15, VPR\}" } } */

/* Now we check that we use the correct intrinsic to call. */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */

@@ -9,18 +9,13 @@
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, APSR\}" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
/* { dg-final { scan-assembler-not "vmov\.f32\ts0, #1\.0" } } */
/* { dg-final { scan-assembler-not "vmov\.f64\td0, #1\.0" } } */
/* { dg-final { scan-assembler-not "vmov\.f64\td1, #1\.0" } } */
/* { dg-final { scan-assembler-not "vmov\.f32\ts2, #1\.0" } } */
/* { dg-final { scan-assembler-not "vmov\.f32\ts3, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */
/* { dg-final { scan-assembler "vscclrm\t\{s1, VPR\}" } } */
/* { dg-final { scan-assembler "vscclrm\t\{s4-s15, VPR\}" } } */

/* Now we check that we use the correct intrinsic to call. */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */

@@ -6,15 +6,7 @@
#include "../../../cmse-5.x"

/* { dg-final { scan-assembler "vstr\tFPCXTNS, \\\[sp, #-4\\\]!" } } */
/* { dg-final { scan-assembler-not "vmov\.f32\ts0, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */
/* { dg-final { scan-assembler "vscclrm\t\{s1-s15, VPR\}" } } */
/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, ip, APSR\}" } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" } } */
/* { dg-final { scan-assembler "bxns" } } */

@@ -9,14 +9,7 @@
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, APSR\}" } } */
/* { dg-final { scan-assembler "vmov\.f64\td0, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */
/* { dg-final { scan-assembler "vscclrm\t\{s0-s15, VPR\}" } } */

/* Now we check that we use the correct intrinsic to call. */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */

@@ -10,13 +10,7 @@
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, APSR\}" } } */
/* { dg-final { scan-assembler-not "vmov\.f64\td0, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */
/* { dg-final { scan-assembler "vscclrm\t\{s2-s15, VPR\}" } } */

/* Now we check that we use the correct intrinsic to call. */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */

@@ -7,6 +7,7 @@
/* { dg-final { scan-assembler "vstr\tFPCXTNS, \\\[sp, #-4\\\]!" } } */
/* { dg-final { scan-assembler-not "vmov" } } */
/* { dg-final { scan-assembler-not "vmsr" } } */
/* { dg-final { scan-assembler "vscclrm\t\{s0-s15, VPR\}" } } */
/* { dg-final { scan-assembler "clrm\t\{r1, r2, r3, ip, APSR\}" } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" } } */
/* { dg-final { scan-assembler "bxns" } } */

@@ -8,22 +8,7 @@
/* { dg-final { scan-assembler "__acle_se_foo:" } } */
/* { dg-final { scan-assembler "vstr\tFPCXTNS, \\\[sp, #-4\\\]!" } } */
/* { dg-final { scan-assembler-not "mov\tr0, lr" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts0, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts2, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts3, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts4, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts5, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts6, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts7, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts8, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts9, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts10, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts11, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts12, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */
/* { dg-final { scan-assembler "vscclrm\t\{s0-s15, VPR\}" } } */
/* { dg-final { scan-assembler "clrm\t\{r1, r2, r3, ip, APSR\}" } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" } } */
/* { dg-final { scan-assembler "bxns" } } */

@@ -7,14 +7,7 @@

/* { dg-final { scan-assembler "__acle_se_foo:" } } */
/* { dg-final { scan-assembler "vstr\tFPCXTNS, \\\[sp, #-4\\\]!" } } */
/* { dg-final { scan-assembler "vmov\.f64\td0, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */
/* { dg-final { scan-assembler "vscclrm\t\{s0-s15, VPR\}" } } */
/* { dg-final { scan-assembler "clrm\t\{r1, r2, r3, ip, APSR\}" } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" } } */
/* { dg-final { scan-assembler "bxns" } } */

@@ -29,6 +29,7 @@ main (void)
/* { dg-final { scan-assembler "movs\tr1, #255" } } */
/* { dg-final { scan-assembler "movt\tr1, 65535" } } */
/* { dg-final { scan-assembler "ands\tr0(, r0)?, r1" } } */
/* { dg-final { scan-assembler "vscclrm\t\{s0-s15, VPR\}" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "clrm\t\{r1, r2, r3, ip, APSR\}" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "bxns" } } */