md.texi (Standard Names): Document probe_stack_address.

* doc/md.texi (Standard Names): Document probe_stack_address.
	* explow.c (emit_stack_probe): Handle probe_stack_address.
	* config/ia64/ia64.md (UNSPECV_PROBE_STACK_ADDRESS): New constant.
	(UNSPECV_PROBE_STACK_RANGE): Likewise.
	(probe_stack_address): New insn.
	(probe_stack_range): Likewise.
	* config/ia64/ia64.c: Include common/common-target.h.
	(ia64_compute_frame_size): Mark r2 and r3 as used if static stack
	checking is enabled.
	(ia64_emit_probe_stack_range): New function.
	(output_probe_stack_range): Likewise.
	(ia64_expand_prologue): Invoke ia64_emit_probe_stack_range if static
	builtin stack checking is enabled.
	(rtx_needs_barrier) <UNSPEC_VOLATILE>: Handle UNSPECV_PROBE_STACK_RANGE
	and UNSPECV_PROBE_STACK_ADDRESS.
	(unknown_for_bundling_p): New predicate.
	(group_barrier_needed): Use important_for_bundling_p.
	(ia64_dfa_new_cycle): Use unknown_for_bundling_p.
	(issue_nops_and_insn): Likewise.
	(bundling): Likewise.
	(final_emit_insn_group_barriers): Likewise.
	* config/ia64/ia64-protos.h (output_probe_stack_range): Declare.
	* config/ia64/hpux.h (STACK_CHECK_STATIC_BUILTIN): Define.
	(STACK_CHECK_PROTECT): Likewise.
	* config/ia64/linux.h (STACK_CHECK_STATIC_BUILTIN): Likewise.

Co-Authored-By: Tristan Gingold <gingold@adacore.com>

From-SVN: r187383
commit 7b84aac03c (parent f1016df403)
Author: Eric Botcazou <ebotcazou@adacore.com>
Date: 2012-05-10 22:15:07 +00:00

8 changed files with 335 additions and 43 deletions

--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,32 @@
2012-05-10 Eric Botcazou <ebotcazou@adacore.com>
Tristan Gingold <gingold@adacore.com>
* doc/md.texi (Standard Names): Document probe_stack_address.
* explow.c (emit_stack_probe): Handle probe_stack_address.
* config/ia64/ia64.md (UNSPECV_PROBE_STACK_ADDRESS): New constant.
(UNSPECV_PROBE_STACK_RANGE): Likewise.
(probe_stack_address): New insn.
(probe_stack_range): Likewise.
* config/ia64/ia64.c: Include common/common-target.h.
(ia64_compute_frame_size): Mark r2 and r3 as used if static stack
checking is enabled.
(ia64_emit_probe_stack_range): New function.
(output_probe_stack_range): Likewise.
(ia64_expand_prologue): Invoke ia64_emit_probe_stack_range if static
builtin stack checking is enabled.
(rtx_needs_barrier) <UNSPEC_VOLATILE>: Handle UNSPECV_PROBE_STACK_RANGE
and UNSPECV_PROBE_STACK_ADDRESS.
(unknown_for_bundling_p): New predicate.
(group_barrier_needed): Use important_for_bundling_p.
(ia64_dfa_new_cycle): Use unknown_for_bundling_p.
(issue_nops_and_insn): Likewise.
(bundling): Likewise.
(final_emit_insn_group_barriers): Likewise.
* config/ia64/ia64-protos.h (output_probe_stack_range): Declare.
* config/ia64/hpux.h (STACK_CHECK_STATIC_BUILTIN): Define.
(STACK_CHECK_PROTECT): Likewise.
* config/ia64/linux.h (STACK_CHECK_STATIC_BUILTIN): Likewise.
2012-05-10 Jan Hubicka <jh@suse.cz>
* ipa-inline.c (update_all_callee_keys): Remove.

--- a/gcc/config/ia64/hpux.h
+++ b/gcc/config/ia64/hpux.h
@@ -228,3 +228,10 @@ do { \
#define TARGET_ASM_FUNCTION_SECTION ia64_hpux_function_section
#define TARGET_POSIX_IO
/* Define this to be nonzero if static stack checking is supported. */
#define STACK_CHECK_STATIC_BUILTIN 1
/* Minimum amount of stack required to recover from an anticipated stack
overflow detection. */
#define STACK_CHECK_PROTECT (24 * 1024)

--- a/gcc/config/ia64/ia64-protos.h
+++ b/gcc/config/ia64/ia64-protos.h
@@ -61,6 +61,7 @@ extern int ia64_hard_regno_rename_ok (int, int);
extern enum reg_class ia64_secondary_reload_class (enum reg_class,
enum machine_mode, rtx);
extern const char *get_bundle_name (int);
extern const char *output_probe_stack_range (rtx, rtx);
extern void ia64_expand_vec_perm_even_odd (rtx, rtx, rtx, int);
extern bool ia64_expand_vec_perm_const (rtx op[4]);

--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -47,6 +47,7 @@ along with GCC; see the file COPYING3. If not see
#include "timevar.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "tm_p.h"
#include "hashtab.h"
#include "langhooks.h"
@@ -272,6 +273,7 @@ static int get_template (state_t, int);
static rtx get_next_important_insn (rtx, rtx);
static bool important_for_bundling_p (rtx);
static bool unknown_for_bundling_p (rtx);
static void bundling (FILE *, int, rtx, rtx);
static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
@@ -2672,6 +2674,10 @@ ia64_compute_frame_size (HOST_WIDE_INT size)
if (cfun->machine->ia64_eh_epilogue_bsp)
mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
/* Static stack checking uses r2 and r3. */
if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
current_frame_info.gr_used_mask |= 0xc;
/* Find the size of the register stack frame. We have only 80 local
registers, because we reserve 8 for the inputs and 8 for the
outputs. */
@@ -3230,6 +3236,213 @@ gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
return gen_fr_restore (dest, src);
}
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
/* See Table 6.2 of the IA-64 Software Developer Manual, Volume 2. */
#define BACKING_STORE_SIZE(N) ((N) > 0 ? ((N) + (N)/63 + 1) * 8 : 0)
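/* For instance, with 64 local registers the macro gives
   (64 + 64/63 + 1) * 8 = (64 + 1 + 1) * 8 = 528 bytes, the extra
   words allowing for the NaT collections the RSE interleaves with
   the spilled registers (roughly one per 63 slots).  */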
/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
inclusive. These are offsets from the current stack pointer. SOL is the
size of local registers. ??? This clobbers r2 and r3. */
static void
ia64_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size, int sol)
{
/* On the IA-64 there is a second stack in memory, namely the Backing Store
of the Register Stack Engine. We also need to probe it after checking
that the 2 stacks don't overlap. */
const int bs_size = BACKING_STORE_SIZE (sol);
rtx r2 = gen_rtx_REG (Pmode, GR_REG (2));
rtx r3 = gen_rtx_REG (Pmode, GR_REG (3));
/* Detect collision of the 2 stacks if necessary. */
if (bs_size > 0 || size > 0)
{
rtx p6 = gen_rtx_REG (BImode, PR_REG (6));
emit_insn (gen_bsp_value (r3));
emit_move_insn (r2, GEN_INT (-(first + size)));
/* Compare current value of BSP and SP registers. */
emit_insn (gen_rtx_SET (VOIDmode, p6,
gen_rtx_fmt_ee (LTU, BImode,
r3, stack_pointer_rtx)));
/* Compute the address of the probe for the Backing Store (which grows
towards higher addresses). We probe only at the first offset of
the next page because some OSes (e.g. Linux/ia64) only extend the
backing store when this specific address is hit (but generate a SEGV
on other addresses). Page size is the worst case (4KB). The reserve
size is at least 4096 - (96 + 2) * 8 = 3312 bytes, which is enough.
Also compute the address of the last probe for the memory stack
(which grows towards lower addresses). */
emit_insn (gen_rtx_SET (VOIDmode, r3, plus_constant (r3, 4095)));
emit_insn (gen_rtx_SET (VOIDmode, r2,
gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
/* Compare them and raise SEGV if the former has topped the latter. */
emit_insn (gen_rtx_COND_EXEC (VOIDmode,
gen_rtx_fmt_ee (NE, VOIDmode, p6,
const0_rtx),
gen_rtx_SET (VOIDmode, p6,
gen_rtx_fmt_ee (GEU, BImode,
r3, r2))));
emit_insn (gen_rtx_SET (VOIDmode,
gen_rtx_ZERO_EXTRACT (DImode, r3, GEN_INT (12),
const0_rtx),
const0_rtx));
emit_insn (gen_rtx_COND_EXEC (VOIDmode,
gen_rtx_fmt_ee (NE, VOIDmode, p6,
const0_rtx),
gen_rtx_TRAP_IF (VOIDmode, const1_rtx,
GEN_INT (11))));
}
/* Probe the Backing Store if necessary. */
if (bs_size > 0)
emit_stack_probe (r3);
/* Probe the memory stack if necessary. */
if (size == 0)
;
/* See if we have a constant small number of probes to generate. If so,
that's the easy case. */
else if (size <= PROBE_INTERVAL)
emit_stack_probe (r2);
/* The run-time loop is made up of 8 insns in the generic case while this
compile-time loop is made up of 5+2*(n-2) insns, where n is the number
of intervals. */
else if (size <= 4 * PROBE_INTERVAL)
{
HOST_WIDE_INT i;
emit_move_insn (r2, GEN_INT (-(first + PROBE_INTERVAL)));
emit_insn (gen_rtx_SET (VOIDmode, r2,
gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
emit_stack_probe (r2);
/* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
it exceeds SIZE. If only two probes are needed, this will not
generate any code. Then probe at FIRST + SIZE. */
for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
{
emit_insn (gen_rtx_SET (VOIDmode, r2,
plus_constant (r2, -PROBE_INTERVAL)));
emit_stack_probe (r2);
}
emit_insn (gen_rtx_SET (VOIDmode, r2,
plus_constant (r2,
(i - PROBE_INTERVAL) - size)));
emit_stack_probe (r2);
}
/* Otherwise, do the same as above, but in a loop. Note that we must be
extra careful with variables wrapping around because we might be at
the very top (or the very bottom) of the address space and we have
to be able to handle this case properly; in particular, we use an
equality test for the loop condition. */
else
{
HOST_WIDE_INT rounded_size;
emit_move_insn (r2, GEN_INT (-first));
/* Step 1: round SIZE to the previous multiple of the interval. */
rounded_size = size & -PROBE_INTERVAL;
/* Step 2: compute initial and final value of the loop counter. */
/* TEST_ADDR = SP + FIRST. */
emit_insn (gen_rtx_SET (VOIDmode, r2,
gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
/* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
if (rounded_size > (1 << 21))
{
emit_move_insn (r3, GEN_INT (-rounded_size));
emit_insn (gen_rtx_SET (VOIDmode, r3, gen_rtx_PLUS (Pmode, r2, r3)));
}
else
emit_insn (gen_rtx_SET (VOIDmode, r3,
gen_rtx_PLUS (Pmode, r2,
GEN_INT (-rounded_size))));
/* Step 3: the loop
while (TEST_ADDR != LAST_ADDR)
{
TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
probe at TEST_ADDR
}
probes at FIRST + N * PROBE_INTERVAL for values of N from 1
until it is equal to ROUNDED_SIZE. */
emit_insn (gen_probe_stack_range (r2, r2, r3));
/* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
that SIZE is equal to ROUNDED_SIZE. */
/* TEMP = SIZE - ROUNDED_SIZE. */
if (size != rounded_size)
{
emit_insn (gen_rtx_SET (VOIDmode, r2,
plus_constant (r2, rounded_size - size)));
emit_stack_probe (r2);
}
}
/* Make sure nothing is scheduled before we are done. */
emit_insn (gen_blockage ());
}
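For reference, the collision test just emitted can be modeled in plain C. This is a minimal standalone sketch under the same assumption as the comment above (4 KB pages); the function and parameter names are invented for illustration.

#include <stdint.h>

/* The BSP grows towards higher addresses and the SP towards lower ones;
   the code traps when the page-rounded backing store top (r3) reaches
   the lowest address the memory stack will probe (r2).  */
static int
stacks_collide (uint64_t bsp, uint64_t sp, uint64_t first, uint64_t size)
{
  uint64_t bs_probe = (bsp + 4095) & ~(uint64_t) 4095; /* r3, rounded up */
  uint64_t sp_probe = sp - (first + size);             /* r2 */
  return bsp < sp && bs_probe >= sp_probe;             /* p6 */
}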
/* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
absolute addresses. */
const char *
output_probe_stack_range (rtx reg1, rtx reg2)
{
static int labelno = 0;
char loop_lab[32], end_lab[32];
rtx xops[3];
ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
/* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
xops[0] = reg1;
xops[1] = reg2;
xops[2] = gen_rtx_REG (BImode, PR_REG (6));
output_asm_insn ("cmp.eq %2, %I2 = %0, %1", xops);
fprintf (asm_out_file, "\t(%s) br.cond.dpnt ", reg_names [REGNO (xops[2])]);
assemble_name_raw (asm_out_file, end_lab);
fputc ('\n', asm_out_file);
/* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
xops[1] = GEN_INT (-PROBE_INTERVAL);
output_asm_insn ("addl %0 = %1, %0", xops);
fputs ("\t;;\n", asm_out_file);
/* Probe at TEST_ADDR and branch. */
output_asm_insn ("probe.w.fault %0, 0", xops);
fprintf (asm_out_file, "\tbr ");
assemble_name_raw (asm_out_file, loop_lab);
fputc ('\n', asm_out_file);
ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
return "";
}
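/* Illustration only: with the default 4 KB probe interval, the loop
   emitted above looks roughly like this (register and label numbers
   are assumed, not fixed by the code):

	.LPSRL0:
	cmp.eq p6, p7 = r2, r3
	(p6) br.cond.dpnt .LPSRE0
	addl r2 = -4096, r2
	;;
	probe.w.fault r2, 0
	br .LPSRL0
	.LPSRE0:  */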
/* Called after register allocation to add any instructions needed for the
prologue. Using a prologue insn is favored compared to putting all of the
instructions in output_function_prologue(), since it allows the scheduler
@@ -3265,6 +3478,12 @@ ia64_expand_prologue (void)
if (flag_stack_usage_info)
current_function_static_stack_size = current_frame_info.total_size;
if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
ia64_emit_probe_stack_range (STACK_CHECK_PROTECT,
current_frame_info.total_size,
current_frame_info.n_input_regs
+ current_frame_info.n_local_regs);
if (dump_file)
{
fprintf (dump_file, "ia64 frame related registers "
@@ -6529,6 +6748,7 @@ rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
return 1;
case UNSPECV_SET_BSP:
case UNSPECV_PROBE_STACK_RANGE:
need_barrier = 1;
break;
@@ -6539,6 +6759,10 @@ rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
case UNSPECV_PSAC_NORMAL:
return 0;
case UNSPECV_PROBE_STACK_ADDRESS:
need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
break;
default:
gcc_unreachable ();
}
@@ -6700,10 +6924,7 @@ group_barrier_needed (rtx insn)
gcc_unreachable ();
}
-  if (first_instruction && INSN_P (insn)
-      && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
-      && GET_CODE (PATTERN (insn)) != USE
-      && GET_CODE (PATTERN (insn)) != CLOBBER)
+  if (first_instruction && important_for_bundling_p (insn))
{
need_barrier = 0;
first_instruction = 0;
@@ -7397,8 +7618,7 @@ ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
&& scheduled_good_insn (last_scheduled_insn))))
|| (last_scheduled_insn
&& (GET_CODE (last_scheduled_insn) == CALL_INSN
-	      || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
-	      || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
+	      || unknown_for_bundling_p (last_scheduled_insn))))
{
init_insn_group_barriers ();
@@ -7423,8 +7643,7 @@ ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
if (last_scheduled_insn)
{
-      if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
-	  || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
+      if (unknown_for_bundling_p (last_scheduled_insn))
state_reset (curr_state);
else
{
@@ -8540,8 +8759,7 @@ issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
if (!try_issue_insn (curr_state, insn))
return;
curr_state->accumulated_insns_num++;
-      gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
-		  && asm_noperands (PATTERN (insn)) < 0);
+      gcc_assert (!unknown_for_bundling_p (insn));
if (ia64_safe_type (insn) == TYPE_L)
curr_state->accumulated_insns_num++;
@@ -8567,8 +8785,7 @@ issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
if (!try_issue_insn (curr_state, insn))
return;
curr_state->accumulated_insns_num++;
-      if (GET_CODE (PATTERN (insn)) == ASM_INPUT
-	  || asm_noperands (PATTERN (insn)) >= 0)
+      if (unknown_for_bundling_p (insn))
{
/* Finish bundle containing asm insn. */
curr_state->after_nops_num
@@ -8702,6 +8919,7 @@ get_template (state_t state, int pos)
}
/* True when INSN is important for bundling. */
static bool
important_for_bundling_p (rtx insn)
{
@@ -8723,6 +8941,17 @@ get_next_important_insn (rtx insn, rtx tail)
return NULL_RTX;
}
/* True when INSN is unknown, but important, for bundling. */
static bool
unknown_for_bundling_p (rtx insn)
{
return (INSN_P (insn)
&& ia64_safe_itanium_class (insn) == ITANIUM_CLASS_UNKNOWN
&& GET_CODE (PATTERN (insn)) != USE
&& GET_CODE (PATTERN (insn)) != CLOBBER);
}
/* Add a bundle selector TEMPLATE0 before INSN. */
static void
@@ -8850,19 +9079,14 @@ bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
insn != tail;
insn = NEXT_INSN (insn))
if (INSN_P (insn)
-	&& (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
-	    || GET_CODE (PATTERN (insn)) == USE
-	    || GET_CODE (PATTERN (insn)) == CLOBBER)
+	&& !important_for_bundling_p (insn)
&& GET_MODE (insn) == TImode)
{
PUT_MODE (insn, VOIDmode);
for (next_insn = NEXT_INSN (insn);
next_insn != tail;
next_insn = NEXT_INSN (next_insn))
-	  if (INSN_P (next_insn)
-	      && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
-	      && GET_CODE (PATTERN (next_insn)) != USE
-	      && GET_CODE (PATTERN (next_insn)) != CLOBBER
+	  if (important_for_bundling_p (next_insn)
&& INSN_CODE (next_insn) != CODE_FOR_insn_group_barrier)
{
PUT_MODE (next_insn, TImode);
@@ -8874,10 +9098,7 @@ bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
insn != NULL_RTX;
insn = next_insn)
{
-      gcc_assert (INSN_P (insn)
-		  && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
-		  && GET_CODE (PATTERN (insn)) != USE
-		  && GET_CODE (PATTERN (insn)) != CLOBBER);
+      gcc_assert (important_for_bundling_p (insn));
type = ia64_safe_type (insn);
next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
insn_num++;
@@ -8894,7 +9115,7 @@ bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
only_bundle_end_p
= (next_insn != NULL_RTX
&& INSN_CODE (insn) == CODE_FOR_insn_group_barrier
-	   && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
+	   && unknown_for_bundling_p (next_insn));
/* We may fill up the current bundle if it is the cycle end
without a group barrier. */
bundle_end_p
@@ -8978,8 +9199,7 @@ bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
curr_state = curr_state->originator)
{
insn = curr_state->insn;
-	  asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
-		   || asm_noperands (PATTERN (insn)) >= 0);
+	  asm_p = unknown_for_bundling_p (insn);
insn_num++;
if (verbose >= 2 && dump)
{
@@ -9055,8 +9275,7 @@ bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
/* Move the position backward in the window. Group barrier has
no slot. Asm insn takes all bundle. */
if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
-	      && GET_CODE (PATTERN (insn)) != ASM_INPUT
-	      && asm_noperands (PATTERN (insn)) < 0)
+	      && !unknown_for_bundling_p (insn))
pos--;
/* Long insn takes 2 slots. */
if (ia64_safe_type (insn) == TYPE_L)
@@ -9064,8 +9283,7 @@ bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
gcc_assert (pos >= 0);
if (pos % 3 == 0
&& INSN_CODE (insn) != CODE_FOR_insn_group_barrier
-	  && GET_CODE (PATTERN (insn)) != ASM_INPUT
-	  && asm_noperands (PATTERN (insn)) < 0)
+	  && !unknown_for_bundling_p (insn))
{
/* The current insn is at the bundle start: emit the
template. */
@@ -9139,8 +9357,7 @@ bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
if (recog_memoized (insn) == CODE_FOR_insn_group_barrier
&& !start_bundle && !end_bundle
&& next_insn
-	  && GET_CODE (PATTERN (next_insn)) != ASM_INPUT
-	  && asm_noperands (PATTERN (next_insn)) < 0)
+	  && !unknown_for_bundling_p (next_insn))
num--;
start_bundle = false;
@@ -9270,8 +9487,7 @@ final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
&& important_for_bundling_p (insn))
seen_good_insn = 1;
need_barrier_p = (GET_CODE (insn) == CALL_INSN
-			    || GET_CODE (PATTERN (insn)) == ASM_INPUT
-			    || asm_noperands (PATTERN (insn)) >= 0);
+			    || unknown_for_bundling_p (insn));
}
}
}
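To make the probe placement concrete, here is a minimal standalone model of the schedule ia64_emit_probe_stack_range chooses for the memory stack. It is a sketch, not GCC code: probe_schedule and its printf output are invented for illustration, and PROBE_INTERVAL is assumed to be the default 4096.

#include <stdio.h>

#define PROBE_INTERVAL 4096

/* Print the stack offsets probed for an area of SIZE bytes starting
   FIRST bytes below the stack pointer, mirroring the three cases of
   ia64_emit_probe_stack_range.  */
static void
probe_schedule (long first, long size)
{
  if (size == 0)
    return;

  if (size <= PROBE_INTERVAL)
    /* A single probe at the far end of the area.  */
    printf ("probe sp - %ld\n", first + size);
  else if (size <= 4 * PROBE_INTERVAL)
    {
      /* A short compile-time sequence of probes.  */
      long i;
      printf ("probe sp - %ld\n", first + PROBE_INTERVAL);
      for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
	printf ("probe sp - %ld\n", first + i);
      printf ("probe sp - %ld\n", first + size);
    }
  else
    {
      /* A run-time loop over the rounded size, plus a residual probe.  */
      long rounded_size = size & -PROBE_INTERVAL;
      long i;
      for (i = PROBE_INTERVAL; i <= rounded_size; i += PROBE_INTERVAL)
	printf ("probe sp - %ld\n", first + i);
      if (size != rounded_size)
	printf ("probe sp - %ld\n", first + size);
    }
}

int
main (void)
{
  /* E.g. a 20 KB frame with the 24 KB STACK_CHECK_PROTECT used on HP-UX.  */
  probe_schedule (24 * 1024, 20 * 1024);
  return 0;
}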

--- a/gcc/config/ia64/ia64.md
+++ b/gcc/config/ia64/ia64.md
@@ -105,6 +105,8 @@
UNSPECV_PSAC_NORMAL
UNSPECV_SETJMP_RECEIVER
UNSPECV_GOTO_RECEIVER
UNSPECV_PROBE_STACK_ADDRESS
UNSPECV_PROBE_STACK_RANGE
])
(include "predicates.md")
@@ -5182,6 +5184,26 @@
"mov %0 = ip"
[(set_attr "itanium_class" "frbr")])
;;
;; Stack checking
(define_insn "probe_stack_address"
[(unspec_volatile [(match_operand:DI 0 "register_operand" "r")]
UNSPECV_PROBE_STACK_ADDRESS)]
""
"probe.w.fault %0, 0"
[(set_attr "itanium_class" "chk_s_i")])
(define_insn "probe_stack_range"
[(set (match_operand:DI 0 "register_operand" "=r")
(unspec_volatile:DI [(match_operand:DI 1 "register_operand" "0")
(match_operand:DI 2 "register_operand" "r")]
UNSPECV_PROBE_STACK_RANGE))]
""
"* return output_probe_stack_range (operands[0], operands[2]);"
[(set_attr "itanium_class" "unknown")
(set_attr "predicable" "no")])
;; Vector operations
(include "vect.md")
;; Atomic operations

--- a/gcc/config/ia64/linux.h
+++ b/gcc/config/ia64/linux.h
@@ -86,3 +86,6 @@ do { \
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS ia64_soft_fp_init_libfuncs
/* Define this to be nonzero if static stack checking is supported. */
#define STACK_CHECK_STATIC_BUILTIN 1

--- a/gcc/doc/md.texi
+++ b/gcc/doc/md.texi
@@ -5614,6 +5614,13 @@ the stack farthest from the current stack pointer that you need to validate.
Normally, on platforms where this pattern is needed, you would obtain the
stack limit from a global or thread-specific variable or register.
@cindex @code{probe_stack_address} instruction pattern
@item @samp{probe_stack_address}
If stack checking (@pxref{Stack Checking}) can be done on your system by
probing the stack but without the need to actually access it, define this
pattern and signal an error if the stack has overflowed. The single operand
is the memory address in the stack that needs to be probed.
@cindex @code{probe_stack} instruction pattern
@item @samp{probe_stack}
If stack checking (@pxref{Stack Checking}) can be done on your system by

--- a/gcc/explow.c
+++ b/gcc/explow.c
@@ -1525,17 +1525,24 @@ set_stack_check_libfunc (const char *libfunc_name)
void
emit_stack_probe (rtx address)
{
-  rtx memref = gen_rtx_MEM (word_mode, address);
-  MEM_VOLATILE_P (memref) = 1;
-  /* See if we have an insn to probe the stack. */
-#ifdef HAVE_probe_stack
-  if (HAVE_probe_stack)
-    emit_insn (gen_probe_stack (memref));
+#ifdef HAVE_probe_stack_address
+  if (HAVE_probe_stack_address)
+    emit_insn (gen_probe_stack_address (address));
else
#endif
-    emit_move_insn (memref, const0_rtx);
+    {
+      rtx memref = gen_rtx_MEM (word_mode, address);
+      MEM_VOLATILE_P (memref) = 1;
+      /* See if we have an insn to probe the stack. */
+#ifdef HAVE_probe_stack
+      if (HAVE_probe_stack)
+	emit_insn (gen_probe_stack (memref));
+      else
+#endif
+	emit_move_insn (memref, const0_rtx);
+    }
}
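As a usage sketch (a hypothetical call, not part of the change), a middle-end caller probes one address at a time through this routine, e.g. one word 4 KB below the stack pointer; real callers may need to legitimize the address first:

  emit_stack_probe (plus_constant (stack_pointer_rtx, -4096));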
/* Probe a range of stack addresses from FIRST to FIRST+SIZE, inclusive.