Mirror of git://gcc.gnu.org/git/gcc.git
sched-deps.c (sched_analyze_insn): Consolidate scheduling barrier code.
* sched-deps.c (sched_analyze_insn): Consolidate scheduling barrier
code.  Add a scheduling barrier if a non-call insn can throw internally.

From-SVN: r40915
parent 4f73495e07
commit ca3c102896
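At a glance, the patch replaces two separate copies of the "depend on all previously referenced registers" loop with a single schedule_barrier_found flag: each barrier condition (a jump followed by a BARRIER, a barrier-type loop/EH note, and now a non-call insn that can throw internally) just sets the flag, and the dependence-building code runs once at the end. A minimal, self-contained C sketch of that idiom follows; the predicates and the helper are hypothetical stand-ins, not the real scheduler routines.

#include <stdio.h>

/* Hypothetical stand-ins for the real scheduler predicates and for the
   dependence-building code; they only model the control flow.  */
static int jump_followed_by_barrier_p (int insn) { return insn == 1; }
static int can_throw_internally_p (int insn) { return insn == 2; }

static void
add_barrier_dependencies (int insn)
{
  printf ("insn %d: dependences added on all prior register refs\n", insn);
}

static void
analyze_insn (int insn, int non_call_exceptions)
{
  int schedule_barrier_found = 0;

  /* Each barrier condition merely sets the flag ...  */
  if (jump_followed_by_barrier_p (insn))
    schedule_barrier_found = 1;
  if (non_call_exceptions && can_throw_internally_p (insn))
    schedule_barrier_found = 1;

  /* ... and the dependence-building code exists in exactly one place.  */
  if (schedule_barrier_found)
    add_barrier_dependencies (insn);
}

int
main (void)
{
  for (int insn = 0; insn < 3; insn++)
    analyze_insn (insn, 1);
  return 0;
}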
@@ -1,5 +1,9 @@
2001-03-27  Richard Henderson  <rth@redhat.com>

        * sched-deps.c (sched_analyze_insn): Consolidate scheduling
        barrier code.  Add a scheduling barrier if a non-call insn
        can throw internally.

        * rtlanal.c (rtx_addr_can_trap_p): Virtual registers cannot trap.
        Auto-inc addresses trap only if their base register does.

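The rtlanal.c entry above is adjacent ChangeLog context rather than part of this diff, but the rule it records is easy to state: an address held in a virtual register cannot trap, and an auto-increment address can trap only if its base address can. A toy, self-contained model of that decision (this is not the real rtx_addr_can_trap_p; the address representation and the register classification here are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Toy address representation; the real function walks RTL expressions.  */
enum addr_kind { ADDR_REG, ADDR_AUTOINC };

struct addr
{
  enum addr_kind kind;
  bool reg_is_virtual;       /* e.g. virtual frame/argument pointers */
  const struct addr *base;   /* base address of an auto-inc form */
};

static bool
addr_can_trap_p (const struct addr *a)
{
  switch (a->kind)
    {
    case ADDR_REG:
      /* Virtual registers (frame, argument, stack slots) cannot trap.  */
      return !a->reg_is_virtual;
    case ADDR_AUTOINC:
      /* An auto-increment address traps only if its base does.  */
      return addr_can_trap_p (a->base);
    }
  return true;
}

int
main (void)
{
  struct addr frame  = { ADDR_REG, true, NULL };
  struct addr pseudo = { ADDR_REG, false, NULL };
  struct addr inc    = { ADDR_AUTOINC, false, &frame };

  printf ("%d %d %d\n", addr_can_trap_p (&frame),
          addr_can_trap_p (&pseudo), addr_can_trap_p (&inc));
  return 0;
}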
@@ -972,6 +972,7 @@ sched_analyze_insn (deps, x, insn, loop_notes)
     rtx loop_notes;
{
  register RTX_CODE code = GET_CODE (x);
  int schedule_barrier_found = 0;
  rtx link;
  int i;

@@ -1021,22 +1022,10 @@ sched_analyze_insn (deps, x, insn, loop_notes)

  if (GET_CODE (insn) == JUMP_INSN)
    {
      rtx next, u, pending, pending_mem;
      rtx next, u;
      next = next_nonnote_insn (insn);
      if (next && GET_CODE (next) == BARRIER)
        {
          for (i = 0; i < deps->max_reg; i++)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];

              for (u = reg_last->uses; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              for (u = reg_last->sets; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              for (u = reg_last->clobbers; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
            }
        }
        schedule_barrier_found = 1;
      else
        {
          regset_head tmp;
@@ -1054,33 +1043,19 @@ sched_analyze_insn (deps, x, insn, loop_notes)

          CLEAR_REG_SET (&tmp);
        }
      pending = deps->pending_write_insns;
      pending_mem = deps->pending_write_mems;
      while (pending)
        {
          add_dependence (insn, XEXP (pending, 0), 0);

          pending = XEXP (pending, 1);
          pending_mem = XEXP (pending_mem, 1);
        }

      for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
        add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
    }

  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
     block, then we must be sure that no instructions are scheduled across it.
     Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect. */

  if (loop_notes)
    {
      int schedule_barrier_found = 0;
      rtx link;

      /* Update loop_notes with any notes from this insn. Also determine
         if any of the notes on the list correspond to instruction scheduling
         barriers (loop, eh & setjmp notes, but not range notes. */
         barriers (loop, eh & setjmp notes, but not range notes). */
      link = loop_notes;
      while (XEXP (link, 1))
        {
@@ -1095,30 +1070,48 @@ sched_analyze_insn (deps, x, insn, loop_notes)
        }
      XEXP (link, 1) = REG_NOTES (insn);
      REG_NOTES (insn) = loop_notes;
    }

  /* Add dependencies if a scheduling barrier was found. */
  if (schedule_barrier_found)
  /* If this instruction can throw an exception, then moving it changes
     where block boundaries fall. This is mighty confusing elsewhere.
     Therefore, prevent such an instruction from being moved. */
  if (flag_non_call_exceptions && can_throw_internal (insn))
    schedule_barrier_found = 1;

  /* Add dependencies if a scheduling barrier was found. */
  if (schedule_barrier_found)
    {
      rtx u, pending, pending_mem;

      for (i = 0; i < deps->max_reg; i++)
        {
      for (i = 0; i < deps->max_reg; i++)
        {
          struct deps_reg *reg_last = &deps->reg_last[i];
          rtx u;
          struct deps_reg *reg_last = &deps->reg_last[i];

          for (u = reg_last->uses; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
          for (u = reg_last->sets; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), 0);
          for (u = reg_last->clobbers; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), 0);
          for (u = reg_last->uses; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
          for (u = reg_last->sets; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), 0);
          for (u = reg_last->clobbers; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), 0);

          if (GET_CODE (PATTERN (insn)) != COND_EXEC)
            free_INSN_LIST_list (&reg_last->uses);
        }
      reg_pending_sets_all = 1;
          if (GET_CODE (PATTERN (insn)) != COND_EXEC)
            free_INSN_LIST_list (&reg_last->uses);
        }
      flush_pending_lists (deps, insn, 0);

      flush_pending_lists (deps, insn, 0);
      pending = deps->pending_write_insns;
      pending_mem = deps->pending_write_mems;
      while (pending)
        {
          add_dependence (insn, XEXP (pending, 0), 0);
          pending = XEXP (pending, 1);
          pending_mem = XEXP (pending_mem, 1);
        }

      for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
        add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

      reg_pending_sets_all = 1;
    }

  /* Accumulate clobbers until the next set so that it will be output
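Throughout the hunks above, dependence lists are walked with the idiom for (u = list; u; u = XEXP (u, 1)) ... XEXP (u, 0) ...: each INSN_LIST node holds the recorded insn as its first operand and the next node as its second. A small stand-alone model of that cons-style walk (plain C structs standing in for the rtx accessors):

#include <stdio.h>
#include <stdlib.h>

/* A cons-style node: 'insn' plays the role of XEXP (u, 0),
   'next' the role of XEXP (u, 1).  */
struct insn_list
{
  int insn;
  struct insn_list *next;
};

static struct insn_list *
cons (int insn, struct insn_list *next)
{
  struct insn_list *u = malloc (sizeof *u);
  u->insn = insn;
  u->next = next;
  return u;
}

int
main (void)
{
  /* Build a small "uses" list and walk it the way the scheduler does.  */
  struct insn_list *uses = cons (7, cons (12, cons (31, NULL)));

  for (struct insn_list *u = uses; u; u = u->next)
    printf ("add_dependence (insn, %d, REG_DEP_ANTI)\n", u->insn);

  return 0;
}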