mirror of
git://gcc.gnu.org/git/gcc.git
synced 2025-02-05 13:20:12 +08:00
sched-int.h (struct deps): New field libcall_block_tail_insn.
* sched-int.h (struct deps): New field libcall_block_tail_insn. * sched_deps.c (init_deps): Initialize libcall_block_tail_insn. * sched_deps.c (sched_analyze_insn): If libcall_block_tail_insn is set, then mark the current insn as being part of a libcall scheduling group. (sched_analyze): Set and clear libcall_block_tail_insn appropriately. * haifa-sched.c (schedule_block): Do not count USE or CLOBBER insns against the issue rate. From-SVN: r54251
This commit is contained in:
parent
63f4a88e28
commit
85d6921689
@@ -1,3 +1,17 @@
|
||||
2002-06-04 Jeff Law <law@redhat.com>
|
||||
David Edelsohn <edelsohn@gnu.org>
|
||||
Michael Matz <matz@kde.org>
|
||||
|
||||
* sched-int.h (struct deps): New field libcall_block_tail_insn.
|
||||
* sched_deps.c (init_deps): Initialize libcall_block_tail_insn.
|
||||
* sched_deps.c (sched_analyze_insn): If libcall_block_tail_insn
|
||||
is set, then mark the current insn as being part of a libcall
|
||||
scheduling group.
|
||||
(sched_analyze): Set and clear libcall_block_tail_insn appropriately.
|
||||
|
||||
* haifa-sched.c (schedule_block): Do not count USE or CLOBBER
|
||||
insns against the issue rate.
|
||||
|
||||
Tue Jun 4 19:29:42 CEST 2002 Jan Hubicka <jh@suse.cz>
|
||||
|
||||
* cfglayout.c (duplicate_insn_chain): Kill NOTE_INSN_RANGE notes.
|
||||
|
@@ -2166,7 +2166,10 @@ schedule_block (b, rgn_n_insns)
|
||||
can_issue_more =
|
||||
(*targetm.sched.variable_issue) (sched_dump, sched_verbose,
|
||||
insn, can_issue_more);
|
||||
else
|
||||
/* A naked CLOBBER or USE generates no instruction, so do
|
||||
not count them against the issue rate. */
|
||||
else if (GET_CODE (PATTERN (insn)) != USE
|
||||
&& GET_CODE (PATTERN (insn)) != CLOBBER)
|
||||
can_issue_more--;
|
||||
|
||||
schedule_insn (insn, &ready, clock_var);
|
||||
|
@@ -1163,6 +1163,16 @@ sched_analyze_insn (deps, x, insn, loop_notes)
|
||||
CLEAR_REG_SET (reg_pending_clobbers);
|
||||
CLEAR_REG_SET (reg_pending_sets);
|
||||
|
||||
/* If we are currently in a libcall scheduling group, then mark the
|
||||
current insn as being in a scheduling group and that it can not
|
||||
be moved into a different basic block. */
|
||||
|
||||
if (deps->libcall_block_tail_insn)
|
||||
{
|
||||
set_sched_group_p (insn);
|
||||
CANT_MOVE (insn) = 1;
|
||||
}
|
||||
|
||||
/* If a post-call group is still open, see if it should remain so.
|
||||
This insn must be a simple move of a hard reg to a pseudo or
|
||||
vice-versa.
|
||||
@@ -1226,6 +1236,8 @@ sched_analyze (deps, head, tail)
|
||||
|
||||
for (insn = head;; insn = NEXT_INSN (insn))
|
||||
{
|
||||
rtx link, end_seq, r0, set, note;
|
||||
|
||||
if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
|
||||
{
|
||||
/* Clear out the stale LOG_LINKS from flow. */
|
||||
@@ -1340,6 +1352,46 @@ sched_analyze (deps, head, tail)
|
||||
|
||||
if (current_sched_info->use_cselib)
|
||||
cselib_process_insn (insn);
|
||||
|
||||
/* Now that we have completed handling INSN, check and see if it is
|
||||
a CLOBBER beginning a libcall block. If it is, record the
|
||||
end of the libcall sequence.
|
||||
|
||||
We want to schedule libcall blocks as a unit before reload. While
|
||||
this restricts scheduling, it preserves the meaning of a libcall
|
||||
block.
|
||||
|
||||
As a side effect, we may get better code due to decreased register
|
||||
pressure as well as less chance of a foreign insn appearing in
|
||||
a libcall block. */
|
||||
if (!reload_completed
|
||||
/* Note we may have nested libcall sequences. We only care about
|
||||
the outermost libcall sequence. */
|
||||
&& deps->libcall_block_tail_insn == 0
|
||||
/* The sequence must start with a clobber of a register. */
|
||||
&& GET_CODE (insn) == INSN
|
||||
&& GET_CODE (PATTERN (insn)) == CLOBBER
|
||||
&& (r0 = XEXP (PATTERN (insn), 0), GET_CODE (r0) == REG)
|
||||
&& GET_CODE (XEXP (PATTERN (insn), 0)) == REG
|
||||
/* The CLOBBER must also have a REG_LIBCALL note attached. */
|
||||
&& (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
|
||||
&& (end_seq = XEXP (link, 0)) != 0
|
||||
/* The insn referenced by the REG_LIBCALL note must be a
|
||||
simple nop copy with the same destination as the register
|
||||
mentioned in the clobber. */
|
||||
&& (set = single_set (end_seq)) != 0
|
||||
&& SET_DEST (set) == r0 && SET_SRC (set) == r0
|
||||
/* And finally the insn referenced by the REG_LIBCALL must
|
||||
also contain a REG_EQUAL note and a REG_RETVAL note. */
|
||||
&& find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
|
||||
&& find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
|
||||
deps->libcall_block_tail_insn = XEXP (link, 0);
|
||||
|
||||
/* If we have reached the end of a libcall block, then close the
|
||||
block. */
|
||||
if (deps->libcall_block_tail_insn == insn)
|
||||
deps->libcall_block_tail_insn = 0;
|
||||
|
||||
if (insn == tail)
|
||||
{
|
||||
if (current_sched_info->use_cselib)
|
||||
@@ -1433,6 +1485,7 @@ init_deps (deps)
|
||||
deps->last_function_call = 0;
|
||||
deps->sched_before_next_call = 0;
|
||||
deps->in_post_call_group_p = false;
|
||||
deps->libcall_block_tail_insn = 0;
|
||||
}
|
||||
|
||||
/* Free insn lists found in DEPS. */
|
||||
|
@@ -86,6 +86,12 @@ struct deps
|
||||
the call. */
|
||||
bool in_post_call_group_p;
|
||||
|
||||
/* Set to the tail insn of the outermost libcall block.
|
||||
|
||||
When nonzero, we will mark each insn processed by sched_analyze_insn
|
||||
with SCHED_GROUP_P to ensure libcalls are scheduled as a unit. */
|
||||
rtx libcall_block_tail_insn;
|
||||
|
||||
/* The maximum register number for the following arrays. Before reload
|
||||
this is max_reg_num; after reload it is FIRST_PSEUDO_REGISTER. */
|
||||
int max_reg;
|
||||
|
Loading…
Reference in New Issue
Block a user