sched-ebb.c (begin_schedule_ready): Remove second argument.

* sched-ebb.c (begin_schedule_ready): Remove second argument.
	Split most of the code into...
	(begin_move_insn): ... here.  New function.
	(ebb_sched_info): Add a pointer to it.
	* haifa-sched.c (scheduled_insns): New static variable.
	(sched_extend_ready_list): Allocate it.
	(schedule_block): Use it to record the order of scheduled insns.
	Perform RTL changes to move insns only after all scheduling
	decisions have been made.
	* modulo-sched.c (sms_sched_haifa_sched_info): Add NULL entry for the
	begin_move_insn field.
	* sel-sched-ir.c (sched_sel_haifa_sched_info): Likewise.
	* sched-int.h (struct haifa_sched_info): Remove second argument
	from begin_schedule_ready hook.  Add new member begin_move_insn.
	* sched-rgn.c (begin_schedule_ready): Remove second argument.
	(rgn_const_sched_info): Add NULL entry for the begin_move_insn field.

From-SVN: r171843
Bernd Schmidt 2011-04-01 17:46:17 +00:00 committed by Bernd Schmidt
parent 2a6a0d809f
commit 86014d0748
7 changed files with 101 additions and 83 deletions
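
In outline, the patch splits schedule_block () into two phases: the main loop now only decides the order and records it in the new scheduled_insns vector, and a new commit_schedule () function performs all RTL movement afterwards.  A condensed sketch of the resulting flow (not verbatim GCC code; the real implementation is in the haifa-sched.c hunks below, and debug-insn and error handling are omitted here):

    /* Phase 1: scheduling decisions only; no RTL is touched.  */
    while ((*current_sched_info->schedule_more_p) ())
      {
        rtx insn = ready_remove_first (&ready);
        (*current_sched_info->begin_schedule_ready) (insn); /* bookkeeping */
        VEC_safe_push (rtx, heap, scheduled_insns, insn);   /* record order */
        last_scheduled_insn = insn;
        advance = schedule_insn (insn);
      }

    /* Phase 2: once every decision is final, rewrite the RTL in one pass.
       commit_schedule () walks scheduled_insns, calls the frontend's
       begin_move_insn () hook and move_insn () for each entry, reemits
       notes, and finally truncates the vector.  */
    commit_schedule (prev_head, tail, target_bb);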

gcc/ChangeLog

@@ -15,6 +15,23 @@
 	* sched-ebb.c (schedule_ebbs): Honor the BB_DISABLE_SCHEDULE flag.
 
+	* sched-ebb.c (begin_schedule_ready): Remove second argument.
+	Split most of the code into...
+	(begin_move_insn): ... here.  New function.
+	(ebb_sched_info): Add a pointer to it.
+	* haifa-sched.c (scheduled_insns): New static variable.
+	(sched_extend_ready_list): Allocate it.
+	(schedule_block): Use it to record the order of scheduled insns.
+	Perform RTL changes to move insns only after all scheduling
+	decisions have been made.
+	* modulo-sched.c (sms_sched_haifa_sched_info): Add NULL entry for the
+	begin_move_insn field.
+	* sel-sched-ir.c (sched_sel_haifa_sched_info): Likewise.
+	* sched-int.h (struct haifa_sched_info): Remove second argument
+	from begin_schedule_ready hook.  Add new member begin_move_insn.
+	* sched-rgn.c (begin_schedule_ready): Remove second argument.
+	(rgn_const_sched_info): Add NULL entry for the begin_move_insn field.
+
 2011-04-01  Ulrich Weigand  <Ulrich.Weigand@de.ibm.com>
 
 	* config/spu/t-spu-elf (dp-bit.c): Use > instead of >>.

gcc/haifa-sched.c

@@ -302,6 +302,10 @@ static struct ready_list *readyp = &ready;
 /* Scheduling clock.  */
 static int clock_var;
 
+/* This records the actual schedule.  It is built up during the main phase
+   of schedule_block, and afterwards used to reorder the insns in the RTL.  */
+static VEC(rtx, heap) *scheduled_insns;
+
 static int may_trap_exp (const_rtx, int);
 
 /* Nonzero iff the address is comprised from at most 1 register.  */
@@ -2813,6 +2817,51 @@ choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
     }
 }
 
+/* This function is called when we have successfully scheduled a
+   block.  It uses the schedule stored in the scheduled_insns vector
+   to rearrange the RTL.  PREV_HEAD is used as the anchor to which we
+   append the scheduled insns; TAIL is the insn after the scheduled
+   block.  TARGET_BB is the argument passed to schedule_block.  */
+
+static void
+commit_schedule (rtx prev_head, rtx tail, basic_block *target_bb)
+{
+  int i;
+
+  last_scheduled_insn = prev_head;
+  for (i = 0; i < (int) VEC_length (rtx, scheduled_insns); i++)
+    {
+      rtx insn = VEC_index (rtx, scheduled_insns, i);
+
+      if (control_flow_insn_p (last_scheduled_insn)
+          || current_sched_info->advance_target_bb (*target_bb, insn))
+        {
+          *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);
+          if (sched_verbose)
+            {
+              rtx x;
+
+              x = next_real_insn (last_scheduled_insn);
+              gcc_assert (x);
+              dump_new_block_header (1, *target_bb, x, tail);
+            }
+          last_scheduled_insn = bb_note (*target_bb);
+        }
+
+      if (current_sched_info->begin_move_insn)
+        (*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
+      move_insn (insn, last_scheduled_insn,
+                 current_sched_info->next_tail);
+      if (!DEBUG_INSN_P (insn))
+        reemit_notes (insn);
+      last_scheduled_insn = insn;
+    }
+
+  VEC_truncate (rtx, scheduled_insns, 0);
+}
+
 /* Use forward list scheduling to rearrange insns of block pointed to by
    TARGET_BB, possibly bringing insns from subsequent blocks in the same
    region.  */
@@ -2934,6 +2983,7 @@ schedule_block (basic_block *target_bb)
   advance = 0;
 
+  gcc_assert (VEC_length (rtx, scheduled_insns) == 0);
   sort_p = TRUE;
 
   /* Loop until all the insns in BB are scheduled.  */
   while ((*current_sched_info->schedule_more_p) ())
@@ -2979,31 +3029,12 @@ schedule_block (basic_block *target_bb)
          them out right away.  */
       if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
         {
-          if (control_flow_insn_p (last_scheduled_insn))
-            {
-              *target_bb = current_sched_info->advance_target_bb
-                (*target_bb, 0);
-              if (sched_verbose)
-                {
-                  rtx x;
-
-                  x = next_real_insn (last_scheduled_insn);
-                  gcc_assert (x);
-                  dump_new_block_header (1, *target_bb, x, tail);
-                }
-              last_scheduled_insn = bb_note (*target_bb);
-            }
-
           while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
             {
               rtx insn = ready_remove_first (&ready);
               gcc_assert (DEBUG_INSN_P (insn));
-              (*current_sched_info->begin_schedule_ready) (insn,
-                                                           last_scheduled_insn);
-              move_insn (insn, last_scheduled_insn,
-                         current_sched_info->next_tail);
+              (*current_sched_info->begin_schedule_ready) (insn);
+              VEC_safe_push (rtx, heap, scheduled_insns, insn);
               last_scheduled_insn = insn;
               advance = schedule_insn (insn);
               gcc_assert (advance == 0);
@@ -3165,39 +3196,12 @@ schedule_block (basic_block *target_bb)
           if (TODO_SPEC (insn) & SPECULATIVE)
             generate_recovery_code (insn);
 
-          if (control_flow_insn_p (last_scheduled_insn)
-              /* This is used to switch basic blocks by request
-                 from scheduler front-end (actually, sched-ebb.c only).
-                 This is used to process blocks with single fallthru
-                 edge.  If succeeding block has jump, it [jump] will try
-                 move at the end of current bb, thus corrupting CFG.  */
-              || current_sched_info->advance_target_bb (*target_bb, insn))
-            {
-              *target_bb = current_sched_info->advance_target_bb
-                (*target_bb, 0);
-              if (sched_verbose)
-                {
-                  rtx x;
-
-                  x = next_real_insn (last_scheduled_insn);
-                  gcc_assert (x);
-                  dump_new_block_header (1, *target_bb, x, tail);
-                }
-              last_scheduled_insn = bb_note (*target_bb);
-            }
-
-          /* Update counters, etc in the scheduler's front end.  */
-          (*current_sched_info->begin_schedule_ready) (insn,
-                                                       last_scheduled_insn);
-          move_insn (insn, last_scheduled_insn, current_sched_info->next_tail);
-
           if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
             targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
 
-          reemit_notes (insn);
+          /* Update counters, etc in the scheduler's front end.  */
+          (*current_sched_info->begin_schedule_ready) (insn);
+          VEC_safe_push (rtx, heap, scheduled_insns, insn);
           last_scheduled_insn = insn;
 
           if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
@@ -3236,31 +3240,12 @@ schedule_block (basic_block *target_bb)
       if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
           && (*current_sched_info->schedule_more_p) ())
         {
-          if (control_flow_insn_p (last_scheduled_insn))
-            {
-              *target_bb = current_sched_info->advance_target_bb
-                (*target_bb, 0);
-              if (sched_verbose)
-                {
-                  rtx x;
-
-                  x = next_real_insn (last_scheduled_insn);
-                  gcc_assert (x);
-                  dump_new_block_header (1, *target_bb, x, tail);
-                }
-              last_scheduled_insn = bb_note (*target_bb);
-            }
-
           while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
             {
               insn = ready_remove_first (&ready);
               gcc_assert (DEBUG_INSN_P (insn));
-              (*current_sched_info->begin_schedule_ready)
-                (insn, last_scheduled_insn);
-              move_insn (insn, last_scheduled_insn,
-                         current_sched_info->next_tail);
+              (*current_sched_info->begin_schedule_ready) (insn);
+              VEC_safe_push (rtx, heap, scheduled_insns, insn);
               advance = schedule_insn (insn);
               last_scheduled_insn = insn;
               gcc_assert (advance == 0);
@@ -3321,6 +3306,7 @@ schedule_block (basic_block *target_bb)
         }
     }
 
+  commit_schedule (prev_head, tail, target_bb);
 
   if (sched_verbose)
     fprintf (sched_dump, ";; total time = %d\n", clock_var);
@@ -4001,6 +3987,7 @@ sched_extend_ready_list (int new_sched_ready_n_insns)
     {
       i = 0;
       sched_ready_n_insns = 0;
+      scheduled_insns = VEC_alloc (rtx, heap, new_sched_ready_n_insns);
     }
   else
     i = sched_ready_n_insns + 1;

gcc/modulo-sched.c

@@ -1,5 +1,5 @@
 /* Swing Modulo Scheduling implementation.
-   Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010
+   Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
    Free Software Foundation, Inc.
    Contributed by Ayal Zaks and Mustafa Hagog <zaks,mustafa@il.ibm.com>
@@ -275,7 +275,7 @@ static struct haifa_sched_info sms_sched_info =
   NULL, NULL,
   0, 0,
 
-  NULL, NULL, NULL,
+  NULL, NULL, NULL, NULL,
   0
 };

gcc/sched-ebb.c

@@ -59,7 +59,7 @@ static basic_block last_bb;
 /* Implementations of the sched_info functions for region scheduling.  */
 static void init_ready_list (void);
-static void begin_schedule_ready (rtx, rtx);
+static void begin_schedule_ready (rtx);
 static int schedule_more_p (void);
 static const char *ebb_print_insn (const_rtx, int);
 static int rank (rtx, rtx);
@@ -125,10 +125,15 @@ init_ready_list (void)
 /* INSN is being scheduled after LAST.  Update counters.  */
 static void
-begin_schedule_ready (rtx insn, rtx last)
+begin_schedule_ready (rtx insn ATTRIBUTE_UNUSED)
 {
   sched_rgn_n_insns++;
+}
 
+/* INSN is being moved to its place in the schedule, after LAST.  */
+static void
+begin_move_insn (rtx insn, rtx last)
+{
   if (BLOCK_FOR_INSN (insn) == last_bb
       /* INSN is a jump in the last block, ...  */
       && control_flow_insn_p (insn)
@@ -288,6 +293,7 @@ static struct haifa_sched_info ebb_sched_info =
   ebb_add_remove_insn,
   begin_schedule_ready,
+  begin_move_insn,
   advance_target_bb,
   SCHED_EBB
   /* We can create new blocks in begin_schedule_ready ().  */

gcc/sched-int.h

@@ -605,10 +605,15 @@ struct haifa_sched_info
      parameter == 0) or removed (second parameter == 1).  */
   void (*add_remove_insn) (rtx, int);
 
-  /* Called to notify frontend that instruction is being scheduled.
-     The first parameter - instruction to scheduled, the second parameter -
-     last scheduled instruction.  */
-  void (*begin_schedule_ready) (rtx, rtx);
+  /* Called to notify the frontend that instruction INSN is being
+     scheduled.  */
+  void (*begin_schedule_ready) (rtx insn);
+
+  /* Called to notify the frontend that an instruction INSN is about to be
+     moved to its correct place in the final schedule.  This is done for all
+     insns in order of the schedule.  LAST indicates the last scheduled
+     instruction.  */
+  void (*begin_move_insn) (rtx insn, rtx last);
 
   /* If the second parameter is not NULL, return nonnull value, if the
      basic block should be advanced.
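
With the hook split, a scheduler frontend's side of this interface looks roughly like the sketch below (modelled on the sched-ebb.c hunks above; the "my_" function names and the counter are illustrative, not identifiers from this patch).  begin_schedule_ready is reduced to bookkeeping while the schedule is still being decided, and the RTL/CFG work is deferred to begin_move_insn, which is called once per insn in final schedule order:

    /* Illustrative frontend hooks; names here are made up for this sketch.  */
    static int n_scheduled;

    /* Called while scheduling decisions are still being made.  */
    static void
    my_begin_schedule_ready (rtx insn ATTRIBUTE_UNUSED)
    {
      n_scheduled++;
    }

    /* Called from commit_schedule () just before INSN is moved after LAST;
       CFG fixups (e.g. splitting a block after a jump, as sched-ebb.c's
       begin_move_insn does) now happen here.  */
    static void
    my_begin_move_insn (rtx insn, rtx last)
    {
      /* ... rearrange blocks / notes as needed ...  */
    }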

gcc/sched-rgn.c

@@ -2057,7 +2057,7 @@ static int sched_n_insns;
 /* Implementations of the sched_info functions for region scheduling.  */
 static void init_ready_list (void);
 static int can_schedule_ready_p (rtx);
-static void begin_schedule_ready (rtx, rtx);
+static void begin_schedule_ready (rtx);
 static ds_t new_ready (rtx, ds_t);
 static int schedule_more_p (void);
 static const char *rgn_print_insn (const_rtx, int);
@@ -2152,7 +2152,7 @@ can_schedule_ready_p (rtx insn)
    can_schedule_ready_p () differs from the one passed to
    begin_schedule_ready ().  */
 static void
-begin_schedule_ready (rtx insn, rtx last ATTRIBUTE_UNUSED)
+begin_schedule_ready (rtx insn)
 {
   /* An interblock motion?  */
   if (INSN_BB (insn) != target_bb)
@@ -2369,6 +2369,7 @@ static const struct haifa_sched_info rgn_const_sched_info =
   rgn_add_remove_insn,
   begin_schedule_ready,
+  NULL,
   advance_target_bb,
   SCHED_RGN
 };

gcc/sel-sched-ir.c

@@ -1,5 +1,6 @@
 /* Instruction scheduling pass.  Selective scheduler and pipeliner.
-   Copyright (C) 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+   Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011
+   Free Software Foundation, Inc.
 
 This file is part of GCC.
@@ -5649,6 +5650,7 @@ static struct haifa_sched_info sched_sel_haifa_sched_info =
   NULL, /* add_remove_insn */
   NULL, /* begin_schedule_ready */
+  NULL, /* begin_move_insn */
   NULL, /* advance_target_bb */
   SEL_SCHED | NEW_BBS
 };