mirror of git://gcc.gnu.org/git/gcc.git
ra.c: Convert to ISO C90 prototypes.
2003-09-14  Steven Bosscher  <steven@gcc.gnu.org>

	* ra.c: Convert to ISO C90 prototypes.
	* ra-build.c: Likewise.
	* ra-colorize.c: Likewise.
	* ra-debug.c: Likewise.
	* ra-rewrite.c: Likewise.

From-SVN: r71379
parent 3983d33ea8
commit 93bad80ebc
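What "convert to ISO C90 prototypes" means in practice: each function loses its PARAMS ((...)) forward declaration and its K&R-style definition, and gets a plain prototype instead. Below is a minimal sketch of the pattern the patch applies throughout the ra-* files. The function web_degree is a made-up name for illustration, not one from the patch, although struct web and its num_conflicts field do exist in this allocator; PARAMS is GCC's old compatibility macro for pre-ANSI compilers.

/* Old style: PARAMS declaration plus a K&R definition.  */
static int web_degree PARAMS ((struct web *, int));

static int
web_degree (web, bias)
     struct web *web;
     int bias;
{
  /* Degree of the web in the interference graph, plus a caller-chosen bias.  */
  return web->num_conflicts + bias;
}

/* ISO C90 style: parameter types appear in both prototype and definition.  */
static int web_degree (struct web *, int);

static int
web_degree (struct web *web, int bias)
{
  return web->num_conflicts + bias;
}

Behavior is unchanged; the conversion is purely syntactic, which is why every file in the ChangeLog entry after ra.c is simply marked "Likewise".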
gcc/ChangeLog
@@ -1,3 +1,11 @@
+2003-09-14  Steven Bosscher  <steven@gcc.gnu.org>
+
+	* ra.c: Convert to ISO C90 prototypes.
+	* ra-build.c: Likewise.
+	* ra-colorize.c: Likewise.
+	* ra-debug.c: Likewise.
+	* ra-rewrite.c: Likewise.
+
 2003-09-14  Richard Sandiford  <rsandifo@redhat.com>
 
 	* Makefile.in (%.dvi): Remove excess $(docdir).
281  gcc/ra-build.c
@ -68,60 +68,59 @@
|
||||
|
||||
struct curr_use;
|
||||
|
||||
static unsigned HOST_WIDE_INT rtx_to_undefined PARAMS ((rtx));
|
||||
static bitmap find_sub_conflicts PARAMS ((struct web_part *, unsigned int));
|
||||
static bitmap get_sub_conflicts PARAMS ((struct web_part *, unsigned int));
|
||||
static unsigned int undef_to_size_word PARAMS ((rtx, unsigned HOST_WIDE_INT *));
|
||||
static bitmap undef_to_bitmap PARAMS ((struct web_part *,
|
||||
unsigned HOST_WIDE_INT *));
|
||||
static struct web_part * find_web_part_1 PARAMS ((struct web_part *));
|
||||
static unsigned HOST_WIDE_INT rtx_to_undefined (rtx);
|
||||
static bitmap find_sub_conflicts (struct web_part *, unsigned int);
|
||||
static bitmap get_sub_conflicts (struct web_part *, unsigned int);
|
||||
static unsigned int undef_to_size_word (rtx, unsigned HOST_WIDE_INT *);
|
||||
static bitmap undef_to_bitmap (struct web_part *,
|
||||
unsigned HOST_WIDE_INT *);
|
||||
static struct web_part * find_web_part_1 (struct web_part *);
|
||||
static struct web_part * union_web_part_roots
|
||||
PARAMS ((struct web_part *, struct web_part *));
|
||||
static int defuse_overlap_p_1 PARAMS ((rtx, struct curr_use *));
|
||||
static int live_out_1 PARAMS ((struct df *, struct curr_use *, rtx));
|
||||
static int live_out PARAMS ((struct df *, struct curr_use *, rtx));
|
||||
static rtx live_in_edge PARAMS (( struct df *, struct curr_use *, edge));
|
||||
static void live_in PARAMS ((struct df *, struct curr_use *, rtx));
|
||||
static int copy_insn_p PARAMS ((rtx, rtx *, rtx *));
|
||||
static void remember_move PARAMS ((rtx));
|
||||
static void handle_asm_insn PARAMS ((struct df *, rtx));
|
||||
static void prune_hardregs_for_mode PARAMS ((HARD_REG_SET *,
|
||||
enum machine_mode));
|
||||
static void init_one_web_common PARAMS ((struct web *, rtx));
|
||||
static void init_one_web PARAMS ((struct web *, rtx));
|
||||
static void reinit_one_web PARAMS ((struct web *, rtx));
|
||||
static struct web * add_subweb PARAMS ((struct web *, rtx));
|
||||
static struct web * add_subweb_2 PARAMS ((struct web *, unsigned int));
|
||||
static void init_web_parts PARAMS ((struct df *));
|
||||
static void copy_conflict_list PARAMS ((struct web *));
|
||||
static void add_conflict_edge PARAMS ((struct web *, struct web *));
|
||||
static void build_inverse_webs PARAMS ((struct web *));
|
||||
static void copy_web PARAMS ((struct web *, struct web_link **));
|
||||
static void compare_and_free_webs PARAMS ((struct web_link **));
|
||||
static void init_webs_defs_uses PARAMS ((void));
|
||||
static unsigned int parts_to_webs_1 PARAMS ((struct df *, struct web_link **,
|
||||
struct df_link *));
|
||||
static void parts_to_webs PARAMS ((struct df *));
|
||||
static void reset_conflicts PARAMS ((void));
|
||||
(struct web_part *, struct web_part *);
|
||||
static int defuse_overlap_p_1 (rtx, struct curr_use *);
|
||||
static int live_out_1 (struct df *, struct curr_use *, rtx);
|
||||
static int live_out (struct df *, struct curr_use *, rtx);
|
||||
static rtx live_in_edge ( struct df *, struct curr_use *, edge);
|
||||
static void live_in (struct df *, struct curr_use *, rtx);
|
||||
static int copy_insn_p (rtx, rtx *, rtx *);
|
||||
static void remember_move (rtx);
|
||||
static void handle_asm_insn (struct df *, rtx);
|
||||
static void prune_hardregs_for_mode (HARD_REG_SET *, enum machine_mode);
|
||||
static void init_one_web_common (struct web *, rtx);
|
||||
static void init_one_web (struct web *, rtx);
|
||||
static void reinit_one_web (struct web *, rtx);
|
||||
static struct web * add_subweb (struct web *, rtx);
|
||||
static struct web * add_subweb_2 (struct web *, unsigned int);
|
||||
static void init_web_parts (struct df *);
|
||||
static void copy_conflict_list (struct web *);
|
||||
static void add_conflict_edge (struct web *, struct web *);
|
||||
static void build_inverse_webs (struct web *);
|
||||
static void copy_web (struct web *, struct web_link **);
|
||||
static void compare_and_free_webs (struct web_link **);
|
||||
static void init_webs_defs_uses (void);
|
||||
static unsigned int parts_to_webs_1 (struct df *, struct web_link **,
|
||||
struct df_link *);
|
||||
static void parts_to_webs (struct df *);
|
||||
static void reset_conflicts (void);
|
||||
#if 0
|
||||
static void check_conflict_numbers PARAMS ((void));
|
||||
static void check_conflict_numbers (void)
|
||||
#endif
|
||||
static void conflicts_between_webs PARAMS ((struct df *));
|
||||
static void remember_web_was_spilled PARAMS ((struct web *));
|
||||
static void detect_spill_temps PARAMS ((void));
|
||||
static int contains_pseudo PARAMS ((rtx));
|
||||
static int want_to_remat PARAMS ((rtx x));
|
||||
static void detect_remat_webs PARAMS ((void));
|
||||
static void determine_web_costs PARAMS ((void));
|
||||
static void detect_webs_set_in_cond_jump PARAMS ((void));
|
||||
static void make_webs PARAMS ((struct df *));
|
||||
static void moves_to_webs PARAMS ((struct df *));
|
||||
static void connect_rmw_web_parts PARAMS ((struct df *));
|
||||
static void update_regnos_mentioned PARAMS ((void));
|
||||
static void livethrough_conflicts_bb PARAMS ((basic_block));
|
||||
static void init_bb_info PARAMS ((void));
|
||||
static void free_bb_info PARAMS ((void));
|
||||
static void build_web_parts_and_conflicts PARAMS ((struct df *));
|
||||
static void conflicts_between_webs (struct df *);
|
||||
static void remember_web_was_spilled (struct web *);
|
||||
static void detect_spill_temps (void);
|
||||
static int contains_pseudo (rtx);
|
||||
static int want_to_remat (rtx x);
|
||||
static void detect_remat_webs (void);
|
||||
static void determine_web_costs (void);
|
||||
static void detect_webs_set_in_cond_jump (void);
|
||||
static void make_webs (struct df *);
|
||||
static void moves_to_webs (struct df *);
|
||||
static void connect_rmw_web_parts (struct df *);
|
||||
static void update_regnos_mentioned (void);
|
||||
static void livethrough_conflicts_bb (basic_block);
|
||||
static void init_bb_info (void);
|
||||
static void free_bb_info (void);
|
||||
static void build_web_parts_and_conflicts (struct df *);
|
||||
|
||||
|
||||
/* A sbitmap of DF_REF_IDs of uses, which are live over an abnormal
|
||||
@ -181,8 +180,7 @@ struct ra_bb_info
|
||||
as an integer. */
|
||||
|
||||
unsigned int
|
||||
rtx_to_bits (x)
|
||||
rtx x;
|
||||
rtx_to_bits (rtx x)
|
||||
{
|
||||
unsigned int len, beg;
|
||||
len = GET_MODE_SIZE (GET_MODE (x));
|
||||
@ -193,8 +191,7 @@ rtx_to_bits (x)
|
||||
/* X is a REG or SUBREG rtx. Return the bytes it touches as a bitmask. */
|
||||
|
||||
static unsigned HOST_WIDE_INT
|
||||
rtx_to_undefined (x)
|
||||
rtx x;
|
||||
rtx_to_undefined (rtx x)
|
||||
{
|
||||
unsigned int len, beg;
|
||||
unsigned HOST_WIDE_INT ret;
|
||||
@ -225,10 +222,7 @@ int *number_seen;
|
||||
not NULL. */
|
||||
|
||||
static int
|
||||
copy_insn_p (insn, source, target)
|
||||
rtx insn;
|
||||
rtx *source;
|
||||
rtx *target;
|
||||
copy_insn_p (rtx insn, rtx *source, rtx *target)
|
||||
{
|
||||
rtx d, s;
|
||||
unsigned int d_regno, s_regno;
|
||||
@ -308,9 +302,7 @@ copy_insn_p (insn, source, target)
|
||||
exist yet in WP. */
|
||||
|
||||
static bitmap
|
||||
find_sub_conflicts (wp, size_word)
|
||||
struct web_part *wp;
|
||||
unsigned int size_word;
|
||||
find_sub_conflicts (struct web_part *wp, unsigned int size_word)
|
||||
{
|
||||
struct tagged_conflict *cl;
|
||||
cl = wp->sub_conflicts;
|
||||
@ -324,9 +316,7 @@ find_sub_conflicts (wp, size_word)
|
||||
doesn't exist. I.e. this never returns NULL. */
|
||||
|
||||
static bitmap
|
||||
get_sub_conflicts (wp, size_word)
|
||||
struct web_part *wp;
|
||||
unsigned int size_word;
|
||||
get_sub_conflicts (struct web_part *wp, unsigned int size_word)
|
||||
{
|
||||
bitmap b = find_sub_conflicts (wp, size_word);
|
||||
if (!b)
|
||||
@ -382,9 +372,7 @@ static struct undef_table_s {
|
||||
*/
|
||||
|
||||
static unsigned int
|
||||
undef_to_size_word (reg, undefined)
|
||||
rtx reg;
|
||||
unsigned HOST_WIDE_INT *undefined;
|
||||
undef_to_size_word (rtx reg, unsigned HOST_WIDE_INT *undefined)
|
||||
{
|
||||
/* When only the lower four bits are possibly set, we use
|
||||
a fast lookup table. */
|
||||
@ -440,9 +428,7 @@ undef_to_size_word (reg, undefined)
|
||||
covered by the part for that bitmap. */
|
||||
|
||||
static bitmap
|
||||
undef_to_bitmap (wp, undefined)
|
||||
struct web_part *wp;
|
||||
unsigned HOST_WIDE_INT *undefined;
|
||||
undef_to_bitmap (struct web_part *wp, unsigned HOST_WIDE_INT *undefined)
|
||||
{
|
||||
unsigned int size_word = undef_to_size_word (DF_REF_REAL_REG (wp->ref),
|
||||
undefined);
|
||||
@ -453,8 +439,7 @@ undef_to_bitmap (wp, undefined)
|
||||
it compresses the path. P may not be NULL. */
|
||||
|
||||
static struct web_part *
|
||||
find_web_part_1 (p)
|
||||
struct web_part *p;
|
||||
find_web_part_1 (struct web_part *p)
|
||||
{
|
||||
struct web_part *r = p;
|
||||
struct web_part *p_next;
|
||||
@ -480,8 +465,7 @@ find_web_part_1 (p)
|
||||
The root of the resulting (possibly larger) web part is returned. */
|
||||
|
||||
static struct web_part *
|
||||
union_web_part_roots (r1, r2)
|
||||
struct web_part *r1, *r2;
|
||||
union_web_part_roots (struct web_part *r1, struct web_part *r2)
|
||||
{
|
||||
if (r1 != r2)
|
||||
{
|
||||
@ -552,8 +536,7 @@ union_web_part_roots (r1, r2)
|
||||
/* Remember that we've handled a given move, so we don't reprocess it. */
|
||||
|
||||
static void
|
||||
remember_move (insn)
|
||||
rtx insn;
|
||||
remember_move (rtx insn)
|
||||
{
|
||||
if (!TEST_BIT (move_handled, INSN_UID (insn)))
|
||||
{
|
||||
@ -629,9 +612,7 @@ struct curr_use {
|
||||
otherwise a test is needed to track the already defined bytes. */
|
||||
|
||||
static int
|
||||
defuse_overlap_p_1 (def, use)
|
||||
rtx def;
|
||||
struct curr_use *use;
|
||||
defuse_overlap_p_1 (rtx def, struct curr_use *use)
|
||||
{
|
||||
int mode = 0;
|
||||
if (def == use->x)
|
||||
@ -708,10 +689,7 @@ defuse_overlap_p_1 (def, use)
|
||||
this insn. */
|
||||
|
||||
static int
|
||||
live_out_1 (df, use, insn)
|
||||
struct df *df ATTRIBUTE_UNUSED;
|
||||
struct curr_use *use;
|
||||
rtx insn;
|
||||
live_out_1 (struct df *df ATTRIBUTE_UNUSED, struct curr_use *use, rtx insn)
|
||||
{
|
||||
int defined = 0;
|
||||
int uid = INSN_UID (insn);
|
||||
@ -863,10 +841,7 @@ live_out_1 (df, use, insn)
|
||||
this insn). */
|
||||
|
||||
static inline int
|
||||
live_out (df, use, insn)
|
||||
struct df *df;
|
||||
struct curr_use *use;
|
||||
rtx insn;
|
||||
live_out (struct df *df, struct curr_use *use, rtx insn)
|
||||
{
|
||||
unsigned int uid = INSN_UID (insn);
|
||||
if (visit_trace[uid].wp
|
||||
@ -894,10 +869,7 @@ live_out (df, use, insn)
|
||||
which uses are live at the end of that basic block. */
|
||||
|
||||
static rtx
|
||||
live_in_edge (df, use, e)
|
||||
struct df *df;
|
||||
struct curr_use *use;
|
||||
edge e;
|
||||
live_in_edge (struct df *df, struct curr_use *use, edge e)
|
||||
{
|
||||
struct ra_bb_info *info_pred;
|
||||
rtx next_insn;
|
||||
@ -943,10 +915,7 @@ live_in_edge (df, use, e)
|
||||
def-use chains, and all defs during that chain are noted. */
|
||||
|
||||
static void
|
||||
live_in (df, use, insn)
|
||||
struct df *df;
|
||||
struct curr_use *use;
|
||||
rtx insn;
|
||||
live_in (struct df *df, struct curr_use *use, rtx insn)
|
||||
{
|
||||
unsigned int loc_vpass = visited_pass;
|
||||
|
||||
@ -1006,7 +975,7 @@ live_in (df, use, insn)
|
||||
pass. */
|
||||
|
||||
static void
|
||||
update_regnos_mentioned ()
|
||||
update_regnos_mentioned (void)
|
||||
{
|
||||
int last_uid = last_max_uid;
|
||||
rtx insn;
|
||||
@ -1048,8 +1017,7 @@ update_regnos_mentioned ()
|
||||
spanned_deaths members. */
|
||||
|
||||
static void
|
||||
livethrough_conflicts_bb (bb)
|
||||
basic_block bb;
|
||||
livethrough_conflicts_bb (basic_block bb)
|
||||
{
|
||||
struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
|
||||
rtx insn;
|
||||
@ -1106,7 +1074,7 @@ livethrough_conflicts_bb (bb)
|
||||
building live ranges. */
|
||||
|
||||
static void
|
||||
init_bb_info ()
|
||||
init_bb_info (void)
|
||||
{
|
||||
basic_block bb;
|
||||
FOR_ALL_BB (bb)
|
||||
@ -1122,7 +1090,7 @@ init_bb_info ()
|
||||
/* Free that per basic block info. */
|
||||
|
||||
static void
|
||||
free_bb_info ()
|
||||
free_bb_info (void)
|
||||
{
|
||||
basic_block bb;
|
||||
FOR_ALL_BB (bb)
|
||||
@ -1140,8 +1108,7 @@ free_bb_info ()
|
||||
their conflicts. */
|
||||
|
||||
static void
|
||||
build_web_parts_and_conflicts (df)
|
||||
struct df *df;
|
||||
build_web_parts_and_conflicts (struct df *df)
|
||||
{
|
||||
struct df_link *link;
|
||||
struct curr_use use;
|
||||
@ -1198,8 +1165,7 @@ build_web_parts_and_conflicts (df)
|
||||
read-mod-write instruction), so we must reconnect such webs. */
|
||||
|
||||
static void
|
||||
connect_rmw_web_parts (df)
|
||||
struct df *df;
|
||||
connect_rmw_web_parts (struct df *df)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
@ -1228,9 +1194,7 @@ connect_rmw_web_parts (df)
|
||||
/* Deletes all hardregs from *S which are not allowed for MODE. */
|
||||
|
||||
static void
|
||||
prune_hardregs_for_mode (s, mode)
|
||||
HARD_REG_SET *s;
|
||||
enum machine_mode mode;
|
||||
prune_hardregs_for_mode (HARD_REG_SET *s, enum machine_mode mode)
|
||||
{
|
||||
AND_HARD_REG_SET (*s, hardregs_for_mode[(int) mode]);
|
||||
}
|
||||
@ -1238,9 +1202,7 @@ prune_hardregs_for_mode (s, mode)
|
||||
/* Initialize the members of a web, which are deducible from REG. */
|
||||
|
||||
static void
|
||||
init_one_web_common (web, reg)
|
||||
struct web *web;
|
||||
rtx reg;
|
||||
init_one_web_common (struct web *web, rtx reg)
|
||||
{
|
||||
if (GET_CODE (reg) != REG)
|
||||
abort ();
|
||||
@ -1317,9 +1279,7 @@ init_one_web_common (web, reg)
|
||||
/* Initializes WEBs members from REG or zero them. */
|
||||
|
||||
static void
|
||||
init_one_web (web, reg)
|
||||
struct web *web;
|
||||
rtx reg;
|
||||
init_one_web (struct web *web, rtx reg)
|
||||
{
|
||||
memset (web, 0, sizeof (struct web));
|
||||
init_one_web_common (web, reg);
|
||||
@ -1331,9 +1291,7 @@ init_one_web (web, reg)
|
||||
members. */
|
||||
|
||||
static void
|
||||
reinit_one_web (web, reg)
|
||||
struct web *web;
|
||||
rtx reg;
|
||||
reinit_one_web (struct web *web, rtx reg)
|
||||
{
|
||||
web->old_color = web->color + 1;
|
||||
init_one_web_common (web, reg);
|
||||
@ -1374,9 +1332,7 @@ reinit_one_web (web, reg)
|
||||
becomes its super web). It must not exist already. */
|
||||
|
||||
static struct web *
|
||||
add_subweb (web, reg)
|
||||
struct web *web;
|
||||
rtx reg;
|
||||
add_subweb (struct web *web, rtx reg)
|
||||
{
|
||||
struct web *w;
|
||||
if (GET_CODE (reg) != SUBREG)
|
||||
@ -1402,9 +1358,7 @@ add_subweb (web, reg)
|
||||
In difference to add_subweb() this marks the new subweb as artificial. */
|
||||
|
||||
static struct web *
|
||||
add_subweb_2 (web, size_word)
|
||||
struct web *web;
|
||||
unsigned int size_word;
|
||||
add_subweb_2 (struct web *web, unsigned int size_word)
|
||||
{
|
||||
/* To get a correct mode for the to be produced subreg, we don't want to
|
||||
simply do a mode_for_size() for the mode_class of the whole web.
|
||||
@ -1431,8 +1385,7 @@ add_subweb_2 (web, size_word)
|
||||
/* Initialize all the web parts we are going to need. */
|
||||
|
||||
static void
|
||||
init_web_parts (df)
|
||||
struct df *df;
|
||||
init_web_parts (struct df *df)
|
||||
{
|
||||
int regno;
|
||||
unsigned int no;
|
||||
@ -1506,8 +1459,7 @@ init_web_parts (df)
|
||||
new conflicts, we copy it here to orig_conflict_list. */
|
||||
|
||||
static void
|
||||
copy_conflict_list (web)
|
||||
struct web *web;
|
||||
copy_conflict_list (struct web *web)
|
||||
{
|
||||
struct conflict_link *cl;
|
||||
if (web->orig_conflict_list || web->have_orig_conflicts)
|
||||
@ -1546,8 +1498,7 @@ copy_conflict_list (web)
|
||||
happen, if SUBREG webs are involved. */
|
||||
|
||||
static void
|
||||
add_conflict_edge (from, to)
|
||||
struct web *from, *to;
|
||||
add_conflict_edge (struct web *from, struct web *to)
|
||||
{
|
||||
if (from->type != PRECOLORED)
|
||||
{
|
||||
@ -1612,8 +1563,7 @@ add_conflict_edge (from, to)
|
||||
already. */
|
||||
|
||||
void
|
||||
record_conflict (web1, web2)
|
||||
struct web *web1, *web2;
|
||||
record_conflict (struct web *web1, struct web *web2)
|
||||
{
|
||||
unsigned int id1 = web1->id, id2 = web2->id;
|
||||
unsigned int index = igraph_index (id1, id2);
|
||||
@ -1665,8 +1615,7 @@ record_conflict (web1, web2)
|
||||
possible to exactly specify (W-Wy) for all already existing subwebs Wy. */
|
||||
|
||||
static void
|
||||
build_inverse_webs (web)
|
||||
struct web *web;
|
||||
build_inverse_webs (struct web *web)
|
||||
{
|
||||
struct web *sweb = web->subreg_next;
|
||||
unsigned HOST_WIDE_INT undef;
|
||||
@ -1691,9 +1640,7 @@ build_inverse_webs (web)
|
||||
Used for consistency checking. */
|
||||
|
||||
static void
|
||||
copy_web (web, wl)
|
||||
struct web *web;
|
||||
struct web_link **wl;
|
||||
copy_web (struct web *web, struct web_link **wl)
|
||||
{
|
||||
struct web *cweb = xmalloc (sizeof *cweb);
|
||||
struct web_link *link = ra_alloc (sizeof *link);
|
||||
@ -1707,8 +1654,7 @@ copy_web (web, wl)
|
||||
with the global webs of the same ID. For consistency checking. */
|
||||
|
||||
static void
|
||||
compare_and_free_webs (link)
|
||||
struct web_link **link;
|
||||
compare_and_free_webs (struct web_link **link)
|
||||
{
|
||||
struct web_link *wl;
|
||||
for (wl = *link; wl; wl = wl->next)
|
||||
@ -1757,7 +1703,7 @@ compare_and_free_webs (link)
|
||||
/* Setup and fill uses[] and defs[] arrays of the webs. */
|
||||
|
||||
static void
|
||||
init_webs_defs_uses ()
|
||||
init_webs_defs_uses (void)
|
||||
{
|
||||
struct dlist *d;
|
||||
for (d = WEBS(INITIAL); d; d = d->next)
|
||||
@ -1795,10 +1741,8 @@ init_webs_defs_uses ()
|
||||
up use2web and def2web arrays. */
|
||||
|
||||
static unsigned int
|
||||
parts_to_webs_1 (df, copy_webs, all_refs)
|
||||
struct df *df;
|
||||
struct web_link **copy_webs;
|
||||
struct df_link *all_refs;
|
||||
parts_to_webs_1 (struct df *df, struct web_link **copy_webs,
|
||||
struct df_link *all_refs)
|
||||
{
|
||||
unsigned int i;
|
||||
unsigned int webnum;
|
||||
@ -2016,8 +1960,7 @@ parts_to_webs_1 (df, copy_webs, all_refs)
|
||||
other (i.e. without creating the conflict edges). */
|
||||
|
||||
static void
|
||||
parts_to_webs (df)
|
||||
struct df *df;
|
||||
parts_to_webs (struct df *df)
|
||||
{
|
||||
unsigned int i;
|
||||
unsigned int webnum;
|
||||
@ -2115,7 +2058,7 @@ parts_to_webs (df)
|
||||
conflicts. */
|
||||
|
||||
static void
|
||||
reset_conflicts ()
|
||||
reset_conflicts (void)
|
||||
{
|
||||
unsigned int i;
|
||||
bitmap newwebs = BITMAP_XMALLOC ();
|
||||
@ -2197,7 +2140,7 @@ reset_conflicts ()
|
||||
|
||||
#if 0
|
||||
static void
|
||||
check_conflict_numbers ()
|
||||
check_conflict_numbers (void)
|
||||
{
|
||||
unsigned int i;
|
||||
for (i = 0; i < num_webs; i++)
|
||||
@ -2228,8 +2171,7 @@ check_conflict_numbers ()
|
||||
in reality conflict get the same color. */
|
||||
|
||||
static void
|
||||
conflicts_between_webs (df)
|
||||
struct df *df;
|
||||
conflicts_between_webs (struct df *df)
|
||||
{
|
||||
unsigned int i;
|
||||
#ifdef STACK_REGS
|
||||
@ -2327,8 +2269,7 @@ conflicts_between_webs (df)
|
||||
accordingly. */
|
||||
|
||||
static void
|
||||
remember_web_was_spilled (web)
|
||||
struct web *web;
|
||||
remember_web_was_spilled (struct web *web)
|
||||
{
|
||||
int i;
|
||||
unsigned int found_size = 0;
|
||||
@ -2408,7 +2349,7 @@ remember_web_was_spilled (web)
|
||||
if it will be spillable in this pass. */
|
||||
|
||||
static void
|
||||
detect_spill_temps ()
|
||||
detect_spill_temps (void)
|
||||
{
|
||||
struct dlist *d;
|
||||
bitmap already = BITMAP_XMALLOC ();
|
||||
@ -2512,8 +2453,7 @@ detect_spill_temps ()
|
||||
/* Returns nonzero if the rtx MEM refers somehow to a stack location. */
|
||||
|
||||
int
|
||||
memref_is_stack_slot (mem)
|
||||
rtx mem;
|
||||
memref_is_stack_slot (rtx mem)
|
||||
{
|
||||
rtx ad = XEXP (mem, 0);
|
||||
rtx x;
|
||||
@ -2530,8 +2470,7 @@ memref_is_stack_slot (mem)
|
||||
/* Returns nonzero, if rtx X somewhere contains any pseudo register. */
|
||||
|
||||
static int
|
||||
contains_pseudo (x)
|
||||
rtx x;
|
||||
contains_pseudo (rtx x)
|
||||
{
|
||||
const char *fmt;
|
||||
int i;
|
||||
@ -2569,8 +2508,7 @@ contains_pseudo (x)
|
||||
|
||||
static GTY(()) rtx remat_test_insn;
|
||||
static int
|
||||
want_to_remat (x)
|
||||
rtx x;
|
||||
want_to_remat (rtx x)
|
||||
{
|
||||
int num_clobbers = 0;
|
||||
int icode;
|
||||
@ -2608,7 +2546,7 @@ want_to_remat (x)
|
||||
and that value is simple enough, and want_to_remat() holds for it. */
|
||||
|
||||
static void
|
||||
detect_remat_webs ()
|
||||
detect_remat_webs (void)
|
||||
{
|
||||
struct dlist *d;
|
||||
for (d = WEBS(INITIAL); d; d = d->next)
|
||||
@ -2675,7 +2613,7 @@ detect_remat_webs ()
|
||||
/* Determine the spill costs of all webs. */
|
||||
|
||||
static void
|
||||
determine_web_costs ()
|
||||
determine_web_costs (void)
|
||||
{
|
||||
struct dlist *d;
|
||||
for (d = WEBS(INITIAL); d; d = d->next)
|
||||
@ -2732,7 +2670,7 @@ determine_web_costs ()
|
||||
which destroys the CFG. (Somewhen we want to deal with that XXX) */
|
||||
|
||||
static void
|
||||
detect_webs_set_in_cond_jump ()
|
||||
detect_webs_set_in_cond_jump (void)
|
||||
{
|
||||
basic_block bb;
|
||||
FOR_EACH_BB (bb)
|
||||
@ -2755,8 +2693,7 @@ detect_webs_set_in_cond_jump ()
|
||||
though. */
|
||||
|
||||
static void
|
||||
make_webs (df)
|
||||
struct df *df;
|
||||
make_webs (struct df *df)
|
||||
{
|
||||
/* First build all the webs itself. They are not related with
|
||||
others yet. */
|
||||
@ -2774,8 +2711,7 @@ make_webs (df)
|
||||
/* Distribute moves to the corresponding webs. */
|
||||
|
||||
static void
|
||||
moves_to_webs (df)
|
||||
struct df *df;
|
||||
moves_to_webs (struct df *df)
|
||||
{
|
||||
struct df_link *link;
|
||||
struct move_list *ml;
|
||||
@ -2853,9 +2789,7 @@ moves_to_webs (df)
|
||||
and constrain the allocator too much. */
|
||||
|
||||
static void
|
||||
handle_asm_insn (df, insn)
|
||||
struct df *df;
|
||||
rtx insn;
|
||||
handle_asm_insn (struct df *df, rtx insn)
|
||||
{
|
||||
const char *constraints[MAX_RECOG_OPERANDS];
|
||||
enum machine_mode operand_mode[MAX_RECOG_OPERANDS];
|
||||
@ -3027,8 +2961,7 @@ handle_asm_insn (df, insn)
|
||||
and conflicts. */
|
||||
|
||||
void
|
||||
build_i_graph (df)
|
||||
struct df *df;
|
||||
build_i_graph (struct df *df)
|
||||
{
|
||||
rtx insn;
|
||||
|
||||
@ -3061,8 +2994,7 @@ build_i_graph (df)
|
||||
defs and uses. */
|
||||
|
||||
void
|
||||
ra_build_realloc (df)
|
||||
struct df *df;
|
||||
ra_build_realloc (struct df *df)
|
||||
{
|
||||
struct web_part *last_web_parts = web_parts;
|
||||
struct web **last_def2web = def2web;
|
||||
@ -3176,7 +3108,7 @@ ra_build_realloc (df)
|
||||
/* Free up/clear some memory, only needed for one pass. */
|
||||
|
||||
void
|
||||
ra_build_free ()
|
||||
ra_build_free (void)
|
||||
{
|
||||
struct dlist *d;
|
||||
unsigned int i;
|
||||
@ -3236,8 +3168,7 @@ ra_build_free ()
|
||||
/* Free all memory for the interference graph structures. */
|
||||
|
||||
void
|
||||
ra_build_free_all (df)
|
||||
struct df *df;
|
||||
ra_build_free_all (struct df *df)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
|
@ -47,54 +47,54 @@
|
||||
Additionally there is a custom step to locally improve the overall
|
||||
spill cost of the colored graph (recolor_spills). */
|
||||
|
||||
static void push_list PARAMS ((struct dlist *, struct dlist **));
|
||||
static void push_list_end PARAMS ((struct dlist *, struct dlist **));
|
||||
static void free_dlist PARAMS ((struct dlist **));
|
||||
static void put_web_at_end PARAMS ((struct web *, enum node_type));
|
||||
static void put_move PARAMS ((struct move *, enum move_type));
|
||||
static void build_worklists PARAMS ((struct df *));
|
||||
static void enable_move PARAMS ((struct web *));
|
||||
static void decrement_degree PARAMS ((struct web *, int));
|
||||
static void simplify PARAMS ((void));
|
||||
static void remove_move_1 PARAMS ((struct web *, struct move *));
|
||||
static void remove_move PARAMS ((struct web *, struct move *));
|
||||
static void add_worklist PARAMS ((struct web *));
|
||||
static int ok PARAMS ((struct web *, struct web *));
|
||||
static int conservative PARAMS ((struct web *, struct web *));
|
||||
static inline unsigned int simplify_p PARAMS ((enum node_type));
|
||||
static void combine PARAMS ((struct web *, struct web *));
|
||||
static void coalesce PARAMS ((void));
|
||||
static void freeze_moves PARAMS ((struct web *));
|
||||
static void freeze PARAMS ((void));
|
||||
static void select_spill PARAMS ((void));
|
||||
static int color_usable_p PARAMS ((int, HARD_REG_SET, HARD_REG_SET,
|
||||
enum machine_mode));
|
||||
int get_free_reg PARAMS ((HARD_REG_SET, HARD_REG_SET, enum machine_mode));
|
||||
static int get_biased_reg PARAMS ((HARD_REG_SET, HARD_REG_SET, HARD_REG_SET,
|
||||
HARD_REG_SET, enum machine_mode));
|
||||
static int count_long_blocks PARAMS ((HARD_REG_SET, int));
|
||||
static char * hardregset_to_string PARAMS ((HARD_REG_SET));
|
||||
static void calculate_dont_begin PARAMS ((struct web *, HARD_REG_SET *));
|
||||
static void colorize_one_web PARAMS ((struct web *, int));
|
||||
static void assign_colors PARAMS ((void));
|
||||
static void try_recolor_web PARAMS ((struct web *));
|
||||
static void insert_coalesced_conflicts PARAMS ((void));
|
||||
static int comp_webs_maxcost PARAMS ((const void *, const void *));
|
||||
static void recolor_spills PARAMS ((void));
|
||||
static void check_colors PARAMS ((void));
|
||||
static void restore_conflicts_from_coalesce PARAMS ((struct web *));
|
||||
static void break_coalesced_spills PARAMS ((void));
|
||||
static void unalias_web PARAMS ((struct web *));
|
||||
static void break_aliases_to_web PARAMS ((struct web *));
|
||||
static void break_precolored_alias PARAMS ((struct web *));
|
||||
static void init_web_pairs PARAMS ((void));
|
||||
static void add_web_pair_cost PARAMS ((struct web *, struct web *,
|
||||
unsigned HOST_WIDE_INT, unsigned int));
|
||||
static int comp_web_pairs PARAMS ((const void *, const void *));
|
||||
static void sort_and_combine_web_pairs PARAMS ((int));
|
||||
static void aggressive_coalesce PARAMS ((void));
|
||||
static void extended_coalesce_2 PARAMS ((void));
|
||||
static void check_uncoalesced_moves PARAMS ((void));
|
||||
static void push_list (struct dlist *, struct dlist **);
|
||||
static void push_list_end (struct dlist *, struct dlist **);
|
||||
static void free_dlist (struct dlist **);
|
||||
static void put_web_at_end (struct web *, enum node_type);
|
||||
static void put_move (struct move *, enum move_type);
|
||||
static void build_worklists (struct df *);
|
||||
static void enable_move (struct web *);
|
||||
static void decrement_degree (struct web *, int);
|
||||
static void simplify (void);
|
||||
static void remove_move_1 (struct web *, struct move *);
|
||||
static void remove_move (struct web *, struct move *);
|
||||
static void add_worklist (struct web *);
|
||||
static int ok (struct web *, struct web *);
|
||||
static int conservative (struct web *, struct web *);
|
||||
static inline unsigned int simplify_p (enum node_type);
|
||||
static void combine (struct web *, struct web *);
|
||||
static void coalesce (void);
|
||||
static void freeze_moves (struct web *);
|
||||
static void freeze (void);
|
||||
static void select_spill (void);
|
||||
static int color_usable_p (int, HARD_REG_SET, HARD_REG_SET,
|
||||
enum machine_mode);
|
||||
int get_free_reg (HARD_REG_SET, HARD_REG_SET, enum machine_mode);
|
||||
static int get_biased_reg (HARD_REG_SET, HARD_REG_SET, HARD_REG_SET,
|
||||
HARD_REG_SET, enum machine_mode);
|
||||
static int count_long_blocks (HARD_REG_SET, int);
|
||||
static char * hardregset_to_string (HARD_REG_SET);
|
||||
static void calculate_dont_begin (struct web *, HARD_REG_SET *);
|
||||
static void colorize_one_web (struct web *, int);
|
||||
static void assign_colors (void);
|
||||
static void try_recolor_web (struct web *);
|
||||
static void insert_coalesced_conflicts (void);
|
||||
static int comp_webs_maxcost (const void *, const void *);
|
||||
static void recolor_spills (void);
|
||||
static void check_colors (void);
|
||||
static void restore_conflicts_from_coalesce (struct web *);
|
||||
static void break_coalesced_spills (void);
|
||||
static void unalias_web (struct web *);
|
||||
static void break_aliases_to_web (struct web *);
|
||||
static void break_precolored_alias (struct web *);
|
||||
static void init_web_pairs (void);
|
||||
static void add_web_pair_cost (struct web *, struct web *,
|
||||
unsigned HOST_WIDE_INT, unsigned int);
|
||||
static int comp_web_pairs (const void *, const void *);
|
||||
static void sort_and_combine_web_pairs (int);
|
||||
static void aggressive_coalesce (void);
|
||||
static void extended_coalesce_2 (void);
|
||||
static void check_uncoalesced_moves (void);
|
||||
|
||||
static struct dlist *mv_worklist, *mv_coalesced, *mv_constrained;
|
||||
static struct dlist *mv_frozen, *mv_active;
|
||||
@ -102,9 +102,7 @@ static struct dlist *mv_frozen, *mv_active;
|
||||
/* Push a node onto the front of the list. */
|
||||
|
||||
static void
|
||||
push_list (x, list)
|
||||
struct dlist *x;
|
||||
struct dlist **list;
|
||||
push_list (struct dlist *x, struct dlist **list)
|
||||
{
|
||||
if (x->next || x->prev)
|
||||
abort ();
|
||||
@ -115,9 +113,7 @@ push_list (x, list)
|
||||
}
|
||||
|
||||
static void
|
||||
push_list_end (x, list)
|
||||
struct dlist *x;
|
||||
struct dlist **list;
|
||||
push_list_end (struct dlist *x, struct dlist **list)
|
||||
{
|
||||
if (x->prev || x->next)
|
||||
abort ();
|
||||
@ -135,9 +131,7 @@ push_list_end (x, list)
|
||||
/* Remove a node from the list. */
|
||||
|
||||
void
|
||||
remove_list (x, list)
|
||||
struct dlist *x;
|
||||
struct dlist **list;
|
||||
remove_list (struct dlist *x, struct dlist **list)
|
||||
{
|
||||
struct dlist *y = x->prev;
|
||||
if (y)
|
||||
@ -153,8 +147,7 @@ remove_list (x, list)
|
||||
/* Pop the front of the list. */
|
||||
|
||||
struct dlist *
|
||||
pop_list (list)
|
||||
struct dlist **list;
|
||||
pop_list (struct dlist **list)
|
||||
{
|
||||
struct dlist *r = *list;
|
||||
if (r)
|
||||
@ -165,8 +158,7 @@ pop_list (list)
|
||||
/* Free the given double linked list. */
|
||||
|
||||
static void
|
||||
free_dlist (list)
|
||||
struct dlist **list;
|
||||
free_dlist (struct dlist **list)
|
||||
{
|
||||
*list = NULL;
|
||||
}
|
||||
@ -176,9 +168,7 @@ free_dlist (list)
|
||||
Inline, because it's called with constant TYPE every time. */
|
||||
|
||||
inline void
|
||||
put_web (web, type)
|
||||
struct web *web;
|
||||
enum node_type type;
|
||||
put_web (struct web *web, enum node_type type)
|
||||
{
|
||||
switch (type)
|
||||
{
|
||||
@ -216,7 +206,7 @@ put_web (web, type)
|
||||
they are coalesced to. */
|
||||
|
||||
void
|
||||
reset_lists ()
|
||||
reset_lists (void)
|
||||
{
|
||||
struct dlist *d;
|
||||
unsigned int i;
|
||||
@ -270,9 +260,7 @@ reset_lists ()
|
||||
list. Additionally TYPE may not be SIMPLIFY. */
|
||||
|
||||
static void
|
||||
put_web_at_end (web, type)
|
||||
struct web *web;
|
||||
enum node_type type;
|
||||
put_web_at_end (struct web *web, enum node_type type)
|
||||
{
|
||||
if (type == PRECOLORED)
|
||||
type = INITIAL;
|
||||
@ -286,8 +274,7 @@ put_web_at_end (web, type)
|
||||
its current type). */
|
||||
|
||||
void
|
||||
remove_web_from_list (web)
|
||||
struct web *web;
|
||||
remove_web_from_list (struct web *web)
|
||||
{
|
||||
if (web->type == PRECOLORED)
|
||||
remove_list (web->dlink, &WEBS(INITIAL));
|
||||
@ -298,9 +285,7 @@ remove_web_from_list (web)
|
||||
/* Give MOVE the TYPE, and link it into the correct list. */
|
||||
|
||||
static inline void
|
||||
put_move (move, type)
|
||||
struct move *move;
|
||||
enum move_type type;
|
||||
put_move (struct move *move, enum move_type type)
|
||||
{
|
||||
switch (type)
|
||||
{
|
||||
@ -328,8 +313,7 @@ put_move (move, type)
|
||||
/* Build the worklists we are going to process. */
|
||||
|
||||
static void
|
||||
build_worklists (df)
|
||||
struct df *df ATTRIBUTE_UNUSED;
|
||||
build_worklists (struct df *df ATTRIBUTE_UNUSED)
|
||||
{
|
||||
struct dlist *d, *d_next;
|
||||
struct move_list *ml;
|
||||
@ -407,8 +391,7 @@ build_worklists (df)
|
||||
/* Enable the active moves, in which WEB takes part, to be processed. */
|
||||
|
||||
static void
|
||||
enable_move (web)
|
||||
struct web *web;
|
||||
enable_move (struct web *web)
|
||||
{
|
||||
struct move_list *ml;
|
||||
for (ml = web->moves; ml; ml = ml->next)
|
||||
@ -424,9 +407,7 @@ enable_move (web)
|
||||
now smaller than its freedom. */
|
||||
|
||||
static void
|
||||
decrement_degree (web, dec)
|
||||
struct web *web;
|
||||
int dec;
|
||||
decrement_degree (struct web *web, int dec)
|
||||
{
|
||||
int before = web->num_conflicts;
|
||||
web->num_conflicts -= dec;
|
||||
@ -454,7 +435,7 @@ decrement_degree (web, dec)
|
||||
/* Repeatedly simplify the nodes on the simplify worklists. */
|
||||
|
||||
static void
|
||||
simplify ()
|
||||
simplify (void)
|
||||
{
|
||||
struct dlist *d;
|
||||
struct web *web;
|
||||
@ -493,9 +474,7 @@ simplify ()
|
||||
/* Helper function to remove a move from the movelist of the web. */
|
||||
|
||||
static void
|
||||
remove_move_1 (web, move)
|
||||
struct web *web;
|
||||
struct move *move;
|
||||
remove_move_1 (struct web *web, struct move *move)
|
||||
{
|
||||
struct move_list *ml = web->moves;
|
||||
if (!ml)
|
||||
@ -516,9 +495,7 @@ remove_move_1 (web, move)
|
||||
not in the list anymore. */
|
||||
|
||||
static void
|
||||
remove_move (web, move)
|
||||
struct web *web;
|
||||
struct move *move;
|
||||
remove_move (struct web *web, struct move *move)
|
||||
{
|
||||
struct move_list *ml;
|
||||
remove_move_1 (web, move);
|
||||
@ -530,8 +507,7 @@ remove_move (web, move)
|
||||
/* Merge the moves for the two webs into the first web's movelist. */
|
||||
|
||||
void
|
||||
merge_moves (u, v)
|
||||
struct web *u, *v;
|
||||
merge_moves (struct web *u, struct web *v)
|
||||
{
|
||||
regset seen;
|
||||
struct move_list *ml, *ml_next;
|
||||
@ -555,8 +531,7 @@ merge_moves (u, v)
|
||||
/* Add a web to the simplify worklist, from the freeze worklist. */
|
||||
|
||||
static void
|
||||
add_worklist (web)
|
||||
struct web *web;
|
||||
add_worklist (struct web *web)
|
||||
{
|
||||
if (web->type != PRECOLORED && !web->moves
|
||||
&& web->num_conflicts < NUM_REGS (web))
|
||||
@ -569,8 +544,7 @@ add_worklist (web)
|
||||
/* Precolored node coalescing heuristic. */
|
||||
|
||||
static int
|
||||
ok (target, source)
|
||||
struct web *target, *source;
|
||||
ok (struct web *target, struct web *source)
|
||||
{
|
||||
struct conflict_link *wl;
|
||||
int i;
|
||||
@ -661,8 +635,7 @@ ok (target, source)
|
||||
/* Non-precolored node coalescing heuristic. */
|
||||
|
||||
static int
|
||||
conservative (target, source)
|
||||
struct web *target, *source;
|
||||
conservative (struct web *target, struct web *source)
|
||||
{
|
||||
unsigned int k;
|
||||
unsigned int loop;
|
||||
@ -699,8 +672,7 @@ conservative (target, source)
|
||||
was passed in. */
|
||||
|
||||
struct web *
|
||||
alias (web)
|
||||
struct web *web;
|
||||
alias (struct web *web)
|
||||
{
|
||||
while (web->type == COALESCED)
|
||||
web = web->alias;
|
||||
@ -711,8 +683,7 @@ alias (web)
|
||||
SIMPLIFY types. */
|
||||
|
||||
static inline unsigned int
|
||||
simplify_p (type)
|
||||
enum node_type type;
|
||||
simplify_p (enum node_type type)
|
||||
{
|
||||
return type == SIMPLIFY || type == SIMPLIFY_SPILL || type == SIMPLIFY_FAT;
|
||||
}
|
||||
@ -720,8 +691,7 @@ simplify_p (type)
|
||||
/* Actually combine two webs, that can be coalesced. */
|
||||
|
||||
static void
|
||||
combine (u, v)
|
||||
struct web *u, *v;
|
||||
combine (struct web *u, struct web *v)
|
||||
{
|
||||
int i;
|
||||
struct conflict_link *wl;
|
||||
@ -850,7 +820,7 @@ combine (u, v)
|
||||
This is used only for iterated coalescing. */
|
||||
|
||||
static void
|
||||
coalesce ()
|
||||
coalesce (void)
|
||||
{
|
||||
struct dlist *d = pop_list (&mv_worklist);
|
||||
struct move *m = DLIST_MOVE (d);
|
||||
@ -896,8 +866,7 @@ coalesce ()
|
||||
/* Freeze the moves associated with the web. Used for iterated coalescing. */
|
||||
|
||||
static void
|
||||
freeze_moves (web)
|
||||
struct web *web;
|
||||
freeze_moves (struct web *web)
|
||||
{
|
||||
struct move_list *ml, *ml_next;
|
||||
for (ml = web->moves; ml; ml = ml_next)
|
||||
@ -928,7 +897,7 @@ freeze_moves (web)
|
||||
coalescing). */
|
||||
|
||||
static void
|
||||
freeze ()
|
||||
freeze (void)
|
||||
{
|
||||
struct dlist *d = pop_list (&WEBS(FREEZE));
|
||||
put_web (DLIST_WEB (d), SIMPLIFY);
|
||||
@ -938,17 +907,16 @@ freeze ()
|
||||
/* The current spill heuristic. Returns a number for a WEB.
|
||||
Webs with higher numbers are selected later. */
|
||||
|
||||
static unsigned HOST_WIDE_INT (*spill_heuristic) PARAMS ((struct web *));
|
||||
static unsigned HOST_WIDE_INT (*spill_heuristic) (struct web *);
|
||||
|
||||
static unsigned HOST_WIDE_INT default_spill_heuristic PARAMS ((struct web *));
|
||||
static unsigned HOST_WIDE_INT default_spill_heuristic (struct web *);
|
||||
|
||||
/* Our default heuristic is similar to spill_cost / num_conflicts.
|
||||
Just scaled for integer arithmetic, and it favors coalesced webs,
|
||||
and webs which span more insns with deaths. */
|
||||
|
||||
static unsigned HOST_WIDE_INT
|
||||
default_spill_heuristic (web)
|
||||
struct web *web;
|
||||
default_spill_heuristic (struct web *web)
|
||||
{
|
||||
unsigned HOST_WIDE_INT ret;
|
||||
unsigned int divisor = 1;
|
||||
@ -970,7 +938,7 @@ default_spill_heuristic (web)
|
||||
*actually* spill until we need to). */
|
||||
|
||||
static void
|
||||
select_spill ()
|
||||
select_spill (void)
|
||||
{
|
||||
unsigned HOST_WIDE_INT best = (unsigned HOST_WIDE_INT) -1;
|
||||
struct dlist *bestd = NULL;
|
||||
@ -1016,10 +984,8 @@ select_spill ()
|
||||
free colors, and MODE, returns nonzero of color C is still usable. */
|
||||
|
||||
static int
|
||||
color_usable_p (c, dont_begin_colors, free_colors, mode)
|
||||
int c;
|
||||
HARD_REG_SET dont_begin_colors, free_colors;
|
||||
enum machine_mode mode;
|
||||
color_usable_p (int c, HARD_REG_SET dont_begin_colors,
|
||||
HARD_REG_SET free_colors, enum machine_mode mode)
|
||||
{
|
||||
if (!TEST_HARD_REG_BIT (dont_begin_colors, c)
|
||||
&& TEST_HARD_REG_BIT (free_colors, c)
|
||||
@ -1048,9 +1014,8 @@ color_usable_p (c, dont_begin_colors, free_colors, mode)
|
||||
block could be found. */
|
||||
|
||||
int
|
||||
get_free_reg (dont_begin_colors, free_colors, mode)
|
||||
HARD_REG_SET dont_begin_colors, free_colors;
|
||||
enum machine_mode mode;
|
||||
get_free_reg (HARD_REG_SET dont_begin_colors, HARD_REG_SET free_colors,
|
||||
enum machine_mode mode)
|
||||
{
|
||||
int c;
|
||||
int last_resort_reg = -1;
|
||||
@ -1099,9 +1064,9 @@ get_free_reg (dont_begin_colors, free_colors, mode)
|
||||
only do the last two steps. */
|
||||
|
||||
static int
|
||||
get_biased_reg (dont_begin_colors, bias, prefer_colors, free_colors, mode)
|
||||
HARD_REG_SET dont_begin_colors, bias, prefer_colors, free_colors;
|
||||
enum machine_mode mode;
|
||||
get_biased_reg (HARD_REG_SET dont_begin_colors, HARD_REG_SET bias,
|
||||
HARD_REG_SET prefer_colors, HARD_REG_SET free_colors,
|
||||
enum machine_mode mode)
|
||||
{
|
||||
int c = -1;
|
||||
HARD_REG_SET s;
|
||||
@ -1132,9 +1097,7 @@ get_biased_reg (dont_begin_colors, bias, prefer_colors, free_colors, mode)
|
||||
in FREE_COLORS. */
|
||||
|
||||
static int
|
||||
count_long_blocks (free_colors, len)
|
||||
HARD_REG_SET free_colors;
|
||||
int len;
|
||||
count_long_blocks (HARD_REG_SET free_colors, int len)
|
||||
{
|
||||
int i, j;
|
||||
int count = 0;
|
||||
@ -1158,8 +1121,7 @@ count_long_blocks (free_colors, len)
|
||||
of hardreg sets. Note that this string is statically allocated. */
|
||||
|
||||
static char *
|
||||
hardregset_to_string (s)
|
||||
HARD_REG_SET s;
|
||||
hardregset_to_string (HARD_REG_SET s)
|
||||
{
|
||||
static char string[/*FIRST_PSEUDO_REGISTER + 30*/1024];
|
||||
#if FIRST_PSEUDO_REGISTER <= HOST_BITS_PER_WIDE_INT
|
||||
@ -1190,9 +1152,7 @@ hardregset_to_string (s)
|
||||
3 can't be used as begin color. */
|
||||
|
||||
static void
|
||||
calculate_dont_begin (web, result)
|
||||
struct web *web;
|
||||
HARD_REG_SET *result;
|
||||
calculate_dont_begin (struct web *web, HARD_REG_SET *result)
|
||||
{
|
||||
struct conflict_link *wl;
|
||||
HARD_REG_SET dont_begin;
|
||||
@ -1279,9 +1239,7 @@ calculate_dont_begin (web, result)
|
||||
register starved machines, so we try to avoid this. */
|
||||
|
||||
static void
|
||||
colorize_one_web (web, hard)
|
||||
struct web *web;
|
||||
int hard;
|
||||
colorize_one_web (struct web *web, int hard)
|
||||
{
|
||||
struct conflict_link *wl;
|
||||
HARD_REG_SET colors, dont_begin;
|
||||
@ -1664,7 +1622,7 @@ colorize_one_web (web, hard)
|
||||
colors of coalesced webs. */
|
||||
|
||||
static void
|
||||
assign_colors ()
|
||||
assign_colors (void)
|
||||
{
|
||||
struct dlist *d;
|
||||
|
||||
@ -1693,8 +1651,7 @@ assign_colors ()
|
||||
be aware, that currently this pass is quite slow. */
|
||||
|
||||
static void
|
||||
try_recolor_web (web)
|
||||
struct web *web;
|
||||
try_recolor_web (struct web *web)
|
||||
{
|
||||
struct conflict_link *wl;
|
||||
unsigned HOST_WIDE_INT *cost_neighbors;
|
||||
@ -1895,7 +1852,7 @@ try_recolor_web (web)
|
||||
isn't used anymore, e.g. on a completely colored graph. */
|
||||
|
||||
static void
|
||||
insert_coalesced_conflicts ()
|
||||
insert_coalesced_conflicts (void)
|
||||
{
|
||||
struct dlist *d;
|
||||
for (d = WEBS(COALESCED); 0 && d; d = d->next)
|
||||
@ -1949,8 +1906,7 @@ insert_coalesced_conflicts ()
|
||||
largest cost first. */
|
||||
|
||||
static int
|
||||
comp_webs_maxcost (w1, w2)
|
||||
const void *w1, *w2;
|
||||
comp_webs_maxcost (const void *w1, const void *w2)
|
||||
{
|
||||
struct web *web1 = *(struct web **)w1;
|
||||
struct web *web2 = *(struct web **)w2;
|
||||
@ -1966,7 +1922,7 @@ comp_webs_maxcost (w1, w2)
|
||||
how this is done. This just calls it for each spilled web. */
|
||||
|
||||
static void
|
||||
recolor_spills ()
|
||||
recolor_spills (void)
|
||||
{
|
||||
unsigned int i, num;
|
||||
struct web **order2web;
|
||||
@ -2003,7 +1959,7 @@ recolor_spills ()
|
||||
not being in usable regs. */
|
||||
|
||||
static void
|
||||
check_colors ()
|
||||
check_colors (void)
|
||||
{
|
||||
unsigned int i;
|
||||
for (i = 0; i < num_webs - num_subwebs; i++)
|
||||
@ -2081,8 +2037,7 @@ check_colors ()
|
||||
back onto SELECT stack. */
|
||||
|
||||
static void
|
||||
unalias_web (web)
|
||||
struct web *web;
|
||||
unalias_web (struct web *web)
|
||||
{
|
||||
web->alias = NULL;
|
||||
web->is_coalesced = 0;
|
||||
@ -2110,8 +2065,7 @@ unalias_web (web)
|
||||
Somewhen we'll change this to be more sane. */
|
||||
|
||||
static void
|
||||
break_aliases_to_web (web)
|
||||
struct web *web;
|
||||
break_aliases_to_web (struct web *web)
|
||||
{
|
||||
struct dlist *d, *d_next;
|
||||
if (web->type != SPILLED)
|
||||
@ -2153,8 +2107,7 @@ break_aliases_to_web (web)
|
||||
from initially coalescing both. */
|
||||
|
||||
static void
|
||||
break_precolored_alias (web)
|
||||
struct web *web;
|
||||
break_precolored_alias (struct web *web)
|
||||
{
|
||||
struct web *pre = web->alias;
|
||||
struct conflict_link *wl;
|
||||
@ -2222,8 +2175,7 @@ break_precolored_alias (web)
|
||||
and break up the coalescing. */
|
||||
|
||||
static void
|
||||
restore_conflicts_from_coalesce (web)
|
||||
struct web *web;
|
||||
restore_conflicts_from_coalesce (struct web *web)
|
||||
{
|
||||
struct conflict_link **pcl;
|
||||
struct conflict_link *wl;
|
||||
@ -2329,7 +2281,7 @@ restore_conflicts_from_coalesce (web)
|
||||
there are any spilled coalesce targets. */
|
||||
|
||||
static void
|
||||
break_coalesced_spills ()
|
||||
break_coalesced_spills (void)
|
||||
{
|
||||
int changed = 0;
|
||||
while (1)
|
||||
@ -2397,7 +2349,7 @@ static unsigned int num_web_pairs;
|
||||
/* Clear the hash table of web pairs. */
|
||||
|
||||
static void
|
||||
init_web_pairs ()
|
||||
init_web_pairs (void)
|
||||
{
|
||||
memset (web_pair_hash, 0, sizeof web_pair_hash);
|
||||
num_web_pairs = 0;
|
||||
@ -2409,10 +2361,8 @@ init_web_pairs ()
|
||||
already in, cumulate the costs and conflict number. */
|
||||
|
||||
static void
|
||||
add_web_pair_cost (web1, web2, cost, conflicts)
|
||||
struct web *web1, *web2;
|
||||
unsigned HOST_WIDE_INT cost;
|
||||
unsigned int conflicts;
|
||||
add_web_pair_cost (struct web *web1, struct web *web2,
|
||||
unsigned HOST_WIDE_INT cost, unsigned int conflicts)
|
||||
{
|
||||
unsigned int hash;
|
||||
struct web_pair *p;
|
||||
@ -2447,8 +2397,7 @@ add_web_pair_cost (web1, web2, cost, conflicts)
|
||||
when the moves are removed) come first. */
|
||||
|
||||
static int
|
||||
comp_web_pairs (w1, w2)
|
||||
const void *w1, *w2;
|
||||
comp_web_pairs (const void *w1, const void *w2)
|
||||
{
|
||||
struct web_pair *p1 = *(struct web_pair **)w1;
|
||||
struct web_pair *p2 = *(struct web_pair **)w2;
|
||||
@ -2468,8 +2417,7 @@ comp_web_pairs (w1, w2)
|
||||
with the most savings. */
|
||||
|
||||
static void
|
||||
sort_and_combine_web_pairs (for_move)
|
||||
int for_move;
|
||||
sort_and_combine_web_pairs (int for_move)
|
||||
{
|
||||
unsigned int i;
|
||||
struct web_pair **sorted;
|
||||
@ -2520,7 +2468,7 @@ sort_and_combine_web_pairs (for_move)
|
||||
giving the most saving if coalesced. */
|
||||
|
||||
static void
|
||||
aggressive_coalesce ()
|
||||
aggressive_coalesce (void)
|
||||
{
|
||||
struct dlist *d;
|
||||
struct move *m;
|
||||
@ -2579,7 +2527,7 @@ aggressive_coalesce ()
|
||||
all insns, and for each insn, through all defs and uses. */
|
||||
|
||||
static void
|
||||
extended_coalesce_2 ()
|
||||
extended_coalesce_2 (void)
|
||||
{
|
||||
rtx insn;
|
||||
struct ra_insn_info info;
|
||||
@ -2624,7 +2572,7 @@ extended_coalesce_2 ()
|
||||
/* Check if we forgot to coalesce some moves. */
|
||||
|
||||
static void
|
||||
check_uncoalesced_moves ()
|
||||
check_uncoalesced_moves (void)
|
||||
{
|
||||
struct move_list *ml;
|
||||
struct move *m;
|
||||
@ -2658,8 +2606,7 @@ check_uncoalesced_moves ()
|
||||
produces a list of spilled, colored and coalesced nodes. */
|
||||
|
||||
void
|
||||
ra_colorize_graph (df)
|
||||
struct df *df;
|
||||
ra_colorize_graph (struct df *df)
|
||||
{
|
||||
if (rtl_dump_file)
|
||||
dump_igraph (df);
|
||||
@ -2704,7 +2651,7 @@ ra_colorize_graph (df)
|
||||
|
||||
/* Initialize this module. */
|
||||
|
||||
void ra_colorize_init ()
|
||||
void ra_colorize_init (void)
|
||||
{
|
||||
/* FIXME: Choose spill heuristic for platform if we have one */
|
||||
spill_heuristic = default_spill_heuristic;
|
||||
@ -2714,7 +2661,7 @@ void ra_colorize_init ()
|
||||
memory). */
|
||||
|
||||
void
|
||||
ra_colorize_free_all ()
|
||||
ra_colorize_free_all (void)
|
||||
{
|
||||
struct dlist *d;
|
||||
while ((d = pop_list (&WEBS(FREE))) != NULL)
|
||||
|
gcc/ra-debug.c
@@ -36,10 +36,10 @@
 /* This file contains various dumping and debug functions for
    the graph coloring register allocator. */
 
-static void ra_print_rtx_1op PARAMS ((FILE *, rtx));
-static void ra_print_rtx_2op PARAMS ((FILE *, rtx));
-static void ra_print_rtx_3op PARAMS ((FILE *, rtx));
-static void ra_print_rtx_object PARAMS ((FILE *, rtx));
+static void ra_print_rtx_1op (FILE *, rtx);
+static void ra_print_rtx_2op (FILE *, rtx);
+static void ra_print_rtx_3op (FILE *, rtx);
+static void ra_print_rtx_object (FILE *, rtx);
 
 /* The hardregs as names, for debugging. */
 static const char *const reg_class_names[] = REG_CLASS_NAMES;
@ -70,9 +70,7 @@ ra_debug_msg (unsigned int level, const char *format, ...)
|
||||
"op(Y)" to FILE. */
|
||||
|
||||
static void
|
||||
ra_print_rtx_1op (file, x)
|
||||
FILE *file;
|
||||
rtx x;
|
||||
ra_print_rtx_1op (FILE *file, rtx x)
|
||||
{
|
||||
enum rtx_code code = GET_CODE (x);
|
||||
rtx op0 = XEXP (x, 0);
|
||||
@ -106,9 +104,7 @@ ra_print_rtx_1op (file, x)
|
||||
to FILE. */
|
||||
|
||||
static void
|
||||
ra_print_rtx_2op (file, x)
|
||||
FILE *file;
|
||||
rtx x;
|
||||
ra_print_rtx_2op (FILE *file, rtx x)
|
||||
{
|
||||
int infix = 1;
|
||||
const char *opname = "shitop";
|
||||
@ -171,9 +167,7 @@ ra_print_rtx_2op (file, x)
|
||||
I.e. X is either an IF_THEN_ELSE, or a bitmap operation. */
|
||||
|
||||
static void
|
||||
ra_print_rtx_3op (file, x)
|
||||
FILE *file;
|
||||
rtx x;
|
||||
ra_print_rtx_3op (FILE *file, rtx x)
|
||||
{
|
||||
enum rtx_code code = GET_CODE (x);
|
||||
rtx op0 = XEXP (x, 0);
|
||||
@ -208,9 +202,7 @@ ra_print_rtx_3op (file, x)
|
||||
is a hardreg, whose name is NULL, or empty. */
|
||||
|
||||
static void
|
||||
ra_print_rtx_object (file, x)
|
||||
FILE *file;
|
||||
rtx x;
|
||||
ra_print_rtx_object (FILE *file, rtx x)
|
||||
{
|
||||
enum rtx_code code = GET_CODE (x);
|
||||
enum machine_mode mode = GET_MODE (x);
|
||||
@ -344,10 +336,7 @@ ra_print_rtx_object (file, x)
|
||||
the preceding and following insn. */
|
||||
|
||||
void
|
||||
ra_print_rtx (file, x, with_pn)
|
||||
FILE *file;
|
||||
rtx x;
|
||||
int with_pn;
|
||||
ra_print_rtx (FILE *file, rtx x, int with_pn)
|
||||
{
|
||||
enum rtx_code code;
|
||||
char class;
|
||||
@ -517,10 +506,7 @@ ra_print_rtx (file, x, with_pn)
|
||||
/* This only calls ra_print_rtx(), but emits a final newline. */
|
||||
|
||||
void
|
||||
ra_print_rtx_top (file, x, with_pn)
|
||||
FILE *file;
|
||||
rtx x;
|
||||
int with_pn;
|
||||
ra_print_rtx_top (FILE *file, rtx x, int with_pn)
|
||||
{
|
||||
ra_print_rtx (file, x, with_pn);
|
||||
fprintf (file, "\n");
|
||||
@ -529,8 +515,7 @@ ra_print_rtx_top (file, x, with_pn)
|
||||
/* Callable from gdb. This prints rtx X onto stderr. */
|
||||
|
||||
void
|
||||
ra_debug_rtx (x)
|
||||
rtx x;
|
||||
ra_debug_rtx (rtx x)
|
||||
{
|
||||
ra_print_rtx_top (stderr, x, 1);
|
||||
}
|
||||
@ -539,8 +524,7 @@ ra_debug_rtx (x)
|
||||
The first and last insn are emitted with UIDs of prev and next insns. */
|
||||
|
||||
void
|
||||
ra_debug_bbi (bbi)
|
||||
int bbi;
|
||||
ra_debug_bbi (int bbi)
|
||||
{
|
||||
basic_block bb = BASIC_BLOCK (bbi);
|
||||
rtx insn;
|
||||
@ -557,9 +541,7 @@ ra_debug_bbi (bbi)
|
||||
or emit a window of NUM insns around INSN, to stderr. */
|
||||
|
||||
void
|
||||
ra_debug_insns (insn, num)
|
||||
rtx insn;
|
||||
int num;
|
||||
ra_debug_insns (rtx insn, int num)
|
||||
{
|
||||
int i, count = (num == 0 ? 1 : num < 0 ? -num : num);
|
||||
if (num < 0)
|
||||
@ -578,9 +560,7 @@ ra_debug_insns (insn, num)
|
||||
some notes, if flag_ra_dump_notes is zero. */
|
||||
|
||||
void
|
||||
ra_print_rtl_with_bb (file, insn)
|
||||
FILE *file;
|
||||
rtx insn;
|
||||
ra_print_rtl_with_bb (FILE *file, rtx insn)
|
||||
{
|
||||
basic_block last_bb, bb;
|
||||
unsigned int num = 0;
|
||||
@ -628,7 +608,7 @@ ra_print_rtl_with_bb (file, insn)
|
||||
graph, and prints the findings. */
|
||||
|
||||
void
|
||||
dump_number_seen ()
|
||||
dump_number_seen (void)
|
||||
{
|
||||
#define N 17
|
||||
int num[N];
|
||||
@ -654,8 +634,7 @@ dump_number_seen ()
|
||||
/* Dump the interference graph, the move list and the webs. */
|
||||
|
||||
void
|
||||
dump_igraph (df)
|
||||
struct df *df ATTRIBUTE_UNUSED;
|
||||
dump_igraph (struct df *df ATTRIBUTE_UNUSED)
|
||||
{
|
||||
struct move_list *ml;
|
||||
unsigned int def1, def2;
|
||||
@ -741,7 +720,7 @@ dump_igraph (df)
|
||||
to my custom graph colorizer. */
|
||||
|
||||
void
|
||||
dump_igraph_machine ()
|
||||
dump_igraph_machine (void)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
@ -801,7 +780,7 @@ dump_igraph_machine ()
|
||||
and emits information, if the resulting insns are strictly valid. */
|
||||
|
||||
void
|
||||
dump_constraints ()
|
||||
dump_constraints (void)
|
||||
{
|
||||
rtx insn;
|
||||
int i;
|
||||
@ -854,9 +833,7 @@ dump_constraints ()
|
||||
preceded by a custom message MSG, with debug level LEVEL. */
|
||||
|
||||
void
|
||||
dump_graph_cost (level, msg)
|
||||
unsigned int level;
|
||||
const char *msg;
|
||||
dump_graph_cost (unsigned int level, const char *msg)
|
||||
{
|
||||
unsigned int i;
|
||||
unsigned HOST_WIDE_INT cost;
|
||||
@ -878,8 +855,7 @@ dump_graph_cost (level, msg)
|
||||
/* Dump the color assignment per web, the coalesced and spilled webs. */
|
||||
|
||||
void
|
||||
dump_ra (df)
|
||||
struct df *df ATTRIBUTE_UNUSED;
|
||||
dump_ra (struct df *df ATTRIBUTE_UNUSED)
|
||||
{
|
||||
struct web *web;
|
||||
struct dlist *d;
|
||||
@ -913,10 +889,7 @@ dump_ra (df)
|
||||
(loads, stores and copies). */
|
||||
|
||||
void
|
||||
dump_static_insn_cost (file, message, prefix)
|
||||
FILE *file;
|
||||
const char *message;
|
||||
const char *prefix;
|
||||
dump_static_insn_cost (FILE *file, const char *message, const char *prefix)
|
||||
{
|
||||
struct cost
|
||||
{
|
||||
@ -1001,9 +974,7 @@ dump_static_insn_cost (file, message, prefix)
|
||||
hardregs in common. */
|
||||
|
||||
int
|
||||
web_conflicts_p (web1, web2)
|
||||
struct web *web1;
|
||||
struct web *web2;
|
||||
web_conflicts_p (struct web *web1, struct web *web2)
|
||||
{
|
||||
if (web1->type == PRECOLORED && web2->type == PRECOLORED)
|
||||
return 0;
|
||||
@ -1020,8 +991,7 @@ web_conflicts_p (web1, web2)
|
||||
/* Dump all uids of insns in which WEB is mentioned. */
|
||||
|
||||
void
|
||||
dump_web_insns (web)
|
||||
struct web *web;
|
||||
dump_web_insns (struct web *web)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
@ -1047,8 +1017,7 @@ dump_web_insns (web)
|
||||
/* Dump conflicts for web WEB. */
|
||||
|
||||
void
|
||||
dump_web_conflicts (web)
|
||||
struct web *web;
|
||||
dump_web_conflicts (struct web *web)
|
||||
{
|
||||
int num = 0;
|
||||
unsigned int def2;
|
||||
@ -1099,8 +1068,7 @@ dump_web_conflicts (web)
|
||||
/* Output HARD_REG_SET to stderr. */
|
||||
|
||||
void
|
||||
debug_hard_reg_set (set)
|
||||
HARD_REG_SET set;
|
||||
debug_hard_reg_set (HARD_REG_SET set)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < FIRST_PSEUDO_REGISTER; ++i)
|
||||
|
152  gcc/ra-rewrite.c
@@ -44,33 +44,32 @@
 struct rewrite_info;
 struct rtx_list;
 
-static void spill_coalescing PARAMS ((sbitmap, sbitmap));
-static unsigned HOST_WIDE_INT spill_prop_savings PARAMS ((struct web *,
-							  sbitmap));
-static void spill_prop_insert PARAMS ((struct web *, sbitmap, sbitmap));
-static int spill_propagation PARAMS ((sbitmap, sbitmap, sbitmap));
-static void spill_coalprop PARAMS ((void));
-static void allocate_spill_web PARAMS ((struct web *));
-static void choose_spill_colors PARAMS ((void));
-static void rewrite_program PARAMS ((bitmap));
-static void remember_slot PARAMS ((struct rtx_list **, rtx));
-static int slots_overlap_p PARAMS ((rtx, rtx));
-static void delete_overlapping_slots PARAMS ((struct rtx_list **, rtx));
-static int slot_member_p PARAMS ((struct rtx_list *, rtx));
-static void insert_stores PARAMS ((bitmap));
-static int spill_same_color_p PARAMS ((struct web *, struct web *));
-static bool is_partly_live_1 PARAMS ((sbitmap, struct web *));
-static void update_spill_colors PARAMS ((HARD_REG_SET *, struct web *, int));
-static int spill_is_free PARAMS ((HARD_REG_SET *, struct web *));
-static void emit_loads PARAMS ((struct rewrite_info *, int, rtx));
-static void reloads_to_loads PARAMS ((struct rewrite_info *, struct ref **,
-				      unsigned int, struct web **));
-static void rewrite_program2 PARAMS ((bitmap));
-static void mark_refs_for_checking PARAMS ((struct web *, bitmap));
-static void detect_web_parts_to_rebuild PARAMS ((void));
-static void delete_useless_defs PARAMS ((void));
-static void detect_non_changed_webs PARAMS ((void));
-static void reset_changed_flag PARAMS ((void));
+static void spill_coalescing (sbitmap, sbitmap);
+static unsigned HOST_WIDE_INT spill_prop_savings (struct web *, sbitmap);
+static void spill_prop_insert (struct web *, sbitmap, sbitmap);
+static int spill_propagation (sbitmap, sbitmap, sbitmap);
+static void spill_coalprop (void);
+static void allocate_spill_web (struct web *);
+static void choose_spill_colors (void);
+static void rewrite_program (bitmap);
+static void remember_slot (struct rtx_list **, rtx);
+static int slots_overlap_p (rtx, rtx);
+static void delete_overlapping_slots (struct rtx_list **, rtx);
+static int slot_member_p (struct rtx_list *, rtx);
+static void insert_stores (bitmap);
+static int spill_same_color_p (struct web *, struct web *);
+static bool is_partly_live_1 (sbitmap, struct web *);
+static void update_spill_colors (HARD_REG_SET *, struct web *, int);
+static int spill_is_free (HARD_REG_SET *, struct web *);
+static void emit_loads (struct rewrite_info *, int, rtx);
+static void reloads_to_loads (struct rewrite_info *, struct ref **,
+			      unsigned int, struct web **);
+static void rewrite_program2 (bitmap);
+static void mark_refs_for_checking (struct web *, bitmap);
+static void detect_web_parts_to_rebuild (void);
+static void delete_useless_defs (void);
+static void detect_non_changed_webs (void);
+static void reset_changed_flag (void);
 
 /* For tracking some statistics, we count the number (and cost)
    of deleted move insns. */
@ -84,8 +83,7 @@ static unsigned HOST_WIDE_INT deleted_move_cost;
|
||||
reduces memory shuffling. */
|
||||
|
||||
static void
|
||||
spill_coalescing (coalesce, spilled)
|
||||
sbitmap coalesce, spilled;
|
||||
spill_coalescing (sbitmap coalesce, sbitmap spilled)
|
||||
{
|
||||
struct move_list *ml;
|
||||
struct move *m;
|
||||
@ -160,9 +158,7 @@ spill_coalescing (coalesce, spilled)
|
||||
SPILLED, in terms of removed move insn cost. */
|
||||
|
||||
static unsigned HOST_WIDE_INT
|
||||
spill_prop_savings (web, spilled)
|
||||
struct web *web;
|
||||
sbitmap spilled;
|
||||
spill_prop_savings (struct web *web, sbitmap spilled)
|
||||
{
|
||||
unsigned HOST_WIDE_INT savings = 0;
|
||||
struct move_list *ml;
|
||||
@ -196,9 +192,7 @@ spill_prop_savings (web, spilled)
|
||||
to LIST and PROCESSED. */
|
||||
|
||||
static void
|
||||
spill_prop_insert (web, list, processed)
|
||||
struct web *web;
|
||||
sbitmap list, processed;
|
||||
spill_prop_insert (struct web *web, sbitmap list, sbitmap processed)
|
||||
{
|
||||
struct move_list *ml;
|
||||
struct move *m;
|
||||
@ -232,8 +226,7 @@ spill_prop_insert (web, list, processed)
|
||||
of all webs processed so far, so we don't do work twice. */
|
||||
|
||||
static int
|
||||
spill_propagation (to_prop, spilled, processed)
|
||||
sbitmap to_prop, spilled, processed;
|
||||
spill_propagation (sbitmap to_prop, sbitmap spilled, sbitmap processed)
|
||||
{
|
||||
int id;
|
||||
int again = 0;
|
||||
@ -275,7 +268,7 @@ spill_propagation (to_prop, spilled, processed)
|
||||
spill coalescing and spill propagation, until nothing changes. */
|
||||
|
||||
static void
|
||||
spill_coalprop ()
|
||||
spill_coalprop (void)
|
||||
{
|
||||
sbitmap spilled, processed, to_prop;
|
||||
struct dlist *d;
|
||||
@ -311,8 +304,7 @@ spill_coalprop ()
|
||||
MEM references. */
|
||||
|
||||
static void
|
||||
allocate_spill_web (web)
|
||||
struct web *web;
|
||||
allocate_spill_web (struct web *web)
|
||||
{
|
||||
int regno = web->regno;
|
||||
rtx slot;
|
||||
@ -326,7 +318,7 @@ allocate_spill_web (web)
|
||||
spilling. The heuristic isn't good in any way. */
|
||||
|
||||
static void
|
||||
choose_spill_colors ()
|
||||
choose_spill_colors (void)
|
||||
{
|
||||
struct dlist *d;
|
||||
unsigned HOST_WIDE_INT *costs = xmalloc (FIRST_PSEUDO_REGISTER * sizeof (costs[0]));
|
||||
@ -397,8 +389,7 @@ static bitmap useless_defs;
|
||||
deaths. */
|
||||
|
||||
static void
|
||||
rewrite_program (new_deaths)
|
||||
bitmap new_deaths;
|
||||
rewrite_program (bitmap new_deaths)
|
||||
{
|
||||
unsigned int i;
|
||||
struct dlist *d;
|
||||
@ -538,9 +529,7 @@ struct rtx_list
|
||||
/* Adds X to *LIST. */
|
||||
|
||||
static void
|
||||
remember_slot (list, x)
|
||||
struct rtx_list **list;
|
||||
rtx x;
|
||||
remember_slot (struct rtx_list **list, rtx x)
|
||||
{
|
||||
struct rtx_list *l;
|
||||
/* PRE: X is not already in LIST. */
|
||||
@ -556,8 +545,7 @@ remember_slot (list, x)
|
||||
(plus (basereg) (const_inst x)), otherwise they overlap. */
|
||||
|
||||
static int
|
||||
slots_overlap_p (s1, s2)
|
||||
rtx s1, s2;
|
||||
slots_overlap_p (rtx s1, rtx s2)
|
||||
{
|
||||
rtx base1, base2;
|
||||
HOST_WIDE_INT ofs1 = 0, ofs2 = 0;
|
||||
@ -607,9 +595,7 @@ slots_overlap_p (s1, s2)
|
||||
of slots_overlap_p(). */
|
||||
|
||||
static void
|
||||
delete_overlapping_slots (list, x)
|
||||
struct rtx_list **list;
|
||||
rtx x;
|
||||
delete_overlapping_slots (struct rtx_list **list, rtx x)
|
||||
{
|
||||
while (*list)
|
||||
{
|
||||
@ -623,9 +609,7 @@ delete_overlapping_slots (list, x)
|
||||
/* Returns nonzero, of X is member of LIST. */
|
||||
|
||||
static int
|
||||
slot_member_p (list, x)
|
||||
struct rtx_list *list;
|
||||
rtx x;
|
||||
slot_member_p (struct rtx_list *list, rtx x)
|
||||
{
|
||||
for (;list; list = list->next)
|
||||
if (rtx_equal_p (list->x, x))
|
||||
@ -640,8 +624,7 @@ slot_member_p (list, x)
|
||||
containing deaths. */
|
||||
|
||||
static void
|
||||
insert_stores (new_deaths)
|
||||
bitmap new_deaths;
|
||||
insert_stores (bitmap new_deaths)
|
||||
{
|
||||
rtx insn;
|
||||
rtx last_slot = NULL_RTX;
|
||||
@ -750,8 +733,7 @@ insert_stores (new_deaths)
|
||||
they are not the same width. */
|
||||
|
||||
static int
|
||||
spill_same_color_p (web1, web2)
|
||||
struct web *web1, *web2;
|
||||
spill_same_color_p (struct web *web1, struct web *web2)
|
||||
{
|
||||
int c1, size1, c2, size2;
|
||||
if ((c1 = alias (web1)->color) < 0 || c1 == an_unusable_color)
|
||||
@ -772,9 +754,7 @@ spill_same_color_p (web1, web2)
|
||||
subwebs (or WEB itself) is live. */
|
||||
|
||||
static bool
|
||||
is_partly_live_1 (live, web)
|
||||
sbitmap live;
|
||||
struct web *web;
|
||||
is_partly_live_1 (sbitmap live, struct web *web)
|
||||
{
|
||||
do
|
||||
if (TEST_BIT (live, web->id))
|
||||
@ -793,10 +773,7 @@ is_partly_live_1 (live, web)
|
||||
is nonzero), or remove them. */
|
||||
|
||||
static void
|
||||
update_spill_colors (in_use, web, add)
|
||||
HARD_REG_SET *in_use;
|
||||
struct web *web;
|
||||
int add;
|
||||
update_spill_colors (HARD_REG_SET *in_use, struct web *web, int add)
|
||||
{
|
||||
int c, size;
|
||||
if ((c = alias (find_web_for_subweb (web))->color) < 0
|
||||
@ -825,9 +802,7 @@ update_spill_colors (in_use, web, add)
|
||||
Generally, if WEB can't be left colorized return 1. */
|
||||
|
||||
static int
|
||||
spill_is_free (in_use, web)
|
||||
HARD_REG_SET *in_use;
|
||||
struct web *web;
|
||||
spill_is_free (HARD_REG_SET *in_use, struct web *web)
|
||||
{
|
||||
int c, size;
|
||||
if ((c = alias (web)->color) < 0)
|
||||
@ -881,10 +856,7 @@ struct rewrite_info
|
||||
loads. LAST_BLOCK_INSN is the last insn of the current basic block. */
|
||||
|
||||
static void
|
||||
emit_loads (ri, nl_first_reload, last_block_insn)
|
||||
struct rewrite_info *ri;
|
||||
int nl_first_reload;
|
||||
rtx last_block_insn;
|
||||
emit_loads (struct rewrite_info *ri, int nl_first_reload, rtx last_block_insn)
|
||||
{
|
||||
int j;
|
||||
for (j = ri->nl_size; j;)
|
||||
@ -1018,11 +990,8 @@ emit_loads (ri, nl_first_reload, last_block_insn)
|
||||
and whose colors isn't free anymore, on the needed_loads list. */
|
||||
|
||||
static void
|
||||
reloads_to_loads (ri, refs, num_refs, ref2web)
|
||||
struct rewrite_info *ri;
|
||||
struct ref **refs;
|
||||
unsigned int num_refs;
|
||||
struct web **ref2web;
|
||||
reloads_to_loads (struct rewrite_info *ri, struct ref **refs,
|
||||
unsigned int num_refs, struct web **ref2web)
|
||||
{
|
||||
unsigned int n;
|
||||
int num_reloads = ri->num_reloads;
|
||||
@ -1081,8 +1050,7 @@ reloads_to_loads (ri, refs, num_refs, ref2web)
|
||||
containing deaths). */
|
||||
|
||||
static void
|
||||
rewrite_program2 (new_deaths)
|
||||
bitmap new_deaths;
|
||||
rewrite_program2 (bitmap new_deaths)
|
||||
{
|
||||
basic_block bb = NULL;
|
||||
int nl_first_reload;
|
||||
@ -1445,9 +1413,7 @@ rewrite_program2 (new_deaths)
|
||||
Also remember all IDs of its uses in USES_AS_BITMAP. */
|
||||
|
||||
static void
|
||||
mark_refs_for_checking (web, uses_as_bitmap)
|
||||
struct web *web;
|
||||
bitmap uses_as_bitmap;
|
||||
mark_refs_for_checking (struct web *web, bitmap uses_as_bitmap)
|
||||
{
|
||||
unsigned int i;
|
||||
for (i = 0; i < web->num_uses; i++)
|
||||
@ -1473,7 +1439,7 @@ mark_refs_for_checking (web, uses_as_bitmap)
|
||||
information, we will rebuild. */
|
||||
|
||||
static void
|
||||
detect_web_parts_to_rebuild ()
|
||||
detect_web_parts_to_rebuild (void)
|
||||
{
|
||||
bitmap uses_as_bitmap;
|
||||
unsigned int i, pass;
|
||||
@ -1585,7 +1551,7 @@ static unsigned HOST_WIDE_INT deleted_def_cost;
|
||||
which wasn't live. Try to delete all those insns. */
|
||||
|
||||
static void
|
||||
delete_useless_defs ()
|
||||
delete_useless_defs (void)
|
||||
{
|
||||
unsigned int i;
|
||||
/* If the insn only sets the def without any sideeffect (besides
|
||||
@ -1613,7 +1579,7 @@ delete_useless_defs ()
|
||||
in this pass). */
|
||||
|
||||
static void
|
||||
detect_non_changed_webs ()
|
||||
detect_non_changed_webs (void)
|
||||
{
|
||||
struct dlist *d, *d_next;
|
||||
for (d = WEBS(SPILLED); d; d = d_next)
|
||||
@ -1639,7 +1605,7 @@ detect_non_changed_webs ()
|
||||
/* Before spilling we clear the changed flags for all spilled webs. */
|
||||
|
||||
static void
|
||||
reset_changed_flag ()
|
||||
reset_changed_flag (void)
|
||||
{
|
||||
struct dlist *d;
|
||||
for (d = WEBS(SPILLED); d; d = d->next)
|
||||
@ -1652,7 +1618,7 @@ reset_changed_flag ()
|
||||
building the interference graph in the next pass. */
|
||||
|
||||
void
|
||||
actual_spill ()
|
||||
actual_spill (void)
|
||||
{
|
||||
int i;
|
||||
bitmap new_deaths = BITMAP_XMALLOC ();
|
||||
@ -1840,7 +1806,7 @@ emit_colors (df)
|
||||
/* Delete some coalesced moves from the insn stream. */
|
||||
|
||||
void
|
||||
delete_moves ()
|
||||
delete_moves (void)
|
||||
{
|
||||
struct move_list *ml;
|
||||
struct web *s, *t;
|
||||
@ -1898,7 +1864,7 @@ delete_moves ()
|
||||
that comes later) Bah. */
|
||||
|
||||
void
|
||||
remove_suspicious_death_notes ()
|
||||
remove_suspicious_death_notes (void)
|
||||
{
|
||||
rtx insn;
|
||||
for (insn = get_insns(); insn; insn = NEXT_INSN (insn))
|
||||
@ -1927,8 +1893,7 @@ remove_suspicious_death_notes ()
|
||||
is nonzero, also free ra_reg_renumber and reset ra_max_regno. */
|
||||
|
||||
void
|
||||
setup_renumber (free_it)
|
||||
int free_it;
|
||||
setup_renumber (int free_it)
|
||||
{
|
||||
int i;
|
||||
max_regno = max_reg_num ();
|
||||
@ -1949,8 +1914,7 @@ setup_renumber (free_it)
|
||||
and removed moves or useless defs. */
|
||||
|
||||
void
|
||||
dump_cost (level)
|
||||
unsigned int level;
|
||||
dump_cost (unsigned int level)
|
||||
{
|
||||
ra_debug_msg (level, "Instructions for spilling\n added:\n");
|
||||
ra_debug_msg (level, " loads =%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
|
||||
@ -1969,7 +1933,7 @@ dump_cost (level)
|
||||
/* Initialization of the rewrite phase. */
|
||||
|
||||
void
|
||||
ra_rewrite_init ()
|
||||
ra_rewrite_init (void)
|
||||
{
|
||||
emitted_spill_loads = 0;
|
||||
emitted_spill_stores = 0;
|
||||
|
71
gcc/ra.c
@ -1,5 +1,5 @@
/* Graph coloring register allocator
Copyright (C) 2001, 2002 Free Software Foundation, Inc.
Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
Contributed by Michael Matz <matz@suse.de>
and Daniel Berlin <dan@cgsoftware.com>.

@ -85,16 +85,16 @@
*/

static struct obstack ra_obstack;
static void create_insn_info PARAMS ((struct df *));
static void free_insn_info PARAMS ((void));
static void alloc_mem PARAMS ((struct df *));
static void free_mem PARAMS ((struct df *));
static void free_all_mem PARAMS ((struct df *df));
static int one_pass PARAMS ((struct df *, int));
static void check_df PARAMS ((struct df *));
static void init_ra PARAMS ((void));
static void create_insn_info (struct df *);
static void free_insn_info (void);
static void alloc_mem (struct df *);
static void free_mem (struct df *);
static void free_all_mem (struct df *df);
static int one_pass (struct df *, int);
static void check_df (struct df *);
static void init_ra (void);

void reg_alloc PARAMS ((void));
void reg_alloc (void);

/* These global variables are "internal" to the register allocator.
They are all documented at their declarations in ra.h. */
@ -165,8 +165,7 @@ int flag_ra_dump_notes = 0;
is done. Allocate an object of SIZE bytes. */

void *
ra_alloc (size)
size_t size;
ra_alloc (size_t size)
{
return obstack_alloc (&ra_obstack, size);
}
@ -174,8 +173,7 @@ ra_alloc (size)
/* Like ra_alloc(), but clear the returned memory. */

void *
ra_calloc (size)
size_t size;
ra_calloc (size_t size)
{
void *p = obstack_alloc (&ra_obstack, size);
memset (p, 0, size);
@ -185,8 +183,7 @@ ra_calloc (size)
/* Returns the number of hardregs in HARD_REG_SET RS. */

int
hard_regs_count (rs)
HARD_REG_SET rs;
hard_regs_count (HARD_REG_SET rs)
{
int count = 0;
#ifdef HARD_REG_SET
@ -220,8 +217,7 @@ hard_regs_count (rs)
be basically valid. */

rtx
ra_emit_move_insn (x, y)
rtx x, y;
ra_emit_move_insn (rtx x, rtx y)
{
enum machine_mode mode = GET_MODE (x);
if (GET_MODE_CLASS (mode) == MODE_CC)
@ -238,8 +234,7 @@ static struct ref **refs_for_insn_df;
all valid defs and uses in an insn. */

static void
create_insn_info (df)
struct df *df;
create_insn_info (struct df *df)
{
rtx insn;
struct ref **act_refs;
@ -288,7 +283,7 @@ create_insn_info (df)
/* Free the insn_df structures. */

static void
free_insn_info ()
free_insn_info (void)
{
free (refs_for_insn_df);
refs_for_insn_df = NULL;
@ -302,9 +297,7 @@ free_insn_info ()
represented by WEB. Returns the matching subweb or NULL. */

struct web *
find_subweb (web, reg)
struct web *web;
rtx reg;
find_subweb (struct web *web, rtx reg)
{
struct web *w;
if (GET_CODE (reg) != SUBREG)
@ -320,9 +313,7 @@ find_subweb (web, reg)
a collection of the needed size and offset (in bytes). */

struct web *
find_subweb_2 (web, size_word)
struct web *web;
unsigned int size_word;
find_subweb_2 (struct web *web, unsigned int size_word)
{
struct web *w = web;
if (size_word == GET_MODE_SIZE (GET_MODE (web->orig_x)))
@ -340,8 +331,7 @@ find_subweb_2 (web, size_word)
/* Returns the superweb for SUBWEB. */

struct web *
find_web_for_subweb_1 (subweb)
struct web *subweb;
find_web_for_subweb_1 (struct web *subweb)
{
while (subweb->parent_web)
subweb = subweb->parent_web;
@ -352,8 +342,7 @@ find_web_for_subweb_1 (subweb)
Return 1 if they do. */

int
hard_regs_intersect_p (a, b)
HARD_REG_SET *a, *b;
hard_regs_intersect_p (HARD_REG_SET *a, HARD_REG_SET *b)
{
HARD_REG_SET c;
COPY_HARD_REG_SET (c, *a);
@ -368,8 +357,7 @@ lose:
register allocator. */

static void
alloc_mem (df)
struct df *df;
alloc_mem (struct df *df)
{
int i;
ra_build_realloc (df);
@ -386,8 +374,7 @@ alloc_mem (df)
/* Free the memory which isn't necessary for the next pass. */

static void
free_mem (df)
struct df *df ATTRIBUTE_UNUSED;
free_mem (struct df *df ATTRIBUTE_UNUSED)
{
free_insn_info ();
ra_build_free ();
@ -397,8 +384,7 @@ free_mem (df)
it's done. */

static void
free_all_mem (df)
struct df *df;
free_all_mem (struct df *df)
{
unsigned int i;
live_at_end -= 2;
@ -418,9 +404,7 @@ static long ticks_rebuild;
was added, i.e. if the allocator needs to rerun. */

static int
one_pass (df, rebuild)
struct df *df;
int rebuild;
one_pass (struct df *df, int rebuild)
{
long ticks = clock ();
int something_spilled;
@ -461,7 +445,7 @@ one_pass (df, rebuild)
/* Initialize various arrays for the register allocator. */

static void
init_ra ()
init_ra (void)
{
int i;
HARD_REG_SET rs;
@ -592,8 +576,7 @@ init_ra ()
invariances we expect. */

static void
check_df (df)
struct df *df;
check_df (struct df *df)
{
struct df_link *link;
rtx insn;
@ -663,7 +646,7 @@ check_df (df)
/* Main register allocator entry point. */

void
reg_alloc ()
reg_alloc (void)
{
int changed;
FILE *ra_dump_file = rtl_dump_file;
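Every hunk above applies the same mechanical change: an old-style (K&R) definition or a declaration wrapped in the PARAMS macro becomes a plain ISO C90 prototype and definition. A minimal, self-contained sketch of that pattern follows; the function count_bytes is hypothetical and does not appear in this commit, it only illustrates the before/after shape of the conversion.

#include <string.h>

/* Old style (K&R with a separate parameter declaration list), the form
   removed throughout this commit:

   static size_t
   count_bytes (s)
        const char *s;
   {
     return strlen (s);
   }
*/

/* New style (ISO C90), the form every definition is converted to.  */
static size_t
count_bytes (const char *s)
{
  return strlen (s);
}

int
main (void)
{
  /* Trivial check that the converted definition behaves the same.  */
  return count_bytes ("ra") == 2 ? 0 : 1;
}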