cgraph.c (cgraph_edge::maybe_hot_p): Do not check flag_branch_probabilities.
	* cgraph.c (cgraph_edge::maybe_hot_p): Do not check
	flag_branch_probabilities.
	* ipa-inline.c (edge_badness): Likewise.
	* ipa-profile.c (ipa_propagate_frequency_1): Likewise.
	* postreload-gcse.c (eliminate_partially_redundant_load): Likewise.
	* predict.c (maybe_hot_frequency_p): Likewise.
	(probably_never_executed): Likewise.
	* sched-ebb.c (schedule_ebbs): Likewise.
	* sched-rgn.c (find_single_block_region): Likewise.
	* tracer.c (tail_duplicate): Likewise.

From-SVN: r249020
This commit is contained in:
parent ae5512dd4f
commit ee4e85b78f
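Throughout this diff the guard "profile_info && flag_branch_probabilities" (or its opt_for_fn variant) is replaced by a check of the profile state itself: profile_status_for_fn (...) == PROFILE_READ, or profile_count::initialized_p () where a count is at hand. The sketch below is a standalone illustration of why the two guards differ; the types and values (function_info, profile_status) are hypothetical stand-ins, not GCC's real declarations. The point: a command-line flag does not guarantee that feedback data was actually read for the function in question.

/* Standalone sketch of the pattern this commit applies; the enum and
   struct below are invented stand-ins for GCC internals.  */
#include <stdio.h>

/* Stand-in for GCC's profile_status_d: was real profile data read?  */
enum profile_status { PROFILE_ABSENT, PROFILE_GUESSED, PROFILE_READ };

struct function_info
{
  enum profile_status profile_status; /* state of this function's profile */
  int flag_branch_probabilities;      /* the command-line flag */
};

/* Old-style guard: trusts the flag, which says nothing about whether
   THIS function actually has feedback data.  */
static int
hot_check_old (const struct function_info *f)
{
  return f->flag_branch_probabilities;
}

/* New-style guard, as in this commit: consult the per-function
   profile status instead of the flag.  */
static int
hot_check_new (const struct function_info *f)
{
  return f->profile_status == PROFILE_READ;
}

int
main (void)
{
  /* A function compiled with -fbranch-probabilities but whose own
     profile was never read (e.g. no matching feedback data).  */
  struct function_info f = { PROFILE_GUESSED, 1 };
  printf ("old guard: %d, new guard: %d\n",
	  hot_check_old (&f), hot_check_new (&f));
  return 0;
}

With the profile missing for f, the old guard still claims feedback is available (prints 1) while the new guard correctly declines (prints 0).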
gcc/ChangeLog
@@ -1,3 +1,16 @@
+2017-06-08  Jan Hubicka  <hubicka@ucw.cz>
+
+	* cgraph.c (cgraph_edge::maybe_hot_p): Do not check
+	flag_branch_probabilities.
+	* ipa-inline.c (edge_badness): Likewise.
+	* ipa-profile.c (ipa_propagate_frequency_1): Likewise.
+	* postreload-gcse.c (eliminate_partially_redundant_load): Likewise.
+	* predict.c (maybe_hot_frequency_p): Likewise.
+	(probably_never_executed): Likewise.
+	* sched-ebb.c (schedule_ebbs): Likewise.
+	* sched-rgn.c (find_single_block_region): Likewise.
+	* tracer.c (tail_duplicate): Likewise.
+
 2017-06-08  Jan Hubicka  <hubicka@ucw.cz>
 
 	* opts.c (finish_options): x_flag_reorder_blocks_and_partition no
gcc/cgraph.c
@@ -2729,10 +2729,7 @@ cgraph_edge::cannot_lead_to_return_p (void)
 bool
 cgraph_edge::maybe_hot_p (void)
 {
-  /* TODO: Export profile_status from cfun->cfg to cgraph_node.  */
-  if (profile_info
-      && opt_for_fn (caller->decl, flag_branch_probabilities)
-      && !maybe_hot_count_p (NULL, count))
+  if (!maybe_hot_count_p (NULL, count))
     return false;
   if (caller->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED
       || (callee
gcc/ipa-inline.c
@@ -1078,7 +1078,7 @@ edge_badness (struct cgraph_edge *edge, bool dump)
       numerator = ((sreal) 1 >> 8);
       if (caller->count > profile_count::zero ())
 	numerator *= caller->count.to_gcov_type ();
-      else if (opt_for_fn (caller->decl, flag_branch_probabilities))
+      else if (caller->count.initialized_p ())
 	numerator = numerator >> 11;
       denominator = growth;
 
gcc/ipa-profile.c
@@ -330,7 +330,7 @@ ipa_propagate_frequency_1 (struct cgraph_node *node, void *data)
	 it is executed by the train run.  Transfer the function only if all
	 callers are unlikely executed.  */
      if (profile_info
-	  && opt_for_fn (d->function_symbol->decl, flag_branch_probabilities)
+	  && edge->callee->count.initialized_p ()
	  /* Thunks are not profiled.  This is more or less implementation
	     bug.  */
	  && !d->function_symbol->thunk.thunk_p
gcc/postreload-gcse.c
@@ -1158,7 +1158,7 @@ eliminate_partially_redundant_load (basic_block bb, rtx_insn *insn,
       || (optimize_bb_for_size_p (bb) && npred_ok > 1)
       /* If we don't have profile information we cannot tell if splitting
          a critical edge is profitable or not so don't do it.  */
-      || ((! profile_info || ! flag_branch_probabilities
+      || ((! profile_info || profile_status_for_fn (cfun) != PROFILE_READ
 	   || targetm.cannot_modify_jumps_p ())
 	  && critical_edge_split))
     goto cleanup;
gcc/predict.c
@@ -123,8 +123,7 @@ static inline bool
 maybe_hot_frequency_p (struct function *fun, int freq)
 {
   struct cgraph_node *node = cgraph_node::get (fun->decl);
-  if (!profile_info
-      || !opt_for_fn (fun->decl, flag_branch_probabilities))
+  if (!profile_info || profile_status_for_fn (fun) != PROFILE_READ)
     {
       if (node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED)
 	return false;
@@ -222,7 +221,7 @@ probably_never_executed (struct function *fun,
 	    return false;
 	  return true;
 	}
-  if ((!profile_info || !(opt_for_fn (fun->decl, flag_branch_probabilities)))
+  if ((!profile_info || profile_status_for_fn (fun) != PROFILE_READ)
      && (cgraph_node::get (fun->decl)->frequency
 	  == NODE_FREQUENCY_UNLIKELY_EXECUTED))
     return true;
gcc/sched-ebb.c
@@ -622,7 +622,7 @@ schedule_ebbs (void)
   if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
     return;
 
-  if (profile_info && flag_branch_probabilities)
+  if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
     probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
   else
     probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
gcc/sched-rgn.c
@@ -477,7 +477,7 @@ find_single_block_region (bool ebbs_p)
 
   if (ebbs_p) {
     int probability_cutoff;
-    if (profile_info && flag_branch_probabilities)
+    if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
       probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
    else
      probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
gcc/tracer.c
@@ -270,7 +270,7 @@ tail_duplicate (void)
   bitmap_clear (bb_seen);
   initialize_original_copy_tables ();
 
-  if (profile_info && flag_branch_probabilities)
+  if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
     probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
   else
     probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
@@ -290,7 +290,7 @@ tail_duplicate (void)
       weighted_insns += n * bb->frequency;
     }
 
-  if (profile_info && flag_branch_probabilities)
+  if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
     cover_insns = PARAM_VALUE (TRACER_DYNAMIC_COVERAGE_FEEDBACK);
   else
     cover_insns = PARAM_VALUE (TRACER_DYNAMIC_COVERAGE);
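The sched-ebb.c, sched-rgn.c, and tracer.c hunks all share one shape: pick the feedback-tuned cutoff parameter when a profile was actually read, else the static default. The sketch below illustrates that selection; the numeric values and the 10000 fixed-point base are invented for illustration, whereas GCC goes through PARAM_VALUE and REG_BR_PROB_BASE.

/* Sketch of the cutoff selection repeated in the scheduler and tracer
   hunks above.  Constants are hypothetical; GCC reads them via
   PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY[_FEEDBACK]).  */
#include <stdbool.h>
#include <stdio.h>

#define MIN_BRANCH_PROBABILITY_FEEDBACK 80 /* hypothetical percentage */
#define MIN_BRANCH_PROBABILITY 50          /* hypothetical percentage */

static int
pick_probability_cutoff (bool have_profile_info, bool profile_read)
{
  int probability_cutoff;
  /* Mirrors the new condition: require that a profile was actually
     read, not merely that -fbranch-probabilities was given.  */
  if (have_profile_info && profile_read)
    probability_cutoff = MIN_BRANCH_PROBABILITY_FEEDBACK;
  else
    probability_cutoff = MIN_BRANCH_PROBABILITY;
  /* Scale the percentage into a fixed-point probability; 10000 is a
     stand-in for GCC's REG_BR_PROB_BASE.  */
  return 10000 / 100 * probability_cutoff;
}

int
main (void)
{
  printf ("with feedback: %d\n", pick_probability_cutoff (true, true));
  printf ("without: %d\n", pick_probability_cutoff (true, false));
  return 0;
}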