aarch64: Tidy aarch64_split_compare_and_swap

With aarch64_track_speculation, we had extra code to do exactly what the
!strong_zero_p path already did.  The rest is reducing code duplication.

	* config/aarch64/aarch64.c (aarch64_split_compare_and_swap): Disable
	strong_zero_p for aarch64_track_speculation; unify some code paths;
	use aarch64_gen_compare_reg instead of open-coding.

From-SVN: r275966
Richard Henderson, 2019-09-19 14:36:33 +00:00 (committed by Richard Henderson)
parent 4a2095ebac
commit b7e560deb3
2 changed files with 18 additions and 36 deletions
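The "open-coding" that the ChangeLog entry refers to is the three-insn RTL sequence visible in the second diff below: grab the CC register, wrap the operands in a COMPARE, and emit the SET by hand. The helper collapses those steps into one call and hands back the CC register. A minimal sketch of the equivalent helper, assuming the usual SELECT_CC_MODE/CC_REGNUM machinery (an approximation, not the exact body in aarch64.c):

    /* Sketch: emit a flags-setting compare of X against Y and return the
       CC register, as aarch64_gen_compare_reg is used in this patch.
       Approximation only -- see config/aarch64/aarch64.c for the real body.  */
    static rtx
    gen_compare_reg_sketch (rtx_code code, rtx x, rtx y)
    {
      machine_mode cc_mode = SELECT_CC_MODE (code, x, y);  /* CCmode here.  */
      rtx cc_reg = gen_rtx_REG (cc_mode, CC_REGNUM);

      /* Equivalent to the open-coded sequence this patch removes:
	   cond = gen_rtx_REG (CCmode, CC_REGNUM);
	   x = gen_rtx_COMPARE (CCmode, scratch, const0_rtx);
	   emit_insn (gen_rtx_SET (cond, x));  */
      emit_insn (gen_rtx_SET (cc_reg, gen_rtx_COMPARE (cc_mode, x, y)));
      return cc_reg;
    }

Because the helper leaves the compare result in the CC register, the callers in this patch can drop their local cond temporaries, and where only the flags matter (the scratch and rval compares near the end of the function) they simply ignore the return value.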

gcc/ChangeLog

@@ -16,6 +16,10 @@
 	(aarch64_store_exclusive_pair): New.
 	* config/aarch64/iterators.md (JUST_TI): New.
+
+	* config/aarch64/aarch64.c (aarch64_split_compare_and_swap): Disable
+	strong_zero_p for aarch64_track_speculation; unify some code paths;
+	use aarch64_gen_compare_reg instead of open-coding.
 
 2019-09-19  Feng Xue  <fxue@os.amperecomputing.com>
 
 	* ipa-fnsummary.c (set_cond_stmt_execution_predicate): Do not compute

gcc/config/aarch64/aarch64.c

@@ -16955,13 +16955,11 @@ aarch64_emit_post_barrier (enum memmodel model)
 void
 aarch64_split_compare_and_swap (rtx operands[])
 {
-  rtx rval, mem, oldval, newval, scratch;
+  rtx rval, mem, oldval, newval, scratch, x, model_rtx;
   machine_mode mode;
   bool is_weak;
   rtx_code_label *label1, *label2;
-  rtx x, cond;
   enum memmodel model;
-  rtx model_rtx;
 
   rval = operands[0];
   mem = operands[1];
@@ -16982,7 +16980,8 @@ aarch64_split_compare_and_swap (rtx operands[])
 	CBNZ	scratch, .label1
     .label2:
 	CMP	rval, 0.  */
-  bool strong_zero_p = !is_weak && oldval == const0_rtx && mode != TImode;
+  bool strong_zero_p = (!is_weak && !aarch64_track_speculation
+			&& oldval == const0_rtx && mode != TImode);
 
   label1 = NULL;
   if (!is_weak)
@@ -16995,35 +16994,20 @@ aarch64_split_compare_and_swap (rtx operands[])
   /* The initial load can be relaxed for a __sync operation since a final
      barrier will be emitted to stop code hoisting.  */
   if (is_mm_sync (model))
-    aarch64_emit_load_exclusive (mode, rval, mem,
-				 GEN_INT (MEMMODEL_RELAXED));
+    aarch64_emit_load_exclusive (mode, rval, mem, GEN_INT (MEMMODEL_RELAXED));
   else
     aarch64_emit_load_exclusive (mode, rval, mem, model_rtx);
 
   if (strong_zero_p)
-    {
-      if (aarch64_track_speculation)
-	{
-	  /* Emit an explicit compare instruction, so that we can correctly
-	     track the condition codes.  */
-	  rtx cc_reg = aarch64_gen_compare_reg (NE, rval, const0_rtx);
-	  x = gen_rtx_NE (GET_MODE (cc_reg), cc_reg, const0_rtx);
-	}
-      else
-	x = gen_rtx_NE (VOIDmode, rval, const0_rtx);
-
-      x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
-				gen_rtx_LABEL_REF (Pmode, label2), pc_rtx);
-      aarch64_emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
-    }
+    x = gen_rtx_NE (VOIDmode, rval, const0_rtx);
   else
     {
-      cond = aarch64_gen_compare_reg_maybe_ze (NE, rval, oldval, mode);
-      x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
-      x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
-				gen_rtx_LABEL_REF (Pmode, label2), pc_rtx);
-      aarch64_emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
+      rtx cc_reg = aarch64_gen_compare_reg_maybe_ze (NE, rval, oldval, mode);
+      x = gen_rtx_NE (VOIDmode, cc_reg, const0_rtx);
     }
+  x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
+			    gen_rtx_LABEL_REF (Pmode, label2), pc_rtx);
+  aarch64_emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
 
   aarch64_emit_store_exclusive (mode, scratch, mem, newval, model_rtx);
 
@@ -17044,22 +17028,16 @@ aarch64_split_compare_and_swap (rtx operands[])
       aarch64_emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
     }
   else
-    {
-      cond = gen_rtx_REG (CCmode, CC_REGNUM);
-      x = gen_rtx_COMPARE (CCmode, scratch, const0_rtx);
-      emit_insn (gen_rtx_SET (cond, x));
-    }
+    aarch64_gen_compare_reg (NE, scratch, const0_rtx);
+
   emit_label (label2);
+
   /* If we used a CBNZ in the exchange loop emit an explicit compare with RVAL
      to set the condition flags.  If this is not used it will be removed by
      later passes.  */
   if (strong_zero_p)
-    {
-      cond = gen_rtx_REG (CCmode, CC_REGNUM);
-      x = gen_rtx_COMPARE (CCmode, rval, const0_rtx);
-      emit_insn (gen_rtx_SET (cond, x));
-    }
+    aarch64_gen_compare_reg (NE, rval, const0_rtx);
 
   /* Emit any final barrier needed for a __sync operation.  */
   if (is_mm_sync (model))
     aarch64_emit_post_barrier (model);
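
The second hunk shows only the tail of the block comment that motivates strong_zero_p. The full comment in aarch64.c (quoted from the surrounding source rather than from this diff) sketches the tighter loop that the strong, oldval == 0 case can use:

    /* When OLDVAL is zero and we want the strong version we can emit a tighter
       loop:
       .label1:
	    LD[A]XR	rval, [mem]
	    CBNZ	rval, .label2
	    ST[L]XR	scratch, newval, [mem]
	    CBNZ	scratch, .label1
       .label2:
	    CMP	rval, 0.  */

Both CBNZ branches consume a register rather than the condition flags, which is why speculation tracking (-mtrack-speculation) needed an explicit flags-setting compare here; disabling strong_zero_p under aarch64_track_speculation instead routes those builds through the generic path, which already ends in exactly that compare.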