PR68577: Handle narrowing for vector popcount, etc.

This patch adds support for simple cases where a vector
internal function returns wider results than the scalar
equivalent.  It punts on other cases.
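
As an illustration (not part of the original patch text), the kind of
loop this enables looks like the following.  The function name,
parameter names and the V2DI/V4SI modes are made up for the example;
the actual modes and pack operation depend on what
supportable_narrowing_operation reports for the target:

    /* Scalar source: popcount of a 64-bit value stored in a 32-bit result.  */
    void
    count_bits (int *x, long *y, int n)
    {
      for (int i = 0; i < n; i++)
        x[i] = __builtin_popcountl (y[i]);
    }

    /* Conceptual vector loop body after the patch, assuming V2DI inputs
       and a V4SI output: two vector popcount calls produce 64-bit lanes,
       and a single pack statement (typically VEC_PACK_TRUNC_EXPR) narrows
       the pair to 32-bit lanes:

         half0 = IFN_POPCOUNT (y_vec0);
         half1 = IFN_POPCOUNT (y_vec1);
         x_vec = PACK <half0, half1>;  */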

Tested on powerpc64-linux-gnu and x86_64-linux-gnu.

gcc/
	PR tree-optimization/68577
	* tree-vect-stmts.c (simple_integer_narrowing): New function.
	(vectorizable_call): Restrict internal function handling
	to NONE and NARROW cases, using simple_integer_narrowing
	to test for the latter.  Add cost of narrowing operation
	and insert it where necessary.

gcc/testsuite/
	PR tree-optimization/68577
	* gcc.dg/vect/pr68577.c: New test.

From-SVN: r231131
Richard Sandiford 2015-12-01 21:01:35 +00:00 committed by Richard Sandiford
parent e5f3103135
commit b1b6836e90
4 changed files with 123 additions and 24 deletions

gcc/ChangeLog

@@ -1,3 +1,12 @@
+2015-12-01  Richard Sandiford  <richard.sandiford@arm.com>
+
+	PR tree-optimization/68577
+	* tree-vect-stmts.c (simple_integer_narrowing): New function.
+	(vectorizable_call): Restrict internal function handling
+	to NONE and NARROW cases, using simple_integer_narrowing
+	to test for the latter.  Add cost of narrowing operation
+	and insert it where necessary.
+
 2015-12-01  Andreas Tobler  <andreast@gcc.gnu.org>
 
 	* config/rs6000/freebsd64.h (ELFv2_ABI_CHECK): Add new macro.
gcc/testsuite/ChangeLog

@@ -1,3 +1,8 @@
+2015-12-01  Richard Sandiford  <richard.sandiford@arm.com>
+
+	PR tree-optimization/68577
+	* gcc.dg/vect/pr68577.c: New test.
+
 2015-12-01  Nathan Sidwell  <nathan@acm.org>
 
 	* gcc.target/nvptx/uninit-decl.c: New.

gcc/testsuite/gcc.dg/vect/pr68577.c

@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+
+int a, b;
+
+void
+__sched_cpucount (void)
+{
+  while (b)
+    {
+      long l = b++;
+      a += __builtin_popcountl(l);
+    }
+}
+
+void
+slp_test (int *x, long *y)
+{
+  for (int i = 0; i < 512; i += 4)
+    {
+      x[i] = __builtin_popcountl(y[i]);
+      x[i + 1] = __builtin_popcountl(y[i + 1]);
+      x[i + 2] = __builtin_popcountl(y[i + 2]);
+      x[i + 3] = __builtin_popcountl(y[i + 3]);
+    }
+}

gcc/tree-vect-stmts.c

@@ -2140,6 +2140,31 @@ vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
   return true;
 }
 
+/* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
+   integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
+   in a single step.  On success, store the binary pack code in
+   *CONVERT_CODE.  */
+
+static bool
+simple_integer_narrowing (tree vectype_out, tree vectype_in,
+                          tree_code *convert_code)
+{
+  if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
+      || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
+    return false;
+
+  tree_code code;
+  int multi_step_cvt = 0;
+  auto_vec <tree, 8> interm_types;
+  if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
+                                        &code, &multi_step_cvt,
+                                        &interm_types)
+      || multi_step_cvt)
+    return false;
+
+  *convert_code = code;
+  return true;
+}
 
 /* Function vectorizable_call.
@@ -2306,7 +2331,12 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
   tree callee = gimple_call_fndecl (stmt);
 
   /* First try using an internal function.  */
-  if (cfn != CFN_LAST)
+  tree_code convert_code = ERROR_MARK;
+  if (cfn != CFN_LAST
+      && (modifier == NONE
+          || (modifier == NARROW
+              && simple_integer_narrowing (vectype_out, vectype_in,
+                                           &convert_code))))
     ifn = vectorizable_internal_function (cfn, callee, vectype_out,
                                           vectype_in);
@@ -2346,7 +2376,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
 
   if (slp_node || PURE_SLP_STMT (stmt_info))
     ncopies = 1;
-  else if (modifier == NARROW)
+  else if (modifier == NARROW && ifn == IFN_LAST)
     ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
   else
     ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
@@ -2362,6 +2392,10 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
         dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
                          "\n");
       vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
+      if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
+        add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2,
+                       vec_promote_demote, stmt_info, 0, vect_body);
+
       return true;
     }
@@ -2375,9 +2409,9 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
   vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
 
   prev_stmt_info = NULL;
-  switch (modifier)
+  if (modifier == NONE || ifn != IFN_LAST)
     {
-    case NONE:
+      tree prev_res = NULL_TREE;
       for (j = 0; j < ncopies; ++j)
         {
           /* Build argument list for the vectorized call.  */
@@ -2405,12 +2439,30 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
                       vec<tree> vec_oprndsk = vec_defs[k];
                       vargs[k] = vec_oprndsk[i];
                     }
-                  if (ifn != IFN_LAST)
-                    new_stmt = gimple_build_call_internal_vec (ifn, vargs);
+                  if (modifier == NARROW)
+                    {
+                      tree half_res = make_ssa_name (vectype_in);
+                      new_stmt = gimple_build_call_internal_vec (ifn, vargs);
+                      gimple_call_set_lhs (new_stmt, half_res);
+                      vect_finish_stmt_generation (stmt, new_stmt, gsi);
+                      if ((i & 1) == 0)
+                        {
+                          prev_res = half_res;
+                          continue;
+                        }
+                      new_temp = make_ssa_name (vec_dest);
+                      new_stmt = gimple_build_assign (new_temp, convert_code,
+                                                      prev_res, half_res);
+                    }
                   else
-                    new_stmt = gimple_build_call_vec (fndecl, vargs);
-                  new_temp = make_ssa_name (vec_dest, new_stmt);
-                  gimple_call_set_lhs (new_stmt, new_temp);
+                    {
+                      if (ifn != IFN_LAST)
+                        new_stmt = gimple_build_call_internal_vec (ifn, vargs);
+                      else
+                        new_stmt = gimple_build_call_vec (fndecl, vargs);
+                      new_temp = make_ssa_name (vec_dest, new_stmt);
+                      gimple_call_set_lhs (new_stmt, new_temp);
+                    }
                   vect_finish_stmt_generation (stmt, new_stmt, gsi);
                   SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
                 }
@@ -2454,6 +2506,21 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
               new_temp = make_ssa_name (vec_dest);
               new_stmt = gimple_build_assign (new_temp, new_var);
             }
+          else if (modifier == NARROW)
+            {
+              tree half_res = make_ssa_name (vectype_in);
+              new_stmt = gimple_build_call_internal_vec (ifn, vargs);
+              gimple_call_set_lhs (new_stmt, half_res);
+              vect_finish_stmt_generation (stmt, new_stmt, gsi);
+              if ((j & 1) == 0)
+                {
+                  prev_res = half_res;
+                  continue;
+                }
+              new_temp = make_ssa_name (vec_dest);
+              new_stmt = gimple_build_assign (new_temp, convert_code,
+                                              prev_res, half_res);
+            }
           else
             {
               if (ifn != IFN_LAST)
@@ -2465,17 +2532,16 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
             }
           vect_finish_stmt_generation (stmt, new_stmt, gsi);
 
-          if (j == 0)
+          if (j == (modifier == NARROW ? 1 : 0))
             STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
           else
             STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
 
           prev_stmt_info = vinfo_for_stmt (new_stmt);
         }
-
-      break;
-
-    case NARROW:
+    }
+  else if (modifier == NARROW)
+    {
       for (j = 0; j < ncopies; ++j)
         {
           /* Build argument list for the vectorized call.  */
@@ -2546,10 +2612,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
               vargs.quick_push (vec_oprnd1);
             }
 
-          if (ifn != IFN_LAST)
-            new_stmt = gimple_build_call_internal_vec (ifn, vargs);
-          else
-            new_stmt = gimple_build_call_vec (fndecl, vargs);
+          new_stmt = gimple_build_call_vec (fndecl, vargs);
           new_temp = make_ssa_name (vec_dest, new_stmt);
           gimple_call_set_lhs (new_stmt, new_temp);
           vect_finish_stmt_generation (stmt, new_stmt, gsi);
@@ -2563,13 +2626,10 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
         }
 
       *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
-
-      break;
-
-    case WIDEN:
-      /* No current target implements this case.  */
-      return false;
     }
+  else
+    /* No current target implements this case.  */
+    return false;
 
   vargs.release ();