tree-ssa-loop-niter.c (inverse, [...]): Use build_int_cst instead of build_int_cst_type.
	* tree-ssa-loop-niter.c (inverse, number_of_iterations_ne,
	assert_no_overflow_lt, assert_loop_rolls_lt, number_of_iterations_lt,
	number_of_iterations_le, number_of_iterations_cond, find_loop_niter,
	estimate_numbers_of_iterations_loop): Use build_int_cst instead of
	build_int_cst_type.
	* tree-chrec.c (chrec_fold_multiply_poly_poly,
	chrec_fold_multiply): Same.
	* tree-ssa-loop-ivopts.c (strip_offset_1, force_expr_to_var_cost,
	force_expr_to_var_cost): Same.
	* tree-mudflap.c (mf_xform_derefs_1): Same.
	* tree-ssa-loop-prefetch.c (idx_analyze_ref): Same.

From-SVN: r112755
commit ff5e9a942c
parent 544c479db3
gcc/ChangeLog
@@ -1,3 +1,17 @@
+2006-04-07  Sebastian Pop  <pop@cri.ensmp.fr>
+
+	* tree-ssa-loop-niter.c (inverse, number_of_iterations_ne,
+	assert_no_overflow_lt, assert_loop_rolls_lt, number_of_iterations_lt,
+	number_of_iterations_le, number_of_iterations_cond, find_loop_niter,
+	estimate_numbers_of_iterations_loop): Use build_int_cst instead of
+	build_int_cst_type.
+	* tree-chrec.c (chrec_fold_multiply_poly_poly,
+	chrec_fold_multiply): Same.
+	* tree-ssa-loop-ivopts.c (strip_offset_1, force_expr_to_var_cost,
+	force_expr_to_var_cost): Same.
+	* tree-mudflap.c (mf_xform_derefs_1): Same.
+	* tree-ssa-loop-prefetch.c (idx_analyze_ref): Same.
+
 2006-04-07  David Edelsohn  <edelsohn@gnu.org>
 
 	* config/rs6000/rs6000.c (rs6000_rtx_costs) <CONST_DOUBLE>: Do not
gcc/tree-chrec.c
@@ -218,7 +218,7 @@ chrec_fold_multiply_poly_poly (tree type,
   t2 = chrec_fold_multiply (type, CHREC_RIGHT (poly0), CHREC_RIGHT (poly1));
   t2 = chrec_fold_multiply (type, SCALAR_FLOAT_TYPE_P (type)
                             ? build_real (type, dconst2)
-                            : build_int_cst_type (type, 2), t2);
+                            : build_int_cst (type, 2), t2);
 
   var = CHREC_VARIABLE (poly0);
   return build_polynomial_chrec (var, t0,
@@ -374,7 +374,7 @@ chrec_fold_multiply (tree type,
       if (integer_onep (op1))
         return op0;
       if (integer_zerop (op1))
-        return build_int_cst_type (type, 0);
+        return build_int_cst (type, 0);
 
       return build_polynomial_chrec
         (CHREC_VARIABLE (op0),
@@ -387,7 +387,7 @@ chrec_fold_multiply (tree type,
         return op1;
 
       if (integer_zerop (op0))
-        return build_int_cst_type (type, 0);
+        return build_int_cst (type, 0);
 
       switch (TREE_CODE (op1))
         {
@@ -401,7 +401,7 @@ chrec_fold_multiply (tree type,
       if (integer_onep (op1))
         return op0;
       if (integer_zerop (op1))
-        return build_int_cst_type (type, 0);
+        return build_int_cst (type, 0);
       return fold_build2 (MULT_EXPR, type, op0, op1);
     }
 }
gcc/tree-mudflap.c
@@ -845,7 +845,7 @@ mf_xform_derefs_1 (block_stmt_iterator *iter, tree *tp,
       base = addr;
       limit = fold_build2 (MINUS_EXPR, ptr_type_node,
                            fold_build2 (PLUS_EXPR, ptr_type_node, base, size),
-                           build_int_cst_type (ptr_type_node, 1));
+                           build_int_cst (ptr_type_node, 1));
       break;
 
     case ARRAY_RANGE_REF:
gcc/tree-ssa-loop-ivopts.c
@@ -1763,7 +1763,7 @@ strip_offset_1 (tree expr, bool inside_addr, bool top_compref,
         return orig_expr;
 
       *offset = int_cst_value (expr);
-      return build_int_cst_type (orig_type, 0);
+      return build_int_cst (orig_type, 0);
 
     case PLUS_EXPR:
     case MINUS_EXPR:
@@ -3380,8 +3380,8 @@ force_expr_to_var_cost (tree expr)
       tree addr;
       tree type = build_pointer_type (integer_type_node);
 
-      integer_cost = computation_cost (build_int_cst_type (integer_type_node,
-                                                           2000));
+      integer_cost = computation_cost (build_int_cst (integer_type_node,
+                                                      2000));
 
       SET_DECL_RTL (var, x);
       TREE_STATIC (var) = 1;
@@ -3391,7 +3391,7 @@ force_expr_to_var_cost (tree expr)
      address_cost
        = computation_cost (build2 (PLUS_EXPR, type,
                                    addr,
-                                   build_int_cst_type (type, 2000))) + 1;
+                                   build_int_cst (type, 2000))) + 1;
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "force_expr_to_var_cost:\n");
gcc/tree-ssa-loop-niter.c
@@ -114,7 +114,7 @@ inverse (tree x, tree mask)
     }
   else
     {
-      rslt = build_int_cst_type (type, 1);
+      rslt = build_int_cst (type, 1);
       for (; ctr; ctr--)
         {
           rslt = int_const_binop (MULT_EXPR, rslt, x, 0);
@@ -178,7 +178,7 @@ number_of_iterations_ne (tree type, affine_iv *iv, tree final,
                                            - tree_low_cst (bits, 1)));
 
   d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
-                               build_int_cst_type (niter_type, 1), bits);
+                               build_int_cst (niter_type, 1), bits);
   s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);
 
   if (!never_infinite)
@@ -305,7 +305,7 @@ assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
         }
       else
         diff = fold_build2 (MINUS_EXPR, niter_type, step,
-                            build_int_cst_type (niter_type, 1));
+                            build_int_cst (niter_type, 1));
       bound = fold_build2 (MINUS_EXPR, type,
                            TYPE_MAX_VALUE (type), fold_convert (type, diff));
       assumption = fold_build2 (LE_EXPR, boolean_type_node,
@@ -326,7 +326,7 @@ assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
         }
       else
         diff = fold_build2 (MINUS_EXPR, niter_type, step,
-                            build_int_cst_type (niter_type, 1));
+                            build_int_cst (niter_type, 1));
       bound = fold_build2 (PLUS_EXPR, type,
                            TYPE_MIN_VALUE (type), fold_convert (type, diff));
       assumption = fold_build2 (GE_EXPR, boolean_type_node,
@@ -357,7 +357,7 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
   if (nonzero_p (iv0->step))
     {
       diff = fold_build2 (MINUS_EXPR, type,
-                          iv0->step, build_int_cst_type (type, 1));
+                          iv0->step, build_int_cst (type, 1));
 
       /* We need to know that iv0->base >= MIN + iv0->step - 1.  Since
          0 address never belongs to any object, we can assume this for
@@ -378,7 +378,7 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
   else
     {
       diff = fold_build2 (PLUS_EXPR, type,
-                          iv1->step, build_int_cst_type (type, 1));
+                          iv1->step, build_int_cst (type, 1));
 
       if (!POINTER_TYPE_P (type))
         {
@@ -464,7 +464,7 @@ number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
     {
      affine_iv zps;
 
-      zps.base = build_int_cst_type (niter_type, 0);
+      zps.base = build_int_cst (niter_type, 0);
      zps.step = step;
      /* number_of_iterations_lt_to_ne will add assumptions that ensure that
         zps does not overflow.  */
@@ -483,7 +483,7 @@ number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
   assert_loop_rolls_lt (type, iv0, iv1, niter);
 
   s = fold_build2 (MINUS_EXPR, niter_type,
-                   step, build_int_cst_type (niter_type, 1));
+                   step, build_int_cst (niter_type, 1));
   delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
   niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);
   return true;
@@ -525,10 +525,10 @@ number_of_iterations_le (tree type, affine_iv *iv0, affine_iv *iv1,
 
   if (nonzero_p (iv0->step))
     iv1->base = fold_build2 (PLUS_EXPR, type,
-                             iv1->base, build_int_cst_type (type, 1));
+                             iv1->base, build_int_cst (type, 1));
   else
     iv0->base = fold_build2 (MINUS_EXPR, type,
-                             iv0->base, build_int_cst_type (type, 1));
+                             iv0->base, build_int_cst (type, 1));
   return number_of_iterations_lt (type, iv0, iv1, niter, never_infinite);
 }
 
@@ -649,7 +649,7 @@ number_of_iterations_cond (tree type, affine_iv *iv0, enum tree_code code,
   /* If the loop exits immediately, there is nothing to do.  */
   if (zero_p (fold_build2 (code, boolean_type_node, iv0->base, iv1->base)))
     {
-      niter->niter = build_int_cst_type (unsigned_type_for (type), 0);
+      niter->niter = build_int_cst (unsigned_type_for (type), 0);
       return true;
     }
 
@@ -1171,7 +1171,7 @@ find_loop_niter (struct loop *loop, edge *exit)
         {
           /* We exit in the first iteration through this exit.
              We won't find anything better.  */
-          niter = build_int_cst_type (unsigned_type_node, 0);
+          niter = build_int_cst (unsigned_type_node, 0);
           *exit = ex;
           break;
         }
@@ -1649,7 +1649,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop)
      if (!zero_p (niter_desc.may_be_zero)
          && !nonzero_p (niter_desc.may_be_zero))
        niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
-                       build_int_cst_type (type, 0),
+                       build_int_cst (type, 0),
                        niter);
      record_estimate (loop, niter,
                       niter_desc.additional_info,
gcc/tree-ssa-loop-prefetch.c
@@ -366,7 +366,7 @@ idx_analyze_ref (tree base, tree *index, void *data)
   if (cst_and_fits_in_hwi (ibase))
     {
       idelta += int_cst_value (ibase);
-      ibase = build_int_cst_type (TREE_TYPE (ibase), 0);
+      ibase = build_int_cst (TREE_TYPE (ibase), 0);
     }
 
   if (TREE_CODE (base) == ARRAY_REF)