mirror of git://gcc.gnu.org/git/gcc.git (synced 2025-03-19 03:50:26 +08:00)
c-typeck.c, [...]: Fix comment typos.
* c-typeck.c, config/i386/netware.h, config/m32c/cond.md,
config/ms1/ms1.h, config/rs6000/predicates.md,
config/s390/s390.c, params.def, postreload-gcse.c,
tree-flow-inline.h, tree-ssa-operands.c, tree-vectorizer.c,
tree-vrp.c, tree.c: Fix comment typos.
* doc/invoke.texi: Fix typos.

From-SVN: r106532
commit c83eecadfd
parent d492addcd3
@@ -1,3 +1,12 @@
+2005-11-05  Kazu Hirata  <kazu@codesourcery.com>
+
+	* c-typeck.c, config/i386/netware.h, config/m32c/cond.md,
+	config/ms1/ms1.h, config/rs6000/predicates.md,
+	config/s390/s390.c, params.def, postreload-gcse.c,
+	tree-flow-inline.h, tree-ssa-operands.c, tree-vectorizer.c,
+	tree-vrp.c, tree.c: Fix comment typos.
+	* doc/invoke.texi: Fix typos.
+
 2005-11-05  Sebastian Pop  <pop@cri.ensmp.fr>
 
 	* lambda-code.c (lambda_transform_legal_p): Use DDR_NUM_DIST_VECTS
@@ -917,7 +917,7 @@ alloc_tagged_tu_seen_cache (tree t1, tree t2)
       struct a *next;
     };
   If we are comparing this against a similar struct in another TU,
-  and did not assume they were compatiable, we end up with an infinite
+  and did not assume they were compatible, we end up with an infinite
   loop. */
   tu->val = 1;
   return tu;
@@ -142,7 +142,7 @@ Boston, MA 02110-1301, USA. */
    differently depending on something about the variable or
    function named by the symbol (such as what section it is in).
 
-   On i386 running NetWare, modify the assembler name with an undercore (_)
+   On i386 running NetWare, modify the assembler name with an underscore (_)
    prefix and a suffix consisting of an atsign (@) followed by a string of
    digits that represents the number of bytes of arguments passed to the
    function, if it has the attribute STDCALL. Alternatively, if it has the
@@ -28,7 +28,7 @@
 ; so instead we invert the conditional and jump around a regular jump.
 
 ; Note that we can, at some point in the future, add code to omit the
-; "cmp" portion of the insn if the preceeding insn happened to set the
+; "cmp" portion of the insn if the preceding insn happened to set the
 ; right flags already. For example, a mov followed by a "cmp *,0" is
 ; redundant; the move already set the Z flag.
 
@@ -42,7 +42,7 @@ extern enum processor_type ms1_cpu;
 #undef ASM_SPEC
 #define ASM_SPEC "%{march=ms1-16-002: -march=ms1-16-002} %{march=ms1-16-003: -march=ms1-16-003} %{!march=*: -march=ms1-16-002}"
 
-/* A string to pass to at the end of the comman given to the linker. */
+/* A string to pass to at the end of the command given to the linker. */
 #undef LIB_SPEC
 #define LIB_SPEC "--start-group -lc -lsim --end-group \
            %{msim: ; \
@@ -55,7 +55,7 @@
        (match_test "INTVAL (op) >= 0 && INTVAL (op) <= 31")))
 
 ;; Return 1 if op is a signed 8-bit constant integer.
-;; Integer multiplcation complete more quickly
+;; Integer multiplication complete more quickly
 (define_predicate "s8bit_cint_operand"
   (and (match_code "const_int")
        (match_test "INTVAL (op) >= -128 && INTVAL (op) <= 127")))
@@ -3879,7 +3879,7 @@ s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
   int bitsize = INTVAL (op1);
   int bitpos = INTVAL (op2);
 
-  /* We need byte alignement. */
+  /* We need byte alignment. */
   if (bitsize % BITS_PER_UNIT)
     return false;
 
@@ -5939,8 +5939,8 @@ function given basic block needs to have to be considered hot
 The maximum number of loop iterations we predict statically. This is useful
 in cases where function contain single loop with known bound and other loop
 with unknown. We predict the known number of iterations correctly, while
-the unknown nummber of iterations average to roughly 10. This means that the
-loop without bounds would appear artifically cold relative to the other one.
+the unknown number of iterations average to roughly 10. This means that the
+loop without bounds would appear artificially cold relative to the other one.
 
 @item tracer-dynamic-coverage
 @itemx tracer-dynamic-coverage-feedback
@@ -314,7 +314,7 @@ DEFPARAM(HOT_BB_FREQUENCY_FRACTION,
    are predicted to iterate relatively few (10) times at average.
    For functions containing one loop with large known number of iterations
    and other loops having unbounded loops we would end up predicting all
-   the other loops cold that is not usually the case. So we need to artifically
+   the other loops cold that is not usually the case. So we need to artificially
    flatten the profile.
 
    We need to cut the maximal predicted iterations to large enought iterations
@@ -1095,7 +1095,7 @@ eliminate_partially_redundant_load (basic_block bb, rtx insn,
 	}
       else
 	{
-	  /* Adding a load on a critical edge will cuase a split. */
+	  /* Adding a load on a critical edge will cause a split. */
 	  if (EDGE_CRITICAL_P (pred))
 	    critical_edge_split = true;
 	  not_ok_count += pred->count;
@@ -1036,7 +1036,7 @@ op_iter_next_tree (ssa_op_iter *ptr)
 
 
 /* This functions clears the iterator PTR, and marks it done. This is normally
-   used to prevent warnings in the compile about might be uninitailzied
+   used to prevent warnings in the compile about might be uninitialized
    components. */
 
 static inline void
@@ -156,7 +156,7 @@ static maydef_optype_p free_maydefs = NULL;
 static mustdef_optype_p free_mustdefs = NULL;
 
 
-/* Return the DECL_UID of the base varaiable of T. */
+/* Return the DECL_UID of the base variable of T. */
 
 static inline unsigned
 get_name_decl (tree t)
@@ -1879,7 +1879,7 @@ vect_is_simple_reduction (struct loop *loop, tree phi)
   /* CHECKME: check for !flag_finite_math_only too? */
   if (SCALAR_FLOAT_TYPE_P (type) && !flag_unsafe_math_optimizations)
     {
-      /* Changing the order of operations changes the sematics. */
+      /* Changing the order of operations changes the semantics. */
       if (vect_print_dump_info (REPORT_DETAILS))
         {
           fprintf (vect_dump, "reduction: unsafe fp math optimization: ");
@@ -1889,7 +1889,7 @@ vect_is_simple_reduction (struct loop *loop, tree phi)
     }
   else if (INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type) && flag_trapv)
     {
-      /* Changing the order of operations changes the sematics. */
+      /* Changing the order of operations changes the semantics. */
       if (vect_print_dump_info (REPORT_DETAILS))
         {
           fprintf (vect_dump, "reduction: unsafe int math optimization: ");
@@ -904,7 +904,7 @@ extract_range_from_assert (value_range_t *vr_p, tree expr)
      anything dominated by 'if (i_5 < 5)' will be optimized away.
      Note, due to the wa in which simulation proceeds, the statement
      i_7 = ASSERT_EXPR <...> we would never be visited because the
-     conditiona 'if (i_5 < 5)' always evaluates to false. However,
+     conditional 'if (i_5 < 5)' always evaluates to false. However,
      this extra check does not hurt and may protect against future
      changes to VRP that may get into a situation similar to the
      NULL pointer dereference example.
@@ -3584,7 +3584,7 @@ handle_dll_attribute (tree * pnode, tree name, tree args, int flags,
 	 any damage. */
       if (is_attribute_p ("dllimport", name))
 	{
-	  /* Honor any target-specific overides. */
+	  /* Honor any target-specific overrides. */
 	  if (!targetm.valid_dllimport_attribute_p (node))
 	    *no_add_attrs = true;
 