From 0d52bcc19e63766de745d046844e8f2c12c89536 Mon Sep 17 00:00:00 2001
From: Kazu Hirata
Date: Wed, 13 Jun 2007 02:27:04 +0000
Subject: [PATCH] auto-inc-dec.c, [...]: Fix comment typos.

	* auto-inc-dec.c, c-incpath.c, config/c4x/libgcc.S,
	config/sh/divcost-analysis, dbgcnt.def, df-core.c,
	df-problems.c, df-scan.c, df.h, dominance.c, dse.c, regstat.c,
	tree-data-ref.c, tree-ssa-loop-im.c, tree-ssa-loop-prefetch.c,
	tree-vect-transform.c: Fix comment typos.  Follow spelling
	conventions.

From-SVN: r125666
---
 gcc/ChangeLog                  | 11 ++++++++++-
 gcc/auto-inc-dec.c             |  6 +++---
 gcc/c-incpath.c                |  2 +-
 gcc/config/c4x/libgcc.S        |  2 +-
 gcc/config/sh/divcost-analysis |  2 +-
 gcc/dbgcnt.def                 |  2 +-
 gcc/df-core.c                  | 10 +++++-----
 gcc/df-problems.c              |  4 ++--
 gcc/df-scan.c                  |  4 ++--
 gcc/df.h                       |  2 +-
 gcc/dominance.c                |  4 ++--
 gcc/dse.c                      | 16 ++++++++--------
 gcc/regstat.c                  |  2 +-
 gcc/tree-data-ref.c            |  6 +++---
 gcc/tree-ssa-loop-im.c         |  2 +-
 gcc/tree-ssa-loop-prefetch.c   |  6 +++---
 gcc/tree-vect-transform.c      |  6 +++---
 17 files changed, 48 insertions(+), 39 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index becb6449abf4..d0fba3ff3bc3 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,4 +1,13 @@
-2007-06-12  Seongbae Park
+2007-06-13  Kazu Hirata
+
+	* auto-inc-dec.c, c-incpath.c, config/c4x/libgcc.S,
+	config/sh/divcost-analysis, dbgcnt.def, df-core.c,
+	df-problems.c, df-scan.c, df.h, dominance.c, dse.c, regstat.c,
+	tree-data-ref.c, tree-ssa-loop-im.c, tree-ssa-loop-prefetch.c,
+	tree-vect-transform.c: Fix comment typos.  Follow spelling
+	conventions.
+
+2007-06-12  Seongbae Park
 
 	* df-scan.c (df_get_exit-block_use_set): Always add the
 	stack pointer to the exit block use set.
diff --git a/gcc/auto-inc-dec.c b/gcc/auto-inc-dec.c
index 24156624eac6..6718b742c2ab 100644
--- a/gcc/auto-inc-dec.c
+++ b/gcc/auto-inc-dec.c
@@ -90,7 +90,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
      HAVE_PRE_INCREMENT, HAVE_POST_INCREMENT, HAVE_PRE_DECREMENT or
      HAVE_POST_DECREMENT defined.
 
-  2) c is a contant not equal to the width of the value being accessed
+  2) c is a constant not equal to the width of the value being accessed
      by the pointer.  This is useful for machines that have
      HAVE_PRE_MODIFY_DISP, HAVE_POST_MODIFY_DISP defined.
@@ -445,7 +445,7 @@ static rtx *reg_next_def = NULL;
 /* Move dead note that match PATTERN to TO_INSN from FROM_INSN.  We do
    not really care about moving any other notes from the inc or add
    insn.  Moving the REG_EQUAL and REG_EQUIV is clearly wrong and it
-   does not appear that there are any other kinds of relavant notes.  */
+   does not appear that there are any other kinds of relevant notes.  */
 
 static void
 move_dead_notes (rtx to_insn, rtx from_insn, rtx pattern)
@@ -1195,7 +1195,7 @@ find_inc (bool first_try)
 	  return false;
 	}
 
-      /* Need to check that there are no assignemnts to b
+      /* Need to check that there are no assignments to b
 	 before the add insn.  */
       other_insn = get_next_ref (REGNO (inc_insn.reg1), bb, reg_next_def);
diff --git a/gcc/c-incpath.c b/gcc/c-incpath.c
index 8b6512c8e0f0..355f8db22245 100644
--- a/gcc/c-incpath.c
+++ b/gcc/c-incpath.c
@@ -391,7 +391,7 @@ add_path (char *path, int chain, int cxx_aware, bool user_supplied_p)
 #if defined (HAVE_DOS_BASED_FILE_SYSTEM)
   /* Remove unnecessary trailing slashes.  On some versions of MS
      Windows, trailing _forward_ slashes cause no problems for stat().
-     On newer versions, stat() does not recognise a directory that ends
+     On newer versions, stat() does not recognize a directory that ends
      in a '\\' or '/', unless it is a drive root dir, such as "c:/",
      where it is obligatory.  */
   int pathlen = strlen (path);
diff --git a/gcc/config/c4x/libgcc.S b/gcc/config/c4x/libgcc.S
index 2aa495fd9e49..8f801da6af76 100644
--- a/gcc/config/c4x/libgcc.S
+++ b/gcc/config/c4x/libgcc.S
@@ -327,7 +327,7 @@ div_32: tstb ar1, ar1
 ; Now divisor and dividend are aligned.  Do first SUBC by hand, save
 ; of the forst quotient digit.  Then, shift divisor right rather
 ; than shifting dividend left.  This leaves a zero in the top bit of
-; the divident
+; the dividend
 ;
 	ldi	1, ar0		; Initizialize MSB of quotient
 	lsh	rc, ar0		; create a mask for MSBs
diff --git a/gcc/config/sh/divcost-analysis b/gcc/config/sh/divcost-analysis
index 0296269bb521..d93f04355f46 100644
--- a/gcc/config/sh/divcost-analysis
+++ b/gcc/config/sh/divcost-analysis
@@ -39,7 +39,7 @@ div_le128_neg -> div_by_1_neg:  4
 div_le128_neg -> rts           18
 
 sh4-200 absolute divisor range:
-      1   [2..128]  [129..64K)  [64K..|divident|/256]  >=64K,>|divident/256|
+      1   [2..128]  [129..64K)  [64K..|dividend|/256]  >=64K,>|dividend/256|
 udiv      18         22          38                    32          30
 sdiv pos: 20         24          41                    35          32
 sdiv neg: 15         25          42                    36          33
diff --git a/gcc/dbgcnt.def b/gcc/dbgcnt.def
index 5c0b7496d7d8..854f3d922c3a 100644
--- a/gcc/dbgcnt.def
+++ b/gcc/dbgcnt.def
@@ -33,7 +33,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
    How does it work ?
 
-   Everytime dbg_cnt(named-counter) is called,
+   Every time dbg_cnt(named-counter) is called,
    the counter is incremented for the named-counter.
    And the incremented value is compared against the threshold (limit)
    specified by the option.
diff --git a/gcc/df-core.c b/gcc/df-core.c
index 24c57e183928..68f6c5076e19 100644
--- a/gcc/df-core.c
+++ b/gcc/df-core.c
@@ -64,7 +64,7 @@ dataflow solution.  The transfer functions are only rebuilt if the
 some instruction within the block has changed.
 
 The top layer is the dataflow solution itself.  The dataflow solution
-is computed by using an efficient iterative solver and the trasfer
+is computed by using an efficient iterative solver and the transfer
 functions.  The dataflow solution must be recomputed whenever the
 control changes or if one of the transfer function changes.
@@ -115,7 +115,7 @@ DF_ANALYZE causes all of the defined problems to be (re)solved.  When
 DF_ANALYZE is completes, the IN and OUT sets for each basic block
 contain the computer information.  The DF_*_BB_INFO macros can be
 used to access these bitvectors.  All deferred rescannings are down before
-the transfer functions are recompited.
+the transfer functions are recomputed.
 
 DF_DUMP can then be called to dump the information produce to some
 file.  This calls DF_DUMP_START, to print the information that is not
@@ -177,7 +177,7 @@ There are four ways of doing the incremental scanning:
    rescanned may be impractical.  Cse and regrename fall into this
    category.
 
-2) Defered rescanning - Calls to df_insn_rescan, df_notes_rescan, and
+2) Deferred rescanning - Calls to df_insn_rescan, df_notes_rescan, and
    df_insn_delete do not immediately change the insn but instead make
    a note that the insn needs to be rescanned.  The next call to
    df_analyze, df_finish_pass, or df_process_deferred_rescans will
@@ -635,7 +635,7 @@ df_remove_problem (struct dataflow *dflow)
 /* Remove all of the problems that are not permanent.  Scanning, lr,
-   ur and live are permanent, the rest are removeable.  Also clear all
+   ur and live are permanent, the rest are removable.  Also clear all
    of the changeable_flags.  */
 
 void
@@ -1505,7 +1505,7 @@ df_bb_delete (int bb_index)
    dataflow infomation is not being updated properly.  You can just
    sprinkle calls in until you find the place that is changing an
    underlying structure without calling the proper updating
-   rountine.  */
+   routine.  */
 
 void
 df_verify (void)
diff --git a/gcc/df-problems.c b/gcc/df-problems.c
index 67a8cb0a916a..79b2f42fba7a 100644
--- a/gcc/df-problems.c
+++ b/gcc/df-problems.c
@@ -4264,7 +4264,7 @@ df_simulate_fixup_sets (basic_block bb, bitmap live)
 }
 
-/* Apply the artifical uses and defs at the top of BB in a forwards
+/* Apply the artificial uses and defs at the top of BB in a forwards
    direction.  */
 
 void
@@ -4304,7 +4304,7 @@ df_simulate_one_insn_forwards (basic_block bb, rtx insn, bitmap live)
 }
 
-/* Apply the artifical uses and defs at the end of BB in a backwards
+/* Apply the artificial uses and defs at the end of BB in a backwards
    direction.  */
 
 void
diff --git a/gcc/df-scan.c b/gcc/df-scan.c
index ff33aa396cb4..010488efc004 100644
--- a/gcc/df-scan.c
+++ b/gcc/df-scan.c
@@ -1635,7 +1635,7 @@ df_reorganize_refs_by_insn_bb (basic_block bb, unsigned int offset,
 }
 
-/* Organinze the refs by insn into the table in REF_INFO.  If
+/* Organize the refs by insn into the table in REF_INFO.  If
    blocks_to_analyze is defined, use that set, otherwise the entire
    program.  Include the defs if INCLUDE_DEFS.  Include the uses if
    INCLUDE_USES.  Include the eq_uses if INCLUDE_EQ_USES.  */
@@ -3599,7 +3599,7 @@ df_get_entry_block_def_set (bitmap entry_block_defs)
 /* Return the (conservative) set of hard registers that are defined on
    entry to the function.
-   It uses df->entry_block_defs to determine which regster
+   It uses df->entry_block_defs to determine which register
    reference to include.  */
 
 static void
diff --git a/gcc/df.h b/gcc/df.h
index 1474c0ae2046..9a3345c7ce0d 100644
--- a/gcc/df.h
+++ b/gcc/df.h
@@ -800,7 +800,7 @@ extern struct df *df;
 #define df_chain (df->problems_by_index[DF_CHAIN])
 #define df_note (df->problems_by_index[DF_NOTE])
 
-/* This symbol turns on checking that each modfication of the cfg has
+/* This symbol turns on checking that each modification of the cfg has
    been identified to the appropriate df routines.  It is not part of
    verification per se because the check that the final solution has
    not changed covers this.  However, if the solution is not being
diff --git a/gcc/dominance.c b/gcc/dominance.c
index 6fa765ca3dc0..ed64c4f74cfb 100644
--- a/gcc/dominance.c
+++ b/gcc/dominance.c
@@ -1242,9 +1242,9 @@ iterate_fix_dominators (enum cdi_direction dir, VEC (basic_block, heap) *bbs,
    Then, we need to establish the dominance relation among the basic blocks
    in BBS.  We split the dominance tree by removing the immediate dominator
-   edges from BBS, creating a forrest F.  We form a graph G whose vertices
+   edges from BBS, creating a forest F.  We form a graph G whose vertices
    are BBS and ENTRY and X -> Y is an edge of G if there exists an edge
-   X' -> Y in CFG such that X' belongs to the tree of the dominance forrest
+   X' -> Y in CFG such that X' belongs to the tree of the dominance forest
    whose root is X.  We then determine dominance tree of G.  Note that for X,
    Y in BBS, X dominates Y in CFG if and only if X dominates Y in G.  In this
    step, we can use arbitrary algorithm to determine dominators.
diff --git a/gcc/dse.c b/gcc/dse.c
index e846f3f1459a..2c27d6e8e8b3 100644
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -96,7 +96,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
    possible for each address.  This pass is a forwards pass through
    each basic block.  From the point of view of the global technique,
    the first pass could examine a block in either direction.  The
-   forwards ordering is to accomodate cselib.
+   forwards ordering is to accommodate cselib.
 
    We a simplifying assumption: addresses fall into four broad
    categories:
@@ -183,7 +183,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
    c) For any pass that may prespill, there is currently no mechanism
    to tell the dse pass that the slot being used has the special
    properties that reload uses.  It may be that all that is
-   requirred is to have those passes make the same calls that reload
+   required is to have those passes make the same calls that reload
    does, assuming that the alias sets can be manipulated in the same
    way.  */
@@ -710,7 +710,7 @@ dse_step0 (void)
    First step.
 
   Scan all of the insns.  Any random ordering of the blocks is fine.
-  Each block is scanned in forward order to accomodate cselib which
+  Each block is scanned in forward order to accommodate cselib which
   is used to remove stores with non-constant bases.
 ----------------------------------------------------------------------------*/
@@ -780,7 +780,7 @@ replace_inc_dec (rtx *r, void *d)
     case PRE_MODIFY:
     case POST_MODIFY:
       {
-	/* We can resuse the add because we are about to delete the
+	/* We can reuse the add because we are about to delete the
 	   insn that contained it.  */
 	rtx add = XEXP (x, 0);
 	rtx r1 = XEXP (add, 0);
@@ -1536,7 +1536,7 @@ check_mem_read_rtx (rtx *loc, void *data)
   read_info->next = insn_info->read_rec;
   insn_info->read_rec = read_info;
 
-  /* We ignore the clobbers in store_info.  The is mildly agressive,
+  /* We ignore the clobbers in store_info.  The is mildly aggressive,
      but there really should not be a clobber followed by a read.  */
 
   if (spill_alias_set)
@@ -1756,7 +1756,7 @@ scan_insn (bb_info_t bb_info, rtx insn)
   bb_info->last_insn = insn_info;
 
-  /* Cselib clears the table for this case, so we have to essentually
+  /* Cselib clears the table for this case, so we have to essentially
      do the same.  */
   if (NONJUMP_INSN_P (insn)
       && GET_CODE (PATTERN (insn)) == ASM_OPERANDS
@@ -1947,7 +1947,7 @@ dse_step1 (void)
		 algorithm must take a more conservative view of block
		 mode reads than the local alg does.  So to get the case
		 where you have a store to the frame followed by a non
-		 overlaping block more read, we look at the active local
+		 overlapping block more read, we look at the active local
		 stores at the end of the function and delete all of the
		 frame and spill based ones.  */
	      if (stores_off_frame_dead_at_return
@@ -2762,7 +2762,7 @@ dse_step4 (void)
 /*----------------------------------------------------------------------------
    Fifth step.
 
-   Delete the stores that can only be deleted using the global informantion.
+   Delete the stores that can only be deleted using the global information.
 ----------------------------------------------------------------------------*/
diff --git a/gcc/regstat.c b/gcc/regstat.c
index cfd904f83040..0a028ae12b06 100644
--- a/gcc/regstat.c
+++ b/gcc/regstat.c
@@ -406,7 +406,7 @@ regstat_get_setjmp_crosses (void)
 
-/* Compute callse crossed for BB.  Live is a scratch bitvector.  */
+/* Compute calls crossed for BB.  Live is a scratch bitvector.  */
 
 static void
 regstat_bb_compute_calls_crossed (unsigned int bb_index, bitmap live)
diff --git a/gcc/tree-data-ref.c b/gcc/tree-data-ref.c
index e0223c326f78..53c9c0054fb8 100644
--- a/gcc/tree-data-ref.c
+++ b/gcc/tree-data-ref.c
@@ -769,7 +769,7 @@ free_data_ref (data_reference_p dr)
 /* Analyzes memory reference MEMREF accessed in STMT.  The reference
    is read if IS_READ is true, write otherwise.  Returns the
    data_reference description of MEMREF.  NEST is the outermost loop of the
-   loop nest in that the reference should be analysed.  */
+   loop nest in that the reference should be analyzed.  */
 
 struct data_reference *
 create_data_ref (struct loop *nest, tree memref, tree stmt, bool is_read)
@@ -1225,7 +1225,7 @@ initialize_data_dependence_relation (struct data_reference *a,
     }
 
   /* If the base of the object is not invariant in the loop nest, we cannot
-     analyse it.  TODO -- in fact, it would suffice to record that there may
+     analyze it.  TODO -- in fact, it would suffice to record that there may
      be arbitrary dependences in the loops where the base object varies.  */
   if (!object_address_invariant_in_loop_p (VEC_index (loop_p, loop_nest, 0),
					    DR_BASE_OBJECT (a)))
@@ -3942,7 +3942,7 @@ get_references_in_stmt (tree stmt, VEC (data_ref_loc, heap) **references)
 /* Stores the data references in STMT to DATAREFS.  If there is an unanalyzable
    reference, returns false, otherwise returns true.  NEST is the outermost
-   loop of the loop nest in that the references should be analysed.  */
+   loop of the loop nest in that the references should be analyzed.  */
 
 static bool
 find_data_references_in_stmt (struct loop *nest, tree stmt,
diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c
index 5b00cf642176..0813474b9893 100644
--- a/gcc/tree-ssa-loop-im.c
+++ b/gcc/tree-ssa-loop-im.c
@@ -642,7 +642,7 @@ rewrite_bittest (block_stmt_iterator *bsi)
   if (TREE_CODE (stmt1) != GIMPLE_MODIFY_STMT)
     return stmt;
 
-  /* There is a conversion inbetween possibly inserted by fold.  */
+  /* There is a conversion in between possibly inserted by fold.  */
   t = GIMPLE_STMT_OPERAND (stmt1, 1);
   if (TREE_CODE (t) == NOP_EXPR
       || TREE_CODE (t) == CONVERT_EXPR)
diff --git a/gcc/tree-ssa-loop-prefetch.c b/gcc/tree-ssa-loop-prefetch.c
index 2424c4a1832d..a4c2d0897953 100644
--- a/gcc/tree-ssa-loop-prefetch.c
+++ b/gcc/tree-ssa-loop-prefetch.c
@@ -1334,7 +1334,7 @@ determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
     }
 
   /* Prepare the references in the form suitable for data dependence
-     analysis.  We ignore unanalysable data references (the results
+     analysis.  We ignore unanalyzable data references (the results
      are used just as a heuristics to estimate temporality of the
     references, hence we do not need to worry about correctness).  */
   for (gr = refs; gr; gr = gr->next)
@@ -1376,7 +1376,7 @@ determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
       if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
	  || DDR_NUM_DIST_VECTS (dep) == 0)
	{
-	  /* If the dependence cannot be analysed, assume that there might be
+	  /* If the dependence cannot be analyzed, assume that there might be
	     a reuse.  */
	  dist = 0;
@@ -1385,7 +1385,7 @@
	}
       else
	{
-	  /* The distance vectors are normalised to be always lexicographically
+	  /* The distance vectors are normalized to be always lexicographically
	     positive, hence we cannot tell just from them whether DDR_A comes
	     before DDR_B or vice versa.  However, it is not important,
	     anyway -- if DDR_A is close to DDR_B, then it is either reused in
diff --git a/gcc/tree-vect-transform.c b/gcc/tree-vect-transform.c
index 00e55ed209a0..477fc883b7d0 100644
--- a/gcc/tree-vect-transform.c
+++ b/gcc/tree-vect-transform.c
@@ -188,7 +188,7 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
                  "prologue peel iters set conservatively.");
 
       /* If peeling for alignment is unknown, loop bound of main loop becomes
-         unkown.  */
+         unknown.  */
       peel_iters_epilogue = vf - 1;
       if (vect_print_dump_info (REPORT_DETAILS))
         fprintf (vect_dump, "cost model: "
@@ -270,7 +270,7 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
-   functions.  Design better to avoid maintainence issues.  */
+   functions.  Design better to avoid maintenance issues.  */
 
 /* Function vect_model_reduction_cost.
@@ -526,7 +526,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies)
   /* Unaligned software pipeline has a load of an address, an initial load,
      and possibly a mask operation to "prime" the loop.  However, if this is
      an access in a group of loads, which provide strided
-     acccess, then the above cost should only be considered for one
+     access, then the above cost should only be considered for one
      access in the group.  Inside the loop, there is a load op
     and a realignment op.  */