Mirror of git://gcc.gnu.org/git/gcc.git
ggc-page.c (struct page_entry): Remove varray.h header.
	* ggc-page.c (struct page_entry): Remove varray.h header.
	Add index_by_depth field.
	Remove save_in_use_p field.
	(struct globals): Add depth_in_use, depth_max, by_depth_in_use,
	by_depth_max, by_depth, and save_in_use fields.
	(INITIAL_PTE_COUNT): Add.
	(save_in_use_p_i): Add.
	(save_in_use_p): Add.
	(adjust_depth): Add.
	(move_ptes_to_front): Add.
	(push_depth): Add.
	(push_by_depth): Add.
	(prefetch): Add.
	(free_page): Add support for and use faster data structures.
	(ggc_alloc): Likewise.
	(init_ggc): Likewise.
	(ggc_recalculate_in_use_p): Likewise.
	(ggc_pop_context): Likewise.
	(clear_marks): Likewise.
	(ggc_pch_read): Likewise.
	* Makefile.in (ggc-page.o): Remove varray.h.

From-SVN: r64320
This commit is contained in:
parent 26f8647153
commit c4775f821f
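The change replaces per-page saved bitmaps and linear page-list walks with three parallel structures: by_depth holds every page_entry in order of increasing context depth, save_in_use holds the saved in-use bitmaps in matching slots, and depth maps a context depth to the index in by_depth where that depth's pages begin; each page records its own slot in index_by_depth. The following is a minimal, self-contained sketch of that indexing scheme, not the real ggc-page.c declarations; the fixed-size arrays and two-field page_entry are simplifications for illustration.

#include <stdio.h>

/* Simplified stand-ins for the real structures in ggc-page.c.  */
typedef struct page_entry
{
  unsigned short context_depth;   /* GC context the page belongs to.  */
  unsigned long index_by_depth;   /* This page's slot in by_depth[].  */
} page_entry;

static page_entry *by_depth[128]; /* All pages, grouped by increasing depth.  */
static unsigned int by_depth_in_use;
static unsigned int depth[16];    /* depth[d]: first by_depth slot at depth d.  */
static unsigned int depth_in_use;

/* Visit every page at context depth D without touching any other
   page: the pages occupy the contiguous range [depth[d], e) of
   by_depth, where e is the start of the next depth (or the end).  */
static void
visit_depth (unsigned int d)
{
  unsigned int e = (d + 1 < depth_in_use) ? depth[d + 1] : by_depth_in_use;
  unsigned int i;

  for (i = depth[d]; i < e; ++i)
    printf ("page %lu is at depth %u\n",
	    by_depth[i]->index_by_depth, by_depth[i]->context_depth);
}

int
main (void)
{
  static page_entry p0 = { 0, 0 }, p1 = { 0, 1 }, p2 = { 1, 2 };

  by_depth[0] = &p0; by_depth[1] = &p1; by_depth[2] = &p2;
  by_depth_in_use = 3;
  depth[0] = 0; depth[1] = 2;	/* Depth 1 starts at slot 2.  */
  depth_in_use = 2;

  visit_depth (0);		/* Pages 0 and 1.  */
  visit_depth (1);		/* Page 2 only.  */
  return 0;
}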
gcc/ChangeLog
@@ -1,3 +1,27 @@
+2003-03-13  Mike Stump  <mrs@apple.com>
+
+	* ggc-page.c (struct page_entry): Remove varray.h header.
+	Add index_by_depth field.
+	Remove save_in_use_p field.
+	(struct globals): Add depth_in_use, depth_max, by_depth_in_use,
+	by_depth_max, by_depth, and save_in_use fields.
+	(INITIAL_PTE_COUNT): Add.
+	(save_in_use_p_i): Add.
+	(save_in_use_p): Add.
+	(adjust_depth): Add.
+	(move_ptes_to_front): Add.
+	(push_depth): Add.
+	(push_by_depth): Add.
+	(prefetch): Add.
+	(free_page): Add support for and use faster data structures.
+	(ggc_alloc): Likewise.
+	(init_ggc): Likewise.
+	(ggc_recalculate_in_use_p): Likewise.
+	(ggc_pop_context): Likewise.
+	(clear_marks): Likewise.
+	(ggc_pch_read): Likewise.
+	* Makefile.in (ggc-page.o): Remove varray.h.
+
 2003-03-13  Nathanael Nerode  <neroden@gcc.gnu.org>
 
 	* ChangeLog: Rotated last year's entries to...
gcc/Makefile.in
@@ -1421,7 +1421,7 @@ ggc-simple.o: ggc-simple.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H)
 	flags.h $(GGC_H) varray.h $(TIMEVAR_H) $(TM_P_H) $(PARAMS_H)
 
 ggc-page.o: ggc-page.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) $(TREE_H) \
-	flags.h toplev.h $(GGC_H) varray.h $(TIMEVAR_H) $(TM_P_H) $(PARAMS_H)
+	flags.h toplev.h $(GGC_H) $(TIMEVAR_H) $(TM_P_H) $(PARAMS_H)
 
 stringpool.o: stringpool.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
 	$(TREE_H) $(GGC_H) gt-stringpool.h
gcc/ggc-page.c (318 lines changed)
@@ -26,7 +26,6 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 #include "rtl.h"
 #include "tm_p.h"
 #include "toplev.h"
-#include "varray.h"
 #include "flags.h"
 #include "ggc.h"
 #include "timevar.h"
@@ -257,9 +256,9 @@ typedef struct page_entry
   struct page_group *group;
 #endif
 
-  /* Saved in-use bit vector for pages that aren't in the topmost
-     context during collection.  */
-  unsigned long *save_in_use_p;
+  /* This is the index in the by_depth varray where this page table
+     can be found.  */
+  unsigned long index_by_depth;
 
   /* Context depth of this page.  */
   unsigned short context_depth;
@@ -371,6 +370,36 @@ static struct globals
 
   /* The file descriptor for debugging output.  */
   FILE *debug_file;
+
+  /* Current number of elements in use in depth below.  */
+  unsigned int depth_in_use;
+
+  /* Maximum number of elements that can be used before resizing.  */
+  unsigned int depth_max;
+
+  /* Each element of this array is an index in by_depth where the given
+     depth starts.  This structure is indexed by the depth we are
+     interested in.  */
+  unsigned int *depth;
+
+  /* Current number of elements in use in by_depth below.  */
+  unsigned int by_depth_in_use;
+
+  /* Maximum number of elements that can be used before resizing.  */
+  unsigned int by_depth_max;
+
+  /* Each element of this array is a pointer to a page_entry, all
+     page_entries can be found in here by increasing depth.
+     index_by_depth in the page_entry is the index into this data
+     structure where that page_entry can be found.  This is used to
+     speed up finding all page_entries at a particular depth.  */
+  page_entry **by_depth;
+
+  /* Each element is a pointer to the saved in_use_p bits, if any,
+     zero otherwise.  We allocate them all together, to enable a
+     better runtime data access pattern.  */
+  unsigned long **save_in_use;
+
 } G;
 
 /* The size in bytes required to maintain a bitmap for the objects
@@ -383,6 +412,9 @@ static struct globals
    free list.  This cannot be larger than HOST_BITS_PER_INT for the
    in_use bitmask for page_group.  */
 #define GGC_QUIRE_SIZE 16
+
+/* Initial guess as to how many page table entries we might need.  */
+#define INITIAL_PTE_COUNT 128
 
 static int ggc_allocated_p PARAMS ((const void *));
 static page_entry *lookup_page_table_entry PARAMS ((const void *));
@@ -402,13 +434,62 @@ static void clear_marks PARAMS ((void));
 static void sweep_pages PARAMS ((void));
 static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
 static void compute_inverse PARAMS ((unsigned));
+static inline void adjust_depth PARAMS ((void));
+static void move_ptes_to_front PARAMS ((int, int));
 
 #ifdef ENABLE_GC_CHECKING
 static void poison_pages PARAMS ((void));
 #endif
 
 void debug_print_page_list PARAMS ((int));
+static void push_depth PARAMS ((unsigned int));
+static void push_by_depth PARAMS ((page_entry *, unsigned long *));
+
+/* Push an entry onto G.depth.  */
+
+inline static void
+push_depth (i)
+     unsigned int i;
+{
+  if (G.depth_in_use >= G.depth_max)
+    {
+      G.depth_max *= 2;
+      G.depth = (unsigned int *) xrealloc ((char *) G.depth,
+					   G.depth_max * sizeof (unsigned int));
+    }
+  G.depth[G.depth_in_use++] = i;
+}
+
+/* Push an entry onto G.by_depth and G.save_in_use.  */
+
+inline static void
+push_by_depth (p, s)
+     page_entry *p;
+     unsigned long *s;
+{
+  if (G.by_depth_in_use >= G.by_depth_max)
+    {
+      G.by_depth_max *= 2;
+      G.by_depth = (page_entry **) xrealloc ((char *) G.by_depth,
+					     G.by_depth_max * sizeof (page_entry *));
+      G.save_in_use = (unsigned long **) xrealloc ((char *) G.save_in_use,
+						   G.by_depth_max * sizeof (unsigned long *));
+    }
+  G.by_depth[G.by_depth_in_use] = p;
+  G.save_in_use[G.by_depth_in_use++] = s;
+}
+
+#if (GCC_VERSION < 3001)
+#define prefetch(X) ((void) X)
+#else
+#define prefetch(X) __builtin_prefetch (X)
+#endif
+
+#define save_in_use_p_i(__i) \
+  (G.save_in_use[__i])
+#define save_in_use_p(__p) \
+  (save_in_use_p_i (__p->index_by_depth))
 
 /* Returns nonzero if P was allocated in GC'able memory.  */
 
 static inline int
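Since save_in_use_p (p) now expands to an element of the parallel G.save_in_use array rather than a struct field, it remains an lvalue, which is why later hunks can still write save_in_use_p (p) = xmalloc (bitmap_size). A reduced illustration of the idiom, with hypothetical names (store and slot_of are not from the patch):

#include <stdio.h>
#include <stdlib.h>

static unsigned long *store[4];	/* Parallel array, like G.save_in_use.  */

/* The macro expands to an array element, i.e. an lvalue.  */
#define slot_of(i) (store[i])

int
main (void)
{
  slot_of (2) = (unsigned long *) malloc (sizeof (unsigned long));
  *slot_of (2) = 42;		/* Assignment through the macro works.  */
  printf ("%lu\n", *slot_of (2));
  free (slot_of (2));
  return 0;
}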
@@ -776,6 +857,26 @@ alloc_page (order)
   return entry;
 }
 
+/* Adjust the size of G.depth so that no index greater than the one
+   used by the top of the G.by_depth is used.  */
+
+static inline void
+adjust_depth ()
+{
+  page_entry *top;
+
+  if (G.by_depth_in_use)
+    {
+      top = G.by_depth[G.by_depth_in_use-1];
+
+      /* Peel back indices in depth that index into by_depth, so that
+	 as new elements are added to by_depth, we note the indices
+	 of those elements, if they are for new context depths.  */
+      while (G.depth_in_use > (size_t)top->context_depth+1)
+	--G.depth_in_use;
+    }
+}
+
 /* For a page that is no longer needed, put it on the free page list.  */
 
 static inline void
@@ -797,6 +898,30 @@ free_page (entry)
   clear_page_group_in_use (entry->group, entry->page);
 #endif
 
+  if (G.by_depth_in_use > 1)
+    {
+      page_entry *top = G.by_depth[G.by_depth_in_use-1];
+
+      /* If they are at the same depth, put the top element into the
+	 freed slot.  */
+      if (entry->context_depth == top->context_depth)
+	{
+	  int i = entry->index_by_depth;
+	  G.by_depth[i] = top;
+	  G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
+	  top->index_by_depth = i;
+	}
+      else
+	{
+	  /* We cannot free a page from a context deeper than the
+	     current one.  */
+	  abort ();
+	}
+    }
+  --G.by_depth_in_use;
+
+  adjust_depth ();
+
   entry->next = G.free_pages;
   G.free_pages = entry;
 }
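The free_page hunk above removes a page from by_depth in constant time with the standard unordered-array trick: move the top element into the freed slot, fix its index_by_depth, and shrink the array, after which adjust_depth trims G.depth. A self-contained sketch of the removal step, using plain ints in place of page_entry pointers:

#include <stdio.h>

static int items[8] = { 10, 20, 30, 40 };
static unsigned int n = 4;

/* Remove items[i] in O(1): overwrite it with the last element and
   shrink.  Order is not preserved, which is fine for by_depth as long
   as the removed entry is at the same context depth as the top.  */
static void
remove_at (unsigned int i)
{
  items[i] = items[n - 1];
  --n;
}

int
main (void)
{
  unsigned int i;

  remove_at (1);		/* 20 is replaced by 40.  */
  for (i = 0; i < n; ++i)
    printf ("%d ", items[i]);	/* Prints: 10 40 30  */
  printf ("\n");
  return 0;
}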
@@ -920,6 +1045,14 @@ ggc_alloc (size)
 	  struct page_entry *new_entry;
 	  new_entry = alloc_page (order);
 
+	  new_entry->index_by_depth = G.by_depth_in_use;
+	  push_by_depth (new_entry, 0);
+
+	  /* We can skip context depths; if we do, make sure we go all
+	     the way to the new depth.  */
+	  while (new_entry->context_depth >= G.depth_in_use)
+	    push_depth (G.by_depth_in_use-1);
+
 	  /* If this is the only entry, it's also the tail.  */
 	  if (entry == NULL)
 	    G.page_tails[order] = new_entry;
@@ -1222,6 +1355,15 @@ init_ggc ()
       for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
 	size_lookup[i] = order;
     }
+
+  G.depth_in_use = 0;
+  G.depth_max = 10;
+  G.depth = (unsigned int *) xmalloc (G.depth_max * sizeof (unsigned int));
+
+  G.by_depth_in_use = 0;
+  G.by_depth_max = INITIAL_PTE_COUNT;
+  G.by_depth = (page_entry **) xmalloc (G.by_depth_max * sizeof (page_entry *));
+  G.save_in_use = (unsigned long **) xmalloc (G.by_depth_max * sizeof (unsigned long *));
 }
 
 /* Increment the `GC context'.  Objects allocated in an outer context
@@ -1264,7 +1406,7 @@ ggc_recalculate_in_use_p (p)
 
       /* Something is in use if it is marked, or if it was in use in a
 	 context further down the context stack.  */
-      p->in_use_p[i] |= p->save_in_use_p[i];
+      p->in_use_p[i] |= save_in_use_p (p)[i];
 
       /* Decrement the free object count for every object allocated.  */
       for (j = p->in_use_p[i]; j; j >>= 1)
@@ -1282,7 +1424,10 @@ void
 ggc_pop_context ()
 {
   unsigned long omask;
-  unsigned order, depth;
+  unsigned int depth, i, e;
+#ifdef ENABLE_CHECKING
+  unsigned int order;
+#endif
 
   depth = --G.context_depth;
   omask = (unsigned long)1 << (depth + 1);
@@ -1294,9 +1439,66 @@ ggc_pop_context ()
   G.context_depth_allocations &= omask - 1;
   G.context_depth_collections &= omask - 1;
 
-  /* Any remaining pages in the popped context are lowered to the new
-     current context; i.e. objects allocated in the popped context and
-     left over are imported into the previous context.  */
+  /* The G.depth array is shortened so that the last index is the
+     context_depth of the top element of by_depth.  */
+  if (depth+1 < G.depth_in_use)
+    e = G.depth[depth+1];
+  else
+    e = G.by_depth_in_use;
+
+  /* We might not have any PTEs of depth depth.  */
+  if (depth < G.depth_in_use)
+    {
+
+      /* First we go through all the pages at depth depth to
+	 recalculate the in use bits.  */
+      for (i = G.depth[depth]; i < e; ++i)
+	{
+	  page_entry *p;
+
+#ifdef ENABLE_CHECKING
+	  p = G.by_depth[i];
+
+	  /* Check that all of the pages really are at the depth that
+	     we expect.  */
+	  if (p->context_depth != depth)
+	    abort ();
+	  if (p->index_by_depth != i)
+	    abort ();
+#endif
+
+	  prefetch (&save_in_use_p_i (i+8));
+	  prefetch (&save_in_use_p_i (i+16));
+	  if (save_in_use_p_i (i))
+	    {
+	      p = G.by_depth[i];
+	      ggc_recalculate_in_use_p (p);
+	      free (save_in_use_p_i (i));
+	      save_in_use_p_i (i) = 0;
+	    }
+	}
+    }
+
+  /* Then, we reset all page_entries with a depth greater than depth
+     to be at depth.  */
+  for (i = e; i < G.by_depth_in_use; ++i)
+    {
+      page_entry *p = G.by_depth[i];
+
+      /* Check that all of the pages really are at the depth we
+	 expect.  */
+#ifdef ENABLE_CHECKING
+      if (p->context_depth <= depth)
+	abort ();
+      if (p->index_by_depth != i)
+	abort ();
+#endif
+      p->context_depth = depth;
+    }
+
+  adjust_depth ();
+
+#ifdef ENABLE_CHECKING
   for (order = 2; order < NUM_ORDERS; order++)
     {
       page_entry *p;
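The recalculation loop above prefetches the save_in_use slots 8 and 16 iterations ahead so the bitmap pointers are already in cache by the time they are tested; __builtin_prefetch only became available in GCC 3.1, hence the GCC_VERSION guard earlier in the patch. A minimal sketch of the look-ahead pattern follows; the distance 8 is a tuning choice, not part of the patch, and the hint may harmlessly run past the end of the array:

#include <stdio.h>

#if defined (__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
#define prefetch(X) __builtin_prefetch (X)
#else
#define prefetch(X) ((void) (X))
#endif

/* Sum an array while hinting that data needed a few iterations from
   now should be brought into cache.  */
static long
sum_with_prefetch (const long *a, unsigned int n)
{
  long s = 0;
  unsigned int i;

  for (i = 0; i < n; ++i)
    {
      prefetch (&a[i + 8]);	/* Hint only; never dereferenced here.  */
      s += a[i];
    }
  return s;
}

int
main (void)
{
  static long a[100];
  unsigned int i;

  for (i = 0; i < 100; ++i)
    a[i] = i;
  printf ("%ld\n", sum_with_prefetch (a, 100));	/* Prints 4950.  */
  return 0;
}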
@@ -1304,18 +1506,12 @@ ggc_pop_context ()
       for (p = G.pages[order]; p != NULL; p = p->next)
 	{
 	  if (p->context_depth > depth)
-	    p->context_depth = depth;
-
-	  /* If this page is now in the topmost context, and we'd
-	     saved its allocation state, restore it.  */
-	  else if (p->context_depth == depth && p->save_in_use_p)
-	    {
-	      ggc_recalculate_in_use_p (p);
-	      free (p->save_in_use_p);
-	      p->save_in_use_p = 0;
-	    }
+	    abort ();
+	  else if (p->context_depth == depth && save_in_use_p (p))
+	    abort ();
 	}
     }
+#endif
 }
 
 /* Unmark all objects.  */
@@ -1345,9 +1541,9 @@ clear_marks ()
 	     marks.  So, back them up first.  */
 	  if (p->context_depth < G.context_depth)
 	    {
-	      if (! p->save_in_use_p)
-		p->save_in_use_p = xmalloc (bitmap_size);
-	      memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
+	      if (! save_in_use_p (p))
+		save_in_use_p (p) = xmalloc (bitmap_size);
+	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
 	    }
 
 	  /* Reset the number of free objects and clear the
@@ -1781,6 +1977,58 @@ ggc_pch_finish (d, f)
   free (d);
 }
 
+/* Move the PCH PTE entries just added to the end of by_depth, to the
+   front.  */
+
+static void
+move_ptes_to_front (count_old_page_tables, count_new_page_tables)
+     int count_old_page_tables;
+     int count_new_page_tables;
+{
+  unsigned i;
+
+  /* First, we swap the new entries to the front of the varrays.  */
+  page_entry **new_by_depth;
+  unsigned long **new_save_in_use;
+
+  new_by_depth = (page_entry **) xmalloc (G.by_depth_max * sizeof (page_entry *));
+  new_save_in_use = (unsigned long **) xmalloc (G.by_depth_max * sizeof (unsigned long *));
+
+  memcpy (&new_by_depth[0],
+	  &G.by_depth[count_old_page_tables],
+	  count_new_page_tables * sizeof (void *));
+  memcpy (&new_by_depth[count_new_page_tables],
+	  &G.by_depth[0],
+	  count_old_page_tables * sizeof (void *));
+  memcpy (&new_save_in_use[0],
+	  &G.save_in_use[count_old_page_tables],
+	  count_new_page_tables * sizeof (void *));
+  memcpy (&new_save_in_use[count_new_page_tables],
+	  &G.save_in_use[0],
+	  count_old_page_tables * sizeof (void *));
+
+  free (G.by_depth);
+  free (G.save_in_use);
+
+  G.by_depth = new_by_depth;
+  G.save_in_use = new_save_in_use;
+
+  /* Now update all the index_by_depth fields.  */
+  for (i = G.by_depth_in_use; i > 0; --i)
+    {
+      page_entry *p = G.by_depth[i-1];
+      p->index_by_depth = i-1;
+    }
+
+  /* And last, we update the depth pointers in G.depth.  The first
+     entry is already 0, and context 0 entries always start at index
+     0, so there is nothing to update in the first slot.  We need a
+     second slot only if we have old PTEs, and if we do, they start
+     at index count_new_page_tables.  */
+  if (count_old_page_tables)
+    push_depth (count_new_page_tables);
+}
+
 void
 ggc_pch_read (f, addr)
      FILE *f;
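move_ptes_to_front above is a block rotation through a scratch buffer: the count_new_page_tables entries at the tail are copied to the front, the count_old_page_tables entries follow them, and every index_by_depth is then refreshed. A reduced sketch of the rotation with plain ints; tail_to_front is a hypothetical name, not from the patch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Rotate a[] so its last NEW_N elements come first, using a scratch
   copy, the same shape as move_ptes_to_front.  */
static void
tail_to_front (int *a, unsigned int old_n, unsigned int new_n)
{
  int *tmp = (int *) malloc ((old_n + new_n) * sizeof (int));

  memcpy (&tmp[0], &a[old_n], new_n * sizeof (int));
  memcpy (&tmp[new_n], &a[0], old_n * sizeof (int));
  memcpy (a, tmp, (old_n + new_n) * sizeof (int));
  free (tmp);
}

int
main (void)
{
  int a[5] = { 1, 2, 3, 90, 91 };  /* 3 old entries, then 2 new ones.  */
  unsigned int i;

  tail_to_front (a, 3, 2);
  for (i = 0; i < 5; ++i)
    printf ("%d ", a[i]);	/* Prints: 90 91 1 2 3  */
  printf ("\n");
  return 0;
}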
@@ -1789,9 +2037,13 @@ ggc_pch_read (f, addr)
   struct ggc_pch_ondisk d;
   unsigned i;
   char *offs = addr;
+  unsigned long count_old_page_tables;
+  unsigned long count_new_page_tables;
+
+  count_old_page_tables = G.by_depth_in_use;
 
-  /* We've just read in a PCH file.  So, every object that used to be allocated
-     is now free.  */
+  /* We've just read in a PCH file.  So, every object that used to be
+     allocated is now free.  */
   clear_marks ();
 #ifdef GGC_POISON
   poison_pages ();
@@ -1822,10 +2074,10 @@ ggc_pch_read (f, addr)
       size_t bytes;
       size_t num_objs;
       size_t j;
-
+
       if (d.totals[i] == 0)
 	continue;
-
+
       bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
       num_objs = bytes / OBJECT_SIZE (i);
       entry = xcalloc (1, (sizeof (struct page_entry)
@@ -1856,8 +2108,20 @@ ggc_pch_read (f, addr)
       else
 	G.pages[i] = entry;
       G.page_tails[i] = entry;
+
+      /* We start off by just adding all the new information to the
+	 end of the varrays; later, we will move the new information
+	 to the front of the varrays, as the PCH page tables are at
+	 context 0.  */
+      push_by_depth (entry, 0);
     }
 
+  /* Now, we update the various data structures that speed page table
+     handling.  */
+  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
+
+  move_ptes_to_front (count_old_page_tables, count_new_page_tables);
+
   /* Update the statistics.  */
   G.allocated = G.allocated_last_gc = offs - (char *)addr;
 }