From d7fa1e764db0aa3215414f5fbf2fec27416beef9 Mon Sep 17 00:00:00 2001
From: Howard Chu
Date: Mon, 1 Jan 2007 01:00:19 +0000
Subject: [PATCH] partial revert - keep acquiring BDB lock in lru_purge.

---
 servers/slapd/back-bdb/back-bdb.h |  3 +-
 servers/slapd/back-bdb/cache.c    | 63 ++++++++++++++++++++-----------
 servers/slapd/back-bdb/init.c     | 10 +++++
 3 files changed, 52 insertions(+), 24 deletions(-)

diff --git a/servers/slapd/back-bdb/back-bdb.h b/servers/slapd/back-bdb/back-bdb.h
index ae25890a90..f25f247da1 100644
--- a/servers/slapd/back-bdb/back-bdb.h
+++ b/servers/slapd/back-bdb/back-bdb.h
@@ -127,9 +127,10 @@ typedef struct bdb_cache {
 	int		c_minfree;
 	int		c_eiused;	/* EntryInfo's in use */
 	int		c_leaves;	/* EntryInfo leaf nodes */
+	u_int32_t	c_locker;	/* used by lru cleaner */
 	EntryInfo	c_dntree;
 	EntryInfo	*c_eifree;	/* free list */
-	Avlnode		*c_idtree;
+	Avlnode		*c_idtree;
 	EntryInfo	*c_lruhead;	/* lru - add accessed entries here */
 	EntryInfo	*c_lrutail;	/* lru - rem lru entries from here */
 	ldap_pvt_thread_rdwr_t c_rwlock;
diff --git a/servers/slapd/back-bdb/cache.c b/servers/slapd/back-bdb/cache.c
index 160de89ae1..77b816b1e8 100644
--- a/servers/slapd/back-bdb/cache.c
+++ b/servers/slapd/back-bdb/cache.c
@@ -556,6 +556,7 @@ int hdb_cache_load(
 static void
 bdb_cache_lru_purge( struct bdb_info *bdb )
 {
+	DB_LOCK		lock, *lockp;
 	EntryInfo *elru, *elnext;
 	int count, islocked;
 
@@ -568,6 +569,12 @@ bdb_cache_lru_purge( struct bdb_info *bdb )
 		return;
 	}
 
+	if ( bdb->bi_cache.c_locker ) {
+		lockp = &lock;
+	} else {
+		lockp = NULL;
+	}
+
 	count = 0;
 	/* Look for an unused entry to remove */
 	for (elru = bdb->bi_cache.c_lruhead; elru; elru = elnext ) {
@@ -592,33 +599,43 @@
 			continue;
 		}
 
+		/* entryinfo is locked */
 		islocked = 1;
 
-		/* Free entry for this node if it's present */
-		if ( elru->bei_e ) {
-			elru->bei_e->e_private = NULL;
-#ifdef SLAP_ZONE_ALLOC
-			bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
-#else
-			bdb_entry_return( elru->bei_e );
-#endif
-			elru->bei_e = NULL;
-			count++;
-		}
-		/* ITS#4010 if we're in slapcat, and this node is a leaf
-		 * node, free it.
-		 *
-		 * FIXME: we need to do this for slapd as well, (which is
-		 * why we compute bi_cache.c_leaves now) but at the moment
-		 * we can't because it causes unresolvable deadlocks.
+		/* If we can successfully writelock it, then
+		 * the object is idle.
 		 */
-		if ( slapMode & SLAP_TOOL_READONLY ) {
-			if ( !elru->bei_kids ) {
-				bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
-				bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
-				islocked = 0;
+		if ( bdb_cache_entry_db_lock( bdb->bi_dbenv,
+			bdb->bi_cache.c_locker, elru, 1, 1, lockp ) == 0 ) {
+
+			/* Free entry for this node if it's present */
+			if ( elru->bei_e ) {
+				elru->bei_e->e_private = NULL;
+#ifdef SLAP_ZONE_ALLOC
+				bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
+#else
+				bdb_entry_return( elru->bei_e );
+#endif
+				elru->bei_e = NULL;
+				count++;
+			}
+			bdb_cache_entry_dbunlock( bdb, lockp );
+
+			/* ITS#4010 if we're in slapcat, and this node is a leaf
+			 * node, free it.
+			 *
+			 * FIXME: we need to do this for slapd as well, (which is
+			 * why we compute bi_cache.c_leaves now) but at the moment
+			 * we can't because it causes unresolvable deadlocks.
+			 */
+			if ( slapMode & SLAP_TOOL_READONLY ) {
+				if ( !elru->bei_kids ) {
+					bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
+					bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
+					islocked = 0;
+				}
+				/* Leave node on LRU list for a future pass */
 			}
-			/* Leave node on LRU list for a future pass */
 		}
 
 		if ( islocked )
diff --git a/servers/slapd/back-bdb/init.c b/servers/slapd/back-bdb/init.c
index 194c833cff..624f157e77 100644
--- a/servers/slapd/back-bdb/init.c
+++ b/servers/slapd/back-bdb/init.c
@@ -423,6 +423,10 @@
 		goto fail;
 	}
 
+	if ( !quick ) {
+		XLOCK_ID(bdb->bi_dbenv, &bdb->bi_cache.c_locker);
+	}
+
 	/* monitor setup */
 	rc = bdb_monitor_db_open( be );
 	if ( rc != 0 ) {
@@ -486,6 +490,12 @@
 
 	/* close db environment */
 	if( bdb->bi_dbenv ) {
+		/* Free cache locker if we enabled locking */
+		if ( !( slapMode & SLAP_TOOL_QUICK )) {
+			XLOCK_ID_FREE(bdb->bi_dbenv, bdb->bi_cache.c_locker);
+			bdb->bi_cache.c_locker = 0;
+		}
+
		/* force a checkpoint, but not if we were ReadOnly,
 		 * and not in Quick mode since there are no transactions there.
 		 */
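Note on the restored locking pattern: bdb_db_open() now allocates a private
locker ID for the cache (XLOCK_ID into bi_cache.c_locker), bdb_cache_lru_purge()
uses it to attempt a write lock on each LRU candidate, frees the cached Entry
only when the lock is granted (i.e. the entry is idle), and drops the lock again
before moving on; bdb_db_close() releases the locker with XLOCK_ID_FREE. The
fragment below is a minimal standalone sketch of that idea written directly
against the Berkeley DB lock subsystem, not the slapd wrappers; purge_locker
and entry_id are invented names used only for illustration.

/*
 * Standalone sketch (not slapd code): mimic what bdb_cache_lru_purge()
 * does with bi_cache.c_locker.  Only the DB_ENV lock calls are real
 * Berkeley DB API; purge_locker/entry_id are hypothetical.
 */
#include <stdio.h>
#include <string.h>
#include <db.h>

int
main( void )
{
	DB_ENV *env;
	DB_LOCK lock;
	DBT obj;
	u_int32_t purge_locker;		/* plays the role of bi_cache.c_locker */
	u_int32_t entry_id = 42;	/* stand-in for one cached EntryInfo */
	int rc;

	/* Private, in-memory environment with just the lock subsystem */
	if ( db_env_create( &env, 0 ) != 0 )
		return 1;
	if ( env->open( env, NULL,
		DB_CREATE | DB_INIT_LOCK | DB_PRIVATE, 0 ) != 0 )
		return 1;

	/* bdb_db_open() does this via XLOCK_ID() when not in quick mode */
	env->lock_id( env, &purge_locker );

	/* The lock "object" is whatever uniquely names the cache entry */
	memset( &obj, 0, sizeof( obj ));
	obj.data = &entry_id;
	obj.size = sizeof( entry_id );

	/* Non-blocking write-lock attempt: success means the entry is idle */
	rc = env->lock_get( env, purge_locker, DB_LOCK_NOWAIT,
		&obj, DB_LOCK_WRITE, &lock );
	if ( rc == 0 ) {
		/* ... safe to free the cached Entry here ... */
		printf( "entry %u is idle, evicting\n", (unsigned) entry_id );
		env->lock_put( env, &lock );
	} else if ( rc == DB_LOCK_NOTGRANTED ) {
		/* Someone still holds it: leave it on the LRU for a later pass */
		printf( "entry %u busy, skipping\n", (unsigned) entry_id );
	}

	/* bdb_db_close() releases the locker via XLOCK_ID_FREE() */
	env->lock_id_free( env, purge_locker );
	env->close( env, 0 );
	return 0;
}

In quick mode slapd never calls XLOCK_ID, which is why lru_purge checks
bi_cache.c_locker and passes a NULL lockp in that case, and why the
XLOCK_ID_FREE in bdb_db_close() is guarded by SLAP_TOOL_QUICK.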