Import latest from devel including

	updated connection close deadlock avoidance
	SSF ACL fixes
	updated SQL docs
	updated client tool usage
This commit is contained in:
Kurt Zeilenga 2000-09-05 19:54:37 +00:00
parent 1c9ae8a640
commit 1623b501a0
13 changed files with 241 additions and 131 deletions

View File

@ -1,8 +1,9 @@
OpenLDAP 2.0 Change Log
OpenLDAP 2.0.X Engineering
Fixed StartTLS & ldaps:// client SDK
Fixed StartTLS & ldaps:// client SDK and command usage
Fixed slapd session close deadlock (ITS#704)
Fixed slapd SSF ACLs
Updated slapd MAXARGS to 200
Updated slapd nisNetgroupTriple validation routine (ITS#705)
Updated slapd bootParameter validation routine (ITS#706)
@ -11,6 +12,7 @@ OpenLDAP 2.0.X Engineering
Documentation
Updated manual page references
Updated release documents
Updated back-sql documents
Updated "to do" list
OpenLDAP 2.0.0 Release

View File

@ -63,6 +63,7 @@ usage( const char *s )
" -D binddn bind DN\n"
" -f file read operations from `file'\n"
" -h host LDAP server\n"
" -H URI LDAP Uniform Resource Indentifier(s)\n"
" -I use SASL Interactive mode\n"
" -k use Kerberos authentication\n"
" -K like -k, but do only step 1 of the Kerberos bind\n"

View File

@ -110,6 +110,7 @@ usage( const char *prog )
" -D binddn bind DN\n"
" -f file read operations from `file'\n"
" -h host LDAP server\n"
" -H URI LDAP Uniform Resource Indentifier(s)\n"
" -I use SASL Interactive mode\n"
" -k use Kerberos authentication\n"
" -K like -k, but do only step 1 of the Kerberos bind\n"

View File

@ -77,6 +77,7 @@ usage( const char *s )
" -D binddn bind DN\n"
" -f file read operations from `file'\n"
" -h host LDAP server\n"
" -H URI LDAP Uniform Resource Indentifier(s)\n"
" -I use SASL Interactive mode\n"
" -k use Kerberos authentication\n"
" -K like -k, but do only step 1 of the Kerberos bind\n"

View File

@ -70,6 +70,7 @@ usage( const char *s )
" -D binddn bind DN\n"
" -f file read operations from `file'\n"
" -h host LDAP server\n"
" -H URI LDAP Uniform Resource Indentifier(s)\n"
" -I use SASL Interactive mode\n"
" -k use Kerberos authentication\n"
" -K like -k, but do only step 1 of the Kerberos bind\n"

View File

@ -4,7 +4,7 @@
#
# See ldap.conf(5) for details
# This file should be world readable.
# This file should be world readable but not world writable.
#BASE dc=example, dc=com
#URI ldap://ldap.example.com ldap://ldap-master.example.com:666

View File

@ -637,8 +637,8 @@ acl_mask(
}
if ( b->a_authz.sai_ssf ) {
Debug( LDAP_DEBUG_ACL, "<= check a_authz.sai_ssf: %ud\n",
b->a_authz.sai_ssf, 0, 0 );
Debug( LDAP_DEBUG_ACL, "<= check a_authz.sai_ssf: ACL %u > OP %u\n",
b->a_authz.sai_ssf, op->o_ssf, 0 );
if ( b->a_authz.sai_ssf > op->o_ssf ) {
continue;
@ -647,8 +647,8 @@ acl_mask(
if ( b->a_authz.sai_transport_ssf ) {
Debug( LDAP_DEBUG_ACL,
"<= check a_authz.sai_transport_ssf: %ud\n",
b->a_authz.sai_transport_ssf, 0, 0 );
"<= check a_authz.sai_transport_ssf: ACL %u > OP %u\n",
b->a_authz.sai_transport_ssf, op->o_transport_ssf, 0 );
if ( b->a_authz.sai_transport_ssf > op->o_transport_ssf ) {
continue;
@ -657,8 +657,8 @@ acl_mask(
if ( b->a_authz.sai_tls_ssf ) {
Debug( LDAP_DEBUG_ACL,
"<= check a_authz.sai_tls_ssf: %ud\n",
b->a_authz.sai_tls_ssf, 0, 0 );
"<= check a_authz.sai_tls_ssf: ACL %u > OP %u\n",
b->a_authz.sai_tls_ssf, op->o_tls_ssf, 0 );
if ( b->a_authz.sai_tls_ssf > op->o_tls_ssf ) {
continue;
@ -667,8 +667,8 @@ acl_mask(
if ( b->a_authz.sai_sasl_ssf ) {
Debug( LDAP_DEBUG_ACL,
"<= check a_authz.sai_sasl_ssf: %ud\n",
b->a_authz.sai_sasl_ssf, 0, 0 );
"<= check a_authz.sai_sasl_ssf: ACL %u > OP %u\n",
b->a_authz.sai_sasl_ssf, op->o_sasl_ssf, 0 );
if ( b->a_authz.sai_sasl_ssf > op->o_sasl_ssf ) {
continue;
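For reference, these SSF checks correspond to slapd.conf ACL "by" clauses
carrying strength factors; a hedged sketch (the values 128/56 and the access
levels are purely illustrative):
access to *
	by ssf=128 self write
	by sasl_ssf=56 users read
	by * none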

View File

@ -1236,19 +1236,19 @@ print_access( Access *b )
/* Security Strength Factors */
if ( b->a_authz.sai_ssf ) {
fprintf( stderr, " ssf=%ud",
fprintf( stderr, " ssf=%u",
b->a_authz.sai_ssf );
}
if ( b->a_authz.sai_transport_ssf ) {
fprintf( stderr, " transport_ssf=%ud",
fprintf( stderr, " transport_ssf=%u",
b->a_authz.sai_transport_ssf );
}
if ( b->a_authz.sai_tls_ssf ) {
fprintf( stderr, " tls_ssf=%ud",
fprintf( stderr, " tls_ssf=%u",
b->a_authz.sai_tls_ssf );
}
if ( b->a_authz.sai_sasl_ssf ) {
fprintf( stderr, " sasl_ssf=%ud",
fprintf( stderr, " sasl_ssf=%u",
b->a_authz.sai_sasl_ssf );
}

View File

@ -6,25 +6,10 @@
that it is guaranteed to work only when such column goes after last bound
column. Or should we get ALL columns with SQLGetData (then something like
_SQLFetchAsStrings() wrapper would do SQLGetData() for all columns)...
4) in some cases (particularly, when using OpenLink Generic ODBC driver with
3) in some cases (particularly, when using OpenLink Generic ODBC driver with
MS SQL Server), it returns "Function sequence error" after all records are
fetched. I really don't know what it means, and after all
- it works with any other driver I tried
5) referral handling. this bug actually addresses all backends, as I can
understand. draft-ietf-ldapext-namedref-xx.txt says that referral should be
returned for ANY object containing "ref" attribute. And is_entry_referral
macro designed for "referral" objectclass only. This limits usability of
referrals too much. For instance, I could want to replicate some subtree on
another server, storing just "searchable" attributes + referral to full
object, and then use this subtree as kind of index for query routing.
If search returns referrals only for "referral" entries - I cannot do such
thing
6) DO NOT EVER USE -O2 option (or any other optimization) under Un*x/gcc!!!
I have spent days trying to catch weird bugs, which went away with optimization off
7) The same thing that works on RedHat 6.0 (glibc 2.1.1), dumps core on
6.1 (glibc 2.1.2) (the same code behaves differently when built on 6.0 and 6.1)
my problem was solved by upgrading iODBC to 3.0 beta - but it is kinda strange
that beta works better than release (and release still works fine on 6.0)
9) ldapsearch sometimes refuses to show some attributes ("NOT PRINTABLE" diags)
on Win32 (on linux everything's fine -- at least with mySQL)
4) ldapsearch sometimes refuses to show some attributes ("NOT PRINTABLE" diags)
on Win32 (on linux everything's fine)

View File

@ -2,37 +2,60 @@ CONTENT
1. Purpose
2. Metainformation used
3. Typical back-sql operation
4. Perspectives on back-sql as effective storage backend (not MAPPER)
4. Several important common techniques
1. Purpose
Primary purpose of this backend is to PRESENT information stored in some RDBMS
as an LDAP subtree. It is being designed to be tunable to virtually any
relational schema without having to change source. It is NOT designed as backend
that uses RDBMS to store LDAP data (though you can use it for this purpose, it
will definitely be not the most effective way).
But THIS backend primarily targets the situation when you ALREADY HAVE some
data in one or more RDBMSes of one or more different vendors on one or more
different hosts and operating systems, having one or more different
relational schemas. These could be data used by different software, which you
want to integrate, or just local parts of bigger information project. Using
LDAP standalone server with back-sql as middleware, you can integrate this
heterogeneous information as a single distributed LDAP tree, and even organize
data exchange between nodes, not having to worry about unique id's, different
schemas etc (****see authordn attribute in samples, and dts_ldap utility).
Or, you could simply want to export some information like ISP database to LDAP,
to authenticate users, make email lookups or whatever...
as an LDAP subtree without any programming (some SQL and maybe stored
procedures can't be considered programming, anyway ;).
That is, for example, when you (some ISP) have account
information you use in an RDBMS, and want to use modern solutions that expect
such information in LDAP (to authenticate users, make email lookups etc.).
Or you want to synchronize or distribute information between different
sites/applications that use RDBMSes and/or LDAP. Or whatever else...
It is NOT designed as a general-purpose backend that uses an RDBMS instead of
BerkeleyDB (as the standard back-ldbm does), though it can be used as such
with several limitations. You can take a look at
http://www.openldap.org/faq/index.cgi?file=378
(OpenLDAP FAQ-O-Matic/General LDAP FAQ/Directories vs. conventional databases)
to find out more on this point.
The idea (detailed below) is to use some metainformation to translate
LDAP queries into SQL queries, leaving the relational schema untouched, so that
old applications can continue using it without any modifications. This allows
SQL and LDAP applications to interoperate without replication, and to exchange
data as needed.
Back-sql is designed to be tunable to virtually any relational schema without
having to change source (through the metainformation mentioned above).
Also, it uses ODBC to connect to RDBMSes, and is highly configurable for the
SQL dialects RDBMSes may use, so it may be used for integration and
distribution of data on different RDBMSes, OSes, hosts etc. - in other words,
in a highly heterogeneous environment.
2. Metainformation used
***
Almost everything mentioned later is illustrated in the example, which is located
in backsql/RDBMS_DEPENDENT directory, and contains scripts for generating sample
in backsql/rdbms_depend directory, and contains scripts for generating sample
database for Oracle, MS SQL Server and mySQL.
***
First thing that one must arrange for himself is what set of objectclasses
can present your RDBMS information. The easiest way is to create objectclass
for each entity you had in ER-diagram when designing your relational schema.
Or you could choose some other way...
The first thing that one must decide for himself is what set of LDAP
objectclasses can present your RDBMS information.
The easiest way is to create an objectclass for each entity you had
in the ER-diagram when designing your relational schema.
Any relational schema, no matter how normalized it is, was designed after
some model of your application's domain (for instance, accounts, services etc.
in an ISP), and is used in terms of its entities, not just the tables of the
normalized schema.
It means that for every attribute of every such entity there is an effective
SQL query that loads its values.
Also you might want your objectclasses to conform to some of the standard
schemas like inetOrgPerson etc.
Nevertheless, when you think it out, we must define a way to translate LDAP
operation requests into (series of) SQL queries. Let us deal with the SEARCH
operation.
@ -67,28 +90,34 @@ SELECT CONCAT(persons.first_name,' ',persons.last_name) as cn FROM persons WHERE
for telephoneNumber we can use:
SELECT phones.phone as telephoneNumber FROM persons,phones WHERE persons.id=phones.pers.id and persons.id=?
if we wanted to service LDAP request with filter like (telephoneNumber=123*),
If we wanted to service LDAP request with filter like (telephoneNumber=123*),
we would construct something like:
SELECT ... FROM persons,phones WHERE persons.id=phones.pers.id and persons.id=? and phones.phone like '123%'
So, if we had information about what tables contain values for given each
So, if we had information about what tables contain values for each
attribute, how to join these tables and arrange these values, we could try
to automatically generate such statements, and translate search filters
to SQL clauses
to SQL WHERE clauses.
To store such information, we add three more tables to our schema,
and fill them with data (see samples):
ldap_objclasses
ldap_oc_mappings (some columns are not listed for clarity)
---------------
id=1
name="person"
keytbl="persons"
keycol="id"
create_proc="{call create_person(?)}"
delete_proc="{call delete_person(?)}"
ldap_attrs
This table defines the mapping between an objectclass (its name held in the
"name" column) and the table that holds the primary key for the corresponding
entities. For instance, in our example the person entity, which we are trying
to present as the "person" objectclass, resides in two tables (persons and
phones), and is identified by the persons.id column (which we will call the
primary key for this entity). keytbl and keycol thus contain "persons" (the
name of the table) and "id" (the name of the column).
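For orientation, a minimal sketch of this table in generic SQL - the
authoritative definition lives in backsql_create.sql, and the column types
and sizes here are guesses:
CREATE TABLE ldap_oc_mappings (
	id integer NOT NULL PRIMARY KEY,
	name varchar(64) NOT NULL,
	keytbl varchar(64) NOT NULL,
	keycol varchar(64) NOT NULL,
	create_proc varchar(255),
	delete_proc varchar(255)
);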
ldap_attr_mappings (some columns are not listed for clarity)
-----------
id=1
oc_id=1
@ -96,39 +125,73 @@ name="cn"
sel_expr="CONCAT(persons.first_name,' ',persons.last_name)"
from_tbls="persons"
join_where=NULL
add_proc=...
delete_proc=...
************
id=<n>
oc_id=1
oc_map_id=1
name="telephoneNumber"
expr="phones.phone"
sel_expr="phones.phone"
from_tbls="persons,phones"
join_where="phones.pers_id=persons.id"
add_proc=...
delete_proc=...
This table defines mappings between LDAP attributes and the SQL queries that
load their values. Note that, unlike in the LDAP schema, these are not
*attribute types* - the attribute "cn" for the "person" objectclass can well
have its values in a different table than "cn" for some other objectclass, so
attribute mappings depend on objectclass mappings (unlike attribute types in
the LDAP schema, which are indifferent to objectclasses). Thus, we have the
oc_map_id column with a link to the oc_mappings table.
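As a hedged illustration, the telephoneNumber mapping above could be loaded
with something like this (column list trimmed to those discussed here):
INSERT INTO ldap_attr_mappings (id, oc_map_id, name, sel_expr, from_tbls, join_where)
VALUES (2, 1, 'telephoneNumber', 'phones.phone', 'persons,phones',
	'phones.pers_id=persons.id');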
Now we cut the SQL query that loads values for a given attribute into 3 parts.
The first goes into the sel_expr column - this is the expression we had between
the SELECT and FROM keywords, which defines WHAT to load.
Next is the table list - the text between the FROM and WHERE keywords. It may
contain aliases for convenience (see examples).
The last is the part of the WHERE clause, which (if it exists at all) expresses
the condition for joining the table containing values with the table containing
the primary key (foreign key equality and such). If values are in the same
table as the primary key, this column is left NULL (as for the cn attribute
above).
Having this information in parts, we are able not only to construct queries
that load attribute values by entry id (for this we could have stored the SQL
query as a whole), but also to construct queries that load the ids of objects
that correspond to a given search filter (or at least part of it).
See below for examples.
ldap_entries
------------
id=1
dn=<dn you choose>
oc_map_id=...
parent=<parent record id>
keyval=<value of primary key>
First two tables contain structured information about constructing queries like
those we made in example. The latter (ldap_entries), contains information about
structure of LDAP tree, referencing actual information by key value. Having
objectclass id, we can determine table and column which contain primary keys,
and load data for the entry attributes using our queries.
This table defines mappings between the DNs of entries in your LDAP tree
and the values of primary keys for the corresponding relational data. It has
a recursive structure (the parent column references the id column of the same
table), which allows you to add any tree structure(s) on top of your flat
relational data.
Having the id of the objectclass mapping, we can determine the table and
column for the primary key; keyval stores its value, thus defining the exact
tuple corresponding to the LDAP entry with this DN.
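For example, an entry for a person with primary key 1 might be registered like
this (the DN is in the style of the samples; parent=0 is assumed here to mark
a suffix-level entry):
INSERT INTO ldap_entries (id, dn, oc_map_id, parent, keyval)
VALUES (1, 'cn=Mitya Kovalev,o=sql,c=RU', 1, 0, 1);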
Note that this design (see the exact SQL table creation query) implies one
important constraint - the key must be an integer. But all that I know about
well-designed schemas makes me think this is not a very narrow restriction ;)
If anyone needs support for different key types - he may want to write
a patch and submit it to the OpenLDAP ITS, and then I'll include it.
Also, several people complained that they don't really need a very structured
tree, and they don't want to update one more table every time they add or
delete an instance in the relational schema. They can use a view instead of a
real table, something like this:
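(a plausible sketch, assuming a persons table as in the samples, an RDBMS that
supports views, and a CONCAT() function):
CREATE VIEW ldap_entries (id, dn, oc_map_id, parent, keyval) AS
SELECT persons.id,
	CONCAT('cn=', persons.first_name, ' ', persons.last_name, ',o=sql,c=RU'),
	1, 0, persons.id
FROM persons;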
3. Typical back-sql operation
Having the metainformation loaded, back-sql uses these tables to determine a
set of primary keys of candidates (depending on search scope and filter). It
tries to do this for each objectclass registered in ldap_oc_mappings.
Example:
for our query with filter (telephoneNumber=123*) we would get following
query (which loads candidate IDs)
for our query with filter (telephoneNumber=123*) we would get following
query generated (which loads candidate IDs)
SELECT ldap_entries.id,persons.id, 'person' AS objectClass, ldap_entries.dn AS dn FROM ldap_entries,persons,phones WHERE persons.id=ldap_entries.keyval AND ldap_entries.objclass=? AND ldap_entries.parent=? AND phones.pers_id=persons.id AND (phones.phone LIKE '123%')
(for ONELEVEL search)
or "... AND dn=?" (for BASE search)
@ -139,26 +202,41 @@ like
SELECT phones.phone AS telephoneNumber FROM persons,phones WHERE persons.id=? AND phones.pers_id=persons.id
Then, we use test_filter() to test entry for full LDAP search filter match (since
we cannot effectively make sense of SYNTAX of corresponding LDAP schema attribute,
we translate the filter into most relaxed SQL condition to filter candidates),
and send it to user.
Then, we use test_filter() from frontend API to test entry for full
LDAP search filter match (since we cannot effectively make sense of SYNTAX
of corresponding LDAP schema attribute, we translate the filter into most relaxed
SQL condition to filter candidates), and send it to user.
ADD, DELETE and MODIFY operations are also performed using per-attribute
metainformation (add_proc etc.). In those fields one can specify an SQL
statement or stored procedure call which can add or delete a given value of a
given attribute, using a given entry's keyval (see examples - mostly Oracle
and MS SQL, since there are no stored procedures in mySQL).
We just add more columns to oc_mappings and attr_mappings, holding statements
to execute (like create_proc, add_proc, del_proc etc.), and flags governing
the order of parameters passed to those statements.
Please see the samples to find out what parameters are passed, and for other
information on this matter - they are self-explanatory for those familiar
with the concept expressed above.
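For instance, with hypothetical procedure names, in the same ODBC call-escape
syntax as create_proc above:
add_proc="{call add_person_phone(?,?)}"
delete_proc="{call delete_person_phone(?,?)}"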
4. Perspectives on back-sql as effective storage backend (not MAPPER)
Though as I said, back-sql is intended for presenting existing databases to LDAP,
and as such is not most effective in presenting LDAP data to RDBMS, I have a couple
of ideas on this point, and going to implement this in back-sql using
#ifdefs (one that wants to do RDBMS->LDAP, defines one flag, one that wants
LDAP->RDBMS, defines another).
These tasks have much in common (RDBMS access,connection handling etc), but
latter does not need so much additional metainformation.
For instance, it may have one table for each attribute type in LDAP schema,
and use ldap_entries analog to present tree structure... Later this functionality
will be described more closely...
4. Several common techniques
First of all, let's remember that, among other major departures from the
complete LDAP data model, the concept above does not directly support such
things as multiple objectclasses per entry, or referrals.
Fortunately, they are easy to accommodate in this scheme. Back-sql suggests two
more tables being added to the schema -
ldap_entry_objectclasses (entry_id, oc_name), and ldap_referrals (entry_id, url).
The first contains any number of objectclass names that the corresponding
entries will be found by, in addition to the one mentioned in the mapping.
Back-sql automatically adds an attribute mapping for the "objectclass"
attribute to each objectclass mapping, which loads values from this table.
So you may, for instance, have a mapping for inetOrgPerson, and use it for
queries for the "person" objectclass...
The second table contains any number of referrals associated with a given
entry. Back-sql automatically adds an attribute mapping for the "ref"
attribute to each objectclass mapping, which loads values from this table.
So, if you add the objectclass "referral" to an entry, and create one or more
tuples in ldap_referrals for that entry (they will be seen as values of the
"ref" attribute), you will have slapd return a referral, as described in the
Administrators Guide.
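In generic SQL, hedged sketches of these two tables (column names as given
above; the types are guesses):
CREATE TABLE ldap_entry_objclasses (
	entry_id integer NOT NULL,
	oc_name varchar(64) NOT NULL
);
CREATE TABLE ldap_referrals (
	entry_id integer NOT NULL,
	url varchar(255) NOT NULL
);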

View File

@ -1,32 +1,65 @@
1. Build
To build slapd with back-sql under Unix you need to build and install
iODBC 2.50.3 (later versions should probably work). Then, run
"configure <other options you need> --enable-sql",
iODBC 2.50.3 (later versions should probably work, but not earlier).
Then, at top of OpenLDAP source tree, run
"configure <other options you need> --enable-sql", then "make" -
this should build back-sql-enabled slapd, provided that you have iODBC
libraries and include files in include/library paths.
libraries and include files in include/library paths, then "make install"...
In other words, follow the installation procedure described in the OpenLDAP
Administrators Guide, adding the --enable-sql option to configure, and
having the iODBC libraries installed and accessible to the compiler.
Under Win32/MSVC++, I modified the workspace so that back-sql is built into
slapd automatically, since MS odbc32 is included in standard library pack,
slapd automatically, since MS odbc32.dll is included in standard library pack,
and it does no harm even if you don't plan to use it. I also could provide
precompiled executables for those who don't have MSVC.
Note that the Win32 port of OpenLDAP itself is experimental, and thus doesn't
provide a very convenient build environment (yet).
2. Tune datasources and slapd.conf
Next, you need to define an ODBC datasource with the data you want to publish
with the help of back-sql. Assuming that you have your data in some
SQL-compliant RDBMS, and have installed the proper ODBC driver for this RDBMS,
this is as simple as adding a record to odbc.ini (for iODBC), or using the
ODBC wizard in
Control Panel (for odbc32). Next, you need to add appropriate "database"
record to your slapd.conf. See
sample provided in "back-sql/RDBMS_DEPENDENT/" subdirectory. The only thing
worth noting about this is that "dbname" directive stands for ODBC datasource
name, not the name of your database in RDBMS context.
Control Panel (for odbc32).
Next, you need to add the appropriate "database" record to your slapd.conf.
See the samples provided in the "back-sql/rdbms_depend/" subdirectory.
Several things worth noting about ODBC:
- "dbname" directive stands for ODBC datasource name (DSN),
not the name of your database in RDBMS context
- ODBC under Unix is not as common as under Windows, so you could have
problems with Unix drivers for your RDBMS. Visit http://www.openlinksw.com -
they provide a multitier solution which allows connecting to DBMSes on
different platforms, and addresses proxying and other connectivity and
integration issues. They also support iODBC, and have good free customer
service through their news server (at news.openlinksw.com).
- be careful defining RDBMS connection parameters; you'll probably need only
the "dbname" directive - all the rest can be defined in the datasource. Maybe
you will want to use dbuser/dbpasswd to override credentials defined in the
datasource (see the sketch after this list)
- the full list of supported configuration directives is available in the
file "guide"
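To illustrate, a minimal sketch of such a "database" section - the suffix
matches the samples, while the DSN and credentials are invented:
database sql
suffix "o=sql,c=RU"
dbname backsql_test
dbuser sa
dbpasswd secret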
3. Creating and using back-sql metatables
See SQL scripts and slapd.conf files in sample directory .
Create db/user whatever for test, execute create.sql, create_testdb.sql,
test_data.sql,test_metadata.sql from appropriate directory (use
Read the file "concept" to understand what metainformation you need to add,
and what for... ;)
See SQL scripts and slapd.conf files in samples directory.
Find the subdirectory in "rdbms_depend/" corresponding to your RDBMS (Oracle,
MS SQL Server and mySQL are listed there currently), or copy and edit
any of these to conform to the SQL dialect of your RDBMS (please be sure to
send me scripts and notes for new RDBMSes ;).
Execute "backsql_create.sql" from that subdirectory (or edited one),
so that the tables it creates appear in the same
context with the data you want to export through LDAP (under same DB/user,
or whatever is needed in RDBMS you use). You can use something like
"mysql < xxx.sql" for mySQL, Query Analyzer+Open query file for MS SQL,
sqlplus and "@xxx.sql" for Oracle)
sqlplus and "@xxx.sql" for Oracle.
You may well want to try it with test data first, and see how the metatables
are used. Create test data and metadata by running the testdb_create.sql,
testdb_data.sql, and testdb_metadata.sql scripts (again, adapted for your
RDBMS, and in the same context as the metatables you created before), and
tune slapd.conf to use your test DB.
4. Testing
To diagnose back-sql, run slapd with debug level TRACE ("slapd -d 5" will do).
@ -35,4 +68,9 @@ you could for instance search one level from "o=sql,c=RU"). I personally used
saucer, which is included in OpenLDAP package (it builds automatically under
Unix/GNU configure and for MSVC I added appropriate project to workspace).
And also Java LDAP browser-editor (see link somewhere on OpenLDAP site) to
test ADD/DELETE/MODIFY operations on Oracle and MS SQL
test ADD/DELETE/MODIFY operations on Oracle and MS SQL.
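For a quick smoke test, a one-level search such as the following (host is
illustrative; the base DN is from the samples) should return your test entries:
ldapsearch -h localhost -b "o=sql,c=RU" -s one "(objectclass=*)"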
See file "platforms" if you encounter connection problems - you may find
a hint for your RDBMS or OS there. If you are stuck - please contact me at
mit@openldap.org, or (better) post an issue through OpenLDAP's Issue Tracking
System (see http://www.openldap.org/its).

View File

@ -1,9 +1,6 @@
2) must add alias handling
3) must set time limit when preparing all queries, and check size limit
7) MOST important, probably - fix BUGS (see ...)
8) make an utility, which, given objectclass,name of attribute for RDN, and
base DN, adds appropriate records to ldap_entries for all data in your
RDBMS (until then, you have no automated way to fill in ldap_entries except
executing something like "insert into ldap_entries (dn,keyval,...) values
(select concat(...,<base>) as dn,id as keyval, ... from ...)")
1) must add alias handling
2) must set time limit when preparing all queries, and check size limit
3) there was expressed a need to have access to the client IP while
constructing queries, to have the response vary according to the client IP.
Will add a preprocessor for values in metatables, which would substitute
things like "$IP$".

View File

@ -1098,29 +1098,35 @@ connection_resched( Connection *conn )
Operation *op;
if( conn->c_conn_state == SLAP_C_CLOSING ) {
int rc;
ber_socket_t sd;
ber_sockbuf_ctrl( conn->c_sb, LBER_SB_OPT_GET_FD, &sd );
Debug( LDAP_DEBUG_TRACE,
"connection_resched: reaquiring locks conn=%ld sd=%d\n",
conn->c_connid, sd, 0 );
/* use trylock to avoid possible deadlock */
rc = ldap_pvt_thread_mutex_trylock( &connections_mutex );
/* reaquire locks in the right order... this may
* allow another thread to close this connection,
* so recheck state
*/
ldap_pvt_thread_mutex_unlock( &conn->c_mutex );
ldap_pvt_thread_mutex_lock( &connections_mutex );
ldap_pvt_thread_mutex_lock( &conn->c_mutex );
if( rc ) {
Debug( LDAP_DEBUG_TRACE,
"connection_resched: reaquiring locks conn=%ld sd=%d\n",
conn->c_connid, sd, 0 );
/*
* reacquire locks in the right order...
* this may allow another thread to close this connection,
* so recheck state below.
*/
ldap_pvt_thread_mutex_unlock( &conn->c_mutex );
ldap_pvt_thread_mutex_lock( &connections_mutex );
ldap_pvt_thread_mutex_lock( &conn->c_mutex );
}
if( conn->c_conn_state != SLAP_C_CLOSING ) {
Debug( LDAP_DEBUG_TRACE,
"connection_resched: closed by other thread conn=%ld sd=%d\n",
Debug( LDAP_DEBUG_TRACE, "connection_resched: "
"closed by other thread conn=%ld sd=%d\n",
conn->c_connid, sd, 0 );
} else {
Debug( LDAP_DEBUG_TRACE,
"connection_resched: attempting closing conn=%ld sd=%d\n",
Debug( LDAP_DEBUG_TRACE, "connection_resched: "
"attempting closing conn=%ld sd=%d\n",
conn->c_connid, sd, 0 );
connection_close( conn );
@ -1168,18 +1174,18 @@ static int connection_op_activate( Connection *conn, Operation *op )
arg->co_conn = conn;
arg->co_op = op;
arg->co_op->o_authz = conn->c_authz;
arg->co_op->o_dn = ch_strdup( conn->c_dn != NULL ? conn->c_dn : "" );
arg->co_op->o_ndn = ch_strdup( arg->co_op->o_dn );
(void) dn_normalize( arg->co_op->o_ndn );
arg->co_op->o_protocol = conn->c_protocol
? conn->c_protocol : LDAP_VERSION3;
arg->co_op->o_connid = conn->c_connid;
arg->co_op->o_authtype = conn->c_authtype;
arg->co_op->o_authmech = conn->c_authmech != NULL
? ch_strdup( conn->c_authmech ) : NULL;
arg->co_op->o_protocol = conn->c_protocol
? conn->c_protocol : LDAP_VERSION3;
arg->co_op->o_connid = conn->c_connid;
slap_op_add( &conn->c_ops, arg->co_op );
status = ldap_pvt_thread_pool_submit( &connection_pool,