/* Cache handling for host lookup.
   Copyright (C) 2004-2006, 2008, 2009, 2011 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2004.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published
   by the Free Software Foundation; version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software Foundation,
   Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
|
2004-09-30 10:24:51 +08:00
|
|
|
|
|
|
|
#include <assert.h>
|
|
|
|
#include <errno.h>
|
|
|
|
#include <grp.h>
|
|
|
|
#include <libintl.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include <time.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
#include <sys/mman.h>
|
2005-11-20 01:22:39 +08:00
|
|
|
|
|
|
|
#include "dbg_log.h"
|
|
|
|
#include "nscd.h"
|
|
|
|
#ifdef HAVE_SENDFILE
|
|
|
|
# include <kernel-features.h>
|
|
|
|
#endif
|
2004-09-30 10:24:51 +08:00
|
|
|
|
|
|
|
#include "../nss/nsswitch.h"
|
|
|
|
|
|
|
|
|
|
|
|
/* Type of the NSS backend lookup function resolved via
   __nss_lookup_function (nip, "initgroups_dyn").  Arguments are: user
   name, a group to exclude, in/out count of groups found, in/out size
   of the group array, in/out pointer to the group array (the backend
   may reallocate it), the upper limit on groups (or a negative value
   for no limit), and a pointer to an errno slot.  */
typedef enum nss_status (*initgroups_dyn_function) (const char *, gid_t,
                                                    long int *, long int *,
                                                    gid_t **, long int, int *);
|
|
|
|
|
|
|
|
|
|
|
|
/* Canned negative reply sent to clients when no groups are found for
   the requested user.  Also copied into negative cache records.  */
static const initgr_response_header notfound =
{
  .version = NSCD_VERSION,
  .found = 0,
  .ngrps = 0
};
|
|
|
|
|
|
|
|
|
|
|
|
#include "../grp/compat-initgroups.c"
|
|
|
|
|
|
|
|
|
2011-02-06 09:07:27 +08:00
|
|
|
/* Look up the supplementary groups of the user named KEY through the
   NSS "group" database and store the result (positive or negative) in
   the initgroups cache of DB.  If FD is not -1 the reply is also sent
   to the requesting client on that descriptor.  HE and DH are non-NULL
   when an existing cache record is being refreshed by the pruning
   thread (in which case FD is -1).  Returns the timeout of the
   created or updated record.  */
static time_t
addinitgroupsX (struct database_dyn *db, int fd, request_header *req,
                void *key, uid_t uid, struct hashentry *const he,
                struct datahead *dh)
{
  /* Search for the entry matching the key.  Please note that we don't
     look again in the table whether the dataset is now available.  We
     simply insert it.  It does not matter if it is in there twice.  The
     pruning function only will look at the timestamp.  */

  /* We allocate all data in one memory block: the iov vector,
     the response header and the dataset itself.  */
  struct dataset
  {
    struct datahead head;
    initgr_response_header resp;
    char strdata[0];            /* Key (user name) follows the response.  */
  } *dataset = NULL;

  if (__builtin_expect (debug_level > 0, 0))
    {
      if (he == NULL)
        dbg_log (_("Haven't found \"%s\" in group cache!"), (char *) key);
      else
        dbg_log (_("Reloading \"%s\" in group cache!"), (char *) key);
    }

  /* Cached head of the "group" service list; resolved once and reused
     by subsequent calls.  */
  static service_user *group_database;
  service_user *nip = NULL;
  int no_more;

  if (group_database != NULL)
    {
      nip = group_database;
      no_more = 0;
    }
  else
    no_more = __nss_database_lookup ("group", NULL,
                                     "compat [NOTFOUND=return] files", &nip);

  /* We always use sysconf even if NGROUPS_MAX is defined.  That way, the
     limit can be raised in the kernel configuration without having to
     recompile libc.  */
  long int limit = __sysconf (_SC_NGROUPS_MAX);

  long int size;
  if (limit > 0)
    /* We limit the size of the initially allocated array.  */
    size = MIN (limit, 64);
  else
    /* No fixed limit on groups.  Pick a starting buffer size.  */
    size = 16;

  long int start = 0;
  bool all_tryagain = true;
  bool any_success = false;

  /* This is temporary memory, we need not (and must not) call
     mempool_alloc.  */
  // XXX This really should use alloca.  need to change the backends.
  gid_t *groups = (gid_t *) malloc (size * sizeof (gid_t));
  if (__builtin_expect (groups == NULL, 0))
    /* No more memory.  any_success is still false, so the negative
       path below sets written/total and builds a negative record.  */
    goto out;

  /* Nothing added yet.  Walk the NSS service list, accumulating group
     IDs into GROUPS.  */
  while (! no_more)
    {
      long int prev_start = start;
      enum nss_status status;
      initgroups_dyn_function fct;
      fct = __nss_lookup_function (nip, "initgroups_dyn");

      if (fct == NULL)
        {
          /* Backend has no initgroups_dyn entry point; fall back to
             enumerating the whole group database.  */
          status = compat_call (nip, key, -1, &start, &size, &groups,
                                limit, &errno);

          if (nss_next_action (nip, NSS_STATUS_UNAVAIL) != NSS_ACTION_CONTINUE)
            break;
        }
      else
        status = DL_CALL_FCT (fct, (key, -1, &start, &size, &groups,
                                    limit, &errno));

      /* Remove duplicates: any new entry (index >= prev_start) that is
         already present among the previously collected ones is replaced
         by the last new entry.  */
      long int cnt = prev_start;
      while (cnt < start)
        {
          long int inner;
          for (inner = 0; inner < prev_start; ++inner)
            if (groups[inner] == groups[cnt])
              break;

          if (inner < prev_start)
            groups[cnt] = groups[--start];
          else
            ++cnt;
        }

      if (status != NSS_STATUS_TRYAGAIN)
        all_tryagain = false;

      /* This is really only for debugging.  */
      if (NSS_STATUS_TRYAGAIN > status || status > NSS_STATUS_RETURN)
        __libc_fatal ("illegal status in internal_getgrouplist");

      any_success |= status == NSS_STATUS_SUCCESS;

      if (status != NSS_STATUS_SUCCESS
          && nss_next_action (nip, status) == NSS_ACTION_RETURN)
        break;

      if (nip->next == NULL)
        no_more = -1;
      else
        nip = nip->next;
    }

  ssize_t total;
  ssize_t written;
  time_t timeout;
 out:
  timeout = MAX_TIMEOUT_VALUE;
  if (!any_success)
    {
      /* Nothing found.  Create a negative result record.  */
      written = total = sizeof (notfound);

      if (he != NULL && all_tryagain)
        {
          /* If we have an old record available but cannot find one now
             because the service is not available we keep the old record
             and make sure it does not get removed.  */
          if (reload_count != UINT_MAX && dh->nreloads == reload_count)
            /* Do not reset the value if we never reload the record.  */
            dh->nreloads = reload_count - 1;

          /* Reload with the same time-to-live value.  */
          timeout = dh->timeout = time (NULL) + db->postimeout;
        }
      else
        {
          /* We have no data.  This means we send the standard reply for this
             case.  */
          if (fd != -1)
            written = TEMP_FAILURE_RETRY (send (fd, &notfound, total,
                                                MSG_NOSIGNAL));

          /* If we cannot permanently store the result, so be it.  */
          if (__builtin_expect (db->negtimeout == 0, 0))
            {
              /* Mark the old entry as obsolete.  */
              if (dh != NULL)
                dh->usable = false;
            }
          else if ((dataset = mempool_alloc (db, (sizeof (struct dataset)
                                                  + req->key_len), 1)) != NULL)
            {
              dataset->head.allocsize = sizeof (struct dataset) + req->key_len;
              dataset->head.recsize = total;
              dataset->head.notfound = true;
              dataset->head.nreloads = 0;
              dataset->head.usable = true;

              /* Compute the timeout time.  */
              timeout = dataset->head.timeout = time (NULL) + db->negtimeout;

              /* This is the reply.  */
              memcpy (&dataset->resp, &notfound, total);

              /* Copy the key data.  */
              char *key_copy = memcpy (dataset->strdata, key, req->key_len);

              /* If necessary, we also propagate the data to disk.  */
              if (db->persistent)
                {
                  // XXX async OK?
                  uintptr_t pval = (uintptr_t) dataset & ~pagesize_m1;
                  msync ((void *) pval,
                         ((uintptr_t) dataset & pagesize_m1)
                         + sizeof (struct dataset) + req->key_len, MS_ASYNC);
                }

              (void) cache_add (req->type, key_copy, req->key_len,
                                &dataset->head, true, db, uid, he == NULL);

              /* NOTE(review): cache_add appears to leave db->lock held on
                 return; released here — confirm against cache_add.  */
              pthread_rwlock_unlock (&db->lock);

              /* Mark the old entry as obsolete.  */
              if (dh != NULL)
                dh->usable = false;
            }
        }
    }
  else
    {
      /* Positive result: header plus one int32_t per group ID.  */
      written = total = (offsetof (struct dataset, strdata)
                         + start * sizeof (int32_t));

      /* If we refill the cache, first assume the record did not
         change.  Allocate memory on the cache since it is likely
         discarded anyway.  If it turns out to be necessary to have a
         new record we can still allocate real memory.  */
      bool alloca_used = false;
      dataset = NULL;

      if (he == NULL)
        dataset = (struct dataset *) mempool_alloc (db, total + req->key_len,
                                                    1);

      if (dataset == NULL)
        {
          /* We cannot permanently add the result in the moment.  But
             we can provide the result as is.  Store the data in some
             temporary memory.  */
          dataset = (struct dataset *) alloca (total + req->key_len);

          /* We cannot add this record to the permanent database.  */
          alloca_used = true;
        }

      dataset->head.allocsize = total + req->key_len;
      dataset->head.recsize = total - offsetof (struct dataset, resp);
      dataset->head.notfound = false;
      dataset->head.nreloads = he == NULL ? 0 : (dh->nreloads + 1);
      dataset->head.usable = true;

      /* Compute the timeout time.  */
      timeout = dataset->head.timeout = time (NULL) + db->postimeout;

      dataset->resp.version = NSCD_VERSION;
      dataset->resp.found = 1;
      dataset->resp.ngrps = start;

      char *cp = dataset->strdata;

      /* Copy the GID values.  If the size of the types match this is
         very simple.  */
      if (sizeof (gid_t) == sizeof (int32_t))
        cp = mempcpy (cp, groups, start * sizeof (gid_t));
      else
        {
          /* Sizes differ; copy element by element with conversion.  */
          gid_t *gcp = (gid_t *) cp;

          for (int i = 0; i < start; ++i)
            *gcp++ = groups[i];

          cp = (char *) gcp;
        }

      /* Finally the user name.  */
      memcpy (cp, key, req->key_len);

      assert (cp == dataset->strdata + total - offsetof (struct dataset,
                                                         strdata));

      /* Now we can determine whether on refill we have to create a new
         record or not.  */
      if (he != NULL)
        {
          assert (fd == -1);

          if (total + req->key_len == dh->allocsize
              && total - offsetof (struct dataset, resp) == dh->recsize
              && memcmp (&dataset->resp, dh->data,
                         dh->allocsize - offsetof (struct dataset, resp)) == 0)
            {
              /* The data has not changed.  We will just bump the
                 timeout value.  Note that the new record has been
                 allocated on the stack and need not be freed.  */
              dh->timeout = dataset->head.timeout;
              ++dh->nreloads;
            }
          else
            {
              /* We have to create a new record.  Just allocate
                 appropriate memory and copy it.  */
              struct dataset *newp
                = (struct dataset *) mempool_alloc (db, total + req->key_len,
                                                    1);
              if (newp != NULL)
                {
                  /* Adjust pointer into the memory block.  */
                  cp = (char *) newp + (cp - (char *) dataset);

                  dataset = memcpy (newp, dataset, total + req->key_len);
                  alloca_used = false;
                }

              /* Mark the old record as obsolete.  */
              dh->usable = false;
            }
        }
      else
        {
          /* We write the dataset before inserting it to the database
             since while inserting this thread might block and so would
             unnecessarily let the receiver wait.  */
          assert (fd != -1);

#ifdef HAVE_SENDFILE
          /* If the record lives in the mmap'ed database image we can
             send it zero-copy straight from the backing file.  */
          if (__builtin_expect (db->mmap_used, 1) && !alloca_used)
            {
              assert (db->wr_fd != -1);
              assert ((char *) &dataset->resp > (char *) db->data);
              assert ((char *) dataset - (char *) db->head
                      + total
                      <= (sizeof (struct database_pers_head)
                          + db->head->module * sizeof (ref_t)
                          + db->head->data_size));
              written = sendfileall (fd, db->wr_fd,
                                     (char *) &dataset->resp
                                     - (char *) db->head, dataset->head.recsize);
# ifndef __ASSUME_SENDFILE
              /* Kernel without sendfile support: fall back to write.  */
              if (written == -1 && errno == ENOSYS)
                goto use_write;
# endif
            }
          else
# ifndef __ASSUME_SENDFILE
          use_write:
# endif
#endif
            written = writeall (fd, &dataset->resp, dataset->head.recsize);
        }

      /* Add the record to the database.  But only if it has not been
         stored on the stack.  */
      if (! alloca_used)
        {
          /* If necessary, we also propagate the data to disk.  */
          if (db->persistent)
            {
              // XXX async OK?
              uintptr_t pval = (uintptr_t) dataset & ~pagesize_m1;
              msync ((void *) pval,
                     ((uintptr_t) dataset & pagesize_m1) + total +
                     req->key_len, MS_ASYNC);
            }

          /* CP still points at the key copy inside the dataset.  */
          (void) cache_add (INITGROUPS, cp, req->key_len, &dataset->head, true,
                            db, uid, he == NULL);

          /* NOTE(review): cache_add appears to leave db->lock held on
             return; released here — confirm against cache_add.  */
          pthread_rwlock_unlock (&db->lock);
        }
    }

  free (groups);

  if (__builtin_expect (written != total, 0) && debug_level > 0)
    {
      char buf[256];
      dbg_log (_("short write in %s: %s"), __FUNCTION__,
               strerror_r (errno, buf, sizeof (buf)));
    }

  return timeout;
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
addinitgroups (struct database_dyn *db, int fd, request_header *req, void *key,
|
|
|
|
uid_t uid)
|
|
|
|
{
|
|
|
|
addinitgroupsX (db, fd, req, key, uid, NULL, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-02-06 09:07:27 +08:00
|
|
|
time_t
|
2004-09-30 10:24:51 +08:00
|
|
|
readdinitgroups (struct database_dyn *db, struct hashentry *he,
|
|
|
|
struct datahead *dh)
|
|
|
|
{
|
|
|
|
request_header req =
|
|
|
|
{
|
|
|
|
.type = INITGROUPS,
|
|
|
|
.key_len = he->len
|
|
|
|
};
|
|
|
|
|
2011-02-06 09:07:27 +08:00
|
|
|
return addinitgroupsX (db, -1, &req, db->data + he->key, he->owner, he, dh);
|
2004-09-30 10:24:51 +08:00
|
|
|
}
|