/* Cache handling for initgroups lookup.
   Copyright (C) 2004, 2005, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2004.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published
   by the Free Software Foundation; version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software Foundation,
   Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
# include <kernel-features.h>

#include "../nss/nsswitch.h"
/* Type of the lookup function.  */
typedef enum nss_status (*initgroups_dyn_function) (const char *, gid_t,
                                                    long int *, long int *,
                                                    gid_t **, long int, int *);
static const initgr_response_header notfound =
{
  .version = NSCD_VERSION,
  .found = 0,
  .ngrps = 0
};
#include "../grp/compat-initgroups.c"
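/* addinitgroupsX below builds the reply for an INITGROUPS request: it asks
   each configured NSS module's initgroups_dyn function (falling back to the
   compat implementation included above), collects the supplementary group
   IDs and stores the resulting dataset in the cache DB.  */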
addinitgroupsX (struct database_dyn *db, int fd, request_header *req,
                void *key, uid_t uid, struct hashentry *he,
                struct datahead *dh)
  /* Search for the entry matching the key.  Please note that we don't
     look again in the table whether the dataset is now available.  We
     simply insert it.  It does not matter if it is in there twice.  The
     pruning function will only look at the timestamp.  */
  /* We allocate all data in one memory block: the iov vector,
     the response header and the dataset itself.  */
  struct dataset
  {
    struct datahead head;
    initgr_response_header resp;
    char strdata[];
  } *dataset = NULL;
  if (__builtin_expect (debug_level > 0, 0))
    {
      if (he == NULL)
        dbg_log (_("Haven't found \"%s\" in group cache!"), (char *) key);
      else
        dbg_log (_("Reloading \"%s\" in group cache!"), (char *) key);
    }
  static service_user *group_database;
  service_user *nip = NULL;
  int no_more;

  if (group_database != NULL)
    {
      nip = group_database;
      no_more = 0;
    }
  else
    no_more = __nss_database_lookup ("group", NULL,
                                     "compat [NOTFOUND=return] files", &nip);
  /* We always use sysconf even if NGROUPS_MAX is defined.  That way, the
     limit can be raised in the kernel configuration without having to
     recompile libc.  */
  long int limit = __sysconf (_SC_NGROUPS_MAX);

  long int size;
  if (limit > 0)
    /* We limit the size of the initially allocated array.  */
    size = MIN (limit, 64);
  else
    /* No fixed limit on groups.  Pick a starting buffer size.  */
    size = 16;
  bool all_tryagain = true;
  bool any_success = false;
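  /* all_tryagain remains true only if every module that was asked returned
     NSS_STATUS_TRYAGAIN; any_success records whether at least one module
     returned data.  Together they decide below whether an existing record
     is kept or a negative entry is written.  */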
  /* This is temporary memory, we need not (and must not) call
     mempool_alloc.  */
  // XXX This really should use alloca.  need to change the backends.
  gid_t *groups = (gid_t *) malloc (size * sizeof (gid_t));
  if (__builtin_expect (groups == NULL, 0))
    /* No more memory.  */
    goto out;
  /* Nothing added yet.  */
  int start = 0;
  while (! no_more)
    {
      long int prev_start = start;
      enum nss_status status;
      initgroups_dyn_function fct;
      fct = __nss_lookup_function (nip, "initgroups_dyn");
      if (fct == NULL)
        {
          status = compat_call (nip, key, -1, &start, &size, &groups,
                                limit, &errno);

          if (nss_next_action (nip, NSS_STATUS_UNAVAIL) != NSS_ACTION_CONTINUE)
            break;
        }
      else
        status = DL_CALL_FCT (fct, (key, -1, &start, &size, &groups,
                                    limit, &errno));
      /* Remove duplicates.  */
      long int cnt = prev_start;
      while (cnt < start)
        {
          long int inner;
          for (inner = 0; inner < prev_start; ++inner)
            if (groups[inner] == groups[cnt])
              break;
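          /* If the inner loop stopped early, groups[cnt] duplicates a gid
             already reported by an earlier module; it is dropped by
             overwriting it with the last element and shrinking the list.  */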
          if (inner < prev_start)
            groups[cnt] = groups[--start];
          else
            ++cnt;
        }
      if (status != NSS_STATUS_TRYAGAIN)
        all_tryagain = false;

      /* This is really only for debugging.  */
      if (NSS_STATUS_TRYAGAIN > status || status > NSS_STATUS_RETURN)
        __libc_fatal ("illegal status in internal_getgrouplist");

      any_success |= status == NSS_STATUS_SUCCESS;
      if (status != NSS_STATUS_SUCCESS
          && nss_next_action (nip, status) == NSS_ACTION_RETURN)
        break;

      if (nip->next == NULL)
        no_more = -1;
      else
        nip = nip->next;
    }
      /* Nothing found.  Create a negative result record.  */
      written = total = sizeof (notfound);

      if (he != NULL && all_tryagain)
        {
          /* If we have an old record available but cannot find one now
             because the service is not available we keep the old record
             and make sure it does not get removed.  */
          if (reload_count != UINT_MAX && dh->nreloads == reload_count)
            /* Do not reset the value if we never reload the record.  */
            dh->nreloads = reload_count - 1;
        }
      else
        {
          /* We have no data.  This means we send the standard reply for this
             case.  */
          if (fd != -1)
            written = TEMP_FAILURE_RETRY (send (fd, &notfound, total,
                                                MSG_NOSIGNAL));

          dataset = mempool_alloc (db, sizeof (struct dataset) + req->key_len);
          /* If we cannot permanently store the result, so be it.  */
          if (dataset != NULL)
            {
              dataset->head.allocsize = sizeof (struct dataset) + req->key_len;
              dataset->head.recsize = total;
              dataset->head.notfound = true;
              dataset->head.nreloads = 0;
              dataset->head.usable = true;
              /* Compute the timeout time.  */
              dataset->head.timeout = time (NULL) + db->negtimeout;

              /* This is the reply.  */
              memcpy (&dataset->resp, &notfound, total);

              /* Copy the key data.  */
              char *key_copy = memcpy (dataset->strdata, key, req->key_len);
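              /* The key is stored right behind the response data so that the
                 hash table entry and the reply share one allocation.  */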
              /* If necessary, we also propagate the data to disk.  */
              if (db->persistent)
                {
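                  /* MS_ASYNC merely schedules write-back of the pages
                     touched by the new record; the worker thread does not
                     block on disk I/O.  */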
                  uintptr_t pval = (uintptr_t) dataset & ~pagesize_m1;
                  msync ((void *) pval,
                         ((uintptr_t) dataset & pagesize_m1)
                         + sizeof (struct dataset) + req->key_len, MS_ASYNC);
                }
              /* Now get the lock to safely insert the records.  */
              pthread_rwlock_rdlock (&db->lock);

              if (cache_add (req->type, key_copy, req->key_len,
                             &dataset->head, true, db, uid) < 0)
                /* Ensure the data can be recovered.  */
                dataset->head.usable = false;

              pthread_rwlock_unlock (&db->lock);

              /* Mark the old entry as obsolete.  */
              if (dh != NULL)
                dh->usable = false;
            }
          else
            ++db->head->addfailed;
        }
      written = total = sizeof (struct dataset) + start * sizeof (int32_t);
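      /* The reply is the response header followed by one int32_t per group
         ID; start is the number of gids collected above.  */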
      /* If we refill the cache, first assume the record did not
         change.  Allocate memory on the cache since it is likely
         discarded anyway.  If it turns out to be necessary to have a
         new record we can still allocate real memory.  */
      bool alloca_used = false;
      dataset = (struct dataset *) mempool_alloc (db,
                                                  total + req->key_len);
      if (dataset == NULL)
        ++db->head->addfailed;
      if (dataset == NULL)
        {
          /* We cannot permanently add the result at the moment.  But
             we can provide the result as is.  Store the data in some
             temporary memory.  */
          dataset = (struct dataset *) alloca (total + req->key_len);

          /* We cannot add this record to the permanent database.  */
          alloca_used = true;
        }
      dataset->head.allocsize = total + req->key_len;
      dataset->head.recsize = total - offsetof (struct dataset, resp);
      dataset->head.notfound = false;
      dataset->head.nreloads = he == NULL ? 0 : (dh->nreloads + 1);
      dataset->head.usable = true;
      /* Compute the timeout time.  */
      dataset->head.timeout = time (NULL) + db->postimeout;

      dataset->resp.version = NSCD_VERSION;
      dataset->resp.found = 1;
      dataset->resp.ngrps = start;

      char *cp = dataset->strdata;
      /* Copy the GID values.  If the size of the types match this is
         rather simple.  */
      if (sizeof (gid_t) == sizeof (int32_t))
        cp = mempcpy (cp, groups, start * sizeof (gid_t));
      else
        {
          gid_t *gcp = (gid_t *) cp;

          for (int i = 0; i < start; ++i)
            *gcp++ = groups[i];

          cp = (char *) gcp;
        }
      /* Finally the user name.  */
      memcpy (cp, key, req->key_len);
      /* Now we can determine whether on refill we have to create a new
         record or not.  */
      if (total + req->key_len == dh->allocsize
          && total - offsetof (struct dataset, resp) == dh->recsize
          && memcmp (&dataset->resp, dh->data,
                     dh->allocsize - offsetof (struct dataset, resp)) == 0)
        {
          /* The data has not changed.  We will just bump the
             timeout value.  Note that the new record has been
             allocated on the stack and need not be freed.  */
          dh->timeout = dataset->head.timeout;
        }
      else
        {
          /* We have to create a new record.  Just allocate
             appropriate memory and copy it.  */
          struct dataset *newp
            = (struct dataset *) mempool_alloc (db, total + req->key_len);
          if (newp != NULL)
            {
              /* Adjust pointer into the memory block.  */
              cp = (char *) newp + (cp - (char *) dataset);

              dataset = memcpy (newp, dataset, total + req->key_len);
              alloca_used = false;
            }
          /* Mark the old record as obsolete.  */
          dh->usable = false;
        }
      /* We write the dataset before inserting it to the database
         since while inserting this thread might block and so would
         unnecessarily let the receiver wait.  */
#ifdef HAVE_SENDFILE
      if (__builtin_expect (db->mmap_used, 1) && !alloca_used)
        {
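          /* The dataset lives inside the mmap'ed database file, so the reply
             can be sent straight from db->wr_fd with sendfile instead of
             being copied through user space; if the kernel lacks sendfile
             (ENOSYS) the code falls back to the plain writeall path below.  */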
          assert (db->wr_fd != -1);
          assert ((char *) &dataset->resp > (char *) db->data);
          assert ((char *) &dataset->resp - (char *) db->head
                  + total
                  <= (sizeof (struct database_pers_head)
                      + db->head->module * sizeof (ref_t)
                      + db->head->data_size));
          written = sendfileall (fd, db->wr_fd,
                                 (char *) &dataset->resp
                                 - (char *) db->head, total);
# ifndef __ASSUME_SENDFILE
          if (written == -1 && errno == ENOSYS)
            goto use_write;
# endif
        }
      else
# ifndef __ASSUME_SENDFILE
      use_write:
# endif
#endif
        written = writeall (fd, &dataset->resp, total);
      /* Add the record to the database.  But only if it has not been
         stored on the stack.  */
      if (! alloca_used)
        {
          /* If necessary, we also propagate the data to disk.  */
          if (db->persistent)
            {
              uintptr_t pval = (uintptr_t) dataset & ~pagesize_m1;
              msync ((void *) pval,
                     ((uintptr_t) dataset & pagesize_m1) + total +
                     req->key_len, MS_ASYNC);
            }
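          /* Note that insertion happens under a read lock; exclusive access
             is apparently only needed when pruning expired entries, so
             parallel additions do not block each other.  */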
          /* Now get the lock to safely insert the records.  */
          pthread_rwlock_rdlock (&db->lock);

          if (cache_add (INITGROUPS, cp, req->key_len, &dataset->head, true,
                         db, uid) < 0)
            /* Could not allocate memory.  Make sure the data gets
               discarded.  */
            dataset->head.usable = false;

          pthread_rwlock_unlock (&db->lock);
        }
  if (__builtin_expect (written != total, 0) && debug_level > 0)
    {
      char buf[256];
      dbg_log (_("short write in %s: %s"), __FUNCTION__,
               strerror_r (errno, buf, sizeof (buf)));
    }
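/* Handle a fresh INITGROUPS request from a client: build the reply, send it
   on FD and add it to the cache.  */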
addinitgroups (struct database_dyn *db, int fd, request_header *req, void *key,
               uid_t uid)
{
  addinitgroupsX (db, fd, req, key, uid, NULL, NULL);
}
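/* Refresh an existing cache entry when it expires: rebuild the dataset from
   the current NSS data and renew or replace the record.  There is no client
   to answer, hence the -1 file descriptor.  */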
readdinitgroups (struct database_dyn *db, struct hashentry *he,
                 struct datahead *dh)
{
  request_header req =
    {
      .type = INITGROUPS,
      .key_len = he->len
    };

  addinitgroupsX (db, -1, &req, db->data + he->key, he->owner, he, dh);
}