/* Copyright (c) 1998, 1999, 2003-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published
   by the Free Software Foundation; version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software Foundation,
   Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
#include <assert.h>
#include <atomic.h>
#include <errno.h>
#include <error.h>
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <libintl.h>
#include <arpa/inet.h>
#include <rpcsvc/nis.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/uio.h>

#include "nscd.h"
#include "dbg_log.h"
/* Wrapper functions with error checking for standard functions.  */
extern void *xcalloc (size_t n, size_t s);


/* Number of times a value is reloaded without being used.  UINT_MAX
   means unlimited.  */
unsigned int reload_count = DEFAULT_RELOAD_LIMIT;

static void (*const readdfcts[LASTREQ]) (struct database_dyn *,
                                         struct hashentry *,
                                         struct datahead *) =
{
  [GETPWBYNAME] = readdpwbyname,
  [GETPWBYUID] = readdpwbyuid,
  [GETGRBYNAME] = readdgrbyname,
  [GETGRBYGID] = readdgrbygid,
  [GETHOSTBYNAME] = readdhstbyname,
  [GETHOSTBYNAMEv6] = readdhstbynamev6,
  [GETHOSTBYADDR] = readdhstbyaddr,
  [GETHOSTBYADDRv6] = readdhstbyaddrv6,
  [GETAI] = readdhstai,
  [INITGROUPS] = readdinitgroups,
  [GETSERVBYNAME] = readdservbyname,
  [GETSERVBYPORT] = readdservbyport
};
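
/* Illustrative sketch (not part of nscd): the table above uses C99
   designated initializers to build a function-pointer dispatch table
   indexed by request type; slots that are not named stay NULL.  The
   enum and handler names below are hypothetical and exist only to show
   the idiom in a self-contained form.  */
#if 0
#include <stdio.h>

enum example_req { EX_REQ_A, EX_REQ_B, EX_REQ_LAST };

static void ex_handle_a (void) { puts ("A"); }
static void ex_handle_b (void) { puts ("B"); }

static void (*const ex_handlers[EX_REQ_LAST]) (void) =
{
  [EX_REQ_A] = ex_handle_a,
  [EX_REQ_B] = ex_handle_b
};

static void
ex_dispatch (enum example_req r)
{
  /* Guard against unnamed (NULL) slots, just as prune_cache below
     asserts readdfcts[runp->type] != NULL before calling through it.  */
  if (r < EX_REQ_LAST && ex_handlers[r] != NULL)
    ex_handlers[r] ();
}
#endif
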
/* Search the cache for a matching entry and return it when found.  If
   this fails search the negative cache and return (void *) -1 if this
   search was successful.  Otherwise return NULL.

   This function must be called with the read-lock held.  */
struct datahead *
cache_search (request_type type, void *key, size_t len,
              struct database_dyn *table, uid_t owner)
{
  unsigned long int hash = __nis_hash (key, len) % table->head->module;

  unsigned long int nsearched = 0;
  struct datahead *result = NULL;

  ref_t work = table->head->array[hash];
  while (work != ENDREF)
    {
      ++nsearched;

      struct hashentry *here = (struct hashentry *) (table->data + work);

      if (type == here->type && len == here->len
          && memcmp (key, table->data + here->key, len) == 0
          && here->owner == owner)
        {
          /* We found the entry.  Increment the appropriate counter.  */
          struct datahead *dh
            = (struct datahead *) (table->data + here->packet);

          /* See whether we must ignore the entry.  */
          if (dh->usable)
            {
              /* We do not synchronize the memory here.  The statistics
                 data is not crucial, we synchronize only once in a while
                 in the cleanup threads.  */
              if (dh->notfound)
                ++table->head->neghit;
              else
                {
                  ++table->head->poshit;

                  if (dh->nreloads != 0)
                    dh->nreloads = 0;
                }

              result = dh;
              break;
            }
        }

      work = here->next;
    }

  if (nsearched > table->head->maxnsearched)
    table->head->maxnsearched = nsearched;

  return result;
}
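
/* Illustrative sketch (not part of nscd): one way a caller might use
   cache_search.  As the comment above requires, the database's read
   lock is held around the call.  The helper name, the use of
   GETPWBYNAME, the choice of owner 0 and the assumption that the key
   length includes the terminating NUL are all illustrative, not taken
   from the rest of nscd.  */
#if 0
static bool
example_entry_cached (struct database_dyn *table, const char *name)
{
  bool found;

  pthread_rwlock_rdlock (&table->lock);
  struct datahead *dh = cache_search (GETPWBYNAME, (void *) name,
                                      strlen (name) + 1, table, 0);
  found = dh != NULL;
  pthread_rwlock_unlock (&table->lock);

  return found;
}
#endif
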
/* Add a new entry to the cache.  The return value is zero if the function
   call was successful.

   This function must be called with the read-lock held.

   We modify the table but we nevertheless only acquire a read-lock.
   This is ok since we use operations which would be safe even without
   locking, given that the `prune_cache' function never runs.  Using
   the readlock reduces the chance of conflicts.  */
int
cache_add (int type, const void *key, size_t len, struct datahead *packet,
           bool first, struct database_dyn *table,
           uid_t owner, bool prune_wakeup)
{
  if (__builtin_expect (debug_level >= 2, 0))
    {
      const char *str;
      char buf[INET6_ADDRSTRLEN + 1];
      if (type == GETHOSTBYADDR || type == GETHOSTBYADDRv6)
        str = inet_ntop (type == GETHOSTBYADDR ? AF_INET : AF_INET6,
                         key, buf, sizeof (buf));
      else
        str = key;

      dbg_log (_("add new entry \"%s\" of type %s for %s to cache%s"),
               str, serv2str[type], dbnames[table - dbs],
               first ? _(" (first)") : "");
    }

  unsigned long int hash = __nis_hash (key, len) % table->head->module;
  struct hashentry *newp;

  newp = mempool_alloc (table, sizeof (struct hashentry), IDX_record_data);
  /* If we cannot allocate memory, just do not do anything.  */
  if (newp == NULL)
    {
      ++table->head->addfailed;

      /* If necessary mark the entry as unusable so that lookups will
         not use it.  */
      if (first)
        packet->usable = false;

      /* Mark the in-flight memory as unused.  */
      for (enum in_flight idx = 0; idx < IDX_record_data; ++idx)
        mem_in_flight.block[idx].dbidx = -1;

      return -1;
    }
  newp->type = type;
  newp->first = first;
  newp->len = len;
  newp->key = (char *) key - table->data;
  assert (newp->key + newp->len <= table->head->first_free);
  newp->owner = owner;
  newp->packet = (char *) packet - table->data;
  assert ((newp->packet & BLOCK_ALIGN_M1) == 0);

  /* Put the new entry in the first position.  */
  do
    newp->next = table->head->array[hash];
  while (atomic_compare_and_exchange_bool_acq (&table->head->array[hash],
                                               (ref_t) ((char *) newp
                                                        - table->data),
                                               (ref_t) newp->next));

  /* Update the statistics.  */
  if (packet->notfound)
    ++table->head->negmiss;
  else if (first)
    ++table->head->posmiss;
  /* We depend on this value being correct and at least as high as the
     real number of entries.  */
  atomic_increment (&table->head->nentries);

  /* It does not matter that we are not loading the just-incremented
     value; this is only used for statistics.  */
  unsigned long int nentries = table->head->nentries;
  if (nentries > table->head->maxnentries)
    table->head->maxnentries = nentries;

  if (table->persistent)
    // XXX async OK?
    msync ((void *) table->head,
           (char *) &table->head->array[hash] - (char *) table->head
           + sizeof (ref_t), MS_ASYNC);
  /* We do not have to worry about the pruning thread if we are
     re-adding the data since this is done by the pruning thread.  We
     also do not have to do anything in case this is not the first
     time the data is entered since different data heads all have the
     same timeout.  */
  if (first && prune_wakeup)
    {
      /* Perhaps the prune thread for the table has not run in a long
         time.  Wake it if necessary.  */
      pthread_mutex_lock (&table->prune_lock);
      time_t next_wakeup = table->wakeup_time;
      bool do_wakeup = false;
      if (next_wakeup > packet->timeout + CACHE_PRUNE_INTERVAL)
        {
          table->wakeup_time = packet->timeout;
          do_wakeup = true;
        }
      pthread_mutex_unlock (&table->prune_lock);
      if (do_wakeup)
        pthread_cond_signal (&table->prune_cond);
    }

  /* Mark the in-flight memory as unused.  */
  for (enum in_flight idx = 0; idx < IDX_last; ++idx)
    mem_in_flight.block[idx].dbidx = -1;

  return 0;
}
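
/* Illustrative sketch (not part of nscd): the do/while loop above is a
   classic lock-free "push onto the head of a list" retry loop.  The
   same idea, written with standard C11 atomics instead of glibc's
   internal atomic_compare_and_exchange_bool_acq, could look like this.
   The node type and function are hypothetical.  */
#if 0
#include <stdatomic.h>

struct example_node
{
  struct example_node *next;
  int value;
};

static void
example_push_front (_Atomic (struct example_node *) *headp,
                    struct example_node *newp)
{
  struct example_node *old = atomic_load_explicit (headp,
                                                   memory_order_relaxed);
  do
    /* Link the new node to the current head; if another thread pushed
       in the meantime the CAS fails, OLD is refreshed and we retry.  */
    newp->next = old;
  while (!atomic_compare_exchange_weak_explicit (headp, &old, newp,
                                                 memory_order_release,
                                                 memory_order_relaxed));
}
#endif
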
/* Walk through the table and remove all entries whose lifetime has ended.

   We have a problem here.  To actually remove the entries we must get
   the write-lock.  But since we want to keep the time we hold the
   lock as short as possible we cannot simply acquire the lock when we
   start looking for timed-out entries.

   Therefore we do it in two stages: first we look for entries which
   must be invalidated and remember them.  Then we get the lock and
   actually remove them.  This is complicated by the way we have to
   free the data structures since some hash table entries share the same
   data.  */
time_t
prune_cache (struct database_dyn *table, time_t now, int fd)
{
  size_t cnt = table->head->module;

  /* If this table is not actually used, don't do anything.  */
  if (cnt == 0)
    {
      if (fd != -1)
        {
          /* Reply to the INVALIDATE initiator.  */
          int32_t resp = 0;
          writeall (fd, &resp, sizeof (resp));
        }

      /* No need to do this again anytime soon.  */
      return 24 * 60 * 60;
    }

  /* If we are supposed to check for modification of the underlying file,
     a change of that file also invalidates the entries.  */
  if (table->inotify_descr < 0 && table->check_file && now != LONG_MAX)
    {
      struct stat64 st;

      if (stat64 (table->filename, &st) < 0)
        {
          char buf[128];
          /* We cannot stat() the file; disable file checking if the
             file does not exist.  */
          dbg_log (_("cannot stat() file `%s': %s"),
                   table->filename, strerror_r (errno, buf, sizeof (buf)));
          if (errno == ENOENT)
            table->check_file = 0;
        }
      else
        {
          if (st.st_mtime != table->file_mtime)
            {
              /* The file changed.  Invalidate all entries.  */
              now = LONG_MAX;
              table->file_mtime = st.st_mtime;
            }
        }
    }

  /* We run through the table and find values which are not valid anymore.

     Note that for the initial step, finding the entries to be removed,
     we don't need to get any lock.  It is at all times assured that the
     linked lists are set up correctly and that no second thread prunes
     the cache.  */
  bool *mark;
  size_t memory_needed = cnt * sizeof (bool);
  bool mark_use_alloca;
  if (__builtin_expect (memory_needed <= MAX_STACK_USE, 1))
    {
      mark = alloca (cnt * sizeof (bool));
      memset (mark, '\0', memory_needed);
      mark_use_alloca = true;
    }
  else
    {
      mark = xcalloc (1, memory_needed);
      mark_use_alloca = false;
    }
  size_t first = cnt + 1;
  size_t last = 0;
  char *const data = table->data;
  bool any = false;

  if (__builtin_expect (debug_level > 2, 0))
    dbg_log (_("pruning %s cache; time %ld"),
             dbnames[table - dbs], (long int) now);

#define NO_TIMEOUT LONG_MAX
  time_t next_timeout = NO_TIMEOUT;
  do
    {
      ref_t run = table->head->array[--cnt];

      while (run != ENDREF)
        {
          struct hashentry *runp = (struct hashentry *) (data + run);
          struct datahead *dh = (struct datahead *) (data + runp->packet);

          /* Some debug support.  */
          if (__builtin_expect (debug_level > 2, 0))
            {
              char buf[INET6_ADDRSTRLEN];
              const char *str;

              if (runp->type == GETHOSTBYADDR || runp->type == GETHOSTBYADDRv6)
                {
                  inet_ntop (runp->type == GETHOSTBYADDR ? AF_INET : AF_INET6,
                             data + runp->key, buf, sizeof (buf));
                  str = buf;
                }
              else
                str = data + runp->key;

              dbg_log (_("considering %s entry \"%s\", timeout %" PRIu64),
                       serv2str[runp->type], str, dh->timeout);
            }

          /* Check whether the entry timed out.  */
          if (dh->timeout < now)
            {
              /* This hash bucket could contain entries which need to
                 be looked at.  */
              mark[cnt] = true;

              first = MIN (first, cnt);
              last = MAX (last, cnt);

              /* We only have to look at the data of the first entries
                 since the count information is kept in the data part
                 which is shared.  */
              if (runp->first)
                {
                  /* At this point there are two choices: we reload the
                     value or we discard it.  Do not change NRELOADS if
                     we do not reload the record.  */
                  if ((reload_count != UINT_MAX
                       && __builtin_expect (dh->nreloads >= reload_count, 0))
                      /* We always remove negative entries.  */
                      || dh->notfound
                      /* Discard everything if the user explicitly
                         requests it.  */
                      || now == LONG_MAX)
                    {
                      /* Remove the value.  */
                      dh->usable = false;

                      /* We definitely have some garbage entries now.  */
                      any = true;
                    }
                  else
                    {
                      /* Reload the value.  We do this only for the
                         initially used key, not the additionally
                         added derived value.  */
                      assert (runp->type < LASTREQ
                              && readdfcts[runp->type] != NULL);

                      readdfcts[runp->type] (table, runp, dh);

                      /* If the entry has been replaced, we might need
                         cleanup.  */
                      any |= !dh->usable;
                    }
                }
            }
          else
            {
              assert (dh->usable);
              next_timeout = MIN (next_timeout, dh->timeout);
            }

          run = runp->next;
        }
    }
  while (cnt > 0);

  if (__builtin_expect (fd != -1, 0))
    {
      /* Reply to the INVALIDATE initiator that the cache has been
         invalidated.  */
      int32_t resp = 0;
      writeall (fd, &resp, sizeof (resp));
    }

  if (first <= last)
    {
      struct hashentry *head = NULL;

      /* Now we have to get the write lock since we are about to modify
         the table.  */
      if (__builtin_expect (pthread_rwlock_trywrlock (&table->lock) != 0, 0))
        {
          ++table->head->wrlockdelayed;
          pthread_rwlock_wrlock (&table->lock);
        }

      while (first <= last)
        {
          if (mark[first])
            {
              ref_t *old = &table->head->array[first];
              ref_t run = table->head->array[first];

              assert (run != ENDREF);
              do
                {
                  struct hashentry *runp = (struct hashentry *) (data + run);
                  struct datahead *dh
                    = (struct datahead *) (data + runp->packet);

                  if (! dh->usable)
                    {
                      /* We need the list only for debugging but it is
                         more costly to avoid creating the list than
                         doing it.  */
                      runp->dellist = head;
                      head = runp;

                      /* No need for an atomic operation, we have the
                         write lock.  */
                      --table->head->nentries;

                      run = *old = runp->next;
                    }
                  else
                    {
                      old = &runp->next;
                      run = runp->next;
                    }
                }
              while (run != ENDREF);
            }

          ++first;
        }

      /* It's all done.  */
      pthread_rwlock_unlock (&table->lock);

      /* Make sure the data is saved to disk.  */
      if (table->persistent)
        msync (table->head,
               data + table->head->first_free - (char *) table->head,
               MS_ASYNC);

      /* One extra pass if we do debugging.  */
      if (__builtin_expect (debug_level > 0, 0))
        {
          struct hashentry *runp = head;

          while (runp != NULL)
            {
              char buf[INET6_ADDRSTRLEN];
              const char *str;

              if (runp->type == GETHOSTBYADDR || runp->type == GETHOSTBYADDRv6)
                {
                  inet_ntop (runp->type == GETHOSTBYADDR ? AF_INET : AF_INET6,
                             data + runp->key, buf, sizeof (buf));
                  str = buf;
                }
              else
                str = data + runp->key;

              dbg_log ("remove %s entry \"%s\"", serv2str[runp->type], str);

              runp = runp->dellist;
            }
        }
    }

  if (__builtin_expect (! mark_use_alloca, 0))
    free (mark);

  /* Run garbage collection if any entry has been removed or replaced.  */
  if (any)
    gc (table);

  /* If there is no entry in the database and we therefore have no new
     timeout value, tell the caller to wake up in 24 hours.  */
  return next_timeout == NO_TIMEOUT ? 24 * 60 * 60 : next_timeout - now;
}
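
/* Illustrative sketch (not part of nscd): how a per-database pruning
   thread might consume the number of seconds returned by prune_cache.
   The loop itself is hypothetical; the table fields it touches
   (prune_lock, prune_cond, wakeup_time) are the ones cache_add above
   updates and signals when an entry with an earlier timeout is added.  */
#if 0
static void
example_prune_loop (struct database_dyn *table)
{
  pthread_mutex_lock (&table->prune_lock);
  while (1)
    {
      time_t now = time (NULL);

      /* Drop the mutex while pruning; prune_cache takes the table's
         rwlock itself when it actually removes entries.  */
      pthread_mutex_unlock (&table->prune_lock);
      time_t wait = prune_cache (table, now, -1);
      pthread_mutex_lock (&table->prune_lock);

      /* Sleep until the next entry can expire or until cache_add
         signals that an earlier wakeup is needed.  */
      table->wakeup_time = now + wait;
      struct timespec ts = { .tv_sec = table->wakeup_time, .tv_nsec = 0 };
      pthread_cond_timedwait (&table->prune_cond, &table->prune_lock, &ts);
    }
}
#endif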