[glibc.git] / nscd / cache.c
/* Copyright (c) 1998, 1999, 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <atomic.h>
#include <errno.h>
#include <error.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <libintl.h>
#include <arpa/inet.h>
#include <rpcsvc/nis.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/uio.h>

#include "nscd.h"
#include "dbg_log.h"

/* Number of times a value is reloaded without being used.  UINT_MAX
   means unlimited.  */
unsigned int reload_count = DEFAULT_RELOAD_LIMIT;

/* Search the cache for a matching entry and return it when found.  If
   this fails search the negative cache and return (void *) -1 if this
   search was successful.  Otherwise return NULL.

   This function must be called with the read-lock held.  */
struct datahead *
cache_search (request_type type, void *key, size_t len,
              struct database_dyn *table, uid_t owner)
{
  unsigned long int hash = __nis_hash (key, len) % table->head->module;

  unsigned long int nsearched = 0;
  struct datahead *result = NULL;
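
  /* Walk the hash chain for this bucket.  The references stored in the
     table are offsets into the mapped data area, not pointers.  */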
  ref_t work = table->head->array[hash];
  while (work != ENDREF)
    {
      ++nsearched;

      struct hashentry *here = (struct hashentry *) (table->data + work);

      if (type == here->type && len == here->len
          && memcmp (key, table->data + here->key, len) == 0
          && here->owner == owner)
        {
          /* We found the entry.  Increment the appropriate counter.  */
          struct datahead *dh
            = (struct datahead *) (table->data + here->packet);

          /* See whether we must ignore the entry.  */
          if (dh->usable)
            {
              /* We do not synchronize the memory here.  The statistics
                 data is not crucial, we synchronize only once in a while
                 in the cleanup threads.  */
              if (dh->notfound)
                ++table->head->neghit;
              else
                {
                  ++table->head->poshit;
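
                  /* Using the entry resets the reload counter so that
                     frequently requested data is not dropped merely
                     because it reached the reload limit.  */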
                  if (dh->nreloads != 0)
                    dh->nreloads = 0;
                }

              result = dh;
              break;
            }
        }

      work = here->next;
    }

  if (nsearched > table->head->maxnsearched)
    table->head->maxnsearched = nsearched;

  return result;
}

/* Add a new entry to the cache.  The return value is zero if the function
   call was successful.

   This function must be called with the read-lock held.

   We modify the table but we nevertheless only acquire a read-lock.
   This is ok since we use operations which would be safe even without
   locking, given that the `prune_cache' function never runs.  Using
   the readlock reduces the chance of conflicts.  */
int
cache_add (int type, const void *key, size_t len, struct datahead *packet,
           bool first, struct database_dyn *table,
           uid_t owner)
{
  if (__builtin_expect (debug_level >= 2, 0))
    {
      const char *str;
      char buf[INET6_ADDRSTRLEN + 1];
      if (type == GETHOSTBYADDR || type == GETHOSTBYADDRv6)
        str = inet_ntop (type == GETHOSTBYADDR ? AF_INET : AF_INET6,
                         key, buf, sizeof (buf));
      else
        str = key;

      dbg_log (_("add new entry \"%s\" of type %s for %s to cache%s"),
               str, serv2str[type], dbnames[table - dbs],
               first ? " (first)" : "");
    }

  unsigned long int hash = __nis_hash (key, len) % table->head->module;
  struct hashentry *newp;

  newp = mempool_alloc (table, sizeof (struct hashentry));
  /* If we cannot allocate memory, just do not do anything.  */
  if (newp == NULL)
    return -1;

  newp->type = type;
  newp->first = first;
  newp->len = len;
  newp->key = (char *) key - table->data;
  assert (newp->key + newp->len <= table->head->first_free);
  newp->owner = owner;
  newp->packet = (char *) packet - table->data;

  /* Put the new entry in the first position.  */
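  /* The bucket list is updated without holding the write lock: if
     another thread pushed an entry after we read the old head, the
     compare-and-exchange fails and the loop retries with the fresh
     head value.  */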
  do
    newp->next = table->head->array[hash];
  while (atomic_compare_and_exchange_bool_acq (&table->head->array[hash],
                                               (ref_t) ((char *) newp
                                                        - table->data),
                                               (ref_t) newp->next));

  /* Update the statistics.  */
  if (packet->notfound)
    ++table->head->negmiss;
  else if (first)
    ++table->head->posmiss;

  /* We depend on this value being correct and at least as high as the
     real number of entries.  */
  atomic_increment (&table->head->nentries);

  /* It does not matter that we are not loading the just-incremented
     value; this is just for statistics.  */
  unsigned long int nentries = table->head->nentries;
  if (nentries > table->head->maxnentries)
    table->head->maxnentries = nentries;

  return 0;
}

/* Walk through the table and remove all entries whose lifetime has ended.

   We have a problem here.  To actually remove the entries we must get
   the write-lock.  But since we want to keep the time we have the
   lock as short as possible we cannot simply acquire the lock when we
   start looking for timed-out entries.

   Therefore we do it in two stages: first we look for entries which
   must be invalidated and remember them.  Then we get the lock and
   actually remove them.  This is complicated by the way we have to
   free the data structures since some hash table entries share the same
   data.  */
void
prune_cache (struct database_dyn *table, time_t now)
{
  size_t cnt = table->head->module;

  /* If this table is not actually used don't do anything.  */
  if (cnt == 0)
    return;

  /* If we check for the modification of the underlying file we invalidate
     the entries also in this case.  */
  if (table->check_file)
    {
      struct stat st;

      if (stat (table->filename, &st) < 0)
        {
          char buf[128];
          /* We cannot stat() the file; disable file checking if the
             file does not exist.  */
          dbg_log (_("cannot stat() file `%s': %s"),
                   table->filename, strerror_r (errno, buf, sizeof (buf)));
          if (errno == ENOENT)
            table->check_file = 0;
        }
      else
        {
          if (st.st_mtime != table->file_mtime)
            {
              /* The file changed.  Invalidate all entries.  */
              now = LONG_MAX;
              table->file_mtime = st.st_mtime;
            }
        }
    }

  /* We run through the table and find values which are not valid anymore.

     Note that for the initial step, finding the entries to be removed,
     we don't need to get any lock.  It is at all times assured that the
     linked lists are set up correctly and that no second thread prunes
     the cache.  */
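  /* MARK records which buckets contain at least one timed-out entry;
     FIRST and LAST bound the range of bucket indices that has to be
     revisited once the write lock is taken.  */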
  bool mark[cnt];
  size_t first = cnt + 1;
  size_t last = 0;
  char *const data = table->data;
  bool any = false;

  do
    {
      ref_t run = table->head->array[--cnt];

      while (run != ENDREF)
        {
          struct hashentry *runp = (struct hashentry *) (data + run);
          struct datahead *dh = (struct datahead *) (data + runp->packet);

          /* Check whether the entry timed out.  */
          if (dh->timeout < now)
            {
              /* This hash bucket could contain entries which need to
                 be looked at.  */
              mark[cnt] = true;

              first = MIN (first, cnt);
              last = MAX (last, cnt);

              /* We only have to look at the data of the first entries
                 since the count information is kept in the data part
                 which is shared.  */
              if (runp->first)
                {
                  /* At this point there are two choices: we reload the
                     value or we discard it.  Do not change NRELOADS if
                     we do not reload the record.  */
                  if ((reload_count != UINT_MAX
                       && __builtin_expect (dh->nreloads >= reload_count, 0))
                      /* We always remove negative entries.  */
                      || dh->notfound
                      /* Discard everything if the user explicitly
                         requests it.  */
                      || now == LONG_MAX)
                    {
                      /* Remove the value.  */
                      dh->usable = false;

                      /* We definitely have some garbage entries now.  */
                      any = true;
                    }
                  else
                    {
                      /* Reload the value.  We do this only for the
                         initially used key, not the additionally
                         added derived value.  */
                      switch (runp->type)
                        {
                        case GETPWBYNAME:
                          readdpwbyname (table, runp, dh);
                          break;

                        case GETPWBYUID:
                          readdpwbyuid (table, runp, dh);
                          break;

                        case GETGRBYNAME:
                          readdgrbyname (table, runp, dh);
                          break;

                        case GETGRBYGID:
                          readdgrbygid (table, runp, dh);
                          break;

                        case GETHOSTBYNAME:
                          readdhstbyname (table, runp, dh);
                          break;

                        case GETHOSTBYNAMEv6:
                          readdhstbynamev6 (table, runp, dh);
                          break;

                        case GETHOSTBYADDR:
                          readdhstbyaddr (table, runp, dh);
                          break;

                        case GETHOSTBYADDRv6:
                          readdhstbyaddrv6 (table, runp, dh);
                          break;

                        case GETAI:
                          readdhstai (table, runp, dh);
                          break;

                        case INITGROUPS:
                          readdinitgroups (table, runp, dh);
                          break;

                        default:
                          assert (! "should never happen");
                        }

                      /* If the entry has been replaced, we might need
                         cleanup.  */
                      any |= !dh->usable;
                    }
                }
            }
          else
            assert (dh->usable);

          run = runp->next;
        }
    }
  while (cnt > 0);

  if (first <= last)
    {
      struct hashentry *head = NULL;

      /* Now we have to get the write lock since we are about to modify
         the table.  */
      if (__builtin_expect (pthread_rwlock_trywrlock (&table->lock) != 0, 0))
        {
          ++table->head->wrlockdelayed;
          pthread_rwlock_wrlock (&table->lock);
        }
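
      /* Second pass: with the write lock held, walk the marked buckets
         again and unlink every entry whose data block is no longer
         usable.  */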
      while (first <= last)
        {
          if (mark[first])
            {
              ref_t *old = &table->head->array[first];
              ref_t run = table->head->array[first];
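
              /* OLD always points to the link that references the
                 current entry, so taking an entry out of the chain is a
                 single store of its successor through OLD.  */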
              while (run != ENDREF)
                {
                  struct hashentry *runp = (struct hashentry *) (data + run);
                  struct datahead *dh
                    = (struct datahead *) (data + runp->packet);

                  if (! dh->usable)
                    {
                      /* We need the list only for debugging but it is
                         more costly to avoid creating the list than
                         doing it.  */
                      runp->dellist = head;
                      head = runp;

                      /* No need for an atomic operation, we have the
                         write lock.  */
                      --table->head->nentries;

                      run = *old = runp->next;
                    }
                  else
                    {
                      old = &runp->next;
                      run = runp->next;
                    }
                }
            }

          ++first;
        }

      /* It's all done.  */
      pthread_rwlock_unlock (&table->lock);

      /* Make sure the data is saved to disk.  */
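      /* The flushed range runs from the mapped header up to the end of
         the used part of the data area (FIRST_FREE); MS_ASYNC schedules
         the write-back without blocking this thread.  */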
      if (table->persistent)
        msync (table->head,
               table->data + table->head->first_free - (char *) table->head,
               MS_ASYNC);

      /* One extra pass if we do debugging.  */
      if (__builtin_expect (debug_level > 0, 0))
        {
          struct hashentry *runp = head;

          while (runp != NULL)
            {
              char buf[INET6_ADDRSTRLEN];
              const char *str;

              if (runp->type == GETHOSTBYADDR || runp->type == GETHOSTBYADDRv6)
                {
                  inet_ntop (runp->type == GETHOSTBYADDR ? AF_INET : AF_INET6,
                             table->data + runp->key, buf, sizeof (buf));
                  str = buf;
                }
              else
                str = table->data + runp->key;

              dbg_log ("remove %s entry \"%s\"", serv2str[runp->type], str);

              runp = runp->dellist;
            }
        }
    }

  /* Run garbage collection if any entry has been removed or replaced.  */
  if (any)
    gc (table);
}