1 /* Copyright (c) 1998, 1999, 2003, 2004, 2005 Free Software Foundation, Inc.
2 This file is part of the GNU C Library.
3 Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA.  */
28 #include <arpa/inet.h>
29 #include <rpcsvc/nis.h>
31 #include <sys/param.h>
39 /* Number of times a value is reloaded without being used. UINT_MAX
41 unsigned int reload_count
= DEFAULT_RELOAD_LIMIT
;
44 /* Search the cache for a matching entry and return it when found. If
45 this fails search the negative cache and return (void *) -1 if this
46 search was successful. Otherwise return NULL.
48 This function must be called with the read-lock held. */
50 cache_search (request_type type
, void *key
, size_t len
,
51 struct database_dyn
*table
, uid_t owner
)
53 unsigned long int hash
= __nis_hash (key
, len
) % table
->head
->module
;
55 unsigned long int nsearched
= 0;
56 struct datahead
*result
= NULL
;
58 ref_t work
= table
->head
->array
[hash
];
59 while (work
!= ENDREF
)
63 struct hashentry
*here
= (struct hashentry
*) (table
->data
+ work
);
65 if (type
== here
->type
&& len
== here
->len
66 && memcmp (key
, table
->data
+ here
->key
, len
) == 0
67 && here
->owner
== owner
)
69 /* We found the entry. Increment the appropriate counter. */
71 = (struct datahead
*) (table
->data
+ here
->packet
);
73 /* See whether we must ignore the entry. */
76 /* We do not synchronize the memory here. The statistics
77 data is not crucial, we synchronize only once in a while
78 in the cleanup threads. */
80 ++table
->head
->neghit
;
83 ++table
->head
->poshit
;
85 if (dh
->nreloads
!= 0)
97 if (nsearched
> table
->head
->maxnsearched
)
98 table
->head
->maxnsearched
= nsearched
;
103 /* Add a new entry to the cache. The return value is zero if the function
106 This function must be called with the read-lock held.
108 We modify the table but we nevertheless only acquire a read-lock.
109 This is ok since we use operations which would be safe even without
110 locking, given that the `prune_cache' function never runs. Using
111 the readlock reduces the chance of conflicts. */
113 cache_add (int type
, const void *key
, size_t len
, struct datahead
*packet
,
114 bool first
, struct database_dyn
*table
,
117 if (__builtin_expect (debug_level
>= 2, 0))
120 char buf
[INET6_ADDRSTRLEN
+ 1];
121 if (type
== GETHOSTBYADDR
|| type
== GETHOSTBYADDRv6
)
122 str
= inet_ntop (type
== GETHOSTBYADDR
? AF_INET
: AF_INET6
,
123 key
, buf
, sizeof (buf
));
127 dbg_log (_("add new entry \"%s\" of type %s for %s to cache%s"),
128 str
, serv2str
[type
], dbnames
[table
- dbs
],
129 first
? " (first)" : "");
132 unsigned long int hash
= __nis_hash (key
, len
) % table
->head
->module
;
133 struct hashentry
*newp
;
135 newp
= mempool_alloc (table
, sizeof (struct hashentry
));
136 /* If we cannot allocate memory, just do not do anything. */
143 newp
->key
= (char *) key
- table
->data
;
144 assert (newp
->key
+ newp
->len
<= table
->head
->first_free
);
146 newp
->packet
= (char *) packet
- table
->data
;
148 /* Put the new entry in the first position. */
150 newp
->next
= table
->head
->array
[hash
];
151 while (atomic_compare_and_exchange_bool_acq (&table
->head
->array
[hash
],
152 (ref_t
) ((char *) newp
154 (ref_t
) newp
->next
));
156 /* Update the statistics. */
157 if (packet
->notfound
)
158 ++table
->head
->negmiss
;
160 ++table
->head
->posmiss
;
162 /* We depend on this value being correct and at least as high as the
163 real number of entries. */
164 atomic_increment (&table
->head
->nentries
);
166 /* It does not matter that we are not loading the just increment
167 value, this is just for statistics. */
168 unsigned long int nentries
= table
->head
->nentries
;
169 if (nentries
> table
->head
->maxnentries
)
170 table
->head
->maxnentries
= nentries
;
172 if (table
->persistent
)
174 msync ((void *) table
->head
,
175 (char *) &table
->head
->array
[hash
] - (char *) table
->head
176 + sizeof (ref_t
), MS_ASYNC
);
181 /* Walk through the table and remove all entries which lifetime ended.
183 We have a problem here. To actually remove the entries we must get
184 the write-lock. But since we want to keep the time we have the
185 lock as short as possible we cannot simply acquire the lock when we
186 start looking for timedout entries.
188 Therefore we do it in two stages: first we look for entries which
189 must be invalidated and remember them. Then we get the lock and
190 actually remove them. This is complicated by the way we have to
191 free the data structures since some hash table entries share the same
194 prune_cache (struct database_dyn
*table
, time_t now
)
196 size_t cnt
= table
->head
->module
;
198 /* If this table is not actually used don't do anything. */
202 /* If we check for the modification of the underlying file we invalidate
203 the entries also in this case. */
204 if (table
->check_file
)
208 if (stat (table
->filename
, &st
) < 0)
211 /* We cannot stat() the file, disable file checking if the
212 file does not exist. */
213 dbg_log (_("cannot stat() file `%s': %s"),
214 table
->filename
, strerror_r (errno
, buf
, sizeof (buf
)));
216 table
->check_file
= 0;
220 if (st
.st_mtime
!= table
->file_mtime
)
222 /* The file changed. Invalidate all entries. */
224 table
->file_mtime
= st
.st_mtime
;
229 /* We run through the table and find values which are not valid anymore.
231 Note that for the initial step, finding the entries to be removed,
232 we don't need to get any lock. It is at all timed assured that the
233 linked lists are set up correctly and that no second thread prunes
236 size_t first
= cnt
+ 1;
238 char *const data
= table
->data
;
243 ref_t run
= table
->head
->array
[--cnt
];
245 while (run
!= ENDREF
)
247 struct hashentry
*runp
= (struct hashentry
*) (data
+ run
);
248 struct datahead
*dh
= (struct datahead
*) (data
+ runp
->packet
);
250 /* Check whether the entry timed out. */
251 if (dh
->timeout
< now
)
253 /* This hash bucket could contain entries which need to
257 first
= MIN (first
, cnt
);
258 last
= MAX (last
, cnt
);
260 /* We only have to look at the data of the first entries
261 since the count information is kept in the data part
266 /* At this point there are two choices: we reload the
267 value or we discard it. Do not change NRELOADS if
268 we never not reload the record. */
269 if ((reload_count
!= UINT_MAX
270 && __builtin_expect (dh
->nreloads
>= reload_count
, 0))
271 /* We always remove negative entries. */
273 /* Discard everything if the user explicitly
277 /* Remove the value. */
280 /* We definitely have some garbage entries now. */
285 /* Reload the value. We do this only for the
286 initially used key, not the additionally
287 added derived value. */
291 readdpwbyname (table
, runp
, dh
);
295 readdpwbyuid (table
, runp
, dh
);
299 readdgrbyname (table
, runp
, dh
);
303 readdgrbygid (table
, runp
, dh
);
307 readdhstbyname (table
, runp
, dh
);
310 case GETHOSTBYNAMEv6
:
311 readdhstbynamev6 (table
, runp
, dh
);
315 readdhstbyaddr (table
, runp
, dh
);
318 case GETHOSTBYADDRv6
:
319 readdhstbyaddrv6 (table
, runp
, dh
);
323 readdhstai (table
, runp
, dh
);
327 readdinitgroups (table
, runp
, dh
);
331 assert (! "should never happen");
334 /* If the entry has been replaced, we might need
350 struct hashentry
*head
= NULL
;
352 /* Now we have to get the write lock since we are about to modify
354 if (__builtin_expect (pthread_rwlock_trywrlock (&table
->lock
) != 0, 0))
356 ++table
->head
->wrlockdelayed
;
357 pthread_rwlock_wrlock (&table
->lock
);
360 while (first
<= last
)
364 ref_t
*old
= &table
->head
->array
[first
];
365 ref_t run
= table
->head
->array
[first
];
367 while (run
!= ENDREF
)
369 struct hashentry
*runp
= (struct hashentry
*) (data
+ run
);
371 = (struct datahead
*) (data
+ runp
->packet
);
375 /* We need the list only for debugging but it is
376 more costly to avoid creating the list than
378 runp
->dellist
= head
;
381 /* No need for an atomic operation, we have the
383 --table
->head
->nentries
;
385 run
= *old
= runp
->next
;
399 pthread_rwlock_unlock (&table
->lock
);
401 /* Make sure the data is saved to disk. */
402 if (table
->persistent
)
404 table
->data
+ table
->head
->first_free
- (char *) table
->head
,
407 /* One extra pass if we do debugging. */
408 if (__builtin_expect (debug_level
> 0, 0))
410 struct hashentry
*runp
= head
;
414 char buf
[INET6_ADDRSTRLEN
];
417 if (runp
->type
== GETHOSTBYADDR
|| runp
->type
== GETHOSTBYADDRv6
)
419 inet_ntop (runp
->type
== GETHOSTBYADDR
? AF_INET
: AF_INET6
,
420 table
->data
+ runp
->key
, buf
, sizeof (buf
));
424 str
= table
->data
+ runp
->key
;
426 dbg_log ("remove %s entry \"%s\"", serv2str
[runp
->type
], str
);
428 runp
= runp
->dellist
;
433 /* Run garbage collection if any entry has been removed or replaced. */