/* Copyright (c) 1998, 1999, 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <string.h>
#include <arpa/inet.h>
#include <rpcsvc/nis.h>
#include <sys/mman.h>
#include <sys/param.h>
39 /* Number of times a value is reloaded without being used. UINT_MAX
41 unsigned int reload_count
= DEFAULT_RELOAD_LIMIT
;
44 /* Search the cache for a matching entry and return it when found. If
45 this fails search the negative cache and return (void *) -1 if this
46 search was successful. Otherwise return NULL.
48 This function must be called with the read-lock held. */
50 cache_search (request_type type
, void *key
, size_t len
,
51 struct database_dyn
*table
, uid_t owner
)
53 unsigned long int hash
= __nis_hash (key
, len
) % table
->head
->module
;
55 unsigned long int nsearched
= 0;
56 struct datahead
*result
= NULL
;
58 ref_t work
= table
->head
->array
[hash
];
59 while (work
!= ENDREF
)
63 struct hashentry
*here
= (struct hashentry
*) (table
->data
+ work
);
65 if (type
== here
->type
&& len
== here
->len
66 && memcmp (key
, table
->data
+ here
->key
, len
) == 0
67 && here
->owner
== owner
)
69 /* We found the entry. Increment the appropriate counter. */
71 = (struct datahead
*) (table
->data
+ here
->packet
);
73 /* See whether we must ignore the entry. */
76 /* We do not synchronize the memory here. The statistics
77 data is not crucial, we synchronize only once in a while
78 in the cleanup threads. */
80 ++table
->head
->neghit
;
83 ++table
->head
->poshit
;
85 if (dh
->nreloads
!= 0)
97 if (nsearched
> table
->head
->maxnsearched
)
98 table
->head
->maxnsearched
= nsearched
;
103 /* Add a new entry to the cache. The return value is zero if the function
106 This function must be called with the read-lock held.
108 We modify the table but we nevertheless only acquire a read-lock.
109 This is ok since we use operations which would be safe even without
110 locking, given that the `prune_cache' function never runs. Using
111 the readlock reduces the chance of conflicts. */
113 cache_add (int type
, const void *key
, size_t len
, struct datahead
*packet
,
114 bool first
, struct database_dyn
*table
,
117 if (__builtin_expect (debug_level
>= 2, 0))
118 dbg_log (_("add new entry \"%s\" of type %s for %s to cache%s"),
119 (const char *) key
, serv2str
[type
], dbnames
[table
- dbs
],
120 first
? " (first)" : "");
122 unsigned long int hash
= __nis_hash (key
, len
) % table
->head
->module
;
123 struct hashentry
*newp
;
125 newp
= mempool_alloc (table
, sizeof (struct hashentry
));
126 /* If we cannot allocate memory, just do not do anything. */
133 newp
->key
= (char *) key
- table
->data
;
134 assert (newp
->key
+ newp
->len
<= table
->head
->first_free
);
136 newp
->packet
= (char *) packet
- table
->data
;
138 /* Put the new entry in the first position. */
140 newp
->next
= table
->head
->array
[hash
];
141 while (atomic_compare_and_exchange_bool_acq (&table
->head
->array
[hash
],
142 (ref_t
) ((char *) newp
144 (ref_t
) newp
->next
));
146 /* Update the statistics. */
147 if (packet
->notfound
)
148 ++table
->head
->negmiss
;
150 ++table
->head
->posmiss
;
152 /* We depend on this value being correct and at least as high as the
153 real number of entries. */
154 atomic_increment (&table
->head
->nentries
);
156 /* It does not matter that we are not loading the just increment
157 value, this is just for statistics. */
158 unsigned long int nentries
= table
->head
->nentries
;
159 if (nentries
> table
->head
->maxnentries
)
160 table
->head
->maxnentries
= nentries
;
165 /* Walk through the table and remove all entries which lifetime ended.
167 We have a problem here. To actually remove the entries we must get
168 the write-lock. But since we want to keep the time we have the
169 lock as short as possible we cannot simply acquire the lock when we
170 start looking for timedout entries.
172 Therefore we do it in two stages: first we look for entries which
173 must be invalidated and remember them. Then we get the lock and
174 actually remove them. This is complicated by the way we have to
175 free the data structures since some hash table entries share the same
178 prune_cache (struct database_dyn
*table
, time_t now
)
180 size_t cnt
= table
->head
->module
;
182 /* If this table is not actually used don't do anything. */
186 /* If we check for the modification of the underlying file we invalidate
187 the entries also in this case. */
188 if (table
->check_file
)
192 if (stat (table
->filename
, &st
) < 0)
195 /* We cannot stat() the file, disable file checking if the
196 file does not exist. */
197 dbg_log (_("cannot stat() file `%s': %s"),
198 table
->filename
, strerror_r (errno
, buf
, sizeof (buf
)));
200 table
->check_file
= 0;
204 if (st
.st_mtime
!= table
->file_mtime
)
206 /* The file changed. Invalidate all entries. */
208 table
->file_mtime
= st
.st_mtime
;
213 /* We run through the table and find values which are not valid anymore.
215 Note that for the initial step, finding the entries to be removed,
216 we don't need to get any lock. It is at all timed assured that the
217 linked lists are set up correctly and that no second thread prunes
220 size_t first
= cnt
+ 1;
222 char *const data
= table
->data
;
227 ref_t run
= table
->head
->array
[--cnt
];
229 while (run
!= ENDREF
)
231 struct hashentry
*runp
= (struct hashentry
*) (data
+ run
);
232 struct datahead
*dh
= (struct datahead
*) (data
+ runp
->packet
);
234 /* Check whether the entry timed out. */
235 if (dh
->timeout
< now
)
237 /* This hash bucket could contain entries which need to
241 first
= MIN (first
, cnt
);
242 last
= MAX (last
, cnt
);
244 /* We only have to look at the data of the first entries
245 since the count information is kept in the data part
250 /* At this point there are two choices: we reload the
251 value or we discard it. Do not change NRELOADS if
252 we never not reload the record. */
253 if ((reload_count
!= UINT_MAX
254 && __builtin_expect (dh
->nreloads
>= reload_count
, 0))
255 /* We always remove negative entries. */
257 /* Discard everything if the user explicitly
261 /* Remove the value. */
264 /* We definitely have some garbage entries now. */
269 /* Reload the value. We do this only for the
270 initially used key, not the additionally
271 added derived value. */
275 readdpwbyname (table
, runp
, dh
);
279 readdpwbyuid (table
, runp
, dh
);
283 readdgrbyname (table
, runp
, dh
);
287 readdgrbygid (table
, runp
, dh
);
291 readdhstbyname (table
, runp
, dh
);
294 case GETHOSTBYNAMEv6
:
295 readdhstbynamev6 (table
, runp
, dh
);
299 readdhstbyaddr (table
, runp
, dh
);
302 case GETHOSTBYADDRv6
:
303 readdhstbyaddrv6 (table
, runp
, dh
);
307 assert (! "should never happen");
310 /* If the entry has been replaced, we might need
326 struct hashentry
*head
= NULL
;
328 /* Now we have to get the write lock since we are about to modify
330 if (__builtin_expect (pthread_rwlock_trywrlock (&table
->lock
) != 0, 0))
332 ++table
->head
->wrlockdelayed
;
333 pthread_rwlock_wrlock (&table
->lock
);
336 while (first
<= last
)
340 ref_t
*old
= &table
->head
->array
[first
];
341 ref_t run
= table
->head
->array
[first
];
343 while (run
!= ENDREF
)
345 struct hashentry
*runp
= (struct hashentry
*) (data
+ run
);
347 = (struct datahead
*) (data
+ runp
->packet
);
351 /* We need the list only for debugging but it is
352 more costly to avoid creating the list than
354 runp
->dellist
= head
;
357 /* No need for an atomic operation, we have the
359 --table
->head
->nentries
;
361 run
= *old
= runp
->next
;
375 pthread_rwlock_unlock (&table
->lock
);
377 /* Make sure the data is saved to disk. */
378 if (table
->persistent
)
380 table
->data
+ table
->head
->first_free
- (char *) table
->head
,
383 /* One extra pass if we do debugging. */
384 if (__builtin_expect (debug_level
> 0, 0))
386 struct hashentry
*runp
= head
;
390 char buf
[INET6_ADDRSTRLEN
];
393 if (runp
->type
== GETHOSTBYADDR
|| runp
->type
== GETHOSTBYADDRv6
)
395 inet_ntop (runp
->type
== GETHOSTBYADDR
? AF_INET
: AF_INET6
,
396 table
->data
+ runp
->key
, buf
, sizeof (buf
));
400 str
= table
->data
+ runp
->key
;
402 dbg_log ("remove %s entry \"%s\"", serv2str
[runp
->type
], str
);
404 runp
= runp
->dellist
;
409 /* Run garbage collection if any entry has been removed or replaced. */