/* Copyright (c) 1998, 1999, 2003-2006, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published
   by the Free Software Foundation; version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software Foundation,
   Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#include <assert.h>
#include <atomic.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <string.h>
#include <arpa/inet.h>
#include <rpcsvc/nis.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/stat.h>

#include "nscd.h"
#include "dbg_log.h"


/* Number of times a value is reloaded without being used.  UINT_MAX
   means unlimited.  */
unsigned int reload_count = DEFAULT_RELOAD_LIMIT;
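

/* Functions used to refresh an existing cache entry, indexed by request
   type.  Request types without an entry here stay NULL; prune_cache
   asserts that a reload function exists before calling it.  */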
static void (*const readdfcts[LASTREQ]) (struct database_dyn *,
                                         struct hashentry *,
                                         struct datahead *) =
{
  [GETPWBYNAME] = readdpwbyname,
  [GETPWBYUID] = readdpwbyuid,
  [GETGRBYNAME] = readdgrbyname,
  [GETGRBYGID] = readdgrbygid,
  [GETHOSTBYNAME] = readdhstbyname,
  [GETHOSTBYNAMEv6] = readdhstbynamev6,
  [GETHOSTBYADDR] = readdhstbyaddr,
  [GETHOSTBYADDRv6] = readdhstbyaddrv6,
  [INITGROUPS] = readdinitgroups,
  [GETSERVBYNAME] = readdservbyname,
  [GETSERVBYPORT] = readdservbyport
};


/* Search the cache for a matching entry and return it when found.  If
   this fails search the negative cache and return (void *) -1 if this
   search was successful.  Otherwise return NULL.

   This function must be called with the read-lock held.  */
struct datahead *
cache_search (request_type type, void *key, size_t len,
              struct database_dyn *table, uid_t owner)
{
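  /* The cache is a chained hash table kept in the database's data area:
     hash the key to pick a bucket, then walk that bucket's chain.  */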
  unsigned long int hash = __nis_hash (key, len) % table->head->module;

  unsigned long int nsearched = 0;
  struct datahead *result = NULL;

  ref_t work = table->head->array[hash];
  while (work != ENDREF)
    {
      ++nsearched;

      struct hashentry *here = (struct hashentry *) (table->data + work);

      if (type == here->type && len == here->len
          && memcmp (key, table->data + here->key, len) == 0
          && here->owner == owner)
        {
          /* We found the entry.  Increment the appropriate counter.  */
          struct datahead *dh
            = (struct datahead *) (table->data + here->packet);

          /* See whether we must ignore the entry.  */
          if (dh->usable)
            {
              /* We do not synchronize the memory here.  The statistics
                 data is not crucial, we synchronize only once in a while
                 in the cleanup threads.  */
              if (dh->notfound)
                ++table->head->neghit;
              else
                {
                  ++table->head->poshit;

                  /* Reset the reload counter: the entry has been used.  */
                  if (dh->nreloads != 0)
                    dh->nreloads = 0;
                }

              result = dh;
              break;
            }
        }

      work = here->next;
    }

  if (nsearched > table->head->maxnsearched)
    table->head->maxnsearched = nsearched;

  return result;
}


/* Add a new entry to the cache.  The return value is zero if the function
   call was successful.

   This function must be called with the read-lock held.

   We modify the table but we nevertheless only acquire a read-lock.
   This is ok since we use operations which would be safe even without
   locking, given that the `prune_cache' function never runs while the
   read-lock is held.  Using the read-lock reduces the chance of
   conflicts.  */
int
cache_add (int type, const void *key, size_t len, struct datahead *packet,
           bool first, struct database_dyn *table, uid_t owner)
{
  if (__builtin_expect (debug_level >= 2, 0))
    {
      const char *str;
      char buf[INET6_ADDRSTRLEN + 1];

      if (type == GETHOSTBYADDR || type == GETHOSTBYADDRv6)
        str = inet_ntop (type == GETHOSTBYADDR ? AF_INET : AF_INET6,
                         key, buf, sizeof (buf));
      else
        str = key;

      dbg_log (_("add new entry \"%s\" of type %s for %s to cache%s"),
               str, serv2str[type], dbnames[table - dbs],
               first ? _(" (first)") : "");
    }

  unsigned long int hash = __nis_hash (key, len) % table->head->module;
  struct hashentry *newp;

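  /* Allocate the hash entry from the database's own memory pool so that
     it can be referenced as an offset relative to table->data.  */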
  newp = mempool_alloc (table, sizeof (struct hashentry));
  /* If we cannot allocate memory, just do not do anything.  */
  if (newp == NULL)
    {
      ++table->head->addfailed;
      return -1;
    }

  newp->type = type;
  newp->first = first;
  newp->len = len;
  newp->key = (char *) key - table->data;
  assert (newp->key + newp->len <= table->head->first_free);
  newp->owner = owner;
  newp->packet = (char *) packet - table->data;

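  /* Only the read-lock is held here, so another thread may be adding to
     the same bucket concurrently; the compare-and-exchange loop below
     simply retries until the new entry is linked in.  */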
  /* Put the new entry in the first position.  */
  do
    newp->next = table->head->array[hash];
  while (atomic_compare_and_exchange_bool_acq (&table->head->array[hash],
                                               (ref_t) ((char *) newp
                                                        - table->data),
                                               (ref_t) newp->next));

  /* Update the statistics.  */
  if (packet->notfound)
    ++table->head->negmiss;
  else if (first)
    ++table->head->posmiss;

  /* We depend on this value being correct and at least as high as the
     real number of entries.  */
  atomic_increment (&table->head->nentries);

  /* It does not matter that we are not loading the just-incremented
     value, this is just for statistics.  */
  unsigned long int nentries = table->head->nentries;
  if (nentries > table->head->maxnentries)
    table->head->maxnentries = nentries;

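  /* For a persistent database the header lives in a file-backed mapping;
     schedule an asynchronous write-back of everything up to and including
     the bucket pointer we just changed.  */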
  if (table->persistent)
    msync ((void *) table->head,
           (char *) &table->head->array[hash] - (char *) table->head
           + sizeof (ref_t), MS_ASYNC);

  return 0;
}


/* Walk through the table and remove all entries whose lifetime has ended.

   We have a problem here.  To actually remove the entries we must get
   the write-lock.  But since we want to keep the time we have the
   lock as short as possible we cannot simply acquire the lock when we
   start looking for timed-out entries.

   Therefore we do it in two stages: first we look for entries which
   must be invalidated and remember them.  Then we get the lock and
   actually remove them.  This is complicated by the way we have to
   free the data structures since some hash table entries share the same
   data.  */
void
prune_cache (struct database_dyn *table, time_t now, int fd)
{
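  /* The head's MODULE field is the number of hash buckets; CNT walks
     them from the last bucket down to the first in the search pass.  */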
  size_t cnt = table->head->module;

  /* If this table is not actually used don't do anything.  */
  if (cnt == 0)
    {
      if (fd != -1)
        {
          /* Reply to the INVALIDATE initiator.  */
          int32_t resp = 0;
          writeall (fd, &resp, sizeof (resp));
        }
      return;
    }

  /* This function can be called from the cleanup thread but also in
     response to an invalidate command.  Make sure only one thread is
     running.  When not serving an INVALIDATE request, there is no need
     for the second thread to wait around.  */
  if (fd == -1)
    {
      if (pthread_mutex_trylock (&table->prunelock) != 0)
        /* The work is already being done.  */
        return;
    }
  else
    pthread_mutex_lock (&table->prunelock);

  /* If we check for the modification of the underlying file we invalidate
     the entries also in this case.  */
  if (table->check_file)
    {
      struct stat64 st;

      if (stat64 (table->filename, &st) < 0)
        {
          char buf[128];
          /* We cannot stat() the file; disable file checking if the
             file does not exist.  */
          dbg_log (_("cannot stat() file `%s': %s"),
                   table->filename, strerror_r (errno, buf, sizeof (buf)));
          if (errno == ENOENT)
            table->check_file = 0;
        }
      else if (st.st_mtime != table->file_mtime)
        {
          /* The file changed.  Invalidate all entries.  */
          now = LONG_MAX;
          table->file_mtime = st.st_mtime;
        }
    }

  /* We run through the table and find values which are not valid anymore.

     Note that for this initial step, finding the entries to be removed,
     we don't need to get any lock.  It is at all times assured that the
     linked lists are set up correctly and that no second thread prunes
     the cache.  */
  size_t first = cnt + 1;
  size_t last = 0;
  char *const data = table->data;
  bool any = false;

  if (__builtin_expect (debug_level > 2, 0))
    dbg_log (_("pruning %s cache; time %ld"),
             dbnames[table - dbs], (long int) now);

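  /* First pass: walk every bucket chain, remember the range of buckets
     containing timed-out entries, and either reload those entries in
     place or mark their data as unusable.  No lock is needed for this.  */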
  do
    {
      ref_t run = table->head->array[--cnt];

      while (run != ENDREF)
        {
          struct hashentry *runp = (struct hashentry *) (data + run);
          struct datahead *dh = (struct datahead *) (data + runp->packet);

          /* Some debug support.  */
          if (__builtin_expect (debug_level > 2, 0))
            {
              char buf[INET6_ADDRSTRLEN];
              const char *str;

              if (runp->type == GETHOSTBYADDR || runp->type == GETHOSTBYADDRv6)
                {
                  inet_ntop (runp->type == GETHOSTBYADDR ? AF_INET : AF_INET6,
                             data + runp->key, buf, sizeof (buf));
                  str = buf;
                }
              else
                str = data + runp->key;

              dbg_log (_("considering %s entry \"%s\", timeout %" PRIu64),
                       serv2str[runp->type], str, dh->timeout);
            }

          /* Check whether the entry timed out.  */
          if (dh->timeout < now)
            {
              /* This hash bucket could contain entries which need to
                 be looked at again in the removal pass below.  */
              first = MIN (first, cnt);
              last = MAX (last, cnt);

              /* We only have to look at the data of the first entries
                 since the count information is kept in the data part
                 which is shared.  */
              if (runp->first)
                {
                  /* At this point there are two choices: we reload the
                     value or we discard it.  Do not change NRELOADS if
                     we do not reload the record.  */
                  if ((reload_count != UINT_MAX
                       && __builtin_expect (dh->nreloads >= reload_count, 0))
                      /* We always remove negative entries.  */
                      || dh->notfound
                      /* Discard everything if the user explicitly
                         requests it.  */
                      || now == LONG_MAX)
                    {
                      /* Remove the value.  */
                      dh->usable = false;

                      /* We definitely have some garbage entries now.  */
                      any = true;
                    }
                  else
                    {
                      /* Reload the value.  We do this only for the
                         initially used key, not the additionally
                         added derived value.  */
                      assert (runp->type < LASTREQ
                              && readdfcts[runp->type] != NULL);

                      readdfcts[runp->type] (table, runp, dh);

                      /* If the entry has been replaced, we might need
                         a garbage collection run as well.  */
                      any |= ! dh->usable;
                    }
                }
            }

          run = runp->next;
        }
    }
  while (cnt > 0);

  if (fd != -1)
    {
      /* Reply to the INVALIDATE initiator that the cache has been
         invalidated.  */
      int32_t resp = 0;
      writeall (fd, &resp, sizeof (resp));
    }

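  /* Second pass: with the write-lock held, unlink every entry whose data
     head is no longer usable and collect it for the debug output below.  */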
  if (first <= last)
    {
      struct hashentry *head = NULL;

      /* Now we have to get the write-lock since we are about to modify
         the table.  */
      if (__builtin_expect (pthread_rwlock_trywrlock (&table->lock) != 0, 0))
        {
          ++table->head->wrlockdelayed;
          pthread_rwlock_wrlock (&table->lock);
        }

      while (first <= last)
        {
          ref_t *old = &table->head->array[first];
          ref_t run = table->head->array[first];

          while (run != ENDREF)
            {
              struct hashentry *runp = (struct hashentry *) (data + run);
              struct datahead *dh
                = (struct datahead *) (data + runp->packet);

              if (! dh->usable)
                {
                  /* We need the list only for debugging but it is
                     more costly to avoid creating the list than
                     doing it.  */
                  runp->dellist = head;
                  head = runp;

                  /* No need for an atomic operation, we have the
                     write-lock.  */
                  --table->head->nentries;

                  run = *old = runp->next;
                }
              else
                {
                  old = &runp->next;
                  run = runp->next;
                }
            }

          ++first;
        }

      pthread_rwlock_unlock (&table->lock);

      /* Make sure the data is saved to disk.  */
      if (table->persistent)
        msync ((void *) table->head,
               data + table->head->first_free - (char *) table->head,
               MS_ASYNC);

      /* One extra pass if we do debugging.  */
      if (__builtin_expect (debug_level > 0, 0))
        {
          struct hashentry *runp = head;

          while (runp != NULL)
            {
              char buf[INET6_ADDRSTRLEN];
              const char *str;

              if (runp->type == GETHOSTBYADDR || runp->type == GETHOSTBYADDRv6)
                {
                  inet_ntop (runp->type == GETHOSTBYADDR ? AF_INET : AF_INET6,
                             data + runp->key, buf, sizeof (buf));
                  str = buf;
                }
              else
                str = data + runp->key;

              dbg_log ("remove %s entry \"%s\"", serv2str[runp->type], str);

              runp = runp->dellist;
            }
        }
    }

  /* Run garbage collection if any entry has been removed or replaced.  */
  if (any)
    gc (table);

  pthread_mutex_unlock (&table->prunelock);
}