Fix typo.
[glibc.git] / nscd / mem.c
blobc3a0f967020cc58cf8d729ee4d3bcffcf31866c9
1 /* Cache memory handling.
2 Copyright (C) 2004 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Ulrich Drepper <drepper@redhat.com>, 2004.
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public
8 License as published by the Free Software Foundation; either
9 version 2.1 of the License, or (at your option) any later version.
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
16 You should have received a copy of the GNU Lesser General Public
17 License along with the GNU C Library; if not, write to the Free
18 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 02111-1307 USA. */
21 #include <assert.h>
22 #include <errno.h>
23 #include <error.h>
24 #include <inttypes.h>
25 #include <libintl.h>
26 #include <limits.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <unistd.h>
30 #include <sys/mman.h>
31 #include <sys/param.h>
33 #include "dbg_log.h"
34 #include "nscd.h"
/* Maximum alignment requirement we will encounter.  All allocations
   from the data area are rounded up to a multiple of BLOCK_ALIGN, and
   the garbage collector's mark bitmap uses one bit per such block.  */
#define BLOCK_ALIGN_LOG 3
#define BLOCK_ALIGN (1 << BLOCK_ALIGN_LOG)
#define BLOCK_ALIGN_M1 (BLOCK_ALIGN - 1)
/* qsort comparison function: order hash entries by their address in
   the mapped data area.  Used by gc to walk the entries in memory
   order while compacting.  */
static int
sort_he (const void *p1, const void *p2)
{
  struct hashentry *h1 = *(struct hashentry **) p1;
  struct hashentry *h2 = *(struct hashentry **) p2;

  if (h1 < h2)
    return -1;
  if (h1 > h2)
    return 1;
  return 0;
}
57 static int
58 sort_he_data (const void *p1, const void *p2)
60 struct hashentry *h1 = *(struct hashentry **) p1;
61 struct hashentry *h2 = *(struct hashentry **) p2;
63 if (h1->packet < h2->packet)
64 return -1;
65 if (h1->packet > h2->packet)
66 return 1;
67 return 0;
/* Basic definitions for the bitmap implementation.  Only BITMAP_T
   needs to be changed to choose a different word size.
   BITS is the number of bits per bitmap word, ALLBITS a word with
   every bit set, HIGHBIT a word with only the top bit set.  */
#define BITMAP_T uint8_t
#define BITS (CHAR_BIT * sizeof (BITMAP_T))
#define ALLBITS ((((BITMAP_T) 1) << BITS) - 1)
#define HIGHBIT (((BITMAP_T) 1) << (BITS - 1))
79 static void
80 markrange (BITMAP_T *mark, ref_t start, size_t len)
82 /* Adjust parameters for block alignment. */
83 start /= BLOCK_ALIGN;
84 len = (len + BLOCK_ALIGN_M1) / BLOCK_ALIGN;
86 size_t elem = start / BITS;
88 if (start % BITS != 0)
90 if (start % BITS + len <= BITS)
92 /* All fits in the partial byte. */
93 mark[elem] |= (ALLBITS >> (BITS - len)) << (start % BITS);
94 return;
97 mark[elem++] |= 0xff << (start % BITS);
98 len -= BITS - (start % BITS);
101 while (len >= BITS)
103 mark[elem++] = ALLBITS;
104 len -= BITS;
107 if (len > 0)
108 mark[elem] |= ALLBITS >> (BITS - len);
112 void
113 gc (struct database_dyn *db)
115 /* We need write access. */
116 pthread_rwlock_wrlock (&db->lock);
118 /* And the memory handling lock. */
119 pthread_mutex_lock (&db->memlock);
121 /* We need an array representing the data area. All memory
122 allocation is BLOCK_ALIGN aligned so this is the level at which
123 we have to look at the memory. We use a mark and sweep algorithm
124 where the marks are placed in this array. */
125 assert (db->head->first_free % BLOCK_ALIGN == 0);
126 BITMAP_T mark[(db->head->first_free / BLOCK_ALIGN + BITS - 1) / BITS];
127 memset (mark, '\0', sizeof (mark));
129 /* Create an array which can hold pointer to all the entries in hash
130 entries. */
131 struct hashentry *he[db->head->nentries];
132 struct hashentry *he_data[db->head->nentries];
134 size_t cnt = 0;
135 for (size_t idx = 0; idx < db->head->module; ++idx)
137 ref_t *prevp = &db->head->array[idx];
138 ref_t run = *prevp;
140 while (run != ENDREF)
142 assert (cnt < db->head->nentries);
143 he[cnt] = (struct hashentry *) (db->data + run);
145 he[cnt]->prevp = prevp;
146 prevp = &he[cnt]->next;
148 /* This is the hash entry itself. */
149 markrange (mark, run, sizeof (struct hashentry));
151 /* Add the information for the data itself. We do this
152 only for the one special entry marked with FIRST. */
153 if (he[cnt]->first)
155 struct datahead *dh
156 = (struct datahead *) (db->data + he[cnt]->packet);
157 markrange (mark, he[cnt]->packet, dh->allocsize);
160 run = he[cnt]->next;
162 ++cnt;
165 assert (cnt == db->head->nentries);
167 /* Sort the entries by the addresses of the referenced data. All
168 the entries pointing to the same DATAHEAD object will have the
169 same key. Stability of the sorting is unimportant. */
170 memcpy (he_data, he, cnt * sizeof (struct hashentry *));
171 qsort (he_data, cnt, sizeof (struct hashentry *), sort_he_data);
173 /* Sort the entries by their address. */
174 qsort (he, cnt, sizeof (struct hashentry *), sort_he);
176 /* Determine the highest used address. */
177 size_t high = sizeof (mark);
178 while (high > 0 && mark[high - 1] == 0)
179 --high;
181 /* No memory used. */
182 if (high == 0)
184 db->head->first_free = 0;
185 goto out;
188 /* Determine the highest offset. */
189 BITMAP_T mask = HIGHBIT;
190 ref_t highref = (high * BITS - 1) * BLOCK_ALIGN;
191 while ((mark[high - 1] & mask) == 0)
193 mask >>= 1;
194 highref -= BLOCK_ALIGN;
197 /* No we can iterate over the MARK array and find bits which are not
198 set. These represent memory which can be recovered. */
199 size_t byte = 0;
200 /* Find the first gap. */
201 while (byte < high && mark[byte] == ALLBITS)
202 ++byte;
204 if (byte == high
205 || (byte == high - 1 && (mark[byte] & ~(mask | (mask - 1))) == 0))
206 /* No gap. */
207 goto out;
209 mask = 1;
210 cnt = 0;
211 while ((mark[byte] & mask) != 0)
213 ++cnt;
214 mask <<= 1;
216 ref_t off_free = (byte * BITS + cnt) * BLOCK_ALIGN;
217 assert (off_free <= db->head->first_free);
219 struct hashentry **next_hash = he;
220 struct hashentry **next_data = he_data;
222 /* Skip over the hash entries in the first block which does not get
223 moved. */
224 while (next_hash < &he[db->head->nentries]
225 && *next_hash < (struct hashentry *) (db->data + off_free))
226 ++next_hash;
228 while (next_data < &he_data[db->head->nentries]
229 && (*next_data)->packet < off_free)
230 ++next_data;
233 /* Now we start modifying the data. Make sure all readers of the
234 data are aware of this and temporarily don't use the data. */
235 ++db->head->gc_cycle;
236 assert ((db->head->gc_cycle & 1) == 1);
239 /* We do not perform the move operations right away since the
240 he_data array is not sorted by the address of the data. */
241 struct moveinfo
243 void *from;
244 void *to;
245 size_t size;
246 struct moveinfo *next;
247 } *moves = NULL;
249 while (byte < high)
251 /* Search for the next filled block. BYTE is the index of the
252 entry in MARK, MASK is the bit, and CNT is the bit number.
253 OFF_FILLED is the corresponding offset. */
254 if ((mark[byte] & ~(mask - 1)) == 0)
256 /* No other bit set in the same element of MARK. Search in the
257 following memory. */
259 ++byte;
260 while (byte < high && mark[byte] == 0);
262 if (byte == high)
263 /* That was it. */
264 break;
266 mask = 1;
267 cnt = 0;
269 /* Find the exact bit. */
270 while ((mark[byte] & mask) == 0)
272 ++cnt;
273 mask <<= 1;
276 ref_t off_alloc = (byte * BITS + cnt) * BLOCK_ALIGN;
277 assert (off_alloc <= db->head->first_free);
279 /* Find the end of the used area. */
280 if ((mark[byte] & ~(mask - 1)) == (BITMAP_T) ~(mask - 1))
282 /* All other bits set. Search the next bytes in MARK. */
284 ++byte;
285 while (byte < high && mark[byte] == ALLBITS);
287 mask = 1;
288 cnt = 0;
290 if (byte < high)
292 /* Find the exact bit. */
293 while ((mark[byte] & mask) != 0)
295 ++cnt;
296 mask <<= 1;
300 ref_t off_allocend = (byte * BITS + cnt) * BLOCK_ALIGN;
301 assert (off_allocend <= db->head->first_free);
302 /* Now we know that we can copy the area from OFF_ALLOC to
303 OFF_ALLOCEND (not included) to the memory starting at
304 OFF_FREE. First fix up all the entries for the
305 displacement. */
306 ref_t disp = off_alloc - off_free;
308 struct moveinfo *new_move
309 = (struct moveinfo *) alloca (sizeof (*new_move));
310 new_move->from = db->data + off_alloc;
311 new_move->to = db->data + off_free;
312 new_move->size = off_allocend - off_alloc;
313 /* Create a circular list to be always able to append at the end. */
314 if (moves == NULL)
315 moves = new_move->next = new_move;
316 else
318 new_move->next = moves->next;
319 moves = moves->next = new_move;
322 /* The following loop will prepare to move this much data. */
323 off_free += off_allocend - off_alloc;
325 while (off_alloc < off_allocend)
327 /* Determine whether the next entry is for a hash entry or
328 the data. */
329 if ((struct hashentry *) (db->data + off_alloc) == *next_hash)
331 /* Just correct the forward reference. */
332 *(*next_hash++)->prevp -= disp;
334 off_alloc += ((sizeof (struct hashentry) + BLOCK_ALIGN_M1)
335 & ~BLOCK_ALIGN_M1);
337 else
339 assert (next_data < &he_data[db->head->nentries]);
340 assert ((*next_data)->packet == off_alloc);
342 struct datahead *dh = (struct datahead *) (db->data + off_alloc);
345 assert ((*next_data)->key >= (*next_data)->packet);
346 assert ((*next_data)->key + (*next_data)->len
347 <= (*next_data)->packet + dh->allocsize);
349 (*next_data)->packet -= disp;
350 (*next_data)->key -= disp;
351 ++next_data;
353 while (next_data < &he_data[db->head->nentries]
354 && (*next_data)->packet == off_alloc);
356 off_alloc += (dh->allocsize + BLOCK_ALIGN_M1) & ~BLOCK_ALIGN_M1;
359 assert (off_alloc == off_allocend);
361 assert (off_alloc <= db->head->first_free);
362 if (off_alloc == db->head->first_free)
363 /* We are done, that was the last block. */
364 break;
366 assert (next_hash == &he[db->head->nentries]);
367 assert (next_data == &he_data[db->head->nentries]);
369 /* Now perform the actual moves. */
370 if (moves != NULL)
372 struct moveinfo *runp = moves->next;
375 assert ((char *) runp->to >= db->data);
376 assert ((char *) runp->to + runp->size
377 <= db->data + db->head->first_free);
378 assert ((char *) runp->from >= db->data);
379 assert ((char *) runp->from + runp->size
380 <= db->data + db->head->first_free);
382 /* The regions may overlap. */
383 memmove (runp->to, runp->from, runp->size);
384 runp = runp->next;
386 while (runp != moves->next);
388 if (__builtin_expect (debug_level >= 3, 0))
389 dbg_log (_("freed %zu bytes in %s cache"),
390 db->head->first_free
391 - ((char *) moves->to + moves->size - db->data),
392 dbnames[db - dbs]);
394 /* The byte past the end of the last copied block is the next
395 available byte. */
396 db->head->first_free = (char *) moves->to + moves->size - db->data;
398 /* Consistency check. */
399 if (__builtin_expect (debug_level >= 3, 0))
401 for (size_t idx = 0; idx < db->head->module; ++idx)
403 ref_t run = db->head->array[idx];
404 size_t cnt = 0;
406 while (run != ENDREF)
408 if (run + sizeof (struct hashentry) > db->head->first_free)
410 dbg_log ("entry %zu in hash bucket %zu out of bounds: "
411 "%" PRIu32 "+%zu > %zu\n",
412 cnt, idx, run, sizeof (struct hashentry),
413 (size_t) db->head->first_free);
414 break;
417 struct hashentry *he = (struct hashentry *) (db->data + run);
419 if (he->key + he->len > db->head->first_free)
420 dbg_log ("key of entry %zu in hash bucket %zu out of "
421 "bounds: %" PRIu32 "+%zu > %zu\n",
422 cnt, idx, he->key, (size_t) he->len,
423 (size_t) db->head->first_free);
425 if (he->packet + sizeof (struct datahead)
426 > db->head->first_free)
427 dbg_log ("packet of entry %zu in hash bucket %zu out of "
428 "bounds: %" PRIu32 "+%zu > %zu\n",
429 cnt, idx, he->packet, sizeof (struct datahead),
430 (size_t) db->head->first_free);
431 else
433 struct datahead *dh = (struct datahead *) (db->data
434 + he->packet);
435 if (he->packet + dh->allocsize
436 > db->head->first_free)
437 dbg_log ("full key of entry %zu in hash bucket %zu "
438 "out of bounds: %" PRIu32 "+%zu > %zu",
439 cnt, idx, he->packet, (size_t) dh->allocsize,
440 (size_t) db->head->first_free);
443 run = he->next;
444 ++cnt;
450 /* Make sure the data on disk is updated. */
451 if (db->persistent)
452 msync (db->head, db->data + db->head->first_free - (char *) db->head,
453 MS_ASYNC);
456 /* Now we are done modifying the data. */
457 ++db->head->gc_cycle;
458 assert ((db->head->gc_cycle & 1) == 0);
460 /* We are done. */
461 out:
462 pthread_mutex_unlock (&db->memlock);
463 pthread_rwlock_unlock (&db->lock);
467 void *
468 mempool_alloc (struct database_dyn *db, size_t len)
470 /* Make sure LEN is a multiple of our maximum alignment so we can
471 keep track of used memory is multiples of this alignment value. */
472 if ((len & BLOCK_ALIGN_M1) != 0)
473 len += BLOCK_ALIGN - (len & BLOCK_ALIGN_M1);
475 pthread_mutex_lock (&db->memlock);
477 assert ((db->head->first_free & BLOCK_ALIGN_M1) == 0);
479 bool tried_resize = false;
480 void *res;
481 retry:
482 res = db->data + db->head->first_free;
484 if (__builtin_expect (db->head->first_free + len > db->head->data_size, 0))
486 if (! tried_resize)
488 /* Try to resize the database. Grow size of 1/8th. */
489 size_t new_data_size = db->head->data_size + db->head->data_size / 8;
490 size_t oldtotal = (sizeof (struct database_pers_head)
491 + db->head->module * sizeof (ref_t)
492 + db->head->data_size);
493 size_t newtotal = (sizeof (struct database_pers_head)
494 + db->head->module * sizeof (ref_t)
495 + new_data_size);
497 if ((!db->mmap_used || ftruncate (db->wr_fd, newtotal) != 0)
498 /* Try to resize the mapping. Note: no MREMAP_MAYMOVE. */
499 && mremap (db->head, oldtotal, newtotal, 0) == 0)
501 db->head->data_size = new_data_size;
502 tried_resize = true;
503 goto retry;
507 if (! db->last_alloc_failed)
509 dbg_log (_("no more memory for database '%s'"), dbnames[db - dbs]);
511 db->last_alloc_failed = true;
514 /* No luck. */
515 res = NULL;
517 else
519 db->head->first_free += len;
521 db->last_alloc_failed = false;
524 pthread_mutex_unlock (&db->memlock);
526 return res;