/* Cache memory handling.
   Copyright (C) 2004, 2005, 2006, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2004.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published
   by the Free Software Foundation; version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software Foundation,
   Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#include <assert.h>
#include <errno.h>
#include <error.h>
#include <fcntl.h>
#include <inttypes.h>
#include <libintl.h>
#include <limits.h>
#include <obstack.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>

#include "dbg_log.h"
#include "nscd.h"

/* Wrapper functions with error checking for standard functions.  */
extern void *xmalloc (size_t n);
extern void *xcalloc (size_t n, size_t s);

static int
sort_he (const void *p1, const void *p2)
{
  struct hashentry *h1 = *(struct hashentry **) p1;
  struct hashentry *h2 = *(struct hashentry **) p2;

  if (h1 < h2)
    return -1;
  if (h1 > h2)
    return 1;
  return 0;
}

static int
sort_he_data (const void *p1, const void *p2)
{
  struct hashentry *h1 = *(struct hashentry **) p1;
  struct hashentry *h2 = *(struct hashentry **) p2;

  if (h1->packet < h2->packet)
    return -1;
  if (h1->packet > h2->packet)
    return 1;
  return 0;
}

/* Basic definitions for the bitmap implementation.  Only BITMAP_T
   needs to be changed to choose a different word size.  */
#define BITMAP_T uint8_t
#define BITS (CHAR_BIT * sizeof (BITMAP_T))
#define ALLBITS ((((BITMAP_T) 1) << BITS) - 1)
#define HIGHBIT (((BITMAP_T) 1) << (BITS - 1))
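
/* With BITMAP_T defined as uint8_t, BITS is 8, ALLBITS is 0xff and
   HIGHBIT is 0x80.  Bit N of the map describes the BLOCK_ALIGN-sized
   block at offset N * BLOCK_ALIGN in the data area, so each map word
   covers BITS * BLOCK_ALIGN bytes.  */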

static void
markrange (BITMAP_T *mark, ref_t start, size_t len)
{
  /* Adjust parameters for block alignment.  */
  assert ((start & BLOCK_ALIGN_M1) == 0);
  start /= BLOCK_ALIGN;
  len = (len + BLOCK_ALIGN_M1) / BLOCK_ALIGN;

  size_t elem = start / BITS;

  if (start % BITS != 0)
    {
      if (start % BITS + len <= BITS)
	{
	  /* All fits in the partial byte.  */
	  mark[elem] |= (ALLBITS >> (BITS - len)) << (start % BITS);
	  return;
	}

      mark[elem++] |= ALLBITS << (start % BITS);
      len -= BITS - (start % BITS);
    }

  while (len >= BITS)
    {
      mark[elem++] = ALLBITS;
      len -= BITS;
    }

  if (len > 0)
    mark[elem] |= ALLBITS >> (BITS - len);
}
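
/* A worked example, assuming BLOCK_ALIGN is 8 (its value in nscd.h at
   the time of writing): markrange (mark, 24, 20) converts START to
   block 3 and LEN to 3 blocks; all three bits fit into the first map
   word, so the call sets mark[0] |= 0x38, i.e. bits 3, 4 and 5.  */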

void
gc (struct database_dyn *db)
{
  /* We need write access.  */
  pthread_rwlock_wrlock (&db->lock);

  /* And the memory handling lock.  */
  pthread_mutex_lock (&db->memlock);

  /* We need an array representing the data area.  All memory
     allocation is BLOCK_ALIGN aligned so this is the level at which
     we have to look at the memory.  We use a mark and sweep algorithm
     where the marks are placed in this array.  */
  assert (db->head->first_free % BLOCK_ALIGN == 0);
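
  /* The collection below proceeds in three phases: every hash entry
     and every referenced datahead record is marked in MARK; then the
     gaps between marked regions are found, pending moves into them are
     recorded, and the affected entries' offsets are adjusted; finally
     the data is shifted with memmove and FIRST_FREE is updated.  */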

  BITMAP_T *mark;
  bool mark_use_malloc;
  /* In prune_cache we are also using a dynamically allocated array.
     If the array in the caller is too large we have malloc'ed it.  */
  size_t stack_used = sizeof (bool) * db->head->module;
  if (__builtin_expect (stack_used > MAX_STACK_USE, 0))
    stack_used = 0;
  size_t nmark = (db->head->first_free / BLOCK_ALIGN + BITS - 1) / BITS;
  size_t memory_needed = nmark * sizeof (BITMAP_T);
  if (stack_used + memory_needed <= MAX_STACK_USE)
    {
      mark = (BITMAP_T *) alloca (memory_needed);
      mark_use_malloc = false;
      memset (mark, '\0', memory_needed);
      stack_used += memory_needed;
    }
  else
    {
      mark = (BITMAP_T *) xcalloc (1, memory_needed);
      mark_use_malloc = true;
    }

  /* Create an array which can hold pointers to all the hash table
     entries.  */
  memory_needed = 2 * db->head->nentries * sizeof (struct hashentry *);
  struct hashentry **he;
  struct hashentry **he_data;
  bool he_use_malloc;
  if (stack_used + memory_needed <= MAX_STACK_USE)
    {
      he = alloca (db->head->nentries * sizeof (struct hashentry *));
      he_data = alloca (db->head->nentries * sizeof (struct hashentry *));
      he_use_malloc = false;
      stack_used += memory_needed;
    }
  else
    {
      he = xmalloc (memory_needed);
      /* The single allocation covers both arrays; HE_DATA starts right
	 after the NENTRIES pointer slots of HE.  */
      he_data = &he[db->head->nentries];
      he_use_malloc = true;
    }

  size_t cnt = 0;
  for (size_t idx = 0; idx < db->head->module; ++idx)
    {
      ref_t *prevp = &db->head->array[idx];
      ref_t run = *prevp;

      while (run != ENDREF)
	{
	  assert (cnt < db->head->nentries);
	  he[cnt] = (struct hashentry *) (db->data + run);

	  he[cnt]->prevp = prevp;
	  prevp = &he[cnt]->next;

	  /* This is the hash entry itself.  */
	  markrange (mark, run, sizeof (struct hashentry));

	  /* Add the information for the data itself.  We do this
	     only for the one special entry marked with FIRST.  */
	  if (he[cnt]->first)
	    {
	      struct datahead *dh
		= (struct datahead *) (db->data + he[cnt]->packet);
	      markrange (mark, he[cnt]->packet, dh->allocsize);
	    }

	  run = he[cnt]->next;

	  ++cnt;
	}
    }
  assert (cnt == db->head->nentries);

  /* Go through the list of in-flight memory blocks.  */
  struct mem_in_flight *mrunp = mem_in_flight_list;
  while (mrunp != NULL)
    {
      /* NB: There can be no race between this test and another thread
	 setting the field to the index we are looking for because
	 this would require the other thread to also have the memlock
	 for the database.

	 NB2: we do not have to look at later blocks (higher indices) if
	 earlier blocks are not in flight.  They are always allocated in
	 sequence.  */
      for (enum in_flight idx = IDX_result_data;
	   idx < IDX_last && mrunp->block[idx].dbidx == db - dbs; ++idx)
	{
	  assert (mrunp->block[idx].blockoff >= 0);
	  assert (mrunp->block[idx].blocklen < db->memsize);
	  assert (mrunp->block[idx].blockoff
		  + mrunp->block[idx].blocklen <= db->memsize);
	  markrange (mark, mrunp->block[idx].blockoff,
		     mrunp->block[idx].blocklen);
	}

      mrunp = mrunp->next;
    }

  /* Sort the entries by the addresses of the referenced data.  All
     the entries pointing to the same DATAHEAD object will have the
     same key.  Stability of the sorting is unimportant.  */
  memcpy (he_data, he, cnt * sizeof (struct hashentry *));
  qsort (he_data, cnt, sizeof (struct hashentry *), sort_he_data);

  /* Sort the entries by their address.  */
  qsort (he, cnt, sizeof (struct hashentry *), sort_he);
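
  /* The two orderings serve different purposes below: HE, sorted by
     entry address, lets the sweep recognize hash entries while walking
     the data area from low to high offsets; HE_DATA, sorted by packet
     offset, lets it fix up all the entries referencing the same
     datahead record in one pass.  */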

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free
  struct obstack ob;
  obstack_init (&ob);

  /* Determine the highest used address.  */
  size_t high = nmark;
  while (high > 0 && mark[high - 1] == 0)
    --high;

  /* No memory used.  */
  if (high == 0)
    {
      db->head->first_free = 0;
      goto out;
    }

  /* Determine the highest offset.  */
  BITMAP_T mask = HIGHBIT;
  ref_t highref = (high * BITS - 1) * BLOCK_ALIGN;
  while ((mark[high - 1] & mask) == 0)
    {
      mask >>= 1;
      highref -= BLOCK_ALIGN;
    }

  /* Now we can iterate over the MARK array and find bits which are not
     set.  These represent memory which can be recovered.  */
  size_t byte = 0;
  /* Find the first gap.  */
  while (byte < high && mark[byte] == ALLBITS)
    ++byte;

  if (byte == high
      || (byte == high - 1 && (mark[byte] & ~(mask | (mask - 1))) == 0))
    /* No gap.  */
    goto out;

  mask = 1;
  cnt = 0;
  while ((mark[byte] & mask) != 0)
    {
      ++cnt;
      mask <<= 1;
    }
  ref_t off_free = (byte * BITS + cnt) * BLOCK_ALIGN;
  assert (off_free <= db->head->first_free);
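
  /* OFF_FREE is now the offset of the first gap.  For example, if all
     preceding map words were ALLBITS and mark[byte] == 0x0f, CNT ends
     up as 4 and OFF_FREE as (byte * BITS + 4) * BLOCK_ALIGN.  */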

  struct hashentry **next_hash = he;
  struct hashentry **next_data = he_data;

  /* Skip over the hash entries in the first block which does not get
     moved.  */
  while (next_hash < &he[db->head->nentries]
	 && *next_hash < (struct hashentry *) (db->data + off_free))
    ++next_hash;

  while (next_data < &he_data[db->head->nentries]
	 && (*next_data)->packet < off_free)
    ++next_data;


  /* Now we start modifying the data.  Make sure all readers of the
     data are aware of this and temporarily don't use the data.  */
  ++db->head->gc_cycle;
  assert ((db->head->gc_cycle & 1) == 1);
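
  /* An odd GC_CYCLE value in the shared header signals to clients
     mapping the database that offsets are in flux; readers comparing
     the value before and after a lookup (this is how the client side
     in nscd-client.h is expected to use it) can detect and retry an
     access which raced with this collection.  */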

  /* We do not perform the move operations right away since the
     he_data array is not sorted by the address of the data.  */
  struct moveinfo
  {
    void *from;
    void *to;
    size_t size;
    struct moveinfo *next;
  } *moves = NULL;
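
  /* MOVES always points at the most recently appended element of the
     circular list, so MOVES->next is the oldest: appending stays O(1)
     and the moves can later be replayed in the order they were
     recorded, starting at MOVES->next.  */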

  while (byte < high)
    {
      /* Search for the next filled block.  BYTE is the index of the
	 entry in MARK, MASK is the bit, and CNT is the bit number.
	 OFF_FILLED is the corresponding offset.  */
      if ((mark[byte] & ~(mask - 1)) == 0)
	{
	  /* No other bit set in the same element of MARK.  Search in the
	     following memory.  */
	  do
	    ++byte;
	  while (byte < high && mark[byte] == 0);

	  if (byte == high)
	    /* That was it.  */
	    break;

	  mask = 1;
	  cnt = 0;
	}
      /* Find the exact bit.  */
      while ((mark[byte] & mask) == 0)
	{
	  ++cnt;
	  mask <<= 1;
	}

      ref_t off_alloc = (byte * BITS + cnt) * BLOCK_ALIGN;
      assert (off_alloc <= db->head->first_free);

      /* Find the end of the used area.  */
      if ((mark[byte] & ~(mask - 1)) == (BITMAP_T) ~(mask - 1))
	{
	  /* All other bits set.  Search the next bytes in MARK.  */
	  do
	    ++byte;
	  while (byte < high && mark[byte] == ALLBITS);

	  mask = 1;
	  cnt = 0;
	}
      if (byte < high)
	{
	  /* Find the exact bit.  */
	  while ((mark[byte] & mask) != 0)
	    {
	      ++cnt;
	      mask <<= 1;
	    }
	}

      ref_t off_allocend = (byte * BITS + cnt) * BLOCK_ALIGN;
      assert (off_allocend <= db->head->first_free);
      /* Now we know that we can copy the area from OFF_ALLOC to
	 OFF_ALLOCEND (not included) to the memory starting at
	 OFF_FREE.  First fix up all the entries for the
	 displacement.  */
      ref_t disp = off_alloc - off_free;

      struct moveinfo *new_move;
      if (stack_used + sizeof (*new_move) <= MAX_STACK_USE)
	{
	  new_move = alloca (sizeof (*new_move));
	  stack_used += sizeof (*new_move);
	}
      else
	new_move = obstack_alloc (&ob, sizeof (*new_move));
      new_move->from = db->data + off_alloc;
      new_move->to = db->data + off_free;
      new_move->size = off_allocend - off_alloc;
      /* Create a circular list to be always able to append at the end.  */
      if (moves == NULL)
	moves = new_move->next = new_move;
      else
	{
	  new_move->next = moves->next;
	  moves = moves->next = new_move;
	}

      /* The following loop will prepare to move this much data.  */
      off_free += off_allocend - off_alloc;

      while (off_alloc < off_allocend)
	{
	  /* Determine whether the next entry is for a hash entry or
	     the data.  */
	  if ((struct hashentry *) (db->data + off_alloc) == *next_hash)
	    {
	      /* Just correct the forward reference.  */
	      *(*next_hash++)->prevp -= disp;

	      off_alloc += ((sizeof (struct hashentry) + BLOCK_ALIGN_M1)
			    & ~BLOCK_ALIGN_M1);
	    }
	  else
	    {
	      assert (next_data < &he_data[db->head->nentries]);
	      assert ((*next_data)->packet == off_alloc);

	      struct datahead *dh = (struct datahead *) (db->data + off_alloc);
	      do
		{
		  assert ((*next_data)->key >= (*next_data)->packet);
		  assert ((*next_data)->key + (*next_data)->len
			  <= (*next_data)->packet + dh->allocsize);

		  (*next_data)->packet -= disp;
		  (*next_data)->key -= disp;
		  ++next_data;
		}
	      while (next_data < &he_data[db->head->nentries]
		     && (*next_data)->packet == off_alloc);

	      off_alloc += (dh->allocsize + BLOCK_ALIGN_M1) & ~BLOCK_ALIGN_M1;
	    }
	}
      assert (off_alloc == off_allocend);

      assert (off_alloc <= db->head->first_free);
      if (off_alloc == db->head->first_free)
	/* We are done, that was the last block.  */
	break;
    }
  assert (next_hash == &he[db->head->nentries]);
  assert (next_data == &he_data[db->head->nentries]);

  /* Now perform the actual moves.  */
  if (moves != NULL)
    {
      struct moveinfo *runp = moves->next;
      do
	{
	  assert ((char *) runp->to >= db->data);
	  assert ((char *) runp->to + runp->size
		  <= db->data + db->head->first_free);
	  assert ((char *) runp->from >= db->data);
	  assert ((char *) runp->from + runp->size
		  <= db->data + db->head->first_free);

	  /* The regions may overlap.  */
	  memmove (runp->to, runp->from, runp->size);
	  runp = runp->next;
	}
      while (runp != moves->next);

      if (__builtin_expect (debug_level >= 3, 0))
	dbg_log (_("freed %zu bytes in %s cache"),
		 db->head->first_free
		 - ((char *) moves->to + moves->size - db->data),
		 dbnames[db - dbs]);

      /* The byte past the end of the last copied block is the next
	 available byte.  */
      db->head->first_free = (char *) moves->to + moves->size - db->data;

      /* Consistency check.  */
      if (__builtin_expect (debug_level >= 3, 0))
	{
	  for (size_t idx = 0; idx < db->head->module; ++idx)
	    {
	      ref_t run = db->head->array[idx];
	      size_t cnt = 0;

	      while (run != ENDREF)
		{
		  if (run + sizeof (struct hashentry) > db->head->first_free)
		    {
		      dbg_log ("entry %zu in hash bucket %zu out of bounds: "
			       "%" PRIu32 "+%zu > %zu\n",
			       cnt, idx, run, sizeof (struct hashentry),
			       (size_t) db->head->first_free);
		      break;
		    }

		  struct hashentry *he = (struct hashentry *) (db->data + run);

		  if (he->key + he->len > db->head->first_free)
		    dbg_log ("key of entry %zu in hash bucket %zu out of "
			     "bounds: %" PRIu32 "+%zu > %zu\n",
			     cnt, idx, he->key, (size_t) he->len,
			     (size_t) db->head->first_free);

		  if (he->packet + sizeof (struct datahead)
		      > db->head->first_free)
		    dbg_log ("packet of entry %zu in hash bucket %zu out of "
			     "bounds: %" PRIu32 "+%zu > %zu\n",
			     cnt, idx, he->packet, sizeof (struct datahead),
			     (size_t) db->head->first_free);
		  else
		    {
		      struct datahead *dh = (struct datahead *) (db->data
								 + he->packet);
		      if (he->packet + dh->allocsize
			  > db->head->first_free)
			dbg_log ("full key of entry %zu in hash bucket %zu "
				 "out of bounds: %" PRIu32 "+%zu > %zu",
				 cnt, idx, he->packet, (size_t) dh->allocsize,
				 (size_t) db->head->first_free);
		    }

		  run = he->next;
		  ++cnt;
		}
	    }
	}
    }

  /* Make sure the data on disk is updated.  */
  if (db->persistent)
    msync (db->head, db->data + db->head->first_free - (char *) db->head,
	   MS_ASYNC);


  /* Now we are done modifying the data.  */
  ++db->head->gc_cycle;
  assert ((db->head->gc_cycle & 1) == 0);

  /* We are done.  */
 out:
  pthread_mutex_unlock (&db->memlock);
  pthread_rwlock_unlock (&db->lock);

  if (he_use_malloc)
    free (he);
  if (mark_use_malloc)
    free (mark);

  obstack_free (&ob, NULL);
}

void *
mempool_alloc (struct database_dyn *db, size_t len, enum in_flight idx)
{
  /* Make sure LEN is a multiple of our maximum alignment so we can
     keep track of used memory in multiples of this alignment value.  */
  if ((len & BLOCK_ALIGN_M1) != 0)
    len += BLOCK_ALIGN - (len & BLOCK_ALIGN_M1);
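
  /* With BLOCK_ALIGN assumed to be 8, a request for 13 bytes is thus
     rounded up to 16, which keeps the FIRST_FREE alignment asserted
     below intact.  */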

  pthread_mutex_lock (&db->memlock);

  assert ((db->head->first_free & BLOCK_ALIGN_M1) == 0);

  bool tried_resize = false;
  void *res;
 retry:
  res = db->data + db->head->first_free;

  if (__builtin_expect (db->head->first_free + len > db->head->data_size, 0))
    {
      if (! tried_resize)
	{
	  /* Try to resize the database.  Grow by 1/8th of the current
	     size, or by enough for the request, whichever is larger.  */
	  size_t oldtotal = (sizeof (struct database_pers_head)
			     + roundup (db->head->module * sizeof (ref_t),
					ALIGN)
			     + db->head->data_size);
	  size_t new_data_size = (db->head->data_size
				  + MAX (2 * len, db->head->data_size / 8));
	  size_t newtotal = (sizeof (struct database_pers_head)
			     + roundup (db->head->module * sizeof (ref_t),
					ALIGN)
			     + new_data_size);
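
	  /* E.g. for a 1 MiB data area the increment is
	     MAX (2 * len, 128 KiB), so even a small request grows the
	     area by at least one eighth, while a huge request grows it
	     by twice the requested length.  */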
	  if (newtotal > db->max_db_size)
	    {
	      new_data_size -= newtotal - db->max_db_size;
	      newtotal = db->max_db_size;
	    }

	  if (db->mmap_used && newtotal > oldtotal
	      /* We only have to adjust the file size.  The new pages
		 become magically available.  */
	      && TEMP_FAILURE_RETRY_VAL (posix_fallocate (db->wr_fd, oldtotal,
							  newtotal
							  - oldtotal)) == 0)
	    {
	      db->head->data_size = new_data_size;
	      tried_resize = true;
	      goto retry;
	    }
	}

      if (! db->last_alloc_failed)
	{
	  dbg_log (_("no more memory for database '%s'"), dbnames[db - dbs]);

	  db->last_alloc_failed = true;
	}

      /* No luck.  */
      res = NULL;
    }
  else
    {
      /* Remember that we have allocated this memory.  */
      assert (idx >= 0 && idx < IDX_last);
      mem_in_flight.block[idx].dbidx = db - dbs;
      mem_in_flight.block[idx].blocklen = len;
      mem_in_flight.block[idx].blockoff = db->head->first_free;

      db->head->first_free += len;

      db->last_alloc_failed = false;
    }

  pthread_mutex_unlock (&db->memlock);

  return res;
}