/* Cache memory handling.
   Copyright (C) 2004, 2005, 2006, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2004.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published
   by the Free Software Foundation; version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software Foundation,
   Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
#include <alloca.h>
#include <assert.h>
#include <errno.h>
#include <error.h>
#include <inttypes.h>
#include <libintl.h>
#include <limits.h>
#include <obstack.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>

#include "dbg_log.h"
#include "nscd.h"
/* Wrapper functions with error checking for standard functions.  */
extern void *xmalloc (size_t n);
extern void *xcalloc (size_t n, size_t s);
static int
sort_he (const void *p1, const void *p2)
{
  struct hashentry *h1 = *(struct hashentry **) p1;
  struct hashentry *h2 = *(struct hashentry **) p2;

  if (h1 < h2)
    return -1;
  if (h1 > h2)
    return 1;
  return 0;
}
static int
sort_he_data (const void *p1, const void *p2)
{
  struct hashentry *h1 = *(struct hashentry **) p1;
  struct hashentry *h2 = *(struct hashentry **) p2;

  if (h1->packet < h2->packet)
    return -1;
  if (h1->packet > h2->packet)
    return 1;
  return 0;
}
/* Basic definitions for the bitmap implementation.  Only BITMAP_T
   needs to be changed to choose a different word size.  */
#define BITMAP_T uint8_t
#define BITS (CHAR_BIT * sizeof (BITMAP_T))
#define ALLBITS ((((BITMAP_T) 1) << BITS) - 1)
#define HIGHBIT (((BITMAP_T) 1) << (BITS - 1))
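
/* Illustration (added here; not part of the original file): with
   BITMAP_T defined as uint8_t the macros above evaluate to

     BITS    == 8
     ALLBITS == 0xff    (every bit of one bitmap word set)
     HIGHBIT == 0x80    (only the most significant bit set)

   so one bitmap word tracks eight BLOCK_ALIGN-sized blocks.  */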
static void
markrange (BITMAP_T *mark, ref_t start, size_t len)
{
  /* Adjust parameters for block alignment.  */
  assert ((start & BLOCK_ALIGN_M1) == 0);
  start /= BLOCK_ALIGN;
  len = (len + BLOCK_ALIGN_M1) / BLOCK_ALIGN;

  size_t elem = start / BITS;

  if (start % BITS != 0)
    {
      if (start % BITS + len <= BITS)
        {
          /* All fits in the partial byte.  */
          mark[elem] |= (ALLBITS >> (BITS - len)) << (start % BITS);
          return;
        }

      mark[elem++] |= ALLBITS << (start % BITS);
      len -= BITS - (start % BITS);
    }

  while (len >= BITS)
    {
      mark[elem++] = ALLBITS;
      len -= BITS;
    }

  if (len > 0)
    mark[elem] |= ALLBITS >> (BITS - len);
}
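
/* Worked example (illustrative only; it assumes BLOCK_ALIGN is 8 and
   BLOCK_ALIGN_M1 is 7 as defined in nscd.h):

     markrange (mark, 16, 20);

   START becomes block 2 and LEN becomes (20 + 7) / 8 == 3 blocks.
   Since 2 + 3 <= BITS everything fits into the partial word and the
   function performs

     mark[0] |= (ALLBITS >> (8 - 3)) << 2;

   i.e. mark[0] |= 0x1c, marking blocks 2 through 4 as in use.  */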
void
gc (struct database_dyn *db)
{
  /* We need write access.  */
  pthread_rwlock_wrlock (&db->lock);

  /* And the memory handling lock.  */
  pthread_mutex_lock (&db->memlock);
  /* We need an array representing the data area.  All memory
     allocation is BLOCK_ALIGN aligned so this is the level at which
     we have to look at the memory.  We use a mark and sweep algorithm
     where the marks are placed in this array.  */
  assert (db->head->first_free % BLOCK_ALIGN == 0);
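
  /* Sizing note (illustration, not in the original): one bitmap word
     covers BITS * BLOCK_ALIGN bytes of the data area.  With uint8_t
     words and a BLOCK_ALIGN of 8 that is 64 bytes per word, so a 1 MiB
     data area needs only a 16 KiB mark array.  */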
  BITMAP_T *mark;
  bool mark_use_malloc;
  /* In prune_cache we are also using a dynamically allocated array.
     If the array in the caller is too large we have malloc'ed it.  */
  size_t stack_used = sizeof (bool) * db->head->module;
  if (__builtin_expect (stack_used > MAX_STACK_USE, 0))
    stack_used = 0;
  size_t nmark = (db->head->first_free / BLOCK_ALIGN + BITS - 1) / BITS;
  size_t memory_needed = nmark * sizeof (BITMAP_T);
  if (stack_used + memory_needed <= MAX_STACK_USE)
    {
      mark = (BITMAP_T *) alloca (memory_needed);
      mark_use_malloc = false;
      memset (mark, '\0', memory_needed);
      stack_used += memory_needed;
    }
  else
    {
      mark = (BITMAP_T *) xcalloc (1, memory_needed);
      mark_use_malloc = true;
    }
  /* Create an array which can hold pointers to all the entries in
     the hash table.  */
  memory_needed = 2 * db->head->nentries * sizeof (struct hashentry *);
  struct hashentry **he;
  struct hashentry **he_data;
  bool he_use_malloc;
  if (stack_used + memory_needed <= MAX_STACK_USE)
    {
      he = alloca (db->head->nentries * sizeof (struct hashentry *));
      he_data = alloca (db->head->nentries * sizeof (struct hashentry *));
      he_use_malloc = false;
      stack_used += memory_needed;
    }
  else
    {
      he = xmalloc (memory_needed);
      /* HE_DATA is the second half of the allocation.  HE has pointer
         type, so indexing by the element count alone steps over the
         first DB->head->nentries pointers.  */
      he_data = &he[db->head->nentries];
      he_use_malloc = true;
    }
  size_t cnt = 0;
  for (size_t idx = 0; idx < db->head->module; ++idx)
    {
      ref_t *prevp = &db->head->array[idx];
      ref_t run = *prevp;

      while (run != ENDREF)
        {
          assert (cnt < db->head->nentries);
          he[cnt] = (struct hashentry *) (db->data + run);

          he[cnt]->prevp = prevp;
          prevp = &he[cnt]->next;

          /* This is the hash entry itself.  */
          markrange (mark, run, sizeof (struct hashentry));

          /* Add the information for the data itself.  We do this
             only for the one special entry marked with FIRST.  */
          if (he[cnt]->first)
            {
              struct datahead *dh
                = (struct datahead *) (db->data + he[cnt]->packet);
              markrange (mark, he[cnt]->packet, dh->allocsize);
            }

          run = he[cnt]->next;

          ++cnt;
        }
    }
  assert (cnt == db->head->nentries);
  /* Go through the list of in-flight memory blocks.  */
  struct mem_in_flight *mrunp = mem_in_flight_list;
  while (mrunp != NULL)
    {
      /* NB: There can be no race between this test and another thread
         setting the field to the index we are looking for because
         this would require the other thread to also have the memlock
         for the database.

         NB2: we do not have to look at later blocks (higher indices) if
         earlier blocks are not in flight.  They are always allocated in
         sequence.  */
      for (enum in_flight idx = IDX_result_data;
           idx < IDX_last && mrunp->block[idx].dbidx == db - dbs; ++idx)
        {
          assert (mrunp->block[idx].blockoff >= 0);
          assert (mrunp->block[idx].blocklen < db->memsize);
          assert (mrunp->block[idx].blockoff
                  + mrunp->block[idx].blocklen <= db->memsize);
          markrange (mark, mrunp->block[idx].blockoff,
                     mrunp->block[idx].blocklen);
        }

      mrunp = mrunp->next;
    }
  /* Sort the entries by the addresses of the referenced data.  All
     the entries pointing to the same DATAHEAD object will have the
     same key.  Stability of the sorting is unimportant.  */
  memcpy (he_data, he, cnt * sizeof (struct hashentry *));
  qsort (he_data, cnt, sizeof (struct hashentry *), sort_he_data);

  /* Sort the entries by their address.  */
  qsort (he, cnt, sizeof (struct hashentry *), sort_he);
#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free
  struct obstack ob;
  obstack_init (&ob);
  /* Determine the highest used address.  */
  size_t high = nmark;
  while (high > 0 && mark[high - 1] == 0)
    --high;

  if (high == 0)
    {
      /* No memory used.  */
      db->head->first_free = 0;
      goto out;
    }
  /* Determine the highest offset.  */
  BITMAP_T mask = HIGHBIT;
  ref_t highref = (high * BITS - 1) * BLOCK_ALIGN;
  while ((mark[high - 1] & mask) == 0)
    {
      mask >>= 1;
      highref -= BLOCK_ALIGN;
    }
  /* Now we can iterate over the MARK array and find bits which are not
     set.  These represent memory which can be recovered.  */
  size_t byte = 0;
  /* Find the first gap.  */
  while (byte < high && mark[byte] == ALLBITS)
    ++byte;

  if (byte == high
      || (byte == high - 1 && (mark[byte] & ~(mask | (mask - 1))) == 0))
    /* No gap.  */
    goto out;

  mask = 1;
  cnt = 0;
  while ((mark[byte] & mask) != 0)
    {
      ++cnt;
      mask <<= 1;
    }
  ref_t off_free = (byte * BITS + cnt) * BLOCK_ALIGN;
  assert (off_free <= db->head->first_free);
  struct hashentry **next_hash = he;
  struct hashentry **next_data = he_data;

  /* Skip over the hash entries in the first block which does not get
     moved.  */
  while (next_hash < &he[db->head->nentries]
         && *next_hash < (struct hashentry *) (db->data + off_free))
    ++next_hash;

  while (next_data < &he_data[db->head->nentries]
         && (*next_data)->packet < off_free)
    ++next_data;
  /* Now we start modifying the data.  Make sure all readers of the
     data are aware of this and temporarily don't use the data.  */
  ++db->head->gc_cycle;
  assert ((db->head->gc_cycle & 1) == 1);
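
  /* Reader-side sketch (an assumption added for illustration, not code
     from this file): GC_CYCLE is odd exactly while this function is
     modifying the data area, so a client reading the shared mapping
     can use it like a seqlock:

       uint32_t c = head->gc_cycle;
       ... read entries ...
       if ((c & 1) != 0 || head->gc_cycle != c)
         goto retry_read;  */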
  /* We do not perform the move operations right away since the
     he_data array is not sorted by the address of the data.  */
  struct moveinfo
  {
    void *from;
    void *to;
    size_t size;
    struct moveinfo *next;
  } *moves = NULL;
  while (byte < high)
    {
      /* Search for the next filled block.  BYTE is the index of the
         entry in MARK, MASK is the bit, and CNT is the bit number.
         OFF_ALLOC is the corresponding offset.  */
      if ((mark[byte] & ~(mask - 1)) == 0)
        {
          /* No other bit is set in the same element of MARK.  Search
             in the following memory.  */
          do
            ++byte;
          while (byte < high && mark[byte] == 0);

          if (byte == high)
            /* That was it.  */
            break;

          mask = 1;
          cnt = 0;
        }
      /* Find the exact bit.  */
      while ((mark[byte] & mask) == 0)
        {
          ++cnt;
          mask <<= 1;
        }

      ref_t off_alloc = (byte * BITS + cnt) * BLOCK_ALIGN;
      assert (off_alloc <= db->head->first_free);
      /* Find the end of the used area.  */
      if ((mark[byte] & ~(mask - 1)) == (BITMAP_T) ~(mask - 1))
        {
          /* All other bits are set.  Search the next bytes in MARK.  */
          do
            ++byte;
          while (byte < high && mark[byte] == ALLBITS);

          mask = 1;
          cnt = 0;
        }
      if (byte < high)
        {
          /* Find the exact bit.  */
          while ((mark[byte] & mask) != 0)
            {
              ++cnt;
              mask <<= 1;
            }
        }

      ref_t off_allocend = (byte * BITS + cnt) * BLOCK_ALIGN;
      assert (off_allocend <= db->head->first_free);
      /* Now we know that we can copy the area from OFF_ALLOC to
         OFF_ALLOCEND (not included) to the memory starting at
         OFF_FREE.  First fix up all the entries for the
         displacement.  */
      ref_t disp = off_alloc - off_free;

      struct moveinfo *new_move;
      if (stack_used + sizeof (*new_move) <= MAX_STACK_USE)
        {
          new_move = alloca (sizeof (*new_move));
          stack_used += sizeof (*new_move);
        }
      else
        new_move = obstack_alloc (&ob, sizeof (*new_move));
      new_move->from = db->data + off_alloc;
      new_move->to = db->data + off_free;
      new_move->size = off_allocend - off_alloc;
      /* Create a circular list to be always able to append at the end.  */
      if (moves == NULL)
        moves = new_move->next = new_move;
      else
        {
          new_move->next = moves->next;
          moves = moves->next = new_move;
        }
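
      /* Illustration (not in the original): MOVES points at the tail
         of the circular list and MOVES->next at its head.  After
         appending A and then B the links are

           moves == B, B->next == A, A->next == B

         so the later traversal starts at moves->next (A) and stops
         once the head is reached again.  */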
      /* The following loop will prepare to move this much data.  */
      off_free += off_allocend - off_alloc;

      while (off_alloc < off_allocend)
        {
          /* Determine whether the next entry is for a hash entry or
             the data.  */
          if ((struct hashentry *) (db->data + off_alloc) == *next_hash)
            {
              /* Just correct the forward reference.  */
              *(*next_hash++)->prevp -= disp;

              off_alloc += ((sizeof (struct hashentry) + BLOCK_ALIGN_M1)
                            & ~BLOCK_ALIGN_M1);
            }
          else
            {
              assert (next_data < &he_data[db->head->nentries]);
              assert ((*next_data)->packet == off_alloc);

              struct datahead *dh
                = (struct datahead *) (db->data + off_alloc);
              do
                {
                  assert ((*next_data)->key >= (*next_data)->packet);
                  assert ((*next_data)->key + (*next_data)->len
                          <= (*next_data)->packet + dh->allocsize);

                  (*next_data)->packet -= disp;
                  (*next_data)->key -= disp;
                  ++next_data;
                }
              while (next_data < &he_data[db->head->nentries]
                     && (*next_data)->packet == off_alloc);

              off_alloc += (dh->allocsize + BLOCK_ALIGN_M1) & ~BLOCK_ALIGN_M1;
            }
        }
      assert (off_alloc == off_allocend);

      assert (off_alloc <= db->head->first_free);
      if (off_alloc == db->head->first_free)
        /* We are done, that was the last block.  */
        break;
    }
  assert (next_hash == &he[db->head->nentries]);
  assert (next_data == &he_data[db->head->nentries]);
  /* Now perform the actual moves.  */
  if (moves != NULL)
    {
      struct moveinfo *runp = moves->next;
      do
        {
          assert ((char *) runp->to >= db->data);
          assert ((char *) runp->to + runp->size
                  <= db->data + db->head->first_free);
          assert ((char *) runp->from >= db->data);
          assert ((char *) runp->from + runp->size
                  <= db->data + db->head->first_free);

          /* The regions may overlap.  */
          memmove (runp->to, runp->from, runp->size);

          runp = runp->next;
        }
      while (runp != moves->next);
      if (__builtin_expect (debug_level >= 3, 0))
        dbg_log (_("freed %zu bytes in %s cache"),
                 (size_t) (db->head->first_free
                           - ((char *) moves->to + moves->size - db->data)),
                 dbnames[db - dbs]);
      /* The byte past the end of the last copied block is the next
         available byte.  */
      db->head->first_free = (char *) moves->to + moves->size - db->data;
      /* Consistency check.  */
      if (__builtin_expect (debug_level >= 3, 0))
        {
          for (size_t idx = 0; idx < db->head->module; ++idx)
            {
              ref_t run = db->head->array[idx];
              size_t cnt = 0;

              while (run != ENDREF)
                {
                  if (run + sizeof (struct hashentry) > db->head->first_free)
                    {
                      dbg_log ("entry %zu in hash bucket %zu out of bounds: "
                               "%" PRIu32 "+%zu > %zu\n",
                               cnt, idx, run, sizeof (struct hashentry),
                               (size_t) db->head->first_free);
                      break;
                    }

                  struct hashentry *he
                    = (struct hashentry *) (db->data + run);

                  if (he->key + he->len > db->head->first_free)
                    dbg_log ("key of entry %zu in hash bucket %zu out of "
                             "bounds: %" PRIu32 "+%zu > %zu\n",
                             cnt, idx, he->key, (size_t) he->len,
                             (size_t) db->head->first_free);

                  if (he->packet + sizeof (struct datahead)
                      > db->head->first_free)
                    dbg_log ("packet of entry %zu in hash bucket %zu out of "
                             "bounds: %" PRIu32 "+%zu > %zu\n",
                             cnt, idx, he->packet, sizeof (struct datahead),
                             (size_t) db->head->first_free);
                  else
                    {
                      struct datahead *dh
                        = (struct datahead *) (db->data + he->packet);

                      if (he->packet + dh->allocsize
                          > db->head->first_free)
                        dbg_log ("full key of entry %zu in hash bucket %zu "
                                 "out of bounds: %" PRIu32 "+%zu > %zu",
                                 cnt, idx, he->packet, (size_t) dh->allocsize,
                                 (size_t) db->head->first_free);
                    }

                  run = he->next;
                  ++cnt;
                }
            }
        }
    }
  /* Make sure the data on disk is updated.  */
  if (db->persistent)
    msync (db->head, db->data + db->head->first_free - (char *) db->head,
           MS_ASYNC);
  /* Now we are done modifying the data.  */
  ++db->head->gc_cycle;
  assert ((db->head->gc_cycle & 1) == 0);
  /* We are done.  */
 out:
  pthread_mutex_unlock (&db->memlock);
  pthread_rwlock_unlock (&db->lock);

  if (he_use_malloc)
    free (he);
  if (mark_use_malloc)
    free (mark);

  obstack_free (&ob, NULL);
}
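
/* Compaction sketch (added for illustration; the offsets are made up):
   a data area like

     |A used|  gap  |B used|gap|C used|
     0      a       b      c  d      e = first_free

   produces two moveinfo records, B -> a and C -> a + (c - b).  After
   the memmove calls FIRST_FREE drops to the end of the copied C block,
   and every hash entry and packet reference has already been adjusted
   by the displacement DISP of its block.  */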
void *
mempool_alloc (struct database_dyn *db, size_t len, enum in_flight idx)
{
  /* Make sure LEN is a multiple of our maximum alignment so we can
     keep track of used memory in multiples of this alignment value.  */
  if ((len & BLOCK_ALIGN_M1) != 0)
    len += BLOCK_ALIGN - (len & BLOCK_ALIGN_M1);
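
  /* Worked example (illustration, assuming BLOCK_ALIGN is 8): for LEN
     37 we have 37 & 7 == 5, so 8 - 5 == 3 bytes of padding are added
     and the allocation occupies 40 bytes, i.e. five blocks.  */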
  pthread_mutex_lock (&db->memlock);

  assert ((db->head->first_free & BLOCK_ALIGN_M1) == 0);
  bool tried_resize = false;
  void *res;
 retry:
  res = db->data + db->head->first_free;

  if (__builtin_expect (db->head->first_free + len > db->head->data_size, 0))
    {
      if (! tried_resize)
        {
          /* Try to resize the database.  Grow the size by 1/8th.  */
          size_t oldtotal = (sizeof (struct database_pers_head)
                             + roundup (db->head->module * sizeof (ref_t),
                                        ALIGN)
                             + db->head->data_size);
          size_t new_data_size = (db->head->data_size
                                  + MAX (2 * len, db->head->data_size / 8));
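
          /* Worked example (illustration, not in the original): with a
             DATA_SIZE of 1048576 bytes and LEN of 40, MAX (80, 131072)
             is 131072, so NEW_DATA_SIZE is 1179648 bytes: the pool
             grows by exactly one eighth.  */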
          size_t newtotal = (sizeof (struct database_pers_head)
                             + roundup (db->head->module * sizeof (ref_t),
                                        ALIGN)
                             + new_data_size);
          if (newtotal > db->max_db_size)
            {
              new_data_size -= newtotal - db->max_db_size;
              newtotal = db->max_db_size;
            }
          if (db->mmap_used && newtotal > oldtotal
              /* We only have to adjust the file size.  The new pages
                 become magically available.  */
              && TEMP_FAILURE_RETRY_VAL (posix_fallocate (db->wr_fd, oldtotal,
                                                          newtotal
                                                          - oldtotal)) == 0)
            {
              db->head->data_size = new_data_size;
              tried_resize = true;
              goto retry;
            }
        }
      if (! db->last_alloc_failed)
        {
          dbg_log (_("no more memory for database '%s'"), dbnames[db - dbs]);

          db->last_alloc_failed = true;
        }

      /* No luck.  */
      res = NULL;
    }
  else
    {
      /* Remember that we have allocated this memory.  */
      assert (idx >= 0 && idx < IDX_last);
      mem_in_flight.block[idx].dbidx = db - dbs;
      mem_in_flight.block[idx].blocklen = len;
      mem_in_flight.block[idx].blockoff = db->head->first_free;

      db->head->first_free += len;

      db->last_alloc_failed = false;
    }
  pthread_mutex_unlock (&db->memlock);

  return res;
}
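
/* Usage sketch (an illustration, not code from this file; the real
   callers live in the per-database cache modules): a writer reserves
   space, copes with exhaustion, and relies on the MEM_IN_FLIGHT
   bookkeeping above to keep the block alive across a concurrent gc
   until the record is linked into the hash table:

     struct datahead *dh = mempool_alloc (db, total, IDX_result_data);
     if (dh == NULL)
       return;   (pool full and the resize attempt failed)  */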