/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Andrew Tridgell              1999-2005
   Copyright (C) Paul `Rusty' Russell              2000
   Copyright (C) Jeremy Allison                    2000-2003

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "tdb_private.h"
/* 'right' merges can involve O(n^2) cost when combined with a
   traverse, so they are disabled until we find a way to do them in
   O(1) time
*/
#define USE_RIGHT_MERGES 0
/* read a freelist record and check for simple errors */
int tdb_rec_free_read(struct tdb_context *tdb, tdb_off_t off, struct tdb_record *rec)
{
	if (tdb->methods->tdb_read(tdb, off, rec, sizeof(*rec), DOCONV()) == -1)
		return -1;

	if (rec->magic == TDB_MAGIC) {
		/* this happens when an app is shut down while deleting a record - we should
		   not completely fail when this happens */
		TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_rec_free_read non-free magic 0x%x at offset=%u - fixing\n",
			 rec->magic, off));
		rec->magic = TDB_FREE_MAGIC;
		if (tdb_rec_write(tdb, off, rec) == -1)
			return -1;
	}

	if (rec->magic != TDB_FREE_MAGIC) {
		/* Ensure ecode is set for log fn. */
		tdb->ecode = TDB_ERR_CORRUPT;
		TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_rec_free_read bad magic 0x%x at offset=%u\n",
			 rec->magic, off));
		return -1;
	}
	if (tdb->methods->tdb_oob(tdb, rec->next, sizeof(*rec), 0) != 0)
		return -1;
	return 0;
}
#if USE_RIGHT_MERGES
/* Remove an element from the freelist. Must have alloc lock. */
static int remove_from_freelist(struct tdb_context *tdb, tdb_off_t off, tdb_off_t next)
{
	tdb_off_t last_ptr, i;

	/* read in the freelist top */
	last_ptr = FREELIST_TOP;
	while (tdb_ofs_read(tdb, last_ptr, &i) != -1 && i != 0) {
		if (i == off) {
			/* We've found it! */
			return tdb_ofs_write(tdb, last_ptr, &next);
		}
		/* Follow chain (next offset is at start of record) */
		last_ptr = i;
	}
	tdb->ecode = TDB_ERR_CORRUPT;
	TDB_LOG((tdb, TDB_DEBUG_FATAL,"remove_from_freelist: not on list at off=%u\n", off));
	return -1;
}
#endif
/* update a record tailer (must hold allocation lock) */
static int update_tailer(struct tdb_context *tdb, tdb_off_t offset,
			 const struct tdb_record *rec)
{
	tdb_off_t totalsize;

	/* Offset of tailer from record header */
	totalsize = sizeof(*rec) + rec->rec_len;
	return tdb_ofs_write(tdb, offset + totalsize - sizeof(tdb_off_t),
			     &totalsize);
}
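/*
 * Illustrative layout note (added sketch, not part of the original source;
 * sizes assume a 4-byte tdb_off_t): update_tailer() maintains this on-disk
 * shape for every record:
 *
 *   offset                               offset + sizeof(*rec) + rec_len
 *      |                                                    |
 *      [ struct tdb_record | data ...              | tailer ]
 *                                                      ^
 *                           last sizeof(tdb_off_t) bytes, holding
 *                           totalsize = sizeof(*rec) + rec_len
 *
 * Because the tailer occupies the last sizeof(tdb_off_t) bytes, tdb_free()
 * below can read it at (offset - sizeof(tdb_off_t)) and jump straight back
 * to the left neighbour's header for its merge check.
 */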
/* Add an element into the freelist. Merge adjacent records if
   necessary. */
int tdb_free(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec)
{
	/* Allocation and tailer lock */
	if (tdb_lock(tdb, -1, F_WRLCK) != 0)
		return -1;

	/* set an initial tailer, so if we fail we don't leave a bogus record */
	if (update_tailer(tdb, offset, rec) != 0) {
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_tailer failed!\n"));
		goto fail;
	}

#if USE_RIGHT_MERGES
	/* Look right first (I'm an Australian, dammit) */
	if (offset + sizeof(*rec) + rec->rec_len + sizeof(*rec) <= tdb->map_size) {
		tdb_off_t right = offset + sizeof(*rec) + rec->rec_len;
		struct tdb_record r;

		if (tdb->methods->tdb_read(tdb, right, &r, sizeof(r), DOCONV()) == -1) {
			TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: right read failed at %u\n", right));
			goto left;
		}

		/* If it's free, expand to include it. */
		if (r.magic == TDB_FREE_MAGIC) {
			if (remove_from_freelist(tdb, right, r.next) == -1) {
				TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: right free failed at %u\n", right));
				goto left;
			}
			rec->rec_len += sizeof(r) + r.rec_len;
			if (update_tailer(tdb, offset, rec) == -1) {
				TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_tailer failed at %u\n", offset));
				goto fail;
			}
		}
	}
left:
#endif

	/* Look left */
	if (offset - sizeof(tdb_off_t) > TDB_DATA_START(tdb->hash_size)) {
		tdb_off_t left = offset - sizeof(tdb_off_t);
		struct tdb_record l;
		tdb_off_t leftsize;

		/* Read in tailer and jump back to header */
		if (tdb_ofs_read(tdb, left, &leftsize) == -1) {
			TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: left offset read failed at %u\n", left));
			goto update;
		}

		/* it could be uninitialised data */
		if (leftsize == 0 || leftsize == TDB_PAD_U32) {
			goto update;
		}

		left = offset - leftsize;

		if (leftsize > offset ||
		    left < TDB_DATA_START(tdb->hash_size)) {
			goto update;
		}

		/* Now read in the left record */
		if (tdb->methods->tdb_read(tdb, left, &l, sizeof(l), DOCONV()) == -1) {
			TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: left read failed at %u (%u)\n", left, leftsize));
			goto update;
		}

		/* If it's free, expand to include it. */
		if (l.magic == TDB_FREE_MAGIC) {
			/* we now merge the new record into the left record, rather than the other
			   way around. This makes the operation O(1) instead of O(n). This change
			   prevents traverse from being O(n^2) after a lot of deletes */
			l.rec_len += sizeof(*rec) + rec->rec_len;
			if (tdb_rec_write(tdb, left, &l) == -1) {
				TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_left failed at %u\n", left));
				goto fail;
			}
			if (update_tailer(tdb, left, &l) == -1) {
				TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_tailer failed at %u\n", offset));
				goto fail;
			}
			tdb_unlock(tdb, -1, F_WRLCK);
			return 0;
		}
	}

update:

	/* Now, prepend to free list */
	rec->magic = TDB_FREE_MAGIC;

	if (tdb_ofs_read(tdb, FREELIST_TOP, &rec->next) == -1 ||
	    tdb_rec_write(tdb, offset, rec) == -1 ||
	    tdb_ofs_write(tdb, FREELIST_TOP, &offset) == -1) {
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free record write failed at offset=%u\n", offset));
		goto fail;
	}

	/* And we're done. */
	tdb_unlock(tdb, -1, F_WRLCK);
	return 0;

 fail:
	tdb_unlock(tdb, -1, F_WRLCK);
	return -1;
}
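/*
 * Worked example of the left merge (added sketch, not part of the original
 * source; assumes a 24-byte struct tdb_record and a 4-byte tdb_off_t): a
 * free record at 0x1000 with rec_len 0x40 has tailer value 24 + 0x40 = 0x58.
 * Freeing the record that starts at 0x1000 + 0x58 = 0x1058 reads that
 * tailer at 0x1058 - 4, computes left = 0x1058 - 0x58 = 0x1000, finds
 * TDB_FREE_MAGIC there, and grows the left record in place:
 * l.rec_len += 24 + rec->rec_len. No freelist walk is needed, which is
 * what makes the left merge O(1).
 */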
/*
   the core of tdb_allocate - called when we have decided which
   free list entry to use

   Note that we try to allocate by grabbing data from the end of an existing record,
   not the beginning. This is so the left merge in a free is more likely to be
   able to free up the record without fragmentation
 */
static tdb_off_t tdb_allocate_ofs(struct tdb_context *tdb,
				  tdb_len_t length, tdb_off_t rec_ptr,
				  struct tdb_record *rec, tdb_off_t last_ptr)
{
#define MIN_REC_SIZE (sizeof(struct tdb_record) + sizeof(tdb_off_t) + 8)

	if (rec->rec_len < length + MIN_REC_SIZE) {
		/* we have to grab the whole record */

		/* unlink it from the previous record */
		if (tdb_ofs_write(tdb, last_ptr, &rec->next) == -1) {
			return 0;
		}

		/* mark it not free */
		rec->magic = TDB_MAGIC;
		if (tdb_rec_write(tdb, rec_ptr, rec) == -1) {
			return 0;
		}
		return rec_ptr;
	}

	/* we're going to just shorten the existing record */
	rec->rec_len -= (length + sizeof(*rec));
	if (tdb_rec_write(tdb, rec_ptr, rec) == -1) {
		return 0;
	}
	if (update_tailer(tdb, rec_ptr, rec) == -1) {
		return 0;
	}

	/* and setup the new record */
	rec_ptr += sizeof(*rec) + rec->rec_len;

	memset(rec, '\0', sizeof(*rec));
	rec->rec_len = length;
	rec->magic = TDB_MAGIC;

	if (tdb_rec_write(tdb, rec_ptr, rec) == -1) {
		return 0;
	}

	if (update_tailer(tdb, rec_ptr, rec) == -1) {
		return 0;
	}

	return rec_ptr;
}
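/*
 * Worked example of the split path (added sketch, not part of the original
 * source; assumes a 24-byte struct tdb_record and a 4-byte tdb_off_t, so
 * MIN_REC_SIZE is 36): for a free record with rec_len = 200 and a request
 * of length = 64, 200 >= 64 + 36, so we shorten instead of consuming. The
 * free record keeps rec_len = 200 - (64 + 24) = 112, and the new record is
 * carved from its tail at rec_ptr + 24 + 112. Taking space from the end
 * means that when the new record is later freed, the free space to its
 * left is still adjacent, so the O(1) left merge can reclaim it directly.
 */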
/* allocate some space from the free list. The offset returned points
   to an unconnected tdb_record within the database with room for at
   least length bytes of total data

   0 is returned if the space could not be allocated
 */
static tdb_off_t tdb_allocate_from_freelist(
	struct tdb_context *tdb, tdb_len_t length, struct tdb_record *rec)
{
	tdb_off_t rec_ptr, last_ptr, newrec_ptr;
	struct {
		tdb_off_t rec_ptr, last_ptr;
		tdb_len_t rec_len;
	} bestfit;
	float multiplier = 1.0;

	/* over-allocate to reduce fragmentation */
	length *= 1.25;

	/* Extra bytes required for tailer */
	length += sizeof(tdb_off_t);
	length = TDB_ALIGN(length, TDB_ALIGNMENT);

 again:
	last_ptr = FREELIST_TOP;

	/* read in the freelist top */
	if (tdb_ofs_read(tdb, FREELIST_TOP, &rec_ptr) == -1)
		return 0;

	bestfit.rec_ptr = 0;
	bestfit.last_ptr = 0;
	bestfit.rec_len = 0;

	/*
	   this is a best fit allocation strategy. Originally we used
	   a first fit strategy, but it suffered from massive fragmentation
	   issues when faced with a slowly increasing record size.
	 */
	while (rec_ptr) {
		if (tdb_rec_free_read(tdb, rec_ptr, rec) == -1) {
			return 0;
		}

		if (rec->rec_len >= length) {
			if (bestfit.rec_ptr == 0 ||
			    rec->rec_len < bestfit.rec_len) {
				bestfit.rec_len = rec->rec_len;
				bestfit.rec_ptr = rec_ptr;
				bestfit.last_ptr = last_ptr;
			}
		}

		/* move to the next record */
		last_ptr = rec_ptr;
		rec_ptr = rec->next;

		/* if we've found a record that is big enough, then
		   stop searching if it's also not too big. The
		   definition of 'too big' changes as we scan
		   through */
		if (bestfit.rec_len > 0 &&
		    bestfit.rec_len < length * multiplier) {
			break;
		}

		/* this multiplier means we only extremely rarely
		   search more than 50 or so records. At 50 records we
		   accept records up to 11 times larger than what we
		   ask for */
		multiplier *= 1.05;
	}

	if (bestfit.rec_ptr != 0) {
		if (tdb_rec_free_read(tdb, bestfit.rec_ptr, rec) == -1) {
			return 0;
		}

		newrec_ptr = tdb_allocate_ofs(tdb, length, bestfit.rec_ptr,
					      rec, bestfit.last_ptr);
		return newrec_ptr;
	}

	/* we didn't find enough space. See if we can expand the
	   database and if we can then try again */
	if (tdb_expand(tdb, length + sizeof(*rec)) == 0)
		goto again;
	return 0;
}
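/*
 * Arithmetic check on the multiplier comment above (added note, not part
 * of the original source): after n rejected candidates the acceptance
 * cutoff is length * 1.05^n, and 1.05^50 is roughly 11.5. So by about the
 * 50th freelist entry the scan accepts any record up to ~11x the requested
 * size, which is what bounds the best-fit search on long freelists.
 */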
static bool tdb_alloc_dead(
	struct tdb_context *tdb, int hash, tdb_len_t length,
	tdb_off_t *rec_ptr, struct tdb_record *rec)
{
	tdb_off_t last_ptr;

	*rec_ptr = tdb_find_dead(tdb, hash, rec, length, &last_ptr);
	if (*rec_ptr == 0) {
		return false;
	}
	/*
	 * Unlink the record from the hash chain, it's about to be moved into
	 * another one.
	 */
	return (tdb_ofs_write(tdb, last_ptr, &rec->next) == 0);
}
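/*
 * Background note (summarized from the wider tdb sources, not this file):
 * with tdb->max_dead_records nonzero, deleted records stay in their hash
 * chain marked as dead rather than moving to the freelist. Here
 * tdb_find_dead() scans chain "hash" for a dead record with at least
 * "length" bytes of room and reports its predecessor via last_ptr, so the
 * final tdb_ofs_write() can splice the record out of the chain.
 */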
/*
 * Chain "hash" is assumed to be locked
 */

tdb_off_t tdb_allocate(struct tdb_context *tdb, int hash, tdb_len_t length,
		       struct tdb_record *rec)
{
	tdb_off_t ret;
	int i;

	if (tdb->max_dead_records == 0) {
		/*
		 * No dead records to expect anywhere. Do the blocking
		 * freelist lock without trying to steal from others
		 */
		goto blocking_freelist_allocate;
	}

	/*
	 * The following loop tries to get the freelist lock nonblocking. If
	 * it gets the lock, allocate from there. If the freelist is busy,
	 * instead of waiting we try to steal dead records from other hash
	 * chains.
	 *
	 * Be aware that we do nonblocking locks on the other hash chains as
	 * well and fail gracefully. This way we avoid deadlocks (we block two
	 * hash chains, something which is pretty bad normally)
	 */

	for (i=1; i<tdb->hash_size; i++) {
		int list;

		if (tdb_lock_nonblock(tdb, -1, F_WRLCK) == 0) {
			/*
			 * Under the freelist lock take the chance to give
			 * back our dead records.
			 */
			tdb_purge_dead(tdb, hash);

			ret = tdb_allocate_from_freelist(tdb, length, rec);
			tdb_unlock(tdb, -1, F_WRLCK);
			return ret;
		}

		list = BUCKET(hash+i);

		if (tdb_lock_nonblock(tdb, list, F_WRLCK) == 0) {
			bool got_dead;

			got_dead = tdb_alloc_dead(tdb, list, length, &ret, rec);
			tdb_unlock(tdb, list, F_WRLCK);

			if (got_dead) {
				return ret;
			}
		}
	}

blocking_freelist_allocate:

	if (tdb_lock(tdb, -1, F_WRLCK) == -1) {
		return 0;
	}
	ret = tdb_allocate_from_freelist(tdb, length, rec);
	tdb_unlock(tdb, -1, F_WRLCK);
	return ret;
}
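/*
 * Illustrative trace (added sketch with assumed values, not part of the
 * original source): with hash == 7 and a busy freelist, the loop above
 * first tries the freelist lock nonblocking, then probes BUCKET(7+1),
 * BUCKET(7+2), ... for a stealable dead record, each with a nonblocking
 * lock that is dropped before the next probe. At most one extra chain
 * lock is held at a time, which is how the two-blocked-chains deadlock
 * described in the comment is avoided.
 */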
/*
   return the size of the freelist - used to decide if we should repack
*/
_PUBLIC_ int tdb_freelist_size(struct tdb_context *tdb)
{
	tdb_off_t ptr;
	int count=0;

	if (tdb_lock(tdb, -1, F_RDLCK) == -1) {
		return -1;
	}

	ptr = FREELIST_TOP;
	while (tdb_ofs_read(tdb, ptr, &ptr) == 0 && ptr != 0) {
		count++;
	}

	tdb_unlock(tdb, -1, F_RDLCK);
	return count;
}
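/*
 * Usage sketch (added example; the threshold and the tdb_repack() call
 * describe a hypothetical caller, not this file):
 *
 *	if (tdb_freelist_size(tdb) > 1000) {
 *		tdb_repack(tdb);
 *	}
 *
 * i.e. a maintenance pass samples the freelist length and, past some
 * heuristic threshold, repacks the database to coalesce fragmented free
 * space.
 */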