lib/tdb2/free.c
/*
  Trivial Database 2: free list/block handling
  Copyright (C) Rusty Russell 2010

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 3 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <ccan/likely/likely.h>
#include <ccan/ilog/ilog.h>
#include <time.h>
#include <assert.h>
#include <limits.h>
static unsigned fls64(uint64_t val)
{
	return ilog64(val);
}
/* In which bucket would we find a particular record size? (ignoring header) */
unsigned int size_to_bucket(tdb_len_t data_len)
{
	unsigned int bucket;

	/* We can't have records smaller than this. */
	assert(data_len >= TDB_MIN_DATA_LEN);

	/* Ignoring the header... */
	if (data_len - TDB_MIN_DATA_LEN <= 64) {
		/* 0 in bucket 0, 8 in bucket 1... 64 in bucket 8. */
		bucket = (data_len - TDB_MIN_DATA_LEN) / 8;
	} else {
		/* After that we go power of 2. */
		bucket = fls64(data_len - TDB_MIN_DATA_LEN) + 2;
	}

	if (unlikely(bucket >= TDB_FREE_BUCKETS))
		bucket = TDB_FREE_BUCKETS - 1;
	return bucket;
}
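/* Worked example of the mapping above: data_len - TDB_MIN_DATA_LEN of
 * 0..64 lands in buckets 0..8 in 8-byte steps (e.g. 24 -> bucket 3);
 * past that, 65 -> fls64(65) + 2 == 9, 128 -> 10, 256 -> 11, doubling
 * the bucket width each time, until everything huge is clamped into
 * TDB_FREE_BUCKETS - 1. */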
tdb_off_t first_ftable(struct tdb_context *tdb)
{
	return tdb_read_off(tdb, offsetof(struct tdb_header, free_table));
}
tdb_off_t next_ftable(struct tdb_context *tdb, tdb_off_t ftable)
{
	return tdb_read_off(tdb, ftable + offsetof(struct tdb_freetable, next));
}
enum TDB_ERROR tdb_ftable_init(struct tdb_context *tdb)
{
	/* Use reservoir sampling algorithm to select a free list at random. */
	unsigned int rnd, max = 0, count = 0;
	tdb_off_t off;

	tdb->ftable_off = off = first_ftable(tdb);
	tdb->ftable = 0;

	while (off) {
		if (TDB_OFF_IS_ERR(off)) {
			return off;
		}

		rnd = random();
		if (rnd >= max) {
			tdb->ftable_off = off;
			tdb->ftable = count;
			max = rnd;
		}

		off = next_ftable(tdb, off);
		count++;
	}
	return TDB_SUCCESS;
}
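/* Why this single pass is fair: each table draws an independent random()
 * value and we keep the table holding the running maximum.  Every table is
 * equally likely (up to ties) to hold the overall maximum, so each free
 * table is selected with probability ~1/N without knowing the table count
 * N in advance. */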
/* Offset of a given bucket. */
tdb_off_t bucket_off(tdb_off_t ftable_off, unsigned bucket)
{
	return ftable_off + offsetof(struct tdb_freetable, buckets)
		+ bucket * sizeof(tdb_off_t);
}
/* Returns free_buckets + 1, or list number to search, or -ve error. */
static tdb_off_t find_free_head(struct tdb_context *tdb,
				tdb_off_t ftable_off,
				tdb_off_t bucket)
{
	/* Speculatively search for a non-zero bucket. */
	return tdb_find_nonzero_off(tdb, bucket_off(ftable_off, 0),
				    bucket, TDB_FREE_BUCKETS);
}
static void check_list(struct tdb_context *tdb, tdb_off_t b_off)
{
#ifdef CCAN_TDB2_DEBUG
	tdb_off_t off, prev = 0, first;
	struct tdb_free_record r;

	first = off = (tdb_read_off(tdb, b_off) & TDB_OFF_MASK);
	while (off != 0) {
		tdb_read_convert(tdb, off, &r, sizeof(r));
		if (frec_magic(&r) != TDB_FREE_MAGIC)
			abort();
		if (prev && frec_prev(&r) != prev)
			abort();
		prev = off;
		off = r.next;
	}

	if (first) {
		tdb_read_convert(tdb, first, &r, sizeof(r));
		if (frec_prev(&r) != prev)
			abort();
	}
#endif
}
/* Remove from free bucket. */
static enum TDB_ERROR remove_from_list(struct tdb_context *tdb,
				       tdb_off_t b_off, tdb_off_t r_off,
				       const struct tdb_free_record *r)
{
	tdb_off_t off, prev_next, head;
	enum TDB_ERROR ecode;

	/* Is this the only element in the list?  Zero out bucket, and we're done. */
	if (frec_prev(r) == r_off)
		return tdb_write_off(tdb, b_off, 0);

	/* off = &r->prev->next */
	off = frec_prev(r) + offsetof(struct tdb_free_record, next);

	/* Get prev->next */
	prev_next = tdb_read_off(tdb, off);
	if (TDB_OFF_IS_ERR(prev_next))
		return prev_next;

	/* If prev->next == 0, we were head: update bucket to point to next. */
	if (prev_next == 0) {
		/* We must preserve upper bits. */
		head = tdb_read_off(tdb, b_off);
		if (TDB_OFF_IS_ERR(head))
			return head;

		if ((head & TDB_OFF_MASK) != r_off) {
			return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
					  "remove_from_list:"
					  " %llu head %llu on list %llu",
					  (long long)r_off,
					  (long long)head,
					  (long long)b_off);
		}

		head = ((head & ~TDB_OFF_MASK) | r->next);
		ecode = tdb_write_off(tdb, b_off, head);
		if (ecode != TDB_SUCCESS)
			return ecode;
	} else {
		/* r->prev->next = r->next */
		ecode = tdb_write_off(tdb, off, r->next);
		if (ecode != TDB_SUCCESS)
			return ecode;
	}

	/* If we were the tail, off = &head->prev. */
	if (r->next == 0) {
		head = tdb_read_off(tdb, b_off);
		if (TDB_OFF_IS_ERR(head))
			return head;
		head &= TDB_OFF_MASK;
		off = head + offsetof(struct tdb_free_record, magic_and_prev);
	} else {
		/* off = &r->next->prev */
		off = r->next + offsetof(struct tdb_free_record,
					 magic_and_prev);
	}

#ifdef CCAN_TDB2_DEBUG
	/* *off == r */
	if ((tdb_read_off(tdb, off) & TDB_OFF_MASK) != r_off) {
		return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
				  "remove_from_list:"
				  " %llu bad prev in list %llu",
				  (long long)r_off, (long long)b_off);
	}
#endif

	/* r->next->prev = r->prev */
	return tdb_write_off(tdb, off, r->magic_and_prev);
}
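/* List shape, which the logic above relies on: the bucket head word points
 * at the first record in its low TDB_OFF_MASK bits (upper bits are stolen
 * for an enqueue counter); next pointers form a singly-linked chain ending
 * in 0, while prev pointers are circular, so the head's prev names the
 * tail.  That is why an only element has frec_prev(r) == r_off, why
 * prev->next == 0 identifies the head, and why removing the tail rewrites
 * the head's magic_and_prev. */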
/* Enqueue in this free bucket: sets coalesce if we've added 128
 * entries to it. */
static enum TDB_ERROR enqueue_in_free(struct tdb_context *tdb,
				      tdb_off_t b_off,
				      tdb_off_t off,
				      tdb_len_t len,
				      bool *coalesce)
{
	struct tdb_free_record new;
	enum TDB_ERROR ecode;
	tdb_off_t prev, head;
	uint64_t magic = (TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL));

	head = tdb_read_off(tdb, b_off);
	if (TDB_OFF_IS_ERR(head))
		return head;

	/* Set the free table and length; prev/next are filled in below. */
	new.ftable_and_len = ((uint64_t)tdb->ftable
			      << (64 - TDB_OFF_UPPER_STEAL)) | len;

	/* new->next = head. */
	new.next = (head & TDB_OFF_MASK);

	/* First element?  Prev points to ourselves. */
	if (!new.next) {
		new.magic_and_prev = (magic | off);
	} else {
		/* new->prev = next->prev */
		prev = tdb_read_off(tdb,
				    new.next + offsetof(struct tdb_free_record,
							magic_and_prev));
		new.magic_and_prev = prev;
		if (frec_magic(&new) != TDB_FREE_MAGIC) {
			return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
					  "enqueue_in_free: %llu bad head"
					  " prev %llu",
					  (long long)new.next,
					  (long long)prev);
		}
		/* next->prev = new. */
		ecode = tdb_write_off(tdb, new.next
				      + offsetof(struct tdb_free_record,
						 magic_and_prev),
				      off | magic);
		if (ecode != TDB_SUCCESS) {
			return ecode;
		}

#ifdef CCAN_TDB2_DEBUG
		prev = tdb_read_off(tdb, frec_prev(&new)
				    + offsetof(struct tdb_free_record, next));
		if (prev != 0) {
			return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
					  "enqueue_in_free:"
					  " %llu bad tail next ptr %llu",
					  (long long)frec_prev(&new)
					  + offsetof(struct tdb_free_record,
						     next),
					  (long long)prev);
		}
#endif
	}

	/* Update enqueue count, but don't set high bit: see TDB_OFF_IS_ERR */
	if (*coalesce)
		head += (1ULL << (64 - TDB_OFF_UPPER_STEAL));
	head &= ~(TDB_OFF_MASK | (1ULL << 63));
	head |= off;

	ecode = tdb_write_off(tdb, b_off, head);
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}

	/* It's time to coalesce if counter wrapped. */
	if (*coalesce)
		*coalesce = ((head & ~TDB_OFF_MASK) == 0);

	return tdb_write_convert(tdb, off, &new, sizeof(new));
}
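/* The bucket head word does double duty: its low TDB_OFF_MASK bits hold
 * the offset of the first free record, and the stolen upper bits (minus
 * bit 63, which must stay clear so TDB_OFF_IS_ERR keeps working) count
 * enqueues.  When that counter wraps back to zero we signal "time to
 * coalesce"; with the steal width in private.h that is every 128
 * additions, matching the comment on this function. */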
static tdb_off_t ftable_offset(struct tdb_context *tdb, unsigned int ftable)
{
	tdb_off_t off;
	unsigned int i;

	if (likely(tdb->ftable == ftable))
		return tdb->ftable_off;

	off = first_ftable(tdb);
	for (i = 0; i < ftable; i++) {
		if (TDB_OFF_IS_ERR(off)) {
			break;
		}
		off = next_ftable(tdb, off);
	}
	return off;
}
/* Note: we unlock the current bucket if we fail (-ve), or coalesce (+ve) and
 * need to blatt the *protect record (which is set to an error). */
static tdb_len_t coalesce(struct tdb_context *tdb,
			  tdb_off_t off, tdb_off_t b_off,
			  tdb_len_t data_len,
			  tdb_off_t *protect)
{
	tdb_off_t end;
	struct tdb_free_record rec;
	enum TDB_ERROR ecode;

	tdb->stats.alloc_coalesce_tried++;
	end = off + sizeof(struct tdb_used_record) + data_len;

	while (end < tdb->file->map_size) {
		const struct tdb_free_record *r;
		tdb_off_t nb_off;
		unsigned ftable, bucket;

		r = tdb_access_read(tdb, end, sizeof(*r), true);
		if (TDB_PTR_IS_ERR(r)) {
			ecode = TDB_PTR_ERR(r);
			goto err;
		}

		if (frec_magic(r) != TDB_FREE_MAGIC
		    || frec_ftable(r) == TDB_FTABLE_NONE) {
			tdb_access_release(tdb, r);
			break;
		}

		ftable = frec_ftable(r);
		bucket = size_to_bucket(frec_len(r));
		nb_off = ftable_offset(tdb, ftable);
		if (TDB_OFF_IS_ERR(nb_off)) {
			tdb_access_release(tdb, r);
			ecode = nb_off;
			goto err;
		}
		nb_off = bucket_off(nb_off, bucket);
		tdb_access_release(tdb, r);

		/* We may be violating lock order here, so best effort. */
		if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT)
		    != TDB_SUCCESS) {
			tdb->stats.alloc_coalesce_lockfail++;
			break;
		}

		/* Now we have lock, re-check. */
		ecode = tdb_read_convert(tdb, end, &rec, sizeof(rec));
		if (ecode != TDB_SUCCESS) {
			tdb_unlock_free_bucket(tdb, nb_off);
			goto err;
		}

		if (unlikely(frec_magic(&rec) != TDB_FREE_MAGIC)) {
			tdb->stats.alloc_coalesce_race++;
			tdb_unlock_free_bucket(tdb, nb_off);
			break;
		}

		if (unlikely(frec_ftable(&rec) != ftable)
		    || unlikely(size_to_bucket(frec_len(&rec)) != bucket)) {
			tdb->stats.alloc_coalesce_race++;
			tdb_unlock_free_bucket(tdb, nb_off);
			break;
		}

		/* Did we just mess up a record you were hoping to use? */
		if (end == *protect) {
			tdb->stats.alloc_coalesce_iterate_clash++;
			*protect = TDB_ERR_NOEXIST;
		}

		ecode = remove_from_list(tdb, nb_off, end, &rec);
		check_list(tdb, nb_off);
		if (ecode != TDB_SUCCESS) {
			tdb_unlock_free_bucket(tdb, nb_off);
			goto err;
		}

		end += sizeof(struct tdb_used_record) + frec_len(&rec);
		tdb_unlock_free_bucket(tdb, nb_off);
		tdb->stats.alloc_coalesce_num_merged++;
	}

	/* Didn't find any adjacent free? */
	if (end == off + sizeof(struct tdb_used_record) + data_len)
		return 0;

	/* Before we expand, check this isn't one you wanted protected? */
	if (off == *protect) {
		*protect = TDB_ERR_EXISTS;
		tdb->stats.alloc_coalesce_iterate_clash++;
	}

	/* OK, expand initial record */
	ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
	if (ecode != TDB_SUCCESS) {
		goto err;
	}

	if (frec_len(&rec) != data_len) {
		ecode = tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
				   "coalesce: expected data len %zu not %zu",
				   (size_t)data_len, (size_t)frec_len(&rec));
		goto err;
	}

	ecode = remove_from_list(tdb, b_off, off, &rec);
	check_list(tdb, b_off);
	if (ecode != TDB_SUCCESS) {
		goto err;
	}

	/* Try locking violation first.  We don't allow coalesce recursion! */
	ecode = add_free_record(tdb, off, end - off, TDB_LOCK_NOWAIT, false);
	if (ecode != TDB_SUCCESS) {
		/* Need to drop lock.  Can't rely on anything stable. */
		tdb->stats.alloc_coalesce_lockfail++;
		*protect = TDB_ERR_CORRUPT;

		/* We have to drop this to avoid deadlocks, so make sure record
		 * doesn't get coalesced by someone else! */
		rec.ftable_and_len = (TDB_FTABLE_NONE
				      << (64 - TDB_OFF_UPPER_STEAL))
			| (end - off - sizeof(struct tdb_used_record));
		ecode = tdb_write_off(tdb,
				      off + offsetof(struct tdb_free_record,
						     ftable_and_len),
				      rec.ftable_and_len);
		if (ecode != TDB_SUCCESS) {
			goto err;
		}

		tdb_unlock_free_bucket(tdb, b_off);

		ecode = add_free_record(tdb, off, end - off, TDB_LOCK_WAIT,
					false);
		if (ecode != TDB_SUCCESS) {
			return ecode;
		}
	} else if (TDB_OFF_IS_ERR(*protect)) {
		/* For simplicity, we always drop lock if they can't continue */
		tdb_unlock_free_bucket(tdb, b_off);
	}

	tdb->stats.alloc_coalesce_succeeded++;

	/* Return usable length. */
	return end - off - sizeof(struct tdb_used_record);

err:
	/* To unify error paths, we *always* unlock bucket on error. */
	tdb_unlock_free_bucket(tdb, b_off);
	return ecode;
}
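/* Locking note: free-bucket locks are normally taken in a fixed order, but
 * walking right merges neighbours that live in *other* buckets, so the
 * TDB_LOCK_NOWAIT attempts above are best effort; on contention we simply
 * stop merging rather than risk deadlock.  Marking the record with
 * TDB_FTABLE_NONE is the matching trick on the unlocked path: it makes the
 * grown record invisible to concurrent coalescers until add_free_record()
 * re-files it under a proper free table. */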
/* List is locked: we unlock it. */
static enum TDB_ERROR coalesce_list(struct tdb_context *tdb,
				    tdb_off_t ftable_off,
				    tdb_off_t b_off,
				    unsigned int limit)
{
	enum TDB_ERROR ecode;
	tdb_off_t off;

	off = tdb_read_off(tdb, b_off);
	if (TDB_OFF_IS_ERR(off)) {
		ecode = off;
		goto unlock_err;
	}
	/* A little bit of paranoia: counter should be 0. */
	off &= TDB_OFF_MASK;

	while (off && limit--) {
		struct tdb_free_record rec;
		tdb_len_t coal;
		tdb_off_t next;

		ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
		if (ecode != TDB_SUCCESS)
			goto unlock_err;

		next = rec.next;
		coal = coalesce(tdb, off, b_off, frec_len(&rec), &next);
		if (TDB_OFF_IS_ERR(coal)) {
			/* This has already unlocked on error. */
			return coal;
		}
		if (TDB_OFF_IS_ERR(next)) {
			/* Coalescing had to unlock, so stop. */
			return TDB_SUCCESS;
		}
		/* Keep going if we're doing well... */
		limit += size_to_bucket(coal / 16 + TDB_MIN_DATA_LEN);
		off = next;
	}

	/* Now, move those elements to the tail of the list so we get something
	 * else next time. */
	if (off) {
		struct tdb_free_record oldhrec, newhrec, oldtrec, newtrec;
		tdb_off_t oldhoff, oldtoff, newtoff;

		/* The record we were up to is the new head. */
		ecode = tdb_read_convert(tdb, off, &newhrec, sizeof(newhrec));
		if (ecode != TDB_SUCCESS)
			goto unlock_err;

		/* Get the new tail. */
		newtoff = frec_prev(&newhrec);
		ecode = tdb_read_convert(tdb, newtoff, &newtrec,
					 sizeof(newtrec));
		if (ecode != TDB_SUCCESS)
			goto unlock_err;

		/* Get the old head. */
		oldhoff = tdb_read_off(tdb, b_off);
		if (TDB_OFF_IS_ERR(oldhoff)) {
			ecode = oldhoff;
			goto unlock_err;
		}

		/* This could happen if they all coalesced away. */
		if (oldhoff == off)
			goto out;

		ecode = tdb_read_convert(tdb, oldhoff, &oldhrec,
					 sizeof(oldhrec));
		if (ecode != TDB_SUCCESS)
			goto unlock_err;

		/* Get the old tail. */
		oldtoff = frec_prev(&oldhrec);
		ecode = tdb_read_convert(tdb, oldtoff, &oldtrec,
					 sizeof(oldtrec));
		if (ecode != TDB_SUCCESS)
			goto unlock_err;

		/* Old tail's next points to old head. */
		oldtrec.next = oldhoff;

		/* Old head's prev points to old tail. */
		oldhrec.magic_and_prev
			= (TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL))
			| oldtoff;

		/* New tail's next is 0. */
		newtrec.next = 0;

		/* Write out the modified versions. */
		ecode = tdb_write_convert(tdb, oldtoff, &oldtrec,
					  sizeof(oldtrec));
		if (ecode != TDB_SUCCESS)
			goto unlock_err;

		ecode = tdb_write_convert(tdb, oldhoff, &oldhrec,
					  sizeof(oldhrec));
		if (ecode != TDB_SUCCESS)
			goto unlock_err;

		ecode = tdb_write_convert(tdb, newtoff, &newtrec,
					  sizeof(newtrec));
		if (ecode != TDB_SUCCESS)
			goto unlock_err;

		/* And finally link in new head. */
		ecode = tdb_write_off(tdb, b_off, off);
		if (ecode != TDB_SUCCESS)
			goto unlock_err;
	}
out:
	tdb_unlock_free_bucket(tdb, b_off);
	return TDB_SUCCESS;

unlock_err:
	tdb_unlock_free_bucket(tdb, b_off);
	return ecode;
}
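/* The tail rotation above is a cheap fairness device: each call coalesces
 * only a bounded prefix of the list, so splicing the already-visited
 * records onto the tail means the next coalesce_list() on this bucket
 * starts with records it has not examined yet. */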
/* List must not be locked if coalesce is set. */
enum TDB_ERROR add_free_record(struct tdb_context *tdb,
			       tdb_off_t off, tdb_len_t len_with_header,
			       enum tdb_lock_flags waitflag,
			       bool coalesce)
{
	tdb_off_t b_off;
	tdb_len_t len;
	enum TDB_ERROR ecode;

	assert(len_with_header >= sizeof(struct tdb_free_record));

	len = len_with_header - sizeof(struct tdb_used_record);

	b_off = bucket_off(tdb->ftable_off, size_to_bucket(len));
	ecode = tdb_lock_free_bucket(tdb, b_off, waitflag);
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}

	ecode = enqueue_in_free(tdb, b_off, off, len, &coalesce);
	check_list(tdb, b_off);

	/* Coalescing unlocks free list. */
	if (!ecode && coalesce)
		ecode = coalesce_list(tdb, tdb->ftable_off, b_off, 2);
	else
		tdb_unlock_free_bucket(tdb, b_off);
	return ecode;
}
static size_t adjust_size(size_t keylen, size_t datalen)
{
	size_t size = keylen + datalen;

	if (size < TDB_MIN_DATA_LEN)
		size = TDB_MIN_DATA_LEN;

	/* Round to next uint64_t boundary. */
	return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
}
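/* E.g. adjust_size(3, 10) == 16: 13 bytes rounded up to the next multiple
 * of sizeof(uint64_t).  Tiny records like adjust_size(0, 1) are clamped up
 * to TDB_MIN_DATA_LEN first, so a freed record can always hold the free
 * list metadata. */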
/* If we have enough left over to be useful, split that off. */
static size_t record_leftover(size_t keylen, size_t datalen,
			      bool want_extra, size_t total_len)
{
	ssize_t leftover;

	if (want_extra)
		datalen += datalen / 2;
	leftover = total_len - adjust_size(keylen, datalen);

	if (leftover < (ssize_t)sizeof(struct tdb_free_record))
		return 0;

	return leftover;
}
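/* E.g. record_leftover(8, 16, true, 64): want_extra pads datalen to 24,
 * adjust_size(8, 24) == 32, leaving 32 bytes.  That is at least
 * sizeof(struct tdb_free_record) (three uint64_t fields), so the caller
 * splits it off as a new free record rather than wasting it as padding. */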
/* We need size bytes to put our key and data in. */
static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
				tdb_off_t ftable_off,
				tdb_off_t bucket,
				size_t keylen, size_t datalen,
				bool want_extra,
				unsigned magic,
				unsigned hashlow)
{
	tdb_off_t off, b_off, best_off;
	struct tdb_free_record best = { 0 };
	double multiplier;
	size_t size = adjust_size(keylen, datalen);
	enum TDB_ERROR ecode;

	tdb->stats.allocs++;
	b_off = bucket_off(ftable_off, bucket);

	/* FIXME: Try non-blocking wait first, to measure contention. */
	/* Lock this bucket. */
	ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT);
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}

	best.ftable_and_len = -1ULL;
	best_off = 0;

	/* Get slack if we're after extra. */
	if (want_extra)
		multiplier = 1.5;
	else
		multiplier = 1.0;

	/* Walk the list to see if any are large enough, getting less fussy
	 * as we go. */
	off = tdb_read_off(tdb, b_off);
	if (TDB_OFF_IS_ERR(off)) {
		ecode = off;
		goto unlock_err;
	}
	off &= TDB_OFF_MASK;

	while (off) {
		const struct tdb_free_record *r;
		tdb_len_t len;
		tdb_off_t next;

		r = tdb_access_read(tdb, off, sizeof(*r), true);
		if (TDB_PTR_IS_ERR(r)) {
			ecode = TDB_PTR_ERR(r);
			goto unlock_err;
		}

		if (frec_magic(r) != TDB_FREE_MAGIC) {
			ecode = tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
					   "lock_and_alloc:"
					   " %llu non-free 0x%llx",
					   (long long)off,
					   (long long)r->magic_and_prev);
			tdb_access_release(tdb, r);
			goto unlock_err;
		}

		if (frec_len(r) >= size && frec_len(r) < frec_len(&best)) {
			best_off = off;
			best = *r;
		}

		if (frec_len(&best) <= size * multiplier && best_off) {
			tdb_access_release(tdb, r);
			break;
		}

		multiplier *= 1.01;

		next = r->next;
		len = frec_len(r);
		tdb_access_release(tdb, r);
		off = next;
	}

	/* If we found anything at all, use it. */
	if (best_off) {
		struct tdb_used_record rec;
		size_t leftover;

		/* We're happy with this size: take it. */
		ecode = remove_from_list(tdb, b_off, best_off, &best);
		check_list(tdb, b_off);
		if (ecode != TDB_SUCCESS) {
			goto unlock_err;
		}

		leftover = record_leftover(keylen, datalen, want_extra,
					   frec_len(&best));

		assert(keylen + datalen + leftover <= frec_len(&best));
		/* We need to mark non-free before we drop lock, otherwise
		 * coalesce() could try to merge it! */
		ecode = set_header(tdb, &rec, magic, keylen, datalen,
				   frec_len(&best) - leftover, hashlow);
		if (ecode != TDB_SUCCESS) {
			goto unlock_err;
		}

		ecode = tdb_write_convert(tdb, best_off, &rec, sizeof(rec));
		if (ecode != TDB_SUCCESS) {
			goto unlock_err;
		}

		/* For futureproofing, we put a 0 in any unused space. */
		if (rec_extra_padding(&rec)) {
			ecode = tdb->methods->twrite(tdb, best_off + sizeof(rec)
						     + keylen + datalen, "", 1);
			if (ecode != TDB_SUCCESS) {
				goto unlock_err;
			}
		}

		/* Bucket of leftover will be <= current bucket, so nested
		 * locking is allowed. */
		if (leftover) {
			tdb->stats.alloc_leftover++;
			ecode = add_free_record(tdb,
						best_off + sizeof(rec)
						+ frec_len(&best) - leftover,
						leftover, TDB_LOCK_WAIT, false);
			if (ecode != TDB_SUCCESS) {
				best_off = ecode;
			}
		}
		tdb_unlock_free_bucket(tdb, b_off);

		return best_off;
	}

	tdb_unlock_free_bucket(tdb, b_off);
	return 0;

unlock_err:
	tdb_unlock_free_bucket(tdb, b_off);
	return ecode;
}
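/* Allocation policy in the walk above: best fit with growing tolerance.
 * The smallest record that fits so far is remembered, and every record
 * inspected widens the acceptance window by 1%, so a long list converges
 * on "close enough" instead of scanning to the end for a perfect fit. */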
/* Get a free block from current free list, or 0 if none, -ve on error. */
static tdb_off_t get_free(struct tdb_context *tdb,
			  size_t keylen, size_t datalen, bool want_extra,
			  unsigned magic, unsigned hashlow)
{
	tdb_off_t off, ftable_off;
	tdb_off_t start_b, b, ftable;
	bool wrapped = false;

	/* If they are growing, add 50% to get to higher bucket. */
	if (want_extra)
		start_b = size_to_bucket(adjust_size(keylen,
						     datalen + datalen / 2));
	else
		start_b = size_to_bucket(adjust_size(keylen, datalen));

	ftable_off = tdb->ftable_off;
	ftable = tdb->ftable;
	while (!wrapped || ftable_off != tdb->ftable_off) {
		/* Start at exact size bucket, and search up... */
		for (b = find_free_head(tdb, ftable_off, start_b);
		     b < TDB_FREE_BUCKETS;
		     b = find_free_head(tdb, ftable_off, b + 1)) {
			/* Try getting one from list. */
			off = lock_and_alloc(tdb, ftable_off,
					     b, keylen, datalen, want_extra,
					     magic, hashlow);
			if (TDB_OFF_IS_ERR(off))
				return off;
			if (off != 0) {
				if (b == start_b)
					tdb->stats.alloc_bucket_exact++;
				if (b == TDB_FREE_BUCKETS - 1)
					tdb->stats.alloc_bucket_max++;
				/* Worked?  Stay using this list. */
				tdb->ftable_off = ftable_off;
				tdb->ftable = ftable;
				return off;
			}
			/* Didn't work.  Try next bucket. */
		}

		if (TDB_OFF_IS_ERR(b)) {
			return b;
		}

		/* Hmm, try next table. */
		ftable_off = next_ftable(tdb, ftable_off);
		if (TDB_OFF_IS_ERR(ftable_off)) {
			return ftable_off;
		}
		ftable++;

		if (ftable_off == 0) {
			wrapped = true;
			ftable_off = first_ftable(tdb);
			if (TDB_OFF_IS_ERR(ftable_off)) {
				return ftable_off;
			}
			ftable = 0;
		}
	}

	return 0;
}
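/* Search order above: the exact-size bucket first, then progressively
 * larger buckets in the same free table, then the same sweep in each
 * subsequent table, wrapping once back to the table we started from.
 * A hit updates tdb->ftable_off so later allocations stay on the list
 * that last worked. */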
enum TDB_ERROR set_header(struct tdb_context *tdb,
			  struct tdb_used_record *rec,
			  unsigned magic, uint64_t keylen, uint64_t datalen,
			  uint64_t actuallen, unsigned hashlow)
{
	uint64_t keybits = (fls64(keylen) + 1) / 2;

	/* Use bottom bits of hash, so it's independent of hash table size. */
	rec->magic_and_meta = (hashlow & ((1 << 11) - 1))
		| ((actuallen - (keylen + datalen)) << 11)
		| (keybits << 43)
		| ((uint64_t)magic << 48);
	rec->key_and_data_len = (keylen | (datalen << (keybits * 2)));

	/* Encoding can fail on big values. */
	if (rec_key_length(rec) != keylen
	    || rec_data_length(rec) != datalen
	    || rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
		return tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
				  "Could not encode k=%llu,d=%llu,a=%llu",
				  (long long)keylen, (long long)datalen,
				  (long long)actuallen);
	}
	return TDB_SUCCESS;
}
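/* Packing sketch: keybits rounds the key's bit-length up to an even count,
 * e.g. keylen 5 -> fls64(5) == 3 -> keybits 2, so the key length occupies
 * the low 2 * keybits == 4 bits of key_and_data_len and the data length is
 * shifted above it.  In magic_and_meta the padding sits in bits 11..42, so
 * the decode-and-compare check above catches anything the fields cannot
 * express, such as padding overflowing those 32 bits. */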
/* Expand the database. */
static enum TDB_ERROR tdb_expand(struct tdb_context *tdb, tdb_len_t size)
{
	uint64_t old_size, rec_size, map_size;
	tdb_len_t wanted;
	enum TDB_ERROR ecode;

	/* Need to hold a hash lock to expand DB: transactions rely on it. */
	if (!(tdb->flags & TDB_NOLOCK)
	    && !tdb->file->allrecord_lock.count && !tdb_has_hash_locks(tdb)) {
		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
				  "tdb_expand: must hold lock during expand");
	}

	/* Only one person can expand file at a time. */
	ecode = tdb_lock_expand(tdb, F_WRLCK);
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}

	/* Someone else may have expanded the file, so retry. */
	old_size = tdb->file->map_size;
	tdb->methods->oob(tdb, tdb->file->map_size + 1, true);
	if (tdb->file->map_size != old_size) {
		tdb_unlock_expand(tdb, F_WRLCK);
		return TDB_SUCCESS;
	}

	/* Limit the growth factor in order to avoid using up huge amounts of
	 * memory for in-memory tdbs if an oddball huge record creeps in. */
	if (size > 100 * 1024) {
		rec_size = size * 2;
	} else {
		rec_size = size * 100;
	}

	/* Always make room for at least rec_size more bytes, and at least
	 * 25% more space if the DB is smaller than 100MiB; otherwise grow
	 * it by 10% only. */
	if (old_size > 100 * 1024 * 1024) {
		map_size = old_size / 10;
	} else {
		map_size = old_size / 4;
	}

	if (map_size > rec_size) {
		wanted = map_size;
	} else {
		wanted = rec_size;
	}

	/* We need room for the record header too. */
	wanted = adjust_size(0, sizeof(struct tdb_used_record) + wanted);

	ecode = tdb->methods->expand_file(tdb, wanted);
	if (ecode != TDB_SUCCESS) {
		tdb_unlock_expand(tdb, F_WRLCK);
		return ecode;
	}

	/* We need to drop this lock before adding free record. */
	tdb_unlock_expand(tdb, F_WRLCK);

	tdb->stats.expands++;
	return add_free_record(tdb, old_size, wanted, TDB_LOCK_WAIT, true);
}
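/* Growth example: asking for a 1KiB record in a 10MiB file gives
 * rec_size == 100KiB (size * 100, since size <= 100KiB) and map_size
 * == 2.5MiB (25% of a sub-100MiB file), so the file grows by ~2.5MiB
 * plus header, and the whole new region is donated to the free list.
 * A 200KiB record would instead use rec_size == 400KiB (size * 2). */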
/* This won't fail: it will expand the database if it has to. */
tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
		uint64_t hash, unsigned magic, bool growing)
{
	tdb_off_t off;

	/* We can't hold pointers during this: we could unmap! */
	assert(!tdb->direct_access);

	for (;;) {
		enum TDB_ERROR ecode;
		off = get_free(tdb, keylen, datalen, growing, magic, hash);
		if (likely(off != 0))
			break;

		ecode = tdb_expand(tdb, adjust_size(keylen, datalen));
		if (ecode != TDB_SUCCESS) {
			return ecode;
		}
	}

	return off;
}
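/* Call-chain summary: alloc() loops get_free() (bucket search across the
 * free tables) until lock_and_alloc() finds a fitting free record and
 * stamps it used via set_header(); when every table comes up empty,
 * tdb_expand() grows the file, add_free_record() donates the new space,
 * and the loop retries. */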