/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_undo.c,v 1.18 2008/06/27 20:56:59 dillon Exp $
 */

/*
 * HAMMER undo - undo buffer/FIFO management.
 */

#include "hammer.h"

static int hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2);

RB_GENERATE2(hammer_und_rb_tree, hammer_undo, rb_node,
	     hammer_und_rb_compare, hammer_off_t, offset);

/*
 * Convert a zone-3 undo offset into a zone-2 buffer offset.
 */
hammer_off_t
hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone3_off, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_off_t result_offset;
	int i;

	KKASSERT((zone3_off & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_UNDO);
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(undomap->alloc_offset) ==
		 HAMMER_ZONE_UNDO_INDEX);
	KKASSERT(zone3_off < undomap->alloc_offset);

	i = (zone3_off & HAMMER_OFF_SHORT_MASK) / HAMMER_LARGEBLOCK_SIZE;
	result_offset = root_volume->ondisk->vol0_undo_array[i] +
			(zone3_off & HAMMER_LARGEBLOCK_MASK64);

	hammer_rel_volume(root_volume, 0);
	return(result_offset);
}
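
/*
 * Worked example of the translation above (a sketch; the 8MB large-block
 * size is an assumption about HAMMER's on-disk layout):
 *
 *	zone-3 short offset	0x00C00000 (12MB into the undo zone)
 *	i			12MB / 8MB = 1
 *	result_offset		vol0_undo_array[1] + (12MB & (8MB - 1))
 *				= vol0_undo_array[1] + 4MB
 *
 * The undo zone is thus a linear concatenation of pre-mapped large-blocks
 * whose zone-2 locations are recorded in the root volume header.
 */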

/*
 * Generate an UNDO record for the block of data at the specified zone1
 * or zone2 offset.
 *
 * The recovery code will execute UNDOs in reverse order, allowing overlaps.
 * All the UNDOs are executed together so if we already laid one down we
 * do not have to lay another one down for the same range.
 */
int
hammer_generate_undo(hammer_transaction_t trans, hammer_io_t io,
		     hammer_off_t zone_off, void *base, int len)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_undo_t undo;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	int error;
	int bytes;

	hmp = trans->hmp;

	/*
	 * Enter the offset into our undo history.  If there is an existing
	 * undo we do not have to generate a new one.
	 */
	if (hammer_enter_undo_history(hmp, zone_off, len) == EALREADY)
		return(0);

	root_volume = trans->rootvol;
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	/* no undo recursion */
	hammer_modify_volume(NULL, root_volume, NULL, 0);

	hammer_lock_ex(&hmp->undo_lock);
again:
	/*
	 * Allocate space in the FIFO
	 */
	bytes = ((len + HAMMER_HEAD_ALIGN_MASK) & ~HAMMER_HEAD_ALIGN_MASK) +
		sizeof(struct hammer_fifo_undo) +
		sizeof(struct hammer_fifo_tail);
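
	/*
	 * Worked example of the size calculation above, assuming
	 * HAMMER_HEAD_ALIGN is 8 (consistent with the 8-byte worst-case
	 * PAD record noted below): len = 100 rounds up to 104 bytes of
	 * data area, and the fixed-size undo header and fifo tail are
	 * added on top, keeping every FIFO entry 8-byte aligned.
	 */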
	if (hammer_undo_space(trans) < bytes + HAMMER_BUFSIZE*2)
		panic("hammer: insufficient undo FIFO space!");

	next_offset = undomap->next_offset;

	/*
	 * Wrap next_offset
	 */
	if (undomap->next_offset == undomap->alloc_offset) {
		next_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
		undomap->next_offset = next_offset;
		hkprintf("undo zone's next_offset wrapped\n");
	}

	/*
	 * This is a tail-chasing FIFO; when we hit the start of a new
	 * buffer we don't have to read it in.
	 */
	if ((next_offset & HAMMER_BUFMASK) == 0)
		undo = hammer_bnew(hmp, next_offset, &error, &buffer);
	else
		undo = hammer_bread(hmp, next_offset, &error, &buffer);
	hammer_modify_buffer(NULL, buffer, NULL, 0);

	KKASSERT(undomap->next_offset == next_offset);
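
	/*
	 * (hammer_bnew() returns the buffer without reading it from
	 * disk.  Since the FIFO only ever appends at next_offset, a
	 * buffer-aligned offset means the old contents are dead and
	 * about to be overwritten anyway.)
	 */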

	/*
	 * If the FIFO entry would cross a buffer boundary, PAD to the end
	 * of the buffer and try again.  Due to our data alignment, the
	 * worst case (smallest) PAD record is 8 bytes.  PAD records only
	 * populate the first 8 bytes of hammer_fifo_head and the tail may
	 * be at the same offset as the head.
	 */
	if ((next_offset ^ (next_offset + bytes)) & ~HAMMER_BUFMASK64) {
		bytes = HAMMER_BUFSIZE - ((int)next_offset & HAMMER_BUFMASK);
		tail = (void *)((char *)undo + bytes - sizeof(*tail));
		if ((void *)undo != (void *)tail) {
			tail->tail_signature = HAMMER_TAIL_SIGNATURE;
			tail->tail_type = HAMMER_HEAD_TYPE_PAD;
			tail->tail_size = bytes;
		}
		undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
		undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
		undo->head.hdr_size = bytes;
		/* NO CRC */
		undomap->next_offset += bytes;
		hammer_modify_buffer_done(buffer);
		goto again;
	}
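
	/*
	 * Note on the boundary test above: XORing the first and
	 * one-past-last offsets of the entry leaves bits set above
	 * HAMMER_BUFMASK64 only when the two offsets land in different
	 * buffers.  E.g. assuming 16KB buffers, 0x3ff0 ^ (0x3ff0 + 0x40)
	 * = 0x7fc0, which has bit 14 set, so that entry would straddle
	 * a buffer boundary and a PAD is laid down instead.
	 */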
	if (hammer_debug_general & 0x0080)
		kprintf("undo %016llx %d %d\n", next_offset, bytes, len);

	/*
	 * We're good, create the entry.
	 */
	undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
	undo->head.hdr_type = HAMMER_HEAD_TYPE_UNDO;
	undo->head.hdr_size = bytes;
	undo->head.reserved01 = 0;
	undo->head.hdr_crc = 0;
	undo->undo_offset = zone_off;
	undo->undo_data_bytes = len;
	bcopy(base, undo + 1, len);

	tail = (void *)((char *)undo + bytes - sizeof(*tail));
	tail->tail_signature = HAMMER_TAIL_SIGNATURE;
	tail->tail_type = HAMMER_HEAD_TYPE_UNDO;
	tail->tail_size = bytes;

	KKASSERT(bytes >= sizeof(undo->head));
	undo->head.hdr_crc = crc32(undo, HAMMER_FIFO_HEAD_CRCOFF) ^
			     crc32(&undo->head + 1, bytes - sizeof(undo->head));
	undomap->next_offset += bytes;

	hammer_modify_buffer_done(buffer);
	hammer_modify_volume_done(root_volume);

	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}
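
/*
 * Typical use (a hypothetical sketch, not a verbatim call site): callers
 * rarely invoke hammer_generate_undo() directly.  The hammer_modify_buffer()
 * and hammer_modify_volume() helpers, given a non-NULL data range, lay down
 * the undo for that range before the caller dirties the media:
 *
 *	hammer_modify_buffer(trans, buffer, data_ptr, data_len);
 *	... modify the bytes at data_ptr ...
 *	hammer_modify_buffer_done(buffer);
 */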

/*
 * UNDO HISTORY API
 *
 * It is not necessary to lay out an undo record for the same address space
 * multiple times.  Maintain a cache of recent undos.
 */

/*
 * Enter an undo into the history.  Return EALREADY if the request completely
 * covers a previous request.
 */
int
hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset, int bytes)
{
	hammer_undo_t node;
	hammer_undo_t onode;

	node = RB_LOOKUP(hammer_und_rb_tree, &hmp->rb_undo_root, offset);
	if (node) {
		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
		TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
		if (bytes <= node->bytes)
			return(EALREADY);
		node->bytes = bytes;
		return(0);
	}
	if (hmp->undo_alloc != HAMMER_MAX_UNDOS) {
		node = &hmp->undos[hmp->undo_alloc++];
	} else {
		node = TAILQ_FIRST(&hmp->undo_lru_list);
		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
		RB_REMOVE(hammer_und_rb_tree, &hmp->rb_undo_root, node);
	}
	node->offset = offset;
	node->bytes = bytes;
	TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
	onode = RB_INSERT(hammer_und_rb_tree, &hmp->rb_undo_root, node);
	KKASSERT(onode == NULL);
	return(0);
}
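
/*
 * Behavioral sketch of the history check (offset and sizes hypothetical):
 *
 *	hammer_enter_undo_history(hmp, off, 64);	returns 0 (new entry)
 *	hammer_enter_undo_history(hmp, off, 32);	returns EALREADY
 *	hammer_enter_undo_history(hmp, off, 128);	returns 0 (extends)
 *
 * Only an exact-offset hit can return EALREADY; an overlapping range that
 * starts at a different offset still generates a fresh undo record.
 */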

void
hammer_clear_undo_history(hammer_mount_t hmp)
{
	RB_INIT(&hmp->rb_undo_root);
	TAILQ_INIT(&hmp->undo_lru_list);
	hmp->undo_alloc = 0;
}

/*
 * Return how much of the undo FIFO has been used.
 *
 * The calculation includes undo FIFO space still reserved from a previous
 * flush (because it will still be run on recovery if a crash occurs and
 * we can't overwrite it yet).
 */
int64_t
hammer_undo_used(hammer_transaction_t trans)
{
	hammer_blockmap_t cundomap;
	hammer_blockmap_t dundomap;
	int64_t max_bytes;
	int64_t bytes;

	cundomap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	dundomap = &trans->rootvol->ondisk->
		   vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (dundomap->first_offset <= cundomap->next_offset) {
		bytes = cundomap->next_offset - dundomap->first_offset;
	} else {
		bytes = cundomap->alloc_offset - dundomap->first_offset +
			(cundomap->next_offset & HAMMER_OFF_LONG_MASK);
	}
	max_bytes = cundomap->alloc_offset & HAMMER_OFF_SHORT_MASK;
	KKASSERT(bytes <= max_bytes);
	return(bytes);
}
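
/*
 * Worked example of the wrapped case above (numbers illustrative): with a
 * 100MB undo zone, an on-disk first_offset 80MB in, and an in-memory
 * next_offset that has wrapped around to 30MB in, the space still in use
 * is (100MB - 80MB) + 30MB = 50MB.  first_offset comes from the on-disk
 * blockmap because undo laid down by a previous flush must stay intact
 * until it can no longer be replayed by crash recovery.
 */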

/*
 * Return how much of the undo FIFO is available for new records.
 */
int64_t
hammer_undo_space(hammer_transaction_t trans)
{
	hammer_blockmap_t rootmap;
	int64_t max_bytes;

	rootmap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;
	return(max_bytes - hammer_undo_used(trans));
}

int64_t
hammer_undo_max(hammer_mount_t hmp)
{
	hammer_blockmap_t rootmap;
	int64_t max_bytes;

	rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;

	return(max_bytes);
}

static int
hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2)
{
	if (node1->offset < node2->offset)
		return(-1);
	if (node1->offset > node2->offset)
		return(1);
	return(0);
}