sys/vfs/hammer/hammer_io.c

/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.3 2007/11/20 07:16:28 dillon Exp $
 */

/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to release a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>
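
/*
 * Usage sketch (hypothetical caller): the read/modify/release steps below
 * follow the passive-association model described at the top of this file.
 * Real callers live elsewhere in HAMMER; the function name here is made up
 * and the block is compiled out.
 */
#if 0
static int
example_modify_io(struct vnode *devvp, struct hammer_io *io)
{
	int error;

	error = hammer_io_read(devvp, io);	/* associate bp, active lock */
	if (error)
		return(error);
	/* ... modify the on-disk data via the owning structure ... */
	io->modified = 1;			/* mark dirty for bdwrite() */
	hammer_io_release(io, 0);		/* passive release, no flush */
	return(0);
}
#endif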

/*
 * Helper routine which disassociates a buffer cache buf from a
 * hammer structure.
 *
 * If the io structure indicates that the buffer is not in a released
 * state we must dispose of it.
 */
static void
hammer_io_disassociate(union hammer_io_structure *io)
{
	struct buf *bp = io->io.bp;
	int modified;
	int released;

	LIST_INIT(&bp->b_dep);		/* clear the association */
	io->io.bp = NULL;
	modified = io->io.modified;
	released = io->io.released;

	switch(io->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		io->volume.ondisk = NULL;
		io->volume.alist.meta = NULL;
		io->io.modified = 0;
		break;
	case HAMMER_STRUCTURE_SUPERCL:
		io->supercl.ondisk = NULL;
		io->supercl.alist.meta = NULL;
		io->io.modified = 0;
		break;
	case HAMMER_STRUCTURE_CLUSTER:
		io->cluster.ondisk = NULL;
		io->cluster.alist_master.meta = NULL;
		io->cluster.alist_btree.meta = NULL;
		io->cluster.alist_record.meta = NULL;
		io->cluster.alist_mdata.meta = NULL;
		io->io.modified = 0;
		break;
	case HAMMER_STRUCTURE_BUFFER:
		io->buffer.ondisk = NULL;
		io->buffer.alist.meta = NULL;
		io->io.modified = 0;
		break;
	}

	/*
	 * 'io' now invalid.  If the buffer was not released we have to
	 * dispose of it.
	 *
	 * disassociate can be called via hammer_io_checkwrite() with
	 * the buffer in a released state (possibly with no lock held
	 * at all, in fact).  Don't mess with it if we are in a released
	 * state.
	 */
	if (released == 0) {
		if (modified)
			bdwrite(bp);
		else
			bqrelse(bp);
	}
}

/*
 * Load bp for a HAMMER structure.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
		error = bread(devvp, io->offset, HAMMER_BUFSIZE, &io->bp);
		if (error == 0) {
			bp = io->bp;
			bp->b_ops = &hammer_bioops;
			LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
			BUF_KERNPROC(bp);
		}
		io->modified = 0;	/* no new modifications yet */
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O so we can call it.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, HAMMER_BUFSIZE, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;	/* we hold an active lock on bp */
		BUF_KERNPROC(bp);
	} else {
		if (io->released) {
			regetblk(bp);
			io->released = 0;
			BUF_KERNPROC(bp);
		}
	}
	io->modified = 1;
	vfs_bio_clrbuf(bp);
	return(0);
}
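
/*
 * Design note on the ordering above: a new buffer is zero-filled rather
 * than read, so it is dirty by definition and io->modified is set
 * unconditionally; the regetblk() path re-locks a released bp first so
 * that vfs_bio_clrbuf() is serialized against any background I/O.
 */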

/*
 * Release the IO buffer on the last reference to a hammer structure.  At
 * this time the lock still has a reference.
 *
 * We flush and disassociate the bp if flush is non-zero or if the kernel
 * tried to free/reuse the buffer.
 */
void
hammer_io_release(struct hammer_io *io, int flush)
{
	union hammer_io_structure *iou = (void *)io;
	struct buf *bp;

	if ((bp = io->bp) != NULL) {
		if (bp->b_flags & B_LOCKED) {
			/*
			 * The kernel wanted the buffer but couldn't get it,
			 * give it up now.
			 */
			KKASSERT(io->released);
			regetblk(bp);
			io->released = 0;
			BUF_KERNPROC(bp);
			bp->b_flags &= ~B_LOCKED;
			hammer_io_disassociate(iou);
		} else if (io->released == 0) {
			/*
			 * We are holding a real lock on the buffer, release
			 * it passively (hammer_io_deallocate is called
			 * when the kernel really wants to reuse the buffer).
			 */
			if (flush) {
				hammer_io_disassociate(iou);
			} else {
				if (io->modified)
					bdwrite(bp);
				else
					bqrelse(bp);
				io->modified = 0;
				io->released = 1;
			}
		} else if (io->modified && (bp->b_flags & B_DELWRI) == 0) {
			/*
			 * We are holding the buffer passively but made
			 * modifications to it.  The kernel has not initiated
			 * I/O (else B_LOCKED would have been set), so just
			 * check whether B_DELWRI is set.  Since we still
			 * have lock references on the HAMMER structure the
			 * kernel cannot throw the buffer away.
			 *
			 * We have to do this to avoid the situation where
			 * the buffer is not marked B_DELWRI when modified
			 * and in a released state, otherwise the kernel
			 * will never try to flush the modified buffer!
			 */
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
			if (flush) {
				hammer_io_disassociate(iou);
			} else {
				bdwrite(bp);
				io->modified = 0;
			}
		} else if (flush) {
			/*
			 * We are holding the buffer passively but were
			 * asked to disassociate and flush it.
			 */
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
			hammer_io_disassociate(iou);
			/* io->released ignored */
		} /* else just leave it associated in a released state */
	}
}
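
/*
 * Summary of the four cases above, in the order tested: (1) B_LOCKED set -
 * the kernel asked for the buffer back, reacquire it and disassociate;
 * (2) actively held - disassociate if flushing, else release passively
 * via bdwrite()/bqrelse(); (3) passively held and modified but not yet
 * B_DELWRI - reacquire and bdwrite() so the kernel will eventually flush
 * it; (4) passively held with a flush request - reacquire and
 * disassociate.  Otherwise the buffer stays passively associated.
 */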

/*
 * HAMMER_BIOOPS
 */

/*
 * Pre and post I/O callbacks.  No buffer munging is done so there is
 * nothing to do here.
 */
static void hammer_io_deallocate(struct buf *bp);

static void
hammer_io_start(struct buf *bp)
{
}

static void
hammer_io_complete(struct buf *bp)
{
}

/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This can only occur if the buffer is
 * passively associated with the structure.  The kernel has locked
 * the buffer.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	union hammer_io_structure *io = (void *)LIST_FIRST(&bp->b_dep);

	/* XXX memory interlock, spinlock to sync cpus */

	KKASSERT(io->io.released);
	crit_enter();

	/*
	 * First, ref the structure to prevent either the buffer or the
	 * structure from going away or being unexpectedly flushed.
	 */
	hammer_ref(&io->io.lock);
	kprintf("iodeallocate bp %p\n", bp);

	/*
	 * Buffers can have active references from cached hammer_node's,
	 * even if those nodes are themselves passively cached.  Attempt
	 * to clean them out.  This may not succeed.
	 */
	if (io->io.type == HAMMER_STRUCTURE_BUFFER &&
	    hammer_lock_ex_try(&io->io.lock) == 0) {
		hammer_flush_buffer_nodes(&io->buffer);
		hammer_unlock(&io->io.lock);
	}

	if (hammer_islastref(&io->io.lock)) {
		/*
		 * If we are the only ref left we can disassociate the
		 * I/O.  It had better still be in a released state because
		 * the kernel is holding a lock on the buffer.
		 */
		KKASSERT(io->io.released);
		hammer_io_disassociate(io);
		bp->b_flags &= ~B_LOCKED;
		kprintf("iodeallocate bp %p - unlocked and dissed\n", bp);

		/*
		 * Perform final rites on the structure.  This can cause
		 * a chain reaction - e.g. last buffer -> last cluster ->
		 * last supercluster -> last volume.
		 */
		switch(io->io.type) {
		case HAMMER_STRUCTURE_VOLUME:
			hammer_rel_volume(&io->volume, 1);
			break;
		case HAMMER_STRUCTURE_SUPERCL:
			hammer_rel_supercl(&io->supercl, 1);
			break;
		case HAMMER_STRUCTURE_CLUSTER:
			hammer_rel_cluster(&io->cluster, 1);
			break;
		case HAMMER_STRUCTURE_BUFFER:
			hammer_rel_buffer(&io->buffer, 1);
			break;
		}
	} else {
		/*
		 * Otherwise tell the kernel not to destroy the buffer.
		 *
		 * We have to unref the structure without performing any
		 * final rites to it to avoid a deadlock.
		 */
		bp->b_flags |= B_LOCKED;
		hammer_unref(&io->io.lock);
		kprintf("iodeallocate bp %p - locked\n", bp);
	}

	crit_exit();
}
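
/*
 * Note: the B_LOCKED deferral above pairs with the B_LOCKED case at
 * the top of hammer_io_release(), which reacquires and disassociates
 * the buffer once the HAMMER structure drops its last reference.
 */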

static int
hammer_io_fsync(struct vnode *vp)
{
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}

/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want to write out a buffer
 * that HAMMER may be modifying passively.
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
	union hammer_io_structure *io = (void *)LIST_FIRST(&bp->b_dep);

	if (io->io.lock.refs) {
		bp->b_flags |= B_LOCKED;
		return(-1);
	} else {
		KKASSERT(bp->b_flags & B_DELWRI);
		hammer_io_disassociate(io);
		return(0);
	}
}

/*
 * Return non-zero if the caller should flush the structure associated
 * with this io sub-structure.
 */
int
hammer_io_checkflush(struct hammer_io *io)
{
	if (io->bp == NULL || (io->bp->b_flags & B_LOCKED))
		return(1);
	return(0);
}

/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}

struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};
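
/*
 * These callbacks only fire on buffers that were explicitly associated
 * with a HAMMER structure, i.e. bp->b_ops was pointed at hammer_bioops
 * and the io worklist was inserted on bp->b_dep (see hammer_io_read()
 * and hammer_io_new() above).
 */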