/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;
/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q: queue
 * @pfn: prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used, for
 * instance, to build a cdb from the request data.
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);
/**
 * blk_queue_set_discard - set a prepare_discard function for queue
 * @q: queue
 * @dfn: prepare_discard function
 *
 * It's possible for a queue to register a discard callback which is used
 * to transform a discard request into the appropriate type for the
 * hardware. If none is registered, then discard requests are failed
 * with %EOPNOTSUPP.
 */
void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
{
	q->prepare_discard_fn = dfn;
}
EXPORT_SYMBOL(blk_queue_set_discard);
/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q: queue
 * @mbfn: merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bios. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);
void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
/**
 * blk_set_default_limits - reset limits to default values
 * @lim: the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.  Can be used by
 *   stacking drivers like DM that stage table swaps and reuse an
 *   existing device queue.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_phys_segments = MAX_PHYS_SEGMENTS;
	lim->max_hw_segments = MAX_HW_SEGMENTS;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->max_segment_size = MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = SAFE_MAX_SECTORS;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->no_cluster = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);
/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q: the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;			/* hmm */
	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	blk_set_default_limits(&q->limits);

	/*
	 * If the caller didn't supply a lock, fall back to our embedded
	 * per-queue locks
	 */
	if (!q->queue_lock)
		q->queue_lock = &q->__queue_lock;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
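/*
 * Illustrative sketch, not part of this file: a minimal bio-based
 * driver of the kind described above might wire itself up as below.
 * The "mydev_" names are hypothetical.
 */
#if 0
static int mydev_make_request(struct request_queue *q, struct bio *bio)
{
	/* service the bio directly instead of queueing a request */
	bio_endio(bio, 0);
	return 0;
}

static void mydev_setup_queue(struct request_queue *q)
{
	blk_queue_make_request(q, mydev_make_request);
}
#endif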
/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max_low_pfn;
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
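/*
 * Illustrative sketch (hypothetical caller): a device limited to
 * 32-bit DMA addressing would request bounce buffers for any page
 * above 4GB like this.
 */
#if 0
	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
#endif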
/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * @q: the request queue for the device
 * @max_sectors: max sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of
 *    received requests.
 **/
void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_sectors);
	}

	if (BLK_DEF_MAX_SECTORS > max_sectors)
		q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
	else {
		q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
		q->limits.max_hw_sectors = max_sectors;
	}
}
EXPORT_SYMBOL(blk_queue_max_sectors);
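/*
 * Illustrative sketch (hypothetical value): a controller that can only
 * transfer 64 KiB per command caps requests at 128 sectors of 512b.
 */
#if 0
	blk_queue_max_sectors(q, 128);	/* 128 * 512 = 64 KiB */
#endif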
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
{
	if (BLK_DEF_MAX_SECTORS > max_sectors)
		q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
	else
		q->limits.max_hw_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
/**
 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    physical data segments in a request.  This would be the largest sized
 *    scatter list the driver could handle.
 **/
void blk_queue_max_phys_segments(struct request_queue *q,
				 unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_phys_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_phys_segments);
/**
 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.  This would be the largest number of
 *    address/length pairs the host adapter can actually give at once
 *    to the device.
 **/
void blk_queue_max_hw_segments(struct request_queue *q,
			       unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_hw_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_hw_segments);
/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q: the request queue for the device
 * @max_size: max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q: the request queue for the device
 * @size: the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q: the request queue for the device
 * @size: the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 **/
void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
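/*
 * Illustrative sketch (hypothetical values): a 512-byte-emulation
 * drive with 4096-byte native sectors reports both sizes so the
 * I/O stack can avoid read-modify-write cycles.
 */
#if 0
	blk_queue_logical_block_size(q, 512);	/* smallest addressable unit */
	blk_queue_physical_block_size(q, 4096);	/* native sector size */
#endif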
/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q: the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min: smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);
/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q: the request queue for the device
 * @min: smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);
/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q: the request queue for the device
 * @opt: optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	q->limits.io_opt = opt;
}
EXPORT_SYMBOL(blk_queue_io_opt);
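/*
 * Illustrative sketch (hypothetical values): a 4-drive RAID5 array with
 * a 64 KiB chunk might export the chunk size as io_min and the full
 * stripe (3 data chunks = 192 KiB) as io_opt.
 */
#if 0
	blk_queue_io_min(q, 64 * 1024);		/* stripe chunk */
	blk_queue_io_opt(q, 3 * 64 * 1024);	/* full stripe width */
#endif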
/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 * (Arguments and the whole expression are parenthesized so the macro
 * expands safely inside larger expressions.)
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;

		spin_lock_irqsave(t->queue_lock, flags);
		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_queue_stack_limits);
/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top)
 * @b:  the underlying queue limits (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges two queue_limit structs.  Returns 0 if alignment didn't
 *    change.  Returns -1 if adding the bottom device caused
 *    misalignment.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t offset)
{
	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);

	t->max_phys_segments = min_not_zero(t->max_phys_segments,
					    b->max_phys_segments);

	t->max_hw_segments = min_not_zero(t->max_hw_segments,
					  b->max_hw_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->no_cluster |= b->no_cluster;

	/* Bottom device offset aligned? */
	if (offset &&
	    (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
		t->misaligned = 1;
		return -1;
	}

	/* If top has no alignment offset, inherit from bottom */
	if (!t->alignment_offset)
		t->alignment_offset =
			b->alignment_offset & (b->physical_block_size - 1);

	/* Top device aligned on logical block boundary? */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		return -1;
	}

	/* Find lcm() of optimal I/O size */
	if (t->io_opt && b->io_opt)
		t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt);
	else if (b->io_opt)
		t->io_opt = b->io_opt;

	/* Verify that optimal I/O size is a multiple of io_min */
	if (t->io_min && t->io_opt % t->io_min)
		return -1;

	return 0;
}
EXPORT_SYMBOL(blk_stack_limits);
/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for two queues.  Prints a warning if the
 *    top and bottom devices end up misaligned.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;
	struct request_queue *b = bdev_get_queue(bdev);

	offset += get_start_sect(bdev) << 9;

	if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;

		spin_lock_irqsave(t->queue_lock, flags);
		/* recheck under the lock before clearing */
		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(disk_stack_limits);
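/*
 * Illustrative sketch (hypothetical caller): an MD/DM driver folds each
 * component device into the stacked disk's limits.  Note the offset is
 * in bytes (get_start_sect() above is converted with << 9); the names
 * below are assumptions.
 */
#if 0
	disk_stack_limits(md_disk, component_bdev, data_offset_bytes);
#endif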
/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);
/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);
/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for
 * appending the drain buffer.  If you call
 * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
 * calling this routine, you must set the limit to one fewer than your
 * device can support otherwise there won't be room for the drain
 * buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			dma_drain_needed_fn *dma_drain_needed,
			void *buf, unsigned int size)
{
	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
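/*
 * Illustrative sketch (hypothetical ATAPI-style driver): drain packet
 * commands into a 256-byte scratch buffer.  The "mydev_" names and the
 * drain_buf allocation are assumptions.
 */
#if 0
static int mydev_drain_needed(struct request *rq)
{
	return blk_pc_request(rq);	/* drain SCSI packet commands */
}

static int mydev_setup_drain(struct request_queue *q, void *drain_buf)
{
	return blk_queue_dma_drain(q, mydev_drain_needed, drain_buf, 256);
}
#endif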
/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);
/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct DMA
 *    transactions.  This is used when building direct I/O requests
 *    for the queue.
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);
/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct DMA
 *    transactions.  If the requested alignment is larger than the
 *    current alignment, then the current queue alignment is updated
 *    to the new value, otherwise it is left alone.  The design of
 *    this is to allow multiple objects (driver, device, transport
 *    etc) to set their respective alignments without having them
 *    interfere.
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
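/*
 * Illustrative sketch (hypothetical value): a transport that needs
 * 512-byte alignment raises the queue's mask; an existing stricter
 * (larger) mask set by another layer is left alone.
 */
#if 0
	blk_queue_update_dma_alignment(q, 511);	/* mask = alignment - 1 */
#endif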
static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);