1 #ifndef _LINUX_BLKDEV_H
2 #define _LINUX_BLKDEV_H
4 #include <linux/major.h>
5 #include <linux/sched.h>
6 #include <linux/genhd.h>
7 #include <linux/tqueue.h>
8 #include <linux/list.h>
/*
 * Forward typedefs: the request queue is defined later in this file,
 * the elevator type in <linux/elevator.h>.
 */
typedef struct request_queue request_queue_t;
typedef struct elevator_s elevator_t;
16 * Ok, this is an expanded form so that we can use the same
17 * request for paging requests when that is implemented. In
18 * paging, 'bh' is NULL, and the semaphore is used to wait
19 * for read/write completion.
22 struct list_head queue
;
23 int elevator_sequence
;
24 struct list_head table
;
26 struct list_head
*free_list
;
28 volatile int rq_status
; /* should split this into a few status bits */
29 #define RQ_INACTIVE (-1)
31 #define RQ_SCSI_BUSY 0xffff
32 #define RQ_SCSI_DONE 0xfffe
33 #define RQ_SCSI_DISCONNECTING 0xffe0
36 int cmd
; /* READ or WRITE */
39 unsigned long nr_sectors
;
40 unsigned long hard_sector
, hard_nr_sectors
;
41 unsigned int nr_segments
;
42 unsigned int nr_hw_segments
;
43 unsigned long current_nr_sectors
;
46 struct semaphore
* sem
;
47 struct buffer_head
* bh
;
48 struct buffer_head
* bhtail
;
53 #include <linux/elevator.h>
55 typedef int (merge_request_fn
) (request_queue_t
*q
,
57 struct buffer_head
*bh
,
59 typedef int (merge_requests_fn
) (request_queue_t
*q
,
63 typedef void (request_fn_proc
) (request_queue_t
*q
);
64 typedef request_queue_t
* (queue_proc
) (kdev_t dev
);
65 typedef int (make_request_fn
) (request_queue_t
*q
, int rw
, struct buffer_head
*bh
);
66 typedef void (plug_device_fn
) (request_queue_t
*q
, kdev_t device
);
67 typedef void (unplug_device_fn
) (void *q
);
70 * Default nr free requests per queue
72 #define QUEUE_NR_REQUESTS 256
77 * the queue request freelist, one for reads and one for writes
79 struct list_head request_freelist
[2];
82 * Together with queue_head for cacheline sharing
84 struct list_head queue_head
;
87 request_fn_proc
* request_fn
;
88 merge_request_fn
* back_merge_fn
;
89 merge_request_fn
* front_merge_fn
;
90 merge_requests_fn
* merge_requests_fn
;
91 make_request_fn
* make_request_fn
;
92 plug_device_fn
* plug_device_fn
;
94 * The queue owner gets to use this for whatever they like.
95 * ll_rw_blk doesn't touch it.
100 * This is used to remove the plug when tq_disk runs.
102 struct tq_struct plug_tq
;
105 * Boolean that indicates whether this queue is plugged or not.
110 * Boolean that indicates whether current_request is active or
116 * Is meant to protect the queue in the future instead of
119 spinlock_t request_lock
;
122 * Tasks wait here for free request
124 wait_queue_head_t wait_for_request
;
127 struct blk_dev_struct
{
129 * queue_proc has to be atomic
131 request_queue_t request_queue
;
138 unsigned block_size_bits
;
/*
 * Used to indicate the default queue for drivers that don't bother
 * to implement multiple queues. We have this access macro here
 * so as to eliminate the need for each and every block device
 * driver to know about the internal structure of blk_dev[].
 */
#define BLK_DEFAULT_QUEUE(_MAJOR)  &blk_dev[_MAJOR].request_queue
149 extern struct sec_size
* blk_sec
[MAX_BLKDEV
];
150 extern struct blk_dev_struct blk_dev
[MAX_BLKDEV
];
151 extern void grok_partitions(struct gendisk
*dev
, int drive
, unsigned minors
, long size
);
152 extern void register_disk(struct gendisk
*dev
, kdev_t first
, unsigned minors
, struct block_device_operations
*ops
, long size
);
153 extern void generic_make_request(int rw
, struct buffer_head
* bh
);
154 extern request_queue_t
*blk_get_queue(kdev_t dev
);
155 extern void blkdev_release_request(struct request
*);
158 * Access functions for manipulating queue properties
160 extern void blk_init_queue(request_queue_t
*, request_fn_proc
*);
161 extern void blk_cleanup_queue(request_queue_t
*);
162 extern void blk_queue_headactive(request_queue_t
*, int);
163 extern void blk_queue_pluggable(request_queue_t
*, plug_device_fn
*);
164 extern void blk_queue_make_request(request_queue_t
*, make_request_fn
*);
166 extern int * blk_size
[MAX_BLKDEV
];
168 extern int * blksize_size
[MAX_BLKDEV
];
170 extern int * hardsect_size
[MAX_BLKDEV
];
172 extern int * max_readahead
[MAX_BLKDEV
];
174 extern int * max_sectors
[MAX_BLKDEV
];
176 extern int * max_segments
[MAX_BLKDEV
];
/* Upper bounds on a request's sectors and scatter-gather segments. */
#define MAX_SECTORS 254
#define MAX_SEGMENTS MAX_SECTORS

/* Round size up to the next PAGE_SIZE boundary. */
#define PageAlignSize(size) (((size) + PAGE_SIZE -1) & PAGE_MASK)

/* read-ahead in pages.. */
#define MAX_READAHEAD	31
#define MIN_READAHEAD	3

/* Walk a queue's request list via the 'queue' list_head embedded in struct request. */
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queue)
#define blkdev_entry_next_request(entry) blkdev_entry_to_request((entry)->next)
#define blkdev_entry_prev_request(entry) blkdev_entry_to_request((entry)->prev)
#define blkdev_next_request(req) blkdev_entry_to_request((req)->queue.next)
#define blkdev_prev_request(req) blkdev_entry_to_request((req)->queue.prev)
194 extern void drive_stat_acct (kdev_t dev
, int rw
,
195 unsigned long nr_sectors
, int new_io
);
197 static inline int get_hardsect_size(kdev_t dev
)
199 extern int *hardsect_size
[];
200 if (hardsect_size
[MAJOR(dev
)] != NULL
)
201 return hardsect_size
[MAJOR(dev
)][MINOR(dev
)];