/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>
/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
static struct request all_requests[NR_REQUEST];
/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;
/* This specifies how many sectors to read ahead on the disk. */

int read_ahead[MAX_BLKDEV] = {0, };
/* blk_dev_struct is:
 *	do_request-address
 *	next-request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },		/* 0 no_dev */
	{ NULL, NULL },		/* 1 dev mem */
	{ NULL, NULL },		/* 2 dev fd */
	{ NULL, NULL },		/* 3 dev ide0 or hd */
	{ NULL, NULL },		/* 4 dev ttyx */
	{ NULL, NULL },		/* 5 dev tty */
	{ NULL, NULL },		/* 6 dev lp */
	{ NULL, NULL },		/* 7 dev pipes */
	{ NULL, NULL },		/* 8 dev sd */
	{ NULL, NULL },		/* 9 dev st */
	{ NULL, NULL },		/* 10 */
	{ NULL, NULL },		/* 11 */
	{ NULL, NULL },		/* 12 */
	{ NULL, NULL },		/* 13 */
	{ NULL, NULL },		/* 14 */
	{ NULL, NULL },		/* 15 */
	{ NULL, NULL },		/* 16 */
	{ NULL, NULL },		/* 17 */
	{ NULL, NULL },		/* 18 */
	{ NULL, NULL },		/* 19 */
	{ NULL, NULL },		/* 20 */
	{ NULL, NULL },		/* 21 */
	{ NULL, NULL }		/* 22 dev ide1 */
};
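/*
 * Added note (not in the original source): drivers claim their slot in
 * blk_dev[] at init time.  A minimal sketch following the blk.h
 * conventions of this kernel generation, where each driver defines
 * MAJOR_NR and DEVICE_REQUEST before including blk.h:
 *
 *	blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
 */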
/*
 * blk_size contains the size of all block-devices in units of 1024 byte
 * sectors:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *		then 512 bytes is assumed.
 * else
 *		sector_size is hardsect_size[MAJOR][MINOR]
 *
 * This is currently set by some scsi devices and read by the msdos fs
 * driver; it might find other uses later.
 */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
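/*
 * Added illustration (not in the original source): how a driver might
 * publish its geometry through these three tables at init time.  A
 * minimal sketch; the array names and minor count are hypothetical:
 *
 *	static int my_sizes[64];	device sizes in 1024-byte blocks
 *	static int my_blksizes[64];	software block size per minor
 *
 *	blk_size[MAJOR_NR] = my_sizes;
 *	blksize_size[MAJOR_NR] = my_blksizes;
 *	hardsect_size[MAJOR_NR] = NULL;	plain 512-byte hardware sectors
 */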
103 * "plug" the device if there are no outstanding requests: this will
104 * force the transfer to start only after we have put all the requests
105 * on the list.
107 static void plug_device(struct blk_dev_struct * dev, struct request * plug)
109 unsigned long flags;
111 plug->rq_status = RQ_INACTIVE;
112 plug->cmd = -1;
113 plug->next = NULL;
114 save_flags(flags);
115 cli();
116 if (!dev->current_request)
117 dev->current_request = plug;
118 restore_flags(flags);
/*
 * remove the plug and let it rip..
 */
static void unplug_device(struct blk_dev_struct * dev)
{
	struct request * req;
	unsigned long flags;

	save_flags(flags);
	cli();
	req = dev->current_request;
	if (req && req->rq_status == RQ_INACTIVE && req->cmd == -1) {
		dev->current_request = req->next;
		(dev->request_fn)();
	}
	restore_flags(flags);
}
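/*
 * Added illustration (not in the original source): the intended
 * plug/unplug pattern, as ll_rw_block() below uses it.  A caller with
 * several buffers plugs the queue once, feeds in every request so the
 * elevator can sort them, then unplugs to start the transfer:
 *
 *	struct request plug;
 *
 *	plug_device(dev, &plug);	hold back request_fn
 *	for (i = 0; i < nr; i++)
 *		make_request(major, rw, bh[i]);
 *	unplug_device(dev);		let it rip
 */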
/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 * be disabled on the way out.
 */
static inline struct request * get_request(int n, kdev_t dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->rq_status == RQ_INACTIVE)
			break;
		if (req == prev_found)
			return NULL;
	}
	prev_found = req;
	req->rq_status = RQ_ACTIVE;
	req->rq_dev = dev;
	return req;
}
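/*
 * Added note (not in the original source): the loop above is a
 * rotating scan.  It walks backwards from the previous hit, wrapping
 * from all_requests to limit, so successive callers do not keep
 * hammering the same low-numbered slots.  With n == 4 and prev_found
 * pointing at entry 1, for example, the probe order is 0, 3, 2, 1,
 * and NULL is returned only after a full lap finds nothing inactive.
 */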
/*
 * wait until a free request in the first N entries is available.
 */
static struct request * __get_request_wait(int n, kdev_t dev)
{
	register struct request *req;
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&wait_for_request, &wait);
	for (;;) {
		unplug_device(MAJOR(dev)+blk_dev);
		current->state = TASK_UNINTERRUPTIBLE;
		cli();
		req = get_request(n, dev);
		sti();
		if (req)
			break;
		schedule();
	}
	remove_wait_queue(&wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return req;
}
static inline struct request * get_request_wait(int n, kdev_t dev)
{
	register struct request *req;

	cli();
	req = get_request(n, dev);
	sti();
	if (req)
		return req;
	return __get_request_wait(n, dev);
}
/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}
void set_device_ro(kdev_t dev,int flag)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return;
	if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
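/*
 * Added note (not in the original source): each ro_bits[major] row
 * packs 256 minors into eight longs (32 bits each used).  Minor 40,
 * for instance, lives in word 40 >> 5 == 1 at bit 40 & 31 == 8, so
 *
 *	set_device_ro(MKDEV(major, 40), 1);
 *
 * sets bit 8 of ro_bits[major][1], and is_read_only() on the same
 * device then returns non-zero.
 */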
/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short disk_index;

	switch (MAJOR(req->rq_dev)) {
		case SCSI_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
			if (disk_index < 4)
				kstat.dk_drive[disk_index]++;
			break;
		case IDE0_MAJOR:	/* same as HD_MAJOR */
		case XT_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
			kstat.dk_drive[disk_index]++;
			break;
		case IDE1_MAJOR:
			disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
			kstat.dk_drive[disk_index]++;
			/* fall through */
		default:
			break;
	}

	req->next = NULL;
	cli();
	if (req->bh)
		mark_buffer_clean(req->bh);
	if (!(tmp = dev->current_request)) {
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
	if (scsi_major(MAJOR(req->rq_dev)))
		(dev->request_fn)();

	sti();
}
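/*
 * Added note (not in the original source): the insertion loop above is
 * the classic one-way elevator.  req goes in after tmp when it sorts
 * after tmp (IN_ORDER(tmp,req)), or when tmp is the point where the
 * sorted list already wraps (!IN_ORDER(tmp,tmp->next)), provided req
 * still sorts before tmp->next.  The driver thus services requests in
 * ascending sector order with one change of direction per sweep.
 */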
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

/* WRITEA/READA is special case - it is not really needed, so if the */
/* buffer is locked, we just forget about it, else it's a normal read */
	rw_ahead = (rw == READA || rw == WRITEA);
	if (rw_ahead) {
		if (bh->b_lock)
			return;
		if (rw == READA)
			rw = READ;
		else
			rw = WRITE;
	}
	if (rw!=READ && rw!=WRITE) {
		printk("Bad block dev command, must be R/W/RA/WA\n");
		return;
	}
	count = bh->b_size >> 9;
	sector = bh->b_blocknr * count;
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_dirt = bh->b_uptodate = 0;
			bh->b_req = 0;
			printk("attempt to access beyond end of device\n");
			return;
		}
	/* Uhhuh.. Nasty dead-lock possible here.. */
	if (bh->b_lock)
		return;
	/* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
	lock_buffer(bh);
	if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
		unlock_buffer(bh);
		return;
	}

/* we don't allow the write-requests to fill up the queue completely:
 * we want some room for reads: they take precedence. The last third
 * of the requests are only for reads.
 */
	max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);
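/* Added note (not in the original source): with NR_REQUEST == 64, a
 * value typical for this kernel generation, writes may only claim the
 * first 42 table entries while reads may search all 64, so reads can
 * still make progress when writers have saturated their share.
 */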
/* look for a free request. */
	cli();

/* The scsi disk drivers and the IDE driver completely remove the request
 * from the queue when they start processing an entry.  For this reason
 * it is safe to continue to add links to the top entry for those devices.
 */
	if ((   major == IDE0_MAJOR	/* same as HD_MAJOR */
	     || major == IDE1_MAJOR
	     || major == FLOPPY_MAJOR
	     || major == SCSI_DISK_MAJOR
	     || major == SCSI_CDROM_MAJOR
	     || major == IDE2_MAJOR
	     || major == IDE3_MAJOR)
	    && (req = blk_dev[major].current_request))
	{
#ifdef CONFIG_BLK_DEV_HD
		if (major == HD_MAJOR || major == FLOPPY_MAJOR)
#else
		if (major == FLOPPY_MAJOR)
#endif /* CONFIG_BLK_DEV_HD */
			req = req->next;
		while (req) {
			if (req->rq_dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 244)
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				mark_buffer_clean(bh);
				sti();
				return;
			}

			if (req->rq_dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 244)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				mark_buffer_clean(bh);
				req->bh = bh;
				sti();
				return;
			}

			req = req->next;
		}
	}

/* find an unused request. */
	req = get_request(max_req, bh->b_dev);
	sti();

/* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
	if (!req) {
		if (rw_ahead) {
			unlock_buffer(bh);
			return;
		}
		req = __get_request_wait(max_req, bh->b_dev);
	}

/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}
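/*
 * Added note (not in the original source): the scan in make_request()
 * attempts two merges before falling back to a fresh request.  The
 * first clause appends bh when an active request ends exactly at our
 * start sector (a back merge onto req->bhtail); the second prepends bh
 * when a request begins exactly where our buffer ends (req->sector -
 * count == sector), pulling req->sector and req->buffer back to cover
 * it.  Both merges are capped at 244 sectors per request.
 */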
void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);
	unsigned long sector = page * (PAGE_SIZE / 512);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %s (%ld)\n",
		       kdevname(dev), sector);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device %s\n",
		       kdevname(dev));
		return;
	}
	req = get_request_wait(NR_REQUEST, dev);
/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = PAGE_SIZE / 512;
	req->current_nr_sectors = PAGE_SIZE / 512;
	req->buffer = buffer;
	req->sem = &sem;
	req->bh = NULL;
	req->next = NULL;
	add_request(major+blk_dev,req);
	down(&sem);
}
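/*
 * Added illustration (not in the original source): ll_rw_page() is
 * fully synchronous.  req->sem points at the on-stack semaphore, the
 * driver's end-of-request handling does an up() on it, and the down()
 * above therefore sleeps until the whole page has been transferred.
 * A swap-in would look roughly like this (names hypothetical):
 *
 *	ll_rw_page(READ, swap_dev, page_nr, page_buf);
 *	page_buf is valid here
 */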
/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong
   to the same device */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Make sure that the first block contains something reasonable */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	}

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
		kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
		goto sorry;
	}

	/* Determine correct block size for this device. */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Verify requested block sizes. */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
			       correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device %s\n",
		       kdevname(bh[0]->b_dev));
		goto sorry;
	}

	/* If there are no pending requests for this device, then we insert
	   a dummy request for that device.  This will prevent the request
	   from starting until we have shoved all of the blocks into the
	   queue, and then we let it rip.  */

	if (nr > 1)
		plug_device(dev, &plug);
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			bh[i]->b_req = 1;
			make_request(major, rw, bh[i]);
			if (rw == READ || rw == READA)
				kstat.pgpgin++;
			else
				kstat.pgpgout++;
		}
	}
	unplug_device(dev);
	return;

sorry:
	for (i = 0; i < nr; i++) {
		if (bh[i])
			bh[i]->b_dirt = bh[i]->b_uptodate = 0;
	}
	return;
}
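/*
 * Added illustration (not in the original source): the buffer cache is
 * the typical caller.  A bread()-style read of one block goes roughly
 * like this (sketch only, error handling omitted):
 *
 *	struct buffer_head * bh;
 *
 *	bh = getblk(dev, block, size);
 *	if (!bh->b_uptodate) {
 *		ll_rw_block(READ, 1, &bh);
 *		wait_on_buffer(bh);	sleep until the driver unlocks bh
 *	}
 */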
void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
	int i, j;
	int buffersize;
	struct request * req[8];
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
		return;
	}

	if (rw != READ && rw != WRITE) {
		printk("ll_rw_swap: bad block dev command, must be R/W");
		return;
	}
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't swap to read-only device %s\n",
		       kdevname(dev));
		return;
	}

	buffersize = PAGE_SIZE / nb;

	for (j=0, i=0; i<nb;)
	{
		for (; j < 8 && i < nb; j++, i++, buf += buffersize)
		{
			if (j == 0) {
				req[j] = get_request_wait(NR_REQUEST, dev);
			} else {
				cli();
				req[j] = get_request(NR_REQUEST, dev);
				sti();
				if (req[j] == NULL)
					break;
			}
			req[j]->cmd = rw;
			req[j]->errors = 0;
			req[j]->sector = (b[i] * buffersize) >> 9;
			req[j]->nr_sectors = buffersize >> 9;
			req[j]->current_nr_sectors = buffersize >> 9;
			req[j]->buffer = buf;
			req[j]->sem = &sem;
			req[j]->bh = NULL;
			req[j]->next = NULL;
			add_request(major+blk_dev,req[j]);
		}
		while (j > 0) {
			j--;
			down(&sem);
		}
	}
}
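/*
 * Added note (not in the original source): swap I/O is issued in
 * batches of at most eight requests.  Only the first request of each
 * batch may sleep in get_request_wait(); the rest are grabbed
 * opportunistically with get_request(), and the batch is cut short if
 * the table runs dry.  Every request shares the one on-stack
 * semaphore and each completion does an up() on it, so the down()
 * loop sleeps exactly once per request actually queued.
 */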
int blk_dev_init(void)
{
	struct request * req;

	req = all_requests + NR_REQUEST;
	while (--req >= all_requests) {
		req->rq_status = RQ_INACTIVE;
		req->next = NULL;
	}
	memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_IDE
	ide_init();		/* this MUST precede hd_init */
#endif
#ifdef CONFIG_BLK_DEV_HD
	hd_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
	xd_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
	floppy_init();
#else
	outb_p(0xc, 0x3f2);	/* no floppy driver: switch floppy motors off */
#endif
#ifdef CONFIG_CDU31A
	cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_MCD
	mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
	mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
	sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
	aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
	sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
	gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
	cm206_init();
#endif
#ifdef CONFIG_OPTCD
	optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
	sjcd_init();
#endif /* CONFIG_SJCD */
	return 0;
}