/*
 * I2O block device driver.
 *
 * (C) Copyright 1999 Red Hat Software
 *
 * Written by Alan Cox, Building Number Three Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This is a beta test release. Most of the good code was taken
 * from the nbd driver by Pavel Machek, who in turn took some of it
 * from loop.c. Isn't free software great for reusability 8)
 *
 * Fixes:
 *	Steve Ralston:	Multiple device handling error fixes,
 *			Added a queue depth.
 *	Alan Cox:	FC920 has an rmw bug. Don't OR in the
 *			end marker.
 *			Removed queue walk, fixed for 64bitness.
 * To do:
 *	Multiple majors
 *	Serial number scanning to find duplicates for FC multipathing
 *	Set the new max_sectors according to max message size
 *	Use scatter gather chains for bigger I/O sizes
 */
#include <linux/major.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/i2o.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/malloc.h>
#include <linux/hdreg.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>

#define MAJOR_NR I2O_MAJOR

#include <linux/blk.h>

#define MAX_I2OB	16

#define MAX_I2OB_DEPTH	32
#define MAX_I2OB_RETRIES 4
/*
 * Some of these can be made smaller later
 */

static int i2ob_blksizes[MAX_I2OB<<4];
static int i2ob_hardsizes[MAX_I2OB<<4];
static int i2ob_sizes[MAX_I2OB<<4];
static int i2ob_media_change_flag[MAX_I2OB];
static u32 i2ob_max_sectors[MAX_I2OB<<4];

static int i2ob_context;
struct i2ob_device
{
	struct i2o_controller *controller;
	struct i2o_device *i2odev;
	int tid;
	int flags;
	int refcnt;
	struct request *head, *tail;
	int done_flag;
};
/*
 *	FIXME:
 *	We should cache align these to avoid ping-ponging lines on SMP
 *	boxes under heavy I/O load...
 */

struct i2ob_request
{
	struct i2ob_request *next;
	struct request *req;
	int num;
};
/*
 *	Each I2O disk is one of these.
 */

static struct i2ob_device i2ob_dev[MAX_I2OB<<4];
static int i2ob_devices = 0;
static struct hd_struct i2ob[MAX_I2OB<<4];
static struct gendisk i2ob_gendisk;	/* Declared later */

static atomic_t queue_depth;		/* For flow control later on */
static struct i2ob_request i2ob_queue[MAX_I2OB_DEPTH+1];
static struct i2ob_request *i2ob_qhead;

static struct timer_list i2ob_timer;
static int i2ob_timer_started = 0;
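
/*
 * A note on indexing: each drive owns sixteen consecutive minors, so
 * the top four bits of a minor select the unit and the low four bits
 * the partition. The helper below is a hypothetical illustration of
 * the (minor & 0xF0) pattern used throughout this file; the driver
 * itself indexes the tables directly.
 */
static inline struct i2ob_device *i2ob_unit_dev(int minor)
{
	return &i2ob_dev[minor & 0xF0];	/* per-drive slot, partition bits masked off */
}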
#define DEBUG( s )
/* #define DEBUG( s ) printk( s )
 */

static int i2ob_install_device(struct i2o_controller *, struct i2o_device *, int);
static void i2ob_end_request(struct request *);
static void i2ob_request(request_queue_t * q);
/*
 * Dump messages.
 */
static void i2ob_dump_msg(struct i2ob_device *dev, u32 *msg, int size)
{
	int cnt;

	printk(KERN_INFO "\n\ni2o message:\n");
	for (cnt = 0; cnt < size; cnt++)
		printk(KERN_INFO "m[%d]=%x\n", cnt, msg[cnt]);
	printk(KERN_INFO "\n");
}
/*
 *	Get a message
 */

static u32 i2ob_get(struct i2ob_device *dev)
{
	struct i2o_controller *c = dev->controller;
	return I2O_POST_READ32(c);
}
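
/*
 * Message frame layout as built by i2ob_send() below (derived from
 * the writes in that function):
 *
 *	msg+0	message size (32-bit words) and SGL offset
 *	msg+4	function (BLOCK_READ/WRITE), initiator and target TID
 *	msg+8	initiator context: our handler context plus the unit
 *	msg+12	transaction context: the i2ob_queue slot number
 *	msg+16	command flags
 *	msg+20	transfer byte count
 *	msg+24	low 32 bits of the 64-bit byte offset on the device
 *	msg+28	high 32 bits of the byte offset
 *	msg+32	start of the scatter gather list
 */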
/*
 *	Turn a Linux block request into an I2O block read/write.
 */

static int i2ob_send(u32 m, struct i2ob_device *dev, struct i2ob_request *ireq, u32 base, int unit)
{
	struct i2o_controller *c = dev->controller;
	int tid = dev->tid;
	unsigned long msg;
	unsigned long mptr;
	u64 offset;
	struct request *req = ireq->req;
	struct buffer_head *bh = req->bh;
	int count = req->nr_sectors<<9;

	/* Map the message to a virtual address */
	msg = c->mem_offset + m;

	/*
	 * Build the message based on the request.
	 */
	__raw_writel(i2ob_context|(unit<<8), msg+8);
	__raw_writel(ireq->num, msg+12);
	__raw_writel(req->nr_sectors << 9, msg+20);

	/*
	 * This can be optimised later - just want to be sure it's
	 * right for starters.
	 */
	offset = ((u64)(req->sector+base)) << 9;
	__raw_writel(offset & 0xFFFFFFFF, msg+24);
	__raw_writel(offset>>32, msg+28);
	mptr = msg+32;
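
	/*
	 * The constants below appear to be the I2O simple SGL element
	 * flags (an assumption based on how they are used here):
	 * 0x10000000 marks a simple data element, 0x04000000 sets the
	 * direction towards the device, and 0xC0000000 (last element +
	 * end of buffer) terminates the chain. Hence 0xD0000000 and
	 * 0xD4000000 mark the final element of a read and of a write
	 * respectively.
	 */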
	if(req->cmd == READ)
	{
		__raw_writel(I2O_CMD_BLOCK_READ<<24|HOST_TID<<12|tid, msg+4);
		/* We don't yet do cache/readahead and other magic */
		__raw_writel(1<<16, msg+16);
		while(bh!=NULL)
		{
			/*
			 *	It's best to write this in one go, not
			 *	OR it in later: mptr is in PCI space,
			 *	so it is fast to write and sucky to read.
			 */
			if(bh->b_reqnext)
				__raw_writel(0x10000000|(bh->b_size), mptr);
			else
				__raw_writel(0xD0000000|(bh->b_size), mptr);

			__raw_writel(virt_to_bus(bh->b_data), mptr+4);
			mptr += 8;
			count -= bh->b_size;
			bh = bh->b_reqnext;
		}
	}
	else if(req->cmd == WRITE)
	{
		__raw_writel(I2O_CMD_BLOCK_WRITE<<24|HOST_TID<<12|tid, msg+4);
		__raw_writel(1<<16, msg+16);
		while(bh!=NULL)
		{
			if(bh->b_reqnext)
				__raw_writel(0x14000000|(bh->b_size), mptr);
			else
				__raw_writel(0xD4000000|(bh->b_size), mptr);
			count -= bh->b_size;
			__raw_writel(virt_to_bus(bh->b_data), mptr+4);
			mptr += 8;
			bh = bh->b_reqnext;
		}
	}

	__raw_writel(I2O_MESSAGE_SIZE(mptr-msg)>>2 | SGL_OFFSET_8, msg);
	if(req->current_nr_sectors > 8)
		printk("Gathered sectors %ld.\n",
			req->current_nr_sectors);

	if(count != 0)
		printk(KERN_ERR "Request count botched by %d.\n", count);

	i2o_post_message(c, m);
	atomic_inc(&queue_depth);

	return 0;
}
/*
 *	Return a completed i2ob_request slot to the head of the free
 *	list. Caller must hold the io_request_lock.
 */

static inline void i2ob_unhook_request(struct i2ob_request *ireq)
{
	ireq->next = i2ob_qhead;
	i2ob_qhead = ireq;
}
/*
 *	Request completion handler
 */

static void i2ob_end_request(struct request *req)
{
	/*
	 * Loop until all of the buffers that are linked
	 * to this request have been marked updated and
	 * unlocked.
	 */

	/* printk("ending request %p: ", req); */
	while (end_that_request_first(req, !req->errors, "i2o block"))
	{
		/* printk(" +\n"); */
	}

	/*
	 * It is now ok to complete the request.
	 */

	/* printk("finishing "); */
	end_that_request_last(req);
	/* printk("done\n"); */
}
/*
 *	OSM reply handler. This gets all the message replies
 */

static void i2o_block_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *msg)
{
	unsigned long flags;
	struct i2ob_request *ireq;
	u8 st;
	u32 *m = (u32 *)msg;
	u8 unit = (m[2]>>8)&0xF0;	/* low 4 bits are partition */

	if(m[0] & (1<<13))
	{
		printk("IOP fail.\n");
		printk("From %d To %d Cmd %d.\n",
			(m[1]>>12)&0xFFF,
			m[1]&0xFFF,
			m[1]>>24);
		printk("Failure Code %d.\n", m[4]>>24);
		if(m[4]&(1<<16))
			printk("Format error.\n");
		if(m[4]&(1<<17))
			printk("Path error.\n");
		if(m[4]&(1<<18))
			printk("Path State.\n");
		if(m[4]&(1<<19))	/* bit 19 assumed: the original tested bit 18 twice */
			printk("Congestion.\n");

		m = (u32 *)bus_to_virt(m[7]);
		printk("Failing message is %p.\n", m);

		/* We need to up the request failure count here and maybe
		   abort it */
		ireq = &i2ob_queue[m[3]];
		/* Now flush the message by making it a NOP */
		m[0] &= 0x00FFFFFF;
		m[0] |= (I2O_CMD_UTIL_NOP)<<24;
		i2o_post_message(c, virt_to_bus(m));
	}
	else
	{
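		/*
		 * Replies carrying bit 30 in the initiator context are
		 * answers to our own control messages (mount, lock,
		 * flush): for those, m[3] is a pointer to the waiter's
		 * done flag rather than a queue slot index.
		 */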
		if(m[2]&0x40000000)
		{
			int *ptr = (int *)m[3];
			if(m[4]>>24)
				*ptr = -1;
			else
				*ptr = 1;
			return;
		}
		/*
		 *	Let's see what is cooking. We stuffed the
		 *	request in the context.
		 */

		ireq = &i2ob_queue[m[3]];
		st = m[4]>>24;

		if(st != 0)
		{
			printk(KERN_ERR "i2ob: error %08X\n", m[4]);
			ireq->req->errors++;
			if (ireq->req->errors < MAX_I2OB_RETRIES)
			{
				u32 retry_msg;
				struct i2ob_device *dev;

				printk(KERN_ERR "i2ob: attempting retry %d for request %p\n", ireq->req->errors+1, ireq->req);

				/*
				 * Get a message for this retry.
				 */
				dev = &i2ob_dev[(unit&0xF0)];
				retry_msg = i2ob_get(dev);

				/*
				 * If we cannot get a message then
				 * forget the retry and fail the
				 * request. Note that since this is
				 * being called from the interrupt
				 * handler, a request has just been
				 * completed and there will most likely
				 * be space on the inbound message
				 * fifo so this won't happen often.
				 */
				if(retry_msg != 0xFFFFFFFF)
				{
					/*
					 * Decrement the queue depth since
					 * this request has completed and
					 * it will be incremented again when
					 * i2ob_send is called below.
					 */
					atomic_dec(&queue_depth);

					/*
					 * Send the request again.
					 */
					i2ob_send(retry_msg, dev, ireq,
						i2ob[unit].start_sect, (unit&0xF0));
					/*
					 * Don't fall through.
					 */
					return;
				}
			}
		}
		else
			ireq->req->errors = 0;
	}
	/*
	 *	Dequeue the request. We use irqsave locks as one day we
	 *	may be running polled controllers from a BH...
	 */

	spin_lock_irqsave(&io_request_lock, flags);
	i2ob_unhook_request(ireq);
	i2ob_end_request(ireq->req);

	/*
	 *	We may be able to do more I/O
	 */

	atomic_dec(&queue_depth);
	i2ob_request(NULL);
	spin_unlock_irqrestore(&io_request_lock, flags);
}
static struct i2o_handler i2o_block_handler =
{
	i2o_block_reply,
	"I2O Block OSM",
	0,
	I2O_CLASS_RANDOM_BLOCK_STORAGE
};
/*
 *	The timer handler will attempt to restart requests
 *	that are queued to the driver. This handler
 *	currently only gets called if the controller
 *	had no more room in its inbound fifo.
 */

static void i2ob_timer_handler(unsigned long dummy)
{
	unsigned long flags;

	/*
	 * We cannot touch the request queue or the timer
	 * flag without holding the io_request_lock.
	 */
	spin_lock_irqsave(&io_request_lock, flags);

	/*
	 * Clear the timer started flag so that
	 * the timer can be queued again.
	 */
	i2ob_timer_started = 0;

	/*
	 * Restart any requests.
	 */
	i2ob_request(NULL);

	/*
	 * Free the lock.
	 */
	spin_unlock_irqrestore(&io_request_lock, flags);
}
/*
 *	The I2O block driver is listed as one of those that pulls the
 *	front entry off the queue before processing it. This is
 *	important to remember here. If we drop the io lock then CURRENT
 *	will change on us. We must unlink CURRENT in this routine
 *	before we return, if we use it.
 */

static void i2ob_request(request_queue_t *q)
{
	struct request *req;
	struct i2ob_request *ireq;
	int unit;
	struct i2ob_device *dev;
	u32 m;

	while (CURRENT) {
		/*
		 * On an IRQ completion if there is an inactive
		 * request on the queue head it means it isn't yet
		 * ready to dispatch.
		 */
		if(CURRENT->rq_status == RQ_INACTIVE)
			return;

		/*
		 * Queue depths probably belong with some kind of
		 * generic IOP commit control. It certainly isn't
		 * right that it is global!
		 */
		if(atomic_read(&queue_depth) >= MAX_I2OB_DEPTH)
			break;

		req = CURRENT;
		unit = MINOR(req->rq_dev);
		dev = &i2ob_dev[(unit&0xF0)];
		/* Get a message */
		m = i2ob_get(dev);

		if(m == 0xFFFFFFFF)
		{
			/*
			 * See if the timer has already been queued.
			 */
			if (!i2ob_timer_started)
			{
				printk(KERN_ERR "i2ob: starting timer\n");

				/*
				 * Set the timer_started flag to ensure
				 * that the timer is only queued once.
				 * Queuing it more than once will corrupt
				 * the timer queue.
				 */
				i2ob_timer_started = 1;

				/*
				 * Set up the timer to expire in
				 * 500ms.
				 */
				i2ob_timer.expires = jiffies + (HZ >> 1);

				/*
				 * Start it.
				 */
				add_timer(&i2ob_timer);
			}
			/* Stop here; the timer will re-run us */
			return;
		}

		req->errors = 0;
		CURRENT = CURRENT->next;
		req->next = NULL;
		req->sem = NULL;

		ireq = i2ob_qhead;
		i2ob_qhead = ireq->next;
		ireq->req = req;

		i2ob_send(m, dev, ireq, i2ob[unit].start_sect, (unit&0xF0));
	}
}
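
/*
 * Request slot lifecycle: slots are taken off the i2ob_qhead free
 * list above, their index travels to the IOP in the message
 * transaction context, and i2ob_unhook_request() pushes them back on
 * the free list when the reply arrives.
 */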
/*
 *	SCSI-CAM for ioctl geometry mapping
 *	Duplicated with SCSI - this should be moved into somewhere
 *	common, perhaps genhd ?
 */

static void i2o_block_biosparam(
	unsigned long capacity,
	unsigned short *cyls,
	unsigned char *hds,
	unsigned char *secs)
{
	unsigned long heads, sectors, cylinders, temp;

	cylinders = 1024L;		/* Set number of cylinders to max */
	sectors = 62L;			/* Maximize sectors per track */

	temp = cylinders * sectors;	/* Compute divisor for heads */
	heads = capacity / temp;	/* Compute value for number of heads */
	if (capacity % temp) {		/* If no remainder, done! */
		heads++;		/* Else, increment number of heads */
		temp = cylinders * heads;	/* Compute divisor for sectors */
		sectors = capacity / temp;	/* Compute value for sectors per
						   track */
		if (capacity % temp) {	/* If no remainder, done! */
			sectors++;	/* Else, increment number of sectors */
			temp = heads * sectors;	/* Compute divisor for cylinders */
			cylinders = capacity / temp;	/* Compute number of cylinders */
		}
	}
	/* if something went wrong, then apparently we have to return
	   a geometry with more than 1024 cylinders */
	if (cylinders == 0 || heads > 255 || sectors > 63 || cylinders > 1023)
	{
		unsigned long temp_cyl;

		heads = 64;
		sectors = 32;
		temp_cyl = capacity / (heads * sectors);
		if (temp_cyl > 1024)
		{
			heads = 255;
			sectors = 63;
		}
		cylinders = capacity / (heads * sectors);
	}
	*cyls = (unsigned int) cylinders;	/* Stuff return values */
	*secs = (unsigned int) sectors;
	*hds = (unsigned int) heads;
}
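
/*
 * Worked example: for a 1Gb disk (capacity = 2097152 sectors) the
 * mapping above settles on heads = 34, sectors = 61, cylinders = 1011,
 * which passes the bounds check, so the fallback geometries are never
 * reached.
 */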
582 * Rescan the partition tables
585 static int do_i2ob_revalidate(kdev_t dev, int maxu)
587 int minor=MINOR(dev);
588 int i;
590 minor&=0xF0;
592 i2ob_dev[minor].refcnt++;
593 if(i2ob_dev[minor].refcnt>maxu+1)
595 i2ob_dev[minor].refcnt--;
596 return -EBUSY;
599 for( i = 15; i>=0 ; i--)
601 int m = minor+i;
602 kdev_t d = MKDEV(MAJOR_NR, m);
603 struct super_block *sb = get_super(d);
605 sync_dev(d);
606 if(sb)
607 invalidate_inodes(sb);
608 invalidate_buffers(d);
609 i2ob_gendisk.part[m].start_sect = 0;
610 i2ob_gendisk.part[m].nr_sects = 0;
614 * Do a physical check and then reconfigure
617 i2ob_install_device(i2ob_dev[minor].controller, i2ob_dev[minor].i2odev,
618 minor);
619 i2ob_dev[minor].refcnt--;
620 return 0;
624 * Issue device specific ioctl calls.
627 static int i2ob_ioctl(struct inode *inode, struct file *file,
628 unsigned int cmd, unsigned long arg)
630 struct i2ob_device *dev;
631 int minor;
633 /* Anyone capable of this syscall can do *real bad* things */
635 if (!capable(CAP_SYS_ADMIN))
636 return -EPERM;
637 if (!inode)
638 return -EINVAL;
639 minor = MINOR(inode->i_rdev);
640 if (minor >= (MAX_I2OB<<4))
641 return -ENODEV;
643 dev = &i2ob_dev[minor];
644 switch (cmd) {
645 case BLKGETSIZE:
646 return put_user(i2ob[minor].nr_sects, (long *) arg);
648 case HDIO_GETGEO:
650 struct hd_geometry g;
651 int u=minor&0xF0;
652 i2o_block_biosparam(i2ob_sizes[u]<<1,
653 &g.cylinders, &g.heads, &g.sectors);
654 g.start = i2ob[minor].start_sect;
655 return copy_to_user((void *)arg,&g, sizeof(g))?-EFAULT:0;
658 case BLKRRPART:
659 if(!capable(CAP_SYS_ADMIN))
660 return -EACCES;
661 return do_i2ob_revalidate(inode->i_rdev,1);
663 case BLKFLSBUF:
664 case BLKROSET:
665 case BLKROGET:
666 case BLKRASET:
667 case BLKRAGET:
668 case BLKPG:
669 return blk_ioctl(inode->i_rdev, cmd, arg);
671 default:
672 return -EINVAL;
/*
 *	Close the block device down
 */

static int i2ob_release(struct inode *inode, struct file *file)
{
	struct i2ob_device *dev;
	int minor;

	minor = MINOR(inode->i_rdev);
	if (minor >= (MAX_I2OB<<4))
		return -ENODEV;
	dev = &i2ob_dev[(minor&0xF0)];
	if (dev->refcnt <= 0)
		printk(KERN_ALERT "i2ob_release: refcount(%d) <= 0\n", dev->refcnt);
	dev->refcnt--;
	if(dev->refcnt == 0)
	{
		/*
		 *	Flush the onboard cache on unmount
		 */
		u32 msg[5];
		int *query_done = &dev->done_flag;
		msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
		msg[1] = I2O_CMD_BLOCK_CFLUSH<<24|HOST_TID<<12|dev->tid;
		msg[2] = i2ob_context|0x40000000;
		msg[3] = (u32)query_done;
		msg[4] = 60<<16;
		i2o_post_wait(dev->controller, msg, 20, 2);
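
		/*
		 * Presumably the trailing arguments to i2o_post_wait()
		 * are the message length in bytes (20 = five 32-bit
		 * words) and a timeout; that reading matches the
		 * FIVE_WORD_MSG_SIZE frames built here.
		 */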
		/*
		 *	Unlock the media
		 */
		msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
		msg[1] = I2O_CMD_BLOCK_MUNLOCK<<24|HOST_TID<<12|dev->tid;
		msg[2] = i2ob_context|0x40000000;
		msg[3] = (u32)query_done;
		msg[4] = -1;
		i2o_post_wait(dev->controller, msg, 20, 2);

		/*
		 * Now unclaim the device.
		 */
		if (i2o_release_device(dev->i2odev, &i2o_block_handler, I2O_CLAIM_PRIMARY) < 0)
			printk(KERN_ERR "i2ob_release: controller rejected unclaim.\n");
	}
	MOD_DEC_USE_COUNT;
	return 0;
}
/*
 *	Open the block device.
 */

static int i2ob_open(struct inode *inode, struct file *file)
{
	int minor;
	struct i2ob_device *dev;

	if (!inode)
		return -EINVAL;
	minor = MINOR(inode->i_rdev);
	if (minor >= MAX_I2OB<<4)
		return -ENODEV;
	dev = &i2ob_dev[(minor&0xF0)];

	if(dev->refcnt++ == 0)
	{
		u32 msg[6];
		int *query_done;

		if(i2o_claim_device(dev->i2odev, &i2o_block_handler, I2O_CLAIM_PRIMARY) < 0)
		{
			dev->refcnt--;
			return -EBUSY;
		}

		query_done = &dev->done_flag;

		/*
		 *	Mount the media if needed. Note that we don't use
		 *	the lock bit. Since we have to issue a lock if it
		 *	refuses a mount (quite possible) then we might as
		 *	well just send two messages out.
		 */
		msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
		msg[1] = I2O_CMD_BLOCK_MMOUNT<<24|HOST_TID<<12|dev->tid;
		msg[2] = i2ob_context|0x40000000;
		msg[3] = (u32)query_done;
		msg[4] = -1;
		msg[5] = 0;
		i2o_post_wait(dev->controller, msg, 24, 2);

		/*
		 *	Lock the media
		 */
		msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
		msg[1] = I2O_CMD_BLOCK_MLOCK<<24|HOST_TID<<12|dev->tid;
		msg[2] = i2ob_context|0x40000000;
		msg[3] = (u32)query_done;
		msg[4] = -1;
		i2o_post_wait(dev->controller, msg, 20, 2);
	}
	MOD_INC_USE_COUNT;
	return 0;
}
/*
 *	Issue a device query
 */

static int i2ob_query_device(struct i2ob_device *dev, int table,
	int field, void *buf, int buflen)
{
	return i2o_query_scalar(dev->controller, dev->tid,
		table, field, buf, buflen);
}
795 * Install the I2O block device we found.
798 static int i2ob_install_device(struct i2o_controller *c, struct i2o_device *d, int unit)
800 u64 size;
801 u32 blocksize;
802 u32 limit;
803 u8 type;
804 u32 flags, status;
805 struct i2ob_device *dev=&i2ob_dev[unit];
806 int i;
809 * Ask for the current media data. If that isn't supported
810 * then we ask for the device capacity data
813 if(i2ob_query_device(dev, 0x0004, 1, &blocksize, 4) != 0
814 || i2ob_query_device(dev, 0x0004, 0, &size, 8) !=0 )
816 i2ob_query_device(dev, 0x0000, 3, &blocksize, 4);
817 i2ob_query_device(dev, 0x0000, 4, &size, 8);
820 i2ob_query_device(dev, 0x0000, 5, &flags, 4);
821 i2ob_query_device(dev, 0x0000, 6, &status, 4);
822 i2ob_sizes[unit] = (int)(size>>10);
823 i2ob_hardsizes[unit] = blocksize;
825 limit=4096; /* 8 deep scatter gather */
827 printk("Byte limit is %d.\n", limit);
829 for(i=unit;i<=unit+15;i++)
830 i2ob_max_sectors[i]=(limit>>9);
832 i2ob[unit].nr_sects = (int)(size>>9);
834 i2ob_query_device(dev, 0x0000, 0, &type, 1);
836 sprintf(d->dev_name, "%s%c", i2ob_gendisk.major_name, 'a' + (unit>>4));
838 printk("%s: ", d->dev_name);
839 if(status&(1<<10))
840 printk("RAID ");
841 switch(type)
843 case 0: printk("Disk Storage");break;
844 case 4: printk("WORM");break;
845 case 5: printk("CD-ROM");break;
846 case 7: printk("Optical device");break;
847 default:
848 printk("Type %d", type);
850 if(((flags & (1<<3)) && !(status & (1<<3))) ||
851 ((flags & (1<<4)) && !(status & (1<<4))))
853 printk(" Not loaded.\n");
854 return 0;
856 printk(" %dMb, %d byte sectors",
857 (int)(size>>20), blocksize);
858 if(status&(1<<0))
860 u32 cachesize;
861 i2ob_query_device(dev, 0x0003, 0, &cachesize, 4);
862 cachesize>>=10;
863 if(cachesize>4095)
864 printk(", %dMb cache", cachesize>>10);
865 else
866 printk(", %dKb cache", cachesize);
868 printk(".\n");
869 printk("%s: Maximum sectors/read set to %d.\n",
870 d->dev_name, i2ob_max_sectors[unit]);
871 grok_partitions(&i2ob_gendisk, unit>>4, 1<<4, (long)(size>>9));
872 return 0;
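
/*
 * Units used above: the device reports its capacity in bytes (size),
 * so size>>10 gives the blk_size table its Kb figure, size>>9 the
 * sector count handed to the partition code, and size>>20 the Mb
 * figure printed for the user.
 */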
static void i2ob_probe(void)
{
	int i;
	int unit = 0;
	int warned = 0;

	for(i = 0; i < MAX_I2O_CONTROLLERS; i++)
	{
		struct i2o_controller *c = i2o_find_controller(i);
		struct i2o_device *d;

		if(c == NULL)
			continue;

		for(d = c->devices; d != NULL; d = d->next)
		{
			if(d->lct_data->class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE)
				continue;

			if(d->lct_data->user_tid != 0xFFF)
				continue;

			if(unit < MAX_I2OB<<4)
			{
				/*
				 * Get the device and fill in the
				 * Tid and controller.
				 */
				struct i2ob_device *dev = &i2ob_dev[unit];
				dev->i2odev = d;
				dev->controller = c;
				dev->tid = d->lct_data->tid;

				/*
				 * Ensure the device can be claimed
				 * before installing it.
				 */
				if(i2o_claim_device(dev->i2odev, &i2o_block_handler, I2O_CLAIM_PRIMARY) == 0)
				{
					printk(KERN_INFO "Claimed Dev %p Tid %d Unit %d\n", dev, dev->tid, unit);
					i2ob_install_device(c, d, unit);
					unit += 16;

					/*
					 * Now that the device has been
					 * installed, unclaim it so that
					 * it can be claimed by either
					 * the block or scsi driver.
					 */
					if(i2o_release_device(dev->i2odev, &i2o_block_handler, I2O_CLAIM_PRIMARY))
						printk(KERN_INFO "Could not unclaim Dev %p Tid %d\n", dev, dev->tid);
				}
				else
					printk(KERN_INFO "TID %d not claimed\n", dev->tid);
			}
			else
			{
				if(!warned++)
					printk("i2o_block: too many devices, registering only %d.\n", unit>>4);
			}
		}
		i2o_unlock_controller(c);
	}
	i2ob_devices = unit;
}
/*
 *	Have we seen a media change ?
 */

static int i2ob_media_change(kdev_t dev)
{
	int i = MINOR(dev);
	i >>= 4;
	if(i2ob_media_change_flag[i])
	{
		i2ob_media_change_flag[i] = 0;
		return 1;
	}
	return 0;
}

static int i2ob_revalidate(kdev_t dev)
{
	return do_i2ob_revalidate(dev, 0);
}
static int i2ob_reboot_event(struct notifier_block *n, unsigned long code, void *p)
{
	int i;

	if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
		return NOTIFY_DONE;

	for(i = 0; i < MAX_I2OB; i++)
	{
		struct i2ob_device *dev = &i2ob_dev[(i<<4)];

		if(dev->refcnt != 0)
		{
			/*
			 *	Flush the onboard cache on power down,
			 *	also unlock the media
			 */
			u32 msg[5];
			int *query_done = &dev->done_flag;
			msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
			msg[1] = I2O_CMD_BLOCK_CFLUSH<<24|HOST_TID<<12|dev->tid;
			msg[2] = i2ob_context|0x40000000;
			msg[3] = (u32)query_done;
			msg[4] = 60<<16;
			i2o_post_wait(dev->controller, msg, 20, 2);

			/*
			 *	Unlock the media
			 */
			msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
			msg[1] = I2O_CMD_BLOCK_MUNLOCK<<24|HOST_TID<<12|dev->tid;
			msg[2] = i2ob_context|0x40000000;
			msg[3] = (u32)query_done;
			msg[4] = -1;
			i2o_post_wait(dev->controller, msg, 20, 2);
		}
	}
	return NOTIFY_DONE;
}
struct notifier_block i2ob_reboot_notifier =
{
	i2ob_reboot_event,
	NULL,
	0
};

static struct block_device_operations i2ob_fops =
{
	open:			i2ob_open,
	release:		i2ob_release,
	ioctl:			i2ob_ioctl,
	check_media_change:	i2ob_media_change,
	revalidate:		i2ob_revalidate,
};
static struct gendisk i2ob_gendisk =
{
	MAJOR_NR,
	"i2ohd",
	4,		/* minor_shift: 16 minors per unit */
	1<<4,		/* max_p */
	i2ob,
	i2ob_sizes,
	0,		/* nr_real */
	NULL,
	NULL
};
/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

#ifdef MODULE
#define i2o_block_init init_module
#endif
int i2o_block_init(void)
{
	int i;

	printk(KERN_INFO "I2O Block Storage OSM v0.07. (C) 1999 Red Hat Software.\n");

	/*
	 *	Register the block device interfaces
	 */

	if (register_blkdev(MAJOR_NR, "i2o_block", &i2ob_fops)) {
		printk("Unable to get major number %d for i2o_block\n",
		       MAJOR_NR);
		return -EIO;
	}
#ifdef MODULE
	printk("i2o_block: registered device at major %d\n", MAJOR_NR);
#endif

	/*
	 *	Now fill in the boilerplate
	 */

	blksize_size[MAJOR_NR] = i2ob_blksizes;
	hardsect_size[MAJOR_NR] = i2ob_hardsizes;
	blk_size[MAJOR_NR] = i2ob_sizes;
	max_sectors[MAJOR_NR] = i2ob_max_sectors;

	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), i2ob_request);
	blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR), 0);

	for (i = 0; i < MAX_I2OB << 4; i++) {
		i2ob_dev[i].refcnt = 0;
		i2ob_dev[i].flags = 0;
		i2ob_dev[i].controller = NULL;
		i2ob_dev[i].i2odev = NULL;
		i2ob_dev[i].tid = 0;
		i2ob_dev[i].head = NULL;
		i2ob_dev[i].tail = NULL;
		i2ob_blksizes[i] = 1024;
		i2ob_max_sectors[i] = 2;
	}

	/*
	 *	Set up the queue
	 */

	for(i = 0; i < MAX_I2OB_DEPTH; i++)
	{
		i2ob_queue[i].next = &i2ob_queue[i+1];
		i2ob_queue[i].num = i;
	}

	/* Queue is MAX_I2OB_DEPTH + 1... */
	i2ob_queue[i].next = NULL;
	i2ob_qhead = &i2ob_queue[0];

	/*
	 *	Timers
	 */

	init_timer(&i2ob_timer);
	i2ob_timer.function = i2ob_timer_handler;
	i2ob_timer.data = 0;

	/*
	 *	Register the OSM handler as we will need this to probe for
	 *	drives, geometry and other goodies.
	 */

	if(i2o_install_handler(&i2o_block_handler) < 0)
	{
		unregister_blkdev(MAJOR_NR, "i2o_block");
		printk(KERN_ERR "i2o_block: unable to register OSM.\n");
		return -EINVAL;
	}
	i2ob_context = i2o_block_handler.context;

	/*
	 *	Finally see what is actually plugged in to our controllers
	 */

	i2ob_probe();

	register_reboot_notifier(&i2ob_reboot_notifier);
	return 0;
}
#ifdef MODULE

EXPORT_NO_SYMBOLS;
MODULE_AUTHOR("Red Hat Software");
MODULE_DESCRIPTION("I2O Block Device OSM");

void cleanup_module(void)
{
	struct gendisk **gdp;

	unregister_reboot_notifier(&i2ob_reboot_notifier);

	/*
	 *	Flush the OSM
	 */

	i2o_remove_handler(&i2o_block_handler);

	/*
	 *	Return the block device
	 */
	if (unregister_blkdev(MAJOR_NR, "i2o_block") != 0)
		printk("i2o_block: cleanup_module failed\n");

	/*
	 * Why isn't register/unregister gendisk in the kernel ???
	 */

	for (gdp = &gendisk_head; *gdp; gdp = &((*gdp)->next))
		if (*gdp == &i2ob_gendisk)
			break;

	/* Unlink ourselves if the scan found us (the unlink itself was
	   lost from this copy; this is the conventional completion) */
	if (*gdp)
		*gdp = (*gdp)->next;
}
#endif