/*
 * I2O block device driver.
 *
 * (C) Copyright 1999 Red Hat Software
 *
 * Written by Alan Cox, Building Number Three Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This is a beta test release. Most of the good code was taken
 * from the nbd driver by Pavel Machek, who in turn took some of it
 * from loop.c. Isn't free software great for reusability 8)
 *
 * Fixes:
 *	Steve Ralston:	Multiple device handling error fixes,
 *			Added a queue depth.
 *	Alan Cox:	FC920 has an rmw bug. Dont or in the
 *			[continuation elided in this extract]
 *			Removed queue walk, fixed for 64bitness.
 *	Serial number scanning to find duplicates for FC multipathing
 *	Set the new max_sectors according to max message size
 *	Use scatter gather chains for bigger I/O sizes
 */
30 #include <linux/major.h>
32 #include <linux/module.h>
34 #include <linux/sched.h>
36 #include <linux/stat.h>
37 #include <linux/errno.h>
38 #include <linux/file.h>
39 #include <linux/ioctl.h>
40 #include <linux/i2o.h>
41 #include <linux/blkdev.h>
42 #include <linux/blkpg.h>
43 #include <linux/malloc.h>
44 #include <linux/hdreg.h>
46 #include <linux/notifier.h>
47 #include <linux/reboot.h>
49 #include <asm/uaccess.h>
51 #include <asm/atomic.h>
53 #define MAJOR_NR I2O_MAJOR
55 #include <linux/blk.h>
/*
 * Module-wide state. NOTE(review): this extract is garbled - the
 * leading integers on each line are original-file line numbers fused
 * into the text, and many interstitial lines (braces, struct headers)
 * are missing.
 */
59 #define MAX_I2OB_DEPTH 32
60 #define MAX_I2OB_RETRIES 4
63 * Some of these can be made smaller later
/* Per-minor block-layer tables, indexed by MINOR (16 minors per unit,
   hence the <<4). */
66 static int i2ob_blksizes
[MAX_I2OB
<<4];
67 static int i2ob_hardsizes
[MAX_I2OB
<<4];
68 static int i2ob_sizes
[MAX_I2OB
<<4];
/* One flag per unit (array is NOT <<4): media change seen since the
   last i2ob_media_change() query. */
69 static int i2ob_media_change_flag
[MAX_I2OB
];
70 static u32 i2ob_max_sectors
[MAX_I2OB
<<4];
/* OSM initiator context, echoed back in replies so they route to us. */
72 static int i2ob_context
;
/* NOTE(review): the fields below belong to struct i2ob_device and
   struct i2ob_request whose enclosing declarations sit in elided
   lines - confirm against the original source. */
76 struct i2o_controller
*controller
;
77 struct i2o_device
*i2odev
;
81 struct request
*head
, *tail
;
87 * We should cache align these to avoid ping-ponging lines on SMP
88 * boxes under heavy I/O load...
93 struct i2ob_request
*next
;
100 * Each I2O disk is one of these.
103 static struct i2ob_device i2ob_dev
[MAX_I2OB
<<4];
104 static int i2ob_devices
= 0;
105 static struct hd_struct i2ob
[MAX_I2OB
<<4];
106 static struct gendisk i2ob_gendisk
; /* Declared later */
108 static atomic_t queue_depth
; /* For flow control later on */
/* Free pool of request slots; one spare entry terminates the list. */
109 static struct i2ob_request i2ob_queue
[MAX_I2OB_DEPTH
+1];
110 static struct i2ob_request
*i2ob_qhead
;
/* One-shot timer used to re-kick the request queue after the IOP
   inbound FIFO was found full. */
112 static struct timer_list i2ob_timer
;
113 static int i2ob_timer_started
= 0;
116 /* #define DEBUG( s ) printk( s )
119 static int i2ob_install_device(struct i2o_controller
*, struct i2o_device
*, int);
120 static void i2ob_end_request(struct request
*);
121 static void i2ob_request(request_queue_t
* q
);
126 static void i2ob_dump_msg(struct i2ob_device
*dev
,u32
*msg
,int size
)
130 printk(KERN_INFO
"\n\ni2o message:\n");
131 for (cnt
= 0; cnt
<size
; cnt
++)
133 printk(KERN_INFO
"m[%d]=%x\n",cnt
,msg
[cnt
]);
135 printk(KERN_INFO
"\n");
142 static u32
i2ob_get(struct i2ob_device
*dev
)
144 struct i2o_controller
*c
=dev
->controller
;
145 return I2O_POST_READ32(c
);
149 * Turn a Linux block request into an I2O block read/write.
/*
 * NOTE(review): garbled extract - leading integers are original line
 * numbers. The declarations of msg/mptr/tid/offset and the
 * scatter-gather walk over the buffer_head chain sit in elided lines.
 */
152 static int i2ob_send(u32 m
, struct i2ob_device
*dev
, struct i2ob_request
*ireq
, u32 base
, int unit
)
154 struct i2o_controller
*c
= dev
->controller
;
159 struct request
*req
= ireq
->req
;
160 struct buffer_head
*bh
= req
->bh
;
161 int count
= req
->nr_sectors
<<9;
163 /* Map the message to a virtual address */
164 msg
= c
->mem_offset
+ m
;
167 * Build the message based on the request.
/* Initiator context + unit number lets the reply handler route back. */
169 __raw_writel(i2ob_context
|(unit
<<8), msg
+8);
/* Transaction context: index of this i2ob_request slot in i2ob_queue. */
170 __raw_writel(ireq
->num
, msg
+12);
171 __raw_writel(req
->nr_sectors
<< 9, msg
+20);
173 /* This can be optimised later - just want to be sure its right for
/* 64-bit media byte offset = (request sector + partition base) * 512. */
175 offset
= ((u64
)(req
->sector
+base
)) << 9;
176 __raw_writel( offset
& 0xFFFFFFFF, msg
+24);
177 __raw_writel(offset
>>32, msg
+28);
182 __raw_writel(I2O_CMD_BLOCK_READ
<<24|HOST_TID
<<12|tid
, msg
+4);
183 /* We don't yet do cache/readahead and other magic */
184 __raw_writel(1<<16, msg
+16);
188 * Its best to do this in one not or it in
189 * later. mptr is in PCI space so fast to write
/* Read-side SG elements: 0x10... for intermediate entries,
   0xD0... presumably flags the final entry - confirm vs I2O spec. */
193 __raw_writel(0x10000000|(bh
->b_size
), mptr
);
195 __raw_writel(0xD0000000|(bh
->b_size
), mptr
);
197 __raw_writel(virt_to_bus(bh
->b_data
), mptr
+4);
203 else if(req
->cmd
== WRITE
)
205 __raw_writel(I2O_CMD_BLOCK_WRITE
<<24|HOST_TID
<<12|tid
, msg
+4);
206 __raw_writel(1<<16, msg
+16);
/* Write-side SG flags differ: 0x14... / 0xD4... for the last entry. */
210 __raw_writel(0x14000000|(bh
->b_size
), mptr
);
212 __raw_writel(0xD4000000|(bh
->b_size
), mptr
);
214 __raw_writel(virt_to_bus(bh
->b_data
), mptr
+4);
219 __raw_writel(I2O_MESSAGE_SIZE(mptr
-msg
)>>2 | SGL_OFFSET_8
, msg
);
221 if(req
->current_nr_sectors
> 8)
222 printk("Gathered sectors %ld.\n",
223 req
->current_nr_sectors
);
/* count should have been consumed exactly by the (elided) SG walk. */
227 printk(KERN_ERR
"Request count botched by %d.\n", count
);
/* Hand the frame to the IOP and account for it in our queue depth. */
230 i2o_post_message(c
,m
);
231 atomic_inc(&queue_depth
);
237 * Remove a request from the _locked_ request list. We update both the
238 * list chain and if this is the last item the tail pointer. Caller
239 * must hold the lock.
242 static inline void i2ob_unhook_request(struct i2ob_request
*ireq
)
/* Push the slot back onto the free-slot list head.
   NOTE(review): the matching `i2ob_qhead = ireq;` appears to be in an
   elided line - confirm against the original source. */
244 ireq
->next
= i2ob_qhead
;
249 * Request completion handler
252 static void i2ob_end_request(struct request
*req
)
255 * Loop until all of the buffers that are linked
256 * to this request have been marked updated and
260 // printk("ending request %p: ", req);
261 while (end_that_request_first( req
, !req
->errors
, "i2o block" ))
267 * It is now ok to complete the request.
270 // printk("finishing ");
271 end_that_request_last( req
);
277 * OSM reply handler. This gets all the message replies
/*
 * NOTE(review): garbled extract - leading integers are original line
 * numbers. The failure-code switch scaffolding, retry bookkeeping and
 * several branch bodies sit in elided lines.
 */
280 static void i2o_block_reply(struct i2o_handler
*h
, struct i2o_controller
*c
, struct i2o_message
*msg
)
283 struct i2ob_request
*ireq
;
286 u8 unit
= (m
[2]>>8)&0xF0; /* low 4 bits are partition */
/* ---- IOP failure path: decode and report the failure frame ---- */
290 printk("IOP fail.\n");
291 printk("From %d To %d Cmd %d.\n",
295 printk("Failure Code %d.\n", m
[4]>>24);
297 printk("Format error.\n");
299 printk("Path error.\n");
301 printk("Path State.\n");
303 printk("Congestion.\n");
/* m[7] holds the bus address of the failing request frame. */
305 m
=(u32
*)bus_to_virt(m
[7]);
306 printk("Failing message is %p.\n", m
);
308 /* We need to up the request failure count here and maybe
/* Transaction context m[3] indexes our request slot table. */
310 ireq
=&i2ob_queue
[m
[3]];
311 /* Now flush the message by making it a NOP */
313 m
[0]|=(I2O_CMD_UTIL_NOP
)<<24;
314 i2o_post_message(c
,virt_to_bus(m
));
321 int * ptr
= (int *)m
[3];
329 * Lets see what is cooking. We stuffed the
330 * request in the context.
333 ireq
=&i2ob_queue
[m
[3]];
/* ---- Error reply: retry up to MAX_I2OB_RETRIES times ---- */
338 printk(KERN_ERR
"i2ob: error %08X\n", m
[4]);
340 if (ireq
->req
->errors
< MAX_I2OB_RETRIES
)
343 struct i2ob_device
*dev
;
345 printk(KERN_ERR
"i2ob: attempting retry %d for request %p\n",ireq
->req
->errors
+1,ireq
->req
);
348 * Get a message for this retry.
350 dev
= &i2ob_dev
[(unit
&0xF0)];
351 retry_msg
= i2ob_get(dev
);
354 * If we cannot get a message then
355 * forget the retry and fail the
356 * request. Note that since this is
357 * being called from the interrupt
358 * handler, a request has just been
359 * completed and there will most likely
360 * be space on the inbound message
361 * fifo so this won't happen often.
363 if(retry_msg
!=0xFFFFFFFF)
366 * Decrement the queue depth since
367 * this request has completed and
368 * it will be incremented again when
369 * i2ob_send is called below.
371 atomic_dec(&queue_depth
);
374 * Send the request again.
376 i2ob_send(retry_msg
, dev
,ireq
,i2ob
[unit
].start_sect
, (unit
&0xF0));
378 * Don't fall through.
/* Retries exhausted (or success): clear the error count before
   completing so the block layer sees a clean request. */
385 ireq
->req
->errors
= 0;
389 * Dequeue the request. We use irqsave locks as one day we
390 * may be running polled controllers from a BH...
393 spin_lock_irqsave(&io_request_lock
, flags
);
394 i2ob_unhook_request(ireq
);
395 i2ob_end_request(ireq
->req
);
398 * We may be able to do more I/O
401 atomic_dec(&queue_depth
);
403 spin_unlock_irqrestore(&io_request_lock
, flags
);
/* OSM registration record: reply callback + the device class we own.
   NOTE(review): remaining initializer fields are in elided lines. */
406 static struct i2o_handler i2o_block_handler
=
411 I2O_CLASS_RANDOM_BLOCK_STORAGE
415 * The timer handler will attempt to restart requests
416 * that are queued to the driver. This handler
417 * currently only gets called if the controller
418 * had no more room in its inbound fifo.
421 static void i2ob_timer_handler(unsigned long dummy
)
426 * We cannot touch the request queue or the timer
427 * flag without holding the io_request_lock.
429 spin_lock_irqsave(&io_request_lock
,flags
);
432 * Clear the timer started flag so that
433 * the timer can be queued again.
435 i2ob_timer_started
= 0;
438 * Restart any requests.
/* NOTE(review): the actual call that restarts the queue sits in an
   elided line between here and the unlock - confirm vs original. */
445 spin_unlock_irqrestore(&io_request_lock
,flags
);
449 * The I2O block driver is listed as one of those that pulls the
450 * front entry off the queue before processing it. This is important
451 * to remember here. If we drop the io lock then CURRENT will change
452 * on us. We must unlink CURRENT in this routine before we return, if
/*
 * Strategy routine, called with io_request_lock held.
 * NOTE(review): garbled extract - the main dispatch loop, the message
 * allocation and several branch bodies sit in elided lines.
 */
456 static void i2ob_request(request_queue_t
* q
)
459 struct i2ob_request
*ireq
;
461 struct i2ob_device
*dev
;
466 * On an IRQ completion if there is an inactive
467 * request on the queue head it means it isnt yet
470 if(CURRENT
->rq_status
== RQ_INACTIVE
)
474 * Queue depths probably belong with some kind of
475 * generic IOP commit control. Certainly its not right
/* Flow control: never post more than MAX_I2OB_DEPTH outstanding. */
478 if(atomic_read(&queue_depth
)>=MAX_I2OB_DEPTH
)
/* Unit = minor with the low 4 partition bits masked off below. */
482 unit
= MINOR(req
->rq_dev
);
483 dev
= &i2ob_dev
[(unit
&0xF0)];
490 * See if the timer has already been queued.
492 if (!i2ob_timer_started
)
494 printk(KERN_ERR
"i2ob: starting timer\n");
497 * Set the timer_started flag to insure
498 * that the timer is only queued once.
499 * Queing it more than once will corrupt
502 i2ob_timer_started
= 1;
505 * Set up the timer to expire in
/* Half a second from now (HZ >> 1 jiffies). */
508 i2ob_timer
.expires
= jiffies
+ (HZ
>> 1);
514 add_timer(&i2ob_timer
);
/* Unlink CURRENT before any lock drop, per the comment above. */
518 CURRENT
= CURRENT
->next
;
/* Take a free request slot off the pool. */
523 i2ob_qhead
= ireq
->next
;
526 i2ob_send(m
, dev
, ireq
, i2ob
[unit
].start_sect
, (unit
&0xF0));
/*
 * SCSI-CAM geometry mapping for the HDIO_GETGEO ioctl.
 * Derives a plausible cylinders/heads/sectors triple from a raw
 * capacity given in 512-byte sectors.
 * Duplicated with SCSI - this should be moved into somewhere common.
 */
static void i2o_block_biosparam(
	unsigned long capacity,
	unsigned short *cyls,
	unsigned char *hds,
	unsigned char *secs )
{
	unsigned long heads, sectors, cylinders, temp;

	cylinders = 1024L;		/* Set number of cylinders to max */
	sectors = 62L;			/* Maximize sectors per track */

	temp = cylinders * sectors;	/* Compute divisor for heads */
	heads = capacity / temp;	/* Compute value for number of heads */
	if (capacity % temp) {		/* If no remainder, done! */
		heads++;		/* Else, increment number of heads */
		temp = cylinders * heads;	/* Compute divisor for sectors */
		sectors = capacity / temp;	/* Compute value for sectors per track */
		if (capacity % temp) {	/* If no remainder, done! */
			sectors++;	/* Else, increment number of sectors */
			temp = heads * sectors;		/* Compute divisor for cylinders */
			cylinders = capacity / temp;	/* Compute number of cylinders */
		}
	}
	/* if something went wrong, then apparently we have to return
	   a geometry with more than 1024 cylinders */
	if (cylinders == 0 || heads > 255 || sectors > 63 || cylinders > 1023)
	{
		unsigned long temp_cyl;

		heads = 64;
		sectors = 32;
		temp_cyl = capacity / (heads * sectors);
		if (temp_cyl > 1024)
		{
			heads = 255;
			sectors = 63;
		}
		cylinders = capacity / (heads * sectors);
	}
	*cyls = (unsigned int) cylinders;	/* Stuff return values */
	*secs = (unsigned int) sectors;
	*hds  = (unsigned int) heads;
}
582 * Rescan the partition tables
/*
 * Re-read the partition table of one unit. Refuses if more than
 * maxu+1 users hold the device open. NOTE(review): garbled extract -
 * return statements, loop braces and locals are in elided lines.
 */
585 static int do_i2ob_revalidate(kdev_t dev
, int maxu
)
587 int minor
=MINOR(dev
);
/* Busy check: take a temporary reference, back off if over-used. */
592 i2ob_dev
[minor
].refcnt
++;
593 if(i2ob_dev
[minor
].refcnt
>maxu
+1)
595 i2ob_dev
[minor
].refcnt
--;
/* Invalidate all 16 partition minors of this unit. */
599 for( i
= 15; i
>=0 ; i
--)
602 kdev_t d
= MKDEV(MAJOR_NR
, m
);
603 struct super_block
*sb
= get_super(d
);
607 invalidate_inodes(sb
);
608 invalidate_buffers(d
);
609 i2ob_gendisk
.part
[m
].start_sect
= 0;
610 i2ob_gendisk
.part
[m
].nr_sects
= 0;
614 * Do a physical check and then reconfigure
617 i2ob_install_device(i2ob_dev
[minor
].controller
, i2ob_dev
[minor
].i2odev
,
/* Drop the temporary reference taken above. */
619 i2ob_dev
[minor
].refcnt
--;
624 * Issue device specific ioctl calls.
/*
 * Block device ioctl entry point. Handles BLKGETSIZE, HDIO_GETGEO and
 * revalidate, falling through to blk_ioctl() for generic commands.
 * NOTE(review): garbled extract - the switch scaffolding and case
 * labels are in elided lines.
 */
627 static int i2ob_ioctl(struct inode
*inode
, struct file
*file
,
628 unsigned int cmd
, unsigned long arg
)
630 struct i2ob_device
*dev
;
633 /* Anyone capable of this syscall can do *real bad* things */
635 if (!capable(CAP_SYS_ADMIN
))
639 minor
= MINOR(inode
->i_rdev
);
640 if (minor
>= (MAX_I2OB
<<4))
643 dev
= &i2ob_dev
[minor
];
/* BLKGETSIZE: report partition size in 512-byte sectors. */
646 return put_user(i2ob
[minor
].nr_sects
, (long *) arg
);
/* HDIO_GETGEO: synthesize a CHS geometry from the capacity. */
650 struct hd_geometry g
;
652 i2o_block_biosparam(i2ob_sizes
[u
]<<1,
653 &g
.cylinders
, &g
.heads
, &g
.sectors
);
654 g
.start
= i2ob
[minor
].start_sect
;
655 return copy_to_user((void *)arg
,&g
, sizeof(g
))?-EFAULT
:0;
659 if(!capable(CAP_SYS_ADMIN
))
661 return do_i2ob_revalidate(inode
->i_rdev
,1);
/* Everything else goes to the generic block ioctl handler. */
669 return blk_ioctl(inode
->i_rdev
, cmd
, arg
);
677 * Close the block device down
/*
 * Last-close handling: flush the device cache, unlock the media and
 * release our claim. NOTE(review): garbled extract - refcount
 * decrement, msg[] declaration and the last-user test are in elided
 * lines.
 */
680 static int i2ob_release(struct inode
*inode
, struct file
*file
)
682 struct i2ob_device
*dev
;
685 minor
= MINOR(inode
->i_rdev
);
686 if (minor
>= (MAX_I2OB
<<4))
688 dev
= &i2ob_dev
[(minor
&0xF0)];
689 if (dev
->refcnt
<= 0)
690 printk(KERN_ALERT
"i2ob_release: refcount(%d) <= 0\n", dev
->refcnt
);
695 * Flush the onboard cache on unmount
/* done_flag address doubles as the transaction context (m[3]). */
698 int *query_done
= &dev
->done_flag
;
699 msg
[0] = FIVE_WORD_MSG_SIZE
|SGL_OFFSET_0
;
700 msg
[1] = I2O_CMD_BLOCK_CFLUSH
<<24|HOST_TID
<<12|dev
->tid
;
701 msg
[2] = i2ob_context
|0x40000000;
702 msg
[3] = (u32
)query_done
;
704 i2o_post_wait(dev
->controller
, msg
, 20, 2);
/* Second message: unlock the media so it can be ejected. */
708 msg
[0] = FIVE_WORD_MSG_SIZE
|SGL_OFFSET_0
;
709 msg
[1] = I2O_CMD_BLOCK_MUNLOCK
<<24|HOST_TID
<<12|dev
->tid
;
710 msg
[2] = i2ob_context
|0x40000000;
711 msg
[3] = (u32
)query_done
;
713 i2o_post_wait(dev
->controller
, msg
, 20, 2);
716 * Now unclaim the device.
718 if (i2o_release_device(dev
->i2odev
, &i2o_block_handler
, I2O_CLAIM_PRIMARY
)<0)
719 printk(KERN_ERR
"i2ob_release: controller rejected unclaim.\n");
727 * Open the block device.
/*
 * First-open handling: claim the device, then mount and lock the
 * media. NOTE(review): garbled extract - refcount increment, msg[]
 * declaration and the first-user test are in elided lines.
 */
730 static int i2ob_open(struct inode
*inode
, struct file
*file
)
733 struct i2ob_device
*dev
;
737 minor
= MINOR(inode
->i_rdev
);
738 if (minor
>= MAX_I2OB
<<4)
740 dev
=&i2ob_dev
[(minor
&0xF0)];
748 if(i2o_claim_device(dev
->i2odev
, &i2o_block_handler
, I2O_CLAIM_PRIMARY
)<0)
/* done_flag address doubles as the transaction context (m[3]). */
754 query_done
= &dev
->done_flag
;
756 * Mount the media if needed. Note that we don't use
757 * the lock bit. Since we have to issue a lock if it
758 * refuses a mount (quite possible) then we might as
759 * well just send two messages out.
761 msg
[0] = FIVE_WORD_MSG_SIZE
|SGL_OFFSET_0
;
762 msg
[1] = I2O_CMD_BLOCK_MMOUNT
<<24|HOST_TID
<<12|dev
->tid
;
763 msg
[2] = i2ob_context
|0x40000000;
764 msg
[3] = (u32
)query_done
;
767 i2o_post_wait(dev
->controller
, msg
, 24, 2);
/* Second message: lock the media against manual eject while open. */
771 msg
[0] = FIVE_WORD_MSG_SIZE
|SGL_OFFSET_0
;
772 msg
[1] = I2O_CMD_BLOCK_MLOCK
<<24|HOST_TID
<<12|dev
->tid
;
773 msg
[2] = i2ob_context
|0x40000000;
774 msg
[3] = (u32
)query_done
;
776 i2o_post_wait(dev
->controller
, msg
, 20, 2);
783 * Issue a device query
786 static int i2ob_query_device(struct i2ob_device
*dev
, int table
,
787 int field
, void *buf
, int buflen
)
789 return i2o_query_scalar(dev
->controller
, dev
->tid
,
790 table
, field
, buf
, buflen
);
795 * Install the I2O block device we found.
/*
 * Query geometry/type/flags from the IOP, fill the block-layer tables
 * for this unit and register its partitions. NOTE(review): garbled
 * extract - locals, the type switch scaffolding and the return are in
 * elided lines.
 */
798 static int i2ob_install_device(struct i2o_controller
*c
, struct i2o_device
*d
, int unit
)
805 struct i2ob_device
*dev
=&i2ob_dev
[unit
];
809 * Ask for the current media data. If that isn't supported
810 * then we ask for the device capacity data
813 if(i2ob_query_device(dev
, 0x0004, 1, &blocksize
, 4) != 0
814 || i2ob_query_device(dev
, 0x0004, 0, &size
, 8) !=0 )
816 i2ob_query_device(dev
, 0x0000, 3, &blocksize
, 4);
817 i2ob_query_device(dev
, 0x0000, 4, &size
, 8);
/* Group 0x0000 fields 5/6: device capability flags and status. */
820 i2ob_query_device(dev
, 0x0000, 5, &flags
, 4);
821 i2ob_query_device(dev
, 0x0000, 6, &status
, 4);
/* blk_size[] wants KiB (size>>10); hd_struct wants sectors (>>9). */
822 i2ob_sizes
[unit
] = (int)(size
>>10);
823 i2ob_hardsizes
[unit
] = blocksize
;
825 limit
=4096; /* 8 deep scatter gather */
827 printk("Byte limit is %d.\n", limit
);
/* Apply the per-request byte limit to all 16 minors of the unit. */
829 for(i
=unit
;i
<=unit
+15;i
++)
830 i2ob_max_sectors
[i
]=(limit
>>9);
832 i2ob
[unit
].nr_sects
= (int)(size
>>9);
834 i2ob_query_device(dev
, 0x0000, 0, &type
, 1);
836 sprintf(d
->dev_name
, "%s%c", i2ob_gendisk
.major_name
, 'a' + (unit
>>4));
838 printk("%s: ", d
->dev_name
);
843 case 0: printk("Disk Storage");break;
844 case 4: printk("WORM");break;
845 case 5: printk("CD-ROM");break;
846 case 7: printk("Optical device");break;
848 printk("Type %d", type
);
/* Capability bit set but status bit clear => media not present. */
850 if(((flags
& (1<<3)) && !(status
& (1<<3))) ||
851 ((flags
& (1<<4)) && !(status
& (1<<4))))
853 printk(" Not loaded.\n");
856 printk(" %dMb, %d byte sectors",
857 (int)(size
>>20), blocksize
);
/* Group 0x0003 field 0: onboard cache size (KiB). */
861 i2ob_query_device(dev
, 0x0003, 0, &cachesize
, 4);
864 printk(", %dMb cache", cachesize
>>10);
866 printk(", %dKb cache", cachesize
);
869 printk("%s: Maximum sectors/read set to %d.\n",
870 d
->dev_name
, i2ob_max_sectors
[unit
]);
871 grok_partitions(&i2ob_gendisk
, unit
>>4, 1<<4, (long)(size
>>9));
/*
 * Walk every controller's device list and install each unclaimed
 * random-block-storage device as a unit. NOTE(review): garbled
 * extract - the unit counter bookkeeping, continue statements and
 * loop braces are in elided lines.
 */
875 static void i2ob_probe(void)
881 for(i
=0; i
< MAX_I2O_CONTROLLERS
; i
++)
883 struct i2o_controller
*c
=i2o_find_controller(i
);
884 struct i2o_device
*d
;
889 for(d
=c
->devices
;d
!=NULL
;d
=d
->next
)
/* Only block-class devices that no other OSM has claimed. */
891 if(d
->lct_data
->class_id
!=I2O_CLASS_RANDOM_BLOCK_STORAGE
)
894 if(d
->lct_data
->user_tid
!= 0xFFF)
900 * Get the device and fill in the
901 * Tid and controller.
903 struct i2ob_device
*dev
=&i2ob_dev
[unit
];
906 dev
->tid
= d
->lct_data
->tid
;
909 * Insure the device can be claimed
910 * before installing it.
912 if(i2o_claim_device(dev
->i2odev
, &i2o_block_handler
, I2O_CLAIM_PRIMARY
)==0)
914 printk(KERN_INFO
"Claimed Dev %p Tid %d Unit %d\n",dev
,dev
->tid
,unit
);
915 i2ob_install_device(c
,d
,unit
);
919 * Now that the device has been
920 * installed, unclaim it so that
921 * it can be claimed by either
922 * the block or scsi driver.
924 if(i2o_release_device(dev
->i2odev
, &i2o_block_handler
, I2O_CLAIM_PRIMARY
))
925 printk(KERN_INFO
"Could not unclaim Dev %p Tid %d\n",dev
,dev
->tid
);
928 printk(KERN_INFO
"TID %d not claimed\n",dev
->tid
);
933 printk("i2o_block: too many device, registering only %d.\n", unit
>>4);
936 i2o_unlock_controller(c
);
942 * Have we seen a media change ?
945 static int i2ob_media_change(kdev_t dev
)
949 if(i2ob_media_change_flag
[i
])
951 i2ob_media_change_flag
[i
]=0;
957 static int i2ob_revalidate(kdev_t dev
)
959 return do_i2ob_revalidate(dev
, 0);
/*
 * Reboot notifier: on restart/halt/power-off, flush each claimed
 * unit's onboard cache and unlock its media. NOTE(review): garbled
 * extract - early return, msg[] declaration and loop braces are in
 * elided lines.
 */
962 static int i2ob_reboot_event(struct notifier_block
*n
, unsigned long code
, void *p
)
966 if(code
!= SYS_RESTART
&& code
!= SYS_HALT
&& code
!= SYS_POWER_OFF
)
/* One device per unit: minors step by 16, hence i<<4. */
968 for(i
=0;i
<MAX_I2OB
;i
++)
970 struct i2ob_device
*dev
=&i2ob_dev
[(i
<<4)];
975 * Flush the onboard cache on power down
976 * also unlock the media
979 int *query_done
= &dev
->done_flag
;
980 msg
[0] = FIVE_WORD_MSG_SIZE
|SGL_OFFSET_0
;
981 msg
[1] = I2O_CMD_BLOCK_CFLUSH
<<24|HOST_TID
<<12|dev
->tid
;
982 msg
[2] = i2ob_context
|0x40000000;
983 msg
[3] = (u32
)query_done
;
985 i2o_post_wait(dev
->controller
, msg
, 20, 2);
/* Second message: release any media lock we hold. */
989 msg
[0] = FIVE_WORD_MSG_SIZE
|SGL_OFFSET_0
;
990 msg
[1] = I2O_CMD_BLOCK_MUNLOCK
<<24|HOST_TID
<<12|dev
->tid
;
991 msg
[2] = i2ob_context
|0x40000000;
992 msg
[3] = (u32
)query_done
;
994 i2o_post_wait(dev
->controller
, msg
, 20, 2);
/* Reboot hook so caches get flushed on shutdown.
   NOTE(review): initializer fields are in elided lines. */
1000 struct notifier_block i2ob_reboot_notifier
=
/* Block device operations table wired into register_blkdev(). */
1007 static struct block_device_operations i2ob_fops
=
1010 release
: i2ob_release
,
1012 check_media_change
: i2ob_media_change
,
1013 revalidate
: i2ob_revalidate
,
/* Gendisk describing our major; partition arrays fill in at probe. */
1016 static struct gendisk i2ob_gendisk
=
1030 * And here should be modules and kernel interface
1031 * (Just smiley confuses emacs :-)
1035 #define i2o_block_init init_module
/*
 * Module init: register the block major, fill the block-layer tables,
 * build the free request-slot pool, set up the retry timer, register
 * the OSM handler and probe the controllers. NOTE(review): garbled
 * extract - return statements, gendisk registration and loop braces
 * are in elided lines.
 */
1038 int i2o_block_init(void)
1042 printk(KERN_INFO
"I2O Block Storage OSM v0.07. (C) 1999 Red Hat Software.\n");
1045 * Register the block device interfaces
1048 if (register_blkdev(MAJOR_NR
, "i2o_block", &i2ob_fops
)) {
1049 printk("Unable to get major number %d for i2o_block\n",
1054 printk("i2o_block: registered device at major %d\n", MAJOR_NR
);
1058 * Now fill in the boiler plate
/* Point the global block-layer tables at our per-minor arrays. */
1061 blksize_size
[MAJOR_NR
] = i2ob_blksizes
;
1062 hardsect_size
[MAJOR_NR
] = i2ob_hardsizes
;
1063 blk_size
[MAJOR_NR
] = i2ob_sizes
;
1064 max_sectors
[MAJOR_NR
] = i2ob_max_sectors
;
1066 blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR
), i2ob_request
);
1067 blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR
), 0);
/* Defaults for every minor until a real device is installed. */
1069 for (i
= 0; i
< MAX_I2OB
<< 4; i
++) {
1070 i2ob_dev
[i
].refcnt
= 0;
1071 i2ob_dev
[i
].flags
= 0;
1072 i2ob_dev
[i
].controller
= NULL
;
1073 i2ob_dev
[i
].i2odev
= NULL
;
1074 i2ob_dev
[i
].tid
= 0;
1075 i2ob_dev
[i
].head
= NULL
;
1076 i2ob_dev
[i
].tail
= NULL
;
1077 i2ob_blksizes
[i
] = 1024;
1078 i2ob_max_sectors
[i
] = 2;
/* Chain the request slots into a singly-linked free list. */
1085 for(i
= 0; i
< MAX_I2OB_DEPTH
; i
++)
1087 i2ob_queue
[i
].next
= &i2ob_queue
[i
+1];
1088 i2ob_queue
[i
].num
= i
;
1091 /* Queue is MAX_I2OB + 1... */
1092 i2ob_queue
[i
].next
= NULL
;
1093 i2ob_qhead
= &i2ob_queue
[0];
/* Retry timer; armed on demand by i2ob_request(). */
1099 init_timer(&i2ob_timer
);
1100 i2ob_timer
.function
= i2ob_timer_handler
;
1101 i2ob_timer
.data
= 0;
1104 * Register the OSM handler as we will need this to probe for
1105 * drives, geometry and other goodies.
1108 if(i2o_install_handler(&i2o_block_handler
)<0)
/* Roll back the blkdev registration on OSM failure. */
1110 unregister_blkdev(MAJOR_NR
, "i2o_block");
1111 printk(KERN_ERR
"i2o_block: unable to register OSM.\n");
1114 i2ob_context
= i2o_block_handler
.context
;
1117 * Finally see what is actually plugged in to our controllers
1122 register_reboot_notifier(&i2ob_reboot_notifier
);
1129 MODULE_AUTHOR("Red Hat Software");
1130 MODULE_DESCRIPTION("I2O Block Device OSM");
1132 void cleanup_module(void)
1134 struct gendisk
**gdp
;
1136 unregister_reboot_notifier(&i2ob_reboot_notifier
);
1142 i2o_remove_handler(&i2o_block_handler
);
1145 * Return the block device
1147 if (unregister_blkdev(MAJOR_NR
, "i2o_block") != 0)
1148 printk("i2o_block: cleanup_module failed\n");
1151 * Why isnt register/unregister gendisk in the kernel ???
1154 for (gdp
= &gendisk_head
; *gdp
; gdp
= &((*gdp
)->next
))
1155 if (*gdp
== &i2ob_gendisk
)