/* sunvdc.c: Sun LDOM Virtual Disk Client.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#define DRV_MODULE_NAME		"sunvdc"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"June 25, 2007"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define VDC_TX_RING_SIZE	256

#define WAITING_FOR_LINK_UP	0x01
#define WAITING_FOR_TX_SPACE	0x02
#define WAITING_FOR_GEN_CMD	0x04
#define WAITING_FOR_ANY		-1

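/* These values are stored in vio->cmp->waiting_for.  vdc_finish() only
 * completes the pending vio_completion when the event it is reporting
 * matches the stored value; WAITING_FOR_ANY matches any pending waiter.
 */
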
struct vdc_req_entry {
	struct request		*req;
};

struct vdc_port {
	struct vio_driver_state	vio;

	struct gendisk		*disk;

	struct vdc_completion	*cmp;

	u64			req_id;
	struct vdc_req_entry	rq_arr[VDC_TX_RING_SIZE];

	unsigned long		ring_cookies;

	u64			max_xfer_size;
	u32			vdisk_block_size;

	/* The server fills these in for us in the disk attribute
	 * ACK packet.
	 */
	u64			operations;
	u32			vdisk_size;
	u8			vdisk_type;

	struct vio_disk_geom	geom;
	struct vio_disk_vtoc	label;

	u64			dev_no;
	char			disk_name[32];

	struct list_head	list;
};

static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
	return container_of(vio, struct vdc_port, vio);
}

struct vdc {
	/* Protects port_list. */
	spinlock_t		lock;

	struct list_head	port_list;
};

/* Ordered from largest major to lowest. */
static struct vio_version vdc_versions[] = {
	{ .major = 1, .minor = 0 },
};

#define VDCBLK_NAME	"vdisk"
static int vdc_major;
#define PARTITION_SHIFT	3

static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}

static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct gendisk *disk = bdev->bd_disk;
	struct vdc_port *port = disk->private_data;

	geo->heads = (u8) port->geom.num_hd;
	geo->sectors = (u8) port->geom.num_sec;
	geo->cylinders = port->geom.num_cyl;

	return 0;
}

static struct block_device_operations vdc_fops = {
	.owner		= THIS_MODULE,
	.getgeo		= vdc_getgeo,
};

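/* Wake up whoever is sleeping on port->vio.cmp.  Callers pass the event
 * they are reporting (link up, generic command completion, or
 * WAITING_FOR_ANY on error paths) so unrelated waiters are not woken.
 */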
static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
{
	if (vio->cmp &&
	    (waiting_for == -1 ||
	     vio->cmp->waiting_for == waiting_for)) {
		vio->cmp->err = err;
		complete(&vio->cmp->com);
		vio->cmp = NULL;
	}
}

static void vdc_handshake_complete(struct vio_driver_state *vio)
{
	vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
}

static int vdc_handle_unknown(struct vdc_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	printk(KERN_ERR PFX "Resetting connection.\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}

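/* Attribute-exchange step of the VIO handshake: we advertise our block
 * size and maximum transfer size, and the service domain ACKs with a
 * description of the exported virtual disk (size, type, block size),
 * which vdc_handle_attr() records in the vdc_port.
 */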
static int vdc_send_attr(struct vio_driver_state *vio)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info pkt;

	memset(&pkt, 0, sizeof(pkt));

	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);

	pkt.xfer_mode = VIO_DRING_MODE;
	pkt.vdisk_block_size = port->vdisk_block_size;
	pkt.max_xfer_size = port->max_xfer_size;

	viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%lu]\n",
	       pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);

	return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
}

static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info *pkt = arg;

	viodbg(HS, "GOT ATTR stype[0x%x] ops[%lx] disk_size[%lu] disk_type[%x] "
	       "xfer_mode[0x%x] blksz[%u] max_xfer[%lu]\n",
	       pkt->tag.stype, pkt->operations,
	       pkt->vdisk_size, pkt->vdisk_type,
	       pkt->xfer_mode, pkt->vdisk_block_size,
	       pkt->max_xfer_size);

	if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
		switch (pkt->vdisk_type) {
		case VD_DISK_TYPE_DISK:
		case VD_DISK_TYPE_SLICE:
			break;

		default:
			printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n",
			       vio->name, pkt->vdisk_type);
			return -ECONNRESET;
		}

		if (pkt->vdisk_block_size > port->vdisk_block_size) {
			printk(KERN_ERR PFX "%s: BLOCK size increased "
			       "%u --> %u\n",
			       vio->name,
			       port->vdisk_block_size, pkt->vdisk_block_size);
			return -ECONNRESET;
		}

		port->operations = pkt->operations;
		port->vdisk_size = pkt->vdisk_size;
		port->vdisk_type = pkt->vdisk_type;
		if (pkt->max_xfer_size < port->max_xfer_size)
			port->max_xfer_size = pkt->max_xfer_size;
		port->vdisk_block_size = pkt->vdisk_block_size;

		return 0;
	} else {
		printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);

		return -ECONNRESET;
	}
}

static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
{
	int err = desc->status;

	vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}

static void vdc_end_request(struct request *req, int uptodate, int num_sectors)
{
	if (end_that_request_first(req, uptodate, num_sectors))
		return;
	add_disk_randomness(req->rq_disk);
	end_that_request_last(req, uptodate);
}

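/* Retire one TX ring descriptor.  Normal block requests are completed
 * through vdc_end_request(); descriptors with no struct request attached
 * belong to generic_request() and are finished via vdc_end_special().
 */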
static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
			unsigned int index)
{
	struct vio_disk_desc *desc = vio_dring_entry(dr, index);
	struct vdc_req_entry *rqe = &port->rq_arr[index];
	struct request *req;

	if (unlikely(desc->hdr.state != VIO_DESC_DONE))
		return;

	ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
	desc->hdr.state = VIO_DESC_FREE;
	dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1);

	req = rqe->req;
	if (req == NULL) {
		vdc_end_special(port, desc);
		return;
	}

	rqe->req = NULL;

	vdc_end_request(req, !desc->status, desc->size >> 9);

	if (blk_queue_stopped(port->disk->queue))
		blk_start_queue(port->disk->queue);
}

static int vdc_ack(struct vdc_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;

	if (unlikely(pkt->dring_ident != dr->ident ||
		     pkt->start_idx != pkt->end_idx ||
		     pkt->start_idx >= VDC_TX_RING_SIZE))
		return 0;

	vdc_end_one(port, dr, pkt->start_idx);

	return 0;
}

static int vdc_nack(struct vdc_port *port, void *msgbuf)
{
	/* XXX Implement me XXX */
	return 0;
}

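/* LDC event callback.  Link state changes are handed to the generic VIO
 * layer; DATA_READY events drain the channel, dispatching data packets
 * (ACKs/NACKs for our descriptors) and control packets until ldc_read()
 * returns no more data.
 */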
static void vdc_event(void *arg, int event)
{
	struct vdc_port *port = arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
				err = vdc_ack(port, &msgbuf);
			else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
				err = vdc_nack(port, &msgbuf);
			else
				err = vdc_handle_unknown(port, &msgbuf);
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			err = vio_control_pkt_engine(vio, &msgbuf);
		} else {
			err = vdc_handle_unknown(port, &msgbuf);
		}
		if (err < 0)
			break;
	}
	if (err < 0)
		vdc_finish(&port->vio, err, WAITING_FOR_ANY);
	spin_unlock_irqrestore(&vio->lock, flags);
}

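/* Tell the server that new descriptors are ready, retrying with an
 * exponential backoff (capped at 128 microseconds per attempt) while the
 * LDC channel keeps returning -EAGAIN.  Called with the vio lock held.
 */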
static int __vdc_tx_trigger(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_INFO,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident		= dr->ident,
		.start_idx		= dr->prod,
		.end_idx		= dr->prod,
	};
	int err, delay;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
	} while (err == -EAGAIN);

	return err;
}

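/* Map one block request into the next free TX descriptor: build a
 * scatterlist for it, export the pages to the server with ldc_map_sg(),
 * fill in the descriptor and kick the ring.  A negative return lets the
 * caller fail the request.
 */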
static int __send_request(struct request *req)
{
	struct vdc_port *port = req->rq_disk->private_data;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct scatterlist sg[port->ring_cookies];
	struct vdc_req_entry *rqe;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	int nsg, err, i;
	u64 len;
	u8 op;

	map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	if (rq_data_dir(req) == READ) {
		map_perm |= LDC_MAP_W;
		op = VD_OP_BREAD;
	} else {
		map_perm |= LDC_MAP_R;
		op = VD_OP_BWRITE;
	}

	nsg = blk_rq_map_sg(req->q, req, sg);

	len = 0;
	for (i = 0; i < nsg; i++)
		len += sg[i].length;

	if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
		blk_stop_queue(port->disk->queue);
		return -ENOMEM;
	}

	desc = vio_dring_cur(dr);

	err = ldc_map_sg(port->vio.lp, sg, nsg,
			 desc->cookies, port->ring_cookies,
			 map_perm);
	if (err < 0) {
		printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);
		return err;
	}

	rqe = &port->rq_arr[dr->prod];
	rqe->req = req;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	if (port->vdisk_type == VD_DISK_TYPE_DISK) {
		desc->slice = 0xff;
	} else {
		desc->slice = 0;
	}
	desc->status = ~0;
	desc->offset = (req->sector << 9) / port->vdisk_block_size;
	desc->size = len;
	desc->ncookies = err;

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err < 0) {
		printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
	} else {
		port->req_id++;
		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
	}

	return err;
}

static void do_vdc_request(request_queue_t *q)
{
	while (1) {
		struct request *req = elv_next_request(q);

		if (!req)
			break;

		blkdev_dequeue_request(req);
		if (__send_request(req) < 0)
			vdc_end_request(req, 0, req->hard_nr_sectors);
	}
}

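/* Synchronous "special" operations (VTOC, geometry and friends).  A
 * bounce buffer is mapped into the ring, a single descriptor is
 * submitted, and the caller sleeps on a vio_completion until
 * vdc_end_special() reports the server's status.
 */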
static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
{
	struct vio_dring_state *dr;
	struct vio_completion comp;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	unsigned long flags;
	int op_len, err;
	void *req_buf;

	if (!(((u64)1 << ((u64)op - 1)) & port->operations))
		return -EOPNOTSUPP;

	switch (op) {
	case VD_OP_GET_WCE:
		op_len = sizeof(u32);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_WCE:
		op_len = sizeof(u32);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_GET_VTOC:
		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_VTOC:
		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_GET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_SCSICMD:
		op_len = 16;
		map_perm = LDC_MAP_RW;
		break;

	case VD_OP_GET_DEVID:
		op_len = sizeof(struct vio_disk_devid);
		map_perm = LDC_MAP_W;
		break;

	default:
		return -EOPNOTSUPP;
	}

	map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	op_len = (op_len + 7) & ~7;
	req_buf = kzalloc(op_len, GFP_KERNEL);
	if (!req_buf)
		return -ENOMEM;

	if (len > op_len)
		len = op_len;

	if (map_perm & LDC_MAP_R)
		memcpy(req_buf, buf, len);

	spin_lock_irqsave(&port->vio.lock, flags);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	/* XXX If we want to use this code generically we have to
	 * XXX handle TX ring exhaustion etc.
	 */
	desc = vio_dring_cur(dr);

	err = ldc_map_single(port->vio.lp, req_buf, op_len,
			     desc->cookies, port->ring_cookies,
			     map_perm);
	if (err < 0) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		kfree(req_buf);
		return err;
	}

	init_completion(&comp.com);
	comp.waiting_for = WAITING_FOR_GEN_CMD;
	port->vio.cmp = &comp;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	desc->slice = 0;
	desc->status = ~0;
	desc->offset = 0;
	desc->size = op_len;
	desc->ncookies = err;

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err >= 0) {
		port->req_id++;
		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
		spin_unlock_irqrestore(&port->vio.lock, flags);

		wait_for_completion(&comp.com);
		err = comp.err;
	} else {
		port->vio.cmp = NULL;
		spin_unlock_irqrestore(&port->vio.lock, flags);
	}

	if (map_perm & LDC_MAP_W)
		memcpy(buf, req_buf, len);

	kfree(req_buf);

	return err;
}

static int __devinit vdc_alloc_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	unsigned long len, entry_size;
	int ncookies;
	void *dring;

	entry_size = sizeof(struct vio_disk_desc) +
		(sizeof(struct ldc_trans_cookie) * port->ring_cookies);
	len = (VDC_TX_RING_SIZE * entry_size);

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring))
		return PTR_ERR(dring);

	dr->base = dring;
	dr->entry_size = entry_size;
	dr->num_entries = VDC_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	dr->pending = VDC_TX_RING_SIZE;
	dr->ncookies = ncookies;

	return 0;
}

static void vdc_free_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	if (dr->base) {
		ldc_free_exp_dring(port->vio.lp, dr->base,
				   (dr->entry_size * dr->num_entries),
				   dr->cookies, dr->ncookies);
		dr->base = NULL;
		dr->entry_size = 0;
		dr->num_entries = 0;
		dr->pending = 0;
		dr->ncookies = 0;
	}
}

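/* Bring a port up: wait for the VIO handshake to complete, fetch the
 * disk label and geometry via generic_request(), then set up a request
 * queue and gendisk for the virtual disk.
 */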
static int probe_disk(struct vdc_port *port)
{
	struct vio_completion comp;
	struct request_queue *q;
	struct gendisk *g;
	int err;

	init_completion(&comp.com);
	comp.err = 0;
	comp.waiting_for = WAITING_FOR_LINK_UP;
	port->vio.cmp = &comp;

	vio_port_up(&port->vio);

	wait_for_completion(&comp.com);
	if (comp.err)
		return comp.err;

	err = generic_request(port, VD_OP_GET_VTOC,
			      &port->label, sizeof(port->label));
	if (err < 0) {
		printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
		return err;
	}

	err = generic_request(port, VD_OP_GET_DISKGEOM,
			      &port->geom, sizeof(port->geom));
	if (err < 0) {
		printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
		       "error %d\n", err);
		return err;
	}

	port->vdisk_size = ((u64)port->geom.num_cyl *
			    (u64)port->geom.num_hd *
			    (u64)port->geom.num_sec);

	q = blk_init_queue(do_vdc_request, &port->vio.lock);
	if (!q) {
		printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
		       port->vio.name);
		return -ENOMEM;
	}
	g = alloc_disk(1 << PARTITION_SHIFT);
	if (!g) {
		printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
		       port->vio.name);
		blk_cleanup_queue(q);
		return -ENOMEM;
	}

	port->disk = g;

	blk_queue_max_hw_segments(q, port->ring_cookies);
	blk_queue_max_phys_segments(q, port->ring_cookies);
	blk_queue_max_sectors(q, port->max_xfer_size);
	g->major = vdc_major;
	g->first_minor = port->dev_no << PARTITION_SHIFT;
	strcpy(g->disk_name, port->disk_name);

	g->fops = &vdc_fops;
	g->queue = q;
	g->private_data = port;
	g->driverfs_dev = &port->vio.vdev->dev;

	set_capacity(g, port->vdisk_size);

	printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n",
	       g->disk_name,
	       port->vdisk_size, (port->vdisk_size >> (20 - 9)));

	add_disk(g);

	return 0;
}

static struct ldc_channel_config vdc_ldc_cfg = {
	.event		= vdc_event,
	.mode		= LDC_MODE_UNRELIABLE,
};

static struct vio_driver_ops vdc_vio_ops = {
	.send_attr		= vdc_send_attr,
	.handle_attr		= vdc_handle_attr,
	.handshake_complete	= vdc_handshake_complete,
};

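/* Probing happens at two levels: the parent virtual-disk-client device
 * (vdc_probe() below) merely anchors a list of ports, while each port
 * child found in the machine description gets its own LDC channel, TX
 * ring and gendisk via vdc_port_probe().
 */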
static int __devinit vdc_port_probe(struct vio_dev *vdev,
				    const struct vio_device_id *id)
{
	struct mdesc_handle *hp;
	struct vdc_port *port;
	unsigned long flags;
	const u64 *port_id;
	struct vdc *vp;
	int err;

	vp = dev_get_drvdata(vdev->dev.parent);
	if (!vp) {
		printk(KERN_ERR PFX "Cannot find port parent vdc.\n");
		return -ENODEV;
	}

	hp = mdesc_grab();

	err = -ENODEV;
	port_id = mdesc_get_property(hp, vdev->mp, "id", NULL);
	if (!port_id) {
		printk(KERN_ERR PFX "Port lacks id property.\n");
		goto err_out_release_mdesc;
	}
	if ((*port_id << PARTITION_SHIFT) & ~(u64)MINORMASK) {
		printk(KERN_ERR PFX "Port id [%lu] too large.\n", *port_id);
		goto err_out_release_mdesc;
	}

	err = -ENOMEM;
	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port) {
		printk(KERN_ERR PFX "Cannot allocate vdc_port.\n");
		goto err_out_release_mdesc;
	}

	port->dev_no = *port_id;

	if (port->dev_no >= 26)
		snprintf(port->disk_name, sizeof(port->disk_name),
			 VDCBLK_NAME "%c%c",
			 'a' + (port->dev_no / 26) - 1,
			 'a' + (port->dev_no % 26));
	else
		snprintf(port->disk_name, sizeof(port->disk_name),
			 VDCBLK_NAME "%c", 'a' + (port->dev_no % 26));

	err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
			      vdc_versions, ARRAY_SIZE(vdc_versions),
			      &vdc_vio_ops, port->disk_name);
	if (err)
		goto err_out_free_port;

	port->vdisk_block_size = 512;
	port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
	port->ring_cookies = ((port->max_xfer_size *
			       port->vdisk_block_size) / PAGE_SIZE) + 2;

	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
	if (err)
		goto err_out_free_port;

	err = vdc_alloc_tx_ring(port);
	if (err)
		goto err_out_free_ldc;

	err = probe_disk(port);
	if (err)
		goto err_out_free_tx_ring;

	INIT_LIST_HEAD(&port->list);

	spin_lock_irqsave(&vp->lock, flags);
	list_add(&port->list, &vp->port_list);
	spin_unlock_irqrestore(&vp->lock, flags);

	dev_set_drvdata(&vdev->dev, port);

	mdesc_release(hp);

	return 0;

err_out_free_tx_ring:
	vdc_free_tx_ring(port);

err_out_free_ldc:
	vio_ldc_free(&port->vio);

err_out_free_port:
	kfree(port);

err_out_release_mdesc:
	mdesc_release(hp);
	return err;
}

static int vdc_port_remove(struct vio_dev *vdev)
{
	struct vdc_port *port = dev_get_drvdata(&vdev->dev);

	if (port) {
		del_timer_sync(&port->vio.timer);

		vdc_free_tx_ring(port);
		vio_ldc_free(&port->vio);

		dev_set_drvdata(&vdev->dev, NULL);

		kfree(port);
	}
	return 0;
}

static struct vio_device_id vdc_port_match[] = {
	{
		.type = "vdc-port",
	},
	{},
};
MODULE_DEVICE_TABLE(vio, vdc_port_match);

static struct vio_driver vdc_port_driver = {
	.id_table	= vdc_port_match,
	.probe		= vdc_port_probe,
	.remove		= vdc_port_remove,
	.driver		= {
		.name	= "vdc-port",
		.owner	= THIS_MODULE,
	}
};

static int __devinit vdc_probe(struct vio_dev *vdev,
			       const struct vio_device_id *id)
{
	static int vdc_version_printed;
	struct vdc *vp;

	if (vdc_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	vp = kzalloc(sizeof(struct vdc), GFP_KERNEL);
	if (!vp)
		return -ENOMEM;

	spin_lock_init(&vp->lock);

	INIT_LIST_HEAD(&vp->port_list);

	dev_set_drvdata(&vdev->dev, vp);

	return 0;
}

static int vdc_remove(struct vio_dev *vdev)
{
	struct vdc *vp = dev_get_drvdata(&vdev->dev);

	if (vp) {
		kfree(vp);

		dev_set_drvdata(&vdev->dev, NULL);
	}
	return 0;
}

static struct vio_device_id vdc_match[] = {
	{
		.type = "block",
	},
	{},
};
MODULE_DEVICE_TABLE(vio, vdc_match);

static struct vio_driver vdc_driver = {
	.id_table	= vdc_match,
	.probe		= vdc_probe,
	.remove		= vdc_remove,
	.driver		= {
		.name	= "vdc",
		.owner	= THIS_MODULE,
	}
};

static int __init vdc_init(void)
{
	int err;

	err = register_blkdev(0, VDCBLK_NAME);
	if (err < 0)
		goto out_err;

	vdc_major = err;

	err = vio_register_driver(&vdc_driver);
	if (err)
		goto out_unregister_blkdev;

	err = vio_register_driver(&vdc_port_driver);
	if (err)
		goto out_unregister_vdc;

	return 0;

out_unregister_vdc:
	vio_unregister_driver(&vdc_driver);

out_unregister_blkdev:
	unregister_blkdev(vdc_major, VDCBLK_NAME);
	vdc_major = 0;

out_err:
	return err;
}

static void __exit vdc_exit(void)
{
	vio_unregister_driver(&vdc_port_driver);
	vio_unregister_driver(&vdc_driver);
	unregister_blkdev(vdc_major, VDCBLK_NAME);
}

module_init(vdc_init);
module_exit(vdc_exit);