/*
 * Copyright 2011 Marvell. <jyli@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/blkdev.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_eh.h>
#include <linux/uaccess.h>

#include "mvumi.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("jyli@marvell.com");
MODULE_DESCRIPTION("Marvell UMI Driver");
static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9143) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
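/*
 * Firmware tags are managed as a simple LIFO stack of free tag values:
 * tag_get_one() pops from the top of st->stack, tag_release_one() pushes a
 * tag back, and tag_init() seeds the stack with every tag from size-1 down
 * to 0.
 */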
static void tag_init(struct mvumi_tag *st, unsigned short size)
{
	unsigned short i;

	BUG_ON(size != st->size);
	st->top = size;
	for (i = 0; i < size; i++)
		st->stack[i] = size - 1 - i;
}
static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
{
	BUG_ON(st->top <= 0);
	return st->stack[--st->top];
}
static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
							unsigned short tag)
{
	BUG_ON(st->top >= st->size);
	st->stack[st->top++] = tag;
}
static bool tag_is_empty(struct mvumi_tag *st)
{
	return st->top == 0;
}
static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++)
		if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
							addr_array[i])
			pci_iounmap(dev, addr_array[i]);
}
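/*
 * Map every memory BAR of the device. Non-memory BARs get a NULL slot so
 * that addr_array indexes always line up with BAR numbers.
 */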
static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
			addr_array[i] = pci_iomap(dev, i, 0);
			if (!addr_array[i]) {
				dev_err(&dev->dev, "failed to map Bar[%d]\n",
									i);
				mvumi_unmap_pci_addr(dev, addr_array);
				return -ENOMEM;
			}
		} else
			addr_array[i] = NULL;

		dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
	}

	return 0;
}
static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
				enum resource_type type, unsigned int size)
{
	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_KERNEL);

	if (!res) {
		dev_err(&mhba->pdev->dev,
			"Failed to allocate memory for resource manager.\n");
		return NULL;
	}

	switch (type) {
	case RESOURCE_CACHED_MEMORY:
		res->virt_addr = kzalloc(size, GFP_KERNEL);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate memory,size = %d.\n", size);
			kfree(res);
			return NULL;
		}
		break;

	case RESOURCE_UNCACHED_MEMORY:
		size = round_up(size, 8);
		res->virt_addr = pci_alloc_consistent(mhba->pdev, size,
							&res->bus_addr);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate consistent mem,"
				"size = %d.\n", size);
			kfree(res);
			return NULL;
		}
		memset(res->virt_addr, 0, size);
		break;

	default:
		dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
		kfree(res);
		return NULL;
	}

	res->type = type;
	res->size = size;
	INIT_LIST_HEAD(&res->entry);
	list_add_tail(&res->entry, &mhba->res_list);

	return res;
}
static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
{
	struct mvumi_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
		switch (res->type) {
		case RESOURCE_UNCACHED_MEMORY:
			pci_free_consistent(mhba->pdev, res->size,
						res->virt_addr, res->bus_addr);
			break;
		case RESOURCE_CACHED_MEMORY:
			kfree(res->virt_addr);
			break;
		default:
			dev_err(&mhba->pdev->dev,
				"unknown resource type %d\n", res->type);
			break;
		}
		list_del(&res->entry);
		kfree(res);
	}
	mhba->fw_flag &= ~MVUMI_FW_ALLOC;
}
/**
 * mvumi_make_sgl -	Prepares SGL
 * @mhba:		Adapter soft state
 * @scmd:		SCSI command from the mid-layer
 * @sgl_p:		SGL to be filled in
 * @sg_count:		return the number of SG elements
 *
 * If successful, this function returns 0. Otherwise, it returns -1.
 */
static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
					void *sgl_p, unsigned char *sg_count)
{
	struct scatterlist *sg;
	struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
	unsigned int i;
	unsigned int sgnum = scsi_sg_count(scmd);
	dma_addr_t busaddr;

	if (sgnum) {
		sg = scsi_sglist(scmd);
		*sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
					(int) scmd->sc_data_direction);
		if (*sg_count > mhba->max_sge) {
			dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger "
						"than max sg[0x%x].\n",
						*sg_count, mhba->max_sge);
			return -1;
		}
		for (i = 0; i < *sg_count; i++) {
			busaddr = sg_dma_address(&sg[i]);
			m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
			m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
			m_sg->flags = 0;
			m_sg->size = cpu_to_le32(sg_dma_len(&sg[i]));
			if ((i + 1) == *sg_count)
				m_sg->flags |= SGD_EOT;

			m_sg++;
		}
	} else {
		scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
			pci_map_single(mhba->pdev, scsi_sglist(scmd),
				scsi_bufflen(scmd),
				(int) scmd->sc_data_direction)
			: 0;
		busaddr = scmd->SCp.dma_handle;
		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
		m_sg->flags = SGD_EOT;
		m_sg->size = cpu_to_le32(scsi_bufflen(scmd));
		*sg_count = 1;
	}

	return 0;
}
static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
							unsigned int size)
{
	struct mvumi_sgl *m_sg;
	void *virt_addr;
	dma_addr_t phy_addr;

	if (size == 0)
		return 0;

	virt_addr = pci_alloc_consistent(mhba->pdev, size, &phy_addr);
	if (!virt_addr)
		return -1;

	memset(virt_addr, 0, size);

	m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
	cmd->frame->sg_counts = 1;
	cmd->data_buf = virt_addr;

	m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
	m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
	m_sg->flags = SGD_EOT;
	m_sg->size = cpu_to_le32(size);

	return 0;
}
static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
				unsigned int buf_size)
{
	struct mvumi_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd) {
		dev_err(&mhba->pdev->dev, "failed to create an internal cmd\n");
		return NULL;
	}
	INIT_LIST_HEAD(&cmd->queue_pointer);

	cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
	if (!cmd->frame) {
		dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
			" frame,size = %d.\n", mhba->ib_max_size);
		kfree(cmd);
		return NULL;
	}

	if (buf_size) {
		if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
			dev_err(&mhba->pdev->dev, "failed to allocate memory"
						" for internal frame\n");
			kfree(cmd->frame);
			kfree(cmd);
			return NULL;
		}
	} else
		cmd->frame->sg_counts = 0;

	return cmd;
}
static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	struct mvumi_sgl *m_sg;
	unsigned int size;
	dma_addr_t phy_addr;

	if (cmd && cmd->frame) {
		if (cmd->frame->sg_counts) {
			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
			size = m_sg->size;

			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);

			pci_free_consistent(mhba->pdev, size, cmd->data_buf,
								phy_addr);
		}
		kfree(cmd->frame);
		kfree(cmd);
	}
}
/**
 * mvumi_get_cmd -	Get a command from the free pool
 * @mhba:		Adapter soft state
 *
 * Returns a free command from the pool
 */
static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd = NULL;

	if (likely(!list_empty(&mhba->cmd_pool))) {
		cmd = list_entry((&mhba->cmd_pool)->next,
				struct mvumi_cmd, queue_pointer);
		list_del_init(&cmd->queue_pointer);
	} else
		dev_warn(&mhba->pdev->dev, "command pool is empty!\n");

	return cmd;
}
/**
 * mvumi_return_cmd -	Return a cmd to free command pool
 * @mhba:		Adapter soft state
 * @cmd:		Command packet to be returned to free command pool
 */
static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	cmd->scmd = NULL;
	list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
}
/**
 * mvumi_free_cmds -	Free all the cmds in the free cmd pool
 * @mhba:		Adapter soft state
 */
static void mvumi_free_cmds(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd;

	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
							queue_pointer);
		list_del(&cmd->queue_pointer);
		kfree(cmd->frame);
		kfree(cmd);
	}
}
/**
 * mvumi_alloc_cmds -	Allocates the command packets
 * @mhba:		Adapter soft state
 */
static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
{
	int i;
	struct mvumi_cmd *cmd;

	for (i = 0; i < mhba->max_io; i++) {
		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
		if (!cmd)
			goto err_exit;

		INIT_LIST_HEAD(&cmd->queue_pointer);
		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
		cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
		if (!cmd->frame)
			goto err_exit;
	}
	return 0;

err_exit:
	dev_err(&mhba->pdev->dev,
		"failed to allocate memory for cmd[0x%x].\n", i);
	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
						queue_pointer);
		list_del(&cmd->queue_pointer);
		kfree(cmd->frame);
		kfree(cmd);
	}
	return -ENOMEM;
}
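/*
 * Inbound/outbound list slot values carry a toggle bit (CL_POINTER_TOGGLE)
 * alongside the slot number (CL_SLOT_NUM_MASK). The toggle flips each time
 * a circular list wraps, which lets the driver and firmware distinguish a
 * completely full list from an empty one when the read and write slot
 * numbers are equal.
 */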
static int mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
{
	unsigned int ib_rp_reg, cur_ib_entry;

	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
		dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
		return -1;
	}
	ib_rp_reg = ioread32(mhba->mmio + CLA_INB_READ_POINTER);

	if (unlikely(((ib_rp_reg & CL_SLOT_NUM_MASK) ==
			(mhba->ib_cur_slot & CL_SLOT_NUM_MASK)) &&
			((ib_rp_reg & CL_POINTER_TOGGLE) !=
			(mhba->ib_cur_slot & CL_POINTER_TOGGLE)))) {
		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
		return -1;
	}

	cur_ib_entry = mhba->ib_cur_slot & CL_SLOT_NUM_MASK;
	cur_ib_entry++;
	if (cur_ib_entry >= mhba->list_num_io) {
		cur_ib_entry -= mhba->list_num_io;
		mhba->ib_cur_slot ^= CL_POINTER_TOGGLE;
	}
	mhba->ib_cur_slot &= ~CL_SLOT_NUM_MASK;
	mhba->ib_cur_slot |= (cur_ib_entry & CL_SLOT_NUM_MASK);
	*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
	atomic_inc(&mhba->fw_outstanding);

	return 0;
}
static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
{
	iowrite32(0xfff, mhba->ib_shadow);
	iowrite32(mhba->ib_cur_slot, mhba->mmio + CLA_INB_WRITE_POINTER);
}
static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
		unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
{
	unsigned short tag, request_id;

	udelay(1);
	p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
	request_id = p_outb_frame->request_id;
	tag = p_outb_frame->tag;
	if (tag > mhba->tag_pool.size) {
		dev_err(&mhba->pdev->dev, "ob frame data error\n");
		return -1;
	}
	if (mhba->tag_cmd[tag] == NULL) {
		dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
		return -1;
	} else if (mhba->tag_cmd[tag]->request_id != request_id &&
						mhba->request_id_enabled) {
		dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
				"cmd request ID:0x%x\n", request_id,
				mhba->tag_cmd[tag]->request_id);
		return -1;
	}

	return 0;
}
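/*
 * The firmware first copies a completed frame into the outbound list and
 * only then updates the shadow counter in host memory, so spinning until
 * the copy-pointer register and the shadow agree guarantees the entries
 * consumed below hold valid data.
 */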
static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
{
	unsigned int ob_write_reg, ob_write_shadow_reg;
	unsigned int cur_obf, assign_obf_end, i;
	struct mvumi_ob_data *ob_data;
	struct mvumi_rsp_frame *p_outb_frame;

	do {
		ob_write_reg = ioread32(mhba->mmio + CLA_OUTB_COPY_POINTER);
		ob_write_shadow_reg = ioread32(mhba->ob_shadow);
	} while ((ob_write_reg & CL_SLOT_NUM_MASK) != ob_write_shadow_reg);

	cur_obf = mhba->ob_cur_slot & CL_SLOT_NUM_MASK;
	assign_obf_end = ob_write_reg & CL_SLOT_NUM_MASK;

	if ((ob_write_reg & CL_POINTER_TOGGLE) !=
			(mhba->ob_cur_slot & CL_POINTER_TOGGLE)) {
		assign_obf_end += mhba->list_num_io;
	}

	for (i = (assign_obf_end - cur_obf); i != 0; i--) {
		cur_obf++;
		if (cur_obf >= mhba->list_num_io) {
			cur_obf -= mhba->list_num_io;
			mhba->ob_cur_slot ^= CL_POINTER_TOGGLE;
		}

		p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;

		/* Copy pointer may point to entry in outbound list
		 * before entry has valid data
		 */
		if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
				mhba->tag_cmd[p_outb_frame->tag] == NULL ||
				p_outb_frame->request_id !=
				mhba->tag_cmd[p_outb_frame->tag]->request_id))
			if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
				continue;

		if (!list_empty(&mhba->ob_data_list)) {
			ob_data = (struct mvumi_ob_data *)
				list_first_entry(&mhba->ob_data_list,
					struct mvumi_ob_data, list);
			list_del_init(&ob_data->list);
		} else {
			ob_data = NULL;
			if (cur_obf == 0) {
				cur_obf = mhba->list_num_io - 1;
				mhba->ob_cur_slot ^= CL_POINTER_TOGGLE;
			} else
				cur_obf -= 1;
			break;
		}

		memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
		p_outb_frame->tag = 0xff;

		list_add_tail(&ob_data->list, &mhba->free_ob_list);
	}
	mhba->ob_cur_slot &= ~CL_SLOT_NUM_MASK;
	mhba->ob_cur_slot |= (cur_obf & CL_SLOT_NUM_MASK);
	iowrite32(mhba->ob_cur_slot, mhba->mmio + CLA_OUTB_READ_POINTER);
}
static void mvumi_reset(void *regs)
{
	iowrite32(0, regs + CPU_ENPOINTA_MASK_REG);
	if (ioread32(regs + CPU_ARM_TO_PCIEA_MSG1) != HANDSHAKE_DONESTATE)
		return;

	iowrite32(DRBL_SOFT_RESET, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
}
static unsigned char mvumi_start(struct mvumi_hba *mhba);
static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
{
	mhba->fw_state = FW_STATE_ABORT;
	mvumi_reset(mhba->mmio);

	if (mvumi_start(mhba))
		return FAILED;
	else
		return SUCCESS;
}
static int mvumi_host_reset(struct scsi_cmnd *scmd)
{
	struct mvumi_hba *mhba;

	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;

	scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
			scmd->serial_number, scmd->cmnd[0], scmd->retries);

	return mvumi_wait_for_outstanding(mhba);
}
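/*
 * Issue an internal (driver-generated) command and sleep until the firmware
 * completes it or MVUMI_INTERNAL_CMD_WAIT_TIME seconds elapse. cmd->sync_cmd
 * is the in-flight marker: mvumi_complete_internal_cmd() decrements it and
 * wakes int_cmd_wait_q. If it is still set after the wait, the command timed
 * out and its tag and list linkage are torn down here.
 */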
static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	unsigned long flags;

	cmd->cmd_status = REQ_STATUS_PENDING;

	if (atomic_read(&cmd->sync_cmd)) {
		dev_err(&mhba->pdev->dev,
			"last blocked cmd not finished, sync_cmd = %d\n",
						atomic_read(&cmd->sync_cmd));
		BUG_ON(1);
		return -1;
	}
	atomic_inc(&cmd->sync_cmd);
	spin_lock_irqsave(mhba->shost->host_lock, flags);
	mhba->instancet->fire_cmd(mhba, cmd);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);

	wait_event_timeout(mhba->int_cmd_wait_q,
		(cmd->cmd_status != REQ_STATUS_PENDING),
		MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);

	/* command timeout */
	if (atomic_read(&cmd->sync_cmd)) {
		spin_lock_irqsave(mhba->shost->host_lock, flags);
		atomic_dec(&cmd->sync_cmd);
		if (mhba->tag_cmd[cmd->frame->tag]) {
			mhba->tag_cmd[cmd->frame->tag] = 0;
			dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
							cmd->frame->tag);
			tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
		}
		if (!list_empty(&cmd->queue_pointer)) {
			dev_warn(&mhba->pdev->dev,
				"TIMEOUT: internal command was not sent!\n");
			list_del_init(&cmd->queue_pointer);
		} else
			atomic_dec(&mhba->fw_outstanding);

		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
	}
	return 0;
}
static void mvumi_release_fw(struct mvumi_hba *mhba)
{
	mvumi_free_cmds(mhba);
	mvumi_release_mem_resource(mhba);
	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
	kfree(mhba->handshake_page);
	pci_release_regions(mhba->pdev);
}
static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd;
	struct mvumi_msg_frame *frame;
	unsigned char device_id, retry = 0;
	unsigned char bitcount = sizeof(unsigned char) * 8;

	for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
		if (!(mhba->target_map[device_id / bitcount] &
				(1 << (device_id % bitcount))))
			continue;
get_cmd:	cmd = mvumi_create_internal_cmd(mhba, 0);
		if (!cmd) {
			if (retry++ >= 5) {
				dev_err(&mhba->pdev->dev, "failed to get memory"
					" for internal flush cache cmd for "
					"device %d", device_id);
				retry = 0;
				continue;
			} else
				goto get_cmd;
		}
		cmd->scmd = NULL;
		cmd->cmd_status = REQ_STATUS_PENDING;
		atomic_set(&cmd->sync_cmd, 0);
		frame = cmd->frame;
		frame->req_function = CL_FUN_SCSI_CMD;
		frame->device_id = device_id;
		frame->cmd_flag = CMD_FLAG_NON_DATA;
		frame->data_transfer_length = 0;
		frame->cdb_length = MAX_COMMAND_SIZE;
		memset(frame->cdb, 0, MAX_COMMAND_SIZE);
		frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
		frame->cdb[2] = CDB_CORE_SHUTDOWN;

		mvumi_issue_blocked_cmd(mhba, cmd);
		if (cmd->cmd_status != SAM_STAT_GOOD) {
			dev_err(&mhba->pdev->dev,
				"device %d flush cache failed, status=0x%x.\n",
				device_id, cmd->cmd_status);
		}

		mvumi_delete_internal_cmd(mhba, cmd);
	}
	return 0;
}
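/*
 * Handshake pages are protected by a simple XOR checksum over the frame
 * content; the same helper both generates and verifies it.
 */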
static unsigned char
mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
						unsigned short len)
{
	unsigned char *ptr;
	unsigned char ret = 0, i;

	ptr = (unsigned char *) p_header->frame_content;
	for (i = 0; i < len; i++) {
		ret ^= *ptr;
		ptr++;
	}

	return ret;
}
void mvumi_hs_build_page(struct mvumi_hba *mhba,
				struct mvumi_hs_header *hs_header)
{
	struct mvumi_hs_page2 *hs_page2;
	struct mvumi_hs_page4 *hs_page4;
	struct mvumi_hs_page3 *hs_page3;
	struct timeval time;
	unsigned int local_time;

	switch (hs_header->page_code) {
	case HS_PAGE_HOST_INFO:
		hs_page2 = (struct mvumi_hs_page2 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page2) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_page2->host_type = 3;	/* 3 means Linux */
		hs_page2->host_ver.ver_major = VER_MAJOR;
		hs_page2->host_ver.ver_minor = VER_MINOR;
		hs_page2->host_ver.ver_oem = VER_OEM;
		hs_page2->host_ver.ver_build = VER_BUILD;
		hs_page2->system_io_bus = 0;
		hs_page2->slot_number = 0;
		hs_page2->intr_level = 0;
		hs_page2->intr_vector = 0;
		do_gettimeofday(&time);
		local_time = (unsigned int) (time.tv_sec -
						(sys_tz.tz_minuteswest * 60));
		hs_page2->seconds_since1970 = local_time;
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	case HS_PAGE_FIRM_CTL:
		hs_page3 = (struct mvumi_hs_page3 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page3) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	case HS_PAGE_CL_INFO:
		hs_page4 = (struct mvumi_hs_page4 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page4) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
		hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);

		hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
		hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
		hs_page4->ib_entry_size = mhba->ib_max_size_setting;
		hs_page4->ob_entry_size = mhba->ob_max_size_setting;
		hs_page4->ob_depth = mhba->list_num_io;
		hs_page4->ib_depth = mhba->list_num_io;
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	default:
		dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
			hs_header->page_code);
		break;
	}
}
/**
 * mvumi_init_data -	Initialize requested data for FW
 * @mhba:		Adapter soft state
 */
static int mvumi_init_data(struct mvumi_hba *mhba)
{
	struct mvumi_ob_data *ob_pool;
	struct mvumi_res *res_mgnt;
	unsigned int tmp_size, offset, i;
	void *virmem, *v;
	dma_addr_t p;

	if (mhba->fw_flag & MVUMI_FW_ALLOC)
		return 0;

	tmp_size = mhba->ib_max_size * mhba->max_io;
	tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
	tmp_size += 8 + sizeof(u32) + 16;

	res_mgnt = mvumi_alloc_mem_resource(mhba,
					RESOURCE_UNCACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for inbound list\n");
		goto fail_alloc_dma_buf;
	}

	p = res_mgnt->bus_addr;
	v = res_mgnt->virt_addr;
	offset = round_up(p, 128) - p;
	p += offset;
	v += offset;
	mhba->ib_list = v;
	mhba->ib_list_phys = p;
	v += mhba->ib_max_size * mhba->max_io;
	p += mhba->ib_max_size * mhba->max_io;

	offset = round_up(p, 8) - p;
	p += offset;
	v += offset;
	mhba->ib_shadow = v;
	mhba->ib_shadow_phys = p;
	p += sizeof(u32);
	v += sizeof(u32);

	offset = round_up(p, 8) - p;
	p += offset;
	v += offset;
	mhba->ob_shadow = v;
	mhba->ob_shadow_phys = p;
	p += 8;
	v += 8;

	offset = round_up(p, 128) - p;
	p += offset;
	v += offset;
	mhba->ob_list = v;
	mhba->ob_list_phys = p;

	tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
	tmp_size = round_up(tmp_size, 8);

	res_mgnt = mvumi_alloc_mem_resource(mhba,
				RESOURCE_CACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for outbound data buffer\n");
		goto fail_alloc_dma_buf;
	}
	virmem = res_mgnt->virt_addr;

	for (i = mhba->max_io; i != 0; i--) {
		ob_pool = (struct mvumi_ob_data *) virmem;
		list_add_tail(&ob_pool->list, &mhba->ob_data_list);
		virmem += mhba->ob_max_size + sizeof(*ob_pool);
	}

	tmp_size = sizeof(unsigned short) * mhba->max_io +
			sizeof(struct mvumi_cmd *) * mhba->max_io;
	tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
						(sizeof(unsigned char) * 8);

	res_mgnt = mvumi_alloc_mem_resource(mhba,
				RESOURCE_CACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for tag and target map\n");
		goto fail_alloc_dma_buf;
	}

	virmem = res_mgnt->virt_addr;
	mhba->tag_pool.stack = virmem;
	mhba->tag_pool.size = mhba->max_io;
	tag_init(&mhba->tag_pool, mhba->max_io);
	virmem += sizeof(unsigned short) * mhba->max_io;

	mhba->tag_cmd = virmem;
	virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;

	mhba->target_map = virmem;

	mhba->fw_flag |= MVUMI_FW_ALLOC;
	return 0;

fail_alloc_dma_buf:
	mvumi_release_mem_resource(mhba);
	return -ENOMEM;
}
static int mvumi_hs_process_page(struct mvumi_hba *mhba,
				struct mvumi_hs_header *hs_header)
{
	struct mvumi_hs_page1 *hs_page1;
	unsigned char page_checksum;

	page_checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
	if (page_checksum != hs_header->checksum) {
		dev_err(&mhba->pdev->dev, "checksum error\n");
		return -1;
	}

	switch (hs_header->page_code) {
	case HS_PAGE_FIRM_CAP:
		hs_page1 = (struct mvumi_hs_page1 *) hs_header;

		mhba->max_io = hs_page1->max_io_support;
		mhba->list_num_io = hs_page1->cl_inout_list_depth;
		mhba->max_transfer_size = hs_page1->max_transfer_size;
		mhba->max_target_id = hs_page1->max_devices_support;
		mhba->hba_capability = hs_page1->capability;
		mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
		mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;

		mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
		mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;

		dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
						hs_page1->fw_ver.ver_build);
		break;
	default:
		dev_err(&mhba->pdev->dev, "handshake: page code error\n");
		return -1;
	}
	return 0;
}
/**
 * mvumi_handshake -	Move the FW to READY state
 * @mhba:		Adapter soft state
 *
 * During initialization, the FW can be in any one of several possible
 * states. If the FW is in an operational or waiting-for-handshake state,
 * the driver must take steps to bring it to the ready state. Otherwise,
 * it has to wait for the ready state.
 */
static int mvumi_handshake(struct mvumi_hba *mhba)
{
	unsigned int hs_state, tmp, hs_fun;
	struct mvumi_hs_header *hs_header;
	void *regs = mhba->mmio;

	if (mhba->fw_state == FW_STATE_STARTING)
		hs_state = HS_S_START;
	else {
		tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG0);
		hs_state = HS_GET_STATE(tmp);
		dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
		if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
			mhba->fw_state = FW_STATE_STARTING;
			return -1;
		}
	}

	hs_fun = 0;
	switch (hs_state) {
	case HS_S_START:
		mhba->fw_state = FW_STATE_HANDSHAKING;
		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		HS_SET_STATE(hs_fun, HS_S_RESET);
		iowrite32(HANDSHAKE_SIGNATURE, regs + CPU_PCIEA_TO_ARM_MSG1);
		iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
		iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
		break;

	case HS_S_RESET:
		iowrite32(lower_32_bits(mhba->handshake_page_phys),
					regs + CPU_PCIEA_TO_ARM_MSG1);
		iowrite32(upper_32_bits(mhba->handshake_page_phys),
					regs + CPU_ARM_TO_PCIEA_MSG1);
		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
		iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
		iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
		break;

	case HS_S_PAGE_ADDR:
	case HS_S_QUERY_PAGE:
	case HS_S_SEND_PAGE:
		hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
		if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
			mhba->hba_total_pages =
			((struct mvumi_hs_page1 *) hs_header)->total_pages;

			if (mhba->hba_total_pages == 0)
				mhba->hba_total_pages = HS_PAGE_TOTAL - 1;
		}

		if (hs_state == HS_S_QUERY_PAGE) {
			if (mvumi_hs_process_page(mhba, hs_header)) {
				HS_SET_STATE(hs_fun, HS_S_ABORT);
				return -1;
			}
			if (mvumi_init_data(mhba)) {
				HS_SET_STATE(hs_fun, HS_S_ABORT);
				return -1;
			}
		} else if (hs_state == HS_S_PAGE_ADDR) {
			hs_header->page_code = 0;
			mhba->hba_total_pages = HS_PAGE_TOTAL - 1;
		}

		if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
			hs_header->page_code++;
			if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
				mvumi_hs_build_page(mhba, hs_header);
				HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
			} else
				HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
		} else
			HS_SET_STATE(hs_fun, HS_S_END);

		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
		iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
		break;

	case HS_S_END:
		/* Set communication list ISR */
		tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG);
		tmp |= INT_MAP_COMAOUT | INT_MAP_COMAERR;
		iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG);
		iowrite32(mhba->list_num_io, mhba->ib_shadow);
		/* Set InBound List Available count shadow */
		iowrite32(lower_32_bits(mhba->ib_shadow_phys),
					regs + CLA_INB_AVAL_COUNT_BASEL);
		iowrite32(upper_32_bits(mhba->ib_shadow_phys),
					regs + CLA_INB_AVAL_COUNT_BASEH);

		/* Set OutBound List Available count shadow */
		iowrite32((mhba->list_num_io - 1) | CL_POINTER_TOGGLE,
						mhba->ob_shadow);
		iowrite32(lower_32_bits(mhba->ob_shadow_phys), regs + 0x5B0);
		iowrite32(upper_32_bits(mhba->ob_shadow_phys), regs + 0x5B4);

		mhba->ib_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE;
		mhba->ob_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE;
		mhba->fw_state = FW_STATE_STARTED;
		break;
	default:
		dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
								hs_state);
		return -1;
	}
	return 0;
}
static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
{
	unsigned int isr_status;
	unsigned long before;

	before = jiffies;
	mvumi_handshake(mhba);
	do {
		isr_status = mhba->instancet->read_fw_status_reg(mhba->mmio);

		if (mhba->fw_state == FW_STATE_STARTED)
			return 0;
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"no handshake response at state 0x%x.\n",
					mhba->fw_state);
			dev_err(&mhba->pdev->dev,
				"isr : global=0x%x,status=0x%x.\n",
					mhba->global_isr, isr_status);
			return -1;
		}
		rmb();
		usleep_range(1000, 2000);
	} while (!(isr_status & DRBL_HANDSHAKE_ISR));

	return 0;
}
static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
{
	void *regs = mhba->mmio;
	unsigned int tmp;
	unsigned long before;

	before = jiffies;
	tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1);
	while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
		if (tmp != HANDSHAKE_READYSTATE)
			iowrite32(DRBL_MU_RESET,
					regs + CPU_PCIEA_TO_ARM_DRBL_REG);
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"invalid signature [0x%x].\n", tmp);
			return -1;
		}
		usleep_range(1000, 2000);
		rmb();
		tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1);
	}

	mhba->fw_state = FW_STATE_STARTING;
	dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
	do {
		if (mvumi_handshake_event(mhba)) {
			dev_err(&mhba->pdev->dev,
				"handshake failed at state 0x%x.\n",
					mhba->fw_state);
			return -1;
		}
	} while (mhba->fw_state != FW_STATE_STARTED);

	dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");

	return 0;
}
static unsigned char mvumi_start(struct mvumi_hba *mhba)
{
	void *regs = mhba->mmio;
	unsigned int tmp;

	/* clear Door bell */
	tmp = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
	iowrite32(tmp, regs + CPU_ARM_TO_PCIEA_DRBL_REG);

	iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG);
	tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG) | INT_MAP_DL_CPU2PCIEA;
	iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG);
	if (mvumi_check_handshake(mhba))
		return -1;

	return 0;
}
/**
 * mvumi_complete_cmd -	Completes a command
 * @mhba:		Adapter soft state
 * @cmd:		Command to be completed
 */
static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
					struct mvumi_rsp_frame *ob_frame)
{
	struct scsi_cmnd *scmd = cmd->scmd;

	cmd->scmd->SCp.ptr = NULL;
	scmd->result = ob_frame->req_status;

	switch (ob_frame->req_status) {
	case SAM_STAT_GOOD:
		scmd->result |= DID_OK << 16;
		break;
	case SAM_STAT_BUSY:
		scmd->result |= DID_BUS_BUSY << 16;
		break;
	case SAM_STAT_CHECK_CONDITION:
		scmd->result |= (DID_OK << 16);
		if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
			memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
				sizeof(struct mvumi_sense_data));
			scmd->result |= (DRIVER_SENSE << 24);
		}
		break;
	default:
		scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
		break;
	}

	if (scsi_bufflen(scmd)) {
		if (scsi_sg_count(scmd)) {
			pci_unmap_sg(mhba->pdev,
				scsi_sglist(scmd),
				scsi_sg_count(scmd),
				(int) scmd->sc_data_direction);
		} else {
			pci_unmap_single(mhba->pdev,
				scmd->SCp.dma_handle,
				scsi_bufflen(scmd),
				(int) scmd->sc_data_direction);

			scmd->SCp.dma_handle = 0;
		}
	}
	cmd->scmd->scsi_done(scmd);
	mvumi_return_cmd(mhba, cmd);
}
static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd,
					struct mvumi_rsp_frame *ob_frame)
{
	if (atomic_read(&cmd->sync_cmd)) {
		cmd->cmd_status = ob_frame->req_status;

		if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
				(ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
				cmd->data_buf) {
			memcpy(cmd->data_buf, ob_frame->payload,
					sizeof(struct mvumi_sense_data));
		}
		atomic_dec(&cmd->sync_cmd);
		wake_up(&mhba->int_cmd_wait_q);
	}
}
static void mvumi_show_event(struct mvumi_hba *mhba,
			struct mvumi_driver_event *ptr)
{
	unsigned int i;

	dev_warn(&mhba->pdev->dev,
		"Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
		ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
	if (ptr->param_count) {
		printk(KERN_WARNING "Event param(len 0x%x): ",
						ptr->param_count);
		for (i = 0; i < ptr->param_count; i++)
			printk(KERN_WARNING "0x%x ", ptr->params[i]);

		printk(KERN_WARNING "\n");
	}

	if (ptr->sense_data_length) {
		printk(KERN_WARNING "Event sense data(len 0x%x): ",
						ptr->sense_data_length);
		for (i = 0; i < ptr->sense_data_length; i++)
			printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
		printk(KERN_WARNING "\n");
	}
}
static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
{
	if (msg == APICDB1_EVENT_GETEVENT) {
		unsigned int i, count;
		struct mvumi_driver_event *param = NULL;
		struct mvumi_event_req *er = buffer;

		count = er->count;
		if (count > MAX_EVENTS_RETURNED) {
			dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
					" than max event count[0x%x].\n",
					count, MAX_EVENTS_RETURNED);
			return;
		}
		for (i = 0; i < count; i++) {
			param = &er->events[i];
			mvumi_show_event(mhba, param);
		}
	}
}
static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
{
	struct mvumi_cmd *cmd;
	struct mvumi_msg_frame *frame;

	cmd = mvumi_create_internal_cmd(mhba, 512);
	if (!cmd)
		return -1;
	cmd->scmd = NULL;
	cmd->cmd_status = REQ_STATUS_PENDING;
	atomic_set(&cmd->sync_cmd, 0);
	frame = cmd->frame;
	frame->device_id = 0;
	frame->cmd_flag = CMD_FLAG_DATA_IN;
	frame->req_function = CL_FUN_SCSI_CMD;
	frame->cdb_length = MAX_COMMAND_SIZE;
	frame->data_transfer_length = sizeof(struct mvumi_event_req);
	memset(frame->cdb, 0, MAX_COMMAND_SIZE);
	frame->cdb[0] = APICDB0_EVENT;
	frame->cdb[1] = msg;
	mvumi_issue_blocked_cmd(mhba, cmd);

	if (cmd->cmd_status != SAM_STAT_GOOD)
		dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
							cmd->cmd_status);
	else
		mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);

	mvumi_delete_internal_cmd(mhba, cmd);
	return 0;
}
static void mvumi_scan_events(struct work_struct *work)
{
	struct mvumi_events_wq *mu_ev =
		container_of(work, struct mvumi_events_wq, work_q);

	mvumi_get_event(mu_ev->mhba, mu_ev->event);
	kfree(mu_ev);
}
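/*
 * Event retrieval issues a blocked internal command and therefore sleeps,
 * so the interrupt handler defers it to a work item allocated with
 * GFP_ATOMIC.
 */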
static void mvumi_launch_events(struct mvumi_hba *mhba, u8 msg)
{
	struct mvumi_events_wq *mu_ev;

	mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
	if (mu_ev) {
		INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
		mu_ev->mhba = mhba;
		mu_ev->event = msg;
		mu_ev->param = NULL;
		schedule_work(&mu_ev->work_q);
	}
}
static void mvumi_handle_clob(struct mvumi_hba *mhba)
{
	struct mvumi_rsp_frame *ob_frame;
	struct mvumi_cmd *cmd;
	struct mvumi_ob_data *pool;

	while (!list_empty(&mhba->free_ob_list)) {
		pool = list_first_entry(&mhba->free_ob_list,
						struct mvumi_ob_data, list);
		list_del_init(&pool->list);
		list_add_tail(&pool->list, &mhba->ob_data_list);

		ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
		cmd = mhba->tag_cmd[ob_frame->tag];

		atomic_dec(&mhba->fw_outstanding);
		mhba->tag_cmd[ob_frame->tag] = 0;
		tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
		if (cmd->scmd)
			mvumi_complete_cmd(mhba, cmd, ob_frame);
		else
			mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
	}
	mhba->instancet->fire_cmd(mhba, NULL);
}
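/*
 * Interrupt entry point. clear_intr() latches the global and doorbell
 * causes into mhba and quiesces them in hardware; the latched bits then
 * steer handshake continuation, event notification and outbound-list
 * processing.
 */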
static irqreturn_t mvumi_isr_handler(int irq, void *devp)
{
	struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
	unsigned long flags;

	spin_lock_irqsave(mhba->shost->host_lock, flags);
	if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
		return IRQ_NONE;
	}

	if (mhba->global_isr & INT_MAP_DL_CPU2PCIEA) {
		if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
			dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
			mvumi_handshake(mhba);
		}
		if (mhba->isr_status & DRBL_EVENT_NOTIFY)
			mvumi_launch_events(mhba, APICDB1_EVENT_GETEVENT);
	}

	if (mhba->global_isr & INT_MAP_COMAOUT)
		mvumi_receive_ob_list_entry(mhba);

	mhba->global_isr = 0;
	mhba->isr_status = 0;
	if (mhba->fw_state == FW_STATE_STARTED)
		mvumi_handle_clob(mhba);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
	return IRQ_HANDLED;
}
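/*
 * mvumi_send_command() and mvumi_fire_cmd() run with shost->host_lock held
 * (taken by mvumi_queue_command(), mvumi_issue_blocked_cmd() and the ISR),
 * which serializes tag allocation and inbound-list updates.
 */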
static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	void *ib_entry;
	struct mvumi_msg_frame *ib_frame;
	unsigned int frame_len;

	ib_frame = cmd->frame;
	if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
		dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
	}
	if (tag_is_empty(&mhba->tag_pool)) {
		dev_dbg(&mhba->pdev->dev, "no free tag.\n");
		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
	}
	if (mvumi_get_ib_list_entry(mhba, &ib_entry))
		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;

	cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
	cmd->frame->request_id = mhba->io_seq++;
	cmd->request_id = cmd->frame->request_id;
	mhba->tag_cmd[cmd->frame->tag] = cmd;
	frame_len = sizeof(*ib_frame) - 4 +
			ib_frame->sg_counts * sizeof(struct mvumi_sgl);
	memcpy(ib_entry, ib_frame, frame_len);
	return MV_QUEUE_COMMAND_RESULT_SENT;
}
static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
{
	unsigned short num_of_cl_sent = 0;
	enum mvumi_qc_result result;

	if (cmd)
		list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);

	while (!list_empty(&mhba->waiting_req_list)) {
		cmd = list_first_entry(&mhba->waiting_req_list,
					struct mvumi_cmd, queue_pointer);
		list_del_init(&cmd->queue_pointer);
		result = mvumi_send_command(mhba, cmd);
		switch (result) {
		case MV_QUEUE_COMMAND_RESULT_SENT:
			num_of_cl_sent++;
			break;
		case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
			list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
			if (num_of_cl_sent > 0)
				mvumi_send_ib_list_entry(mhba);

			return;
		}
	}
	if (num_of_cl_sent > 0)
		mvumi_send_ib_list_entry(mhba);
}
/**
 * mvumi_enable_intr -	Enables interrupts
 * @regs:		FW register set
 */
static void mvumi_enable_intr(void *regs)
{
	unsigned int mask;

	iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG);
	mask = ioread32(regs + CPU_ENPOINTA_MASK_REG);
	mask |= INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR;
	iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG);
}
/**
 * mvumi_disable_intr -	Disables interrupts
 * @regs:		FW register set
 */
static void mvumi_disable_intr(void *regs)
{
	unsigned int mask;

	iowrite32(0, regs + CPU_ARM_TO_PCIEA_MASK_REG);
	mask = ioread32(regs + CPU_ENPOINTA_MASK_REG);
	mask &= ~(INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR);
	iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG);
}
static int mvumi_clear_intr(void *extend)
{
	struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
	unsigned int status, isr_status = 0, tmp = 0;
	void *regs = mhba->mmio;

	status = ioread32(regs + CPU_MAIN_INT_CAUSE_REG);
	if (!(status & INT_MAP_MU) || status == 0xFFFFFFFF)
		return 1;
	if (unlikely(status & INT_MAP_COMAERR)) {
		tmp = ioread32(regs + CLA_ISR_CAUSE);
		if (tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ))
			iowrite32(tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ),
					regs + CLA_ISR_CAUSE);
		status ^= INT_MAP_COMAERR;
		/* inbound or outbound parity error, command will timeout */
	}
	if (status & INT_MAP_COMAOUT) {
		tmp = ioread32(regs + CLA_ISR_CAUSE);
		if (tmp & CLIC_OUT_IRQ)
			iowrite32(tmp & CLIC_OUT_IRQ, regs + CLA_ISR_CAUSE);
	}
	if (status & INT_MAP_DL_CPU2PCIEA) {
		isr_status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
		if (isr_status)
			iowrite32(isr_status, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
	}

	mhba->global_isr = status;
	mhba->isr_status = isr_status;

	return 0;
}
/**
 * mvumi_read_fw_status_reg - returns the current FW status value
 * @regs:		FW register set
 */
static unsigned int mvumi_read_fw_status_reg(void *regs)
{
	unsigned int status;

	status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
	if (status)
		iowrite32(status, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
	return status;
}
static struct mvumi_instance_template mvumi_instance_template = {
	.fire_cmd = mvumi_fire_cmd,
	.enable_intr = mvumi_enable_intr,
	.disable_intr = mvumi_disable_intr,
	.clear_intr = mvumi_clear_intr,
	.read_fw_status_reg = mvumi_read_fw_status_reg,
};
static int mvumi_slave_configure(struct scsi_device *sdev)
{
	struct mvumi_hba *mhba;
	unsigned char bitcount = sizeof(unsigned char) * 8;

	mhba = (struct mvumi_hba *) sdev->host->hostdata;
	if (sdev->id >= mhba->max_target_id)
		return -EINVAL;

	mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
	return 0;
}
/**
 * mvumi_build_frame -	Prepares a direct cdb (DCDB) command
 * @mhba:		Adapter soft state
 * @scmd:		SCSI command
 * @cmd:		Command to be prepared in
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
 */
static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
				struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
{
	struct mvumi_msg_frame *pframe;

	cmd->scmd = scmd;
	cmd->cmd_status = REQ_STATUS_PENDING;
	pframe = cmd->frame;
	pframe->device_id = ((unsigned short) scmd->device->id) |
				(((unsigned short) scmd->device->lun) << 8);
	pframe->cmd_flag = 0;

	switch (scmd->sc_data_direction) {
	case DMA_NONE:
		pframe->cmd_flag |= CMD_FLAG_NON_DATA;
		break;
	case DMA_FROM_DEVICE:
		pframe->cmd_flag |= CMD_FLAG_DATA_IN;
		break;
	case DMA_TO_DEVICE:
		pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
		break;
	case DMA_BIDIRECTIONAL:
	default:
		dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
			"cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
		goto error;
	}

	pframe->cdb_length = scmd->cmd_len;
	memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
	pframe->req_function = CL_FUN_SCSI_CMD;
	if (scsi_bufflen(scmd)) {
		if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
			&pframe->sg_counts))
			goto error;

		pframe->data_transfer_length = scsi_bufflen(scmd);
	} else {
		pframe->sg_counts = 0;
		pframe->data_transfer_length = 0;
	}
	return 0;

error:
	scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
		SAM_STAT_CHECK_CONDITION;
	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
									0);
	return -1;
}
/**
 * mvumi_queue_command -	Queue entry point
 * @shost:			SCSI host the command is queued on
 * @scmd:			SCSI command to be queued
 */
static int mvumi_queue_command(struct Scsi_Host *shost,
					struct scsi_cmnd *scmd)
{
	struct mvumi_cmd *cmd;
	struct mvumi_hba *mhba;
	unsigned long irq_flags;

	spin_lock_irqsave(shost->host_lock, irq_flags);
	scsi_cmd_get_serial(shost, scmd);

	mhba = (struct mvumi_hba *) shost->hostdata;
	cmd = mvumi_get_cmd(mhba);
	if (unlikely(!cmd)) {
		spin_unlock_irqrestore(shost->host_lock, irq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
		goto out_return_cmd;

	cmd->scmd = scmd;
	scmd->SCp.ptr = (char *) cmd;
	mhba->instancet->fire_cmd(mhba, cmd);
	spin_unlock_irqrestore(shost->host_lock, irq_flags);
	return 0;

out_return_cmd:
	mvumi_return_cmd(mhba, cmd);
	scmd->scsi_done(scmd);
	spin_unlock_irqrestore(shost->host_lock, irq_flags);
	return 0;
}
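/*
 * Wired up as the transport template's eh_timed_out handler: forcibly
 * completes a command the firmware never answered, releasing its tag and
 * DMA mappings, and returns BLK_EH_NOT_HANDLED so the midlayer's normal
 * error handling proceeds.
 */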
static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
{
	struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
	struct Scsi_Host *host = scmd->device->host;
	struct mvumi_hba *mhba = shost_priv(host);
	unsigned long flags;

	spin_lock_irqsave(mhba->shost->host_lock, flags);

	if (mhba->tag_cmd[cmd->frame->tag]) {
		mhba->tag_cmd[cmd->frame->tag] = 0;
		tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
	}
	if (!list_empty(&cmd->queue_pointer))
		list_del_init(&cmd->queue_pointer);
	else
		atomic_dec(&mhba->fw_outstanding);

	scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
	scmd->SCp.ptr = NULL;
	if (scsi_bufflen(scmd)) {
		if (scsi_sg_count(scmd)) {
			pci_unmap_sg(mhba->pdev,
				scsi_sglist(scmd),
				scsi_sg_count(scmd),
				(int)scmd->sc_data_direction);
		} else {
			pci_unmap_single(mhba->pdev,
				scmd->SCp.dma_handle,
				scsi_bufflen(scmd),
				(int)scmd->sc_data_direction);

			scmd->SCp.dma_handle = 0;
		}
	}
	mvumi_return_cmd(mhba, cmd);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);

	return BLK_EH_NOT_HANDLED;
}
static int
mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
			sector_t capacity, int geom[])
{
	int heads, sectors;
	sector_t cylinders;
	unsigned long tmp;

	heads = 64;
	sectors = 32;
	tmp = heads * sectors;
	cylinders = capacity;
	sector_div(cylinders, tmp);

	if (capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		tmp = heads * sectors;
		cylinders = capacity;
		sector_div(cylinders, tmp);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	return 0;
}
static struct scsi_host_template mvumi_template = {

	.module = THIS_MODULE,
	.name = "Marvell Storage Controller",
	.slave_configure = mvumi_slave_configure,
	.queuecommand = mvumi_queue_command,
	.eh_host_reset_handler = mvumi_host_reset,
	.bios_param = mvumi_bios_param,
	.this_id = -1,
};
static struct scsi_transport_template mvumi_transport_template = {
	.eh_timed_out = mvumi_timed_out,
};
/**
 * mvumi_init_fw -	Initializes the FW
 * @mhba:		Adapter soft state
 *
 * This is the main function for initializing firmware.
 */
static int mvumi_init_fw(struct mvumi_hba *mhba)
{
	int ret = 0;

	if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
		dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
		return -EBUSY;
	}
	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
	if (ret)
		goto fail_ioremap;

	mhba->mmio = mhba->base_addr[0];

	switch (mhba->pdev->device) {
	case PCI_DEVICE_ID_MARVELL_MV9143:
		mhba->instancet = &mvumi_instance_template;
		mhba->io_seq = 0;
		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
		mhba->request_id_enabled = 1;
		break;
	default:
		dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
							mhba->pdev->device);
		mhba->instancet = NULL;
		ret = -EINVAL;
		goto fail_alloc_mem;
	}
	dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
							mhba->pdev->device);

	mhba->handshake_page = kzalloc(HSP_MAX_SIZE, GFP_KERNEL);
	if (!mhba->handshake_page) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for handshake\n");
		ret = -ENOMEM;
		goto fail_alloc_mem;
	}
	mhba->handshake_page_phys = virt_to_phys(mhba->handshake_page);

	if (mvumi_start(mhba)) {
		ret = -EINVAL;
		goto fail_ready_state;
	}
	ret = mvumi_alloc_cmds(mhba);
	if (ret)
		goto fail_ready_state;

	return 0;

fail_ready_state:
	mvumi_release_mem_resource(mhba);
	kfree(mhba->handshake_page);
fail_alloc_mem:
	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
fail_ioremap:
	pci_release_regions(mhba->pdev);

	return ret;
}
/**
 * mvumi_io_attach -	Attaches this driver to SCSI mid-layer
 * @mhba:		Adapter soft state
 */
static int mvumi_io_attach(struct mvumi_hba *mhba)
{
	struct Scsi_Host *host = mhba->shost;
	int ret;
	unsigned int max_sg = (mhba->ib_max_size + 4 -
			sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);

	host->irq = mhba->pdev->irq;
	host->unique_id = mhba->unique_id;
	host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
	host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
	host->max_sectors = mhba->max_transfer_size / 512;
	host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
	host->max_id = mhba->max_target_id;
	host->max_cmd_len = MAX_COMMAND_SIZE;
	host->transportt = &mvumi_transport_template;

	ret = scsi_add_host(host, &mhba->pdev->dev);
	if (ret) {
		dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
		return ret;
	}
	mhba->fw_flag |= MVUMI_FW_ATTACH;
	scsi_scan_host(host);

	return 0;
}
/**
 * mvumi_probe_one -	PCI hotplug entry point
 * @pdev:		PCI device structure
 * @id:			PCI ids of supported hotplugged adapter
 */
static int __devinit mvumi_probe_one(struct pci_dev *pdev,
					const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct mvumi_hba *mhba;
	int ret;

	dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
			pdev->vendor, pdev->device, pdev->subsystem_vendor,
			pdev->subsystem_device);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret)
			goto fail_set_dma_mask;
	} else {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret)
			goto fail_set_dma_mask;
	}

	host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
	if (!host) {
		dev_err(&pdev->dev, "scsi_host_alloc failed\n");
		ret = -ENOMEM;
		goto fail_alloc_instance;
	}
	mhba = shost_priv(host);

	INIT_LIST_HEAD(&mhba->cmd_pool);
	INIT_LIST_HEAD(&mhba->ob_data_list);
	INIT_LIST_HEAD(&mhba->free_ob_list);
	INIT_LIST_HEAD(&mhba->res_list);
	INIT_LIST_HEAD(&mhba->waiting_req_list);
	atomic_set(&mhba->fw_outstanding, 0);
	init_waitqueue_head(&mhba->int_cmd_wait_q);

	mhba->pdev = pdev;
	mhba->shost = host;
	mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;

	ret = mvumi_init_fw(mhba);
	if (ret)
		goto fail_init_fw;

	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
				"mvumi", mhba);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IRQ\n");
		goto fail_init_irq;
	}
	mhba->instancet->enable_intr(mhba->mmio);
	pci_set_drvdata(pdev, mhba);

	ret = mvumi_io_attach(mhba);
	if (ret)
		goto fail_io_attach;
	dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");

	return 0;

fail_io_attach:
	pci_set_drvdata(pdev, NULL);
	mhba->instancet->disable_intr(mhba->mmio);
	free_irq(mhba->pdev->irq, mhba);
fail_init_irq:
	mvumi_release_fw(mhba);
fail_init_fw:
	scsi_host_put(host);

fail_alloc_instance:
fail_set_dma_mask:
	pci_disable_device(pdev);

	return ret;
}
static void mvumi_detach_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host;
	struct mvumi_hba *mhba;

	mhba = pci_get_drvdata(pdev);
	host = mhba->shost;
	scsi_remove_host(mhba->shost);
	mvumi_flush_cache(mhba);

	mhba->instancet->disable_intr(mhba->mmio);
	free_irq(mhba->pdev->irq, mhba);
	mvumi_release_fw(mhba);
	scsi_host_put(host);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	dev_dbg(&pdev->dev, "driver is removed!\n");
}
/**
 * mvumi_shutdown -	Shutdown entry point
 * @pdev:		PCI device structure
 */
static void mvumi_shutdown(struct pci_dev *pdev)
{
	struct mvumi_hba *mhba = pci_get_drvdata(pdev);

	mvumi_flush_cache(mhba);
}
static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct mvumi_hba *mhba = NULL;

	mhba = pci_get_drvdata(pdev);
	mvumi_flush_cache(mhba);

	pci_set_drvdata(pdev, mhba);
	mhba->instancet->disable_intr(mhba->mmio);
	free_irq(mhba->pdev->irq, mhba);
	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
static int mvumi_resume(struct pci_dev *pdev)
{
	int ret;
	struct mvumi_hba *mhba = NULL;

	mhba = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "enable device failed\n");
		return ret;
	}
	pci_set_master(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret)
			goto fail;
	} else {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret)
			goto fail;
	}
	ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
	if (ret)
		goto fail;
	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
	if (ret)
		goto release_regions;

	mhba->mmio = mhba->base_addr[0];
	mvumi_reset(mhba->mmio);

	if (mvumi_start(mhba)) {
		ret = -EINVAL;
		goto unmap_pci_addr;
	}

	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
				"mvumi", mhba);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IRQ\n");
		goto unmap_pci_addr;
	}
	mhba->instancet->enable_intr(mhba->mmio);

	return 0;

unmap_pci_addr:
	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
release_regions:
	pci_release_regions(pdev);
fail:
	pci_disable_device(pdev);

	return ret;
}
static struct pci_driver mvumi_pci_driver = {

	.name = MV_DRIVER_NAME,
	.id_table = mvumi_pci_table,
	.probe = mvumi_probe_one,
	.remove = __devexit_p(mvumi_detach_one),
	.shutdown = mvumi_shutdown,
#ifdef CONFIG_PM
	.suspend = mvumi_suspend,
	.resume = mvumi_resume,
#endif
};
/**
 * mvumi_init - Driver load entry point
 */
static int __init mvumi_init(void)
{
	return pci_register_driver(&mvumi_pci_driver);
}

/**
 * mvumi_exit - Driver unload entry point
 */
static void __exit mvumi_exit(void)
{
	pci_unregister_driver(&mvumi_pci_driver);
}

module_init(mvumi_init);
module_exit(mvumi_exit);