/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"
MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
static const char driver_ver[] = "v1.6 (090910)";

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req);
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
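
/*
 * Two register interfaces are supported and dispatched through hba->ops:
 * "itl" for the Intel IOP based boards and "mv" for the Marvell based
 * boards (see hptiop_itl_ops/hptiop_mv_ops near the end of this file).
 */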
static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
{
	u32 req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&hba->u.itl.iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		msleep(1);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		writel(req, &hba->u.itl.iop->outbound_queue);
		readl(&hba->u.itl.iop->outbound_intstatus);
		return 0;
	}

	return -1;
}
static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}
static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
		hptiop_host_request_callback_itl(hba,
				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
	else
		hptiop_iop_request_callback_itl(hba, tag);
}
static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
{
	u32 req;

	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
			IOPMU_QUEUE_EMPTY) {

		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			struct hpt_iop_request_header __iomem * p;

			p = (struct hpt_iop_request_header __iomem *)
				((char __iomem *)hba->u.itl.iop + req);

			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				if (readl(&p->context))
					hptiop_request_callback_itl(hba, req);
				else
					writel(1, &p->context);
			}
			else
				hptiop_request_callback_itl(hba, req);
		}
	}
}
static int iop_intr_itl(struct hptiop_hba *hba)
{
	struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
	void __iomem *plx = hba->u.itl.plx;
	u32 status;
	int ret = 0;

	if (plx && readl(plx + 0x11C5C) & 0xf)
		writel(1, plx + 0x11C60);

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u32 msg = readl(&iop->outbound_msgaddr0);

		dprintk("received outbound msg %x\n", msg);
		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}
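
/*
 * The MV message unit exchanges 64-bit tags through circular inbound and
 * outbound queues of MVIOP_QUEUE_LEN entries; the host owns the outbound
 * tail and the inbound head, advancing them after each transfer.
 */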
static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
{
	u32 outbound_tail = readl(&mu->outbound_tail);
	u32 outbound_head = readl(&mu->outbound_head);

	if (outbound_tail != outbound_head) {
		u64 p;

		memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;
		writel(outbound_tail, &mu->outbound_tail);
		return p;
	} else
		return 0;
}
static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
{
	u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
	u32 head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
	writel(head, &hba->u.mv.mu->inbound_head);
	writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
		&hba->u.mv.regs->inbound_doorbell);
}
static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
{
	u32 req_type = (tag >> 5) & 0x7;
	struct hpt_iop_request_scsi_command *req;

	dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);

	BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[tag >> 8].req_virt;
		if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);

		hptiop_finish_scsi_req(hba, tag>>8, req);
		break;

	default:
		break;
	}
}
static int iop_intr_mv(struct hptiop_hba *hba)
{
	u32 status;
	int ret = 0;

	status = readl(&hba->u.mv.regs->outbound_doorbell);
	writel(~status, &hba->u.mv.regs->outbound_doorbell);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u32 msg;
		msg = readl(&hba->u.mv.mu->outbound_msg);
		dprintk("received outbound msg %x\n", msg);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		u64 tag;

		while ((tag = mv_outbound_read(hba->u.mv.mu)))
			hptiop_request_callback_mv(hba, tag);
		ret = 1;
	}

	return ret;
}
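
/*
 * Synchronous requests are polled rather than interrupt driven: the
 * request is posted with IOP_REQUEST_FLAG_SYNC_REQUEST set and the
 * caller spins (sleeping 1 ms per loop, up to 'millisec' iterations)
 * until the firmware acknowledges it through the request context or
 * hba->msg_done.
 */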
static int iop_send_sync_request_itl(struct hptiop_hba *hba,
					void __iomem *_req, u32 millisec)
{
	struct hpt_iop_request_header __iomem *req = _req;
	u32 i;

	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
	writel(0, &req->context);
	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
			&hba->u.itl.iop->inbound_queue);
	readl(&hba->u.itl.iop->outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		iop_intr_itl(hba);
		if (readl(&req->context))
			return 0;
		msleep(1);
	}

	return -1;
}
static int iop_send_sync_request_mv(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	mv_inbound_write(hba->u.mv.internal_req_phy |
			MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);

	for (i = 0; i < millisec; i++) {
		iop_intr_mv(hba);
		if (hba->msg_done)
			return 0;
		msleep(1);
	}
	return -1;
}
static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
	readl(&hba->u.itl.iop->outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.mv.mu->inbound_msg);
	writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
	readl(&hba->u.mv.regs->inbound_doorbell);
}
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
	u32 i;

	hba->msg_done = 0;
	hba->ops->post_msg(hba, msg);

	for (i = 0; i < millisec; i++) {
		spin_lock_irq(hba->host->host_lock);
		hba->ops->iop_intr(hba);
		spin_unlock_irq(hba->host->host_lock);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	return hba->msg_done ? 0 : -1;
}
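
/*
 * GET_CONFIG/SET_CONFIG requests are built in place in the IOP's memory
 * window for the itl interface, but in the DMA-coherent internal_req
 * buffer for mv, whose queues carry only physical addresses.
 */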
static int iop_get_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u32 req32;
	struct hpt_iop_request_get_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy_fromio(config, req, sizeof(*config));
	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}
static int iop_get_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
	return 0;
}
static int iop_set_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u32 req32;
	struct hpt_iop_request_set_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
		(u8 *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}
static int iop_set_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}
static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
		&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
{
	writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
		&hba->u.mv.regs->outbound_intmask);
}
static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
	/* enable interrupts */
	hba->ops->enable_intr(hba);

	hba->initialized = 1;

	/* start background tasks */
	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		printk(KERN_ERR "scsi%d: fail to start background task\n",
			hba->host->host_no);
		return -1;
	}
	return 0;
}
static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
{
	u32 mem_base_phy, length;
	void __iomem *mem_base_virt;

	struct pci_dev *pcidev = hba->pcidev;

	if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
		printk(KERN_ERR "scsi%d: pci resource invalid\n",
				hba->host->host_no);
		return NULL;
	}

	mem_base_phy = pci_resource_start(pcidev, index);
	length = pci_resource_len(pcidev, index);
	mem_base_virt = ioremap(mem_base_phy, length);

	if (!mem_base_virt) {
		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
				hba->host->host_no);
		return NULL;
	}
	return mem_base_virt;
}
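
/*
 * On 0x44xx (RR4xxx) itl boards the IOP sits behind a bridge chip:
 * BAR0 maps the bridge registers (kept in u.itl.plx, presumably a PLX
 * part, hence the name) and the IOP itself is reached through BAR2.
 */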
static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
{
	struct pci_dev *pcidev = hba->pcidev;
	hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
	if (hba->u.itl.iop == NULL)
		return -1;
	if ((pcidev->device & 0xff00) == 0x4400) {
		hba->u.itl.plx = hba->u.itl.iop;
		hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
		if (hba->u.itl.iop == NULL) {
			iounmap(hba->u.itl.plx);
			return -1;
		}
	}
	return 0;
}
static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
{
	if (hba->u.itl.plx)
		iounmap(hba->u.itl.plx);
	iounmap(hba->u.itl.iop);
}

static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
{
	hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mv.regs == NULL)
		return -1;

	hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mv.mu == NULL) {
		iounmap(hba->u.mv.regs);
		return -1;
	}

	return 0;
}

static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
	iounmap(hba->u.mv.regs);
	iounmap(hba->u.mv.mu);
}
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
	dprintk("iop message 0x%x\n", msg);

	if (msg == IOPMU_INBOUND_MSG0_NOP)
		hba->msg_done = 1;

	if (!hba->initialized)
		return;

	if (msg == IOPMU_INBOUND_MSG0_RESET) {
		atomic_set(&hba->resetting, 0);
		wake_up(&hba->reset_wq);
	}
	else if (msg <= IOPMU_INBOUND_MSG0_MAX)
		hba->msg_done = 1;
}
static struct hptiop_request *get_req(struct hptiop_hba *hba)
{
	struct hptiop_request *ret;

	dprintk("get_req : req=%p\n", hba->req_list);

	ret = hba->req_list;
	if (ret)
		hba->req_list = ret->next;

	return ret;
}

static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
	dprintk("free_req(%d, %p)\n", req->index, req);
	req->next = hba->req_list;
	hba->req_list = req;
}
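
/*
 * Completion path: translate the IOP result code carried in the request
 * header into a SCSI midlayer result, copy sense data on check
 * conditions, complete the command and recycle the request slot.
 */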
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req)
{
	struct scsi_cmnd *scp;

	dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, req->header.type, req->header.result,
			req->header.context, tag);

	BUG_ON(!req->header.result);
	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

	scp = hba->reqs[tag].scp;

	if (HPT_SCP(scp)->mapped)
		scsi_dma_unmap(scp);

	switch (le32_to_cpu(req->header.result)) {
	case IOP_RESULT_SUCCESS:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = (DID_OK<<16);
		break;
	case IOP_RESULT_BAD_TARGET:
		scp->result = (DID_BAD_TARGET<<16);
		break;
	case IOP_RESULT_BUSY:
		scp->result = (DID_BUS_BUSY<<16);
		break;
	case IOP_RESULT_RESET:
		scp->result = (DID_RESET<<16);
		break;
	case IOP_RESULT_FAIL:
		scp->result = (DID_ERROR<<16);
		break;
	case IOP_RESULT_INVALID_REQUEST:
		scp->result = (DID_ABORT<<16);
		break;
	case IOP_RESULT_CHECK_CONDITION:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = SAM_STAT_CHECK_CONDITION;
		memcpy(scp->sense_buffer, &req->sg_list,
				min_t(size_t, SCSI_SENSE_BUFFERSIZE,
					le32_to_cpu(req->dataxfer_length)));
		break;

	default:
		scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
		break;
	}

	dprintk("scsi_done(%p)\n", scp);
	scp->scsi_done(scp);
	free_req(hba, &hba->reqs[tag]);
}
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
{
	struct hpt_iop_request_scsi_command *req;
	u32 tag;

	if (hba->iopintf_v2) {
		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
		req = hba->reqs[tag].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
	} else {
		tag = _tag;
		req = hba->reqs[tag].req_virt;
	}

	hptiop_finish_scsi_req(hba, tag, req);
}
void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	struct hpt_iop_request_header __iomem *req;
	struct hpt_iop_request_ioctl_command __iomem *p;
	struct hpt_ioctl_k *arg;

	req = (struct hpt_iop_request_header __iomem *)
			((unsigned long)hba->u.itl.iop + tag);
	dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, readl(&req->type), readl(&req->result),
			readl(&req->context), tag);

	BUG_ON(!readl(&req->result));
	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
	arg = (struct hpt_ioctl_k *)(unsigned long)
		(readl(&req->context) |
			((u64)readl(&req->context_hi32)<<32));

	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
		arg->result = HPT_IOCTL_RESULT_OK;

		if (arg->outbuf_size)
			memcpy_fromio(arg->outbuf,
				&p->buf[(readl(&p->inbuf_size) + 3)& ~3],
				arg->outbuf_size);

		if (arg->bytes_returned)
			*arg->bytes_returned = arg->outbuf_size;
	}
	else
		arg->result = HPT_IOCTL_RESULT_FAILED;

	arg->done(arg);
	writel(tag, &hba->u.itl.iop->outbound_queue);
}
static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
	struct hptiop_hba *hba = dev_id;
	int handled;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	handled = hba->ops->iop_intr(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return handled;
}
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sg;
	int idx, nseg;

	nseg = scsi_dma_map(scp);
	BUG_ON(nseg < 0);
	if (!nseg)
		return 0;

	HPT_SCP(scp)->sgcnt = nseg;
	HPT_SCP(scp)->mapped = 1;

	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
			cpu_to_le32(1) : 0;
	}

	return HPT_SCP(scp)->sgcnt;
}
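
/*
 * Requests live in 32-byte aligned host memory, so req_shifted_phy is
 * the physical address >> 5. The post path also packs hints into the
 * queue word: itl v2 selects size-hint bits from the request size,
 * while mv ORs a 2-bit size class and MVIOP_MU_QUEUE_ADDR_HOST_BIT
 * into its 64-bit queue entry.
 */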
static void hptiop_post_req_itl(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;

	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
							(u32)_req->index);
	reqhdr->context_hi32 = 0;

	if (hba->iopintf_v2) {
		u32 size, size_bits;

		size = le32_to_cpu(reqhdr->size);
		if (size < 256)
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
		else if (size < 512)
			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
		else
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
						IOPMU_QUEUE_ADDR_HOST_BIT;
		writel(_req->req_shifted_phy | size_bits,
			&hba->u.itl.iop->inbound_queue);
	} else
		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
			&hba->u.itl.iop->inbound_queue);
}
static void hptiop_post_req_mv(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 size, size_bit;

	reqhdr->context = cpu_to_le32(_req->index<<8 |
					IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
	reqhdr->context_hi32 = 0;
	size = le32_to_cpu(reqhdr->size);

	if (size <= 256)
		size_bit = 0;
	else if (size <= 256*2)
		size_bit = 1;
	else if (size <= 256*3)
		size_bit = 2;
	else
		size_bit = 3;

	mv_inbound_write((_req->req_shifted_phy << 5) |
		MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}
static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
				void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iop_request_scsi_command *req;
	int sg_count = 0;
	struct hptiop_request *_req;

	BUG_ON(!done);
	scp->scsi_done = done;

	_req = get_req(hba);
	if (_req == NULL) {
		dprintk("hptiop_queuecmd : no free req\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	_req->scp = scp;

	dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) "
			"req_index=%d, req=%p\n",
			scp,
			host->host_no, scp->device->channel,
			scp->device->id, scp->device->lun,
			((u32 *)scp->cmnd)[0],
			((u32 *)scp->cmnd)[1],
			((u32 *)scp->cmnd)[2],
			_req->index, _req->req_virt);

	scp->result = 0;

	if (scp->device->channel || scp->device->lun ||
			scp->device->id > hba->max_devices) {
		scp->result = DID_BAD_TARGET << 16;
		free_req(hba, _req);
		goto cmd_done;
	}

	req = _req->req_virt;

	/* build S/G table */
	sg_count = hptiop_buildsgl(scp, req->sg_list);
	if (!sg_count)
		HPT_SCP(scp)->mapped = 0;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
	req->channel = scp->device->channel;
	req->target = scp->device->id;
	req->lun = scp->device->lun;
	req->header.size = cpu_to_le32(
				sizeof(struct hpt_iop_request_scsi_command)
				 - sizeof(struct hpt_iopsg)
				 + sg_count * sizeof(struct hpt_iopsg));

	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
	hba->ops->post_req(hba, _req);
	return 0;

cmd_done:
	dprintk("scsi_done(scp=%p)\n", scp);
	scp->scsi_done(scp);
	return 0;
}
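
/*
 * DEF_SCSI_QCMD generates hptiop_queuecommand(), a wrapper that calls
 * the _lck variant above with the host lock held (the legacy SCSI
 * queuecommand locking convention).
 */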
static DEF_SCSI_QCMD(hptiop_queuecommand)

static const char *hptiop_info(struct Scsi_Host *host)
{
	return driver_name_long;
}
static int hptiop_reset_hba(struct hptiop_hba *hba)
{
	if (atomic_xchg(&hba->resetting, 1) == 0) {
		atomic_inc(&hba->reset_count);
		hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
	}

	wait_event_timeout(hba->reset_wq,
			atomic_read(&hba->resetting) == 0, 60 * HZ);

	if (atomic_read(&hba->resetting)) {
		/* IOP is in unknown state, abort reset */
		printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
		return -1;
	}

	if (iop_send_sync_msg(hba,
		IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		dprintk("scsi%d: fail to start background task\n",
				hba->host->host_no);
	}

	return 0;
}
static int hptiop_reset(struct scsi_cmnd *scp)
{
	struct Scsi_Host * host = scp->device->host;
	struct hptiop_hba * hba = (struct hptiop_hba *)host->hostdata;

	printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
			scp->device->host->host_no, scp->device->channel,
			scp->device->id, scp);

	return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
}
static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
					  int queue_depth, int reason)
{
	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	if (queue_depth > hba->max_requests)
		queue_depth = hba->max_requests;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
}
static ssize_t hptiop_show_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_show_fw_version(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
				hba->firmware_version >> 24,
				(hba->firmware_version >> 16) & 0xff,
				(hba->firmware_version >> 8) & 0xff,
				hba->firmware_version & 0xff);
}
static struct device_attribute hptiop_attr_version = {
	.attr = {
		.name = "driver-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_version,
};

static struct device_attribute hptiop_attr_fw_version = {
	.attr = {
		.name = "firmware-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_fw_version,
};

static struct device_attribute *hptiop_attrs[] = {
	&hptiop_attr_version,
	&hptiop_attr_fw_version,
	NULL
};
static struct scsi_host_template driver_template = {
	.module                     = THIS_MODULE,
	.name                       = driver_name,
	.queuecommand               = hptiop_queuecommand,
	.eh_device_reset_handler    = hptiop_reset,
	.eh_bus_reset_handler       = hptiop_reset,
	.info                       = hptiop_info,
	.emulated                   = 0,
	.use_clustering             = ENABLE_CLUSTERING,
	.proc_name                  = driver_name,
	.shost_attrs                = hptiop_attrs,
	.this_id                    = -1,
	.change_queue_depth         = hptiop_adjust_disk_queue_depth,
};
static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
	hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
			0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
	if (hba->u.mv.internal_req)
		return 0;
	else
		return -1;
}

static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
	if (hba->u.mv.internal_req) {
		dma_free_coherent(&hba->pcidev->dev, 0x800,
			hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
		return 0;
	} else
		return -1;
}
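
/*
 * Probe sequence: enable the PCI device, set the DMA mask (64-bit if
 * available, else 32-bit), map the BARs, wait for firmware, exchange
 * GET_CONFIG/SET_CONFIG, hook the (shared) IRQ, carve the DMA-coherent
 * region into 32-byte aligned request slots, then register the host.
 */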
static int __devinit hptiop_probe(struct pci_dev *pcidev,
					const struct pci_device_id *id)
{
	struct Scsi_Host *host = NULL;
	struct hptiop_hba *hba;
	struct hpt_iop_request_get_config iop_config;
	struct hpt_iop_request_set_config set_config;
	dma_addr_t start_phy;
	void *start_virt;
	u32 offset, i, req_size;

	dprintk("hptiop_probe(%p)\n", pcidev);

	if (pci_enable_device(pcidev)) {
		printk(KERN_ERR "hptiop: fail to enable pci device\n");
		return -ENODEV;
	}

	printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
		pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
		pcidev->irq);

	pci_set_master(pcidev);

	/* Enable 64bit DMA if possible */
	if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
			printk(KERN_ERR "hptiop: fail to set dma_mask\n");
			goto disable_pci_device;
		}
	}

	if (pci_request_regions(pcidev, driver_name)) {
		printk(KERN_ERR "hptiop: pci_request_regions failed\n");
		goto disable_pci_device;
	}

	host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
	if (!host) {
		printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
		goto free_pci_regions;
	}

	hba = (struct hptiop_hba *)host->hostdata;

	hba->ops = (struct hptiop_adapter_ops *)id->driver_data;
	hba->pcidev = pcidev;
	hba->host = host;
	hba->initialized = 0;
	hba->iopintf_v2 = 0;

	atomic_set(&hba->resetting, 0);
	atomic_set(&hba->reset_count, 0);

	init_waitqueue_head(&hba->reset_wq);
	init_waitqueue_head(&hba->ioctl_wq);

	host->max_lun = 1;
	host->max_channel = 0;
	host->io_port = 0;
	host->n_io_port = 0;
	host->irq = pcidev->irq;

	if (hba->ops->map_pci_bar(hba))
		goto free_scsi_host;

	if (hba->ops->iop_wait_ready(hba, 20000)) {
		printk(KERN_ERR "scsi%d: firmware not ready\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	if (hba->ops->internal_memalloc) {
		if (hba->ops->internal_memalloc(hba)) {
			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
				hba->host->host_no);
			goto unmap_pci_bar;
		}
	}

	if (hba->ops->get_config(hba, &iop_config)) {
		printk(KERN_ERR "scsi%d: get config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
				HPTIOP_MAX_REQUESTS);
	hba->max_devices = le32_to_cpu(iop_config.max_devices);
	hba->max_request_size = le32_to_cpu(iop_config.request_size);
	hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
	hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
	hba->interface_version = le32_to_cpu(iop_config.interface_version);
	hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

	if (hba->firmware_version > 0x01020000 ||
			hba->interface_version > 0x01020000)
		hba->iopintf_v2 = 1;

	host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
	host->max_id = le32_to_cpu(iop_config.max_devices);
	host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
	host->can_queue = le32_to_cpu(iop_config.max_requests);
	host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
	host->max_cmd_len = 16;

	req_size = sizeof(struct hpt_iop_request_scsi_command)
		+ sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
	if ((req_size & 0x1f) != 0)
		req_size = (req_size + 0x1f) & ~0x1f;

	memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
	set_config.iop_id = cpu_to_le32(host->host_no);
	set_config.vbus_id = cpu_to_le16(host->host_no);
	set_config.max_host_request_size = cpu_to_le16(req_size);

	if (hba->ops->set_config(hba, &set_config)) {
		printk(KERN_ERR "scsi%d: set config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	pci_set_drvdata(pcidev, host);

	if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
					driver_name, hba)) {
		printk(KERN_ERR "scsi%d: request irq %d failed\n",
					hba->host->host_no, pcidev->irq);
		goto unmap_pci_bar;
	}

	/* Allocate request mem */

	dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

	hba->req_size = req_size;
	start_virt = dma_alloc_coherent(&pcidev->dev,
				hba->req_size*hba->max_requests + 0x20,
				&start_phy, GFP_KERNEL);

	if (!start_virt) {
		printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
					hba->host->host_no);
		goto free_request_irq;
	}

	hba->dma_coherent = start_virt;
	hba->dma_coherent_handle = start_phy;

	if ((start_phy & 0x1f) != 0) {
		/* must align to 32 bytes */
		offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
		start_phy += offset;
		start_virt += offset;
	}

	hba->req_list = start_virt;
	for (i = 0; i < hba->max_requests; i++) {
		hba->reqs[i].next = NULL;
		hba->reqs[i].req_virt = start_virt;
		hba->reqs[i].req_shifted_phy = start_phy >> 5;
		hba->reqs[i].index = i;
		free_req(hba, &hba->reqs[i]);
		start_virt = (char *)start_virt + hba->req_size;
		start_phy = start_phy + hba->req_size;
	}

	/* Enable Interrupt and start background task */
	if (hptiop_initialize_iop(hba))
		goto free_request_mem;

	if (scsi_add_host(host, &pcidev->dev)) {
		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
					hba->host->host_no);
		goto free_request_mem;
	}

	scsi_scan_host(host);

	dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
	return 0;

free_request_mem:
	dma_free_coherent(&hba->pcidev->dev,
			hba->req_size * hba->max_requests + 0x20,
			hba->dma_coherent, hba->dma_coherent_handle);

free_request_irq:
	free_irq(hba->pcidev->irq, hba);

unmap_pci_bar:
	if (hba->ops->internal_memfree)
		hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

free_scsi_host:
	scsi_host_put(host);

free_pci_regions:
	pci_release_regions(pcidev);

disable_pci_device:
	pci_disable_device(pcidev);

	dprintk("scsi%d: hptiop_probe fail\n", host ? host->host_no : 0);
	return -ENODEV;
}
static void hptiop_shutdown(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("hptiop_shutdown(%p)\n", hba);

	/* stop the iop */
	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
		printk(KERN_ERR "scsi%d: shutdown the iop timeout\n",
			hba->host->host_no);

	/* disable all outbound interrupts */
	hba->ops->disable_intr(hba);
}
static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
{
	u32 int_mask;

	int_mask = readl(&hba->u.itl.iop->outbound_intmask);
	writel(int_mask |
		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
		&hba->u.itl.iop->outbound_intmask);
	readl(&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
{
	writel(0, &hba->u.mv.regs->outbound_intmask);
	readl(&hba->u.mv.regs->outbound_intmask);
}
static void hptiop_remove(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

	scsi_remove_host(host);

	hptiop_shutdown(pcidev);

	free_irq(hba->pcidev->irq, hba);

	dma_free_coherent(&hba->pcidev->dev,
			hba->req_size * hba->max_requests + 0x20,
			hba->dma_coherent,
			hba->dma_coherent_handle);

	if (hba->ops->internal_memfree)
		hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

	pci_release_regions(hba->pcidev);
	pci_set_drvdata(hba->pcidev, NULL);
	pci_disable_device(hba->pcidev);

	scsi_host_put(host);
}
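
/*
 * Per-family operation tables. The PCI id table below stores a pointer
 * to the matching table in driver_data, which hptiop_probe() copies
 * into hba->ops; only the mv family needs the internal request buffer.
 */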
static struct hptiop_adapter_ops hptiop_itl_ops = {
	.iop_wait_ready    = iop_wait_ready_itl,
	.internal_memalloc = NULL,
	.internal_memfree  = NULL,
	.map_pci_bar       = hptiop_map_pci_bar_itl,
	.unmap_pci_bar     = hptiop_unmap_pci_bar_itl,
	.enable_intr       = hptiop_enable_intr_itl,
	.disable_intr      = hptiop_disable_intr_itl,
	.get_config        = iop_get_config_itl,
	.set_config        = iop_set_config_itl,
	.iop_intr          = iop_intr_itl,
	.post_msg          = hptiop_post_msg_itl,
	.post_req          = hptiop_post_req_itl,
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
	.iop_wait_ready    = iop_wait_ready_mv,
	.internal_memalloc = hptiop_internal_memalloc_mv,
	.internal_memfree  = hptiop_internal_memfree_mv,
	.map_pci_bar       = hptiop_map_pci_bar_mv,
	.unmap_pci_bar     = hptiop_unmap_pci_bar_mv,
	.enable_intr       = hptiop_enable_intr_mv,
	.disable_intr      = hptiop_disable_intr_mv,
	.get_config        = iop_get_config_mv,
	.set_config        = iop_set_config_mv,
	.iop_intr          = iop_intr_mv,
	.post_msg          = hptiop_post_msg_mv,
	.post_req          = hptiop_post_req_mv,
};
static struct pci_device_id hptiop_id_table[] = {
	{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
	{},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);
static struct pci_driver hptiop_pci_driver = {
	.name       = driver_name,
	.id_table   = hptiop_id_table,
	.probe      = hptiop_probe,
	.remove     = hptiop_remove,
	.shutdown   = hptiop_shutdown,
};
static int __init hptiop_module_init(void)
{
	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
	return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
	pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");