2 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
3 * Copyright (C) 2007-2008 HighPoint Technologies, Inc. All Rights Reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * $FreeBSD: src/sys/dev/hptiop/hptiop.c,v 1.9 2011/08/01 21:12:41 delphij Exp $
29 #include <sys/param.h>
30 #include <sys/types.h>
33 #include <sys/systm.h>
36 #include <sys/malloc.h>
38 #include <sys/libkern.h>
39 #include <sys/kernel.h>
41 #include <sys/kthread.h>
43 #include <sys/module.h>
45 #include <sys/eventhandler.h>
47 #include <sys/taskqueue.h>
48 #include <sys/ioccom.h>
49 #include <sys/device.h>
50 #include <sys/mplock2.h>
52 #include <machine/stdarg.h>
58 #include <bus/pci/pcireg.h>
59 #include <bus/pci/pcivar.h>
61 #include <bus/cam/cam.h>
62 #include <bus/cam/cam_ccb.h>
63 #include <bus/cam/cam_sim.h>
64 #include <bus/cam/cam_xpt_periph.h>
65 #include <bus/cam/cam_xpt_sim.h>
66 #include <bus/cam/cam_debug.h>
67 #include <bus/cam/cam_periph.h>
68 #include <bus/cam/scsi/scsi_all.h>
69 #include <bus/cam/scsi/scsi_message.h>
71 #include <dev/raid/hptiop/hptiop.h>
73 static char driver_name
[] = "hptiop";
74 static char driver_version
[] = "v1.3 (010208)";
76 static devclass_t hptiop_devclass
;
78 static int hptiop_send_sync_msg(struct hpt_iop_hba
*hba
,
79 u_int32_t msg
, u_int32_t millisec
);
80 static void hptiop_request_callback_itl(struct hpt_iop_hba
*hba
,
82 static void hptiop_request_callback_mv(struct hpt_iop_hba
*hba
, u_int64_t req
);
83 static void hptiop_os_message_callback(struct hpt_iop_hba
*hba
, u_int32_t msg
);
84 static int hptiop_do_ioctl_itl(struct hpt_iop_hba
*hba
,
85 struct hpt_iop_ioctl_param
*pParams
);
86 static int hptiop_do_ioctl_mv(struct hpt_iop_hba
*hba
,
87 struct hpt_iop_ioctl_param
*pParams
);
88 static void hptiop_bus_scan_cb(struct cam_periph
*periph
, union ccb
*ccb
);
89 static int hptiop_rescan_bus(struct hpt_iop_hba
*hba
);
90 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba
*hba
);
91 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba
*hba
);
92 static int hptiop_get_config_itl(struct hpt_iop_hba
*hba
,
93 struct hpt_iop_request_get_config
*config
);
94 static int hptiop_get_config_mv(struct hpt_iop_hba
*hba
,
95 struct hpt_iop_request_get_config
*config
);
96 static int hptiop_set_config_itl(struct hpt_iop_hba
*hba
,
97 struct hpt_iop_request_set_config
*config
);
98 static int hptiop_set_config_mv(struct hpt_iop_hba
*hba
,
99 struct hpt_iop_request_set_config
*config
);
100 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba
*hba
);
101 static int hptiop_internal_memfree_mv(struct hpt_iop_hba
*hba
);
102 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba
*hba
,
103 u_int32_t req32
, struct hpt_iop_ioctl_param
*pParams
);
104 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba
*hba
,
105 struct hpt_iop_request_ioctl_command
*req
,
106 struct hpt_iop_ioctl_param
*pParams
);
107 static void hptiop_post_req_itl(struct hpt_iop_hba
*hba
,
108 struct hpt_iop_srb
*srb
,
109 bus_dma_segment_t
*segs
, int nsegs
);
110 static void hptiop_post_req_mv(struct hpt_iop_hba
*hba
,
111 struct hpt_iop_srb
*srb
,
112 bus_dma_segment_t
*segs
, int nsegs
);
113 static void hptiop_post_msg_itl(struct hpt_iop_hba
*hba
, u_int32_t msg
);
114 static void hptiop_post_msg_mv(struct hpt_iop_hba
*hba
, u_int32_t msg
);
115 static void hptiop_enable_intr_itl(struct hpt_iop_hba
*hba
);
116 static void hptiop_enable_intr_mv(struct hpt_iop_hba
*hba
);
117 static void hptiop_disable_intr_itl(struct hpt_iop_hba
*hba
);
118 static void hptiop_disable_intr_mv(struct hpt_iop_hba
*hba
);
119 static void hptiop_free_srb(struct hpt_iop_hba
*hba
, struct hpt_iop_srb
*srb
);
120 static int hptiop_os_query_remove_device(struct hpt_iop_hba
*hba
, int tid
);
121 static int hptiop_probe(device_t dev
);
122 static int hptiop_attach(device_t dev
);
123 static int hptiop_detach(device_t dev
);
124 static int hptiop_shutdown(device_t dev
);
125 static void hptiop_action(struct cam_sim
*sim
, union ccb
*ccb
);
126 static void hptiop_poll(struct cam_sim
*sim
);
127 static void hptiop_async(void *callback_arg
, u_int32_t code
,
128 struct cam_path
*path
, void *arg
);
129 static void hptiop_pci_intr(void *arg
);
130 static void hptiop_release_resource(struct hpt_iop_hba
*hba
);
131 static int hptiop_reset_adapter(struct hpt_iop_hba
*hba
);
133 static d_open_t hptiop_open
;
134 static d_close_t hptiop_close
;
135 static d_ioctl_t hptiop_ioctl
;
137 static struct dev_ops hptiop_ops
= {
138 { driver_name
, 0, 0 },
139 .d_open
= hptiop_open
,
140 .d_close
= hptiop_close
,
141 .d_ioctl
= hptiop_ioctl
,
144 #define hba_from_dev(dev) ((struct hpt_iop_hba *)(dev)->si_drv1)
146 #define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
147 hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
148 #define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
149 hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))
151 #define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
152 hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
153 #define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
154 hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
155 #define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
156 hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
157 #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
158 hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))
160 static int hptiop_open(struct dev_open_args
*ap
)
162 cdev_t dev
= ap
->a_head
.a_dev
;
163 struct hpt_iop_hba
*hba
= hba_from_dev(dev
);
167 if (hba
->flag
& HPT_IOCTL_FLAG_OPEN
)
169 hba
->flag
|= HPT_IOCTL_FLAG_OPEN
;
173 static int hptiop_close(struct dev_close_args
*ap
)
175 cdev_t dev
= ap
->a_head
.a_dev
;
176 struct hpt_iop_hba
*hba
= hba_from_dev(dev
);
177 hba
->flag
&= ~(u_int32_t
)HPT_IOCTL_FLAG_OPEN
;
181 static int hptiop_ioctl(struct dev_ioctl_args
*ap
)
183 cdev_t dev
= ap
->a_head
.a_dev
;
184 u_long cmd
= ap
->a_cmd
;
185 caddr_t data
= ap
->a_data
;
187 struct hpt_iop_hba
*hba
= hba_from_dev(dev
);
192 case HPT_DO_IOCONTROL
:
193 ret
= hba
->ops
->do_ioctl(hba
,
194 (struct hpt_iop_ioctl_param
*)data
);
197 ret
= hptiop_rescan_bus(hba
);
206 static u_int64_t
hptiop_mv_outbound_read(struct hpt_iop_hba
*hba
)
209 u_int32_t outbound_tail
= BUS_SPACE_RD4_MV2(outbound_tail
);
210 u_int32_t outbound_head
= BUS_SPACE_RD4_MV2(outbound_head
);
212 if (outbound_tail
!= outbound_head
) {
213 bus_space_read_region_4(hba
->bar2t
, hba
->bar2h
,
214 offsetof(struct hpt_iopmu_mv
,
215 outbound_q
[outbound_tail
]),
220 if (outbound_tail
== MVIOP_QUEUE_LEN
)
223 BUS_SPACE_WRT4_MV2(outbound_tail
, outbound_tail
);
229 static void hptiop_mv_inbound_write(u_int64_t p
, struct hpt_iop_hba
*hba
)
231 u_int32_t inbound_head
= BUS_SPACE_RD4_MV2(inbound_head
);
232 u_int32_t head
= inbound_head
+ 1;
234 if (head
== MVIOP_QUEUE_LEN
)
237 bus_space_write_region_4(hba
->bar2t
, hba
->bar2h
,
238 offsetof(struct hpt_iopmu_mv
, inbound_q
[inbound_head
]),
240 BUS_SPACE_WRT4_MV2(inbound_head
, head
);
241 BUS_SPACE_WRT4_MV0(inbound_doorbell
, MVIOP_MU_INBOUND_INT_POSTQUEUE
);
244 static void hptiop_post_msg_itl(struct hpt_iop_hba
*hba
, u_int32_t msg
)
246 BUS_SPACE_WRT4_ITL(inbound_msgaddr0
, msg
);
247 BUS_SPACE_RD4_ITL(outbound_intstatus
);
250 static void hptiop_post_msg_mv(struct hpt_iop_hba
*hba
, u_int32_t msg
)
253 BUS_SPACE_WRT4_MV2(inbound_msg
, msg
);
254 BUS_SPACE_WRT4_MV0(inbound_doorbell
, MVIOP_MU_INBOUND_INT_MSG
);
256 BUS_SPACE_RD4_MV0(outbound_intmask
);
259 static int hptiop_wait_ready_itl(struct hpt_iop_hba
* hba
, u_int32_t millisec
)
264 for (i
= 0; i
< millisec
; i
++) {
265 req
= BUS_SPACE_RD4_ITL(inbound_queue
);
266 if (req
!= IOPMU_QUEUE_EMPTY
)
271 if (req
!=IOPMU_QUEUE_EMPTY
) {
272 BUS_SPACE_WRT4_ITL(outbound_queue
, req
);
273 BUS_SPACE_RD4_ITL(outbound_intstatus
);
280 static int hptiop_wait_ready_mv(struct hpt_iop_hba
* hba
, u_int32_t millisec
)
282 if (hptiop_send_sync_msg(hba
, IOPMU_INBOUND_MSG0_NOP
, millisec
))
288 static void hptiop_request_callback_itl(struct hpt_iop_hba
* hba
,
291 struct hpt_iop_srb
*srb
;
292 struct hpt_iop_request_scsi_command
*req
=NULL
;
295 u_int32_t result
, temp
, dxfer
;
298 if (index
& IOPMU_QUEUE_MASK_HOST_BITS
) { /*host req*/
299 if (hba
->firmware_version
> 0x01020000 ||
300 hba
->interface_version
> 0x01020000) {
301 srb
= hba
->srb
[index
& ~(u_int32_t
)
302 (IOPMU_QUEUE_ADDR_HOST_BIT
303 | IOPMU_QUEUE_REQUEST_RESULT_BIT
)];
304 req
= (struct hpt_iop_request_scsi_command
*)srb
;
305 if (index
& IOPMU_QUEUE_REQUEST_RESULT_BIT
)
306 result
= IOP_RESULT_SUCCESS
;
308 result
= req
->header
.result
;
310 srb
= hba
->srb
[index
&
311 ~(u_int32_t
)IOPMU_QUEUE_ADDR_HOST_BIT
];
312 req
= (struct hpt_iop_request_scsi_command
*)srb
;
313 result
= req
->header
.result
;
315 dxfer
= req
->dataxfer_length
;
320 temp
= bus_space_read_4(hba
->bar0t
, hba
->bar0h
, index
+
321 offsetof(struct hpt_iop_request_header
, type
));
322 result
= bus_space_read_4(hba
->bar0t
, hba
->bar0h
, index
+
323 offsetof(struct hpt_iop_request_header
, result
));
325 case IOP_REQUEST_TYPE_IOCTL_COMMAND
:
328 bus_space_write_region_4(hba
->bar0t
, hba
->bar0h
, index
+
329 offsetof(struct hpt_iop_request_header
, context
),
330 (u_int32_t
*)&temp64
, 2);
331 wakeup((void *)((unsigned long)hba
->u
.itl
.mu
+ index
));
335 case IOP_REQUEST_TYPE_SCSI_COMMAND
:
336 bus_space_read_region_4(hba
->bar0t
, hba
->bar0h
, index
+
337 offsetof(struct hpt_iop_request_header
, context
),
338 (u_int32_t
*)&temp64
, 2);
339 srb
= (struct hpt_iop_srb
*)(unsigned long)temp64
;
340 dxfer
= bus_space_read_4(hba
->bar0t
, hba
->bar0h
,
341 index
+ offsetof(struct hpt_iop_request_scsi_command
,
344 ccb
= (union ccb
*)srb
->ccb
;
345 if (ccb
->ccb_h
.flags
& CAM_CDB_POINTER
)
346 cdb
= ccb
->csio
.cdb_io
.cdb_ptr
;
348 cdb
= ccb
->csio
.cdb_io
.cdb_bytes
;
350 if (cdb
[0] == SYNCHRONIZE_CACHE
) { /* ??? */
351 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
356 case IOP_RESULT_SUCCESS
:
357 switch (ccb
->ccb_h
.flags
& CAM_DIR_MASK
) {
359 bus_dmamap_sync(hba
->io_dmat
,
360 srb
->dma_map
, BUS_DMASYNC_POSTREAD
);
361 bus_dmamap_unload(hba
->io_dmat
, srb
->dma_map
);
364 bus_dmamap_sync(hba
->io_dmat
,
365 srb
->dma_map
, BUS_DMASYNC_POSTWRITE
);
366 bus_dmamap_unload(hba
->io_dmat
, srb
->dma_map
);
370 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
373 case IOP_RESULT_BAD_TARGET
:
374 ccb
->ccb_h
.status
= CAM_DEV_NOT_THERE
;
376 case IOP_RESULT_BUSY
:
377 ccb
->ccb_h
.status
= CAM_BUSY
;
379 case IOP_RESULT_INVALID_REQUEST
:
380 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
382 case IOP_RESULT_FAIL
:
383 ccb
->ccb_h
.status
= CAM_SCSI_STATUS_ERROR
;
385 case IOP_RESULT_RESET
:
386 ccb
->ccb_h
.status
= CAM_BUSY
;
388 case IOP_RESULT_CHECK_CONDITION
:
389 if (srb
->srb_flag
& HPT_SRB_FLAG_HIGH_MEM_ACESS
) {/*iop*/
390 bus_space_read_region_1(hba
->bar0t
, hba
->bar0h
,
391 index
+ offsetof(struct hpt_iop_request_scsi_command
,
392 sg_list
), (u_int8_t
*)&ccb
->csio
.sense_data
,
393 MIN(dxfer
, sizeof(ccb
->csio
.sense_data
)));
395 memcpy(&ccb
->csio
.sense_data
, &req
->sg_list
,
396 MIN(dxfer
, sizeof(ccb
->csio
.sense_data
)));
398 ccb
->ccb_h
.status
= CAM_SCSI_STATUS_ERROR
;
399 ccb
->ccb_h
.status
|= CAM_AUTOSNS_VALID
;
400 ccb
->csio
.scsi_status
= SCSI_STATUS_CHECK_COND
;
403 ccb
->ccb_h
.status
= CAM_SCSI_STATUS_ERROR
;
407 if (srb
->srb_flag
& HPT_SRB_FLAG_HIGH_MEM_ACESS
)
408 BUS_SPACE_WRT4_ITL(outbound_queue
, index
);
410 ccb
->csio
.resid
= ccb
->csio
.dxfer_len
- dxfer
;
412 hptiop_free_srb(hba
, srb
);
418 static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba
*hba
)
422 while ((req
= BUS_SPACE_RD4_ITL(outbound_queue
)) !=IOPMU_QUEUE_EMPTY
) {
423 if (req
& IOPMU_QUEUE_MASK_HOST_BITS
)
424 hptiop_request_callback_itl(hba
, req
);
426 struct hpt_iop_request_header
*p
;
428 p
= (struct hpt_iop_request_header
*)
429 ((char *)hba
->u
.itl
.mu
+ req
);
430 temp
= bus_space_read_4(hba
->bar0t
,
432 offsetof(struct hpt_iop_request_header
,
434 if (temp
& IOP_REQUEST_FLAG_SYNC_REQUEST
) {
436 bus_space_read_region_4(hba
->bar0t
,
438 offsetof(struct hpt_iop_request_header
,
440 (u_int32_t
*)&temp64
, 2);
442 hptiop_request_callback_itl(hba
, req
);
445 bus_space_write_region_4(hba
->bar0t
,
447 offsetof(struct hpt_iop_request_header
,
449 (u_int32_t
*)&temp64
, 2);
452 hptiop_request_callback_itl(hba
, req
);
457 static int hptiop_intr_itl(struct hpt_iop_hba
* hba
)
462 status
= BUS_SPACE_RD4_ITL(outbound_intstatus
);
464 if (status
& IOPMU_OUTBOUND_INT_MSG0
) {
465 u_int32_t msg
= BUS_SPACE_RD4_ITL(outbound_msgaddr0
);
466 KdPrint(("hptiop: received outbound msg %x\n", msg
));
467 BUS_SPACE_WRT4_ITL(outbound_intstatus
, IOPMU_OUTBOUND_INT_MSG0
);
468 hptiop_os_message_callback(hba
, msg
);
472 if (status
& IOPMU_OUTBOUND_INT_POSTQUEUE
) {
473 hptiop_drain_outbound_queue_itl(hba
);
480 static void hptiop_request_callback_mv(struct hpt_iop_hba
* hba
,
483 u_int32_t context
= (u_int32_t
)_tag
;
485 if (context
& MVIOP_CMD_TYPE_SCSI
) {
486 struct hpt_iop_srb
*srb
;
487 struct hpt_iop_request_scsi_command
*req
;
491 srb
= hba
->srb
[context
>> MVIOP_REQUEST_NUMBER_START_BIT
];
492 req
= (struct hpt_iop_request_scsi_command
*)srb
;
493 ccb
= (union ccb
*)srb
->ccb
;
494 if (ccb
->ccb_h
.flags
& CAM_CDB_POINTER
)
495 cdb
= ccb
->csio
.cdb_io
.cdb_ptr
;
497 cdb
= ccb
->csio
.cdb_io
.cdb_bytes
;
499 if (cdb
[0] == SYNCHRONIZE_CACHE
) { /* ??? */
500 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
503 if (context
& MVIOP_MU_QUEUE_REQUEST_RESULT_BIT
)
504 req
->header
.result
= IOP_RESULT_SUCCESS
;
506 switch (req
->header
.result
) {
507 case IOP_RESULT_SUCCESS
:
508 switch (ccb
->ccb_h
.flags
& CAM_DIR_MASK
) {
510 bus_dmamap_sync(hba
->io_dmat
,
511 srb
->dma_map
, BUS_DMASYNC_POSTREAD
);
512 bus_dmamap_unload(hba
->io_dmat
, srb
->dma_map
);
515 bus_dmamap_sync(hba
->io_dmat
,
516 srb
->dma_map
, BUS_DMASYNC_POSTWRITE
);
517 bus_dmamap_unload(hba
->io_dmat
, srb
->dma_map
);
520 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
522 case IOP_RESULT_BAD_TARGET
:
523 ccb
->ccb_h
.status
= CAM_DEV_NOT_THERE
;
525 case IOP_RESULT_BUSY
:
526 ccb
->ccb_h
.status
= CAM_BUSY
;
528 case IOP_RESULT_INVALID_REQUEST
:
529 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
531 case IOP_RESULT_FAIL
:
532 ccb
->ccb_h
.status
= CAM_SCSI_STATUS_ERROR
;
534 case IOP_RESULT_RESET
:
535 ccb
->ccb_h
.status
= CAM_BUSY
;
537 case IOP_RESULT_CHECK_CONDITION
:
538 memcpy(&ccb
->csio
.sense_data
, &req
->sg_list
,
539 MIN(req
->dataxfer_length
, sizeof(ccb
->csio
.sense_data
)));
540 ccb
->ccb_h
.status
= CAM_SCSI_STATUS_ERROR
;
541 ccb
->ccb_h
.status
|= CAM_AUTOSNS_VALID
;
542 ccb
->csio
.scsi_status
= SCSI_STATUS_CHECK_COND
;
545 ccb
->ccb_h
.status
= CAM_SCSI_STATUS_ERROR
;
549 ccb
->csio
.resid
= ccb
->csio
.dxfer_len
- req
->dataxfer_length
;
551 hptiop_free_srb(hba
, srb
);
553 } else if (context
& MVIOP_CMD_TYPE_IOCTL
) {
554 struct hpt_iop_request_ioctl_command
*req
= hba
->ctlcfg_ptr
;
555 if (context
& MVIOP_MU_QUEUE_REQUEST_RESULT_BIT
)
556 hba
->config_done
= 1;
558 hba
->config_done
= -1;
561 (MVIOP_CMD_TYPE_SET_CONFIG
|
562 MVIOP_CMD_TYPE_GET_CONFIG
))
563 hba
->config_done
= 1;
565 device_printf(hba
->pcidev
, "wrong callback type\n");
569 static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba
* hba
)
573 while ((req
= hptiop_mv_outbound_read(hba
))) {
574 if (req
& MVIOP_MU_QUEUE_ADDR_HOST_BIT
) {
575 if (req
& MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT
) {
576 hptiop_request_callback_mv(hba
, req
);
582 static int hptiop_intr_mv(struct hpt_iop_hba
* hba
)
587 status
= BUS_SPACE_RD4_MV0(outbound_doorbell
);
590 BUS_SPACE_WRT4_MV0(outbound_doorbell
, ~status
);
592 if (status
& MVIOP_MU_OUTBOUND_INT_MSG
) {
593 u_int32_t msg
= BUS_SPACE_RD4_MV2(outbound_msg
);
594 KdPrint(("hptiop: received outbound msg %x\n", msg
));
595 hptiop_os_message_callback(hba
, msg
);
599 if (status
& MVIOP_MU_OUTBOUND_INT_POSTQUEUE
) {
600 hptiop_drain_outbound_queue_mv(hba
);
607 static int hptiop_send_sync_request_itl(struct hpt_iop_hba
* hba
,
608 u_int32_t req32
, u_int32_t millisec
)
613 BUS_SPACE_WRT4_ITL(inbound_queue
, req32
);
614 BUS_SPACE_RD4_ITL(outbound_intstatus
);
616 for (i
= 0; i
< millisec
; i
++) {
617 hptiop_intr_itl(hba
);
618 bus_space_read_region_4(hba
->bar0t
, hba
->bar0h
, req32
+
619 offsetof(struct hpt_iop_request_header
, context
),
620 (u_int32_t
*)&temp64
, 2);
629 static int hptiop_send_sync_request_mv(struct hpt_iop_hba
*hba
,
630 void *req
, u_int32_t millisec
)
634 hba
->config_done
= 0;
636 phy_addr
= hba
->ctlcfgcmd_phy
|
637 (u_int64_t
)MVIOP_MU_QUEUE_ADDR_HOST_BIT
;
638 ((struct hpt_iop_request_get_config
*)req
)->header
.flags
|=
639 IOP_REQUEST_FLAG_SYNC_REQUEST
|
640 IOP_REQUEST_FLAG_OUTPUT_CONTEXT
;
641 hptiop_mv_inbound_write(phy_addr
, hba
);
642 BUS_SPACE_RD4_MV0(outbound_intmask
);
644 for (i
= 0; i
< millisec
; i
++) {
646 if (hba
->config_done
)
653 static int hptiop_send_sync_msg(struct hpt_iop_hba
*hba
,
654 u_int32_t msg
, u_int32_t millisec
)
659 hba
->ops
->post_msg(hba
, msg
);
661 for (i
=0; i
<millisec
; i
++) {
662 hba
->ops
->iop_intr(hba
);
668 return hba
->msg_done
? 0 : -1;
671 static int hptiop_get_config_itl(struct hpt_iop_hba
* hba
,
672 struct hpt_iop_request_get_config
* config
)
676 config
->header
.size
= sizeof(struct hpt_iop_request_get_config
);
677 config
->header
.type
= IOP_REQUEST_TYPE_GET_CONFIG
;
678 config
->header
.flags
= IOP_REQUEST_FLAG_SYNC_REQUEST
;
679 config
->header
.result
= IOP_RESULT_PENDING
;
680 config
->header
.context
= 0;
682 req32
= BUS_SPACE_RD4_ITL(inbound_queue
);
683 if (req32
== IOPMU_QUEUE_EMPTY
)
686 bus_space_write_region_4(hba
->bar0t
, hba
->bar0h
,
687 req32
, (u_int32_t
*)config
,
688 sizeof(struct hpt_iop_request_header
) >> 2);
690 if (hptiop_send_sync_request_itl(hba
, req32
, 20000)) {
691 KdPrint(("hptiop: get config send cmd failed"));
695 bus_space_read_region_4(hba
->bar0t
, hba
->bar0h
,
696 req32
, (u_int32_t
*)config
,
697 sizeof(struct hpt_iop_request_get_config
) >> 2);
699 BUS_SPACE_WRT4_ITL(outbound_queue
, req32
);
704 static int hptiop_get_config_mv(struct hpt_iop_hba
* hba
,
705 struct hpt_iop_request_get_config
* config
)
707 struct hpt_iop_request_get_config
*req
;
709 if (!(req
= hba
->ctlcfg_ptr
))
712 req
->header
.flags
= 0;
713 req
->header
.type
= IOP_REQUEST_TYPE_GET_CONFIG
;
714 req
->header
.size
= sizeof(struct hpt_iop_request_get_config
);
715 req
->header
.result
= IOP_RESULT_PENDING
;
716 req
->header
.context
= MVIOP_CMD_TYPE_GET_CONFIG
;
718 if (hptiop_send_sync_request_mv(hba
, req
, 20000)) {
719 KdPrint(("hptiop: get config send cmd failed"));
727 static int hptiop_set_config_itl(struct hpt_iop_hba
*hba
,
728 struct hpt_iop_request_set_config
*config
)
732 req32
= BUS_SPACE_RD4_ITL(inbound_queue
);
734 if (req32
== IOPMU_QUEUE_EMPTY
)
737 config
->header
.size
= sizeof(struct hpt_iop_request_set_config
);
738 config
->header
.type
= IOP_REQUEST_TYPE_SET_CONFIG
;
739 config
->header
.flags
= IOP_REQUEST_FLAG_SYNC_REQUEST
;
740 config
->header
.result
= IOP_RESULT_PENDING
;
741 config
->header
.context
= 0;
743 bus_space_write_region_4(hba
->bar0t
, hba
->bar0h
, req32
,
745 sizeof(struct hpt_iop_request_set_config
) >> 2);
747 if (hptiop_send_sync_request_itl(hba
, req32
, 20000)) {
748 KdPrint(("hptiop: set config send cmd failed"));
752 BUS_SPACE_WRT4_ITL(outbound_queue
, req32
);
757 static int hptiop_set_config_mv(struct hpt_iop_hba
*hba
,
758 struct hpt_iop_request_set_config
*config
)
760 struct hpt_iop_request_set_config
*req
;
762 if (!(req
= hba
->ctlcfg_ptr
))
765 memcpy((u_int8_t
*)req
+ sizeof(struct hpt_iop_request_header
),
766 (u_int8_t
*)config
+ sizeof(struct hpt_iop_request_header
),
767 sizeof(struct hpt_iop_request_set_config
) -
768 sizeof(struct hpt_iop_request_header
));
770 req
->header
.flags
= 0;
771 req
->header
.type
= IOP_REQUEST_TYPE_SET_CONFIG
;
772 req
->header
.size
= sizeof(struct hpt_iop_request_set_config
);
773 req
->header
.result
= IOP_RESULT_PENDING
;
774 req
->header
.context
= MVIOP_CMD_TYPE_SET_CONFIG
;
776 if (hptiop_send_sync_request_mv(hba
, req
, 20000)) {
777 KdPrint(("hptiop: set config send cmd failed"));
784 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba
*hba
,
786 struct hpt_iop_ioctl_param
*pParams
)
789 struct hpt_iop_request_ioctl_command req
;
791 if ((((pParams
->nInBufferSize
+ 3) & ~3) + pParams
->nOutBufferSize
) >
792 (hba
->max_request_size
-
793 offsetof(struct hpt_iop_request_ioctl_command
, buf
))) {
794 device_printf(hba
->pcidev
, "request size beyond max value");
798 req
.header
.size
= offsetof(struct hpt_iop_request_ioctl_command
, buf
)
799 + pParams
->nInBufferSize
;
800 req
.header
.type
= IOP_REQUEST_TYPE_IOCTL_COMMAND
;
801 req
.header
.flags
= IOP_REQUEST_FLAG_SYNC_REQUEST
;
802 req
.header
.result
= IOP_RESULT_PENDING
;
803 req
.header
.context
= req32
+ (u_int64_t
)(unsigned long)hba
->u
.itl
.mu
;
804 req
.ioctl_code
= HPT_CTL_CODE_BSD_TO_IOP(pParams
->dwIoControlCode
);
805 req
.inbuf_size
= pParams
->nInBufferSize
;
806 req
.outbuf_size
= pParams
->nOutBufferSize
;
807 req
.bytes_returned
= 0;
809 bus_space_write_region_4(hba
->bar0t
, hba
->bar0h
, req32
, (u_int32_t
*)&req
,
810 offsetof(struct hpt_iop_request_ioctl_command
, buf
)>>2);
812 hptiop_lock_adapter(hba
);
814 BUS_SPACE_WRT4_ITL(inbound_queue
, req32
);
815 BUS_SPACE_RD4_ITL(outbound_intstatus
);
817 bus_space_read_region_4(hba
->bar0t
, hba
->bar0h
, req32
+
818 offsetof(struct hpt_iop_request_ioctl_command
, header
.context
),
819 (u_int32_t
*)&temp64
, 2);
821 if (hptiop_sleep(hba
, (void *)((unsigned long)hba
->u
.itl
.mu
+ req32
),
822 0, "hptctl", HPT_OSM_TIMEOUT
)==0)
824 hptiop_send_sync_msg(hba
, IOPMU_INBOUND_MSG0_RESET
, 60000);
825 bus_space_read_region_4(hba
->bar0t
, hba
->bar0h
,req32
+
826 offsetof(struct hpt_iop_request_ioctl_command
,
828 (u_int32_t
*)&temp64
, 2);
831 hptiop_unlock_adapter(hba
);
835 static int hptiop_bus_space_copyin(struct hpt_iop_hba
*hba
, u_int32_t bus
, void *user
, int size
)
840 for (i
=0; i
<size
; i
++) {
841 if (copyin((u_int8_t
*)user
+ i
, &byte
, 1))
843 bus_space_write_1(hba
->bar0t
, hba
->bar0h
, bus
+ i
, byte
);
849 static int hptiop_bus_space_copyout(struct hpt_iop_hba
*hba
, u_int32_t bus
, void *user
, int size
)
854 for (i
=0; i
<size
; i
++) {
855 byte
= bus_space_read_1(hba
->bar0t
, hba
->bar0h
, bus
+ i
);
856 if (copyout(&byte
, (u_int8_t
*)user
+ i
, 1))
863 static int hptiop_do_ioctl_itl(struct hpt_iop_hba
*hba
,
864 struct hpt_iop_ioctl_param
* pParams
)
869 if ((pParams
->Magic
!= HPT_IOCTL_MAGIC
) &&
870 (pParams
->Magic
!= HPT_IOCTL_MAGIC32
))
873 req32
= BUS_SPACE_RD4_ITL(inbound_queue
);
874 if (req32
== IOPMU_QUEUE_EMPTY
)
877 if (pParams
->nInBufferSize
)
878 if (hptiop_bus_space_copyin(hba
, req32
+
879 offsetof(struct hpt_iop_request_ioctl_command
, buf
),
880 (void *)pParams
->lpInBuffer
, pParams
->nInBufferSize
))
883 if (hptiop_post_ioctl_command_itl(hba
, req32
, pParams
))
886 result
= bus_space_read_4(hba
->bar0t
, hba
->bar0h
, req32
+
887 offsetof(struct hpt_iop_request_ioctl_command
,
890 if (result
== IOP_RESULT_SUCCESS
) {
891 if (pParams
->nOutBufferSize
)
892 if (hptiop_bus_space_copyout(hba
, req32
+
893 offsetof(struct hpt_iop_request_ioctl_command
, buf
) +
894 ((pParams
->nInBufferSize
+ 3) & ~3),
895 (void *)pParams
->lpOutBuffer
, pParams
->nOutBufferSize
))
898 if (pParams
->lpBytesReturned
) {
899 if (hptiop_bus_space_copyout(hba
, req32
+
900 offsetof(struct hpt_iop_request_ioctl_command
, bytes_returned
),
901 (void *)pParams
->lpBytesReturned
, sizeof(unsigned long)))
905 BUS_SPACE_WRT4_ITL(outbound_queue
, req32
);
910 BUS_SPACE_WRT4_ITL(outbound_queue
, req32
);
916 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba
*hba
,
917 struct hpt_iop_request_ioctl_command
*req
,
918 struct hpt_iop_ioctl_param
*pParams
)
923 if ((((pParams
->nInBufferSize
+ 3) & ~3) + pParams
->nOutBufferSize
) >
924 (hba
->max_request_size
-
925 offsetof(struct hpt_iop_request_ioctl_command
, buf
))) {
926 device_printf(hba
->pcidev
, "request size beyond max value");
930 req
->ioctl_code
= HPT_CTL_CODE_BSD_TO_IOP(pParams
->dwIoControlCode
);
931 req
->inbuf_size
= pParams
->nInBufferSize
;
932 req
->outbuf_size
= pParams
->nOutBufferSize
;
933 req
->header
.size
= offsetof(struct hpt_iop_request_ioctl_command
, buf
)
934 + pParams
->nInBufferSize
;
935 req
->header
.context
= (u_int64_t
)MVIOP_CMD_TYPE_IOCTL
;
936 req
->header
.type
= IOP_REQUEST_TYPE_IOCTL_COMMAND
;
937 req
->header
.result
= IOP_RESULT_PENDING
;
938 req
->header
.flags
= IOP_REQUEST_FLAG_OUTPUT_CONTEXT
;
939 size
= req
->header
.size
>> 8;
940 size
= size
> 3 ? 3 : size
;
941 req_phy
= hba
->ctlcfgcmd_phy
| MVIOP_MU_QUEUE_ADDR_HOST_BIT
| size
;
942 hptiop_mv_inbound_write(req_phy
, hba
);
944 BUS_SPACE_RD4_MV0(outbound_intmask
);
946 while (hba
->config_done
== 0) {
947 if (hptiop_sleep(hba
, req
, 0,
948 "hptctl", HPT_OSM_TIMEOUT
)==0)
950 hptiop_send_sync_msg(hba
, IOPMU_INBOUND_MSG0_RESET
, 60000);
955 static int hptiop_do_ioctl_mv(struct hpt_iop_hba
*hba
,
956 struct hpt_iop_ioctl_param
*pParams
)
958 struct hpt_iop_request_ioctl_command
*req
;
960 if ((pParams
->Magic
!= HPT_IOCTL_MAGIC
) &&
961 (pParams
->Magic
!= HPT_IOCTL_MAGIC32
))
964 req
= (struct hpt_iop_request_ioctl_command
*)(hba
->ctlcfg_ptr
);
965 hba
->config_done
= 0;
966 hptiop_lock_adapter(hba
);
967 if (pParams
->nInBufferSize
)
968 if (copyin((void *)pParams
->lpInBuffer
,
969 req
->buf
, pParams
->nInBufferSize
))
971 if (hptiop_post_ioctl_command_mv(hba
, req
, pParams
))
974 if (hba
->config_done
== 1) {
975 if (pParams
->nOutBufferSize
)
976 if (copyout(req
->buf
+
977 ((pParams
->nInBufferSize
+ 3) & ~3),
978 (void *)pParams
->lpOutBuffer
,
979 pParams
->nOutBufferSize
))
982 if (pParams
->lpBytesReturned
)
983 if (copyout(&req
->bytes_returned
,
984 (void*)pParams
->lpBytesReturned
,
987 hptiop_unlock_adapter(hba
);
991 hptiop_unlock_adapter(hba
);
996 static int hptiop_rescan_bus(struct hpt_iop_hba
* hba
)
1000 if ((ccb
= xpt_alloc_ccb()) == NULL
)
1002 if (xpt_create_path(&ccb
->ccb_h
.path
, xpt_periph
, cam_sim_path(hba
->sim
),
1003 CAM_TARGET_WILDCARD
, CAM_LUN_WILDCARD
) != CAM_REQ_CMP
) {
1008 xpt_setup_ccb(&ccb
->ccb_h
, ccb
->ccb_h
.path
, 5/*priority (low)*/);
1009 ccb
->ccb_h
.func_code
= XPT_SCAN_BUS
;
1010 ccb
->ccb_h
.cbfcnp
= hptiop_bus_scan_cb
;
1011 ccb
->crcn
.flags
= CAM_FLAG_NONE
;
1016 static void hptiop_bus_scan_cb(struct cam_periph
*periph
, union ccb
*ccb
)
1018 xpt_free_path(ccb
->ccb_h
.path
);
1022 static bus_dmamap_callback_t hptiop_map_srb
;
1023 static bus_dmamap_callback_t hptiop_post_scsi_command
;
1024 static bus_dmamap_callback_t hptiop_mv_map_ctlcfg
;
1026 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba
*hba
)
1028 hba
->bar0_rid
= 0x10;
1029 hba
->bar0_res
= bus_alloc_resource_any(hba
->pcidev
,
1030 SYS_RES_MEMORY
, &hba
->bar0_rid
, RF_ACTIVE
);
1032 if (hba
->bar0_res
== NULL
) {
1033 device_printf(hba
->pcidev
,
1034 "failed to get iop base adrress.\n");
1037 hba
->bar0t
= rman_get_bustag(hba
->bar0_res
);
1038 hba
->bar0h
= rman_get_bushandle(hba
->bar0_res
);
1039 hba
->u
.itl
.mu
= (struct hpt_iopmu_itl
*)
1040 rman_get_virtual(hba
->bar0_res
);
1042 if (!hba
->u
.itl
.mu
) {
1043 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1044 hba
->bar0_rid
, hba
->bar0_res
);
1045 device_printf(hba
->pcidev
, "alloc mem res failed\n");
1052 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba
*hba
)
1054 hba
->bar0_rid
= 0x10;
1055 hba
->bar0_res
= bus_alloc_resource_any(hba
->pcidev
,
1056 SYS_RES_MEMORY
, &hba
->bar0_rid
, RF_ACTIVE
);
1058 if (hba
->bar0_res
== NULL
) {
1059 device_printf(hba
->pcidev
, "failed to get iop bar0.\n");
1062 hba
->bar0t
= rman_get_bustag(hba
->bar0_res
);
1063 hba
->bar0h
= rman_get_bushandle(hba
->bar0_res
);
1064 hba
->u
.mv
.regs
= (struct hpt_iopmv_regs
*)
1065 rman_get_virtual(hba
->bar0_res
);
1067 if (!hba
->u
.mv
.regs
) {
1068 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1069 hba
->bar0_rid
, hba
->bar0_res
);
1070 device_printf(hba
->pcidev
, "alloc bar0 mem res failed\n");
1074 hba
->bar2_rid
= 0x18;
1075 hba
->bar2_res
= bus_alloc_resource_any(hba
->pcidev
,
1076 SYS_RES_MEMORY
, &hba
->bar2_rid
, RF_ACTIVE
);
1078 if (hba
->bar2_res
== NULL
) {
1079 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1080 hba
->bar0_rid
, hba
->bar0_res
);
1081 device_printf(hba
->pcidev
, "failed to get iop bar2.\n");
1085 hba
->bar2t
= rman_get_bustag(hba
->bar2_res
);
1086 hba
->bar2h
= rman_get_bushandle(hba
->bar2_res
);
1087 hba
->u
.mv
.mu
= (struct hpt_iopmu_mv
*)rman_get_virtual(hba
->bar2_res
);
1089 if (!hba
->u
.mv
.mu
) {
1090 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1091 hba
->bar0_rid
, hba
->bar0_res
);
1092 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1093 hba
->bar2_rid
, hba
->bar2_res
);
1094 device_printf(hba
->pcidev
, "alloc mem bar2 res failed\n");
1101 static void hptiop_release_pci_res_itl(struct hpt_iop_hba
*hba
)
1104 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1105 hba
->bar0_rid
, hba
->bar0_res
);
1108 static void hptiop_release_pci_res_mv(struct hpt_iop_hba
*hba
)
1111 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1112 hba
->bar0_rid
, hba
->bar0_res
);
1114 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1115 hba
->bar2_rid
, hba
->bar2_res
);
1118 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba
*hba
)
1120 if (bus_dma_tag_create(hba
->parent_dmat
,
1123 BUS_SPACE_MAXADDR_32BIT
,
1128 BUS_SPACE_MAXSIZE_32BIT
,
1130 &hba
->ctlcfg_dmat
)) {
1131 device_printf(hba
->pcidev
, "alloc ctlcfg_dmat failed\n");
1135 if (bus_dmamem_alloc(hba
->ctlcfg_dmat
, (void **)&hba
->ctlcfg_ptr
,
1136 BUS_DMA_WAITOK
| BUS_DMA_COHERENT
,
1137 &hba
->ctlcfg_dmamap
) != 0) {
1138 device_printf(hba
->pcidev
,
1139 "bus_dmamem_alloc failed!\n");
1140 bus_dma_tag_destroy(hba
->ctlcfg_dmat
);
1144 if (bus_dmamap_load(hba
->ctlcfg_dmat
,
1145 hba
->ctlcfg_dmamap
, hba
->ctlcfg_ptr
,
1146 MVIOP_IOCTLCFG_SIZE
,
1147 hptiop_mv_map_ctlcfg
, hba
, 0)) {
1148 device_printf(hba
->pcidev
, "bus_dmamap_load failed!\n");
1149 if (hba
->ctlcfg_dmat
)
1150 bus_dmamem_free(hba
->ctlcfg_dmat
,
1151 hba
->ctlcfg_ptr
, hba
->ctlcfg_dmamap
);
1152 bus_dma_tag_destroy(hba
->ctlcfg_dmat
);
1159 static int hptiop_internal_memfree_mv(struct hpt_iop_hba
*hba
)
1161 if (hba
->ctlcfg_dmat
) {
1162 bus_dmamap_unload(hba
->ctlcfg_dmat
, hba
->ctlcfg_dmamap
);
1163 bus_dmamem_free(hba
->ctlcfg_dmat
,
1164 hba
->ctlcfg_ptr
, hba
->ctlcfg_dmamap
);
1165 bus_dma_tag_destroy(hba
->ctlcfg_dmat
);
1172 * CAM driver interface
/* Device interface methods exported to the newbus framework. */
static device_method_t driver_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,    hptiop_probe),
	DEVMETHOD(device_attach,   hptiop_attach),
	DEVMETHOD(device_detach,   hptiop_detach),
	DEVMETHOD(device_shutdown, hptiop_shutdown),
	/* NOTE(review): terminator elided in this extraction; standard
	 * null entry restored -- confirm against the pristine source. */
	{ 0, 0 }
};
/*
 * Adapter operation table for ITL-class controllers.  These adapters
 * need no driver-allocated internal DMA buffer, so the
 * internal_memalloc/internal_memfree hooks are left unset (0).
 */
static struct hptiop_adapter_ops hptiop_itl_ops = {
	.iop_wait_ready    = hptiop_wait_ready_itl,
	.internal_memalloc = 0,
	.internal_memfree  = 0,
	.alloc_pci_res     = hptiop_alloc_pci_res_itl,
	.release_pci_res   = hptiop_release_pci_res_itl,
	.enable_intr       = hptiop_enable_intr_itl,
	.disable_intr      = hptiop_disable_intr_itl,
	.get_config        = hptiop_get_config_itl,
	.set_config        = hptiop_set_config_itl,
	.iop_intr          = hptiop_intr_itl,
	.post_msg          = hptiop_post_msg_itl,
	.post_req          = hptiop_post_req_itl,
	.do_ioctl          = hptiop_do_ioctl_itl,
};
/*
 * Adapter operation table for MV-class controllers.  Unlike the ITL
 * table, these adapters allocate an internal control/config DMA buffer
 * via the internal_memalloc/internal_memfree hooks.
 */
static struct hptiop_adapter_ops hptiop_mv_ops = {
	.iop_wait_ready    = hptiop_wait_ready_mv,
	.internal_memalloc = hptiop_internal_memalloc_mv,
	.internal_memfree  = hptiop_internal_memfree_mv,
	.alloc_pci_res     = hptiop_alloc_pci_res_mv,
	.release_pci_res   = hptiop_release_pci_res_mv,
	.enable_intr       = hptiop_enable_intr_mv,
	.disable_intr      = hptiop_disable_intr_mv,
	.get_config        = hptiop_get_config_mv,
	.set_config        = hptiop_set_config_mv,
	.iop_intr          = hptiop_intr_mv,
	.post_msg          = hptiop_post_msg_mv,
	.post_req          = hptiop_post_req_mv,
	.do_ioctl          = hptiop_do_ioctl_mv,
};
/*
 * newbus driver description: name, method table and softc size.
 * NOTE(review): the name/methods fields were elided in this extraction
 * and are reconstructed from driver_name/driver_methods used elsewhere
 * in this file -- confirm against the pristine source.
 */
static driver_t hptiop_pci_driver = {
	driver_name,
	driver_methods,
	sizeof(struct hpt_iop_hba)
};
/* Register the driver on the PCI bus and export module version 1. */
DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, NULL, NULL);
MODULE_VERSION(hptiop, 1);
1224 static int hptiop_probe(device_t dev
)
1226 struct hpt_iop_hba
*hba
;
1228 static char buf
[256];
1230 struct hptiop_adapter_ops
*ops
;
1232 if (pci_get_vendor(dev
) != 0x1103)
1235 id
= pci_get_device(dev
);
1251 ops
= &hptiop_itl_ops
;
1256 ops
= &hptiop_mv_ops
;
1262 device_printf(dev
, "adapter at PCI %d:%d:%d, IRQ %d\n",
1263 pci_get_bus(dev
), pci_get_slot(dev
),
1264 pci_get_function(dev
), pci_get_irq(dev
));
1266 ksprintf(buf
, "RocketRAID %x %s Controller",
1267 id
, sas
? "SAS" : "SATA");
1268 device_set_desc_copy(dev
, buf
);
1270 hba
= (struct hpt_iop_hba
*)device_get_softc(dev
);
1271 bzero(hba
, sizeof(struct hpt_iop_hba
));
1274 KdPrint(("hba->ops=%p\n", hba
->ops
));
1278 static int hptiop_attach(device_t dev
)
1280 struct hpt_iop_hba
*hba
= (struct hpt_iop_hba
*)device_get_softc(dev
);
1281 struct hpt_iop_request_get_config iop_config
;
1282 struct hpt_iop_request_set_config set_config
;
1284 struct cam_devq
*devq
;
1285 struct ccb_setasync ccb
;
1286 u_int32_t unit
= device_get_unit(dev
);
1288 device_printf(dev
, "RocketRAID 3xxx/4xxx controller driver %s\n",
1291 KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit
,
1292 pci_get_bus(dev
), pci_get_slot(dev
),
1293 pci_get_function(dev
), hba
->ops
));
1295 pci_enable_busmaster(dev
);
1298 if (hba
->ops
->alloc_pci_res(hba
))
1301 if (hba
->ops
->iop_wait_ready(hba
, 2000)) {
1302 device_printf(dev
, "adapter is not ready\n");
1303 goto release_pci_res
;
1306 lockinit(&hba
->lock
, "hptioplock", 0, LK_CANRECURSE
);
1308 if (bus_dma_tag_create(NULL
,/* parent */
1311 BUS_SPACE_MAXADDR
, /* lowaddr */
1312 BUS_SPACE_MAXADDR
, /* highaddr */
1313 NULL
, NULL
, /* filter, filterarg */
1314 BUS_SPACE_MAXSIZE_32BIT
, /* maxsize */
1315 BUS_SPACE_UNRESTRICTED
, /* nsegments */
1316 BUS_SPACE_MAXSIZE_32BIT
, /* maxsegsize */
1318 &hba
->parent_dmat
/* tag */))
1320 device_printf(dev
, "alloc parent_dmat failed\n");
1321 goto release_pci_res
;
1324 if (hba
->ops
->internal_memalloc
) {
1325 if (hba
->ops
->internal_memalloc(hba
)) {
1326 device_printf(dev
, "alloc srb_dmat failed\n");
1327 goto destroy_parent_tag
;
1331 if (hba
->ops
->get_config(hba
, &iop_config
)) {
1332 device_printf(dev
, "get iop config failed.\n");
1333 goto get_config_failed
;
1336 hba
->firmware_version
= iop_config
.firmware_version
;
1337 hba
->interface_version
= iop_config
.interface_version
;
1338 hba
->max_requests
= iop_config
.max_requests
;
1339 hba
->max_devices
= iop_config
.max_devices
;
1340 hba
->max_request_size
= iop_config
.request_size
;
1341 hba
->max_sg_count
= iop_config
.max_sg_count
;
1343 if (bus_dma_tag_create(hba
->parent_dmat
,/* parent */
1345 BUS_SPACE_MAXADDR_32BIT
+1, /* boundary */
1346 BUS_SPACE_MAXADDR
, /* lowaddr */
1347 BUS_SPACE_MAXADDR
, /* highaddr */
1348 NULL
, NULL
, /* filter, filterarg */
1349 PAGE_SIZE
* (hba
->max_sg_count
-1), /* maxsize */
1350 hba
->max_sg_count
, /* nsegments */
1351 0x20000, /* maxsegsize */
1352 BUS_DMA_ALLOCNOW
, /* flags */
1353 &hba
->io_dmat
/* tag */))
1355 device_printf(dev
, "alloc io_dmat failed\n");
1356 goto get_config_failed
;
1359 if (bus_dma_tag_create(hba
->parent_dmat
,/* parent */
1362 BUS_SPACE_MAXADDR_32BIT
, /* lowaddr */
1363 BUS_SPACE_MAXADDR
, /* highaddr */
1364 NULL
, NULL
, /* filter, filterarg */
1365 HPT_SRB_MAX_SIZE
* HPT_SRB_MAX_QUEUE_SIZE
+ 0x20,
1367 BUS_SPACE_MAXSIZE_32BIT
, /* maxsegsize */
1369 &hba
->srb_dmat
/* tag */))
1371 device_printf(dev
, "alloc srb_dmat failed\n");
1372 goto destroy_io_dmat
;
1375 if (bus_dmamem_alloc(hba
->srb_dmat
, (void **)&hba
->uncached_ptr
,
1376 BUS_DMA_WAITOK
| BUS_DMA_COHERENT
,
1377 &hba
->srb_dmamap
) != 0)
1379 device_printf(dev
, "srb bus_dmamem_alloc failed!\n");
1380 goto destroy_srb_dmat
;
1383 if (bus_dmamap_load(hba
->srb_dmat
,
1384 hba
->srb_dmamap
, hba
->uncached_ptr
,
1385 (HPT_SRB_MAX_SIZE
* HPT_SRB_MAX_QUEUE_SIZE
) + 0x20,
1386 hptiop_map_srb
, hba
, 0))
1388 device_printf(dev
, "bus_dmamap_load failed!\n");
1389 goto srb_dmamem_free
;
1392 if ((devq
= cam_simq_alloc(hba
->max_requests
- 1 )) == NULL
) {
1393 device_printf(dev
, "cam_simq_alloc failed\n");
1394 goto srb_dmamap_unload
;
1397 hba
->sim
= cam_sim_alloc(hptiop_action
, hptiop_poll
, driver_name
,
1398 hba
, unit
, &sim_mplock
, hba
->max_requests
- 1, 1, devq
);
1400 device_printf(dev
, "cam_sim_alloc failed\n");
1401 cam_simq_release(devq
);
1402 goto srb_dmamap_unload
;
1404 if (xpt_bus_register(hba
->sim
, 0) != CAM_SUCCESS
)
1406 device_printf(dev
, "xpt_bus_register failed\n");
1410 if (xpt_create_path(&hba
->path
, /*periph */ NULL
,
1411 cam_sim_path(hba
->sim
), CAM_TARGET_WILDCARD
,
1412 CAM_LUN_WILDCARD
) != CAM_REQ_CMP
) {
1413 device_printf(dev
, "xpt_create_path failed\n");
1414 goto deregister_xpt_bus
;
1417 bzero(&set_config
, sizeof(set_config
));
1418 set_config
.iop_id
= unit
;
1419 set_config
.vbus_id
= cam_sim_path(hba
->sim
);
1420 set_config
.max_host_request_size
= HPT_SRB_MAX_REQ_SIZE
;
1422 if (hba
->ops
->set_config(hba
, &set_config
)) {
1423 device_printf(dev
, "set iop config failed.\n");
1427 xpt_setup_ccb(&ccb
.ccb_h
, hba
->path
, /*priority*/5);
1428 ccb
.ccb_h
.func_code
= XPT_SASYNC_CB
;
1429 ccb
.event_enable
= (AC_FOUND_DEVICE
| AC_LOST_DEVICE
);
1430 ccb
.callback
= hptiop_async
;
1431 ccb
.callback_arg
= hba
->sim
;
1432 xpt_action((union ccb
*)&ccb
);
1435 if ((hba
->irq_res
= bus_alloc_resource(hba
->pcidev
, SYS_RES_IRQ
,
1436 &rid
, 0, ~0ul, 1, RF_SHAREABLE
| RF_ACTIVE
)) == NULL
) {
1437 device_printf(dev
, "allocate irq failed!\n");
1441 if (bus_setup_intr(hba
->pcidev
, hba
->irq_res
, 0,
1442 hptiop_pci_intr
, hba
, &hba
->irq_handle
, NULL
))
1444 device_printf(dev
, "allocate intr function failed!\n");
1445 goto free_irq_resource
;
1448 if (hptiop_send_sync_msg(hba
,
1449 IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK
, 5000)) {
1450 device_printf(dev
, "fail to start background task\n");
1451 goto teartown_irq_resource
;
1454 hba
->ops
->enable_intr(hba
);
1456 hba
->ioctl_dev
= make_dev(&hptiop_ops
, unit
,
1457 UID_ROOT
, GID_WHEEL
/*GID_OPERATOR*/,
1458 S_IRUSR
| S_IWUSR
, "%s%d", driver_name
, unit
);
1460 hba
->ioctl_dev
->si_drv1
= hba
;
1462 hptiop_rescan_bus(hba
);
1467 teartown_irq_resource
:
1468 bus_teardown_intr(dev
, hba
->irq_res
, hba
->irq_handle
);
1471 bus_release_resource(dev
, SYS_RES_IRQ
, 0, hba
->irq_res
);
1474 xpt_free_path(hba
->path
);
1477 xpt_bus_deregister(cam_sim_path(hba
->sim
));
1480 cam_sim_free(hba
->sim
);
1483 if (hba
->uncached_ptr
)
1484 bus_dmamap_unload(hba
->srb_dmat
, hba
->srb_dmamap
);
1487 if (hba
->uncached_ptr
)
1488 bus_dmamem_free(hba
->srb_dmat
,
1489 hba
->uncached_ptr
, hba
->srb_dmamap
);
1493 bus_dma_tag_destroy(hba
->srb_dmat
);
1497 bus_dma_tag_destroy(hba
->io_dmat
);
1500 if (hba
->ops
->internal_memfree
)
1501 hba
->ops
->internal_memfree(hba
);
1504 if (hba
->parent_dmat
)
1505 bus_dma_tag_destroy(hba
->parent_dmat
);
1508 if (hba
->ops
->release_pci_res
)
1509 hba
->ops
->release_pci_res(hba
);
1514 static int hptiop_detach(device_t dev
)
1516 struct hpt_iop_hba
* hba
= (struct hpt_iop_hba
*)device_get_softc(dev
);
1520 hptiop_lock_adapter(hba
);
1521 for (i
= 0; i
< hba
->max_devices
; i
++)
1522 if (hptiop_os_query_remove_device(hba
, i
)) {
1523 device_printf(dev
, "file system is busy. id=%d", i
);
1527 if ((error
= hptiop_shutdown(dev
)) != 0)
1529 if (hptiop_send_sync_msg(hba
,
1530 IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK
, 60000))
1533 hptiop_release_resource(hba
);
1536 hptiop_unlock_adapter(hba
);
1540 static int hptiop_shutdown(device_t dev
)
1542 struct hpt_iop_hba
* hba
= (struct hpt_iop_hba
*)device_get_softc(dev
);
1546 if (hba
->flag
& HPT_IOCTL_FLAG_OPEN
) {
1547 device_printf(dev
, "device is busy");
1551 hba
->ops
->disable_intr(hba
);
1553 if (hptiop_send_sync_msg(hba
, IOPMU_INBOUND_MSG0_SHUTDOWN
, 60000))
1559 static void hptiop_pci_intr(void *arg
)
1561 struct hpt_iop_hba
* hba
= (struct hpt_iop_hba
*)arg
;
1562 hptiop_lock_adapter(hba
);
1563 hba
->ops
->iop_intr(hba
);
1564 hptiop_unlock_adapter(hba
);
/*
 * CAM polling entry point: service the controller exactly as if an
 * interrupt had fired for this SIM.
 */
static void hptiop_poll(struct cam_sim *sim)
{
	void *softc = cam_sim_softc(sim);

	hptiop_pci_intr(softc);
}
/* CAM asynchronous event callback; events are ignored (empty body). */
static void hptiop_async(void * callback_arg, u_int32_t code,
	struct cam_path * path, void * arg)
{
}
/*
 * Enable ITL interrupts: write the outbound interrupt mask with every
 * bit set except post-queue and message-0 (a set mask bit blocks that
 * interrupt source, as hptiop_disable_intr_itl() shows).
 */
static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
{
	BUS_SPACE_WRT4_ITL(outbound_intmask,
		~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
}
1583 static void hptiop_enable_intr_mv(struct hpt_iop_hba
*hba
)
1587 int_mask
= BUS_SPACE_RD4_MV0(outbound_intmask
);
1589 int_mask
|= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
1590 | MVIOP_MU_OUTBOUND_INT_MSG
;
1591 BUS_SPACE_WRT4_MV0(outbound_intmask
,int_mask
);
1594 static void hptiop_disable_intr_itl(struct hpt_iop_hba
*hba
)
1598 int_mask
= BUS_SPACE_RD4_ITL(outbound_intmask
);
1600 int_mask
|= IOPMU_OUTBOUND_INT_POSTQUEUE
| IOPMU_OUTBOUND_INT_MSG0
;
1601 BUS_SPACE_WRT4_ITL(outbound_intmask
, int_mask
);
1602 BUS_SPACE_RD4_ITL(outbound_intstatus
);
1605 static void hptiop_disable_intr_mv(struct hpt_iop_hba
*hba
)
1608 int_mask
= BUS_SPACE_RD4_MV0(outbound_intmask
);
1610 int_mask
&= ~(MVIOP_MU_OUTBOUND_INT_MSG
1611 | MVIOP_MU_OUTBOUND_INT_POSTQUEUE
);
1612 BUS_SPACE_WRT4_MV0(outbound_intmask
,int_mask
);
1613 BUS_SPACE_RD4_MV0(outbound_intmask
);
/*
 * Request a firmware reset via inbound message 0 and wait for it to
 * complete via hptiop_send_sync_msg() with a 60000 (presumably ms)
 * timeout.  Returns the sync-message result.
 */
static int hptiop_reset_adapter(struct hpt_iop_hba * hba)
{
	return hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
}
1621 static void *hptiop_get_srb(struct hpt_iop_hba
* hba
)
1623 struct hpt_iop_srb
* srb
;
1625 if (hba
->srb_list
) {
1626 srb
= hba
->srb_list
;
1627 hba
->srb_list
= srb
->next
;
/*
 * Push an SRB back onto the head of the adapter's free list.
 * NOTE(review): callers appear to hold the adapter lock -- confirm.
 */
static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
{
	srb->next = hba->srb_list;
	hba->srb_list = srb;
}
1640 static void hptiop_action(struct cam_sim
*sim
, union ccb
*ccb
)
1642 struct hpt_iop_hba
* hba
= (struct hpt_iop_hba
*)cam_sim_softc(sim
);
1643 struct hpt_iop_srb
* srb
;
1645 switch (ccb
->ccb_h
.func_code
) {
1648 hptiop_lock_adapter(hba
);
1649 if (ccb
->ccb_h
.target_lun
!= 0 ||
1650 ccb
->ccb_h
.target_id
>= hba
->max_devices
||
1651 (ccb
->ccb_h
.flags
& CAM_CDB_PHYS
))
1653 ccb
->ccb_h
.status
= CAM_TID_INVALID
;
1658 if ((srb
= hptiop_get_srb(hba
)) == NULL
) {
1659 device_printf(hba
->pcidev
, "srb allocated failed");
1660 ccb
->ccb_h
.status
= CAM_REQ_CMP_ERR
;
1667 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_NONE
)
1668 hptiop_post_scsi_command(srb
, NULL
, 0, 0);
1669 else if ((ccb
->ccb_h
.flags
& CAM_SCATTER_VALID
) == 0) {
1670 if ((ccb
->ccb_h
.flags
& CAM_DATA_PHYS
) == 0) {
1673 error
= bus_dmamap_load(hba
->io_dmat
,
1676 ccb
->csio
.dxfer_len
,
1677 hptiop_post_scsi_command
,
1680 if (error
&& error
!= EINPROGRESS
) {
1681 device_printf(hba
->pcidev
,
1682 "bus_dmamap_load error %d", error
);
1683 xpt_freeze_simq(hba
->sim
, 1);
1684 ccb
->ccb_h
.status
= CAM_REQ_CMP_ERR
;
1686 hptiop_free_srb(hba
, srb
);
1692 device_printf(hba
->pcidev
,
1693 "CAM_DATA_PHYS not supported");
1694 ccb
->ccb_h
.status
= CAM_REQ_CMP_ERR
;
1699 struct bus_dma_segment
*segs
;
1701 if ((ccb
->ccb_h
.flags
& CAM_SG_LIST_PHYS
) == 0 ||
1702 (ccb
->ccb_h
.flags
& CAM_DATA_PHYS
) != 0) {
1703 device_printf(hba
->pcidev
, "SCSI cmd failed");
1704 ccb
->ccb_h
.status
=CAM_PROVIDE_FAIL
;
1708 segs
= (struct bus_dma_segment
*)ccb
->csio
.data_ptr
;
1709 hptiop_post_scsi_command(srb
, segs
,
1710 ccb
->csio
.sglist_cnt
, 0);
1714 hptiop_unlock_adapter(hba
);
1718 device_printf(hba
->pcidev
, "reset adapter");
1719 hptiop_lock_adapter(hba
);
1721 hptiop_reset_adapter(hba
);
1722 hptiop_unlock_adapter(hba
);
1725 case XPT_GET_TRAN_SETTINGS
:
1726 case XPT_SET_TRAN_SETTINGS
:
1727 ccb
->ccb_h
.status
= CAM_FUNC_NOTAVAIL
;
1730 case XPT_CALC_GEOMETRY
:
1731 ccb
->ccg
.heads
= 255;
1732 ccb
->ccg
.secs_per_track
= 63;
1733 ccb
->ccg
.cylinders
= ccb
->ccg
.volume_size
/
1734 (ccb
->ccg
.heads
* ccb
->ccg
.secs_per_track
);
1735 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
1740 struct ccb_pathinq
*cpi
= &ccb
->cpi
;
1742 cpi
->version_num
= 1;
1743 cpi
->hba_inquiry
= PI_SDTR_ABLE
;
1744 cpi
->target_sprt
= 0;
1745 cpi
->hba_misc
= PIM_NOBUSRESET
;
1746 cpi
->hba_eng_cnt
= 0;
1747 cpi
->max_target
= hba
->max_devices
;
1749 cpi
->unit_number
= cam_sim_unit(sim
);
1750 cpi
->bus_id
= cam_sim_bus(sim
);
1751 cpi
->initiator_id
= hba
->max_devices
;
1752 cpi
->base_transfer_speed
= 3300;
1754 strncpy(cpi
->sim_vid
, "FreeBSD", SIM_IDLEN
);
1755 strncpy(cpi
->hba_vid
, "HPT ", HBA_IDLEN
);
1756 strncpy(cpi
->dev_name
, cam_sim_name(sim
), DEV_IDLEN
);
1757 cpi
->transport
= XPORT_SPI
;
1758 cpi
->transport_version
= 2;
1759 cpi
->protocol
= PROTO_SCSI
;
1760 cpi
->protocol_version
= SCSI_REV_2
;
1761 cpi
->ccb_h
.status
= CAM_REQ_CMP
;
1766 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
1774 static void hptiop_post_req_itl(struct hpt_iop_hba
*hba
,
1775 struct hpt_iop_srb
*srb
,
1776 bus_dma_segment_t
*segs
, int nsegs
)
1779 union ccb
*ccb
= srb
->ccb
;
1782 if (ccb
->ccb_h
.flags
& CAM_CDB_POINTER
)
1783 cdb
= ccb
->csio
.cdb_io
.cdb_ptr
;
1785 cdb
= ccb
->csio
.cdb_io
.cdb_bytes
;
1787 KdPrint(("ccb=%p %x-%x-%x\n",
1788 ccb
, *(u_int32_t
*)cdb
, *((u_int32_t
*)cdb
+1), *((u_int32_t
*)cdb
+2)));
1790 if (srb
->srb_flag
& HPT_SRB_FLAG_HIGH_MEM_ACESS
) {
1791 u_int32_t iop_req32
;
1792 struct hpt_iop_request_scsi_command req
;
1794 iop_req32
= BUS_SPACE_RD4_ITL(inbound_queue
);
1796 if (iop_req32
== IOPMU_QUEUE_EMPTY
) {
1797 device_printf(hba
->pcidev
, "invaild req offset\n");
1798 ccb
->ccb_h
.status
= CAM_BUSY
;
1799 bus_dmamap_unload(hba
->io_dmat
, srb
->dma_map
);
1800 hptiop_free_srb(hba
, srb
);
1805 if (ccb
->csio
.dxfer_len
&& nsegs
> 0) {
1806 struct hpt_iopsg
*psg
= req
.sg_list
;
1807 for (idx
= 0; idx
< nsegs
; idx
++, psg
++) {
1808 psg
->pci_address
= (u_int64_t
)segs
[idx
].ds_addr
;
1809 psg
->size
= segs
[idx
].ds_len
;
1815 bcopy(cdb
, req
.cdb
, ccb
->csio
.cdb_len
);
1817 req
.header
.size
= offsetof(struct hpt_iop_request_scsi_command
, sg_list
)
1818 + nsegs
*sizeof(struct hpt_iopsg
);
1819 req
.header
.type
= IOP_REQUEST_TYPE_SCSI_COMMAND
;
1820 req
.header
.flags
= 0;
1821 req
.header
.result
= IOP_RESULT_PENDING
;
1822 req
.header
.context
= (u_int64_t
)(unsigned long)srb
;
1823 req
.dataxfer_length
= ccb
->csio
.dxfer_len
;
1825 req
.target
= ccb
->ccb_h
.target_id
;
1826 req
.lun
= ccb
->ccb_h
.target_lun
;
1828 bus_space_write_region_1(hba
->bar0t
, hba
->bar0h
, iop_req32
,
1829 (u_int8_t
*)&req
, req
.header
.size
);
1831 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
) {
1832 bus_dmamap_sync(hba
->io_dmat
,
1833 srb
->dma_map
, BUS_DMASYNC_PREREAD
);
1835 else if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_OUT
)
1836 bus_dmamap_sync(hba
->io_dmat
,
1837 srb
->dma_map
, BUS_DMASYNC_PREWRITE
);
1839 BUS_SPACE_WRT4_ITL(inbound_queue
,iop_req32
);
1841 struct hpt_iop_request_scsi_command
*req
;
1843 req
= (struct hpt_iop_request_scsi_command
*)srb
;
1844 if (ccb
->csio
.dxfer_len
&& nsegs
> 0) {
1845 struct hpt_iopsg
*psg
= req
->sg_list
;
1846 for (idx
= 0; idx
< nsegs
; idx
++, psg
++) {
1848 (u_int64_t
)segs
[idx
].ds_addr
;
1849 psg
->size
= segs
[idx
].ds_len
;
1855 bcopy(cdb
, req
->cdb
, ccb
->csio
.cdb_len
);
1857 req
->header
.type
= IOP_REQUEST_TYPE_SCSI_COMMAND
;
1858 req
->header
.result
= IOP_RESULT_PENDING
;
1859 req
->dataxfer_length
= ccb
->csio
.dxfer_len
;
1861 req
->target
= ccb
->ccb_h
.target_id
;
1862 req
->lun
= ccb
->ccb_h
.target_lun
;
1863 req
->header
.size
= offsetof(struct hpt_iop_request_scsi_command
, sg_list
)
1864 + nsegs
*sizeof(struct hpt_iopsg
);
1865 req
->header
.context
= (u_int64_t
)srb
->index
|
1866 IOPMU_QUEUE_ADDR_HOST_BIT
;
1867 req
->header
.flags
= IOP_REQUEST_FLAG_OUTPUT_CONTEXT
;
1869 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
) {
1870 bus_dmamap_sync(hba
->io_dmat
,
1871 srb
->dma_map
, BUS_DMASYNC_PREREAD
);
1872 }else if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_OUT
) {
1873 bus_dmamap_sync(hba
->io_dmat
,
1874 srb
->dma_map
, BUS_DMASYNC_PREWRITE
);
1877 if (hba
->firmware_version
> 0x01020000
1878 || hba
->interface_version
> 0x01020000) {
1879 u_int32_t size_bits
;
1881 if (req
->header
.size
< 256)
1882 size_bits
= IOPMU_QUEUE_REQUEST_SIZE_BIT
;
1883 else if (req
->header
.size
< 512)
1884 size_bits
= IOPMU_QUEUE_ADDR_HOST_BIT
;
1886 size_bits
= IOPMU_QUEUE_REQUEST_SIZE_BIT
1887 | IOPMU_QUEUE_ADDR_HOST_BIT
;
1889 BUS_SPACE_WRT4_ITL(inbound_queue
,
1890 (u_int32_t
)srb
->phy_addr
| size_bits
);
1892 BUS_SPACE_WRT4_ITL(inbound_queue
, (u_int32_t
)srb
->phy_addr
1893 |IOPMU_QUEUE_ADDR_HOST_BIT
);
1897 static void hptiop_post_req_mv(struct hpt_iop_hba
*hba
,
1898 struct hpt_iop_srb
*srb
,
1899 bus_dma_segment_t
*segs
, int nsegs
)
1902 union ccb
*ccb
= srb
->ccb
;
1904 struct hpt_iop_request_scsi_command
*req
;
1907 req
= (struct hpt_iop_request_scsi_command
*)srb
;
1908 req_phy
= srb
->phy_addr
;
1910 if (ccb
->csio
.dxfer_len
&& nsegs
> 0) {
1911 struct hpt_iopsg
*psg
= req
->sg_list
;
1912 for (idx
= 0; idx
< nsegs
; idx
++, psg
++) {
1913 psg
->pci_address
= (u_int64_t
)segs
[idx
].ds_addr
;
1914 psg
->size
= segs
[idx
].ds_len
;
1919 if (ccb
->ccb_h
.flags
& CAM_CDB_POINTER
)
1920 cdb
= ccb
->csio
.cdb_io
.cdb_ptr
;
1922 cdb
= ccb
->csio
.cdb_io
.cdb_bytes
;
1924 bcopy(cdb
, req
->cdb
, ccb
->csio
.cdb_len
);
1925 req
->header
.type
= IOP_REQUEST_TYPE_SCSI_COMMAND
;
1926 req
->header
.result
= IOP_RESULT_PENDING
;
1927 req
->dataxfer_length
= ccb
->csio
.dxfer_len
;
1929 req
->target
= ccb
->ccb_h
.target_id
;
1930 req
->lun
= ccb
->ccb_h
.target_lun
;
1931 req
->header
.size
= sizeof(struct hpt_iop_request_scsi_command
)
1932 - sizeof(struct hpt_iopsg
)
1933 + nsegs
* sizeof(struct hpt_iopsg
);
1934 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
) {
1935 bus_dmamap_sync(hba
->io_dmat
,
1936 srb
->dma_map
, BUS_DMASYNC_PREREAD
);
1938 else if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_OUT
)
1939 bus_dmamap_sync(hba
->io_dmat
,
1940 srb
->dma_map
, BUS_DMASYNC_PREWRITE
);
1941 req
->header
.context
= (u_int64_t
)srb
->index
1942 << MVIOP_REQUEST_NUMBER_START_BIT
1943 | MVIOP_CMD_TYPE_SCSI
;
1944 req
->header
.flags
= IOP_REQUEST_FLAG_OUTPUT_CONTEXT
;
1945 size
= req
->header
.size
>> 8;
1946 hptiop_mv_inbound_write(req_phy
1947 | MVIOP_MU_QUEUE_ADDR_HOST_BIT
1948 | (size
> 3 ? 3 : size
), hba
);
1951 static void hptiop_post_scsi_command(void *arg
, bus_dma_segment_t
*segs
,
1952 int nsegs
, int error
)
1954 struct hpt_iop_srb
*srb
= (struct hpt_iop_srb
*)arg
;
1955 union ccb
*ccb
= srb
->ccb
;
1956 struct hpt_iop_hba
*hba
= srb
->hba
;
1958 if (error
|| nsegs
> hba
->max_sg_count
) {
1959 KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n",
1960 ccb
->ccb_h
.func_code
,
1961 ccb
->ccb_h
.target_id
,
1962 ccb
->ccb_h
.target_lun
, nsegs
));
1963 ccb
->ccb_h
.status
= CAM_BUSY
;
1964 bus_dmamap_unload(hba
->io_dmat
, srb
->dma_map
);
1965 hptiop_free_srb(hba
, srb
);
1970 hba
->ops
->post_req(hba
, srb
, segs
, nsegs
);
/*
 * Bus-DMA load callback for the MV control/config buffer: record the
 * bus address rounded up to a 32-byte boundary and round the matching
 * virtual pointer up the same way.
 * NOTE(review): the "& ~0x1F" continuation lines were elided in this
 * extraction and are reconstructed from the identical alignment idiom
 * in hptiop_map_srb() -- confirm against the pristine source.
 */
static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
				int nsegs, int error)
{
	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;

	hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
				& ~(u_int64_t)0x1F;
	hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
				& ~0x1F);
}
1983 static void hptiop_map_srb(void *arg
, bus_dma_segment_t
*segs
,
1984 int nsegs
, int error
)
1986 struct hpt_iop_hba
* hba
= (struct hpt_iop_hba
*)arg
;
1987 bus_addr_t phy_addr
= (segs
->ds_addr
+ 0x1F) & ~(bus_addr_t
)0x1F;
1988 struct hpt_iop_srb
*srb
, *tmp_srb
;
1991 if (error
|| nsegs
== 0) {
1992 device_printf(hba
->pcidev
, "hptiop_map_srb error");
1997 srb
= (struct hpt_iop_srb
*)
1998 (((unsigned long)hba
->uncached_ptr
+ 0x1F)
1999 & ~(unsigned long)0x1F);
2001 for (i
= 0; i
< HPT_SRB_MAX_QUEUE_SIZE
; i
++) {
2002 tmp_srb
= (struct hpt_iop_srb
*)
2003 ((char *)srb
+ i
* HPT_SRB_MAX_SIZE
);
2004 if (((unsigned long)tmp_srb
& 0x1F) == 0) {
2005 if (bus_dmamap_create(hba
->io_dmat
,
2006 0, &tmp_srb
->dma_map
)) {
2007 device_printf(hba
->pcidev
, "dmamap create failed");
2011 bzero(tmp_srb
, sizeof(struct hpt_iop_srb
));
2014 if (hba
->ctlcfg_ptr
== 0) {/*itl iop*/
2015 tmp_srb
->phy_addr
= (u_int64_t
)(u_int32_t
)
2017 if (phy_addr
& IOPMU_MAX_MEM_SUPPORT_MASK_32G
)
2019 HPT_SRB_FLAG_HIGH_MEM_ACESS
;
2021 tmp_srb
->phy_addr
= phy_addr
;
2024 hptiop_free_srb(hba
, tmp_srb
);
2025 hba
->srb
[i
] = tmp_srb
;
2026 phy_addr
+= HPT_SRB_MAX_SIZE
;
2029 device_printf(hba
->pcidev
, "invalid alignment");
/* IOP-to-host message notification; the message is ignored (empty body). */
static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
{
}
2040 static int hptiop_os_query_remove_device(struct hpt_iop_hba
* hba
,
2043 struct cam_periph
*periph
= NULL
;
2044 struct cam_path
*path
;
2045 int status
, retval
= 0;
2047 status
= xpt_create_path(&path
, NULL
, hba
->sim
->path_id
, target_id
, 0);
2049 if (status
== CAM_REQ_CMP
) {
2050 if ((periph
= cam_periph_find(path
, "da")) != NULL
) {
2051 if (periph
->refcount
>= 1) {
2052 device_printf(hba
->pcidev
, "target_id=0x%x,"
2053 "refcount=%d", target_id
, periph
->refcount
);
2057 xpt_free_path(path
);
2062 static void hptiop_release_resource(struct hpt_iop_hba
*hba
)
2066 struct ccb_setasync ccb
;
2068 xpt_setup_ccb(&ccb
.ccb_h
, hba
->path
, /*priority*/5);
2069 ccb
.ccb_h
.func_code
= XPT_SASYNC_CB
;
2070 ccb
.event_enable
= 0;
2071 ccb
.callback
= hptiop_async
;
2072 ccb
.callback_arg
= hba
->sim
;
2073 xpt_action((union ccb
*)&ccb
);
2074 xpt_free_path(hba
->path
);
2078 xpt_bus_deregister(cam_sim_path(hba
->sim
));
2079 cam_sim_free(hba
->sim
);
2082 if (hba
->ctlcfg_dmat
) {
2083 bus_dmamap_unload(hba
->ctlcfg_dmat
, hba
->ctlcfg_dmamap
);
2084 bus_dmamem_free(hba
->ctlcfg_dmat
,
2085 hba
->ctlcfg_ptr
, hba
->ctlcfg_dmamap
);
2086 bus_dma_tag_destroy(hba
->ctlcfg_dmat
);
2089 for (i
= 0; i
< HPT_SRB_MAX_QUEUE_SIZE
; i
++) {
2090 struct hpt_iop_srb
*srb
= hba
->srb
[i
];
2092 bus_dmamap_destroy(hba
->io_dmat
, srb
->dma_map
);
2095 if (hba
->srb_dmat
) {
2096 bus_dmamap_unload(hba
->srb_dmat
, hba
->srb_dmamap
);
2097 bus_dmamap_destroy(hba
->srb_dmat
, hba
->srb_dmamap
);
2098 bus_dma_tag_destroy(hba
->srb_dmat
);
2102 bus_dma_tag_destroy(hba
->io_dmat
);
2104 if (hba
->parent_dmat
)
2105 bus_dma_tag_destroy(hba
->parent_dmat
);
2107 if (hba
->irq_handle
)
2108 bus_teardown_intr(hba
->pcidev
, hba
->irq_res
, hba
->irq_handle
);
2111 bus_release_resource(hba
->pcidev
, SYS_RES_IRQ
,
2115 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
2116 hba
->bar0_rid
, hba
->bar0_res
);
2118 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
2119 hba
->bar2_rid
, hba
->bar2_res
);
2121 destroy_dev(hba
->ioctl_dev
);
2122 dev_ops_remove_minor(&hptiop_ops
, device_get_unit(hba
->pcidev
));