2 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
3 * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * $FreeBSD: src/sys/dev/hptiop/hptiop.c,v 1.15 2012/10/25 17:29:11 delphij Exp $
29 #include <sys/param.h>
30 #include <sys/types.h>
33 #include <sys/systm.h>
36 #include <sys/malloc.h>
38 #include <sys/kernel.h>
40 #include <sys/kthread.h>
42 #include <sys/module.h>
44 #include <sys/eventhandler.h>
46 #include <sys/taskqueue.h>
47 #include <sys/device.h>
48 #include <sys/mplock2.h>
50 #include <machine/stdarg.h>
56 #include <bus/pci/pcireg.h>
57 #include <bus/pci/pcivar.h>
59 #include <bus/cam/cam.h>
60 #include <bus/cam/cam_ccb.h>
61 #include <bus/cam/cam_sim.h>
62 #include <bus/cam/cam_xpt_periph.h>
63 #include <bus/cam/cam_xpt_sim.h>
64 #include <bus/cam/cam_debug.h>
65 #include <bus/cam/cam_periph.h>
66 #include <bus/cam/scsi/scsi_all.h>
67 #include <bus/cam/scsi/scsi_message.h>
69 #include <dev/raid/hptiop/hptiop.h>
/*
 * Driver identification strings: the device/driver name registered with
 * the kernel and the human-readable driver version.
 */
static const char driver_name[] = "hptiop";
static const char driver_version[] = "v1.8";
74 static devclass_t hptiop_devclass
;
76 static int hptiop_send_sync_msg(struct hpt_iop_hba
*hba
,
77 u_int32_t msg
, u_int32_t millisec
);
78 static void hptiop_request_callback_itl(struct hpt_iop_hba
*hba
,
80 static void hptiop_request_callback_mv(struct hpt_iop_hba
*hba
, u_int64_t req
);
81 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba
*hba
,
83 static void hptiop_os_message_callback(struct hpt_iop_hba
*hba
, u_int32_t msg
);
84 static int hptiop_do_ioctl_itl(struct hpt_iop_hba
*hba
,
85 struct hpt_iop_ioctl_param
*pParams
);
86 static int hptiop_do_ioctl_mv(struct hpt_iop_hba
*hba
,
87 struct hpt_iop_ioctl_param
*pParams
);
88 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba
*hba
,
89 struct hpt_iop_ioctl_param
*pParams
);
90 static void hptiop_bus_scan_cb(struct cam_periph
*periph
, union ccb
*ccb
);
91 static int hptiop_rescan_bus(struct hpt_iop_hba
*hba
);
92 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba
*hba
);
93 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba
*hba
);
94 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba
*hba
);
95 static int hptiop_get_config_itl(struct hpt_iop_hba
*hba
,
96 struct hpt_iop_request_get_config
*config
);
97 static int hptiop_get_config_mv(struct hpt_iop_hba
*hba
,
98 struct hpt_iop_request_get_config
*config
);
99 static int hptiop_get_config_mvfrey(struct hpt_iop_hba
*hba
,
100 struct hpt_iop_request_get_config
*config
);
101 static int hptiop_set_config_itl(struct hpt_iop_hba
*hba
,
102 struct hpt_iop_request_set_config
*config
);
103 static int hptiop_set_config_mv(struct hpt_iop_hba
*hba
,
104 struct hpt_iop_request_set_config
*config
);
105 static int hptiop_set_config_mvfrey(struct hpt_iop_hba
*hba
,
106 struct hpt_iop_request_set_config
*config
);
107 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba
*hba
);
108 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba
*hba
);
109 static int hptiop_internal_memfree_itl(struct hpt_iop_hba
*hba
);
110 static int hptiop_internal_memfree_mv(struct hpt_iop_hba
*hba
);
111 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba
*hba
);
112 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba
*hba
,
113 u_int32_t req32
, struct hpt_iop_ioctl_param
*pParams
);
114 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba
*hba
,
115 struct hpt_iop_request_ioctl_command
*req
,
116 struct hpt_iop_ioctl_param
*pParams
);
117 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba
*hba
,
118 struct hpt_iop_request_ioctl_command
*req
,
119 struct hpt_iop_ioctl_param
*pParams
);
120 static void hptiop_post_req_itl(struct hpt_iop_hba
*hba
,
121 struct hpt_iop_srb
*srb
,
122 bus_dma_segment_t
*segs
, int nsegs
);
123 static void hptiop_post_req_mv(struct hpt_iop_hba
*hba
,
124 struct hpt_iop_srb
*srb
,
125 bus_dma_segment_t
*segs
, int nsegs
);
126 static void hptiop_post_req_mvfrey(struct hpt_iop_hba
*hba
,
127 struct hpt_iop_srb
*srb
,
128 bus_dma_segment_t
*segs
, int nsegs
);
129 static void hptiop_post_msg_itl(struct hpt_iop_hba
*hba
, u_int32_t msg
);
130 static void hptiop_post_msg_mv(struct hpt_iop_hba
*hba
, u_int32_t msg
);
131 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba
*hba
, u_int32_t msg
);
132 static void hptiop_enable_intr_itl(struct hpt_iop_hba
*hba
);
133 static void hptiop_enable_intr_mv(struct hpt_iop_hba
*hba
);
134 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba
*hba
);
135 static void hptiop_disable_intr_itl(struct hpt_iop_hba
*hba
);
136 static void hptiop_disable_intr_mv(struct hpt_iop_hba
*hba
);
137 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba
*hba
);
138 static void hptiop_free_srb(struct hpt_iop_hba
*hba
, struct hpt_iop_srb
*srb
);
139 static int hptiop_os_query_remove_device(struct hpt_iop_hba
*hba
, int tid
);
140 static int hptiop_probe(device_t dev
);
141 static int hptiop_attach(device_t dev
);
142 static int hptiop_detach(device_t dev
);
143 static int hptiop_shutdown(device_t dev
);
144 static void hptiop_action(struct cam_sim
*sim
, union ccb
*ccb
);
145 static void hptiop_poll(struct cam_sim
*sim
);
146 static void hptiop_async(void *callback_arg
, u_int32_t code
,
147 struct cam_path
*path
, void *arg
);
148 static void hptiop_pci_intr(void *arg
);
149 static void hptiop_release_resource(struct hpt_iop_hba
*hba
);
150 static void hptiop_reset_adapter(void *argv
);
151 static d_open_t hptiop_open
;
152 static d_close_t hptiop_close
;
153 static d_ioctl_t hptiop_ioctl
;
155 static struct dev_ops hptiop_ops
= {
156 { driver_name
, 0, 0 },
157 .d_open
= hptiop_open
,
158 .d_close
= hptiop_close
,
159 .d_ioctl
= hptiop_ioctl
,
/* Recover the per-adapter softc stashed in the cdev's si_drv1. */
#define hba_from_dev(dev) ((struct hpt_iop_hba *)(dev)->si_drv1)

/*
 * 32-bit MMIO register accessors for the three controller families.
 * NOTE(review): each macro expects a local `struct hpt_iop_hba *hba`
 * to be in scope at the expansion site — confirm at every call site.
 * `value` is parenthesized in every write macro so that expression
 * arguments (e.g. `a | b`) expand safely (CERT PRE01-C).
 *
 * ITL (Intel-style) controllers: registers in BAR0, laid out as
 * struct hpt_iopmu_itl.
 */
#define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
#define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))

/*
 * Marvell (MV) controllers split their registers across two BARs:
 * BAR0 holds struct hpt_iopmv_regs, BAR2 holds struct hpt_iopmu_mv.
 */
#define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), (value))
#define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
#define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), (value))
#define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))

/* Marvell Frey-family controllers: struct hpt_iopmu_mvfrey in BAR2. */
#define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), (value))
#define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))
183 static int hptiop_open(struct dev_open_args
*ap
)
185 cdev_t dev
= ap
->a_head
.a_dev
;
186 struct hpt_iop_hba
*hba
= hba_from_dev(dev
);
190 if (hba
->flag
& HPT_IOCTL_FLAG_OPEN
)
192 hba
->flag
|= HPT_IOCTL_FLAG_OPEN
;
196 static int hptiop_close(struct dev_close_args
*ap
)
198 cdev_t dev
= ap
->a_head
.a_dev
;
199 struct hpt_iop_hba
*hba
= hba_from_dev(dev
);
200 hba
->flag
&= ~(u_int32_t
)HPT_IOCTL_FLAG_OPEN
;
204 static int hptiop_ioctl(struct dev_ioctl_args
*ap
)
206 cdev_t dev
= ap
->a_head
.a_dev
;
207 u_long cmd
= ap
->a_cmd
;
208 caddr_t data
= ap
->a_data
;
210 struct hpt_iop_hba
*hba
= hba_from_dev(dev
);
215 case HPT_DO_IOCONTROL
:
216 ret
= hba
->ops
->do_ioctl(hba
,
217 (struct hpt_iop_ioctl_param
*)data
);
220 ret
= hptiop_rescan_bus(hba
);
229 static u_int64_t
hptiop_mv_outbound_read(struct hpt_iop_hba
*hba
)
232 u_int32_t outbound_tail
= BUS_SPACE_RD4_MV2(outbound_tail
);
233 u_int32_t outbound_head
= BUS_SPACE_RD4_MV2(outbound_head
);
235 if (outbound_tail
!= outbound_head
) {
236 bus_space_read_region_4(hba
->bar2t
, hba
->bar2h
,
237 offsetof(struct hpt_iopmu_mv
,
238 outbound_q
[outbound_tail
]),
243 if (outbound_tail
== MVIOP_QUEUE_LEN
)
246 BUS_SPACE_WRT4_MV2(outbound_tail
, outbound_tail
);
252 static void hptiop_mv_inbound_write(u_int64_t p
, struct hpt_iop_hba
*hba
)
254 u_int32_t inbound_head
= BUS_SPACE_RD4_MV2(inbound_head
);
255 u_int32_t head
= inbound_head
+ 1;
257 if (head
== MVIOP_QUEUE_LEN
)
260 bus_space_write_region_4(hba
->bar2t
, hba
->bar2h
,
261 offsetof(struct hpt_iopmu_mv
, inbound_q
[inbound_head
]),
263 BUS_SPACE_WRT4_MV2(inbound_head
, head
);
264 BUS_SPACE_WRT4_MV0(inbound_doorbell
, MVIOP_MU_INBOUND_INT_POSTQUEUE
);
267 static void hptiop_post_msg_itl(struct hpt_iop_hba
*hba
, u_int32_t msg
)
269 BUS_SPACE_WRT4_ITL(inbound_msgaddr0
, msg
);
270 BUS_SPACE_RD4_ITL(outbound_intstatus
);
273 static void hptiop_post_msg_mv(struct hpt_iop_hba
*hba
, u_int32_t msg
)
276 BUS_SPACE_WRT4_MV2(inbound_msg
, msg
);
277 BUS_SPACE_WRT4_MV0(inbound_doorbell
, MVIOP_MU_INBOUND_INT_MSG
);
279 BUS_SPACE_RD4_MV0(outbound_intmask
);
282 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba
*hba
, u_int32_t msg
)
284 BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a
, msg
);
285 BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a
);
288 static int hptiop_wait_ready_itl(struct hpt_iop_hba
* hba
, u_int32_t millisec
)
293 for (i
= 0; i
< millisec
; i
++) {
294 req
= BUS_SPACE_RD4_ITL(inbound_queue
);
295 if (req
!= IOPMU_QUEUE_EMPTY
)
300 if (req
!=IOPMU_QUEUE_EMPTY
) {
301 BUS_SPACE_WRT4_ITL(outbound_queue
, req
);
302 BUS_SPACE_RD4_ITL(outbound_intstatus
);
309 static int hptiop_wait_ready_mv(struct hpt_iop_hba
* hba
, u_int32_t millisec
)
311 if (hptiop_send_sync_msg(hba
, IOPMU_INBOUND_MSG0_NOP
, millisec
))
317 static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba
* hba
,
320 if (hptiop_send_sync_msg(hba
, IOPMU_INBOUND_MSG0_NOP
, millisec
))
326 static void hptiop_request_callback_itl(struct hpt_iop_hba
* hba
,
329 struct hpt_iop_srb
*srb
;
330 struct hpt_iop_request_scsi_command
*req
=NULL
;
333 u_int32_t result
, temp
, dxfer
;
336 if (index
& IOPMU_QUEUE_MASK_HOST_BITS
) { /*host req*/
337 if (hba
->firmware_version
> 0x01020000 ||
338 hba
->interface_version
> 0x01020000) {
339 srb
= hba
->srb
[index
& ~(u_int32_t
)
340 (IOPMU_QUEUE_ADDR_HOST_BIT
341 | IOPMU_QUEUE_REQUEST_RESULT_BIT
)];
342 req
= (struct hpt_iop_request_scsi_command
*)srb
;
343 if (index
& IOPMU_QUEUE_REQUEST_RESULT_BIT
)
344 result
= IOP_RESULT_SUCCESS
;
346 result
= req
->header
.result
;
348 srb
= hba
->srb
[index
&
349 ~(u_int32_t
)IOPMU_QUEUE_ADDR_HOST_BIT
];
350 req
= (struct hpt_iop_request_scsi_command
*)srb
;
351 result
= req
->header
.result
;
353 dxfer
= req
->dataxfer_length
;
358 temp
= bus_space_read_4(hba
->bar0t
, hba
->bar0h
, index
+
359 offsetof(struct hpt_iop_request_header
, type
));
360 result
= bus_space_read_4(hba
->bar0t
, hba
->bar0h
, index
+
361 offsetof(struct hpt_iop_request_header
, result
));
363 case IOP_REQUEST_TYPE_IOCTL_COMMAND
:
366 bus_space_write_region_4(hba
->bar0t
, hba
->bar0h
, index
+
367 offsetof(struct hpt_iop_request_header
, context
),
368 (u_int32_t
*)&temp64
, 2);
369 wakeup((void *)((unsigned long)hba
->u
.itl
.mu
+ index
));
373 case IOP_REQUEST_TYPE_SCSI_COMMAND
:
374 bus_space_read_region_4(hba
->bar0t
, hba
->bar0h
, index
+
375 offsetof(struct hpt_iop_request_header
, context
),
376 (u_int32_t
*)&temp64
, 2);
377 srb
= (struct hpt_iop_srb
*)(unsigned long)temp64
;
378 dxfer
= bus_space_read_4(hba
->bar0t
, hba
->bar0h
,
379 index
+ offsetof(struct hpt_iop_request_scsi_command
,
382 ccb
= (union ccb
*)srb
->ccb
;
383 if (ccb
->ccb_h
.flags
& CAM_CDB_POINTER
)
384 cdb
= ccb
->csio
.cdb_io
.cdb_ptr
;
386 cdb
= ccb
->csio
.cdb_io
.cdb_bytes
;
388 if (cdb
[0] == SYNCHRONIZE_CACHE
) { /* ??? */
389 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
394 case IOP_RESULT_SUCCESS
:
395 switch (ccb
->ccb_h
.flags
& CAM_DIR_MASK
) {
397 bus_dmamap_sync(hba
->io_dmat
,
398 srb
->dma_map
, BUS_DMASYNC_POSTREAD
);
399 bus_dmamap_unload(hba
->io_dmat
, srb
->dma_map
);
402 bus_dmamap_sync(hba
->io_dmat
,
403 srb
->dma_map
, BUS_DMASYNC_POSTWRITE
);
404 bus_dmamap_unload(hba
->io_dmat
, srb
->dma_map
);
408 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
411 case IOP_RESULT_BAD_TARGET
:
412 ccb
->ccb_h
.status
= CAM_DEV_NOT_THERE
;
414 case IOP_RESULT_BUSY
:
415 ccb
->ccb_h
.status
= CAM_BUSY
;
417 case IOP_RESULT_INVALID_REQUEST
:
418 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
420 case IOP_RESULT_FAIL
:
421 ccb
->ccb_h
.status
= CAM_SCSI_STATUS_ERROR
;
423 case IOP_RESULT_RESET
:
424 ccb
->ccb_h
.status
= CAM_BUSY
;
426 case IOP_RESULT_CHECK_CONDITION
:
427 memset(&ccb
->csio
.sense_data
, 0,
428 sizeof(ccb
->csio
.sense_data
));
429 if (dxfer
< ccb
->csio
.sense_len
)
430 ccb
->csio
.sense_resid
= ccb
->csio
.sense_len
-
433 ccb
->csio
.sense_resid
= 0;
434 if (srb
->srb_flag
& HPT_SRB_FLAG_HIGH_MEM_ACESS
) {/*iop*/
435 bus_space_read_region_1(hba
->bar0t
, hba
->bar0h
,
436 index
+ offsetof(struct hpt_iop_request_scsi_command
,
437 sg_list
), (u_int8_t
*)&ccb
->csio
.sense_data
,
438 MIN(dxfer
, sizeof(ccb
->csio
.sense_data
)));
440 memcpy(&ccb
->csio
.sense_data
, &req
->sg_list
,
441 MIN(dxfer
, sizeof(ccb
->csio
.sense_data
)));
443 ccb
->ccb_h
.status
= CAM_SCSI_STATUS_ERROR
;
444 ccb
->ccb_h
.status
|= CAM_AUTOSNS_VALID
;
445 ccb
->csio
.scsi_status
= SCSI_STATUS_CHECK_COND
;
448 ccb
->ccb_h
.status
= CAM_SCSI_STATUS_ERROR
;
452 if (srb
->srb_flag
& HPT_SRB_FLAG_HIGH_MEM_ACESS
)
453 BUS_SPACE_WRT4_ITL(outbound_queue
, index
);
455 ccb
->csio
.resid
= ccb
->csio
.dxfer_len
- dxfer
;
457 hptiop_free_srb(hba
, srb
);
463 static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba
*hba
)
467 while ((req
= BUS_SPACE_RD4_ITL(outbound_queue
)) !=IOPMU_QUEUE_EMPTY
) {
468 if (req
& IOPMU_QUEUE_MASK_HOST_BITS
)
469 hptiop_request_callback_itl(hba
, req
);
471 temp
= bus_space_read_4(hba
->bar0t
,
473 offsetof(struct hpt_iop_request_header
,
475 if (temp
& IOP_REQUEST_FLAG_SYNC_REQUEST
) {
477 bus_space_read_region_4(hba
->bar0t
,
479 offsetof(struct hpt_iop_request_header
,
481 (u_int32_t
*)&temp64
, 2);
483 hptiop_request_callback_itl(hba
, req
);
486 bus_space_write_region_4(hba
->bar0t
,
488 offsetof(struct hpt_iop_request_header
,
490 (u_int32_t
*)&temp64
, 2);
493 hptiop_request_callback_itl(hba
, req
);
498 static int hptiop_intr_itl(struct hpt_iop_hba
* hba
)
503 status
= BUS_SPACE_RD4_ITL(outbound_intstatus
);
505 if (status
& IOPMU_OUTBOUND_INT_MSG0
) {
506 u_int32_t msg
= BUS_SPACE_RD4_ITL(outbound_msgaddr0
);
507 KdPrint(("hptiop: received outbound msg %x\n", msg
));
508 BUS_SPACE_WRT4_ITL(outbound_intstatus
, IOPMU_OUTBOUND_INT_MSG0
);
509 hptiop_os_message_callback(hba
, msg
);
513 if (status
& IOPMU_OUTBOUND_INT_POSTQUEUE
) {
514 hptiop_drain_outbound_queue_itl(hba
);
521 static void hptiop_request_callback_mv(struct hpt_iop_hba
* hba
,
524 u_int32_t context
= (u_int32_t
)_tag
;
526 if (context
& MVIOP_CMD_TYPE_SCSI
) {
527 struct hpt_iop_srb
*srb
;
528 struct hpt_iop_request_scsi_command
*req
;
532 srb
= hba
->srb
[context
>> MVIOP_REQUEST_NUMBER_START_BIT
];
533 req
= (struct hpt_iop_request_scsi_command
*)srb
;
534 ccb
= (union ccb
*)srb
->ccb
;
535 if (ccb
->ccb_h
.flags
& CAM_CDB_POINTER
)
536 cdb
= ccb
->csio
.cdb_io
.cdb_ptr
;
538 cdb
= ccb
->csio
.cdb_io
.cdb_bytes
;
540 if (cdb
[0] == SYNCHRONIZE_CACHE
) { /* ??? */
541 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
544 if (context
& MVIOP_MU_QUEUE_REQUEST_RESULT_BIT
)
545 req
->header
.result
= IOP_RESULT_SUCCESS
;
547 switch (req
->header
.result
) {
548 case IOP_RESULT_SUCCESS
:
549 switch (ccb
->ccb_h
.flags
& CAM_DIR_MASK
) {
551 bus_dmamap_sync(hba
->io_dmat
,
552 srb
->dma_map
, BUS_DMASYNC_POSTREAD
);
553 bus_dmamap_unload(hba
->io_dmat
, srb
->dma_map
);
556 bus_dmamap_sync(hba
->io_dmat
,
557 srb
->dma_map
, BUS_DMASYNC_POSTWRITE
);
558 bus_dmamap_unload(hba
->io_dmat
, srb
->dma_map
);
561 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
563 case IOP_RESULT_BAD_TARGET
:
564 ccb
->ccb_h
.status
= CAM_DEV_NOT_THERE
;
566 case IOP_RESULT_BUSY
:
567 ccb
->ccb_h
.status
= CAM_BUSY
;
569 case IOP_RESULT_INVALID_REQUEST
:
570 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
572 case IOP_RESULT_FAIL
:
573 ccb
->ccb_h
.status
= CAM_SCSI_STATUS_ERROR
;
575 case IOP_RESULT_RESET
:
576 ccb
->ccb_h
.status
= CAM_BUSY
;
578 case IOP_RESULT_CHECK_CONDITION
:
579 memset(&ccb
->csio
.sense_data
, 0,
580 sizeof(ccb
->csio
.sense_data
));
581 if (req
->dataxfer_length
< ccb
->csio
.sense_len
)
582 ccb
->csio
.sense_resid
= ccb
->csio
.sense_len
-
583 req
->dataxfer_length
;
585 ccb
->csio
.sense_resid
= 0;
586 memcpy(&ccb
->csio
.sense_data
, &req
->sg_list
,
587 MIN(req
->dataxfer_length
, sizeof(ccb
->csio
.sense_data
)));
588 ccb
->ccb_h
.status
= CAM_SCSI_STATUS_ERROR
;
589 ccb
->ccb_h
.status
|= CAM_AUTOSNS_VALID
;
590 ccb
->csio
.scsi_status
= SCSI_STATUS_CHECK_COND
;
593 ccb
->ccb_h
.status
= CAM_SCSI_STATUS_ERROR
;
597 ccb
->csio
.resid
= ccb
->csio
.dxfer_len
- req
->dataxfer_length
;
599 hptiop_free_srb(hba
, srb
);
601 } else if (context
& MVIOP_CMD_TYPE_IOCTL
) {
602 struct hpt_iop_request_ioctl_command
*req
= hba
->ctlcfg_ptr
;
603 if (context
& MVIOP_MU_QUEUE_REQUEST_RESULT_BIT
)
604 hba
->config_done
= 1;
606 hba
->config_done
= -1;
609 (MVIOP_CMD_TYPE_SET_CONFIG
|
610 MVIOP_CMD_TYPE_GET_CONFIG
))
611 hba
->config_done
= 1;
613 device_printf(hba
->pcidev
, "wrong callback type\n");
617 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba
* hba
,
620 u_int32_t req_type
= _tag
& 0xf;
622 struct hpt_iop_srb
*srb
;
623 struct hpt_iop_request_scsi_command
*req
;
628 case IOP_REQUEST_TYPE_GET_CONFIG
:
629 case IOP_REQUEST_TYPE_SET_CONFIG
:
630 hba
->config_done
= 1;
633 case IOP_REQUEST_TYPE_SCSI_COMMAND
:
634 srb
= hba
->srb
[(_tag
>> 4) & 0xff];
635 req
= (struct hpt_iop_request_scsi_command
*)srb
;
637 ccb
= (union ccb
*)srb
->ccb
;
639 callout_stop(ccb
->ccb_h
.timeout_ch
);
641 if (ccb
->ccb_h
.flags
& CAM_CDB_POINTER
)
642 cdb
= ccb
->csio
.cdb_io
.cdb_ptr
;
644 cdb
= ccb
->csio
.cdb_io
.cdb_bytes
;
646 if (cdb
[0] == SYNCHRONIZE_CACHE
) { /* ??? */
647 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
651 if (_tag
& MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT
)
652 req
->header
.result
= IOP_RESULT_SUCCESS
;
654 switch (req
->header
.result
) {
655 case IOP_RESULT_SUCCESS
:
656 switch (ccb
->ccb_h
.flags
& CAM_DIR_MASK
) {
658 bus_dmamap_sync(hba
->io_dmat
,
659 srb
->dma_map
, BUS_DMASYNC_POSTREAD
);
660 bus_dmamap_unload(hba
->io_dmat
, srb
->dma_map
);
663 bus_dmamap_sync(hba
->io_dmat
,
664 srb
->dma_map
, BUS_DMASYNC_POSTWRITE
);
665 bus_dmamap_unload(hba
->io_dmat
, srb
->dma_map
);
668 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
670 case IOP_RESULT_BAD_TARGET
:
671 ccb
->ccb_h
.status
= CAM_DEV_NOT_THERE
;
673 case IOP_RESULT_BUSY
:
674 ccb
->ccb_h
.status
= CAM_BUSY
;
676 case IOP_RESULT_INVALID_REQUEST
:
677 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
679 case IOP_RESULT_FAIL
:
680 ccb
->ccb_h
.status
= CAM_SCSI_STATUS_ERROR
;
682 case IOP_RESULT_RESET
:
683 ccb
->ccb_h
.status
= CAM_BUSY
;
685 case IOP_RESULT_CHECK_CONDITION
:
686 memset(&ccb
->csio
.sense_data
, 0,
687 sizeof(ccb
->csio
.sense_data
));
688 if (req
->dataxfer_length
< ccb
->csio
.sense_len
)
689 ccb
->csio
.sense_resid
= ccb
->csio
.sense_len
-
690 req
->dataxfer_length
;
692 ccb
->csio
.sense_resid
= 0;
693 memcpy(&ccb
->csio
.sense_data
, &req
->sg_list
,
694 MIN(req
->dataxfer_length
, sizeof(ccb
->csio
.sense_data
)));
695 ccb
->ccb_h
.status
= CAM_SCSI_STATUS_ERROR
;
696 ccb
->ccb_h
.status
|= CAM_AUTOSNS_VALID
;
697 ccb
->csio
.scsi_status
= SCSI_STATUS_CHECK_COND
;
700 ccb
->ccb_h
.status
= CAM_SCSI_STATUS_ERROR
;
704 ccb
->csio
.resid
= ccb
->csio
.dxfer_len
- req
->dataxfer_length
;
706 hptiop_free_srb(hba
, srb
);
709 case IOP_REQUEST_TYPE_IOCTL_COMMAND
:
710 if (_tag
& MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT
)
711 hba
->config_done
= 1;
713 hba
->config_done
= -1;
714 wakeup((struct hpt_iop_request_ioctl_command
*)hba
->ctlcfg_ptr
);
717 device_printf(hba
->pcidev
, "wrong callback type\n");
722 static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba
* hba
)
726 while ((req
= hptiop_mv_outbound_read(hba
))) {
727 if (req
& MVIOP_MU_QUEUE_ADDR_HOST_BIT
) {
728 if (req
& MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT
) {
729 hptiop_request_callback_mv(hba
, req
);
735 static int hptiop_intr_mv(struct hpt_iop_hba
* hba
)
740 status
= BUS_SPACE_RD4_MV0(outbound_doorbell
);
743 BUS_SPACE_WRT4_MV0(outbound_doorbell
, ~status
);
745 if (status
& MVIOP_MU_OUTBOUND_INT_MSG
) {
746 u_int32_t msg
= BUS_SPACE_RD4_MV2(outbound_msg
);
747 KdPrint(("hptiop: received outbound msg %x\n", msg
));
748 hptiop_os_message_callback(hba
, msg
);
752 if (status
& MVIOP_MU_OUTBOUND_INT_POSTQUEUE
) {
753 hptiop_drain_outbound_queue_mv(hba
);
760 static int hptiop_intr_mvfrey(struct hpt_iop_hba
* hba
)
762 u_int32_t status
, _tag
, cptr
;
765 if (hba
->initialized
) {
766 BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable
, 0);
769 status
= BUS_SPACE_RD4_MVFREY2(f0_doorbell
);
771 BUS_SPACE_WRT4_MVFREY2(f0_doorbell
, status
);
772 if (status
& CPU_TO_F0_DRBL_MSG_A_BIT
) {
773 u_int32_t msg
= BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a
);
774 hptiop_os_message_callback(hba
, msg
);
779 status
= BUS_SPACE_RD4_MVFREY2(isr_cause
);
781 BUS_SPACE_WRT4_MVFREY2(isr_cause
, status
);
783 cptr
= *hba
->u
.mvfrey
.outlist_cptr
& 0xff;
784 while (hba
->u
.mvfrey
.outlist_rptr
!= cptr
) {
785 hba
->u
.mvfrey
.outlist_rptr
++;
786 if (hba
->u
.mvfrey
.outlist_rptr
== hba
->u
.mvfrey
.list_count
) {
787 hba
->u
.mvfrey
.outlist_rptr
= 0;
790 _tag
= hba
->u
.mvfrey
.outlist
[hba
->u
.mvfrey
.outlist_rptr
].val
;
791 hptiop_request_callback_mvfrey(hba
, _tag
);
794 } while (cptr
!= (*hba
->u
.mvfrey
.outlist_cptr
& 0xff));
797 if (hba
->initialized
) {
798 BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable
, 0x1010);
804 static int hptiop_send_sync_request_itl(struct hpt_iop_hba
* hba
,
805 u_int32_t req32
, u_int32_t millisec
)
810 BUS_SPACE_WRT4_ITL(inbound_queue
, req32
);
811 BUS_SPACE_RD4_ITL(outbound_intstatus
);
813 for (i
= 0; i
< millisec
; i
++) {
814 hptiop_intr_itl(hba
);
815 bus_space_read_region_4(hba
->bar0t
, hba
->bar0h
, req32
+
816 offsetof(struct hpt_iop_request_header
, context
),
817 (u_int32_t
*)&temp64
, 2);
826 static int hptiop_send_sync_request_mv(struct hpt_iop_hba
*hba
,
827 void *req
, u_int32_t millisec
)
831 hba
->config_done
= 0;
833 phy_addr
= hba
->ctlcfgcmd_phy
|
834 (u_int64_t
)MVIOP_MU_QUEUE_ADDR_HOST_BIT
;
835 ((struct hpt_iop_request_get_config
*)req
)->header
.flags
|=
836 IOP_REQUEST_FLAG_SYNC_REQUEST
|
837 IOP_REQUEST_FLAG_OUTPUT_CONTEXT
;
838 hptiop_mv_inbound_write(phy_addr
, hba
);
839 BUS_SPACE_RD4_MV0(outbound_intmask
);
841 for (i
= 0; i
< millisec
; i
++) {
843 if (hba
->config_done
)
850 static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba
*hba
,
851 void *req
, u_int32_t millisec
)
855 struct hpt_iop_request_header
*reqhdr
= (struct hpt_iop_request_header
*)req
;
857 hba
->config_done
= 0;
859 phy_addr
= hba
->ctlcfgcmd_phy
;
860 reqhdr
->flags
= IOP_REQUEST_FLAG_SYNC_REQUEST
861 | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
862 | IOP_REQUEST_FLAG_ADDR_BITS
863 | ((phy_addr
>> 16) & 0xffff0000);
864 reqhdr
->context
= ((phy_addr
& 0xffffffff) << 32 )
865 | IOPMU_QUEUE_ADDR_HOST_BIT
| reqhdr
->type
;
867 hba
->u
.mvfrey
.inlist_wptr
++;
868 index
= hba
->u
.mvfrey
.inlist_wptr
& 0x3fff;
870 if (index
== hba
->u
.mvfrey
.list_count
) {
872 hba
->u
.mvfrey
.inlist_wptr
&= ~0x3fff;
873 hba
->u
.mvfrey
.inlist_wptr
^= CL_POINTER_TOGGLE
;
876 hba
->u
.mvfrey
.inlist
[index
].addr
= phy_addr
;
877 hba
->u
.mvfrey
.inlist
[index
].intrfc_len
= (reqhdr
->size
+ 3) / 4;
879 BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr
, hba
->u
.mvfrey
.inlist_wptr
);
880 BUS_SPACE_RD4_MVFREY2(inbound_write_ptr
);
882 for (i
= 0; i
< millisec
; i
++) {
883 hptiop_intr_mvfrey(hba
);
884 if (hba
->config_done
)
891 static int hptiop_send_sync_msg(struct hpt_iop_hba
*hba
,
892 u_int32_t msg
, u_int32_t millisec
)
897 hba
->ops
->post_msg(hba
, msg
);
899 for (i
=0; i
<millisec
; i
++) {
900 hba
->ops
->iop_intr(hba
);
906 return hba
->msg_done
? 0 : -1;
909 static int hptiop_get_config_itl(struct hpt_iop_hba
* hba
,
910 struct hpt_iop_request_get_config
* config
)
914 config
->header
.size
= sizeof(struct hpt_iop_request_get_config
);
915 config
->header
.type
= IOP_REQUEST_TYPE_GET_CONFIG
;
916 config
->header
.flags
= IOP_REQUEST_FLAG_SYNC_REQUEST
;
917 config
->header
.result
= IOP_RESULT_PENDING
;
918 config
->header
.context
= 0;
920 req32
= BUS_SPACE_RD4_ITL(inbound_queue
);
921 if (req32
== IOPMU_QUEUE_EMPTY
)
924 bus_space_write_region_4(hba
->bar0t
, hba
->bar0h
,
925 req32
, (u_int32_t
*)config
,
926 sizeof(struct hpt_iop_request_header
) >> 2);
928 if (hptiop_send_sync_request_itl(hba
, req32
, 20000)) {
929 KdPrint(("hptiop: get config send cmd failed"));
933 bus_space_read_region_4(hba
->bar0t
, hba
->bar0h
,
934 req32
, (u_int32_t
*)config
,
935 sizeof(struct hpt_iop_request_get_config
) >> 2);
937 BUS_SPACE_WRT4_ITL(outbound_queue
, req32
);
942 static int hptiop_get_config_mv(struct hpt_iop_hba
* hba
,
943 struct hpt_iop_request_get_config
* config
)
945 struct hpt_iop_request_get_config
*req
;
947 if (!(req
= hba
->ctlcfg_ptr
))
950 req
->header
.flags
= 0;
951 req
->header
.type
= IOP_REQUEST_TYPE_GET_CONFIG
;
952 req
->header
.size
= sizeof(struct hpt_iop_request_get_config
);
953 req
->header
.result
= IOP_RESULT_PENDING
;
954 req
->header
.context
= MVIOP_CMD_TYPE_GET_CONFIG
;
956 if (hptiop_send_sync_request_mv(hba
, req
, 20000)) {
957 KdPrint(("hptiop: get config send cmd failed"));
965 static int hptiop_get_config_mvfrey(struct hpt_iop_hba
* hba
,
966 struct hpt_iop_request_get_config
* config
)
968 struct hpt_iop_request_get_config
*info
= hba
->u
.mvfrey
.config
;
970 if (info
->header
.size
!= sizeof(struct hpt_iop_request_get_config
) ||
971 info
->header
.type
!= IOP_REQUEST_TYPE_GET_CONFIG
) {
972 KdPrint(("hptiop: header size %x/%x type %x/%x",
973 info
->header
.size
, (int)sizeof(struct hpt_iop_request_get_config
),
974 info
->header
.type
, IOP_REQUEST_TYPE_GET_CONFIG
));
978 config
->interface_version
= info
->interface_version
;
979 config
->firmware_version
= info
->firmware_version
;
980 config
->max_requests
= info
->max_requests
;
981 config
->request_size
= info
->request_size
;
982 config
->max_sg_count
= info
->max_sg_count
;
983 config
->data_transfer_length
= info
->data_transfer_length
;
984 config
->alignment_mask
= info
->alignment_mask
;
985 config
->max_devices
= info
->max_devices
;
986 config
->sdram_size
= info
->sdram_size
;
988 KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
989 config
->max_requests
, config
->request_size
,
990 config
->data_transfer_length
, config
->max_devices
,
991 config
->sdram_size
));
996 static int hptiop_set_config_itl(struct hpt_iop_hba
*hba
,
997 struct hpt_iop_request_set_config
*config
)
1001 req32
= BUS_SPACE_RD4_ITL(inbound_queue
);
1003 if (req32
== IOPMU_QUEUE_EMPTY
)
1006 config
->header
.size
= sizeof(struct hpt_iop_request_set_config
);
1007 config
->header
.type
= IOP_REQUEST_TYPE_SET_CONFIG
;
1008 config
->header
.flags
= IOP_REQUEST_FLAG_SYNC_REQUEST
;
1009 config
->header
.result
= IOP_RESULT_PENDING
;
1010 config
->header
.context
= 0;
1012 bus_space_write_region_4(hba
->bar0t
, hba
->bar0h
, req32
,
1013 (u_int32_t
*)config
,
1014 sizeof(struct hpt_iop_request_set_config
) >> 2);
1016 if (hptiop_send_sync_request_itl(hba
, req32
, 20000)) {
1017 KdPrint(("hptiop: set config send cmd failed"));
1021 BUS_SPACE_WRT4_ITL(outbound_queue
, req32
);
1026 static int hptiop_set_config_mv(struct hpt_iop_hba
*hba
,
1027 struct hpt_iop_request_set_config
*config
)
1029 struct hpt_iop_request_set_config
*req
;
1031 if (!(req
= hba
->ctlcfg_ptr
))
1034 memcpy((u_int8_t
*)req
+ sizeof(struct hpt_iop_request_header
),
1035 (u_int8_t
*)config
+ sizeof(struct hpt_iop_request_header
),
1036 sizeof(struct hpt_iop_request_set_config
) -
1037 sizeof(struct hpt_iop_request_header
));
1039 req
->header
.flags
= 0;
1040 req
->header
.type
= IOP_REQUEST_TYPE_SET_CONFIG
;
1041 req
->header
.size
= sizeof(struct hpt_iop_request_set_config
);
1042 req
->header
.result
= IOP_RESULT_PENDING
;
1043 req
->header
.context
= MVIOP_CMD_TYPE_SET_CONFIG
;
1045 if (hptiop_send_sync_request_mv(hba
, req
, 20000)) {
1046 KdPrint(("hptiop: set config send cmd failed"));
1053 static int hptiop_set_config_mvfrey(struct hpt_iop_hba
*hba
,
1054 struct hpt_iop_request_set_config
*config
)
1056 struct hpt_iop_request_set_config
*req
;
1058 if (!(req
= hba
->ctlcfg_ptr
))
1061 memcpy((u_int8_t
*)req
+ sizeof(struct hpt_iop_request_header
),
1062 (u_int8_t
*)config
+ sizeof(struct hpt_iop_request_header
),
1063 sizeof(struct hpt_iop_request_set_config
) -
1064 sizeof(struct hpt_iop_request_header
));
1066 req
->header
.type
= IOP_REQUEST_TYPE_SET_CONFIG
;
1067 req
->header
.size
= sizeof(struct hpt_iop_request_set_config
);
1068 req
->header
.result
= IOP_RESULT_PENDING
;
1070 if (hptiop_send_sync_request_mvfrey(hba
, req
, 20000)) {
1071 KdPrint(("hptiop: set config send cmd failed"));
1078 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba
*hba
,
1080 struct hpt_iop_ioctl_param
*pParams
)
1083 struct hpt_iop_request_ioctl_command req
;
1085 if ((((pParams
->nInBufferSize
+ 3) & ~3) + pParams
->nOutBufferSize
) >
1086 (hba
->max_request_size
-
1087 offsetof(struct hpt_iop_request_ioctl_command
, buf
))) {
1088 device_printf(hba
->pcidev
, "request size beyond max value");
1092 req
.header
.size
= offsetof(struct hpt_iop_request_ioctl_command
, buf
)
1093 + pParams
->nInBufferSize
;
1094 req
.header
.type
= IOP_REQUEST_TYPE_IOCTL_COMMAND
;
1095 req
.header
.flags
= IOP_REQUEST_FLAG_SYNC_REQUEST
;
1096 req
.header
.result
= IOP_RESULT_PENDING
;
1097 req
.header
.context
= req32
+ (u_int64_t
)(unsigned long)hba
->u
.itl
.mu
;
1098 req
.ioctl_code
= HPT_CTL_CODE_BSD_TO_IOP(pParams
->dwIoControlCode
);
1099 req
.inbuf_size
= pParams
->nInBufferSize
;
1100 req
.outbuf_size
= pParams
->nOutBufferSize
;
1101 req
.bytes_returned
= 0;
1103 bus_space_write_region_4(hba
->bar0t
, hba
->bar0h
, req32
, (u_int32_t
*)&req
,
1104 offsetof(struct hpt_iop_request_ioctl_command
, buf
)>>2);
1106 hptiop_lock_adapter(hba
);
1108 BUS_SPACE_WRT4_ITL(inbound_queue
, req32
);
1109 BUS_SPACE_RD4_ITL(outbound_intstatus
);
1111 bus_space_read_region_4(hba
->bar0t
, hba
->bar0h
, req32
+
1112 offsetof(struct hpt_iop_request_ioctl_command
, header
.context
),
1113 (u_int32_t
*)&temp64
, 2);
1115 if (hptiop_sleep(hba
, (void *)((unsigned long)hba
->u
.itl
.mu
+ req32
),
1116 0, "hptctl", HPT_OSM_TIMEOUT
)==0)
1118 hptiop_send_sync_msg(hba
, IOPMU_INBOUND_MSG0_RESET
, 60000);
1119 bus_space_read_region_4(hba
->bar0t
, hba
->bar0h
,req32
+
1120 offsetof(struct hpt_iop_request_ioctl_command
,
1122 (u_int32_t
*)&temp64
, 2);
1125 hptiop_unlock_adapter(hba
);
1129 static int hptiop_bus_space_copyin(struct hpt_iop_hba
*hba
, u_int32_t bus
,
1130 void *user
, int size
)
1135 for (i
=0; i
<size
; i
++) {
1136 if (copyin((u_int8_t
*)user
+ i
, &byte
, 1))
1138 bus_space_write_1(hba
->bar0t
, hba
->bar0h
, bus
+ i
, byte
);
1144 static int hptiop_bus_space_copyout(struct hpt_iop_hba
*hba
, u_int32_t bus
,
1145 void *user
, int size
)
1150 for (i
=0; i
<size
; i
++) {
1151 byte
= bus_space_read_1(hba
->bar0t
, hba
->bar0h
, bus
+ i
);
1152 if (copyout(&byte
, (u_int8_t
*)user
+ i
, 1))
1159 static int hptiop_do_ioctl_itl(struct hpt_iop_hba
*hba
,
1160 struct hpt_iop_ioctl_param
* pParams
)
1165 if ((pParams
->Magic
!= HPT_IOCTL_MAGIC
) &&
1166 (pParams
->Magic
!= HPT_IOCTL_MAGIC32
))
1169 req32
= BUS_SPACE_RD4_ITL(inbound_queue
);
1170 if (req32
== IOPMU_QUEUE_EMPTY
)
1173 if (pParams
->nInBufferSize
)
1174 if (hptiop_bus_space_copyin(hba
, req32
+
1175 offsetof(struct hpt_iop_request_ioctl_command
, buf
),
1176 (void *)pParams
->lpInBuffer
, pParams
->nInBufferSize
))
1179 if (hptiop_post_ioctl_command_itl(hba
, req32
, pParams
))
1182 result
= bus_space_read_4(hba
->bar0t
, hba
->bar0h
, req32
+
1183 offsetof(struct hpt_iop_request_ioctl_command
,
1186 if (result
== IOP_RESULT_SUCCESS
) {
1187 if (pParams
->nOutBufferSize
)
1188 if (hptiop_bus_space_copyout(hba
, req32
+
1189 offsetof(struct hpt_iop_request_ioctl_command
, buf
) +
1190 ((pParams
->nInBufferSize
+ 3) & ~3),
1191 (void *)pParams
->lpOutBuffer
, pParams
->nOutBufferSize
))
1194 if (pParams
->lpBytesReturned
) {
1195 if (hptiop_bus_space_copyout(hba
, req32
+
1196 offsetof(struct hpt_iop_request_ioctl_command
, bytes_returned
),
1197 (void *)pParams
->lpBytesReturned
, sizeof(unsigned long)))
1201 BUS_SPACE_WRT4_ITL(outbound_queue
, req32
);
1206 BUS_SPACE_WRT4_ITL(outbound_queue
, req32
);
1212 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba
*hba
,
1213 struct hpt_iop_request_ioctl_command
*req
,
1214 struct hpt_iop_ioctl_param
*pParams
)
1219 if ((((pParams
->nInBufferSize
+ 3) & ~3) + pParams
->nOutBufferSize
) >
1220 (hba
->max_request_size
-
1221 offsetof(struct hpt_iop_request_ioctl_command
, buf
))) {
1222 device_printf(hba
->pcidev
, "request size beyond max value");
1226 req
->ioctl_code
= HPT_CTL_CODE_BSD_TO_IOP(pParams
->dwIoControlCode
);
1227 req
->inbuf_size
= pParams
->nInBufferSize
;
1228 req
->outbuf_size
= pParams
->nOutBufferSize
;
1229 req
->header
.size
= offsetof(struct hpt_iop_request_ioctl_command
, buf
)
1230 + pParams
->nInBufferSize
;
1231 req
->header
.context
= (u_int64_t
)MVIOP_CMD_TYPE_IOCTL
;
1232 req
->header
.type
= IOP_REQUEST_TYPE_IOCTL_COMMAND
;
1233 req
->header
.result
= IOP_RESULT_PENDING
;
1234 req
->header
.flags
= IOP_REQUEST_FLAG_OUTPUT_CONTEXT
;
1235 size
= req
->header
.size
>> 8;
1236 size
= size
> 3 ? 3 : size
;
1237 req_phy
= hba
->ctlcfgcmd_phy
| MVIOP_MU_QUEUE_ADDR_HOST_BIT
| size
;
1238 hptiop_mv_inbound_write(req_phy
, hba
);
1240 BUS_SPACE_RD4_MV0(outbound_intmask
);
1242 while (hba
->config_done
== 0) {
1243 if (hptiop_sleep(hba
, req
, 0,
1244 "hptctl", HPT_OSM_TIMEOUT
)==0)
1246 hptiop_send_sync_msg(hba
, IOPMU_INBOUND_MSG0_RESET
, 60000);
1251 static int hptiop_do_ioctl_mv(struct hpt_iop_hba
*hba
,
1252 struct hpt_iop_ioctl_param
*pParams
)
1254 struct hpt_iop_request_ioctl_command
*req
;
1256 if ((pParams
->Magic
!= HPT_IOCTL_MAGIC
) &&
1257 (pParams
->Magic
!= HPT_IOCTL_MAGIC32
))
1260 req
= (struct hpt_iop_request_ioctl_command
*)(hba
->ctlcfg_ptr
);
1261 hba
->config_done
= 0;
1262 hptiop_lock_adapter(hba
);
1263 if (pParams
->nInBufferSize
)
1264 if (copyin((void *)pParams
->lpInBuffer
,
1265 req
->buf
, pParams
->nInBufferSize
))
1267 if (hptiop_post_ioctl_command_mv(hba
, req
, pParams
))
1270 if (hba
->config_done
== 1) {
1271 if (pParams
->nOutBufferSize
)
1272 if (copyout(req
->buf
+
1273 ((pParams
->nInBufferSize
+ 3) & ~3),
1274 (void *)pParams
->lpOutBuffer
,
1275 pParams
->nOutBufferSize
))
1278 if (pParams
->lpBytesReturned
)
1279 if (copyout(&req
->bytes_returned
,
1280 (void*)pParams
->lpBytesReturned
,
1283 hptiop_unlock_adapter(hba
);
1287 hptiop_unlock_adapter(hba
);
1292 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba
*hba
,
1293 struct hpt_iop_request_ioctl_command
*req
,
1294 struct hpt_iop_ioctl_param
*pParams
)
1299 phy_addr
= hba
->ctlcfgcmd_phy
;
1301 if ((((pParams
->nInBufferSize
+ 3) & ~3) + pParams
->nOutBufferSize
) >
1302 (hba
->max_request_size
-
1303 offsetof(struct hpt_iop_request_ioctl_command
, buf
))) {
1304 device_printf(hba
->pcidev
, "request size beyond max value");
1308 req
->ioctl_code
= HPT_CTL_CODE_BSD_TO_IOP(pParams
->dwIoControlCode
);
1309 req
->inbuf_size
= pParams
->nInBufferSize
;
1310 req
->outbuf_size
= pParams
->nOutBufferSize
;
1311 req
->header
.size
= offsetof(struct hpt_iop_request_ioctl_command
, buf
)
1312 + pParams
->nInBufferSize
;
1314 req
->header
.type
= IOP_REQUEST_TYPE_IOCTL_COMMAND
;
1315 req
->header
.result
= IOP_RESULT_PENDING
;
1317 req
->header
.flags
= IOP_REQUEST_FLAG_SYNC_REQUEST
1318 | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
1319 | IOP_REQUEST_FLAG_ADDR_BITS
1320 | ((phy_addr
>> 16) & 0xffff0000);
1321 req
->header
.context
= ((phy_addr
& 0xffffffff) << 32 )
1322 | IOPMU_QUEUE_ADDR_HOST_BIT
| req
->header
.type
;
1324 hba
->u
.mvfrey
.inlist_wptr
++;
1325 index
= hba
->u
.mvfrey
.inlist_wptr
& 0x3fff;
1327 if (index
== hba
->u
.mvfrey
.list_count
) {
1329 hba
->u
.mvfrey
.inlist_wptr
&= ~0x3fff;
1330 hba
->u
.mvfrey
.inlist_wptr
^= CL_POINTER_TOGGLE
;
1333 hba
->u
.mvfrey
.inlist
[index
].addr
= phy_addr
;
1334 hba
->u
.mvfrey
.inlist
[index
].intrfc_len
= (req
->header
.size
+ 3) / 4;
1336 BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr
, hba
->u
.mvfrey
.inlist_wptr
);
1337 BUS_SPACE_RD4_MVFREY2(inbound_write_ptr
);
1339 while (hba
->config_done
== 0) {
1340 if (hptiop_sleep(hba
, req
, 0, "hptctl", HPT_OSM_TIMEOUT
) == 0)
1342 hptiop_send_sync_msg(hba
, IOPMU_INBOUND_MSG0_RESET
, 60000);
1347 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba
*hba
,
1348 struct hpt_iop_ioctl_param
*pParams
)
1350 struct hpt_iop_request_ioctl_command
*req
;
1352 if ((pParams
->Magic
!= HPT_IOCTL_MAGIC
) &&
1353 (pParams
->Magic
!= HPT_IOCTL_MAGIC32
))
1356 req
= (struct hpt_iop_request_ioctl_command
*)(hba
->ctlcfg_ptr
);
1357 hba
->config_done
= 0;
1358 hptiop_lock_adapter(hba
);
1359 if (pParams
->nInBufferSize
)
1360 if (copyin((void *)pParams
->lpInBuffer
,
1361 req
->buf
, pParams
->nInBufferSize
))
1363 if (hptiop_post_ioctl_command_mvfrey(hba
, req
, pParams
))
1366 if (hba
->config_done
== 1) {
1367 if (pParams
->nOutBufferSize
)
1368 if (copyout(req
->buf
+
1369 ((pParams
->nInBufferSize
+ 3) & ~3),
1370 (void *)pParams
->lpOutBuffer
,
1371 pParams
->nOutBufferSize
))
1374 if (pParams
->lpBytesReturned
)
1375 if (copyout(&req
->bytes_returned
,
1376 (void*)pParams
->lpBytesReturned
,
1379 hptiop_unlock_adapter(hba
);
1383 hptiop_unlock_adapter(hba
);
1388 static int hptiop_rescan_bus(struct hpt_iop_hba
* hba
)
1392 if ((ccb
= xpt_alloc_ccb()) == NULL
)
1394 if (xpt_create_path(&ccb
->ccb_h
.path
, xpt_periph
, cam_sim_path(hba
->sim
),
1395 CAM_TARGET_WILDCARD
, CAM_LUN_WILDCARD
) != CAM_REQ_CMP
) {
1396 xpt_free_ccb(&ccb
->ccb_h
);
1400 xpt_setup_ccb(&ccb
->ccb_h
, ccb
->ccb_h
.path
, 5/*priority (low)*/);
1401 ccb
->ccb_h
.func_code
= XPT_SCAN_BUS
;
1402 ccb
->ccb_h
.cbfcnp
= hptiop_bus_scan_cb
;
1403 ccb
->crcn
.flags
= CAM_FLAG_NONE
;
1408 static void hptiop_bus_scan_cb(struct cam_periph
*periph
, union ccb
*ccb
)
1410 xpt_free_path(ccb
->ccb_h
.path
);
1411 xpt_free_ccb(&ccb
->ccb_h
);
1414 static bus_dmamap_callback_t hptiop_map_srb
;
1415 static bus_dmamap_callback_t hptiop_post_scsi_command
;
1416 static bus_dmamap_callback_t hptiop_mv_map_ctlcfg
;
1417 static bus_dmamap_callback_t hptiop_mvfrey_map_ctlcfg
;
1419 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba
*hba
)
1421 hba
->bar0_rid
= 0x10;
1422 hba
->bar0_res
= bus_alloc_resource_any(hba
->pcidev
,
1423 SYS_RES_MEMORY
, &hba
->bar0_rid
, RF_ACTIVE
);
1425 if (hba
->bar0_res
== NULL
) {
1426 device_printf(hba
->pcidev
,
1427 "failed to get iop base adrress.\n");
1430 hba
->bar0t
= rman_get_bustag(hba
->bar0_res
);
1431 hba
->bar0h
= rman_get_bushandle(hba
->bar0_res
);
1432 hba
->u
.itl
.mu
= (struct hpt_iopmu_itl
*)
1433 rman_get_virtual(hba
->bar0_res
);
1435 if (!hba
->u
.itl
.mu
) {
1436 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1437 hba
->bar0_rid
, hba
->bar0_res
);
1438 device_printf(hba
->pcidev
, "alloc mem res failed\n");
1445 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba
*hba
)
1447 hba
->bar0_rid
= 0x10;
1448 hba
->bar0_res
= bus_alloc_resource_any(hba
->pcidev
,
1449 SYS_RES_MEMORY
, &hba
->bar0_rid
, RF_ACTIVE
);
1451 if (hba
->bar0_res
== NULL
) {
1452 device_printf(hba
->pcidev
, "failed to get iop bar0.\n");
1455 hba
->bar0t
= rman_get_bustag(hba
->bar0_res
);
1456 hba
->bar0h
= rman_get_bushandle(hba
->bar0_res
);
1457 hba
->u
.mv
.regs
= (struct hpt_iopmv_regs
*)
1458 rman_get_virtual(hba
->bar0_res
);
1460 if (!hba
->u
.mv
.regs
) {
1461 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1462 hba
->bar0_rid
, hba
->bar0_res
);
1463 device_printf(hba
->pcidev
, "alloc bar0 mem res failed\n");
1467 hba
->bar2_rid
= 0x18;
1468 hba
->bar2_res
= bus_alloc_resource_any(hba
->pcidev
,
1469 SYS_RES_MEMORY
, &hba
->bar2_rid
, RF_ACTIVE
);
1471 if (hba
->bar2_res
== NULL
) {
1472 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1473 hba
->bar0_rid
, hba
->bar0_res
);
1474 device_printf(hba
->pcidev
, "failed to get iop bar2.\n");
1478 hba
->bar2t
= rman_get_bustag(hba
->bar2_res
);
1479 hba
->bar2h
= rman_get_bushandle(hba
->bar2_res
);
1480 hba
->u
.mv
.mu
= (struct hpt_iopmu_mv
*)rman_get_virtual(hba
->bar2_res
);
1482 if (!hba
->u
.mv
.mu
) {
1483 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1484 hba
->bar0_rid
, hba
->bar0_res
);
1485 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1486 hba
->bar2_rid
, hba
->bar2_res
);
1487 device_printf(hba
->pcidev
, "alloc mem bar2 res failed\n");
1494 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba
*hba
)
1496 hba
->bar0_rid
= 0x10;
1497 hba
->bar0_res
= bus_alloc_resource_any(hba
->pcidev
,
1498 SYS_RES_MEMORY
, &hba
->bar0_rid
, RF_ACTIVE
);
1500 if (hba
->bar0_res
== NULL
) {
1501 device_printf(hba
->pcidev
, "failed to get iop bar0.\n");
1504 hba
->bar0t
= rman_get_bustag(hba
->bar0_res
);
1505 hba
->bar0h
= rman_get_bushandle(hba
->bar0_res
);
1506 hba
->u
.mvfrey
.config
= (struct hpt_iop_request_get_config
*)
1507 rman_get_virtual(hba
->bar0_res
);
1509 if (!hba
->u
.mvfrey
.config
) {
1510 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1511 hba
->bar0_rid
, hba
->bar0_res
);
1512 device_printf(hba
->pcidev
, "alloc bar0 mem res failed\n");
1516 hba
->bar2_rid
= 0x18;
1517 hba
->bar2_res
= bus_alloc_resource_any(hba
->pcidev
,
1518 SYS_RES_MEMORY
, &hba
->bar2_rid
, RF_ACTIVE
);
1520 if (hba
->bar2_res
== NULL
) {
1521 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1522 hba
->bar0_rid
, hba
->bar0_res
);
1523 device_printf(hba
->pcidev
, "failed to get iop bar2.\n");
1527 hba
->bar2t
= rman_get_bustag(hba
->bar2_res
);
1528 hba
->bar2h
= rman_get_bushandle(hba
->bar2_res
);
1530 (struct hpt_iopmu_mvfrey
*)rman_get_virtual(hba
->bar2_res
);
1532 if (!hba
->u
.mvfrey
.mu
) {
1533 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1534 hba
->bar0_rid
, hba
->bar0_res
);
1535 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1536 hba
->bar2_rid
, hba
->bar2_res
);
1537 device_printf(hba
->pcidev
, "alloc mem bar2 res failed\n");
1544 static void hptiop_release_pci_res_itl(struct hpt_iop_hba
*hba
)
1547 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1548 hba
->bar0_rid
, hba
->bar0_res
);
1551 static void hptiop_release_pci_res_mv(struct hpt_iop_hba
*hba
)
1554 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1555 hba
->bar0_rid
, hba
->bar0_res
);
1557 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1558 hba
->bar2_rid
, hba
->bar2_res
);
1561 static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba
*hba
)
1564 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1565 hba
->bar0_rid
, hba
->bar0_res
);
1567 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
1568 hba
->bar2_rid
, hba
->bar2_res
);
1571 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba
*hba
)
1573 if (bus_dma_tag_create(hba
->parent_dmat
,
1576 BUS_SPACE_MAXADDR_32BIT
,
1581 BUS_SPACE_MAXSIZE_32BIT
,
1583 &hba
->ctlcfg_dmat
)) {
1584 device_printf(hba
->pcidev
, "alloc ctlcfg_dmat failed\n");
1588 if (bus_dmamem_alloc(hba
->ctlcfg_dmat
, (void **)&hba
->ctlcfg_ptr
,
1589 BUS_DMA_WAITOK
| BUS_DMA_COHERENT
,
1590 &hba
->ctlcfg_dmamap
) != 0) {
1591 device_printf(hba
->pcidev
,
1592 "bus_dmamem_alloc failed!\n");
1593 bus_dma_tag_destroy(hba
->ctlcfg_dmat
);
1597 if (bus_dmamap_load(hba
->ctlcfg_dmat
,
1598 hba
->ctlcfg_dmamap
, hba
->ctlcfg_ptr
,
1599 MVIOP_IOCTLCFG_SIZE
,
1600 hptiop_mv_map_ctlcfg
, hba
, 0)) {
1601 device_printf(hba
->pcidev
, "bus_dmamap_load failed!\n");
1602 if (hba
->ctlcfg_dmat
) {
1603 bus_dmamem_free(hba
->ctlcfg_dmat
,
1604 hba
->ctlcfg_ptr
, hba
->ctlcfg_dmamap
);
1605 bus_dma_tag_destroy(hba
->ctlcfg_dmat
);
1613 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba
*hba
)
1615 u_int32_t list_count
= BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl
);
1619 if (list_count
== 0) {
1623 hba
->u
.mvfrey
.list_count
= list_count
;
1624 hba
->u
.mvfrey
.internal_mem_size
= 0x800
1625 + list_count
* sizeof(struct mvfrey_inlist_entry
)
1626 + list_count
* sizeof(struct mvfrey_outlist_entry
)
1628 if (bus_dma_tag_create(hba
->parent_dmat
,
1631 BUS_SPACE_MAXADDR_32BIT
,
1634 hba
->u
.mvfrey
.internal_mem_size
,
1636 BUS_SPACE_MAXSIZE_32BIT
,
1638 &hba
->ctlcfg_dmat
)) {
1639 device_printf(hba
->pcidev
, "alloc ctlcfg_dmat failed\n");
1643 if (bus_dmamem_alloc(hba
->ctlcfg_dmat
, (void **)&hba
->ctlcfg_ptr
,
1644 BUS_DMA_WAITOK
| BUS_DMA_COHERENT
,
1645 &hba
->ctlcfg_dmamap
) != 0) {
1646 device_printf(hba
->pcidev
,
1647 "bus_dmamem_alloc failed!\n");
1648 bus_dma_tag_destroy(hba
->ctlcfg_dmat
);
1652 if (bus_dmamap_load(hba
->ctlcfg_dmat
,
1653 hba
->ctlcfg_dmamap
, hba
->ctlcfg_ptr
,
1654 hba
->u
.mvfrey
.internal_mem_size
,
1655 hptiop_mvfrey_map_ctlcfg
, hba
, 0)) {
1656 device_printf(hba
->pcidev
, "bus_dmamap_load failed!\n");
1657 if (hba
->ctlcfg_dmat
) {
1658 bus_dmamem_free(hba
->ctlcfg_dmat
,
1659 hba
->ctlcfg_ptr
, hba
->ctlcfg_dmamap
);
1660 bus_dma_tag_destroy(hba
->ctlcfg_dmat
);
/*
 * ITL adapters allocate no internal control/config DMA memory, so there is
 * nothing to free here.  Present only to satisfy the adapter-ops contract.
 */
static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba)
{
	return 0;
}
1672 static int hptiop_internal_memfree_mv(struct hpt_iop_hba
*hba
)
1674 if (hba
->ctlcfg_dmat
) {
1675 bus_dmamap_unload(hba
->ctlcfg_dmat
, hba
->ctlcfg_dmamap
);
1676 bus_dmamem_free(hba
->ctlcfg_dmat
,
1677 hba
->ctlcfg_ptr
, hba
->ctlcfg_dmamap
);
1678 bus_dma_tag_destroy(hba
->ctlcfg_dmat
);
1684 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba
*hba
)
1686 if (hba
->ctlcfg_dmat
) {
1687 bus_dmamap_unload(hba
->ctlcfg_dmat
, hba
->ctlcfg_dmamap
);
1688 bus_dmamem_free(hba
->ctlcfg_dmat
,
1689 hba
->ctlcfg_ptr
, hba
->ctlcfg_dmamap
);
1690 bus_dma_tag_destroy(hba
->ctlcfg_dmat
);
1696 static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba
*hba
)
1700 if (hptiop_send_sync_msg(hba
, IOPMU_INBOUND_MSG0_RESET_COMM
, 3000))
1703 /* wait 100ms for MCU ready */
1708 BUS_SPACE_WRT4_MVFREY2(inbound_base
,
1709 hba
->u
.mvfrey
.inlist_phy
& 0xffffffff);
1710 BUS_SPACE_WRT4_MVFREY2(inbound_base_high
,
1711 (hba
->u
.mvfrey
.inlist_phy
>> 16) >> 16);
1713 BUS_SPACE_WRT4_MVFREY2(outbound_base
,
1714 hba
->u
.mvfrey
.outlist_phy
& 0xffffffff);
1715 BUS_SPACE_WRT4_MVFREY2(outbound_base_high
,
1716 (hba
->u
.mvfrey
.outlist_phy
>> 16) >> 16);
1718 BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base
,
1719 hba
->u
.mvfrey
.outlist_cptr_phy
& 0xffffffff);
1720 BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high
,
1721 (hba
->u
.mvfrey
.outlist_cptr_phy
>> 16) >> 16);
1723 hba
->u
.mvfrey
.inlist_wptr
= (hba
->u
.mvfrey
.list_count
- 1)
1724 | CL_POINTER_TOGGLE
;
1725 *hba
->u
.mvfrey
.outlist_cptr
= (hba
->u
.mvfrey
.list_count
- 1)
1726 | CL_POINTER_TOGGLE
;
1727 hba
->u
.mvfrey
.outlist_rptr
= hba
->u
.mvfrey
.list_count
- 1;
1733 * CAM driver interface
1735 static device_method_t driver_methods
[] = {
1736 /* Device interface */
1737 DEVMETHOD(device_probe
, hptiop_probe
),
1738 DEVMETHOD(device_attach
, hptiop_attach
),
1739 DEVMETHOD(device_detach
, hptiop_detach
),
1740 DEVMETHOD(device_shutdown
, hptiop_shutdown
),
1744 static struct hptiop_adapter_ops hptiop_itl_ops
= {
1745 .family
= INTEL_BASED_IOP
,
1746 .iop_wait_ready
= hptiop_wait_ready_itl
,
1747 .internal_memalloc
= 0,
1748 .internal_memfree
= hptiop_internal_memfree_itl
,
1749 .alloc_pci_res
= hptiop_alloc_pci_res_itl
,
1750 .release_pci_res
= hptiop_release_pci_res_itl
,
1751 .enable_intr
= hptiop_enable_intr_itl
,
1752 .disable_intr
= hptiop_disable_intr_itl
,
1753 .get_config
= hptiop_get_config_itl
,
1754 .set_config
= hptiop_set_config_itl
,
1755 .iop_intr
= hptiop_intr_itl
,
1756 .post_msg
= hptiop_post_msg_itl
,
1757 .post_req
= hptiop_post_req_itl
,
1758 .do_ioctl
= hptiop_do_ioctl_itl
,
1762 static struct hptiop_adapter_ops hptiop_mv_ops
= {
1763 .family
= MV_BASED_IOP
,
1764 .iop_wait_ready
= hptiop_wait_ready_mv
,
1765 .internal_memalloc
= hptiop_internal_memalloc_mv
,
1766 .internal_memfree
= hptiop_internal_memfree_mv
,
1767 .alloc_pci_res
= hptiop_alloc_pci_res_mv
,
1768 .release_pci_res
= hptiop_release_pci_res_mv
,
1769 .enable_intr
= hptiop_enable_intr_mv
,
1770 .disable_intr
= hptiop_disable_intr_mv
,
1771 .get_config
= hptiop_get_config_mv
,
1772 .set_config
= hptiop_set_config_mv
,
1773 .iop_intr
= hptiop_intr_mv
,
1774 .post_msg
= hptiop_post_msg_mv
,
1775 .post_req
= hptiop_post_req_mv
,
1776 .do_ioctl
= hptiop_do_ioctl_mv
,
1780 static struct hptiop_adapter_ops hptiop_mvfrey_ops
= {
1781 .family
= MVFREY_BASED_IOP
,
1782 .iop_wait_ready
= hptiop_wait_ready_mvfrey
,
1783 .internal_memalloc
= hptiop_internal_memalloc_mvfrey
,
1784 .internal_memfree
= hptiop_internal_memfree_mvfrey
,
1785 .alloc_pci_res
= hptiop_alloc_pci_res_mvfrey
,
1786 .release_pci_res
= hptiop_release_pci_res_mvfrey
,
1787 .enable_intr
= hptiop_enable_intr_mvfrey
,
1788 .disable_intr
= hptiop_disable_intr_mvfrey
,
1789 .get_config
= hptiop_get_config_mvfrey
,
1790 .set_config
= hptiop_set_config_mvfrey
,
1791 .iop_intr
= hptiop_intr_mvfrey
,
1792 .post_msg
= hptiop_post_msg_mvfrey
,
1793 .post_req
= hptiop_post_req_mvfrey
,
1794 .do_ioctl
= hptiop_do_ioctl_mvfrey
,
1795 .reset_comm
= hptiop_reset_comm_mvfrey
,
1798 static driver_t hptiop_pci_driver
= {
1801 sizeof(struct hpt_iop_hba
)
1804 DRIVER_MODULE(hptiop
, pci
, hptiop_pci_driver
, hptiop_devclass
, NULL
, NULL
);
1805 MODULE_DEPEND(hptiop
, cam
, 1, 1, 1);
1806 MODULE_VERSION(hptiop
, 1);
1808 static int hptiop_probe(device_t dev
)
1810 struct hpt_iop_hba
*hba
;
1812 static char buf
[256];
1814 struct hptiop_adapter_ops
*ops
;
1816 if (pci_get_vendor(dev
) != 0x1103)
1819 id
= pci_get_device(dev
);
1825 ops
= &hptiop_mvfrey_ops
;
1846 ops
= &hptiop_itl_ops
;
1851 ops
= &hptiop_mv_ops
;
1857 device_printf(dev
, "adapter at PCI %d:%d:%d, IRQ %d\n",
1858 pci_get_bus(dev
), pci_get_slot(dev
),
1859 pci_get_function(dev
), pci_get_irq(dev
));
1861 ksprintf(buf
, "RocketRAID %x %s Controller",
1862 id
, sas
? "SAS" : "SATA");
1863 device_set_desc_copy(dev
, buf
);
1865 hba
= (struct hpt_iop_hba
*)device_get_softc(dev
);
1866 bzero(hba
, sizeof(struct hpt_iop_hba
));
1869 KdPrint(("hba->ops=%p\n", hba
->ops
));
1873 static int hptiop_attach(device_t dev
)
1875 struct hpt_iop_hba
*hba
= (struct hpt_iop_hba
*)device_get_softc(dev
);
1876 struct hpt_iop_request_get_config iop_config
;
1877 struct hpt_iop_request_set_config set_config
;
1879 struct cam_devq
*devq
;
1880 struct ccb_setasync
*ccb
;
1881 u_int32_t unit
= device_get_unit(dev
);
1883 device_printf(dev
, "RocketRAID 3xxx/4xxx controller driver %s\n",
1886 KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit
,
1887 pci_get_bus(dev
), pci_get_slot(dev
),
1888 pci_get_function(dev
), hba
->ops
));
1890 pci_enable_busmaster(dev
);
1893 if (hba
->ops
->alloc_pci_res(hba
))
1896 if (hba
->ops
->iop_wait_ready(hba
, 2000)) {
1897 device_printf(dev
, "adapter is not ready\n");
1898 goto release_pci_res
;
1901 lockinit(&hba
->lock
, "hptioplock", 0, LK_CANRECURSE
);
1903 if (bus_dma_tag_create(NULL
,/* parent */
1906 BUS_SPACE_MAXADDR
, /* lowaddr */
1907 BUS_SPACE_MAXADDR
, /* highaddr */
1908 NULL
, NULL
, /* filter, filterarg */
1909 BUS_SPACE_MAXSIZE_32BIT
, /* maxsize */
1910 BUS_SPACE_UNRESTRICTED
, /* nsegments */
1911 BUS_SPACE_MAXSIZE_32BIT
, /* maxsegsize */
1913 &hba
->parent_dmat
/* tag */))
1915 device_printf(dev
, "alloc parent_dmat failed\n");
1916 goto release_pci_res
;
1919 if (hba
->ops
->family
== MV_BASED_IOP
) {
1920 if (hba
->ops
->internal_memalloc(hba
)) {
1921 device_printf(dev
, "alloc srb_dmat failed\n");
1922 goto destroy_parent_tag
;
1926 if (hba
->ops
->get_config(hba
, &iop_config
)) {
1927 device_printf(dev
, "get iop config failed.\n");
1928 goto get_config_failed
;
1931 hba
->firmware_version
= iop_config
.firmware_version
;
1932 hba
->interface_version
= iop_config
.interface_version
;
1933 hba
->max_requests
= iop_config
.max_requests
;
1934 hba
->max_devices
= iop_config
.max_devices
;
1935 hba
->max_request_size
= iop_config
.request_size
;
1936 hba
->max_sg_count
= iop_config
.max_sg_count
;
1938 if (hba
->ops
->family
== MVFREY_BASED_IOP
) {
1939 if (hba
->ops
->internal_memalloc(hba
)) {
1940 device_printf(dev
, "alloc srb_dmat failed\n");
1941 goto destroy_parent_tag
;
1943 if (hba
->ops
->reset_comm(hba
)) {
1944 device_printf(dev
, "reset comm failed\n");
1945 goto get_config_failed
;
1949 if (bus_dma_tag_create(hba
->parent_dmat
,/* parent */
1951 BUS_SPACE_MAXADDR_32BIT
+1, /* boundary */
1952 BUS_SPACE_MAXADDR
, /* lowaddr */
1953 BUS_SPACE_MAXADDR
, /* highaddr */
1954 NULL
, NULL
, /* filter, filterarg */
1955 PAGE_SIZE
* (hba
->max_sg_count
-1), /* maxsize */
1956 hba
->max_sg_count
, /* nsegments */
1957 0x20000, /* maxsegsize */
1958 BUS_DMA_ALLOCNOW
, /* flags */
1959 &hba
->io_dmat
/* tag */))
1961 device_printf(dev
, "alloc io_dmat failed\n");
1962 goto get_config_failed
;
1965 if (bus_dma_tag_create(hba
->parent_dmat
,/* parent */
1968 BUS_SPACE_MAXADDR_32BIT
, /* lowaddr */
1969 BUS_SPACE_MAXADDR
, /* highaddr */
1970 NULL
, NULL
, /* filter, filterarg */
1971 HPT_SRB_MAX_SIZE
* HPT_SRB_MAX_QUEUE_SIZE
+ 0x20,
1973 BUS_SPACE_MAXSIZE_32BIT
, /* maxsegsize */
1975 &hba
->srb_dmat
/* tag */))
1977 device_printf(dev
, "alloc srb_dmat failed\n");
1978 goto destroy_io_dmat
;
1981 if (bus_dmamem_alloc(hba
->srb_dmat
, (void **)&hba
->uncached_ptr
,
1982 BUS_DMA_WAITOK
| BUS_DMA_COHERENT
,
1983 &hba
->srb_dmamap
) != 0)
1985 device_printf(dev
, "srb bus_dmamem_alloc failed!\n");
1986 goto destroy_srb_dmat
;
1989 if (bus_dmamap_load(hba
->srb_dmat
,
1990 hba
->srb_dmamap
, hba
->uncached_ptr
,
1991 (HPT_SRB_MAX_SIZE
* HPT_SRB_MAX_QUEUE_SIZE
) + 0x20,
1992 hptiop_map_srb
, hba
, 0))
1994 device_printf(dev
, "bus_dmamap_load failed!\n");
1995 goto srb_dmamem_free
;
1998 if ((devq
= cam_simq_alloc(hba
->max_requests
- 1 )) == NULL
) {
1999 device_printf(dev
, "cam_simq_alloc failed\n");
2000 goto srb_dmamap_unload
;
2003 hba
->sim
= cam_sim_alloc(hptiop_action
, hptiop_poll
, driver_name
,
2004 hba
, unit
, &sim_mplock
, hba
->max_requests
- 1, 1, devq
);
2005 cam_simq_release(devq
);
2007 device_printf(dev
, "cam_sim_alloc failed\n");
2008 goto srb_dmamap_unload
;
2010 if (xpt_bus_register(hba
->sim
, 0) != CAM_SUCCESS
)
2012 device_printf(dev
, "xpt_bus_register failed\n");
2016 if (xpt_create_path(&hba
->path
, /*periph */ NULL
,
2017 cam_sim_path(hba
->sim
), CAM_TARGET_WILDCARD
,
2018 CAM_LUN_WILDCARD
) != CAM_REQ_CMP
) {
2019 device_printf(dev
, "xpt_create_path failed\n");
2020 goto deregister_xpt_bus
;
2023 bzero(&set_config
, sizeof(set_config
));
2024 set_config
.iop_id
= unit
;
2025 set_config
.vbus_id
= cam_sim_path(hba
->sim
);
2026 set_config
.max_host_request_size
= HPT_SRB_MAX_REQ_SIZE
;
2028 if (hba
->ops
->set_config(hba
, &set_config
)) {
2029 device_printf(dev
, "set iop config failed.\n");
2033 ccb
= &xpt_alloc_ccb()->csa
;
2035 xpt_setup_ccb(&ccb
->ccb_h
, hba
->path
, /*priority*/5);
2036 ccb
->ccb_h
.func_code
= XPT_SASYNC_CB
;
2037 ccb
->event_enable
= (AC_FOUND_DEVICE
| AC_LOST_DEVICE
);
2038 ccb
->callback
= hptiop_async
;
2039 ccb
->callback_arg
= hba
->sim
;
2040 xpt_action((union ccb
*)ccb
);
2041 xpt_free_ccb(&ccb
->ccb_h
);
2044 if ((hba
->irq_res
= bus_alloc_resource(hba
->pcidev
, SYS_RES_IRQ
,
2045 &rid
, 0, ~0ul, 1, RF_SHAREABLE
| RF_ACTIVE
)) == NULL
) {
2046 device_printf(dev
, "allocate irq failed!\n");
2050 if (bus_setup_intr(hba
->pcidev
, hba
->irq_res
, 0,
2051 hptiop_pci_intr
, hba
, &hba
->irq_handle
, NULL
))
2053 device_printf(dev
, "allocate intr function failed!\n");
2054 goto free_irq_resource
;
2057 if (hptiop_send_sync_msg(hba
,
2058 IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK
, 5000)) {
2059 device_printf(dev
, "fail to start background task\n");
2060 goto teartown_irq_resource
;
2063 hba
->ops
->enable_intr(hba
);
2064 hba
->initialized
= 1;
2066 hba
->ioctl_dev
= make_dev(&hptiop_ops
, unit
,
2067 UID_ROOT
, GID_WHEEL
/*GID_OPERATOR*/,
2068 S_IRUSR
| S_IWUSR
, "%s%d", driver_name
, unit
);
2070 hba
->ioctl_dev
->si_drv1
= hba
;
2072 hptiop_rescan_bus(hba
);
2077 teartown_irq_resource
:
2078 bus_teardown_intr(dev
, hba
->irq_res
, hba
->irq_handle
);
2081 bus_release_resource(dev
, SYS_RES_IRQ
, 0, hba
->irq_res
);
2084 xpt_free_path(hba
->path
);
2087 xpt_bus_deregister(cam_sim_path(hba
->sim
));
2090 cam_sim_free(hba
->sim
);
2093 if (hba
->uncached_ptr
)
2094 bus_dmamap_unload(hba
->srb_dmat
, hba
->srb_dmamap
);
2097 if (hba
->uncached_ptr
)
2098 bus_dmamem_free(hba
->srb_dmat
,
2099 hba
->uncached_ptr
, hba
->srb_dmamap
);
2103 bus_dma_tag_destroy(hba
->srb_dmat
);
2107 bus_dma_tag_destroy(hba
->io_dmat
);
2110 hba
->ops
->internal_memfree(hba
);
2113 if (hba
->parent_dmat
)
2114 bus_dma_tag_destroy(hba
->parent_dmat
);
2117 if (hba
->ops
->release_pci_res
)
2118 hba
->ops
->release_pci_res(hba
);
2123 static int hptiop_detach(device_t dev
)
2125 struct hpt_iop_hba
* hba
= (struct hpt_iop_hba
*)device_get_softc(dev
);
2129 hptiop_lock_adapter(hba
);
2130 for (i
= 0; i
< hba
->max_devices
; i
++)
2131 if (hptiop_os_query_remove_device(hba
, i
)) {
2132 device_printf(dev
, "file system is busy. id=%d", i
);
2136 if ((error
= hptiop_shutdown(dev
)) != 0)
2138 if (hptiop_send_sync_msg(hba
,
2139 IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK
, 60000))
2142 hptiop_release_resource(hba
);
2145 hptiop_unlock_adapter(hba
);
2149 static int hptiop_shutdown(device_t dev
)
2151 struct hpt_iop_hba
* hba
= (struct hpt_iop_hba
*)device_get_softc(dev
);
2155 if (hba
->flag
& HPT_IOCTL_FLAG_OPEN
) {
2156 device_printf(dev
, "device is busy");
2160 hba
->ops
->disable_intr(hba
);
2162 if (hptiop_send_sync_msg(hba
, IOPMU_INBOUND_MSG0_SHUTDOWN
, 60000))
2168 static void hptiop_pci_intr(void *arg
)
2170 struct hpt_iop_hba
* hba
= (struct hpt_iop_hba
*)arg
;
2171 hptiop_lock_adapter(hba
);
2172 hba
->ops
->iop_intr(hba
);
2173 hptiop_unlock_adapter(hba
);
/*
 * CAM poll entry point: simply run the interrupt handler against the
 * HBA softc attached to this SIM.
 */
static void hptiop_poll(struct cam_sim *sim)
{
	hptiop_pci_intr(cam_sim_softc(sim));
}
2181 static void hptiop_async(void * callback_arg
, u_int32_t code
,
2182 struct cam_path
* path
, void * arg
)
2186 static void hptiop_enable_intr_itl(struct hpt_iop_hba
*hba
)
2188 BUS_SPACE_WRT4_ITL(outbound_intmask
,
2189 ~(IOPMU_OUTBOUND_INT_POSTQUEUE
| IOPMU_OUTBOUND_INT_MSG0
));
2192 static void hptiop_enable_intr_mv(struct hpt_iop_hba
*hba
)
2196 int_mask
= BUS_SPACE_RD4_MV0(outbound_intmask
);
2198 int_mask
|= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
2199 | MVIOP_MU_OUTBOUND_INT_MSG
;
2200 BUS_SPACE_WRT4_MV0(outbound_intmask
,int_mask
);
2203 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba
*hba
)
2205 BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable
, CPU_TO_F0_DRBL_MSG_A_BIT
);
2206 BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable
);
2208 BUS_SPACE_WRT4_MVFREY2(isr_enable
, 0x1);
2209 BUS_SPACE_RD4_MVFREY2(isr_enable
);
2211 BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable
, 0x1010);
2212 BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable
);
2215 static void hptiop_disable_intr_itl(struct hpt_iop_hba
*hba
)
2219 int_mask
= BUS_SPACE_RD4_ITL(outbound_intmask
);
2221 int_mask
|= IOPMU_OUTBOUND_INT_POSTQUEUE
| IOPMU_OUTBOUND_INT_MSG0
;
2222 BUS_SPACE_WRT4_ITL(outbound_intmask
, int_mask
);
2223 BUS_SPACE_RD4_ITL(outbound_intstatus
);
2226 static void hptiop_disable_intr_mv(struct hpt_iop_hba
*hba
)
2229 int_mask
= BUS_SPACE_RD4_MV0(outbound_intmask
);
2231 int_mask
&= ~(MVIOP_MU_OUTBOUND_INT_MSG
2232 | MVIOP_MU_OUTBOUND_INT_POSTQUEUE
);
2233 BUS_SPACE_WRT4_MV0(outbound_intmask
,int_mask
);
2234 BUS_SPACE_RD4_MV0(outbound_intmask
);
2237 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba
*hba
)
2239 BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable
, 0);
2240 BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable
);
2242 BUS_SPACE_WRT4_MVFREY2(isr_enable
, 0);
2243 BUS_SPACE_RD4_MVFREY2(isr_enable
);
2245 BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable
, 0);
2246 BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable
);
2249 static void hptiop_reset_adapter(void *argv
)
2251 struct hpt_iop_hba
* hba
= (struct hpt_iop_hba
*)argv
;
2252 if (hptiop_send_sync_msg(hba
, IOPMU_INBOUND_MSG0_RESET
, 60000))
2254 hptiop_send_sync_msg(hba
, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK
, 5000);
2257 static void *hptiop_get_srb(struct hpt_iop_hba
* hba
)
2259 struct hpt_iop_srb
* srb
;
2261 if (hba
->srb_list
) {
2262 srb
= hba
->srb_list
;
2263 hba
->srb_list
= srb
->next
;
2270 static void hptiop_free_srb(struct hpt_iop_hba
*hba
, struct hpt_iop_srb
*srb
)
2272 srb
->next
= hba
->srb_list
;
2273 hba
->srb_list
= srb
;
2276 static void hptiop_action(struct cam_sim
*sim
, union ccb
*ccb
)
2278 struct hpt_iop_hba
* hba
= (struct hpt_iop_hba
*)cam_sim_softc(sim
);
2279 struct hpt_iop_srb
* srb
;
2281 switch (ccb
->ccb_h
.func_code
) {
2284 hptiop_lock_adapter(hba
);
2285 if (ccb
->ccb_h
.target_lun
!= 0 ||
2286 ccb
->ccb_h
.target_id
>= hba
->max_devices
||
2287 (ccb
->ccb_h
.flags
& CAM_CDB_PHYS
))
2289 ccb
->ccb_h
.status
= CAM_TID_INVALID
;
2294 if ((srb
= hptiop_get_srb(hba
)) == NULL
) {
2295 device_printf(hba
->pcidev
, "srb allocated failed");
2296 ccb
->ccb_h
.status
= CAM_REQ_CMP_ERR
;
2303 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_NONE
)
2304 hptiop_post_scsi_command(srb
, NULL
, 0, 0);
2305 else if ((ccb
->ccb_h
.flags
& CAM_SCATTER_VALID
) == 0) {
2306 if ((ccb
->ccb_h
.flags
& CAM_DATA_PHYS
) == 0) {
2309 error
= bus_dmamap_load(hba
->io_dmat
,
2312 ccb
->csio
.dxfer_len
,
2313 hptiop_post_scsi_command
,
2316 if (error
&& error
!= EINPROGRESS
) {
2317 device_printf(hba
->pcidev
,
2318 "bus_dmamap_load error %d", error
);
2319 xpt_freeze_simq(hba
->sim
, 1);
2320 ccb
->ccb_h
.status
= CAM_REQ_CMP_ERR
;
2322 hptiop_free_srb(hba
, srb
);
2328 device_printf(hba
->pcidev
,
2329 "CAM_DATA_PHYS not supported");
2330 ccb
->ccb_h
.status
= CAM_REQ_CMP_ERR
;
2335 struct bus_dma_segment
*segs
;
2337 if ((ccb
->ccb_h
.flags
& CAM_SG_LIST_PHYS
) == 0 ||
2338 (ccb
->ccb_h
.flags
& CAM_DATA_PHYS
) != 0) {
2339 device_printf(hba
->pcidev
, "SCSI cmd failed");
2340 ccb
->ccb_h
.status
=CAM_PROVIDE_FAIL
;
2344 segs
= (struct bus_dma_segment
*)ccb
->csio
.data_ptr
;
2345 hptiop_post_scsi_command(srb
, segs
,
2346 ccb
->csio
.sglist_cnt
, 0);
2350 hptiop_unlock_adapter(hba
);
2354 device_printf(hba
->pcidev
, "reset adapter");
2355 hptiop_lock_adapter(hba
);
2357 hptiop_reset_adapter(hba
);
2358 hptiop_unlock_adapter(hba
);
2361 case XPT_GET_TRAN_SETTINGS
:
2362 case XPT_SET_TRAN_SETTINGS
:
2363 ccb
->ccb_h
.status
= CAM_FUNC_NOTAVAIL
;
2366 case XPT_CALC_GEOMETRY
:
2367 cam_calc_geometry(&ccb
->ccg
, 1);
2372 struct ccb_pathinq
*cpi
= &ccb
->cpi
;
2374 cpi
->version_num
= 1;
2375 cpi
->hba_inquiry
= PI_SDTR_ABLE
;
2376 cpi
->target_sprt
= 0;
2377 cpi
->hba_misc
= PIM_NOBUSRESET
;
2378 cpi
->hba_eng_cnt
= 0;
2379 cpi
->max_target
= hba
->max_devices
;
2381 cpi
->unit_number
= cam_sim_unit(sim
);
2382 cpi
->bus_id
= cam_sim_bus(sim
);
2383 cpi
->initiator_id
= hba
->max_devices
;
2384 cpi
->base_transfer_speed
= 3300;
2386 strncpy(cpi
->sim_vid
, "FreeBSD", SIM_IDLEN
);
2387 strncpy(cpi
->hba_vid
, "HPT ", HBA_IDLEN
);
2388 strncpy(cpi
->dev_name
, cam_sim_name(sim
), DEV_IDLEN
);
2389 cpi
->transport
= XPORT_SPI
;
2390 cpi
->transport_version
= 2;
2391 cpi
->protocol
= PROTO_SCSI
;
2392 cpi
->protocol_version
= SCSI_REV_2
;
2393 cpi
->ccb_h
.status
= CAM_REQ_CMP
;
2398 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
2406 static void hptiop_post_req_itl(struct hpt_iop_hba
*hba
,
2407 struct hpt_iop_srb
*srb
,
2408 bus_dma_segment_t
*segs
, int nsegs
)
2411 union ccb
*ccb
= srb
->ccb
;
2414 if (ccb
->ccb_h
.flags
& CAM_CDB_POINTER
)
2415 cdb
= ccb
->csio
.cdb_io
.cdb_ptr
;
2417 cdb
= ccb
->csio
.cdb_io
.cdb_bytes
;
2419 KdPrint(("ccb=%p %x-%x-%x\n",
2420 ccb
, *(u_int32_t
*)cdb
, *((u_int32_t
*)cdb
+1), *((u_int32_t
*)cdb
+2)));
2422 if (srb
->srb_flag
& HPT_SRB_FLAG_HIGH_MEM_ACESS
) {
2423 u_int32_t iop_req32
;
2424 struct hpt_iop_request_scsi_command req
;
2426 iop_req32
= BUS_SPACE_RD4_ITL(inbound_queue
);
2428 if (iop_req32
== IOPMU_QUEUE_EMPTY
) {
2429 device_printf(hba
->pcidev
, "invalid req offset\n");
2430 ccb
->ccb_h
.status
= CAM_BUSY
;
2431 bus_dmamap_unload(hba
->io_dmat
, srb
->dma_map
);
2432 hptiop_free_srb(hba
, srb
);
2437 if (ccb
->csio
.dxfer_len
&& nsegs
> 0) {
2438 struct hpt_iopsg
*psg
= req
.sg_list
;
2439 for (idx
= 0; idx
< nsegs
; idx
++, psg
++) {
2440 psg
->pci_address
= (u_int64_t
)segs
[idx
].ds_addr
;
2441 psg
->size
= segs
[idx
].ds_len
;
2447 bcopy(cdb
, req
.cdb
, ccb
->csio
.cdb_len
);
2450 offsetof(struct hpt_iop_request_scsi_command
, sg_list
)
2451 + nsegs
*sizeof(struct hpt_iopsg
);
2452 req
.header
.type
= IOP_REQUEST_TYPE_SCSI_COMMAND
;
2453 req
.header
.flags
= 0;
2454 req
.header
.result
= IOP_RESULT_PENDING
;
2455 req
.header
.context
= (u_int64_t
)(unsigned long)srb
;
2456 req
.dataxfer_length
= ccb
->csio
.dxfer_len
;
2458 req
.target
= ccb
->ccb_h
.target_id
;
2459 req
.lun
= ccb
->ccb_h
.target_lun
;
2461 bus_space_write_region_1(hba
->bar0t
, hba
->bar0h
, iop_req32
,
2462 (u_int8_t
*)&req
, req
.header
.size
);
2464 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
) {
2465 bus_dmamap_sync(hba
->io_dmat
,
2466 srb
->dma_map
, BUS_DMASYNC_PREREAD
);
2468 else if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_OUT
)
2469 bus_dmamap_sync(hba
->io_dmat
,
2470 srb
->dma_map
, BUS_DMASYNC_PREWRITE
);
2472 BUS_SPACE_WRT4_ITL(inbound_queue
,iop_req32
);
2474 struct hpt_iop_request_scsi_command
*req
;
2476 req
= (struct hpt_iop_request_scsi_command
*)srb
;
2477 if (ccb
->csio
.dxfer_len
&& nsegs
> 0) {
2478 struct hpt_iopsg
*psg
= req
->sg_list
;
2479 for (idx
= 0; idx
< nsegs
; idx
++, psg
++) {
2481 (u_int64_t
)segs
[idx
].ds_addr
;
2482 psg
->size
= segs
[idx
].ds_len
;
2488 bcopy(cdb
, req
->cdb
, ccb
->csio
.cdb_len
);
2490 req
->header
.type
= IOP_REQUEST_TYPE_SCSI_COMMAND
;
2491 req
->header
.result
= IOP_RESULT_PENDING
;
2492 req
->dataxfer_length
= ccb
->csio
.dxfer_len
;
2494 req
->target
= ccb
->ccb_h
.target_id
;
2495 req
->lun
= ccb
->ccb_h
.target_lun
;
2497 offsetof(struct hpt_iop_request_scsi_command
, sg_list
)
2498 + nsegs
*sizeof(struct hpt_iopsg
);
2499 req
->header
.context
= (u_int64_t
)srb
->index
|
2500 IOPMU_QUEUE_ADDR_HOST_BIT
;
2501 req
->header
.flags
= IOP_REQUEST_FLAG_OUTPUT_CONTEXT
;
2503 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
) {
2504 bus_dmamap_sync(hba
->io_dmat
,
2505 srb
->dma_map
, BUS_DMASYNC_PREREAD
);
2506 }else if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_OUT
) {
2507 bus_dmamap_sync(hba
->io_dmat
,
2508 srb
->dma_map
, BUS_DMASYNC_PREWRITE
);
2511 if (hba
->firmware_version
> 0x01020000
2512 || hba
->interface_version
> 0x01020000) {
2513 u_int32_t size_bits
;
2515 if (req
->header
.size
< 256)
2516 size_bits
= IOPMU_QUEUE_REQUEST_SIZE_BIT
;
2517 else if (req
->header
.size
< 512)
2518 size_bits
= IOPMU_QUEUE_ADDR_HOST_BIT
;
2520 size_bits
= IOPMU_QUEUE_REQUEST_SIZE_BIT
2521 | IOPMU_QUEUE_ADDR_HOST_BIT
;
2523 BUS_SPACE_WRT4_ITL(inbound_queue
,
2524 (u_int32_t
)srb
->phy_addr
| size_bits
);
2526 BUS_SPACE_WRT4_ITL(inbound_queue
, (u_int32_t
)srb
->phy_addr
2527 |IOPMU_QUEUE_ADDR_HOST_BIT
);
2531 static void hptiop_post_req_mv(struct hpt_iop_hba
*hba
,
2532 struct hpt_iop_srb
*srb
,
2533 bus_dma_segment_t
*segs
, int nsegs
)
2536 union ccb
*ccb
= srb
->ccb
;
2538 struct hpt_iop_request_scsi_command
*req
;
2541 req
= (struct hpt_iop_request_scsi_command
*)srb
;
2542 req_phy
= srb
->phy_addr
;
2544 if (ccb
->csio
.dxfer_len
&& nsegs
> 0) {
2545 struct hpt_iopsg
*psg
= req
->sg_list
;
2546 for (idx
= 0; idx
< nsegs
; idx
++, psg
++) {
2547 psg
->pci_address
= (u_int64_t
)segs
[idx
].ds_addr
;
2548 psg
->size
= segs
[idx
].ds_len
;
2553 if (ccb
->ccb_h
.flags
& CAM_CDB_POINTER
)
2554 cdb
= ccb
->csio
.cdb_io
.cdb_ptr
;
2556 cdb
= ccb
->csio
.cdb_io
.cdb_bytes
;
2558 bcopy(cdb
, req
->cdb
, ccb
->csio
.cdb_len
);
2559 req
->header
.type
= IOP_REQUEST_TYPE_SCSI_COMMAND
;
2560 req
->header
.result
= IOP_RESULT_PENDING
;
2561 req
->dataxfer_length
= ccb
->csio
.dxfer_len
;
2563 req
->target
= ccb
->ccb_h
.target_id
;
2564 req
->lun
= ccb
->ccb_h
.target_lun
;
2565 req
->header
.size
= sizeof(struct hpt_iop_request_scsi_command
)
2566 - sizeof(struct hpt_iopsg
)
2567 + nsegs
* sizeof(struct hpt_iopsg
);
2568 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
) {
2569 bus_dmamap_sync(hba
->io_dmat
,
2570 srb
->dma_map
, BUS_DMASYNC_PREREAD
);
2572 else if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_OUT
)
2573 bus_dmamap_sync(hba
->io_dmat
,
2574 srb
->dma_map
, BUS_DMASYNC_PREWRITE
);
2575 req
->header
.context
= (u_int64_t
)srb
->index
2576 << MVIOP_REQUEST_NUMBER_START_BIT
2577 | MVIOP_CMD_TYPE_SCSI
;
2578 req
->header
.flags
= IOP_REQUEST_FLAG_OUTPUT_CONTEXT
;
2579 size
= req
->header
.size
>> 8;
2580 hptiop_mv_inbound_write(req_phy
2581 | MVIOP_MU_QUEUE_ADDR_HOST_BIT
2582 | (size
> 3 ? 3 : size
), hba
);
2585 static void hptiop_post_req_mvfrey(struct hpt_iop_hba
*hba
,
2586 struct hpt_iop_srb
*srb
,
2587 bus_dma_segment_t
*segs
, int nsegs
)
2590 union ccb
*ccb
= srb
->ccb
;
2592 struct hpt_iop_request_scsi_command
*req
;
2595 req
= (struct hpt_iop_request_scsi_command
*)srb
;
2596 req_phy
= srb
->phy_addr
;
2598 if (ccb
->csio
.dxfer_len
&& nsegs
> 0) {
2599 struct hpt_iopsg
*psg
= req
->sg_list
;
2600 for (idx
= 0; idx
< nsegs
; idx
++, psg
++) {
2601 psg
->pci_address
= (u_int64_t
)segs
[idx
].ds_addr
| 1;
2602 psg
->size
= segs
[idx
].ds_len
;
2607 if (ccb
->ccb_h
.flags
& CAM_CDB_POINTER
)
2608 cdb
= ccb
->csio
.cdb_io
.cdb_ptr
;
2610 cdb
= ccb
->csio
.cdb_io
.cdb_bytes
;
2612 bcopy(cdb
, req
->cdb
, ccb
->csio
.cdb_len
);
2613 req
->header
.type
= IOP_REQUEST_TYPE_SCSI_COMMAND
;
2614 req
->header
.result
= IOP_RESULT_PENDING
;
2615 req
->dataxfer_length
= ccb
->csio
.dxfer_len
;
2617 req
->target
= ccb
->ccb_h
.target_id
;
2618 req
->lun
= ccb
->ccb_h
.target_lun
;
2619 req
->header
.size
= sizeof(struct hpt_iop_request_scsi_command
)
2620 - sizeof(struct hpt_iopsg
)
2621 + nsegs
* sizeof(struct hpt_iopsg
);
2622 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
) {
2623 bus_dmamap_sync(hba
->io_dmat
,
2624 srb
->dma_map
, BUS_DMASYNC_PREREAD
);
2626 else if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_OUT
)
2627 bus_dmamap_sync(hba
->io_dmat
,
2628 srb
->dma_map
, BUS_DMASYNC_PREWRITE
);
2630 req
->header
.flags
= IOP_REQUEST_FLAG_OUTPUT_CONTEXT
2631 | IOP_REQUEST_FLAG_ADDR_BITS
2632 | ((req_phy
>> 16) & 0xffff0000);
2633 req
->header
.context
= ((req_phy
& 0xffffffff) << 32 )
2635 | IOPMU_QUEUE_ADDR_HOST_BIT
| req
->header
.type
;
2637 hba
->u
.mvfrey
.inlist_wptr
++;
2638 index
= hba
->u
.mvfrey
.inlist_wptr
& 0x3fff;
2640 if (index
== hba
->u
.mvfrey
.list_count
) {
2642 hba
->u
.mvfrey
.inlist_wptr
&= ~0x3fff;
2643 hba
->u
.mvfrey
.inlist_wptr
^= CL_POINTER_TOGGLE
;
2646 hba
->u
.mvfrey
.inlist
[index
].addr
= req_phy
;
2647 hba
->u
.mvfrey
.inlist
[index
].intrfc_len
= (req
->header
.size
+ 3) / 4;
2649 BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr
, hba
->u
.mvfrey
.inlist_wptr
);
2650 BUS_SPACE_RD4_MVFREY2(inbound_write_ptr
);
2652 if (req
->header
.type
== IOP_REQUEST_TYPE_SCSI_COMMAND
) {
2653 callout_reset(ccb
->ccb_h
.timeout_ch
, 20 * hz
,
2654 hptiop_reset_adapter
, hba
);
2658 static void hptiop_post_scsi_command(void *arg
, bus_dma_segment_t
*segs
,
2659 int nsegs
, int error
)
2661 struct hpt_iop_srb
*srb
= (struct hpt_iop_srb
*)arg
;
2662 union ccb
*ccb
= srb
->ccb
;
2663 struct hpt_iop_hba
*hba
= srb
->hba
;
2665 if (error
|| nsegs
> hba
->max_sg_count
) {
2666 KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n",
2667 ccb
->ccb_h
.func_code
,
2668 ccb
->ccb_h
.target_id
,
2669 ccb
->ccb_h
.target_lun
, nsegs
));
2670 ccb
->ccb_h
.status
= CAM_BUSY
;
2671 bus_dmamap_unload(hba
->io_dmat
, srb
->dma_map
);
2672 hptiop_free_srb(hba
, srb
);
2677 hba
->ops
->post_req(hba
, srb
, segs
, nsegs
);
2680 static void hptiop_mv_map_ctlcfg(void *arg
, bus_dma_segment_t
*segs
,
2681 int nsegs
, int error
)
2683 struct hpt_iop_hba
*hba
= (struct hpt_iop_hba
*)arg
;
2684 hba
->ctlcfgcmd_phy
= ((u_int64_t
)segs
->ds_addr
+ 0x1F)
2686 hba
->ctlcfg_ptr
= (u_int8_t
*)(((unsigned long)hba
->ctlcfg_ptr
+ 0x1F)
2690 static void hptiop_mvfrey_map_ctlcfg(void *arg
, bus_dma_segment_t
*segs
,
2691 int nsegs
, int error
)
2693 struct hpt_iop_hba
*hba
= (struct hpt_iop_hba
*)arg
;
2696 u_int32_t list_count
= hba
->u
.mvfrey
.list_count
;
2698 phy
= ((u_int64_t
)segs
->ds_addr
+ 0x1F)
2700 p
= (u_int8_t
*)(((unsigned long)hba
->ctlcfg_ptr
+ 0x1F)
2703 hba
->ctlcfgcmd_phy
= phy
;
2704 hba
->ctlcfg_ptr
= p
;
2709 hba
->u
.mvfrey
.inlist
= (struct mvfrey_inlist_entry
*)p
;
2710 hba
->u
.mvfrey
.inlist_phy
= phy
;
2712 p
+= list_count
* sizeof(struct mvfrey_inlist_entry
);
2713 phy
+= list_count
* sizeof(struct mvfrey_inlist_entry
);
2715 hba
->u
.mvfrey
.outlist
= (struct mvfrey_outlist_entry
*)p
;
2716 hba
->u
.mvfrey
.outlist_phy
= phy
;
2718 p
+= list_count
* sizeof(struct mvfrey_outlist_entry
);
2719 phy
+= list_count
* sizeof(struct mvfrey_outlist_entry
);
2721 hba
->u
.mvfrey
.outlist_cptr
= (u_int32_t
*)p
;
2722 hba
->u
.mvfrey
.outlist_cptr_phy
= phy
;
2725 static void hptiop_map_srb(void *arg
, bus_dma_segment_t
*segs
,
2726 int nsegs
, int error
)
2728 struct hpt_iop_hba
* hba
= (struct hpt_iop_hba
*)arg
;
2729 bus_addr_t phy_addr
= (segs
->ds_addr
+ 0x1F) & ~(bus_addr_t
)0x1F;
2730 struct hpt_iop_srb
*srb
, *tmp_srb
;
2733 if (error
|| nsegs
== 0) {
2734 device_printf(hba
->pcidev
, "hptiop_map_srb error");
2739 srb
= (struct hpt_iop_srb
*)
2740 (((unsigned long)hba
->uncached_ptr
+ 0x1F)
2741 & ~(unsigned long)0x1F);
2743 for (i
= 0; i
< HPT_SRB_MAX_QUEUE_SIZE
; i
++) {
2744 tmp_srb
= (struct hpt_iop_srb
*)
2745 ((char *)srb
+ i
* HPT_SRB_MAX_SIZE
);
2746 if (((unsigned long)tmp_srb
& 0x1F) == 0) {
2747 if (bus_dmamap_create(hba
->io_dmat
,
2748 0, &tmp_srb
->dma_map
)) {
2749 device_printf(hba
->pcidev
, "dmamap create failed");
2753 bzero(tmp_srb
, sizeof(struct hpt_iop_srb
));
2756 if (hba
->ctlcfg_ptr
== 0) {/*itl iop*/
2757 tmp_srb
->phy_addr
= (u_int64_t
)(u_int32_t
)
2759 if (phy_addr
& IOPMU_MAX_MEM_SUPPORT_MASK_32G
)
2761 HPT_SRB_FLAG_HIGH_MEM_ACESS
;
2763 tmp_srb
->phy_addr
= phy_addr
;
2766 hptiop_free_srb(hba
, tmp_srb
);
2767 hba
->srb
[i
] = tmp_srb
;
2768 phy_addr
+= HPT_SRB_MAX_SIZE
;
2771 device_printf(hba
->pcidev
, "invalid alignment");
2777 static void hptiop_os_message_callback(struct hpt_iop_hba
* hba
, u_int32_t msg
)
2782 static int hptiop_os_query_remove_device(struct hpt_iop_hba
* hba
,
2785 struct cam_periph
*periph
= NULL
;
2786 struct cam_path
*path
;
2787 int status
, retval
= 0;
2789 status
= xpt_create_path(&path
, NULL
, hba
->sim
->path_id
, target_id
, 0);
2791 if (status
== CAM_REQ_CMP
) {
2792 if ((periph
= cam_periph_find(path
, "da")) != NULL
) {
2793 if (periph
->refcount
>= 1) {
2794 device_printf(hba
->pcidev
, "target_id=0x%x,"
2795 "refcount=%d", target_id
, periph
->refcount
);
2799 xpt_free_path(path
);
2804 static void hptiop_release_resource(struct hpt_iop_hba
*hba
)
2808 struct ccb_setasync
*ccb
;
2810 ccb
= &xpt_alloc_ccb()->csa
;
2811 xpt_setup_ccb(&ccb
->ccb_h
, hba
->path
, /*priority*/5);
2812 ccb
->ccb_h
.func_code
= XPT_SASYNC_CB
;
2813 ccb
->event_enable
= 0;
2814 ccb
->callback
= hptiop_async
;
2815 ccb
->callback_arg
= hba
->sim
;
2816 xpt_action((union ccb
*)ccb
);
2817 xpt_free_path(hba
->path
);
2818 xpt_free_ccb(&ccb
->ccb_h
);
2822 xpt_bus_deregister(cam_sim_path(hba
->sim
));
2823 cam_sim_free(hba
->sim
);
2826 if (hba
->ctlcfg_dmat
) {
2827 bus_dmamap_unload(hba
->ctlcfg_dmat
, hba
->ctlcfg_dmamap
);
2828 bus_dmamem_free(hba
->ctlcfg_dmat
,
2829 hba
->ctlcfg_ptr
, hba
->ctlcfg_dmamap
);
2830 bus_dma_tag_destroy(hba
->ctlcfg_dmat
);
2833 for (i
= 0; i
< HPT_SRB_MAX_QUEUE_SIZE
; i
++) {
2834 struct hpt_iop_srb
*srb
= hba
->srb
[i
];
2836 bus_dmamap_destroy(hba
->io_dmat
, srb
->dma_map
);
2839 if (hba
->srb_dmat
) {
2840 bus_dmamap_unload(hba
->srb_dmat
, hba
->srb_dmamap
);
2841 bus_dmamap_destroy(hba
->srb_dmat
, hba
->srb_dmamap
);
2842 bus_dma_tag_destroy(hba
->srb_dmat
);
2846 bus_dma_tag_destroy(hba
->io_dmat
);
2848 if (hba
->parent_dmat
)
2849 bus_dma_tag_destroy(hba
->parent_dmat
);
2851 if (hba
->irq_handle
)
2852 bus_teardown_intr(hba
->pcidev
, hba
->irq_res
, hba
->irq_handle
);
2855 bus_release_resource(hba
->pcidev
, SYS_RES_IRQ
,
2859 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
2860 hba
->bar0_rid
, hba
->bar0_res
);
2862 bus_release_resource(hba
->pcidev
, SYS_RES_MEMORY
,
2863 hba
->bar2_rid
, hba
->bar2_res
);
2865 destroy_dev(hba
->ioctl_dev
);
2866 dev_ops_remove_minor(&hptiop_ops
, device_get_unit(hba
->pcidev
));