/*
 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
 * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/hptiop/hptiop.c,v 1.15 2012/10/25 17:29:11 delphij Exp $
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/cons.h>
#include <sys/time.h>
#include <sys/systm.h>

#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/kernel.h>

#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/module.h>

#include <sys/eventhandler.h>
#include <sys/bus.h>
#include <sys/taskqueue.h>
#include <sys/device.h>
#include <sys/mplock2.h>

#include <machine/stdarg.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_periph.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_debug.h>
#include <bus/cam/cam_periph.h>
#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>

#include <dev/raid/hptiop/hptiop.h>

static const char driver_name[] = "hptiop";
static const char driver_version[] = "v1.8";

static devclass_t hptiop_devclass;

static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
				u_int32_t msg, u_int32_t millisec);
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
				u_int32_t req);
static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
				u_int32_t req);
static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams);
static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams);
static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams);
static void hptiop_bus_scan_cb(struct cam_periph *periph, union ccb *ccb);
static int hptiop_rescan_bus(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config);
static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config);
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
				u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams);
static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams);
static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
static int hptiop_probe(device_t dev);
static int hptiop_attach(device_t dev);
static int hptiop_detach(device_t dev);
static int hptiop_shutdown(device_t dev);
static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
static void hptiop_poll(struct cam_sim *sim);
static void hptiop_async(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static void hptiop_pci_intr(void *arg);
static void hptiop_release_resource(struct hpt_iop_hba *hba);
static void hptiop_reset_adapter(void *argv);
static d_open_t hptiop_open;
static d_close_t hptiop_close;
static d_ioctl_t hptiop_ioctl;

static struct dev_ops hptiop_ops = {
	{ driver_name, 0, 0 },
	.d_open = hptiop_open,
	.d_close = hptiop_close,
	.d_ioctl = hptiop_ioctl,
};

#define hba_from_dev(dev) ((struct hpt_iop_hba *)(dev)->si_drv1)

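/*
 * MMIO access helpers.  Each controller family (ITL, MV, MV "Frey") maps
 * its message unit at a different BAR with a different register layout,
 * so these macros expand to bus_space_{read,write}_4() with the matching
 * bus tag/handle and a structure offset; they assume a local variable
 * named "hba" is in scope at the point of use.
 */
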
#define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
#define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))

#define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
#define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
#define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
#define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))

#define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
#define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))

static int hptiop_open(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct hpt_iop_hba *hba = hba_from_dev(dev);

	if (hba == NULL)
		return ENXIO;
	if (hba->flag & HPT_IOCTL_FLAG_OPEN)
		return EBUSY;
	hba->flag |= HPT_IOCTL_FLAG_OPEN;
	return 0;
}

static int hptiop_close(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct hpt_iop_hba *hba = hba_from_dev(dev);

	hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
	return 0;
}

static int hptiop_ioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	u_long cmd = ap->a_cmd;
	caddr_t data = ap->a_data;
	int ret = EFAULT;
	struct hpt_iop_hba *hba = hba_from_dev(dev);

	get_mplock();

	switch (cmd) {
	case HPT_DO_IOCONTROL:
		ret = hba->ops->do_ioctl(hba,
				(struct hpt_iop_ioctl_param *)data);
		break;
	case HPT_SCAN_BUS:
		ret = hptiop_rescan_bus(hba);
		break;
	}

	rel_mplock();

	return ret;
}

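/*
 * A minimal userland usage sketch (hypothetical device node name, error
 * handling omitted):
 *
 *	int fd = open("/dev/hptiop0", O_RDWR);
 *	if (fd >= 0)
 *		ioctl(fd, HPT_SCAN_BUS, NULL);	// ask the driver to rescan
 */
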
static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
{
	u_int64_t p;
	u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
	u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);

	if (outbound_tail != outbound_head) {
		bus_space_read_region_4(hba->bar2t, hba->bar2h,
			offsetof(struct hpt_iopmu_mv,
				outbound_q[outbound_tail]),
			(u_int32_t *)&p, 2);

		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;

		BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
		return p;
	} else
		return 0;
}

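/*
 * The MV message unit is a pair of rings in BAR 2: the IOP produces at
 * outbound_head and the host consumes at outbound_tail (above), while
 * hptiop_mv_inbound_write() below is the mirror image, advancing
 * inbound_head and ringing the inbound doorbell.  Each entry is a 64-bit
 * descriptor copied as two 32-bit words.
 */
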
static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
{
	u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
	u_int32_t head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	bus_space_write_region_4(hba->bar2t, hba->bar2h,
		offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
		(u_int32_t *)&p, 2);
	BUS_SPACE_WRT4_MV2(inbound_head, head);
	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
}

static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
{
	BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
	BUS_SPACE_RD4_ITL(outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
{
	BUS_SPACE_WRT4_MV2(inbound_msg, msg);
	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);

	BUS_SPACE_RD4_MV0(outbound_intmask);
}

static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
{
	BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
	BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
}

static int hptiop_wait_ready_itl(struct hpt_iop_hba *hba, u_int32_t millisec)
{
	u_int32_t req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = BUS_SPACE_RD4_ITL(inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		DELAY(1000);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		BUS_SPACE_WRT4_ITL(outbound_queue, req);
		BUS_SPACE_RD4_ITL(outbound_intstatus);
		return 0;
	}

	return -1;
}

static int hptiop_wait_ready_mv(struct hpt_iop_hba *hba, u_int32_t millisec)
{
	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
		return -1;

	return 0;
}

static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba *hba,
					u_int32_t millisec)
{
	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
		return -1;

	return 0;
}

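/*
 * ITL completion path.  The tag posted in the outbound queue either
 * carries IOPMU_QUEUE_MASK_HOST_BITS, making it an index into hba->srb[]
 * (with the result possibly folded into the tag itself on newer
 * firmware), or it is the byte offset of an IOP-resident request inside
 * the BAR 0 message unit.
 */
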
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
					u_int32_t index)
{
	struct hpt_iop_srb *srb;
	struct hpt_iop_request_scsi_command *req = NULL;
	union ccb *ccb;
	u_int8_t *cdb;
	u_int32_t result, temp, dxfer;
	u_int64_t temp64;

	if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /* host req */
		if (hba->firmware_version > 0x01020000 ||
		    hba->interface_version > 0x01020000) {
			srb = hba->srb[index & ~(u_int32_t)
				(IOPMU_QUEUE_ADDR_HOST_BIT
				| IOPMU_QUEUE_REQUEST_RESULT_BIT)];
			req = (struct hpt_iop_request_scsi_command *)srb;
			if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
				result = IOP_RESULT_SUCCESS;
			else
				result = req->header.result;
		} else {
			srb = hba->srb[index &
				~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
			req = (struct hpt_iop_request_scsi_command *)srb;
			result = req->header.result;
		}
		dxfer = req->dataxfer_length;
		goto srb_complete;
	}

	/* iop req */
	temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
		offsetof(struct hpt_iop_request_header, type));
	result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
		offsetof(struct hpt_iop_request_header, result));
	switch (temp) {
	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
	{
		temp64 = 0;
		bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
			offsetof(struct hpt_iop_request_header, context),
			(u_int32_t *)&temp64, 2);
		wakeup((void *)((unsigned long)hba->u.itl.mu + index));
		break;
	}

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
			offsetof(struct hpt_iop_request_header, context),
			(u_int32_t *)&temp64, 2);
		srb = (struct hpt_iop_srb *)(unsigned long)temp64;
		dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
			index + offsetof(struct hpt_iop_request_scsi_command,
			dataxfer_length));
	srb_complete:
		ccb = (union ccb *)srb->ccb;
		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
			cdb = ccb->csio.cdb_io.cdb_ptr;
		else
			cdb = ccb->csio.cdb_io.cdb_bytes;

		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
			ccb->ccb_h.status = CAM_REQ_CMP;
			goto scsi_done;
		}

		switch (result) {
		case IOP_RESULT_SUCCESS:
			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
			case CAM_DIR_IN:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			case CAM_DIR_OUT:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			}

			ccb->ccb_h.status = CAM_REQ_CMP;
			break;

		case IOP_RESULT_BAD_TARGET:
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		case IOP_RESULT_BUSY:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_INVALID_REQUEST:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		case IOP_RESULT_FAIL:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		case IOP_RESULT_RESET:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_CHECK_CONDITION:
			memset(&ccb->csio.sense_data, 0,
			    sizeof(ccb->csio.sense_data));
			if (dxfer < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
				    dxfer;
			else
				ccb->csio.sense_resid = 0;
			if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) { /* iop */
				bus_space_read_region_1(hba->bar0t, hba->bar0h,
					index + offsetof(struct hpt_iop_request_scsi_command,
					sg_list), (u_int8_t *)&ccb->csio.sense_data,
					MIN(dxfer, sizeof(ccb->csio.sense_data)));
			} else {
				memcpy(&ccb->csio.sense_data, &req->sg_list,
					MIN(dxfer, sizeof(ccb->csio.sense_data)));
			}
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			break;
		default:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		}
	scsi_done:
		if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
			BUS_SPACE_WRT4_ITL(outbound_queue, index);

		ccb->csio.resid = ccb->csio.dxfer_len - dxfer;

		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
		break;
	}
}

static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
{
	u_int32_t req, temp;

	while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) != IOPMU_QUEUE_EMPTY) {
		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			temp = bus_space_read_4(hba->bar0t,
					hba->bar0h, req +
					offsetof(struct hpt_iop_request_header,
						flags));
			if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				u_int64_t temp64;
				bus_space_read_region_4(hba->bar0t,
					hba->bar0h, req +
					offsetof(struct hpt_iop_request_header,
						context),
					(u_int32_t *)&temp64, 2);
				if (temp64) {
					hptiop_request_callback_itl(hba, req);
				} else {
					temp64 = 1;
					bus_space_write_region_4(hba->bar0t,
						hba->bar0h, req +
						offsetof(struct hpt_iop_request_header,
							context),
						(u_int32_t *)&temp64, 2);
				}
			} else
				hptiop_request_callback_itl(hba, req);
		}
	}
}

static int hptiop_intr_itl(struct hpt_iop_hba *hba)
{
	u_int32_t status;
	int ret = 0;

	status = BUS_SPACE_RD4_ITL(outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
		KdPrint(("hptiop: received outbound msg %x\n", msg));
		BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
		hptiop_os_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}

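/*
 * MV completion path: the 64-bit tag encodes the request type in the
 * MVIOP_CMD_TYPE_* bits plus an srb index for SCSI commands, so one
 * callback demultiplexes SCSI, IOCTL and get/set-config completions.
 */
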
static void hptiop_request_callback_mv(struct hpt_iop_hba *hba,
					u_int64_t _tag)
{
	u_int32_t context = (u_int32_t)_tag;

	if (context & MVIOP_CMD_TYPE_SCSI) {
		struct hpt_iop_srb *srb;
		struct hpt_iop_request_scsi_command *req;
		union ccb *ccb;
		u_int8_t *cdb;

		srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
		req = (struct hpt_iop_request_scsi_command *)srb;
		ccb = (union ccb *)srb->ccb;
		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
			cdb = ccb->csio.cdb_io.cdb_ptr;
		else
			cdb = ccb->csio.cdb_io.cdb_bytes;

		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
			ccb->ccb_h.status = CAM_REQ_CMP;
			goto scsi_done;
		}

		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
			req->header.result = IOP_RESULT_SUCCESS;

		switch (req->header.result) {
		case IOP_RESULT_SUCCESS:
			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
			case CAM_DIR_IN:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			case CAM_DIR_OUT:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			}

			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case IOP_RESULT_BAD_TARGET:
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		case IOP_RESULT_BUSY:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_INVALID_REQUEST:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		case IOP_RESULT_FAIL:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		case IOP_RESULT_RESET:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_CHECK_CONDITION:
			memset(&ccb->csio.sense_data, 0,
			    sizeof(ccb->csio.sense_data));
			if (req->dataxfer_length < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
				    req->dataxfer_length;
			else
				ccb->csio.sense_resid = 0;
			memcpy(&ccb->csio.sense_data, &req->sg_list,
				MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			break;
		default:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		}
scsi_done:
		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
	} else if (context & MVIOP_CMD_TYPE_IOCTL) {
		struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
			hba->config_done = 1;
		else
			hba->config_done = -1;
		wakeup(req);
	} else if (context &
			(MVIOP_CMD_TYPE_SET_CONFIG |
				MVIOP_CMD_TYPE_GET_CONFIG))
		hba->config_done = 1;
	else {
		device_printf(hba->pcidev, "wrong callback type\n");
	}
}

static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
					u_int32_t _tag)
{
	u_int32_t req_type = _tag & 0xf;

	struct hpt_iop_srb *srb;
	struct hpt_iop_request_scsi_command *req;
	union ccb *ccb;
	u_int8_t *cdb;

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->config_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		srb = hba->srb[(_tag >> 4) & 0xff];
		req = (struct hpt_iop_request_scsi_command *)srb;

		ccb = (union ccb *)srb->ccb;

		callout_stop(ccb->ccb_h.timeout_ch);

		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
			cdb = ccb->csio.cdb_io.cdb_ptr;
		else
			cdb = ccb->csio.cdb_io.cdb_bytes;

		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
			ccb->ccb_h.status = CAM_REQ_CMP;
			goto scsi_done;
		}

		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
			req->header.result = IOP_RESULT_SUCCESS;

		switch (req->header.result) {
		case IOP_RESULT_SUCCESS:
			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
			case CAM_DIR_IN:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			case CAM_DIR_OUT:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			}

			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case IOP_RESULT_BAD_TARGET:
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		case IOP_RESULT_BUSY:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_INVALID_REQUEST:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		case IOP_RESULT_FAIL:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		case IOP_RESULT_RESET:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_CHECK_CONDITION:
			memset(&ccb->csio.sense_data, 0,
			    sizeof(ccb->csio.sense_data));
			if (req->dataxfer_length < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
				    req->dataxfer_length;
			else
				ccb->csio.sense_resid = 0;
			memcpy(&ccb->csio.sense_data, &req->sg_list,
				MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			break;
		default:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		}
scsi_done:
		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
		break;
	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
			hba->config_done = 1;
		else
			hba->config_done = -1;
		wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
		break;
	default:
		device_printf(hba->pcidev, "wrong callback type\n");
		break;
	}
}

static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba *hba)
{
	u_int64_t req;

	while ((req = hptiop_mv_outbound_read(hba))) {
		if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
			if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
				hptiop_request_callback_mv(hba, req);
			}
		}
	}
}

static int hptiop_intr_mv(struct hpt_iop_hba *hba)
{
	u_int32_t status;
	int ret = 0;

	status = BUS_SPACE_RD4_MV0(outbound_doorbell);

	if (status)
		BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
		KdPrint(("hptiop: received outbound msg %x\n", msg));
		hptiop_os_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_mv(hba);
		ret = 1;
	}

	return ret;
}

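/*
 * "Frey" interrupt handling: completions arrive through an outbound list
 * in host memory.  *outlist_cptr is a shadow copy pointer the firmware
 * updates by DMA; the do/while below re-reads it after draining so that
 * entries which land while we are processing are not missed.
 */
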
static int hptiop_intr_mvfrey(struct hpt_iop_hba *hba)
{
	u_int32_t status, _tag, cptr;
	int ret = 0;

	if (hba->initialized) {
		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
	}

	status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
	if (status) {
		BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
		if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
			u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);
			hptiop_os_message_callback(hba, msg);
		}
		ret = 1;
	}

	status = BUS_SPACE_RD4_MVFREY2(isr_cause);
	if (status) {
		BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
		do {
			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
			while (hba->u.mvfrey.outlist_rptr != cptr) {
				hba->u.mvfrey.outlist_rptr++;
				if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
					hba->u.mvfrey.outlist_rptr = 0;
				}

				_tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
				hptiop_request_callback_mvfrey(hba, _tag);
				ret = 2;
			}
		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
	}

	if (hba->initialized) {
		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
	}

	return ret;
}

static int hptiop_send_sync_request_itl(struct hpt_iop_hba *hba,
					u_int32_t req32, u_int32_t millisec)
{
	u_int32_t i;
	u_int64_t temp64;

	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
	BUS_SPACE_RD4_ITL(outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		hptiop_intr_itl(hba);
		bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
			offsetof(struct hpt_iop_request_header, context),
			(u_int32_t *)&temp64, 2);
		if (temp64)
			return 0;
		DELAY(1000);
	}

	return -1;
}

static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
					void *req, u_int32_t millisec)
{
	u_int32_t i;
	u_int64_t phy_addr;
	hba->config_done = 0;

	phy_addr = hba->ctlcfgcmd_phy |
			(u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
	((struct hpt_iop_request_get_config *)req)->header.flags |=
		IOP_REQUEST_FLAG_SYNC_REQUEST |
		IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
	hptiop_mv_inbound_write(phy_addr, hba);
	BUS_SPACE_RD4_MV0(outbound_intmask);

	for (i = 0; i < millisec; i++) {
		hptiop_intr_mv(hba);
		if (hba->config_done)
			return 0;
		DELAY(1000);
	}
	return -1;
}

static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
					void *req, u_int32_t millisec)
{
	u_int32_t i, index;
	u_int64_t phy_addr;
	struct hpt_iop_request_header *reqhdr =
		(struct hpt_iop_request_header *)req;

	hba->config_done = 0;

	phy_addr = hba->ctlcfgcmd_phy;
	reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
			| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
			| IOP_REQUEST_FLAG_ADDR_BITS
			| ((phy_addr >> 16) & 0xffff0000);
	reqhdr->context = ((phy_addr & 0xffffffff) << 32)
			| IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;

	hba->u.mvfrey.inlist_wptr++;
	index = hba->u.mvfrey.inlist_wptr & 0x3fff;

	if (index == hba->u.mvfrey.list_count) {
		index = 0;
		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
	}

	hba->u.mvfrey.inlist[index].addr = phy_addr;
	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;

	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);

	for (i = 0; i < millisec; i++) {
		hptiop_intr_mvfrey(hba);
		if (hba->config_done)
			return 0;
		DELAY(1000);
	}
	return -1;
}

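/*
 * Note the inbound-list idiom above: the write pointer wraps modulo
 * list_count and flips CL_POINTER_TOGGLE on every wrap, which lets the
 * firmware tell a full ring from an empty one.  The same sequence is
 * repeated in hptiop_post_ioctl_command_mvfrey() below.
 */
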
static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
				u_int32_t msg, u_int32_t millisec)
{
	u_int32_t i;

	hba->msg_done = 0;
	hba->ops->post_msg(hba, msg);

	for (i = 0; i < millisec; i++) {
		hba->ops->iop_intr(hba);
		if (hba->msg_done)
			break;
		DELAY(1000);
	}

	return hba->msg_done ? 0 : -1;
}

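/*
 * Typical use, as elsewhere in this file: a controller reset is issued
 * with hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000), i.e.
 * post the message and poll the family's interrupt handler for up to
 * 60 seconds until the message-done callback fires.
 */
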
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u_int32_t req32;

	config->header.size = sizeof(struct hpt_iop_request_get_config);
	config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
	config->header.result = IOP_RESULT_PENDING;
	config->header.context = 0;

	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	bus_space_write_region_4(hba->bar0t, hba->bar0h,
		req32, (u_int32_t *)config,
		sizeof(struct hpt_iop_request_header) >> 2);

	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
		KdPrint(("hptiop: get config send cmd failed"));
		return -1;
	}

	bus_space_read_region_4(hba->bar0t, hba->bar0h,
		req32, (u_int32_t *)config,
		sizeof(struct hpt_iop_request_get_config) >> 2);

	BUS_SPACE_WRT4_ITL(outbound_queue, req32);

	return 0;
}

static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *req;

	if (!(req = hba->ctlcfg_ptr))
		return -1;

	req->header.flags = 0;
	req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_get_config);
	req->header.result = IOP_RESULT_PENDING;
	req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;

	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
		KdPrint(("hptiop: get config send cmd failed"));
		return -1;
	}

	*config = *req;
	return 0;
}

static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
	    info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
		KdPrint(("hptiop: header size %x/%x type %x/%x",
			 info->header.size,
			 (int)sizeof(struct hpt_iop_request_get_config),
			 info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
		return -1;
	}

	config->interface_version = info->interface_version;
	config->firmware_version = info->firmware_version;
	config->max_requests = info->max_requests;
	config->request_size = info->request_size;
	config->max_sg_count = info->max_sg_count;
	config->data_transfer_length = info->data_transfer_length;
	config->alignment_mask = info->alignment_mask;
	config->max_devices = info->max_devices;
	config->sdram_size = info->sdram_size;

	KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
		 config->max_requests, config->request_size,
		 config->data_transfer_length, config->max_devices,
		 config->sdram_size));

	return 0;
}

static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u_int32_t req32;

	req32 = BUS_SPACE_RD4_ITL(inbound_queue);

	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	config->header.size = sizeof(struct hpt_iop_request_set_config);
	config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
	config->header.result = IOP_RESULT_PENDING;
	config->header.context = 0;

	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
		(u_int32_t *)config,
		sizeof(struct hpt_iop_request_set_config) >> 2);

	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
		KdPrint(("hptiop: set config send cmd failed"));
		return -1;
	}

	BUS_SPACE_WRT4_ITL(outbound_queue, req32);

	return 0;
}

static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req;

	if (!(req = hba->ctlcfg_ptr))
		return -1;

	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
		sizeof(struct hpt_iop_request_header));

	req->header.flags = 0;
	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_set_config);
	req->header.result = IOP_RESULT_PENDING;
	req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;

	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
		KdPrint(("hptiop: set config send cmd failed"));
		return -1;
	}

	return 0;
}

static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req;

	if (!(req = hba->ctlcfg_ptr))
		return -1;

	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
		sizeof(struct hpt_iop_request_header));

	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_set_config);
	req->header.result = IOP_RESULT_PENDING;

	if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
		KdPrint(("hptiop: set config send cmd failed"));
		return -1;
	}

	return 0;
}

static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
					u_int32_t req32,
					struct hpt_iop_ioctl_param *pParams)
{
	u_int64_t temp64;
	struct hpt_iop_request_ioctl_command req;

	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
		(hba->max_request_size -
		offsetof(struct hpt_iop_request_ioctl_command, buf))) {
		device_printf(hba->pcidev, "request size beyond max value");
		return -1;
	}

	req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
		+ pParams->nInBufferSize;
	req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
	req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
	req.header.result = IOP_RESULT_PENDING;
	req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
	req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
	req.inbuf_size = pParams->nInBufferSize;
	req.outbuf_size = pParams->nOutBufferSize;
	req.bytes_returned = 0;

	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
		offsetof(struct hpt_iop_request_ioctl_command, buf)>>2);

	hptiop_lock_adapter(hba);

	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
	BUS_SPACE_RD4_ITL(outbound_intstatus);

	bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
		offsetof(struct hpt_iop_request_ioctl_command, header.context),
		(u_int32_t *)&temp64, 2);
	while (temp64) {
		if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
				0, "hptctl", HPT_OSM_TIMEOUT) == 0)
			break;
		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
		bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
			offsetof(struct hpt_iop_request_ioctl_command,
				header.context),
			(u_int32_t *)&temp64, 2);
	}

	hptiop_unlock_adapter(hba);
	return 0;
}

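/*
 * The synchronous ITL ioctl handshake above: the context doubleword of
 * the in-IOP request stays non-zero while the request is outstanding;
 * hptiop_request_callback_itl() zeroes it and wakeup()s the sleeper, and
 * a sleep timeout falls back to a full IOPMU_INBOUND_MSG0_RESET before
 * re-checking.
 */
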
static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
				void *user, int size)
{
	unsigned char byte;
	int i;

	for (i = 0; i < size; i++) {
		if (copyin((u_int8_t *)user + i, &byte, 1))
			return -1;
		bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
	}

	return 0;
}

static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
				void *user, int size)
{
	unsigned char byte;
	int i;

	for (i = 0; i < size; i++) {
		byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
		if (copyout(&byte, (u_int8_t *)user + i, 1))
			return -1;
	}

	return 0;
}

static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams)
{
	u_int32_t req32;
	u_int32_t result;

	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
		(pParams->Magic != HPT_IOCTL_MAGIC32))
		return EFAULT;

	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return EFAULT;

	if (pParams->nInBufferSize)
		if (hptiop_bus_space_copyin(hba, req32 +
			offsetof(struct hpt_iop_request_ioctl_command, buf),
			(void *)pParams->lpInBuffer, pParams->nInBufferSize))
			goto invalid;

	if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
		goto invalid;

	result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
			offsetof(struct hpt_iop_request_ioctl_command,
				header.result));

	if (result == IOP_RESULT_SUCCESS) {
		if (pParams->nOutBufferSize)
			if (hptiop_bus_space_copyout(hba, req32 +
				offsetof(struct hpt_iop_request_ioctl_command, buf) +
				((pParams->nInBufferSize + 3) & ~3),
				(void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
				goto invalid;

		if (pParams->lpBytesReturned) {
			if (hptiop_bus_space_copyout(hba, req32 +
				offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
				(void *)pParams->lpBytesReturned, sizeof(unsigned long)))
				goto invalid;
		}

		BUS_SPACE_WRT4_ITL(outbound_queue, req32);

		return 0;
	} else {
invalid:
		BUS_SPACE_WRT4_ITL(outbound_queue, req32);

		return EFAULT;
	}
}

static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams)
{
	u_int64_t req_phy;
	int size = 0;

	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
		(hba->max_request_size -
		offsetof(struct hpt_iop_request_ioctl_command, buf))) {
		device_printf(hba->pcidev, "request size beyond max value");
		return -1;
	}

	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
	req->inbuf_size = pParams->nInBufferSize;
	req->outbuf_size = pParams->nOutBufferSize;
	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
			+ pParams->nInBufferSize;
	req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
	req->header.result = IOP_RESULT_PENDING;
	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
	size = req->header.size >> 8;
	size = size > 3 ? 3 : size;
	req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
	hptiop_mv_inbound_write(req_phy, hba);

	BUS_SPACE_RD4_MV0(outbound_intmask);

	while (hba->config_done == 0) {
		if (hptiop_sleep(hba, req, 0,
				"hptctl", HPT_OSM_TIMEOUT) == 0)
			continue;
		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
	}
	return 0;
}

static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams)
{
	struct hpt_iop_request_ioctl_command *req;

	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
		(pParams->Magic != HPT_IOCTL_MAGIC32))
		return EFAULT;

	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
	hba->config_done = 0;
	hptiop_lock_adapter(hba);
	if (pParams->nInBufferSize)
		if (copyin((void *)pParams->lpInBuffer,
				req->buf, pParams->nInBufferSize))
			goto invalid;
	if (hptiop_post_ioctl_command_mv(hba, req, pParams))
		goto invalid;

	if (hba->config_done == 1) {
		if (pParams->nOutBufferSize)
			if (copyout(req->buf +
				((pParams->nInBufferSize + 3) & ~3),
				(void *)pParams->lpOutBuffer,
				pParams->nOutBufferSize))
				goto invalid;

		if (pParams->lpBytesReturned)
			if (copyout(&req->bytes_returned,
				(void *)pParams->lpBytesReturned,
				sizeof(u_int32_t)))
				goto invalid;
		hptiop_unlock_adapter(hba);
		return 0;
	} else {
invalid:
		hptiop_unlock_adapter(hba);
		return EFAULT;
	}
}

static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams)
{
	u_int64_t phy_addr;
	u_int32_t index;

	phy_addr = hba->ctlcfgcmd_phy;

	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
		(hba->max_request_size -
		offsetof(struct hpt_iop_request_ioctl_command, buf))) {
		device_printf(hba->pcidev, "request size beyond max value");
		return -1;
	}

	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
	req->inbuf_size = pParams->nInBufferSize;
	req->outbuf_size = pParams->nOutBufferSize;
	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
			+ pParams->nInBufferSize;

	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
	req->header.result = IOP_RESULT_PENDING;

	req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
			| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
			| IOP_REQUEST_FLAG_ADDR_BITS
			| ((phy_addr >> 16) & 0xffff0000);
	req->header.context = ((phy_addr & 0xffffffff) << 32)
			| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;

	hba->u.mvfrey.inlist_wptr++;
	index = hba->u.mvfrey.inlist_wptr & 0x3fff;

	if (index == hba->u.mvfrey.list_count) {
		index = 0;
		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
	}

	hba->u.mvfrey.inlist[index].addr = phy_addr;
	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;

	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);

	while (hba->config_done == 0) {
		if (hptiop_sleep(hba, req, 0, "hptctl", HPT_OSM_TIMEOUT) == 0)
			continue;
		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
	}
	return 0;
}

static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams)
{
	struct hpt_iop_request_ioctl_command *req;

	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
		(pParams->Magic != HPT_IOCTL_MAGIC32))
		return EFAULT;

	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
	hba->config_done = 0;
	hptiop_lock_adapter(hba);
	if (pParams->nInBufferSize)
		if (copyin((void *)pParams->lpInBuffer,
				req->buf, pParams->nInBufferSize))
			goto invalid;
	if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
		goto invalid;

	if (hba->config_done == 1) {
		if (pParams->nOutBufferSize)
			if (copyout(req->buf +
				((pParams->nInBufferSize + 3) & ~3),
				(void *)pParams->lpOutBuffer,
				pParams->nOutBufferSize))
				goto invalid;

		if (pParams->lpBytesReturned)
			if (copyout(&req->bytes_returned,
				(void *)pParams->lpBytesReturned,
				sizeof(u_int32_t)))
				goto invalid;
		hptiop_unlock_adapter(hba);
		return 0;
	} else {
invalid:
		hptiop_unlock_adapter(hba);
		return EFAULT;
	}
}

static int hptiop_rescan_bus(struct hpt_iop_hba *hba)
{
	union ccb *ccb;

	if ((ccb = xpt_alloc_ccb()) == NULL)
		return(ENOMEM);
	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(hba->sim),
		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(&ccb->ccb_h);
		return(EIO);
	}

	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
	ccb->ccb_h.func_code = XPT_SCAN_BUS;
	ccb->ccb_h.cbfcnp = hptiop_bus_scan_cb;
	ccb->crcn.flags = CAM_FLAG_NONE;
	xpt_action(ccb);
	return(0);
}

static void hptiop_bus_scan_cb(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
	xpt_free_ccb(&ccb->ccb_h);
}

static bus_dmamap_callback_t hptiop_map_srb;
static bus_dmamap_callback_t hptiop_post_scsi_command;
static bus_dmamap_callback_t hptiop_mv_map_ctlcfg;
static bus_dmamap_callback_t hptiop_mvfrey_map_ctlcfg;

static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
{
	hba->bar0_rid = 0x10;
	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

	if (hba->bar0_res == NULL) {
		device_printf(hba->pcidev,
			"failed to get iop base address.\n");
		return -1;
	}
	hba->bar0t = rman_get_bustag(hba->bar0_res);
	hba->bar0h = rman_get_bushandle(hba->bar0_res);
	hba->u.itl.mu = (struct hpt_iopmu_itl *)
			rman_get_virtual(hba->bar0_res);

	if (!hba->u.itl.mu) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "alloc mem res failed\n");
		return -1;
	}

	return 0;
}

static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
{
	hba->bar0_rid = 0x10;
	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

	if (hba->bar0_res == NULL) {
		device_printf(hba->pcidev, "failed to get iop bar0.\n");
		return -1;
	}
	hba->bar0t = rman_get_bustag(hba->bar0_res);
	hba->bar0h = rman_get_bushandle(hba->bar0_res);
	hba->u.mv.regs = (struct hpt_iopmv_regs *)
			rman_get_virtual(hba->bar0_res);

	if (!hba->u.mv.regs) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
		return -1;
	}

	hba->bar2_rid = 0x18;
	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);

	if (hba->bar2_res == NULL) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "failed to get iop bar2.\n");
		return -1;
	}

	hba->bar2t = rman_get_bustag(hba->bar2_res);
	hba->bar2h = rman_get_bushandle(hba->bar2_res);
	hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);

	if (!hba->u.mv.mu) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar2_rid, hba->bar2_res);
		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
		return -1;
	}

	return 0;
}

static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
{
	hba->bar0_rid = 0x10;
	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

	if (hba->bar0_res == NULL) {
		device_printf(hba->pcidev, "failed to get iop bar0.\n");
		return -1;
	}
	hba->bar0t = rman_get_bustag(hba->bar0_res);
	hba->bar0h = rman_get_bushandle(hba->bar0_res);
	hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
			rman_get_virtual(hba->bar0_res);

	if (!hba->u.mvfrey.config) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
		return -1;
	}

	hba->bar2_rid = 0x18;
	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);

	if (hba->bar2_res == NULL) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "failed to get iop bar2.\n");
		return -1;
	}

	hba->bar2t = rman_get_bustag(hba->bar2_res);
	hba->bar2h = rman_get_bushandle(hba->bar2_res);
	hba->u.mvfrey.mu =
		(struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res);

	if (!hba->u.mvfrey.mu) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar2_rid, hba->bar2_res);
		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
		return -1;
	}

	return 0;
}

static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
{
	if (hba->bar0_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
}

static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
{
	if (hba->bar0_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
	if (hba->bar2_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar2_rid, hba->bar2_res);
}

static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
{
	if (hba->bar0_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
	if (hba->bar2_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar2_rid, hba->bar2_res);
}

static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
{
	if (bus_dma_tag_create(hba->parent_dmat,
			1,	/* alignment */
			0,	/* boundary */
			BUS_SPACE_MAXADDR_32BIT,
			BUS_SPACE_MAXADDR,
			NULL, NULL,
			0x800 - 0x8,
			1,	/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,
			BUS_DMA_ALLOCNOW,
			&hba->ctlcfg_dmat)) {
		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
		return -1;
	}

	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
		&hba->ctlcfg_dmamap) != 0) {
		device_printf(hba->pcidev,
				"bus_dmamem_alloc failed!\n");
		bus_dma_tag_destroy(hba->ctlcfg_dmat);
		return -1;
	}

	if (bus_dmamap_load(hba->ctlcfg_dmat,
			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
			MVIOP_IOCTLCFG_SIZE,
			hptiop_mv_map_ctlcfg, hba, 0)) {
		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
		if (hba->ctlcfg_dmat) {
			bus_dmamem_free(hba->ctlcfg_dmat,
				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
			bus_dma_tag_destroy(hba->ctlcfg_dmat);
		}
		return -1;
	}

	return 0;
}

static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
{
	u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);

	list_count >>= 16;

	if (list_count == 0) {
		return -1;
	}

	hba->u.mvfrey.list_count = list_count;
	hba->u.mvfrey.internal_mem_size = 0x800
		+ list_count * sizeof(struct mvfrey_inlist_entry)
		+ list_count * sizeof(struct mvfrey_outlist_entry)
		+ sizeof(int);
	if (bus_dma_tag_create(hba->parent_dmat,
			1,	/* alignment */
			0,	/* boundary */
			BUS_SPACE_MAXADDR_32BIT,
			BUS_SPACE_MAXADDR,
			NULL, NULL,
			hba->u.mvfrey.internal_mem_size,
			1,	/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,
			BUS_DMA_ALLOCNOW,
			&hba->ctlcfg_dmat)) {
		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
		return -1;
	}

	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
		&hba->ctlcfg_dmamap) != 0) {
		device_printf(hba->pcidev,
				"bus_dmamem_alloc failed!\n");
		bus_dma_tag_destroy(hba->ctlcfg_dmat);
		return -1;
	}

	if (bus_dmamap_load(hba->ctlcfg_dmat,
			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
			hba->u.mvfrey.internal_mem_size,
			hptiop_mvfrey_map_ctlcfg, hba, 0)) {
		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
		if (hba->ctlcfg_dmat) {
			bus_dmamem_free(hba->ctlcfg_dmat,
				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
			bus_dma_tag_destroy(hba->ctlcfg_dmat);
		}
		return -1;
	}

	return 0;
}

static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
	return 0;
}

static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
{
	if (hba->ctlcfg_dmat) {
		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
		bus_dmamem_free(hba->ctlcfg_dmat,
			hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
		bus_dma_tag_destroy(hba->ctlcfg_dmat);
	}

	return 0;
}

static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
{
	if (hba->ctlcfg_dmat) {
		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
		bus_dmamem_free(hba->ctlcfg_dmat,
			hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
		bus_dma_tag_destroy(hba->ctlcfg_dmat);
	}

	return 0;
}

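/*
 * hptiop_reset_comm_mvfrey() below re-arms the "Frey" communication area
 * after a RESET_COMM message: it reprograms the in/out list base
 * addresses and re-seeds the ring pointers at list_count - 1 with
 * CL_POINTER_TOGGLE set, matching the empty-ring convention used by the
 * interrupt handler.
 */
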
static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
{
	u_int32_t i = 100;

	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
		return -1;

	/* wait 100ms for MCU ready */
	while (i--) {
		DELAY(1000);
	}

	BUS_SPACE_WRT4_MVFREY2(inbound_base,
			hba->u.mvfrey.inlist_phy & 0xffffffff);
	BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
			(hba->u.mvfrey.inlist_phy >> 16) >> 16);

	BUS_SPACE_WRT4_MVFREY2(outbound_base,
			hba->u.mvfrey.outlist_phy & 0xffffffff);
	BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
			(hba->u.mvfrey.outlist_phy >> 16) >> 16);

	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
			hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
			(hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);

	hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
				| CL_POINTER_TOGGLE;
	*hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
				| CL_POINTER_TOGGLE;
	hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;

	return 0;
}

/*
 * CAM driver interface
 */
static device_method_t driver_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     hptiop_probe),
	DEVMETHOD(device_attach,    hptiop_attach),
	DEVMETHOD(device_detach,    hptiop_detach),
	DEVMETHOD(device_shutdown,  hptiop_shutdown),
	DEVMETHOD_END
};

static struct hptiop_adapter_ops hptiop_itl_ops = {
	.family            = INTEL_BASED_IOP,
	.iop_wait_ready    = hptiop_wait_ready_itl,
	.internal_memalloc = 0,
	.internal_memfree  = hptiop_internal_memfree_itl,
	.alloc_pci_res     = hptiop_alloc_pci_res_itl,
	.release_pci_res   = hptiop_release_pci_res_itl,
	.enable_intr       = hptiop_enable_intr_itl,
	.disable_intr      = hptiop_disable_intr_itl,
	.get_config        = hptiop_get_config_itl,
	.set_config        = hptiop_set_config_itl,
	.iop_intr          = hptiop_intr_itl,
	.post_msg          = hptiop_post_msg_itl,
	.post_req          = hptiop_post_req_itl,
	.do_ioctl          = hptiop_do_ioctl_itl,
	.reset_comm        = 0,
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
	.family            = MV_BASED_IOP,
	.iop_wait_ready    = hptiop_wait_ready_mv,
	.internal_memalloc = hptiop_internal_memalloc_mv,
	.internal_memfree  = hptiop_internal_memfree_mv,
	.alloc_pci_res     = hptiop_alloc_pci_res_mv,
	.release_pci_res   = hptiop_release_pci_res_mv,
	.enable_intr       = hptiop_enable_intr_mv,
	.disable_intr      = hptiop_disable_intr_mv,
	.get_config        = hptiop_get_config_mv,
	.set_config        = hptiop_set_config_mv,
	.iop_intr          = hptiop_intr_mv,
	.post_msg          = hptiop_post_msg_mv,
	.post_req          = hptiop_post_req_mv,
	.do_ioctl          = hptiop_do_ioctl_mv,
	.reset_comm        = 0,
};

static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
	.family            = MVFREY_BASED_IOP,
	.iop_wait_ready    = hptiop_wait_ready_mvfrey,
	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
	.internal_memfree  = hptiop_internal_memfree_mvfrey,
	.alloc_pci_res     = hptiop_alloc_pci_res_mvfrey,
	.release_pci_res   = hptiop_release_pci_res_mvfrey,
	.enable_intr       = hptiop_enable_intr_mvfrey,
	.disable_intr      = hptiop_disable_intr_mvfrey,
	.get_config        = hptiop_get_config_mvfrey,
	.set_config        = hptiop_set_config_mvfrey,
	.iop_intr          = hptiop_intr_mvfrey,
	.post_msg          = hptiop_post_msg_mvfrey,
	.post_req          = hptiop_post_req_mvfrey,
	.do_ioctl          = hptiop_do_ioctl_mvfrey,
	.reset_comm        = hptiop_reset_comm_mvfrey,
};

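/*
 * Per-family dispatch tables.  The shared attach/detach/interrupt paths
 * only talk to the hardware through these hooks; entries a family does
 * not need (.internal_memalloc and .reset_comm for ITL, .reset_comm for
 * MV) are left 0, and their callers check the family before dispatching.
 */
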
static driver_t hptiop_pci_driver = {
	driver_name,
	driver_methods,
	sizeof(struct hpt_iop_hba)
};

DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, NULL, NULL);
MODULE_DEPEND(hptiop, cam, 1, 1, 1);
MODULE_VERSION(hptiop, 1);

static int hptiop_probe(device_t dev)
{
	struct hpt_iop_hba *hba;
	u_int32_t id;
	static char buf[256];
	int sas = 0;
	struct hptiop_adapter_ops *ops;

	if (pci_get_vendor(dev) != 0x1103)
		return (ENXIO);

	id = pci_get_device(dev);

	switch (id) {
	case 0x4520:
	case 0x4522:
		sas = 1;
		ops = &hptiop_mvfrey_ops;
		break;
	case 0x4210:
	case 0x4211:
	case 0x4310:
	case 0x4311:
	case 0x4320:
	case 0x4321:
	case 0x4322:
		sas = 1;
		/* FALLTHROUGH */
	case 0x3220:
	case 0x3320:
	case 0x3410:
	case 0x3520:
	case 0x3510:
	case 0x3511:
	case 0x3521:
	case 0x3522:
	case 0x3530:
	case 0x3540:
	case 0x3560:
		ops = &hptiop_itl_ops;
		break;
	case 0x3020:
	case 0x3120:
	case 0x3122:
		ops = &hptiop_mv_ops;
		break;
	default:
		return (ENXIO);
	}

	device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
		pci_get_bus(dev), pci_get_slot(dev),
		pci_get_function(dev), pci_get_irq(dev));

	ksprintf(buf, "RocketRAID %x %s Controller",
		id, sas ? "SAS" : "SATA");
	device_set_desc_copy(dev, buf);

	hba = (struct hpt_iop_hba *)device_get_softc(dev);
	bzero(hba, sizeof(struct hpt_iop_hba));
	hba->ops = ops;

	KdPrint(("hba->ops=%p\n", hba->ops));
	return 0;
}

1873 static int hptiop_attach(device_t dev)
1875 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1876 struct hpt_iop_request_get_config iop_config;
1877 struct hpt_iop_request_set_config set_config;
1878 int rid = 0;
1879 struct cam_devq *devq;
1880 struct ccb_setasync *ccb;
1881 u_int32_t unit = device_get_unit(dev);
1883 device_printf(dev, "RocketRAID 3xxx/4xxx controller driver %s\n",
1884 driver_version);
1886 KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1887 pci_get_bus(dev), pci_get_slot(dev),
1888 pci_get_function(dev), hba->ops));
1890 pci_enable_busmaster(dev);
1891 hba->pcidev = dev;
1893 if (hba->ops->alloc_pci_res(hba))
1894 return ENXIO;
1896 if (hba->ops->iop_wait_ready(hba, 2000)) {
1897 device_printf(dev, "adapter is not ready\n");
1898 goto release_pci_res;

	lockinit(&hba->lock, "hptioplock", 0, LK_CANRECURSE);

	if (bus_dma_tag_create(NULL, /* parent */
			1, /* alignment */
			0, /* boundary */
			BUS_SPACE_MAXADDR, /* lowaddr */
			BUS_SPACE_MAXADDR, /* highaddr */
			NULL, NULL, /* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
			BUS_SPACE_UNRESTRICTED, /* nsegments */
			BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
			0, /* flags */
			&hba->parent_dmat /* tag */))
	{
		device_printf(dev, "alloc parent_dmat failed\n");
		goto release_pci_res;
	}

	if (hba->ops->family == MV_BASED_IOP) {
		if (hba->ops->internal_memalloc(hba)) {
			device_printf(dev, "alloc srb_dmat failed\n");
			goto destroy_parent_tag;
		}
	}

	if (hba->ops->get_config(hba, &iop_config)) {
		device_printf(dev, "get iop config failed.\n");
		goto get_config_failed;
	}

	hba->firmware_version = iop_config.firmware_version;
	hba->interface_version = iop_config.interface_version;
	hba->max_requests = iop_config.max_requests;
	hba->max_devices = iop_config.max_devices;
	hba->max_request_size = iop_config.request_size;
	hba->max_sg_count = iop_config.max_sg_count;

	if (hba->ops->family == MVFREY_BASED_IOP) {
		if (hba->ops->internal_memalloc(hba)) {
			device_printf(dev, "alloc srb_dmat failed\n");
			goto destroy_parent_tag;
		}
		if (hba->ops->reset_comm(hba)) {
			device_printf(dev, "reset comm failed\n");
			goto get_config_failed;
		}
	}
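
	/*
	 * Two child tags hang off parent_dmat: io_dmat maps per-command
	 * data buffers (up to max_sg_count segments, as reported by the
	 * firmware above), while srb_dmat backs one contiguous SRB pool;
	 * the extra 0x20 bytes leave room for the 32-byte alignment done
	 * in hptiop_map_srb().
	 */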
	if (bus_dma_tag_create(hba->parent_dmat, /* parent */
			4, /* alignment */
			BUS_SPACE_MAXADDR_32BIT + 1, /* boundary */
			BUS_SPACE_MAXADDR, /* lowaddr */
			BUS_SPACE_MAXADDR, /* highaddr */
			NULL, NULL, /* filter, filterarg */
			PAGE_SIZE * (hba->max_sg_count - 1), /* maxsize */
			hba->max_sg_count, /* nsegments */
			0x20000, /* maxsegsize */
			BUS_DMA_ALLOCNOW, /* flags */
			&hba->io_dmat /* tag */))
	{
		device_printf(dev, "alloc io_dmat failed\n");
		goto get_config_failed;
	}

	if (bus_dma_tag_create(hba->parent_dmat, /* parent */
			1, /* alignment */
			0, /* boundary */
			BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
			BUS_SPACE_MAXADDR, /* highaddr */
			NULL, NULL, /* filter, filterarg */
			HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
			1, /* nsegments */
			BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
			0, /* flags */
			&hba->srb_dmat /* tag */))
	{
		device_printf(dev, "alloc srb_dmat failed\n");
		goto destroy_io_dmat;
	}

	if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT,
	    &hba->srb_dmamap) != 0)
	{
		device_printf(dev, "srb bus_dmamem_alloc failed!\n");
		goto destroy_srb_dmat;
	}

	if (bus_dmamap_load(hba->srb_dmat,
	    hba->srb_dmamap, hba->uncached_ptr,
	    (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
	    hptiop_map_srb, hba, 0))
	{
		device_printf(dev, "bus_dmamap_load failed!\n");
		goto srb_dmamem_free;
	}

	if ((devq = cam_simq_alloc(hba->max_requests - 1)) == NULL) {
		device_printf(dev, "cam_simq_alloc failed\n");
		goto srb_dmamap_unload;
	}

	hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
	    hba, unit, &sim_mplock, hba->max_requests - 1, 1, devq);
	cam_simq_release(devq);
	if (!hba->sim) {
		device_printf(dev, "cam_sim_alloc failed\n");
		goto srb_dmamap_unload;
	}

	if (xpt_bus_register(hba->sim, 0) != CAM_SUCCESS)
	{
		device_printf(dev, "xpt_bus_register failed\n");
		goto free_cam_sim;
	}

	if (xpt_create_path(&hba->path, /*periph */ NULL,
	    cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		device_printf(dev, "xpt_create_path failed\n");
		goto deregister_xpt_bus;
	}

	bzero(&set_config, sizeof(set_config));
	set_config.iop_id = unit;
	set_config.vbus_id = cam_sim_path(hba->sim);
	set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;

	if (hba->ops->set_config(hba, &set_config)) {
		device_printf(dev, "set iop config failed.\n");
		goto free_hba_path;
	}

	ccb = &xpt_alloc_ccb()->csa;
	xpt_setup_ccb(&ccb->ccb_h, hba->path, /*priority*/5);
	ccb->ccb_h.func_code = XPT_SASYNC_CB;
	ccb->event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
	ccb->callback = hptiop_async;
	ccb->callback_arg = hba->sim;
	xpt_action((union ccb *)ccb);
	xpt_free_ccb(&ccb->ccb_h);

	rid = 0;
	if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ,
	    &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(dev, "allocate irq failed!\n");
		goto free_hba_path;
	}

	if (bus_setup_intr(hba->pcidev, hba->irq_res, 0,
	    hptiop_pci_intr, hba, &hba->irq_handle, NULL))
	{
		device_printf(dev, "setup interrupt handler failed!\n");
		goto free_irq_resource;
	}

	if (hptiop_send_sync_msg(hba,
	    IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		device_printf(dev, "failed to start background task\n");
		goto teardown_irq_resource;
	}

	hba->ops->enable_intr(hba);
	hba->initialized = 1;

	hba->ioctl_dev = make_dev(&hptiop_ops, unit,
	    UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
	    S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);

	hba->ioctl_dev->si_drv1 = hba;

	hptiop_rescan_bus(hba);

	return 0;
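
	/*
	 * Error unwind: the labels below run in reverse order of the
	 * setup above, so each releases only what was already acquired.
	 */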

teardown_irq_resource:
	bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);

free_irq_resource:
	bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);

free_hba_path:
	xpt_free_path(hba->path);

deregister_xpt_bus:
	xpt_bus_deregister(cam_sim_path(hba->sim));

free_cam_sim:
	cam_sim_free(hba->sim);

srb_dmamap_unload:
	if (hba->uncached_ptr)
		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);

srb_dmamem_free:
	if (hba->uncached_ptr)
		bus_dmamem_free(hba->srb_dmat,
		    hba->uncached_ptr, hba->srb_dmamap);

destroy_srb_dmat:
	if (hba->srb_dmat)
		bus_dma_tag_destroy(hba->srb_dmat);

destroy_io_dmat:
	if (hba->io_dmat)
		bus_dma_tag_destroy(hba->io_dmat);

get_config_failed:
	hba->ops->internal_memfree(hba);

destroy_parent_tag:
	if (hba->parent_dmat)
		bus_dma_tag_destroy(hba->parent_dmat);

release_pci_res:
	if (hba->ops->release_pci_res)
		hba->ops->release_pci_res(hba);

	return ENXIO;
}
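
/*
 * Detach refuses to proceed while any "da" peripheral still holds a
 * reference (see hptiop_os_query_remove_device()); otherwise it shuts
 * the firmware down, stops the background task and releases resources,
 * all under the adapter lock.
 */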
static int hptiop_detach(device_t dev)
{
	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
	int i;
	int error = EBUSY;

	hptiop_lock_adapter(hba);
	for (i = 0; i < hba->max_devices; i++)
		if (hptiop_os_query_remove_device(hba, i)) {
			device_printf(dev, "file system is busy. id=%d", i);
			goto out;
		}

	if ((error = hptiop_shutdown(dev)) != 0)
		goto out;
	if (hptiop_send_sync_msg(hba,
	    IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
		goto out;

	hptiop_release_resource(hba);
	error = 0;
out:
	hptiop_unlock_adapter(hba);
	return error;
}

static int hptiop_shutdown(device_t dev)
{
	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
	int error = 0;

	if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
		device_printf(dev, "device is busy");
		return EBUSY;
	}

	hba->ops->disable_intr(hba);

	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
		error = EBUSY;

	return error;
}
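
/*
 * Shared interrupt handler: take the adapter lock and dispatch to the
 * family-specific iop_intr handler.  hptiop_poll() reuses the same
 * path for CAM polled operation.
 */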
static void hptiop_pci_intr(void *arg)
{
	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;

	hptiop_lock_adapter(hba);
	hba->ops->iop_intr(hba);
	hptiop_unlock_adapter(hba);
}

static void hptiop_poll(struct cam_sim *sim)
{
	hptiop_pci_intr(cam_sim_softc(sim));
}

static void hptiop_async(void *callback_arg, u_int32_t code,
    struct cam_path *path, void *arg)
{
	/* No async events currently require action from this driver. */
}

static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
{
	BUS_SPACE_WRT4_ITL(outbound_intmask,
	    ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
}

static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
{
	u_int32_t int_mask;

	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);

	int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE |
	    MVIOP_MU_OUTBOUND_INT_MSG;
	BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
}

static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
{
	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);

	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
	BUS_SPACE_RD4_MVFREY2(isr_enable);

	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
}

static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
{
	u_int32_t int_mask;

	int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);

	int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
	BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
	BUS_SPACE_RD4_ITL(outbound_intstatus);
}

static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
{
	u_int32_t int_mask;

	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);

	int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG |
	    MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
	BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
	BUS_SPACE_RD4_MV0(outbound_intmask);
}

static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
{
	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);

	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
	BUS_SPACE_RD4_MVFREY2(isr_enable);

	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
}

static void hptiop_reset_adapter(void *argv)
{
	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)argv;

	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
		return;
	hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK,
	    5000);
}
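
/*
 * SRB allocation is a simple LIFO free list threaded through
 * srb->next.  There is no internal locking; callers such as
 * hptiop_action() are expected to hold the adapter lock.
 */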
static void *hptiop_get_srb(struct hpt_iop_hba *hba)
{
	struct hpt_iop_srb *srb;

	if (hba->srb_list) {
		srb = hba->srb_list;
		hba->srb_list = srb->next;
		return srb;
	}

	return NULL;
}

static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
{
	srb->next = hba->srb_list;
	hba->srb_list = srb;
}
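
/*
 * CAM action entry point.  Only LUN 0 and target IDs below the
 * firmware-reported max_devices are accepted; data buffers are mapped
 * through io_dmat with hptiop_post_scsi_command() as the busdma
 * callback, which in turn posts to the family-specific request queue.
 */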
static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
{
	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
	struct hpt_iop_srb *srb;

	switch (ccb->ccb_h.func_code) {

	case XPT_SCSI_IO:
		hptiop_lock_adapter(hba);
		if (ccb->ccb_h.target_lun != 0 ||
		    ccb->ccb_h.target_id >= hba->max_devices ||
		    (ccb->ccb_h.flags & CAM_CDB_PHYS))
		{
			ccb->ccb_h.status = CAM_TID_INVALID;
			xpt_done(ccb);
			goto scsi_done;
		}

		if ((srb = hptiop_get_srb(hba)) == NULL) {
			device_printf(hba->pcidev, "srb allocation failed");
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			xpt_done(ccb);
			goto scsi_done;
		}

		srb->ccb = ccb;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			hptiop_post_scsi_command(srb, NULL, 0, 0);
		else if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
				int error;

				error = bus_dmamap_load(hba->io_dmat,
				    srb->dma_map,
				    ccb->csio.data_ptr,
				    ccb->csio.dxfer_len,
				    hptiop_post_scsi_command,
				    srb, 0);

				if (error && error != EINPROGRESS) {
					device_printf(hba->pcidev,
					    "bus_dmamap_load error %d", error);
					xpt_freeze_simq(hba->sim, 1);
					ccb->ccb_h.status = CAM_REQ_CMP_ERR;
invalid:
					hptiop_free_srb(hba, srb);
					xpt_done(ccb);
					goto scsi_done;
				}
			}
			else {
				device_printf(hba->pcidev,
				    "CAM_DATA_PHYS not supported");
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				goto invalid;
			}
		}
		else {
			struct bus_dma_segment *segs;

			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 ||
			    (ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
				device_printf(hba->pcidev, "SCSI cmd failed");
				ccb->ccb_h.status = CAM_PROVIDE_FAIL;
				goto invalid;
			}

			segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
			hptiop_post_scsi_command(srb, segs,
			    ccb->csio.sglist_cnt, 0);
		}

scsi_done:
		hptiop_unlock_adapter(hba);
		return;

	case XPT_RESET_BUS:
		device_printf(hba->pcidev, "reset adapter");
		hptiop_lock_adapter(hba);
		hba->msg_done = 0;
		hptiop_reset_adapter(hba);
		hptiop_unlock_adapter(hba);
		break;

	case XPT_GET_TRAN_SETTINGS:
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;

	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, 1);
		break;

	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = hba->max_devices;
		cpi->max_lun = 0;
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->initiator_id = hba->max_devices;
		cpi->base_transfer_speed = 3300;

		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}

	xpt_done(ccb);
	return;
}
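
/*
 * ITL request posting has two paths.  An SRB flagged
 * HPT_SRB_FLAG_HIGH_MEM_ACESS appears to sit above the window the
 * firmware can address directly (see hptiop_map_srb()), so the request
 * is built on the stack and copied into the BAR0 frame handed out by
 * the inbound queue register; otherwise the host-resident SRB itself
 * is posted by physical address, with size bits added on newer
 * firmware/interface revisions.
 */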
static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
    struct hpt_iop_srb *srb,
    bus_dma_segment_t *segs, int nsegs)
{
	int idx;
	union ccb *ccb = srb->ccb;
	u_int8_t *cdb;

	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
		cdb = ccb->csio.cdb_io.cdb_ptr;
	else
		cdb = ccb->csio.cdb_io.cdb_bytes;

	KdPrint(("ccb=%p %x-%x-%x\n",
	    ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb + 1),
	    *((u_int32_t *)cdb + 2)));

	if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
		u_int32_t iop_req32;
		struct hpt_iop_request_scsi_command req;

		iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);

		if (iop_req32 == IOPMU_QUEUE_EMPTY) {
			device_printf(hba->pcidev, "invalid req offset\n");
			ccb->ccb_h.status = CAM_BUSY;
			bus_dmamap_unload(hba->io_dmat, srb->dma_map);
			hptiop_free_srb(hba, srb);
			xpt_done(ccb);
			return;
		}

		if (ccb->csio.dxfer_len && nsegs > 0) {
			struct hpt_iopsg *psg = req.sg_list;

			for (idx = 0; idx < nsegs; idx++, psg++) {
				psg->pci_address =
				    (u_int64_t)segs[idx].ds_addr;
				psg->size = segs[idx].ds_len;
				psg->eot = 0;
			}
			psg[-1].eot = 1;
		}

		bcopy(cdb, req.cdb, ccb->csio.cdb_len);

		req.header.size =
		    offsetof(struct hpt_iop_request_scsi_command, sg_list) +
		    nsegs * sizeof(struct hpt_iopsg);
		req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
		req.header.flags = 0;
		req.header.result = IOP_RESULT_PENDING;
		req.header.context = (u_int64_t)(unsigned long)srb;
		req.dataxfer_length = ccb->csio.dxfer_len;
		req.channel = 0;
		req.target = ccb->ccb_h.target_id;
		req.lun = ccb->ccb_h.target_lun;

		bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
		    (u_int8_t *)&req, req.header.size);

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(hba->io_dmat,
			    srb->dma_map, BUS_DMASYNC_PREREAD);
		}
		else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
			bus_dmamap_sync(hba->io_dmat,
			    srb->dma_map, BUS_DMASYNC_PREWRITE);

		BUS_SPACE_WRT4_ITL(inbound_queue, iop_req32);
	} else {
		struct hpt_iop_request_scsi_command *req;

		req = (struct hpt_iop_request_scsi_command *)srb;
		if (ccb->csio.dxfer_len && nsegs > 0) {
			struct hpt_iopsg *psg = req->sg_list;

			for (idx = 0; idx < nsegs; idx++, psg++) {
				psg->pci_address =
				    (u_int64_t)segs[idx].ds_addr;
				psg->size = segs[idx].ds_len;
				psg->eot = 0;
			}
			psg[-1].eot = 1;
		}

		bcopy(cdb, req->cdb, ccb->csio.cdb_len);

		req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
		req->header.result = IOP_RESULT_PENDING;
		req->dataxfer_length = ccb->csio.dxfer_len;
		req->channel = 0;
		req->target = ccb->ccb_h.target_id;
		req->lun = ccb->ccb_h.target_lun;
		req->header.size =
		    offsetof(struct hpt_iop_request_scsi_command, sg_list) +
		    nsegs * sizeof(struct hpt_iopsg);
		req->header.context = (u_int64_t)srb->index |
		    IOPMU_QUEUE_ADDR_HOST_BIT;
		req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(hba->io_dmat,
			    srb->dma_map, BUS_DMASYNC_PREREAD);
		} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(hba->io_dmat,
			    srb->dma_map, BUS_DMASYNC_PREWRITE);
		}

		if (hba->firmware_version > 0x01020000 ||
		    hba->interface_version > 0x01020000) {
			u_int32_t size_bits;

			if (req->header.size < 256)
				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
			else if (req->header.size < 512)
				size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
			else
				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
				    IOPMU_QUEUE_ADDR_HOST_BIT;

			BUS_SPACE_WRT4_ITL(inbound_queue,
			    (u_int32_t)srb->phy_addr | size_bits);
		} else
			BUS_SPACE_WRT4_ITL(inbound_queue,
			    (u_int32_t)srb->phy_addr |
			    IOPMU_QUEUE_ADDR_HOST_BIT);
	}
}
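
/*
 * MV request posting: the SG list and header are built in the
 * host-resident SRB, then its physical address is written to the
 * inbound queue with the request size (in 256-byte units, clamped
 * to 3) encoded in the low bits.
 */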
static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
    struct hpt_iop_srb *srb,
    bus_dma_segment_t *segs, int nsegs)
{
	int idx, size;
	union ccb *ccb = srb->ccb;
	u_int8_t *cdb;
	struct hpt_iop_request_scsi_command *req;
	u_int64_t req_phy;

	req = (struct hpt_iop_request_scsi_command *)srb;
	req_phy = srb->phy_addr;

	if (ccb->csio.dxfer_len && nsegs > 0) {
		struct hpt_iopsg *psg = req->sg_list;

		for (idx = 0; idx < nsegs; idx++, psg++) {
			psg->pci_address = (u_int64_t)segs[idx].ds_addr;
			psg->size = segs[idx].ds_len;
			psg->eot = 0;
		}
		psg[-1].eot = 1;
	}

	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
		cdb = ccb->csio.cdb_io.cdb_ptr;
	else
		cdb = ccb->csio.cdb_io.cdb_bytes;

	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
	req->header.result = IOP_RESULT_PENDING;
	req->dataxfer_length = ccb->csio.dxfer_len;
	req->channel = 0;
	req->target = ccb->ccb_h.target_id;
	req->lun = ccb->ccb_h.target_lun;
	req->header.size = sizeof(struct hpt_iop_request_scsi_command) -
	    sizeof(struct hpt_iopsg) +
	    nsegs * sizeof(struct hpt_iopsg);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(hba->io_dmat,
		    srb->dma_map, BUS_DMASYNC_PREREAD);
	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		bus_dmamap_sync(hba->io_dmat,
		    srb->dma_map, BUS_DMASYNC_PREWRITE);
	}

	req->header.context = (u_int64_t)srb->index <<
	    MVIOP_REQUEST_NUMBER_START_BIT | MVIOP_CMD_TYPE_SCSI;
	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;

	size = req->header.size >> 8;
	hptiop_mv_inbound_write(req_phy |
	    MVIOP_MU_QUEUE_ADDR_HOST_BIT |
	    (size > 3 ? 3 : size), hba);
}
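
/*
 * MVFrey posting uses a host-memory inbound ring: the low 14 bits of
 * inlist_wptr index the ring and CL_POINTER_TOGGLE flips on each wrap
 * so the firmware can presumably tell stale entries from new ones.
 * The write pointer is then mirrored to the doorbell register, and a
 * 20 second callout arms a full adapter reset as the command timeout.
 */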
static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
    struct hpt_iop_srb *srb,
    bus_dma_segment_t *segs, int nsegs)
{
	int idx, index;
	union ccb *ccb = srb->ccb;
	u_int8_t *cdb;
	struct hpt_iop_request_scsi_command *req;
	u_int64_t req_phy;

	req = (struct hpt_iop_request_scsi_command *)srb;
	req_phy = srb->phy_addr;

	if (ccb->csio.dxfer_len && nsegs > 0) {
		struct hpt_iopsg *psg = req->sg_list;

		for (idx = 0; idx < nsegs; idx++, psg++) {
			psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
			psg->size = segs[idx].ds_len;
			psg->eot = 0;
		}
		psg[-1].eot = 1;
	}

	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
		cdb = ccb->csio.cdb_io.cdb_ptr;
	else
		cdb = ccb->csio.cdb_io.cdb_bytes;

	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
	req->header.result = IOP_RESULT_PENDING;
	req->dataxfer_length = ccb->csio.dxfer_len;
	req->channel = 0;
	req->target = ccb->ccb_h.target_id;
	req->lun = ccb->ccb_h.target_lun;
	req->header.size = sizeof(struct hpt_iop_request_scsi_command) -
	    sizeof(struct hpt_iopsg) +
	    nsegs * sizeof(struct hpt_iopsg);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(hba->io_dmat,
		    srb->dma_map, BUS_DMASYNC_PREREAD);
	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		bus_dmamap_sync(hba->io_dmat,
		    srb->dma_map, BUS_DMASYNC_PREWRITE);
	}

	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT |
	    IOP_REQUEST_FLAG_ADDR_BITS |
	    ((req_phy >> 16) & 0xffff0000);
	req->header.context = ((req_phy & 0xffffffff) << 32) |
	    srb->index << 4 |
	    IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;

	hba->u.mvfrey.inlist_wptr++;
	index = hba->u.mvfrey.inlist_wptr & 0x3fff;

	if (index == hba->u.mvfrey.list_count) {
		index = 0;
		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
	}

	hba->u.mvfrey.inlist[index].addr = req_phy;
	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;

	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);

	if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
		callout_reset(ccb->ccb_h.timeout_ch, 20 * hz,
		    hptiop_reset_adapter, hba);
	}
}
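
/*
 * busdma callback for SCSI commands: reject mapping errors and
 * oversized SG lists with CAM_BUSY, otherwise hand the mapped
 * segments to the family-specific post_req hook.
 */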
static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
    int nsegs, int error)
{
	struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
	union ccb *ccb = srb->ccb;
	struct hpt_iop_hba *hba = srb->hba;

	if (error || nsegs > hba->max_sg_count) {
		KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n",
		    ccb->ccb_h.func_code,
		    ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, nsegs));
		ccb->ccb_h.status = CAM_BUSY;
		bus_dmamap_unload(hba->io_dmat, srb->dma_map);
		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
		return;
	}

	hba->ops->post_req(hba, srb, segs, nsegs);
}

static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
    int nsegs, int error)
{
	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;

	hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F) &
	    ~(u_int64_t)0x1F;
	hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr +
	    0x1F) & ~0x1F);
}

static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
    int nsegs, int error)
{
	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
	u_int8_t *p;
	u_int64_t phy;
	u_int32_t list_count = hba->u.mvfrey.list_count;

	phy = ((u_int64_t)segs->ds_addr + 0x1F) &
	    ~(u_int64_t)0x1F;
	p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F) &
	    ~0x1F);

	hba->ctlcfgcmd_phy = phy;
	hba->ctlcfg_ptr = p;

	p += 0x800;
	phy += 0x800;

	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
	hba->u.mvfrey.inlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_inlist_entry);
	phy += list_count * sizeof(struct mvfrey_inlist_entry);

	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
	hba->u.mvfrey.outlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_outlist_entry);
	phy += list_count * sizeof(struct mvfrey_outlist_entry);

	hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
	hba->u.mvfrey.outlist_cptr_phy = phy;
}
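
/*
 * Carve the uncached DMA pool into HPT_SRB_MAX_SIZE slots on 32-byte
 * boundaries.  For ITL-family controllers (no ctlcfg area) the
 * physical address is stored pre-shifted by 5, matching the alignment,
 * and SRBs beyond the 32G window are flagged for the high-memory copy
 * path in hptiop_post_req_itl().
 */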
static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
    int nsegs, int error)
{
	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
	bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
	struct hpt_iop_srb *srb, *tmp_srb;
	int i;

	if (error || nsegs == 0) {
		device_printf(hba->pcidev, "hptiop_map_srb error");
		return;
	}

	/* map srb */
	srb = (struct hpt_iop_srb *)
	    (((unsigned long)hba->uncached_ptr + 0x1F) &
	    ~(unsigned long)0x1F);

	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
		tmp_srb = (struct hpt_iop_srb *)
		    ((char *)srb + i * HPT_SRB_MAX_SIZE);
		if (((unsigned long)tmp_srb & 0x1F) == 0) {
			if (bus_dmamap_create(hba->io_dmat,
			    0, &tmp_srb->dma_map)) {
				device_printf(hba->pcidev,
				    "dmamap create failed");
				return;
			}

			bzero(tmp_srb, sizeof(struct hpt_iop_srb));
			tmp_srb->hba = hba;
			tmp_srb->index = i;
			if (hba->ctlcfg_ptr == 0) { /* itl iop */
				tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
				    (phy_addr >> 5);
				if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
					tmp_srb->srb_flag =
					    HPT_SRB_FLAG_HIGH_MEM_ACESS;
			} else {
				tmp_srb->phy_addr = phy_addr;
			}

			hptiop_free_srb(hba, tmp_srb);
			hba->srb[i] = tmp_srb;
			phy_addr += HPT_SRB_MAX_SIZE;
		}
		else {
			device_printf(hba->pcidev, "invalid alignment");
			return;
		}
	}
}

static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg)
{
	hba->msg_done = 1;
}

static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba,
    int target_id)
{
	struct cam_periph *periph = NULL;
	struct cam_path *path;
	int status, retval = 0;

	status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);

	if (status == CAM_REQ_CMP) {
		if ((periph = cam_periph_find(path, "da")) != NULL) {
			if (periph->refcount >= 1) {
				device_printf(hba->pcidev, "target_id=0x%x,"
				    "refcount=%d", target_id,
				    periph->refcount);
				retval = -1;
			}
		}
		xpt_free_path(path);
	}

	return retval;
}
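
/*
 * Teardown mirrors attach in reverse; every resource pointer is
 * checked before release so partially completed setups unwind cleanly.
 */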
static void hptiop_release_resource(struct hpt_iop_hba *hba)
{
	int i;

	if (hba->path) {
		struct ccb_setasync *ccb;

		ccb = &xpt_alloc_ccb()->csa;
		xpt_setup_ccb(&ccb->ccb_h, hba->path, /*priority*/5);
		ccb->ccb_h.func_code = XPT_SASYNC_CB;
		ccb->event_enable = 0;
		ccb->callback = hptiop_async;
		ccb->callback_arg = hba->sim;
		xpt_action((union ccb *)ccb);
		xpt_free_path(hba->path);
		xpt_free_ccb(&ccb->ccb_h);
	}

	if (hba->sim) {
		xpt_bus_deregister(cam_sim_path(hba->sim));
		cam_sim_free(hba->sim);
	}

	if (hba->ctlcfg_dmat) {
		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
		bus_dmamem_free(hba->ctlcfg_dmat,
		    hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
		bus_dma_tag_destroy(hba->ctlcfg_dmat);
	}

	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
		struct hpt_iop_srb *srb = hba->srb[i];

		if (srb->dma_map)
			bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
	}

	if (hba->srb_dmat) {
		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
		bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
		bus_dma_tag_destroy(hba->srb_dmat);
	}

	if (hba->io_dmat)
		bus_dma_tag_destroy(hba->io_dmat);

	if (hba->parent_dmat)
		bus_dma_tag_destroy(hba->parent_dmat);

	if (hba->irq_handle)
		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);

	if (hba->irq_res)
		bus_release_resource(hba->pcidev, SYS_RES_IRQ,
		    0, hba->irq_res);

	if (hba->bar0_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
		    hba->bar0_rid, hba->bar0_res);
	if (hba->bar2_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
		    hba->bar2_rid, hba->bar2_res);
	if (hba->ioctl_dev)
		destroy_dev(hba->ioctl_dev);
	dev_ops_remove_minor(&hptiop_ops, device_get_unit(hba->pcidev));
}