tws(4): Add MSI support.
sys/dev/raid/tws/tws.c

/*
 * Copyright (c) 2010, LSI Corp.
 * All rights reserved.
 * Author : Manjunath Ranganathaiah
 * Support: freebsdraid@lsi.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of the <ORGANIZATION> nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/tws/tws.c,v 1.3 2007/05/09 04:16:32 mrangana Exp $
 */

#include <dev/raid/tws/tws.h>
#include <dev/raid/tws/tws_services.h>
#include <dev/raid/tws/tws_hdm.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
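
/*
 * Request MSI by default.  This can be overridden at boot time with the
 * hw.tws.msi.enable tunable declared at the bottom of this file;
 * tws_setup_irq() reports whether MSI or legacy INTx ended up being used.
 */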
static int tws_msi_enable = 1;

MALLOC_DEFINE(M_TWS, "twsbuf", "buffers used by tws driver");
int tws_queue_depth = TWS_MAX_REQS;

/* externs */
extern int tws_cam_attach(struct tws_softc *sc);
extern void tws_cam_detach(struct tws_softc *sc);
extern int tws_init_ctlr(struct tws_softc *sc);
extern boolean tws_ctlr_ready(struct tws_softc *sc);
extern void tws_turn_off_interrupts(struct tws_softc *sc);
extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req,
                                u_int8_t q_type );
extern struct tws_request *tws_q_remove_request(struct tws_softc *sc,
                                struct tws_request *req, u_int8_t q_type );
extern struct tws_request *tws_q_remove_head(struct tws_softc *sc,
                                u_int8_t q_type );
extern boolean tws_get_response(struct tws_softc *sc, u_int16_t *req_id);
extern boolean tws_ctlr_reset(struct tws_softc *sc);
extern void tws_intr(void *arg);
extern int tws_use_32bit_sgls;


struct tws_request *tws_get_request(struct tws_softc *sc, u_int16_t type);
int tws_init_connect(struct tws_softc *sc, u_int16_t mc);
void tws_send_event(struct tws_softc *sc, u_int8_t event);
uint8_t tws_get_state(struct tws_softc *sc);
void tws_release_request(struct tws_request *req);

/* Function prototypes */
static d_open_t tws_open;
static d_close_t tws_close;
static d_read_t tws_read;
static d_write_t tws_write;
extern d_ioctl_t tws_ioctl;

static int tws_init(struct tws_softc *sc);
static void tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs,
                           int nseg, int error);

static int tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size);
static int tws_init_aen_q(struct tws_softc *sc);
static int tws_init_trace_q(struct tws_softc *sc);
static int tws_setup_irq(struct tws_softc *sc);

/* Character device entry points */

static struct dev_ops tws_ops = {
    { "tws", 0, 0 },
    .d_open =   tws_open,
    .d_close =  tws_close,
    .d_read =   tws_read,
    .d_write =  tws_write,
    .d_ioctl =  tws_ioctl,
};

/*
 * In the cdevsw routines, we find our softc by using the si_drv1 member
 * of struct cdev.  We set this variable to point to our softc in our
 * attach routine when we create the /dev entry.
 */
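
/*
 * Note: tws_read() and tws_write() below are stubs that only emit a
 * debug trace; management requests from userland are handled by
 * tws_ioctl(), which is implemented in a separate source file.
 */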

static int
tws_open(struct dev_open_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, oflags);
    return (0);
}

static int
tws_close(struct dev_close_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, fflag);
    return (0);
}

static int
tws_read(struct dev_read_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, ioflag);
    return (0);
}

static int
tws_write(struct dev_write_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, ioflag);
    return (0);
}

/* PCI Support Functions */

/*
 * Compare the device ID of this device against the IDs that this driver
 * supports.  If there is a match, set the description and return success.
 */
static int
tws_probe(device_t dev)
{
    static u_int8_t first_ctlr = 1;

    if ((pci_get_vendor(dev) == TWS_VENDOR_ID) &&
        (pci_get_device(dev) == TWS_DEVICE_ID)) {
        device_set_desc(dev, "LSI 3ware SAS/SATA Storage Controller");
        if (first_ctlr) {
            kprintf("LSI 3ware device driver for SAS/SATA storage "
                    "controllers, version: %s\n", TWS_DRIVER_VERSION_STRING);
            first_ctlr = 0;
        }
        return(0);
    }
    return (ENXIO);
}

/* Attach function is only called if the probe is successful. */

static int
tws_attach(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);
    u_int32_t cmd, bar;
    int error=0;

    /* no tracing yet */
    /* Look up our softc and initialize its fields. */
    sc->tws_dev = dev;
    sc->device_id = pci_get_device(dev);
    sc->subvendor_id = pci_get_subvendor(dev);
    sc->subdevice_id = pci_get_subdevice(dev);

    /* Initialize mutexes */
    lockinit(&sc->q_lock, "tws_q_lock", 0, LK_CANRECURSE);
    lockinit(&sc->sim_lock, "tws_sim_lock", 0, LK_CANRECURSE);
    lockinit(&sc->gen_lock, "tws_gen_lock", 0, LK_CANRECURSE);
    lockinit(&sc->io_lock, "tws_io_lock", 0, LK_CANRECURSE);

    if ( tws_init_trace_q(sc) == FAILURE )
        kprintf("trace init failure\n");
    /* send init event */
    lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
    tws_send_event(sc, TWS_INIT_START);
    lockmgr(&sc->gen_lock, LK_RELEASE);

#if _BYTE_ORDER == _BIG_ENDIAN
    TWS_TRACE(sc, "BIG endian", 0, 0);
#endif
    /* sysctl context setup */
    sysctl_ctx_init(&sc->tws_clist);
    sc->tws_oidp = SYSCTL_ADD_NODE(&sc->tws_clist,
                                   SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
                                   device_get_nameunit(dev),
                                   CTLFLAG_RD, 0, "");
    if ( sc->tws_oidp == NULL ) {
        tws_log(sc, SYSCTL_TREE_NODE_ADD);
        goto attach_fail_1;
    }
    SYSCTL_ADD_STRING(&sc->tws_clist, SYSCTL_CHILDREN(sc->tws_oidp),
                      OID_AUTO, "driver_version", CTLFLAG_RD,
                      TWS_DRIVER_VERSION_STRING, 0, "TWS driver version");

    cmd = pci_read_config(dev, PCIR_COMMAND, 2);
    if ( (cmd & PCIM_CMD_PORTEN) == 0) {
        tws_log(sc, PCI_COMMAND_READ);
        goto attach_fail_1;
    }
    /* Force the busmaster enable bit on. */
    cmd |= PCIM_CMD_BUSMASTEREN;
    pci_write_config(dev, PCIR_COMMAND, cmd, 2);

    bar = pci_read_config(dev, TWS_PCI_BAR0, 4);
    TWS_TRACE_DEBUG(sc, "bar0 ", bar, 0);
    bar = pci_read_config(dev, TWS_PCI_BAR1, 4);
    bar = bar & ~TWS_BIT2;
    TWS_TRACE_DEBUG(sc, "bar1 ", bar, 0);

    /* The MFA base address comes from the BAR2 register and is used for
     * push mode.  Firmware will eventually move to pull mode, at which
     * point this needs to change.
     */
#ifndef TWS_PULL_MODE_ENABLE
    sc->mfa_base = (u_int64_t)pci_read_config(dev, TWS_PCI_BAR2, 4);
    sc->mfa_base = sc->mfa_base & ~TWS_BIT2;
    TWS_TRACE_DEBUG(sc, "bar2 ", sc->mfa_base, 0);
#endif

    /* allocate MMIO register space */
    sc->reg_res_id = TWS_PCI_BAR1; /* BAR1 offset */
    if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
                                &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
                                == NULL) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_1;
    }
    sc->bus_tag = rman_get_bustag(sc->reg_res);
    sc->bus_handle = rman_get_bushandle(sc->reg_res);

#ifndef TWS_PULL_MODE_ENABLE
    /* Allocate bus space for inbound mfa */
    sc->mfa_res_id = TWS_PCI_BAR2; /* BAR2 offset */
    if ((sc->mfa_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
                          &(sc->mfa_res_id), 0, ~0, 0x100000, RF_ACTIVE))
                                == NULL) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_2;
    }
    sc->bus_mfa_tag = rman_get_bustag(sc->mfa_res);
    sc->bus_mfa_handle = rman_get_bushandle(sc->mfa_res);
#endif

    /* Allocate and register our interrupt. */
    if ( tws_setup_irq(sc) == FAILURE ) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_3;
    }

    /* Init callouts. */
    callout_init(&sc->print_stats_handle);
    callout_init(&sc->reset_cb_handle);
    callout_init(&sc->reinit_handle);

    /*
     * Create a /dev entry for this device.  The kernel will assign us
     * a major number automatically.  We use the unit number of this
     * device as the minor number and name the character device
     * "tws<unit>".
     */
    sc->tws_cdev = make_dev(&tws_ops, device_get_unit(dev),
                            UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "tws%u",
                            device_get_unit(dev));
    sc->tws_cdev->si_drv1 = sc;

    if ( tws_init(sc) == FAILURE ) {
        tws_log(sc, TWS_INIT_FAILURE);
        goto attach_fail_4;
    }
    if ( tws_init_ctlr(sc) == FAILURE ) {
        tws_log(sc, TWS_CTLR_INIT_FAILURE);
        goto attach_fail_4;
    }
    if ((error = tws_cam_attach(sc))) {
        tws_log(sc, TWS_CAM_ATTACH);
        goto attach_fail_4;
    }
    /* send init complete event */
    lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
    tws_send_event(sc, TWS_INIT_COMPLETE);
    lockmgr(&sc->gen_lock, LK_RELEASE);

    TWS_TRACE_DEBUG(sc, "attached successfully", 0, sc->device_id);
    return(0);

attach_fail_4:
    if (sc->intr_handle) {
        if ((error = bus_teardown_intr(sc->tws_dev,
                   sc->irq_res, sc->intr_handle)))
            TWS_TRACE(sc, "bus teardown intr", 0, error);
    }
    destroy_dev(sc->tws_cdev);
    dev_ops_remove_minor(&tws_ops, device_get_unit(sc->tws_dev));
attach_fail_3:
    if (sc->irq_res) {
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_IRQ, sc->irq_res_id, sc->irq_res))
            TWS_TRACE(sc, "bus irq res", 0, 0);
    }
#ifndef TWS_PULL_MODE_ENABLE
attach_fail_2:
#endif
    if ( sc->mfa_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
            TWS_TRACE(sc, "bus release ", 0, sc->mfa_res_id);
    }
    if ( sc->reg_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
            TWS_TRACE(sc, "bus release2 ", 0, sc->reg_res_id);
    }
attach_fail_1:
    lockuninit(&sc->q_lock);
    lockuninit(&sc->sim_lock);
    lockuninit(&sc->gen_lock);
    lockuninit(&sc->io_lock);
    sysctl_ctx_free(&sc->tws_clist);
    return (ENXIO);
}

/* Detach device. */

static int
tws_detach(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);
    int error;
    u_int32_t reg;

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
    tws_send_event(sc, TWS_UNINIT_START);
    lockmgr(&sc->gen_lock, LK_RELEASE);

    /* Interrupts need to be disabled before detaching from CAM. */
    tws_turn_off_interrupts(sc);
    /* clear door bell */
    tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
    reg = tws_read_reg(sc, TWS_I2O0_HIMASK, 4);
    TWS_TRACE_DEBUG(sc, "turn-off-intr", reg, 0);
    sc->obfl_q_overrun = false;
    tws_init_connect(sc, 1);

    /* Teardown the state in our softc created in our attach routine. */
    /* Disconnect the interrupt handler. */
    if (sc->intr_handle) {
        if ((error = bus_teardown_intr(sc->tws_dev,
                   sc->irq_res, sc->intr_handle)))
            TWS_TRACE(sc, "bus teardown intr", 0, error);
    }
    /* Release irq resource */
    if (sc->irq_res) {
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_IRQ, sc->irq_res_id, sc->irq_res))
            TWS_TRACE(sc, "bus release irq resource", 0, sc->irq_res_id);
    }
    if (sc->intr_type == PCI_INTR_TYPE_MSI)
        pci_release_msi(sc->tws_dev);

    tws_cam_detach(sc);

    /* Release memory resource */
    if ( sc->mfa_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
            TWS_TRACE(sc, "bus release mem resource", 0, sc->mfa_res_id);
    }
    if ( sc->reg_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
            TWS_TRACE(sc, "bus release mem resource", 0, sc->reg_res_id);
    }

    kfree(sc->reqs, M_TWS);
    kfree(sc->sense_bufs, M_TWS);
    kfree(sc->scan_ccb, M_TWS);
    kfree(sc->aen_q.q, M_TWS);
    kfree(sc->trace_q.q, M_TWS);
    lockuninit(&sc->q_lock);
    lockuninit(&sc->sim_lock);
    lockuninit(&sc->gen_lock);
    lockuninit(&sc->io_lock);
    destroy_dev(sc->tws_cdev);
    dev_ops_remove_minor(&tws_ops, device_get_unit(sc->tws_dev));
    sysctl_ctx_free(&sc->tws_clist);
    return (0);
}
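
/*
 * Set up the controller interrupt.  When tws_msi_enable is set we turn
 * on the INTx Disable bit (0x0400) in the PCI command register and let
 * pci_alloc_1intr() hand us a single MSI vector; otherwise a legacy
 * INTx interrupt is used.  The resulting interrupt type is remembered
 * in sc->intr_type so tws_detach() can release the MSI again.
 */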
static int
tws_setup_irq(struct tws_softc *sc)
{
    u_int16_t cmd;
    u_int irq_flags;

    cmd = pci_read_config(sc->tws_dev, PCIR_COMMAND, 2);

    if (tws_msi_enable)
        cmd |= 0x0400;
    else
        cmd &= ~0x0400;
    pci_write_config(sc->tws_dev, PCIR_COMMAND, cmd, 2);
    sc->irq_res = 0;
    sc->intr_type = pci_alloc_1intr(sc->tws_dev, tws_msi_enable,
        &sc->irq_res_id, &irq_flags);
    sc->irq_res = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ,
        &sc->irq_res_id, irq_flags);
    if (!sc->irq_res)
        return(FAILURE);
    if (bus_setup_intr(sc->tws_dev, sc->irq_res, INTR_MPSAFE, tws_intr, sc,
        &sc->intr_handle, NULL)) {
        tws_log(sc, SETUP_INTR_RES);
        return(FAILURE);
    }
    if (sc->intr_type == PCI_INTR_TYPE_MSI)
        device_printf(sc->tws_dev, "Using MSI\n");
    else
        device_printf(sc->tws_dev, "Using legacy INTx\n");

    return(SUCCESS);
}
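
/*
 * One-time driver initialization: create the DMA tags, allocate and map
 * the command packet area, allocate the request, sense buffer and scan
 * CCB arrays, and bring the controller to a known state before the
 * request structures are initialized.
 */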
static int
tws_init(struct tws_softc *sc)
{
    u_int32_t max_sg_elements;
    u_int32_t dma_mem_size;
    int error;
    u_int32_t reg;

    sc->seq_id = 0;
    if ( tws_queue_depth > TWS_MAX_REQS )
        tws_queue_depth = TWS_MAX_REQS;
    if (tws_queue_depth < TWS_RESERVED_REQS+1)
        tws_queue_depth = TWS_RESERVED_REQS+1;
    sc->is64bit = (sizeof(bus_addr_t) == 8) ? true : false;
    max_sg_elements = (sc->is64bit && !tws_use_32bit_sgls) ?
                                 TWS_MAX_64BIT_SG_ELEMENTS :
                                 TWS_MAX_32BIT_SG_ELEMENTS;
    dma_mem_size = (sizeof(struct tws_command_packet) * tws_queue_depth) +
                             (TWS_SECTOR_SIZE) ;
    if ( bus_dma_tag_create(NULL,                    /* parent */
                            TWS_ALIGNMENT,           /* alignment */
                            0,                       /* boundary */
                            BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                            BUS_SPACE_MAXADDR,       /* highaddr */
                            NULL, NULL,              /* filter, filterarg */
                            BUS_SPACE_MAXSIZE,       /* maxsize */
                            max_sg_elements,         /* numsegs */
                            BUS_SPACE_MAXSIZE,       /* maxsegsize */
                            0,                       /* flags */
                            &sc->parent_tag          /* tag */
                           )) {
        TWS_TRACE_DEBUG(sc, "DMA parent tag Create fail", max_sg_elements,
                                                           sc->is64bit);
        return(ENOMEM);
    }
    /* Inbound message frames require 16-byte alignment.
     * Outbound MFs can live with 4-byte alignment - for now just
     * use 16 for both.
     */
    if ( bus_dma_tag_create(sc->parent_tag,          /* parent */
                            TWS_IN_MF_ALIGNMENT,     /* alignment */
                            0,                       /* boundary */
                            BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                            BUS_SPACE_MAXADDR,       /* highaddr */
                            NULL, NULL,              /* filter, filterarg */
                            dma_mem_size,            /* maxsize */
                            1,                       /* numsegs */
                            BUS_SPACE_MAXSIZE,       /* maxsegsize */
                            0,                       /* flags */
                            &sc->cmd_tag             /* tag */
                           )) {
        TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
                   BUS_DMA_NOWAIT, &sc->cmd_map)) {
        TWS_TRACE_DEBUG(sc, "DMA mem alloc fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    /* if bus_dmamem_alloc succeeds then bus_dmamap_load will succeed */
    sc->dma_mem_phys=0;
    error = bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
                   dma_mem_size, tws_dmamap_cmds_load_cbfn,
                   &sc->dma_mem_phys, 0);

    if ( error == EINPROGRESS )
        TWS_TRACE_DEBUG(sc, "req queued", max_sg_elements, sc->is64bit);

    /*
     * Create a dma tag for data buffers; size will be the maximum
     * possible I/O size (128kB).
     */
    if (bus_dma_tag_create(sc->parent_tag,         /* parent */
                           TWS_ALIGNMENT,          /* alignment */
                           0,                      /* boundary */
                           BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                           BUS_SPACE_MAXADDR,      /* highaddr */
                           NULL, NULL,             /* filter, filterarg */
                           TWS_MAX_IO_SIZE,        /* maxsize */
                           max_sg_elements,        /* nsegments */
                           TWS_MAX_IO_SIZE,        /* maxsegsize */
                           BUS_DMA_ALLOCALL |      /* flags */
                           BUS_DMA_ALLOCNOW |
                           BUS_DMA_PRIVBZONE |
                           BUS_DMA_PROTECTED,
                           &sc->data_tag           /* tag */)) {
        TWS_TRACE_DEBUG(sc, "DMA data tag Create fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    sc->reqs = kmalloc(sizeof(struct tws_request) * tws_queue_depth, M_TWS,
                      M_WAITOK | M_ZERO);
    sc->sense_bufs = kmalloc(sizeof(struct tws_sense) * tws_queue_depth, M_TWS,
                      M_WAITOK | M_ZERO);
    sc->scan_ccb = kmalloc(sizeof(union ccb), M_TWS, M_WAITOK | M_ZERO);

    if ( !tws_ctlr_ready(sc) )
        if( !tws_ctlr_reset(sc) )
            return(FAILURE);

    bzero(&sc->stats, sizeof(struct tws_stats));
    tws_init_qs(sc);
    tws_turn_off_interrupts(sc);

    /*
     * Enable pull mode by setting bit 1.
     * Setting bit 0 to 1 would enable interrupt coalescing;
     * will revisit.
     */
#ifdef TWS_PULL_MODE_ENABLE

    reg = tws_read_reg(sc, TWS_I2O0_CTL, 4);
    TWS_TRACE_DEBUG(sc, "i20 ctl", reg, TWS_I2O0_CTL);
    tws_write_reg(sc, TWS_I2O0_CTL, reg | TWS_BIT1, 4);

#endif

    TWS_TRACE_DEBUG(sc, "dma_mem_phys", sc->dma_mem_phys, TWS_I2O0_CTL);
    if ( tws_init_reqs(sc, dma_mem_size) == FAILURE )
        return(FAILURE);
    if ( tws_init_aen_q(sc) == FAILURE )
        return(FAILURE);

    return(SUCCESS);
}
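
/*
 * The AEN and trace queues are simple fixed-depth (256 entry) circular
 * buffers; the head/tail indices and the overflow flag are reset here
 * and the backing storage is allocated once at attach time.
 */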
static int
tws_init_aen_q(struct tws_softc *sc)
{
    sc->aen_q.head=0;
    sc->aen_q.tail=0;
    sc->aen_q.depth=256;
    sc->aen_q.overflow=0;
    sc->aen_q.q = kmalloc(sizeof(struct tws_event_packet)*sc->aen_q.depth,
                              M_TWS, M_WAITOK | M_ZERO);
    return(SUCCESS);
}

static int
tws_init_trace_q(struct tws_softc *sc)
{
    sc->trace_q.head=0;
    sc->trace_q.tail=0;
    sc->trace_q.depth=256;
    sc->trace_q.overflow=0;
    sc->trace_q.q = kmalloc(sizeof(struct tws_trace_rec)*sc->trace_q.depth,
                              M_TWS, M_WAITOK | M_ZERO);
    return(SUCCESS);
}
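
/*
 * Carve the DMA'ed command area into per-request command packets and
 * sense buffers, create a data DMA map for every request and push all
 * non-reserved requests onto the free queue.
 */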
static int
tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size)
{
    struct tws_command_packet *cmd_buf;
    cmd_buf = (struct tws_command_packet *)sc->dma_mem;
    int i;

    bzero(cmd_buf, dma_mem_size);
    TWS_TRACE_DEBUG(sc, "phy cmd", sc->dma_mem_phys, 0);
    lockmgr(&sc->q_lock, LK_EXCLUSIVE);
    for ( i=0; i< tws_queue_depth; i++)
    {
        if (bus_dmamap_create(sc->data_tag, 0, &sc->reqs[i].dma_map)) {
            /* log a ENOMEM failure msg here */
            lockmgr(&sc->q_lock, LK_RELEASE);
            return(FAILURE);
        }
        sc->reqs[i].cmd_pkt =  &cmd_buf[i];

        sc->sense_bufs[i].hdr = &cmd_buf[i].hdr ;
        sc->sense_bufs[i].hdr_pkt_phy = sc->dma_mem_phys +
                              (i * sizeof(struct tws_command_packet));
        sc->sense_bufs[i].posted = false;

        sc->reqs[i].cmd_pkt_phy = sc->dma_mem_phys +
                              sizeof(struct tws_command_header) +
                              (i * sizeof(struct tws_command_packet));
        sc->reqs[i].request_id = i;
        sc->reqs[i].sc = sc;

        sc->reqs[i].cmd_pkt->hdr.header_desc.size_header = 128;

        sc->reqs[i].state = TWS_REQ_STATE_FREE;
        if ( i >= TWS_RESERVED_REQS )
            tws_q_insert_tail(sc, &sc->reqs[i], TWS_FREE_Q);
    }
    lockmgr(&sc->q_lock, LK_RELEASE);
    return(SUCCESS);
}
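
/*
 * bus_dmamap_load() callback for the command packet area.  The command
 * tag allows only a single segment, so all we have to do is record its
 * bus address in sc->dma_mem_phys (passed in via arg).
 */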
static void
tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs,
                           int nseg, int error)
{
    /* kprintf("command load done \n"); */

    *((bus_addr_t *)arg) = segs[0].ds_addr;
}
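
/*
 * Driver state machine.  Callers must hold gen_lock; the KKASSERT below
 * enforces this.  TWS_RESET_START may be entered from any state and the
 * previous state is restored on TWS_RESET_COMPLETE.
 */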
void
tws_send_event(struct tws_softc *sc, u_int8_t event)
{
    KKASSERT(lockstatus(&sc->gen_lock, curthread) != 0);
    TWS_TRACE_DEBUG(sc, "received event ", 0, event);
    switch (event) {

        case TWS_INIT_START:
            sc->tws_state = TWS_INIT;
            break;

        case TWS_INIT_COMPLETE:
            KASSERT(sc->tws_state == TWS_INIT , ("invalid state transition"));
            sc->tws_state = TWS_ONLINE;
            break;

        case TWS_RESET_START:
            /* multiple reset ? */
            KASSERT(sc->tws_state != TWS_RESET, ("invalid state transition"));

            /* we can transition to reset state from any state */
            sc->tws_prev_state = sc->tws_state;
            sc->tws_state = TWS_RESET;
            break;

        case TWS_RESET_COMPLETE:
            KASSERT(sc->tws_state == TWS_RESET, ("invalid state transition"));
            sc->tws_state = sc->tws_prev_state;
            break;

        case TWS_SCAN_FAILURE:
            KASSERT(sc->tws_state == TWS_ONLINE , ("invalid state transition"));
            sc->tws_state = TWS_OFFLINE;
            break;

        case TWS_UNINIT_START:
            KASSERT(sc->tws_state == TWS_ONLINE || sc->tws_state == TWS_OFFLINE,
                           ("invalid state transition"));
            sc->tws_state = TWS_UNINIT;
            break;
    }
}

uint8_t
tws_get_state(struct tws_softc *sc)
{
    return((u_int8_t)sc->tws_state);
}

/* Called during system shutdown after sync. */
static int
tws_shutdown(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    tws_turn_off_interrupts(sc);
    tws_init_connect(sc, 1);

    return (0);
}

/*
 * Device suspend routine.
 */
static int
tws_suspend(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    return (0);
}

/*
 * Device resume routine.
 */
static int
tws_resume(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    return (0);
}
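
/*
 * Hand out a request of the given type.  The first TWS_RESERVED_REQS
 * slots are dedicated: reqs[0] for internal commands, reqs[1] for AEN
 * fetches, reqs[2] for passthru and reqs[3] for get/set param requests.
 * Regular SCSI I/O requests are taken from the free queue instead.
 */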
struct tws_request *
tws_get_request(struct tws_softc *sc, u_int16_t type)
{
    struct tws_request *r = NULL;

    switch ( type ) {
        case TWS_INTERNAL_CMD_REQ :
            lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
            r = &sc->reqs[0];
            if ( r->state != TWS_REQ_STATE_FREE ) {
                r = NULL;
            } else {
                r->state = TWS_REQ_STATE_BUSY;
            }
            lockmgr(&sc->gen_lock, LK_RELEASE);
            break;
        case TWS_AEN_FETCH_REQ :
            lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
            r = &sc->reqs[1];
            if ( r->state != TWS_REQ_STATE_FREE ) {
                r = NULL;
            } else {
                r->state = TWS_REQ_STATE_BUSY;
            }
            lockmgr(&sc->gen_lock, LK_RELEASE);
            break;
        case TWS_PASSTHRU_REQ :
            lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
            r = &sc->reqs[2];
            if ( r->state != TWS_REQ_STATE_FREE ) {
                r = NULL;
            } else {
                r->state = TWS_REQ_STATE_BUSY;
            }
            lockmgr(&sc->gen_lock, LK_RELEASE);
            break;
        case TWS_GETSET_PARAM_REQ :
            lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
            r = &sc->reqs[3];
            if ( r->state != TWS_REQ_STATE_FREE ) {
                r = NULL;
            } else {
                r->state = TWS_REQ_STATE_BUSY;
            }
            lockmgr(&sc->gen_lock, LK_RELEASE);
            break;
        case TWS_SCSI_IO_REQ :
            lockmgr(&sc->q_lock, LK_EXCLUSIVE);
            r = tws_q_remove_head(sc, TWS_FREE_Q);
            if ( r )
                r->state = TWS_REQ_STATE_TRAN;
            lockmgr(&sc->q_lock, LK_RELEASE);
            break;
        default :
            TWS_TRACE_DEBUG(sc, "Unknown req type", 0, type);
            r = NULL;

    }

    if ( r ) {
        bzero(&r->cmd_pkt->cmd, sizeof(struct tws_command_apache));
        callout_init(&r->thandle);
        r->data = NULL;
        r->length = 0;
        r->type = type;
        r->flags = TWS_DIR_UNKNOWN;
        r->error_code = TWS_REQ_ERR_INVALID;
        r->ccb_ptr = NULL;
        r->cb = NULL;
        r->next = r->prev = NULL;
    }
    return(r);
}
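
/* Return a request to the free queue. */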
void
tws_release_request(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;

    TWS_TRACE_DEBUG(sc, "entry", sc, 0);
    lockmgr(&sc->q_lock, LK_EXCLUSIVE);
    tws_q_insert_tail(sc, req, TWS_FREE_Q);
    lockmgr(&sc->q_lock, LK_RELEASE);
}

static device_method_t tws_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     tws_probe),
    DEVMETHOD(device_attach,    tws_attach),
    DEVMETHOD(device_detach,    tws_detach),
    DEVMETHOD(device_shutdown,  tws_shutdown),
    DEVMETHOD(device_suspend,   tws_suspend),
    DEVMETHOD(device_resume,    tws_resume),

    DEVMETHOD(bus_print_child,      bus_generic_print_child),
    DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
    { 0, 0 }
};

static driver_t tws_driver = {
    "tws",
    tws_methods,
    sizeof(struct tws_softc)
};

static devclass_t tws_devclass;

/* DEFINE_CLASS_0(tws, tws_driver, tws_methods, sizeof(struct tws_softc)); */
DRIVER_MODULE(tws, pci, tws_driver, tws_devclass, NULL, NULL);
MODULE_DEPEND(tws, cam, 1, 1, 1);
MODULE_DEPEND(tws, pci, 1, 1, 1);

TUNABLE_INT("hw.tws.queue_depth", &tws_queue_depth);
TUNABLE_INT("hw.tws.msi.enable", &tws_msi_enable);