2 * Copyright (c) 2008-2016 Solarflare Communications Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
21 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
22 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
23 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
24 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * The views and conclusions contained in the software and documentation are
27 * those of the authors and should not be interpreted as representing official
28 * policies, either expressed or implied, of the FreeBSD Project.
31 #include <sys/types.h>
32 #include <sys/sysmacros.h>
34 #include <sys/sunddi.h>
35 #include <sys/stream.h>
36 #include <sys/strsun.h>
37 #include <sys/strsubr.h>
46 /* Timeout to wait for DRIVER_EV_START event at EVQ startup */
47 #define SFXGE_EV_QSTART_TIMEOUT_USEC (2000000)
50 /* Event queue DMA attributes */
/*
 * Device-access attributes for the EVQ DMA ring: no byte swapping and
 * strict store ordering, as required for a descriptor ring shared with
 * the NIC.
 */
51 static ddi_device_acc_attr_t sfxge_evq_devacc
= {
53 DDI_DEVICE_ATTR_V0
, /* devacc_attr_version */
54 DDI_NEVERSWAP_ACC
, /* devacc_attr_endian_flags */
55 DDI_STRICTORDER_ACC
/* devacc_attr_dataorder */
/*
 * DMA attributes for the EVQ ring: full 64-bit address range,
 * EFX_BUF_SIZE alignment, and a single cookie (sgllen == 1) so the
 * ring is physically contiguous from the NIC's point of view.
 */
58 static ddi_dma_attr_t sfxge_evq_dma_attr
= {
59 DMA_ATTR_V0
, /* dma_attr_version */
60 0, /* dma_attr_addr_lo */
61 0xffffffffffffffffull
, /* dma_attr_addr_hi */
62 0xffffffffffffffffull
, /* dma_attr_count_max */
63 EFX_BUF_SIZE
, /* dma_attr_align */
64 0xffffffff, /* dma_attr_burstsizes */
65 1, /* dma_attr_minxfer */
66 0xffffffffffffffffull
, /* dma_attr_maxxfer */
67 0xffffffffffffffffull
, /* dma_attr_seg */
68 1, /* dma_attr_sgllen */
69 1, /* dma_attr_granular */
70 0 /* dma_attr_flags */
/*
 * Common kmem-cache constructor for an event queue object (sfxge_evq_t):
 * zeroes the object, creates a DMA buffer of EFX_EVQ_SIZE(evq_size) bytes
 * for the event ring and allocates SRAM buffer-table entries for it.
 * Returns 0 on success, non-zero errno on failure (DMA buffer is torn
 * down on the failure path).
 * NOTE(review): this extract is missing interior lines (goto targets,
 * labels, returns); comments annotate only the code that is visible.
 */
74 _sfxge_ev_qctor(sfxge_t
*sp
, sfxge_evq_t
*sep
, int kmflags
, uint16_t evq_size
)
76 efsys_mem_t
*esmp
= &(sep
->se_mem
);
77 sfxge_dma_buffer_attr_t dma_attr
;
80 /* Compile-time structure layout checks */
81 EFX_STATIC_ASSERT(sizeof (sep
->__se_u1
.__se_s1
) <=
82 sizeof (sep
->__se_u1
.__se_pad
));
83 EFX_STATIC_ASSERT(sizeof (sep
->__se_u2
.__se_s2
) <=
84 sizeof (sep
->__se_u2
.__se_pad
));
85 EFX_STATIC_ASSERT(sizeof (sep
->__se_u3
.__se_s3
) <=
86 sizeof (sep
->__se_u3
.__se_pad
));
/* Start from a fully-zeroed object; fields are set below */
88 bzero(sep
, sizeof (sfxge_evq_t
));
/* Describe the EVQ ring buffer for sfxge_dma_buffer_create() */
92 dma_attr
.sdba_dip
= sp
->s_dip
;
93 dma_attr
.sdba_dattrp
= &sfxge_evq_dma_attr
;
/* Only block for DMA resources if the cache allocation may sleep */
94 dma_attr
.sdba_callback
= (kmflags
== KM_SLEEP
) ?
95 DDI_DMA_SLEEP
: DDI_DMA_DONTWAIT
;
96 dma_attr
.sdba_length
= EFX_EVQ_SIZE(evq_size
);
97 dma_attr
.sdba_memflags
= DDI_DMA_CONSISTENT
;
98 dma_attr
.sdba_devaccp
= &sfxge_evq_devacc
;
99 dma_attr
.sdba_bindflags
= DDI_DMA_READ
| DDI_DMA_CONSISTENT
;
100 dma_attr
.sdba_maxcookies
= 1;
101 dma_attr
.sdba_zeroinit
= B_FALSE
;
103 if ((rc
= sfxge_dma_buffer_create(esmp
, &dma_attr
)) != 0)
106 /* Allocate some buffer table entries */
107 if ((rc
= sfxge_sram_buf_tbl_alloc(sp
, EFX_EVQ_NBUFS(evq_size
),
108 &(sep
->se_id
))) != 0)
/* Empty TXQ completion list: tail pointer refers to the list head */
111 sep
->se_stpp
= &(sep
->se_stp
);
118 /* Tear down DMA setup */
120 sfxge_dma_buffer_destroy(esmp
);
123 DTRACE_PROBE1(fail1
, int, rc
);
/* On failure the object must leave the constructor fully zeroed */
127 SFXGE_OBJ_CHECK(sep
, sfxge_evq_t
);
/*
 * kmem-cache constructor for EVQ 0, which is sized s_evq0_size (larger
 * than the other EVQs — see sfxge_ev_init()).
 * NOTE(review): 'sp' is presumably assigned from 'arg' on a line missing
 * from this extract — confirm against the full source.
 */
133 sfxge_ev_q0ctor(void *buf
, void *arg
, int kmflags
)
135 sfxge_evq_t
*sep
= buf
;
137 return (_sfxge_ev_qctor(sp
, sep
, kmflags
, sp
->s_evq0_size
));
/*
 * kmem-cache constructor for every EVQ other than EVQ 0 (sized
 * s_evqX_size).
 * NOTE(review): 'sp' is presumably assigned from 'arg' on a line missing
 * from this extract — confirm against the full source.
 */
141 sfxge_ev_qXctor(void *buf
, void *arg
, int kmflags
)
143 sfxge_evq_t
*sep
= buf
;
145 return (_sfxge_ev_qctor(sp
, sep
, kmflags
, sp
->s_evqX_size
));
/*
 * Common kmem-cache destructor: releases the SRAM buffer-table entries
 * and the DMA ring created by _sfxge_ev_qctor(), then verifies the
 * object is back to an all-zero state.
 */
148 _sfxge_ev_qdtor(sfxge_t
*sp
, sfxge_evq_t
*sep
, uint16_t evq_size
)
150 efsys_mem_t
*esmp
= &(sep
->se_mem
);
/* The object must be quiescent: owned by sp, TXQ list empty */
151 ASSERT3P(sep
->se_sp
, ==, sp
);
152 ASSERT3P(sep
->se_stpp
, ==, &(sep
->se_stp
));
155 /* Free the buffer table entries */
156 sfxge_sram_buf_tbl_free(sp
, sep
->se_id
, EFX_EVQ_NBUFS(evq_size
));
159 /* Tear down DMA setup */
160 sfxge_dma_buffer_destroy(esmp
);
164 SFXGE_OBJ_CHECK(sep
, sfxge_evq_t
);
/*
 * kmem-cache destructor for EVQ 0.
 * NOTE(review): 'sp' is presumably assigned from 'arg' on a line missing
 * from this extract — confirm against the full source.
 */
168 sfxge_ev_q0dtor(void *buf
, void *arg
)
170 sfxge_evq_t
*sep
= buf
;
172 _sfxge_ev_qdtor(sp
, sep
, sp
->s_evq0_size
);
/*
 * kmem-cache destructor for every EVQ other than EVQ 0.
 * NOTE(review): 'sp' is presumably assigned from 'arg' on a line missing
 * from this extract — confirm against the full source.
 */
176 sfxge_ev_qXdtor(void *buf
, void *arg
)
178 sfxge_evq_t
*sep
= buf
;
180 _sfxge_ev_qdtor(sp
, sep
, sp
->s_evqX_size
);
/*
 * eec_initialized event callback: the hardware has acknowledged EVQ
 * startup. Moves the queue STARTING -> STARTED and wakes the thread
 * blocked in sfxge_ev_qstart() on se_init_kv.
 */
184 sfxge_ev_initialized(void *arg
)
186 sfxge_evq_t
*sep
= arg
;
188 ASSERT(mutex_owned(&(sep
->se_lock
)));
190 /* Init done events may be duplicated on 7xxx (see SFCbug31631) */
191 if (sep
->se_state
== SFXGE_EVQ_STARTED
)
194 ASSERT3U(sep
->se_state
, ==, SFXGE_EVQ_STARTING
);
195 sep
->se_state
= SFXGE_EVQ_STARTED
;
/* Wake the waiter in sfxge_ev_qstart() */
197 cv_broadcast(&(sep
->se_init_kv
));
/*
 * Run deferred completion processing for this EVQ: walk the list of
 * TXQs linked through se_stp/se_stpp calling sfxge_tx_qcomplete() for
 * each with outstanding descriptors, then complete pending RX work on
 * the RXQ with the same index. 'eop' is passed through to
 * sfxge_rx_qcomplete().
 * NOTE(review): the list-walk bookkeeping lines are partly missing from
 * this extract; only the visible statements are annotated.
 */
204 sfxge_ev_qcomplete(sfxge_evq_t
*sep
, boolean_t eop
)
206 sfxge_t
*sp
= sep
->se_sp
;
207 unsigned int index
= sep
->se_index
;
208 sfxge_rxq_t
*srp
= sp
->s_srp
[index
];
211 if ((stp
= sep
->se_stp
) != NULL
) {
/* Reset the TXQ completion list to empty before walking it */
213 sep
->se_stpp
= &(sep
->se_stp
);
221 ASSERT3U(stp
->st_evq
, ==, index
);
223 if (stp
->st_pending
!= stp
->st_completed
)
224 sfxge_tx_qcomplete(stp
);
227 } while (stp
!= NULL
);
/* Finally, complete any outstanding RX work on this queue */
231 if (srp
->sr_pending
!= srp
->sr_completed
)
232 sfxge_rx_qcomplete(srp
, eop
);
/*
 * eec_rx event callback: account a batch of newly-completed RX
 * descriptors on RXQ 'label', ending at ring entry 'id', tagging each
 * completed packet with 'flags' and 'size'. Triggers deferred
 * completion once SFXGE_RX_BATCH descriptors are outstanding.
 * Returns B_TRUE to make efx_ev_qpoll() stop once the per-poll RX
 * event budget (se_ev_batch) has been consumed.
 */
237 sfxge_ev_rx(void *arg
, uint32_t label
, uint32_t id
, uint32_t size
,
240 sfxge_evq_t
*sep
= arg
;
241 sfxge_t
*sp
= sep
->se_sp
;
243 sfxge_rx_packet_t
*srpp
;
244 unsigned int prefetch
;
248 ASSERT(mutex_owned(&(sep
->se_lock
)));
/* Drop events once an exception has been flagged on this EVQ */
250 if (sep
->se_exception
)
253 srp
= sp
->s_srp
[label
];
257 ASSERT3U(sep
->se_index
, ==, srp
->sr_index
);
258 ASSERT3U(id
, <, sp
->s_rxq_size
);
261 * Note that in sfxge_stop() EVQ stopped after RXQ, and will be reset
262 * So the return missing srp->sr_pending increase is safe
264 if (srp
->sr_state
!= SFXGE_RXQ_STARTED
)
/*
 * Convert the (inclusive) completion index into a [id, stop) range of
 * ring entries, masking by the power-of-two ring size.
 */
267 stop
= (id
+ 1) & (sp
->s_rxq_size
- 1);
268 id
= srp
->sr_pending
& (sp
->s_rxq_size
- 1);
270 delta
= (stop
>= id
) ? (stop
- id
) : (sp
->s_rxq_size
- id
+ stop
);
271 srp
->sr_pending
+= delta
;
/*
 * Without RX batching each event must complete exactly one descriptor;
 * an oversized delta means events arrived out of order, which is a
 * hardware error that forces a restart.
 */
274 if ((!efx_nic_cfg_get(sp
->s_enp
)->enc_rx_batching_enabled
) ||
276 (delta
> efx_nic_cfg_get(sp
->s_enp
)->enc_rx_batch_max
)) {
278 * FIXME: This does not take into account scatter
279 * aborts. See Bug40811
281 sep
->se_exception
= B_TRUE
;
283 DTRACE_PROBE(restart_ev_rx_id
);
284 /* sfxge_evq_t->se_lock held */
285 (void) sfxge_restart_dispatch(sp
, DDI_SLEEP
,
286 SFXGE_HW_ERR
, "Out of order RX event", delta
);
/* Prefetch a packet a few entries ahead to hide memory latency */
292 prefetch
= (id
+ 4) & (sp
->s_rxq_size
- 1);
293 if ((srpp
= srp
->sr_srpp
[prefetch
]) != NULL
)
294 prefetch_read_many(srpp
);
296 srpp
= srp
->sr_srpp
[id
];
297 ASSERT(srpp
!= NULL
);
298 prefetch_read_many(srpp
->srp_mp
);
/* Tag every completed packet in [id, stop) with its flags and size */
300 for (; id
!= stop
; id
= (id
+ 1) & (sp
->s_rxq_size
- 1)) {
301 srpp
= srp
->sr_srpp
[id
];
302 ASSERT(srpp
!= NULL
);
304 ASSERT3U(srpp
->srp_flags
, ==, EFX_DISCARD
);
305 srpp
->srp_flags
= flags
;
307 ASSERT3U(size
, <, (1 << 16));
308 srpp
->srp_size
= (uint16_t)size
;
313 DTRACE_PROBE2(qlevel
, unsigned int, srp
->sr_index
,
314 unsigned int, srp
->sr_added
- srp
->sr_pending
);
/* Amortize completion work over SFXGE_RX_BATCH descriptors */
316 if (srp
->sr_pending
- srp
->sr_completed
>= SFXGE_RX_BATCH
)
317 sfxge_ev_qcomplete(sep
, B_FALSE
);
320 /* returning B_TRUE makes efx_ev_qpoll() stop processing events */
321 return (sep
->se_rx
>= sep
->se_ev_batch
);
/*
 * eec_exception event callback: flag the EVQ as having taken an
 * exception and, unless the exception is just an unknown sensor event,
 * schedule a full device restart.
 */
325 sfxge_ev_exception(void *arg
, uint32_t code
, uint32_t data
)
327 sfxge_evq_t
*sep
= arg
;
328 sfxge_t
*sp
= sep
->se_sp
;
330 _NOTE(ARGUNUSED(code
))
331 _NOTE(ARGUNUSED(data
))
333 ASSERT(mutex_owned(&(sep
->se_lock
)));
/* Subsequent RX/TX events on this EVQ will be ignored */
334 sep
->se_exception
= B_TRUE
;
/* Unknown sensor events are benign; anything else needs a restart */
336 if (code
!= EFX_EXCEPTION_UNKNOWN_SENSOREVT
) {
338 DTRACE_PROBE(restart_ev_exception
);
340 /* sfxge_evq_t->se_lock held */
341 (void) sfxge_restart_dispatch(sp
, DDI_SLEEP
, SFXGE_HW_ERR
,
/*
 * eec_rxq_flush_done event callback. The flush-done event may be
 * delivered on a different EVQ than the one servicing the RXQ: handle
 * it directly when the indices match, otherwise forward it to the
 * correct EVQ as a SFXGE_MAGIC_RX_QFLUSH_DONE software event.
 */
349 sfxge_ev_rxq_flush_done(void *arg
, uint32_t rxq_index
)
351 sfxge_evq_t
*sep_targetq
, *sep
= arg
;
352 sfxge_t
*sp
= sep
->se_sp
;
358 ASSERT(mutex_owned(&(sep
->se_lock
)));
360 /* Ensure RXQ exists, as events may arrive after RXQ was destroyed */
361 srp
= sp
->s_srp
[rxq_index
];
365 /* Process right now if it is the correct event queue */
366 index
= srp
->sr_index
;
367 if (index
== sep
->se_index
) {
368 sfxge_rx_qflush_done(srp
);
372 /* Resend a software event on the correct queue */
373 sep_targetq
= sp
->s_sep
[index
];
/* Only post to a queue that is actually running */
375 if (sep_targetq
->se_state
!= SFXGE_EVQ_STARTED
)
376 goto done
; /* TBD: state test not under the lock */
/* Encode the RXQ label into the magic software-event payload */
379 ASSERT((label
& SFXGE_MAGIC_DMAQ_LABEL_MASK
) == label
);
380 magic
= SFXGE_MAGIC_RX_QFLUSH_DONE
| label
;
382 efx_ev_qpost(sep_targetq
->se_eep
, magic
);
/*
 * eec_rxq_flush_failed event callback. Mirrors
 * sfxge_ev_rxq_flush_done(): handle the failure locally when this EVQ
 * owns the RXQ, otherwise forward it to the owning EVQ as a
 * SFXGE_MAGIC_RX_QFLUSH_FAILED software event.
 */
389 sfxge_ev_rxq_flush_failed(void *arg
, uint32_t rxq_index
)
391 sfxge_evq_t
*sep_targetq
, *sep
= arg
;
392 sfxge_t
*sp
= sep
->se_sp
;
398 ASSERT(mutex_owned(&(sep
->se_lock
)));
400 /* Ensure RXQ exists, as events may arrive after RXQ was destroyed */
401 srp
= sp
->s_srp
[rxq_index
];
405 /* Process right now if it is the correct event queue */
406 index
= srp
->sr_index
;
407 if (index
== sep
->se_index
) {
408 sfxge_rx_qflush_failed(srp
);
412 /* Resend a software event on the correct queue */
413 sep_targetq
= sp
->s_sep
[index
];
/* Encode the RXQ label into the magic software-event payload */
416 ASSERT((label
& SFXGE_MAGIC_DMAQ_LABEL_MASK
) == label
);
417 magic
= SFXGE_MAGIC_RX_QFLUSH_FAILED
| label
;
/* Only post to a queue that is actually running */
419 if (sep_targetq
->se_state
!= SFXGE_EVQ_STARTED
)
420 goto done
; /* TBD: state test not under the lock */
422 efx_ev_qpost(sep_targetq
->se_eep
, magic
);
/*
 * eec_tx event callback: account a batch of newly-completed TX
 * descriptors on the TXQ bound to 'label', ending at ring entry 'id'.
 * Links the TXQ onto this EVQ's completion list (se_stp/se_stpp) so
 * sfxge_ev_qcomplete() will finish the work, and completes eagerly once
 * SFXGE_TX_BATCH descriptors are outstanding.
 * Returns B_TRUE to make efx_ev_qpoll() stop once the per-poll TX
 * event budget (se_ev_batch) has been consumed.
 */
429 sfxge_ev_tx(void *arg
, uint32_t label
, uint32_t id
)
431 sfxge_evq_t
*sep
= arg
;
436 ASSERT(mutex_owned(&(sep
->se_lock
)));
438 stp
= sep
->se_label_stp
[label
];
/* Ignore stale events for a queue that is no longer running */
442 if (stp
->st_state
!= SFXGE_TXQ_STARTED
)
445 ASSERT3U(sep
->se_index
, ==, stp
->st_evq
);
/*
 * Convert the (inclusive) completion index into a [id, stop) range of
 * ring entries, masking by the power-of-two ring size.
 */
447 stop
= (id
+ 1) & (SFXGE_TX_NDESCS
- 1);
448 id
= stp
->st_pending
& (SFXGE_TX_NDESCS
- 1);
450 delta
= (stop
>= id
) ? (stop
- id
) : (SFXGE_TX_NDESCS
- id
+ stop
);
451 stp
->st_pending
+= delta
;
/* Append the TXQ to this EVQ's completion list if not already on it */
455 if (stp
->st_next
== NULL
&&
456 sep
->se_stpp
!= &(stp
->st_next
)) {
457 *(sep
->se_stpp
) = stp
;
458 sep
->se_stpp
= &(stp
->st_next
);
461 DTRACE_PROBE2(qlevel
, unsigned int, stp
->st_index
,
462 unsigned int, stp
->st_added
- stp
->st_pending
);
/* Amortize completion work over SFXGE_TX_BATCH descriptors */
464 if (stp
->st_pending
- stp
->st_completed
>= SFXGE_TX_BATCH
)
465 sfxge_tx_qcomplete(stp
);
468 /* returning B_TRUE makes efx_ev_qpoll() stop processing events */
469 return (sep
->se_tx
>= sep
->se_ev_batch
);
/*
 * eec_txq_flush_done event callback. As with the RXQ flush callbacks,
 * the event may land on the wrong EVQ: handle it here when this EVQ
 * services the TXQ, otherwise repost it to the owning EVQ as a
 * SFXGE_MAGIC_TX_QFLUSH_DONE software event.
 */
473 sfxge_ev_txq_flush_done(void *arg
, uint32_t txq_index
)
475 sfxge_evq_t
*sep
= arg
;
476 sfxge_t
*sp
= sep
->se_sp
;
482 ASSERT(mutex_owned(&(sep
->se_lock
)));
484 /* Ensure TXQ exists, as events may arrive after TXQ was destroyed */
485 stp
= sp
->s_stp
[txq_index
];
489 /* Process right now if it is the correct event queue */
491 if (evq
== sep
->se_index
) {
492 sfxge_tx_qflush_done(stp
);
496 /* Resend a software event on the correct queue */
497 sep
= sp
->s_sep
[evq
];
499 label
= stp
->st_label
;
/* Encode the TXQ label into the magic software-event payload */
501 ASSERT((label
& SFXGE_MAGIC_DMAQ_LABEL_MASK
) == label
);
502 magic
= SFXGE_MAGIC_TX_QFLUSH_DONE
| label
;
504 ASSERT3U(sep
->se_state
, ==, SFXGE_EVQ_STARTED
);
505 efx_ev_qpost(sep
->se_eep
, magic
);
/*
 * eec_software event callback: dispatch driver-generated ("magic")
 * events. The low bits of 'magic' carry a DMA queue label; the
 * remaining bits select the operation (RX flush done/failed, RX FPP
 * trim, TX flush done). Unknown codes are logged via dev_err().
 */
512 sfxge_ev_software(void *arg
, uint16_t magic
)
514 sfxge_evq_t
*sep
= arg
;
515 sfxge_t
*sp
= sep
->se_sp
;
516 dev_info_t
*dip
= sp
->s_dip
;
519 ASSERT(mutex_owned(&(sep
->se_lock
)));
/* The magic label field must be wide enough for HW queue labels */
521 EFX_STATIC_ASSERT(SFXGE_MAGIC_DMAQ_LABEL_WIDTH
==
522 FSF_AZ_RX_EV_Q_LABEL_WIDTH
);
523 EFX_STATIC_ASSERT(SFXGE_MAGIC_DMAQ_LABEL_WIDTH
==
524 FSF_AZ_TX_EV_Q_LABEL_WIDTH
);
/* Split the event into its queue label and operation code */
526 label
= magic
& SFXGE_MAGIC_DMAQ_LABEL_MASK
;
527 magic
&= ~SFXGE_MAGIC_DMAQ_LABEL_MASK
;
530 case SFXGE_MAGIC_RX_QFLUSH_DONE
: {
531 sfxge_rxq_t
*srp
= sp
->s_srp
[label
];
534 ASSERT3U(sep
->se_index
, ==, srp
->sr_index
);
536 sfxge_rx_qflush_done(srp
);
540 case SFXGE_MAGIC_RX_QFLUSH_FAILED
: {
541 sfxge_rxq_t
*srp
= sp
->s_srp
[label
];
544 ASSERT3U(sep
->se_index
, ==, srp
->sr_index
);
546 sfxge_rx_qflush_failed(srp
);
550 case SFXGE_MAGIC_RX_QFPP_TRIM
: {
551 sfxge_rxq_t
*srp
= sp
->s_srp
[label
];
554 ASSERT3U(sep
->se_index
, ==, srp
->sr_index
);
556 sfxge_rx_qfpp_trim(srp
);
560 case SFXGE_MAGIC_TX_QFLUSH_DONE
: {
561 sfxge_txq_t
*stp
= sep
->se_label_stp
[label
];
564 ASSERT3U(sep
->se_index
, ==, stp
->st_evq
);
566 sfxge_tx_qflush_done(stp
);
/* Unknown magic: log and carry on */
571 dev_err(dip
, CE_NOTE
,
572 SFXGE_CMN_ERR
"unknown software event 0x%x", magic
);
/*
 * eec_sram event callback: trace SRAM update/clear/illegal-clear
 * notifications. No driver state is changed here.
 */
580 sfxge_ev_sram(void *arg
, uint32_t code
)
582 _NOTE(ARGUNUSED(arg
))
585 case EFX_SRAM_UPDATE
:
586 DTRACE_PROBE(sram_update
);
590 DTRACE_PROBE(sram_clear
);
593 case EFX_SRAM_ILLEGAL_CLEAR
:
594 DTRACE_PROBE(sram_illegal_clear
);
/* eec_timer event callback: intentionally a no-op. */
606 sfxge_ev_timer(void *arg
, uint32_t index
)
608 _NOTE(ARGUNUSED(arg
, index
))
/* eec_wake_up event callback: intentionally a no-op. */
614 sfxge_ev_wake_up(void *arg
, uint32_t index
)
616 _NOTE(ARGUNUSED(arg
, index
))
/*
 * eec_link_change event callback: forward the new link mode to the MAC
 * layer.
 */
622 sfxge_ev_link_change(void *arg
, efx_link_mode_t link_mode
)
624 sfxge_evq_t
*sep
= arg
;
625 sfxge_t
*sp
= sep
->se_sp
;
627 sfxge_mac_link_update(sp
, link_mode
);
/*
 * kstat ks_update callback for an EVQ: refresh the named statistics
 * from the common-code EVQ counters and record the CPU the queue last
 * polled on. Read-only; ks_lock (se_lock) is held by the kstat
 * framework when this runs.
 */
633 sfxge_ev_kstat_update(kstat_t
*ksp
, int rw
)
635 sfxge_evq_t
*sep
= ksp
->ks_private
;
/* Statistics are read-only; reject KSTAT_WRITE */
639 if (rw
!= KSTAT_READ
) {
644 ASSERT(mutex_owned(&(sep
->se_lock
)));
/* Hardware counters are only valid while the queue is running */
646 if (sep
->se_state
!= SFXGE_EVQ_STARTED
)
649 efx_ev_qstats_update(sep
->se_eep
, sep
->se_stat
);
/* Trailing named stat: CPU this EVQ was last polled on */
654 knp
->value
.ui64
= sep
->se_cpu_id
;
660 DTRACE_PROBE1(fail1
, int, rc
);
/*
 * Create and install the per-EVQ kstat set ("<driver>_evq%04d"):
 * one named stat per common-code EVQ statistic plus a trailing "cpu"
 * entry. Returns 0 on success, non-zero errno on failure.
 */
666 sfxge_ev_kstat_init(sfxge_evq_t
*sep
)
668 sfxge_t
*sp
= sep
->se_sp
;
669 unsigned int index
= sep
->se_index
;
670 dev_info_t
*dip
= sp
->s_dip
;
673 char name
[MAXNAMELEN
];
677 /* Determine the name */
678 (void) snprintf(name
, MAXNAMELEN
- 1, "%s_evq%04d",
679 ddi_driver_name(dip
), index
);
/* One slot per EVQ statistic plus one for the "cpu" entry */
682 if ((ksp
= kstat_create((char *)ddi_driver_name(dip
),
683 ddi_get_instance(dip
), name
, "queue", KSTAT_TYPE_NAMED
,
684 EV_NQSTATS
+ 1, 0)) == NULL
) {
691 ksp
->ks_update
= sfxge_ev_kstat_update
;
692 ksp
->ks_private
= sep
;
/* se_lock doubles as the kstat lock */
693 ksp
->ks_lock
= &(sep
->se_lock
);
695 /* Initialise the named stats */
696 sep
->se_stat
= knp
= ksp
->ks_data
;
697 for (id
= 0; id
< EV_NQSTATS
; id
++) {
698 kstat_named_init(knp
, (char *)efx_ev_qstat_name(sp
->s_enp
, id
),
703 kstat_named_init(knp
, "cpu", KSTAT_DATA_UINT64
);
709 DTRACE_PROBE1(fail1
, int, rc
);
/* Tear down the per-EVQ kstat set created by sfxge_ev_kstat_init(). */
715 sfxge_ev_kstat_fini(sfxge_evq_t
*sep
)
717 /* Destroy the set */
718 kstat_delete(sep
->se_ksp
);
/*
 * Return the smallest power of two that is >= n (returns 1 for
 * n <= 1). Used to round EVQ sizes up to the power of two the
 * hardware requires.
 *
 * Declared static: a file-scope plain `inline` definition in C99/C11
 * has external linkage but emits no out-of-line symbol, which can fail
 * to link when the compiler chooses not to inline (e.g. at -O0).
 * The helper is only used within this file.
 */
static inline unsigned
pow2_ge(unsigned int n)
{
	unsigned int order = 0;

	/* Find the first power of two that reaches n */
	while ((1ul << order) < n)
		++order;
	return (1ul << order);
}
/*
 * Allocate and initialize EVQ 'index': take an object from the
 * appropriate kmem cache (EVQ 0 has its own, larger-sized cache),
 * set up its lock, condvar and kstats, and publish it in s_sep[].
 * Returns 0 on success, non-zero errno on failure (object is unwound
 * and returned to its cache).
 */
731 sfxge_ev_qinit(sfxge_t
*sp
, unsigned int index
, unsigned int ev_batch
)
736 ASSERT3U(index
, <, SFXGE_RX_SCALE_MAX
);
/* EVQ 0 is sized differently, so it lives in its own cache */
738 sep
= kmem_cache_alloc(index
? sp
->s_eqXc
: sp
->s_eq0c
, KM_SLEEP
);
743 ASSERT3U(sep
->se_state
, ==, SFXGE_EVQ_UNINITIALIZED
);
745 sep
->se_index
= index
;
/* The lock is taken from interrupt context: use the interrupt pri */
747 mutex_init(&(sep
->se_lock
), NULL
,
748 MUTEX_DRIVER
, DDI_INTR_PRI(sp
->s_intr
.si_intr_pri
));
750 cv_init(&(sep
->se_init_kv
), NULL
, CV_DRIVER
, NULL
);
752 /* Initialize the statistics */
753 if ((rc
= sfxge_ev_kstat_init(sep
)) != 0)
756 sep
->se_state
= SFXGE_EVQ_INITIALIZED
;
757 sep
->se_ev_batch
= (uint16_t)ev_batch
;
758 sp
->s_sep
[index
] = sep
;
/* Failure path: undo the synchronization objects and free the EVQ */
767 cv_destroy(&(sep
->se_init_kv
));
768 mutex_destroy(&(sep
->se_lock
));
770 kmem_cache_free(index
? sp
->s_eqXc
: sp
->s_eq0c
, sep
);
773 DTRACE_PROBE1(fail1
, int, rc
);
/*
 * Start EVQ 'index': program the buffer table, register the event
 * callbacks, create the common-code event queue, set moderation,
 * prime it for interrupts and wait (up to
 * SFXGE_EV_QSTART_TIMEOUT_USEC) for the DRIVER_EV_START event that
 * sfxge_ev_initialized() turns into a STARTING -> STARTED transition.
 * Returns 0 on success, non-zero errno on failure with everything
 * unwound.
 */
779 sfxge_ev_qstart(sfxge_t
*sp
, unsigned int index
)
781 sfxge_evq_t
*sep
= sp
->s_sep
[index
];
782 sfxge_intr_t
*sip
= &(sp
->s_intr
);
783 efx_nic_t
*enp
= sp
->s_enp
;
784 efx_ev_callbacks_t
*eecp
;
/* EVQ 0 and the other EVQs are sized differently */
788 uint16_t evq_size
= index
? sp
->s_evqX_size
: sp
->s_evq0_size
;
790 mutex_enter(&(sep
->se_lock
));
791 esmp
= &(sep
->se_mem
);
793 ASSERT3U(sep
->se_state
, ==, SFXGE_EVQ_INITIALIZED
);
795 /* Set the memory to all ones */
796 (void) memset(esmp
->esm_base
, 0xff, EFX_EVQ_SIZE(evq_size
));
798 /* Program the buffer table */
799 if ((rc
= sfxge_sram_buf_tbl_set(sp
, sep
->se_id
, esmp
,
800 EFX_EVQ_NBUFS(evq_size
))) != 0)
803 /* Set up the event callbacks */
804 eecp
= &(sep
->se_eec
);
805 eecp
->eec_initialized
= sfxge_ev_initialized
;
806 eecp
->eec_rx
= sfxge_ev_rx
;
807 eecp
->eec_tx
= sfxge_ev_tx
;
808 eecp
->eec_exception
= sfxge_ev_exception
;
809 eecp
->eec_rxq_flush_done
= sfxge_ev_rxq_flush_done
;
810 eecp
->eec_rxq_flush_failed
= sfxge_ev_rxq_flush_failed
;
811 eecp
->eec_txq_flush_done
= sfxge_ev_txq_flush_done
;
812 eecp
->eec_software
= sfxge_ev_software
;
813 eecp
->eec_sram
= sfxge_ev_sram
;
814 eecp
->eec_wake_up
= sfxge_ev_wake_up
;
815 eecp
->eec_timer
= sfxge_ev_timer
;
816 eecp
->eec_link_change
= sfxge_ev_link_change
;
818 /* Create the event queue */
819 if ((rc
= efx_ev_qcreate(enp
, index
, esmp
, evq_size
, sep
->se_id
,
820 &(sep
->se_eep
))) != 0)
823 /* Set the default moderation */
824 if ((rc
= efx_ev_qmoderate(sep
->se_eep
, sp
->s_ev_moderation
)) != 0)
827 /* Check that interrupts are enabled at the NIC */
828 if (sip
->si_state
!= SFXGE_INTR_STARTED
) {
833 sep
->se_state
= SFXGE_EVQ_STARTING
;
835 /* Prime the event queue for interrupts */
836 if ((rc
= efx_ev_qprime(sep
->se_eep
, sep
->se_count
)) != 0)
839 /* Wait for the initialization event */
840 timeout
= ddi_get_lbolt() + drv_usectohz(SFXGE_EV_QSTART_TIMEOUT_USEC
);
/* sfxge_ev_initialized() sets STARTED and broadcasts se_init_kv */
841 while (sep
->se_state
!= SFXGE_EVQ_STARTED
) {
842 if (cv_timedwait(&(sep
->se_init_kv
), &(sep
->se_lock
),
844 /* Timeout waiting for initialization */
845 dev_info_t
*dip
= sp
->s_dip
;
847 DTRACE_PROBE(timeout
);
848 dev_err(dip
, CE_NOTE
,
849 SFXGE_CMN_ERR
"evq[%d] qstart timeout", index
);
856 mutex_exit(&(sep
->se_lock
));
/* Failure paths: roll the state back and undo each start step */
865 sep
->se_state
= SFXGE_EVQ_INITIALIZED
;
873 /* Destroy the event queue */
874 efx_ev_qdestroy(sep
->se_eep
);
880 /* Zero out the event handlers */
881 bzero(&(sep
->se_eec
), sizeof (efx_ev_callbacks_t
));
883 /* Clear entries from the buffer table */
884 sfxge_sram_buf_tbl_clear(sp
, sep
->se_id
, EFX_EVQ_NBUFS(evq_size
));
887 DTRACE_PROBE1(fail1
, int, rc
);
889 mutex_exit(&(sep
->se_lock
));
/*
 * Poll EVQ 'index' from interrupt/softint context: refresh the CPU
 * affinity bookkeeping (kicking an RX-scale update if the servicing
 * CPU changed), sync the DMA ring, hand the events to
 * efx_ev_qpoll() via the registered callbacks, run the deferred
 * completion work and re-prime the queue for the next interrupt.
 * Returns 0 on success, non-zero errno otherwise.
 */
895 sfxge_ev_qpoll(sfxge_t
*sp
, unsigned int index
)
897 sfxge_evq_t
*sep
= sp
->s_sep
[index
];
898 processorid_t cpu_id
;
900 uint16_t evq_size
= index
? sp
->s_evqX_size
: sp
->s_evq0_size
;
902 mutex_enter(&(sep
->se_lock
));
/* Polling is only legal while the queue is starting or started */
904 if (sep
->se_state
!= SFXGE_EVQ_STARTING
&&
905 sep
->se_state
!= SFXGE_EVQ_STARTED
) {
910 /* Make sure the CPU information is up to date */
911 cpu_id
= CPU
->cpu_id
;
913 if (cpu_id
!= sep
->se_cpu_id
) {
914 sep
->se_cpu_id
= cpu_id
;
916 /* sfxge_evq_t->se_lock held */
917 (void) ddi_taskq_dispatch(sp
->s_tqp
, sfxge_rx_scale_update
, sp
,
921 /* Synchronize the DMA memory for reading */
922 (void) ddi_dma_sync(sep
->se_mem
.esm_dma_handle
,
924 EFX_EVQ_SIZE(evq_size
),
925 DDI_DMA_SYNC_FORKERNEL
);
/* Per-poll counters and the TXQ completion list must start empty */
927 ASSERT3U(sep
->se_rx
, ==, 0);
928 ASSERT3U(sep
->se_tx
, ==, 0);
929 ASSERT3P(sep
->se_stp
, ==, NULL
);
930 ASSERT3P(sep
->se_stpp
, ==, &(sep
->se_stp
));
/* Dispatch events to the eec_* callbacks registered at qstart */
933 efx_ev_qpoll(sep
->se_eep
, &(sep
->se_count
), &(sep
->se_eec
),
939 /* Perform any pending completion processing */
940 sfxge_ev_qcomplete(sep
, B_TRUE
);
942 /* Re-prime the event queue for interrupts */
943 if ((rc
= efx_ev_qprime(sep
->se_eep
, sep
->se_count
)) != 0)
946 mutex_exit(&(sep
->se_lock
));
953 DTRACE_PROBE1(fail1
, int, rc
);
955 mutex_exit(&(sep
->se_lock
));
/*
 * Re-arm EVQ 'index' for interrupts at its current read pointer.
 * Fails if the queue is neither STARTING nor STARTED.
 * Returns 0 on success, non-zero errno otherwise.
 */
961 sfxge_ev_qprime(sfxge_t
*sp
, unsigned int index
)
963 sfxge_evq_t
*sep
= sp
->s_sep
[index
];
966 mutex_enter(&(sep
->se_lock
));
968 if (sep
->se_state
!= SFXGE_EVQ_STARTING
&&
969 sep
->se_state
!= SFXGE_EVQ_STARTED
) {
974 if ((rc
= efx_ev_qprime(sep
->se_eep
, sep
->se_count
)) != 0)
977 mutex_exit(&(sep
->se_lock
));
984 DTRACE_PROBE1(fail1
, int, rc
);
986 mutex_exit(&(sep
->se_lock
));
/*
 * Set the interrupt moderation of EVQ 'index' to 'us' microseconds.
 * The queue must already be STARTED. Returns the common-code result.
 */
993 sfxge_ev_qmoderate(sfxge_t
*sp
, unsigned int index
, unsigned int us
)
995 sfxge_evq_t
*sep
= sp
->s_sep
[index
];
996 efx_evq_t
*eep
= sep
->se_eep
;
998 ASSERT3U(sep
->se_state
, ==, SFXGE_EVQ_STARTED
);
1000 return (efx_ev_qmoderate(eep
, us
));
/*
 * Stop EVQ 'index': move it back to INITIALIZED, clear per-queue soft
 * state, destroy the common-code queue and release its buffer-table
 * programming. Inverse of sfxge_ev_qstart().
 */
1004 sfxge_ev_qstop(sfxge_t
*sp
, unsigned int index
)
1006 sfxge_evq_t
*sep
= sp
->s_sep
[index
];
1009 mutex_enter(&(sep
->se_lock
));
1010 ASSERT3U(sep
->se_state
, ==, SFXGE_EVQ_STARTED
);
1011 sep
->se_state
= SFXGE_EVQ_INITIALIZED
;
1012 evq_size
= index
? sp
->s_evqX_size
: sp
->s_evq0_size
;
1014 /* Clear the CPU information */
1017 /* Clear the event count */
1020 /* Reset the exception flag */
1021 sep
->se_exception
= B_FALSE
;
1023 /* Destroy the event queue */
1024 efx_ev_qdestroy(sep
->se_eep
);
1027 mutex_exit(&(sep
->se_lock
));
1029 /* Zero out the event handlers */
1030 bzero(&(sep
->se_eec
), sizeof (efx_ev_callbacks_t
));
1032 /* Clear entries from the buffer table */
1033 sfxge_sram_buf_tbl_clear(sp
, sep
->se_id
, EFX_EVQ_NBUFS(evq_size
));
/*
 * Finalize EVQ 'index': unpublish it from s_sep[], tear down its
 * kstats and synchronization objects, and return it to its kmem
 * cache. Inverse of sfxge_ev_qinit().
 */
1037 sfxge_ev_qfini(sfxge_t
*sp
, unsigned int index
)
1039 sfxge_evq_t
*sep
= sp
->s_sep
[index
];
1041 ASSERT3U(sep
->se_state
, ==, SFXGE_EVQ_INITIALIZED
);
1043 sp
->s_sep
[index
] = NULL
;
1044 sep
->se_state
= SFXGE_EVQ_UNINITIALIZED
;
1046 /* Tear down the statistics */
1047 sfxge_ev_kstat_fini(sep
);
1049 cv_destroy(&(sep
->se_init_kv
));
1050 mutex_destroy(&(sep
->se_lock
));
/* EVQ 0 and the other EVQs come from different caches */
1054 kmem_cache_free(index
? sp
->s_eqXc
: sp
->s_eq0c
, sep
);
/*
 * Bind TXQ 'stp' to a TX label on EVQ 'evq'. Scans se_label_stp[] for
 * an existing binding of this TXQ (reused if found) or the first free
 * slot; the chosen label index is returned through *labelp.
 * Returns 0 on success, non-zero errno on bad arguments or when all
 * SFXGE_TX_NLABELS labels are in use.
 */
1058 sfxge_ev_txlabel_alloc(sfxge_t
*sp
, unsigned int evq
, sfxge_txq_t
*stp
,
1059 unsigned int *labelp
)
1061 sfxge_evq_t
*sep
= sp
->s_sep
[evq
];
1066 mutex_enter(&(sep
->se_lock
));
1068 if (stp
== NULL
|| labelp
== NULL
) {
/* Prefer an existing binding; otherwise remember the first free slot */
1074 for (label
= 0; label
< SFXGE_TX_NLABELS
; label
++) {
1075 if (sep
->se_label_stp
[label
] == stp
) {
1079 if ((stpp
== NULL
) && (sep
->se_label_stp
[label
] == NULL
)) {
1080 stpp
= &sep
->se_label_stp
[label
];
/* Convert the chosen slot pointer back into a label index */
1088 label
= stpp
- sep
->se_label_stp
;
1090 ASSERT3U(label
, <, SFXGE_TX_NLABELS
);
1093 mutex_exit(&(sep
->se_lock
));
1097 DTRACE_PROBE(fail3
);
1099 DTRACE_PROBE(fail2
);
1101 DTRACE_PROBE1(fail1
, int, rc
);
1103 mutex_exit(&(sep
->se_lock
));
/*
 * Release the binding of TXQ 'stp' to 'label' on EVQ 'evq', validating
 * that the label really refers to this TXQ first.
 * Returns 0 on success, non-zero errno on bad arguments or mismatch.
 */
1110 sfxge_ev_txlabel_free(sfxge_t
*sp
, unsigned int evq
, sfxge_txq_t
*stp
,
1113 sfxge_evq_t
*sep
= sp
->s_sep
[evq
];
1116 mutex_enter(&(sep
->se_lock
));
1118 if (stp
== NULL
|| label
> SFXGE_TX_NLABELS
) {
/* The label must currently map to the TXQ being released */
1123 if (sep
->se_label_stp
[label
] != stp
) {
1127 sep
->se_label_stp
[label
] = NULL
;
1129 mutex_exit(&(sep
->se_lock
));
1134 DTRACE_PROBE(fail2
);
1136 DTRACE_PROBE1(fail1
, int, rc
);
1138 mutex_exit(&(sep
->se_lock
));
1144 static kmem_cache_t
*
1145 sfxge_ev_kmem_cache_create(sfxge_t
*sp
, const char *qname
,
1146 int (*ctor
)(void *, void *, int), void (*dtor
)(void *, void *))
1148 char name
[MAXNAMELEN
];
1151 (void) snprintf(name
, MAXNAMELEN
- 1, "%s%d_%s_cache",
1152 ddi_driver_name(sp
->s_dip
), ddi_get_instance(sp
->s_dip
), qname
);
1154 eqc
= kmem_cache_create(name
, sizeof (sfxge_evq_t
),
1155 SFXGE_CPU_CACHE_SIZE
, ctor
, dtor
, NULL
, sp
, NULL
, 0);
1156 ASSERT(eqc
!= NULL
);
/*
 * Module init: size the event queues (EVQ 0 additionally carries the
 * non-checksum and IP-checksum-only TXQs, so it is larger), read the
 * "intr_moderation" and "ev_batch" driver properties, create the two
 * EVQ kmem caches and initialize one EVQ per allocated interrupt.
 * Returns 0 on success, non-zero errno on failure with everything
 * unwound.
 */
1161 sfxge_ev_init(sfxge_t
*sp
)
1163 sfxge_intr_t
*sip
= &(sp
->s_intr
);
1164 unsigned int evq0_size
;
1165 unsigned int evqX_size
;
1166 unsigned int ev_batch
;
1170 ASSERT3U(sip
->si_state
, ==, SFXGE_INTR_INITIALIZED
);
1173 * Must account for RXQ, TXQ(s); MCDI not event completed at present
1174 * Note that common code does not completely fill descriptor queues
1176 evqX_size
= sp
->s_rxq_size
+ SFXGE_TX_NDESCS
;
1177 evq0_size
= evqX_size
+ SFXGE_TX_NDESCS
; /* only IP checksum TXQ */
1178 evq0_size
+= SFXGE_TX_NDESCS
; /* no checksums */
1180 ASSERT3U(evqX_size
, >=, EFX_EVQ_MINNEVS
);
1181 ASSERT3U(evq0_size
, >, evqX_size
);
1183 if (evq0_size
> EFX_EVQ_MAXNEVS
) {
/* Hardware requires power-of-two EVQ sizes */
1188 sp
->s_evq0_size
= pow2_ge(evq0_size
);
1189 sp
->s_evqX_size
= pow2_ge(evqX_size
);
1191 /* Read driver parameters */
1192 sp
->s_ev_moderation
= ddi_prop_get_int(DDI_DEV_T_ANY
, sp
->s_dip
,
1193 DDI_PROP_DONTPASS
, "intr_moderation", SFXGE_DEFAULT_MODERATION
);
1195 ev_batch
= ddi_prop_get_int(DDI_DEV_T_ANY
, sp
->s_dip
,
1196 DDI_PROP_DONTPASS
, "ev_batch", SFXGE_EV_BATCH
);
1199 * It is slightly peverse to have a cache for one item. But it allows
1200 * for simple alignment control without increasing the allocation size
1202 sp
->s_eq0c
= sfxge_ev_kmem_cache_create(sp
, "evq0", sfxge_ev_q0ctor
,
1204 sp
->s_eqXc
= sfxge_ev_kmem_cache_create(sp
, "evqX", sfxge_ev_qXctor
,
1207 /* Initialize the event queue(s) */
1208 for (index
= 0; index
< sip
->si_nalloc
; index
++) {
1209 if ((rc
= sfxge_ev_qinit(sp
, index
, ev_batch
)) != 0)
/* Failure paths: tear down any queues already initialized */
1216 DTRACE_PROBE(fail2
);
1218 while (--index
>= 0)
1219 sfxge_ev_qfini(sp
, index
);
1220 sp
->s_ev_moderation
= 0;
1223 DTRACE_PROBE1(fail1
, int, rc
);
1225 kmem_cache_destroy(sp
->s_eqXc
);
1226 kmem_cache_destroy(sp
->s_eq0c
);
/*
 * Module start: initialize the common-code event module and start
 * every EVQ. On failure, already-started queues are stopped and the
 * event module torn down. Returns 0 on success, non-zero errno
 * otherwise.
 */
1234 sfxge_ev_start(sfxge_t
*sp
)
1236 sfxge_intr_t
*sip
= &(sp
->s_intr
);
1237 efx_nic_t
*enp
= sp
->s_enp
;
1241 ASSERT3U(sip
->si_state
, ==, SFXGE_INTR_STARTED
);
1243 /* Initialize the event module */
1244 if ((rc
= efx_ev_init(enp
)) != 0)
1247 /* Start the event queues */
1248 for (index
= 0; index
< sip
->si_nalloc
; index
++) {
1249 if ((rc
= sfxge_ev_qstart(sp
, index
)) != 0)
1256 DTRACE_PROBE(fail2
);
1258 /* Stop the event queue(s) */
1259 while (--index
>= 0)
1260 sfxge_ev_qstop(sp
, index
);
1262 /* Tear down the event module */
1266 DTRACE_PROBE1(fail1
, int, rc
);
/* Report the current interrupt moderation (microseconds) via *usp. */
1272 sfxge_ev_moderation_get(sfxge_t
*sp
, unsigned int *usp
)
1274 *usp
= sp
->s_ev_moderation
;
/*
 * Set the interrupt moderation of every EVQ to 'us' microseconds and
 * record it in s_ev_moderation. Only valid while interrupts are
 * started. Returns 0 on success, non-zero errno otherwise.
 */
1278 sfxge_ev_moderation_set(sfxge_t
*sp
, unsigned int us
)
1280 sfxge_intr_t
*sip
= &(sp
->s_intr
);
1284 if (sip
->si_state
!= SFXGE_INTR_STARTED
)
1287 for (index
= 0; index
< sip
->si_nalloc
; index
++) {
1288 if ((rc
= sfxge_ev_qmoderate(sp
, index
, us
)) != 0)
1292 sp
->s_ev_moderation
= us
;
1296 DTRACE_PROBE1(fail1
, int, rc
);
1298 /* The only error path is if the value to set to is invalid. */
1299 ASSERT3U(index
, ==, 0);
/*
 * Module stop: stop every EVQ (in reverse order) and tear down the
 * common-code event module. Inverse of sfxge_ev_start().
 */
1305 sfxge_ev_stop(sfxge_t
*sp
)
1307 sfxge_intr_t
*sip
= &(sp
->s_intr
);
1308 efx_nic_t
*enp
= sp
->s_enp
;
1311 ASSERT3U(sip
->si_state
, ==, SFXGE_INTR_STARTED
);
1313 /* Stop the event queue(s) */
1314 index
= sip
->si_nalloc
;
1315 while (--index
>= 0)
1316 sfxge_ev_qstop(sp
, index
);
1318 /* Tear down the event module */
/*
 * Module fini: finalize every EVQ (in reverse order) and destroy the
 * two EVQ kmem caches. Inverse of sfxge_ev_init().
 */
1323 sfxge_ev_fini(sfxge_t
*sp
)
1325 sfxge_intr_t
*sip
= &(sp
->s_intr
);
1328 ASSERT3U(sip
->si_state
, ==, SFXGE_INTR_INITIALIZED
);
1330 sp
->s_ev_moderation
= 0;
1332 /* Tear down the event queue(s) */
1333 index
= sip
->si_nalloc
;
1334 while (--index
>= 0)
1335 sfxge_ev_qfini(sp
, index
);
1337 kmem_cache_destroy(sp
->s_eqXc
);
1338 kmem_cache_destroy(sp
->s_eq0c
);